1 //===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // These classes wrap the information about a call or function 10 // definition used to handle ABI compliancy. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "TargetInfo.h" 15 #include "ABIInfo.h" 16 #include "CGBlocks.h" 17 #include "CGCXXABI.h" 18 #include "CGValue.h" 19 #include "CodeGenFunction.h" 20 #include "clang/AST/RecordLayout.h" 21 #include "clang/Basic/CodeGenOptions.h" 22 #include "clang/CodeGen/CGFunctionInfo.h" 23 #include "clang/CodeGen/SwiftCallingConv.h" 24 #include "llvm/ADT/StringExtras.h" 25 #include "llvm/ADT/StringSwitch.h" 26 #include "llvm/ADT/Triple.h" 27 #include "llvm/ADT/Twine.h" 28 #include "llvm/IR/DataLayout.h" 29 #include "llvm/IR/Type.h" 30 #include "llvm/Support/raw_ostream.h" 31 #include <algorithm> // std::sort 32 33 using namespace clang; 34 using namespace CodeGen; 35 36 // Helper for coercing an aggregate argument or return value into an integer 37 // array of the same size (including padding) and alignment. This alternate 38 // coercion happens only for the RenderScript ABI and can be removed after 39 // runtimes that rely on it are no longer supported. 40 // 41 // RenderScript assumes that the size of the argument / return value in the IR 42 // is the same as the size of the corresponding qualified type. This helper 43 // coerces the aggregate type into an array of the same size (including 44 // padding). This coercion is used in lieu of expansion of struct members or 45 // other canonical coercions that return a coerced-type of larger size. 46 // 47 // Ty - The argument / return value type 48 // Context - The associated ASTContext 49 // LLVMContext - The associated LLVMContext 50 static ABIArgInfo coerceToIntArray(QualType Ty, 51 ASTContext &Context, 52 llvm::LLVMContext &LLVMContext) { 53 // Alignment and Size are measured in bits. 54 const uint64_t Size = Context.getTypeSize(Ty); 55 const uint64_t Alignment = Context.getTypeAlign(Ty); 56 llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment); 57 const uint64_t NumElements = (Size + Alignment - 1) / Alignment; 58 return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements)); 59 } 60 61 static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, 62 llvm::Value *Array, 63 llvm::Value *Value, 64 unsigned FirstIndex, 65 unsigned LastIndex) { 66 // Alternatively, we could emit this as a loop in the source. 
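  // Unrolled store: write Value into each i8 slot of Array in the inclusive
  // range [FirstIndex, LastIndex]; this is used below when initializing the
  // DWARF EH register-size tables.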
67 for (unsigned I = FirstIndex; I <= LastIndex; ++I) { 68 llvm::Value *Cell = 69 Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I); 70 Builder.CreateAlignedStore(Value, Cell, CharUnits::One()); 71 } 72 } 73 74 static bool isAggregateTypeForABI(QualType T) { 75 return !CodeGenFunction::hasScalarEvaluationKind(T) || 76 T->isMemberFunctionPointerType(); 77 } 78 79 ABIArgInfo 80 ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByRef, bool Realign, 81 llvm::Type *Padding) const { 82 return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty), 83 ByRef, Realign, Padding); 84 } 85 86 ABIArgInfo 87 ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const { 88 return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty), 89 /*ByRef*/ false, Realign); 90 } 91 92 Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, 93 QualType Ty) const { 94 return Address::invalid(); 95 } 96 97 ABIInfo::~ABIInfo() {} 98 99 /// Does the given lowering require more than the given number of 100 /// registers when expanded? 101 /// 102 /// This is intended to be the basis of a reasonable basic implementation 103 /// of should{Pass,Return}IndirectlyForSwift. 104 /// 105 /// For most targets, a limit of four total registers is reasonable; this 106 /// limits the amount of code required in order to move around the value 107 /// in case it wasn't produced immediately prior to the call by the caller 108 /// (or wasn't produced in exactly the right registers) or isn't used 109 /// immediately within the callee. But some targets may need to further 110 /// limit the register count due to an inability to support that many 111 /// return registers. 112 static bool occupiesMoreThan(CodeGenTypes &cgt, 113 ArrayRef<llvm::Type*> scalarTypes, 114 unsigned maxAllRegisters) { 115 unsigned intCount = 0, fpCount = 0; 116 for (llvm::Type *type : scalarTypes) { 117 if (type->isPointerTy()) { 118 intCount++; 119 } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) { 120 auto ptrWidth = cgt.getTarget().getPointerWidth(0); 121 intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth; 122 } else { 123 assert(type->isVectorTy() || type->isFloatingPointTy()); 124 fpCount++; 125 } 126 } 127 128 return (intCount + fpCount > maxAllRegisters); 129 } 130 131 bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize, 132 llvm::Type *eltTy, 133 unsigned numElts) const { 134 // The default implementation of this assumes that the target guarantees 135 // 128-bit SIMD support but nothing more. 
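  // Accept only vectors larger than 8 bytes and no larger than 16 bytes
  // (128 bits).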
136 return (vectorSize.getQuantity() > 8 && vectorSize.getQuantity() <= 16); 137 } 138 139 static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, 140 CGCXXABI &CXXABI) { 141 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); 142 if (!RD) { 143 if (!RT->getDecl()->canPassInRegisters()) 144 return CGCXXABI::RAA_Indirect; 145 return CGCXXABI::RAA_Default; 146 } 147 return CXXABI.getRecordArgABI(RD); 148 } 149 150 static CGCXXABI::RecordArgABI getRecordArgABI(QualType T, 151 CGCXXABI &CXXABI) { 152 const RecordType *RT = T->getAs<RecordType>(); 153 if (!RT) 154 return CGCXXABI::RAA_Default; 155 return getRecordArgABI(RT, CXXABI); 156 } 157 158 static bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI, 159 const ABIInfo &Info) { 160 QualType Ty = FI.getReturnType(); 161 162 if (const auto *RT = Ty->getAs<RecordType>()) 163 if (!isa<CXXRecordDecl>(RT->getDecl()) && 164 !RT->getDecl()->canPassInRegisters()) { 165 FI.getReturnInfo() = Info.getNaturalAlignIndirect(Ty); 166 return true; 167 } 168 169 return CXXABI.classifyReturnType(FI); 170 } 171 172 /// Pass transparent unions as if they were the type of the first element. Sema 173 /// should ensure that all elements of the union have the same "machine type". 174 static QualType useFirstFieldIfTransparentUnion(QualType Ty) { 175 if (const RecordType *UT = Ty->getAsUnionType()) { 176 const RecordDecl *UD = UT->getDecl(); 177 if (UD->hasAttr<TransparentUnionAttr>()) { 178 assert(!UD->field_empty() && "sema created an empty transparent union"); 179 return UD->field_begin()->getType(); 180 } 181 } 182 return Ty; 183 } 184 185 CGCXXABI &ABIInfo::getCXXABI() const { 186 return CGT.getCXXABI(); 187 } 188 189 ASTContext &ABIInfo::getContext() const { 190 return CGT.getContext(); 191 } 192 193 llvm::LLVMContext &ABIInfo::getVMContext() const { 194 return CGT.getLLVMContext(); 195 } 196 197 const llvm::DataLayout &ABIInfo::getDataLayout() const { 198 return CGT.getDataLayout(); 199 } 200 201 const TargetInfo &ABIInfo::getTarget() const { 202 return CGT.getTarget(); 203 } 204 205 const CodeGenOptions &ABIInfo::getCodeGenOpts() const { 206 return CGT.getCodeGenOpts(); 207 } 208 209 bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); } 210 211 bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 212 return false; 213 } 214 215 bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, 216 uint64_t Members) const { 217 return false; 218 } 219 220 LLVM_DUMP_METHOD void ABIArgInfo::dump() const { 221 raw_ostream &OS = llvm::errs(); 222 OS << "(ABIArgInfo Kind="; 223 switch (TheKind) { 224 case Direct: 225 OS << "Direct Type="; 226 if (llvm::Type *Ty = getCoerceToType()) 227 Ty->print(OS); 228 else 229 OS << "null"; 230 break; 231 case Extend: 232 OS << "Extend"; 233 break; 234 case Ignore: 235 OS << "Ignore"; 236 break; 237 case InAlloca: 238 OS << "InAlloca Offset=" << getInAllocaFieldIndex(); 239 break; 240 case Indirect: 241 OS << "Indirect Align=" << getIndirectAlign().getQuantity() 242 << " ByVal=" << getIndirectByVal() 243 << " Realign=" << getIndirectRealign(); 244 break; 245 case Expand: 246 OS << "Expand"; 247 break; 248 case CoerceAndExpand: 249 OS << "CoerceAndExpand Type="; 250 getCoerceAndExpandType()->print(OS); 251 break; 252 } 253 OS << ")\n"; 254 } 255 256 // Dynamically round a pointer up to a multiple of the given alignment. 
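// For example, with Align == 8 this computes (Ptr + 7) & ~7 on the pointer's
// integer value.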
257 static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF, 258 llvm::Value *Ptr, 259 CharUnits Align) { 260 llvm::Value *PtrAsInt = Ptr; 261 // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align; 262 PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy); 263 PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt, 264 llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1)); 265 PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt, 266 llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity())); 267 PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt, 268 Ptr->getType(), 269 Ptr->getName() + ".aligned"); 270 return PtrAsInt; 271 } 272 273 /// Emit va_arg for a platform using the common void* representation, 274 /// where arguments are simply emitted in an array of slots on the stack. 275 /// 276 /// This version implements the core direct-value passing rules. 277 /// 278 /// \param SlotSize - The size and alignment of a stack slot. 279 /// Each argument will be allocated to a multiple of this number of 280 /// slots, and all the slots will be aligned to this value. 281 /// \param AllowHigherAlign - The slot alignment is not a cap; 282 /// an argument type with an alignment greater than the slot size 283 /// will be emitted on a higher-alignment address, potentially 284 /// leaving one or more empty slots behind as padding. If this 285 /// is false, the returned address might be less-aligned than 286 /// DirectAlign. 287 static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF, 288 Address VAListAddr, 289 llvm::Type *DirectTy, 290 CharUnits DirectSize, 291 CharUnits DirectAlign, 292 CharUnits SlotSize, 293 bool AllowHigherAlign) { 294 // Cast the element type to i8* if necessary. Some platforms define 295 // va_list as a struct containing an i8* instead of just an i8*. 296 if (VAListAddr.getElementType() != CGF.Int8PtrTy) 297 VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy); 298 299 llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur"); 300 301 // If the CC aligns values higher than the slot size, do so if needed. 302 Address Addr = Address::invalid(); 303 if (AllowHigherAlign && DirectAlign > SlotSize) { 304 Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign), 305 DirectAlign); 306 } else { 307 Addr = Address(Ptr, SlotSize); 308 } 309 310 // Advance the pointer past the argument, then store that back. 311 CharUnits FullDirectSize = DirectSize.alignTo(SlotSize); 312 Address NextPtr = 313 CGF.Builder.CreateConstInBoundsByteGEP(Addr, FullDirectSize, "argp.next"); 314 CGF.Builder.CreateStore(NextPtr.getPointer(), VAListAddr); 315 316 // If the argument is smaller than a slot, and this is a big-endian 317 // target, the argument will be right-adjusted in its slot. 318 if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() && 319 !DirectTy->isStructTy()) { 320 Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize); 321 } 322 323 Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy); 324 return Addr; 325 } 326 327 /// Emit va_arg for a platform using the common void* representation, 328 /// where arguments are simply emitted in an array of slots on the stack. 329 /// 330 /// \param IsIndirect - Values of this type are passed indirectly. 331 /// \param ValueInfo - The size and alignment of this type, generally 332 /// computed with getContext().getTypeInfoInChars(ValueTy). 333 /// \param SlotSizeAndAlign - The size and alignment of a stack slot. 
334 /// Each argument will be allocated to a multiple of this number of 335 /// slots, and all the slots will be aligned to this value. 336 /// \param AllowHigherAlign - The slot alignment is not a cap; 337 /// an argument type with an alignment greater than the slot size 338 /// will be emitted on a higher-alignment address, potentially 339 /// leaving one or more empty slots behind as padding. 340 static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, 341 QualType ValueTy, bool IsIndirect, 342 std::pair<CharUnits, CharUnits> ValueInfo, 343 CharUnits SlotSizeAndAlign, 344 bool AllowHigherAlign) { 345 // The size and alignment of the value that was passed directly. 346 CharUnits DirectSize, DirectAlign; 347 if (IsIndirect) { 348 DirectSize = CGF.getPointerSize(); 349 DirectAlign = CGF.getPointerAlign(); 350 } else { 351 DirectSize = ValueInfo.first; 352 DirectAlign = ValueInfo.second; 353 } 354 355 // Cast the address we've calculated to the right type. 356 llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy); 357 if (IsIndirect) 358 DirectTy = DirectTy->getPointerTo(0); 359 360 Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy, 361 DirectSize, DirectAlign, 362 SlotSizeAndAlign, 363 AllowHigherAlign); 364 365 if (IsIndirect) { 366 Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.second); 367 } 368 369 return Addr; 370 371 } 372 373 static Address emitMergePHI(CodeGenFunction &CGF, 374 Address Addr1, llvm::BasicBlock *Block1, 375 Address Addr2, llvm::BasicBlock *Block2, 376 const llvm::Twine &Name = "") { 377 assert(Addr1.getType() == Addr2.getType()); 378 llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name); 379 PHI->addIncoming(Addr1.getPointer(), Block1); 380 PHI->addIncoming(Addr2.getPointer(), Block2); 381 CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment()); 382 return Address(PHI, Align); 383 } 384 385 TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; } 386 387 // If someone can figure out a general rule for this, that would be great. 388 // It's probably just doomed to be platform-dependent, though. 389 unsigned TargetCodeGenInfo::getSizeOfUnwindException() const { 390 // Verified for: 391 // x86-64 FreeBSD, Linux, Darwin 392 // x86-32 FreeBSD, Linux, Darwin 393 // PowerPC Linux, Darwin 394 // ARM Darwin (*not* EABI) 395 // AArch64 Linux 396 return 32; 397 } 398 399 bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args, 400 const FunctionNoProtoType *fnType) const { 401 // The following conventions are known to require this to be false: 402 // x86_stdcall 403 // MIPS 404 // For everything else, we just prefer false unless we opt out. 405 return false; 406 } 407 408 void 409 TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib, 410 llvm::SmallString<24> &Opt) const { 411 // This assumes the user is passing a library name like "rt" instead of a 412 // filename like "librt.a/so", and that they don't care whether it's static or 413 // dynamic. 414 Opt = "-l"; 415 Opt += Lib; 416 } 417 418 unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const { 419 // OpenCL kernels are called via an explicit runtime API with arguments 420 // set with clSetKernelArg(), not as normal sub-functions. 
421 // Return SPIR_KERNEL by default as the kernel calling convention to 422 // ensure the fingerprint is fixed in such a way that each OpenCL argument 423 // gets one matching argument in the produced kernel function argument 424 // list to enable feasible implementation of clSetKernelArg() with 425 // aggregates etc. If we used the default C calling conv here, 426 // clSetKernelArg() might break depending on the target-specific 427 // conventions; different targets might split structs passed as values 428 // to multiple function arguments etc. 429 return llvm::CallingConv::SPIR_KERNEL; 430 } 431 432 llvm::Constant *TargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM, 433 llvm::PointerType *T, QualType QT) const { 434 return llvm::ConstantPointerNull::get(T); 435 } 436 437 LangAS TargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM, 438 const VarDecl *D) const { 439 assert(!CGM.getLangOpts().OpenCL && 440 !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) && 441 "Address space agnostic languages only"); 442 return D ? D->getType().getAddressSpace() : LangAS::Default; 443 } 444 445 llvm::Value *TargetCodeGenInfo::performAddrSpaceCast( 446 CodeGen::CodeGenFunction &CGF, llvm::Value *Src, LangAS SrcAddr, 447 LangAS DestAddr, llvm::Type *DestTy, bool isNonNull) const { 448 // Since target may map different address spaces in AST to the same address 449 // space, an address space conversion may end up as a bitcast. 450 if (auto *C = dyn_cast<llvm::Constant>(Src)) 451 return performAddrSpaceCast(CGF.CGM, C, SrcAddr, DestAddr, DestTy); 452 return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DestTy); 453 } 454 455 llvm::Constant * 456 TargetCodeGenInfo::performAddrSpaceCast(CodeGenModule &CGM, llvm::Constant *Src, 457 LangAS SrcAddr, LangAS DestAddr, 458 llvm::Type *DestTy) const { 459 // Since target may map different address spaces in AST to the same address 460 // space, an address space conversion may end up as a bitcast. 461 return llvm::ConstantExpr::getPointerCast(Src, DestTy); 462 } 463 464 llvm::SyncScope::ID 465 TargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts, 466 SyncScope Scope, 467 llvm::AtomicOrdering Ordering, 468 llvm::LLVMContext &Ctx) const { 469 return Ctx.getOrInsertSyncScopeID(""); /* default sync scope */ 470 } 471 472 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays); 473 474 /// isEmptyField - Return true iff the field is "empty", that is, it 475 /// is an unnamed bit-field or an (array of) empty record(s). 476 static bool isEmptyField(ASTContext &Context, const FieldDecl *FD, 477 bool AllowArrays) { 478 if (FD->isUnnamedBitfield()) 479 return true; 480 481 QualType FT = FD->getType(); 482 483 // Constant arrays of empty records count as empty, strip them off. 484 // Constant arrays of zero length always count as empty. 485 if (AllowArrays) 486 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) { 487 if (AT->getSize() == 0) 488 return true; 489 FT = AT->getElementType(); 490 } 491 492 const RecordType *RT = FT->getAs<RecordType>(); 493 if (!RT) 494 return false; 495 496 // C++ record fields are never empty, at least in the Itanium ABI. 497 // 498 // FIXME: We should use a predicate for whether this behavior is true in the 499 // current ABI. 500 if (isa<CXXRecordDecl>(RT->getDecl())) 501 return false; 502 503 return isEmptyRecord(Context, FT, AllowArrays); 504 } 505 506 /// isEmptyRecord - Return true iff a structure contains only empty 507 /// fields.
Note that a structure with a flexible array member is not 508 /// considered empty. 509 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) { 510 const RecordType *RT = T->getAs<RecordType>(); 511 if (!RT) 512 return false; 513 const RecordDecl *RD = RT->getDecl(); 514 if (RD->hasFlexibleArrayMember()) 515 return false; 516 517 // If this is a C++ record, check the bases first. 518 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 519 for (const auto &I : CXXRD->bases()) 520 if (!isEmptyRecord(Context, I.getType(), true)) 521 return false; 522 523 for (const auto *I : RD->fields()) 524 if (!isEmptyField(Context, I, AllowArrays)) 525 return false; 526 return true; 527 } 528 529 /// isSingleElementStruct - Determine if a structure is a "single 530 /// element struct", i.e. it has exactly one non-empty field or 531 /// exactly one field which is itself a single element 532 /// struct. Structures with flexible array members are never 533 /// considered single element structs. 534 /// 535 /// \return The field declaration for the single non-empty field, if 536 /// it exists. 537 static const Type *isSingleElementStruct(QualType T, ASTContext &Context) { 538 const RecordType *RT = T->getAs<RecordType>(); 539 if (!RT) 540 return nullptr; 541 542 const RecordDecl *RD = RT->getDecl(); 543 if (RD->hasFlexibleArrayMember()) 544 return nullptr; 545 546 const Type *Found = nullptr; 547 548 // If this is a C++ record, check the bases first. 549 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 550 for (const auto &I : CXXRD->bases()) { 551 // Ignore empty records. 552 if (isEmptyRecord(Context, I.getType(), true)) 553 continue; 554 555 // If we already found an element then this isn't a single-element struct. 556 if (Found) 557 return nullptr; 558 559 // If this is non-empty and not a single element struct, the composite 560 // cannot be a single element struct. 561 Found = isSingleElementStruct(I.getType(), Context); 562 if (!Found) 563 return nullptr; 564 } 565 } 566 567 // Check for single element. 568 for (const auto *FD : RD->fields()) { 569 QualType FT = FD->getType(); 570 571 // Ignore empty fields. 572 if (isEmptyField(Context, FD, true)) 573 continue; 574 575 // If we already found an element then this isn't a single-element 576 // struct. 577 if (Found) 578 return nullptr; 579 580 // Treat single element arrays as the element. 581 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) { 582 if (AT->getSize().getZExtValue() != 1) 583 break; 584 FT = AT->getElementType(); 585 } 586 587 if (!isAggregateTypeForABI(FT)) { 588 Found = FT.getTypePtr(); 589 } else { 590 Found = isSingleElementStruct(FT, Context); 591 if (!Found) 592 return nullptr; 593 } 594 } 595 596 // We don't consider a struct a single-element struct if it has 597 // padding beyond the element type. 598 if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T)) 599 return nullptr; 600 601 return Found; 602 } 603 604 namespace { 605 Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty, 606 const ABIArgInfo &AI) { 607 // This default implementation defers to the llvm backend's va_arg 608 // instruction. It can handle only passing arguments directly 609 // (typically only handled in the backend for primitive types), or 610 // aggregates passed indirectly by pointer (NOTE: if the "byval" 611 // flag has ABI impact in the callee, this implementation cannot 612 // work.) 
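  // Targets whose va_list is more than a plain pointer (e.g. the x86-64 SysV
  // ABI) override EmitVAArg instead of relying on this helper.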
613 614 // Only a few cases are covered here at the moment -- those needed 615 // by the default abi. 616 llvm::Value *Val; 617 618 if (AI.isIndirect()) { 619 assert(!AI.getPaddingType() && 620 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!"); 621 assert( 622 !AI.getIndirectRealign() && 623 "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!"); 624 625 auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty); 626 CharUnits TyAlignForABI = TyInfo.second; 627 628 llvm::Type *BaseTy = 629 llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty)); 630 llvm::Value *Addr = 631 CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy); 632 return Address(Addr, TyAlignForABI); 633 } else { 634 assert((AI.isDirect() || AI.isExtend()) && 635 "Unexpected ArgInfo Kind in generic VAArg emitter!"); 636 637 assert(!AI.getInReg() && 638 "Unexpected InReg seen in arginfo in generic VAArg emitter!"); 639 assert(!AI.getPaddingType() && 640 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!"); 641 assert(!AI.getDirectOffset() && 642 "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!"); 643 assert(!AI.getCoerceToType() && 644 "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!"); 645 646 Address Temp = CGF.CreateMemTemp(Ty, "varet"); 647 Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(), CGF.ConvertType(Ty)); 648 CGF.Builder.CreateStore(Val, Temp); 649 return Temp; 650 } 651 } 652 653 /// DefaultABIInfo - The default implementation for ABI specific 654 /// details. This implementation provides information which results in 655 /// self-consistent and sensible LLVM IR generation, but does not 656 /// conform to any particular ABI. 657 class DefaultABIInfo : public ABIInfo { 658 public: 659 DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} 660 661 ABIArgInfo classifyReturnType(QualType RetTy) const; 662 ABIArgInfo classifyArgumentType(QualType RetTy) const; 663 664 void computeInfo(CGFunctionInfo &FI) const override { 665 if (!getCXXABI().classifyReturnType(FI)) 666 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 667 for (auto &I : FI.arguments()) 668 I.info = classifyArgumentType(I.type); 669 } 670 671 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 672 QualType Ty) const override { 673 return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty)); 674 } 675 }; 676 677 class DefaultTargetCodeGenInfo : public TargetCodeGenInfo { 678 public: 679 DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 680 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 681 }; 682 683 ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const { 684 Ty = useFirstFieldIfTransparentUnion(Ty); 685 686 if (isAggregateTypeForABI(Ty)) { 687 // Records with non-trivial destructors/copy-constructors should not be 688 // passed by value. 689 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 690 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 691 692 return getNaturalAlignIndirect(Ty); 693 } 694 695 // Treat an enum type as its underlying type. 696 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 697 Ty = EnumTy->getDecl()->getIntegerType(); 698 699 return (Ty->isPromotableIntegerType() ? 
ABIArgInfo::getExtend(Ty) 700 : ABIArgInfo::getDirect()); 701 } 702 703 ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const { 704 if (RetTy->isVoidType()) 705 return ABIArgInfo::getIgnore(); 706 707 if (isAggregateTypeForABI(RetTy)) 708 return getNaturalAlignIndirect(RetTy); 709 710 // Treat an enum type as its underlying type. 711 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 712 RetTy = EnumTy->getDecl()->getIntegerType(); 713 714 return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy) 715 : ABIArgInfo::getDirect()); 716 } 717 718 //===----------------------------------------------------------------------===// 719 // WebAssembly ABI Implementation 720 // 721 // This is a very simple ABI that relies a lot on DefaultABIInfo. 722 //===----------------------------------------------------------------------===// 723 724 class WebAssemblyABIInfo final : public SwiftABIInfo { 725 DefaultABIInfo defaultInfo; 726 727 public: 728 explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT) 729 : SwiftABIInfo(CGT), defaultInfo(CGT) {} 730 731 private: 732 ABIArgInfo classifyReturnType(QualType RetTy) const; 733 ABIArgInfo classifyArgumentType(QualType Ty) const; 734 735 // DefaultABIInfo's classifyReturnType and classifyArgumentType are 736 // non-virtual, but computeInfo and EmitVAArg are virtual, so we 737 // overload them. 738 void computeInfo(CGFunctionInfo &FI) const override { 739 if (!getCXXABI().classifyReturnType(FI)) 740 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 741 for (auto &Arg : FI.arguments()) 742 Arg.info = classifyArgumentType(Arg.type); 743 } 744 745 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 746 QualType Ty) const override; 747 748 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, 749 bool asReturnValue) const override { 750 return occupiesMoreThan(CGT, scalars, /*total*/ 4); 751 } 752 753 bool isSwiftErrorInRegister() const override { 754 return false; 755 } 756 }; 757 758 class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo { 759 public: 760 explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 761 : TargetCodeGenInfo(new WebAssemblyABIInfo(CGT)) {} 762 763 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 764 CodeGen::CodeGenModule &CGM) const override { 765 TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); 766 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) { 767 if (const auto *Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) { 768 llvm::Function *Fn = cast<llvm::Function>(GV); 769 llvm::AttrBuilder B; 770 B.addAttribute("wasm-import-module", Attr->getImportModule()); 771 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B); 772 } 773 if (const auto *Attr = FD->getAttr<WebAssemblyImportNameAttr>()) { 774 llvm::Function *Fn = cast<llvm::Function>(GV); 775 llvm::AttrBuilder B; 776 B.addAttribute("wasm-import-name", Attr->getImportName()); 777 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B); 778 } 779 } 780 781 if (auto *FD = dyn_cast_or_null<FunctionDecl>(D)) { 782 llvm::Function *Fn = cast<llvm::Function>(GV); 783 if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype()) 784 Fn->addFnAttr("no-prototype"); 785 } 786 } 787 }; 788 789 /// Classify argument of given type \p Ty. 
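/// Aggregates that wrap a single scalar element are unwrapped and passed as
/// that element; most other cases defer to DefaultABIInfo.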
790 ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const { 791 Ty = useFirstFieldIfTransparentUnion(Ty); 792 793 if (isAggregateTypeForABI(Ty)) { 794 // Records with non-trivial destructors/copy-constructors should not be 795 // passed by value. 796 if (auto RAA = getRecordArgABI(Ty, getCXXABI())) 797 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 798 // Ignore empty structs/unions. 799 if (isEmptyRecord(getContext(), Ty, true)) 800 return ABIArgInfo::getIgnore(); 801 // Lower single-element structs to just pass a regular value. TODO: We 802 // could do reasonable-size multiple-element structs too, using getExpand(), 803 // though watch out for things like bitfields. 804 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext())) 805 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); 806 } 807 808 // Otherwise just do the default thing. 809 return defaultInfo.classifyArgumentType(Ty); 810 } 811 812 ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const { 813 if (isAggregateTypeForABI(RetTy)) { 814 // Records with non-trivial destructors/copy-constructors should not be 815 // returned by value. 816 if (!getRecordArgABI(RetTy, getCXXABI())) { 817 // Ignore empty structs/unions. 818 if (isEmptyRecord(getContext(), RetTy, true)) 819 return ABIArgInfo::getIgnore(); 820 // Lower single-element structs to just return a regular value. TODO: We 821 // could do reasonable-size multiple-element structs too, using 822 // ABIArgInfo::getDirect(). 823 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) 824 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); 825 } 826 } 827 828 // Otherwise just do the default thing. 829 return defaultInfo.classifyReturnType(RetTy); 830 } 831 832 Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 833 QualType Ty) const { 834 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect=*/ false, 835 getContext().getTypeInfoInChars(Ty), 836 CharUnits::fromQuantity(4), 837 /*AllowHigherAlign=*/ true); 838 } 839 840 //===----------------------------------------------------------------------===// 841 // le32/PNaCl bitcode ABI Implementation 842 // 843 // This is a simplified version of the x86_32 ABI. Arguments and return values 844 // are always passed on the stack. 845 //===----------------------------------------------------------------------===// 846 847 class PNaClABIInfo : public ABIInfo { 848 public: 849 PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} 850 851 ABIArgInfo classifyReturnType(QualType RetTy) const; 852 ABIArgInfo classifyArgumentType(QualType RetTy) const; 853 854 void computeInfo(CGFunctionInfo &FI) const override; 855 Address EmitVAArg(CodeGenFunction &CGF, 856 Address VAListAddr, QualType Ty) const override; 857 }; 858 859 class PNaClTargetCodeGenInfo : public TargetCodeGenInfo { 860 public: 861 PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 862 : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {} 863 }; 864 865 void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const { 866 if (!getCXXABI().classifyReturnType(FI)) 867 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 868 869 for (auto &I : FI.arguments()) 870 I.info = classifyArgumentType(I.type); 871 } 872 873 Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 874 QualType Ty) const { 875 // The PNaCL ABI is a bit odd, in that varargs don't use normal 876 // function classification. 
Structs get passed directly for varargs 877 // functions, through a rewriting transform in 878 // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows 879 // this target to actually support a va_arg instructions with an 880 // aggregate type, unlike other targets. 881 return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect()); 882 } 883 884 /// Classify argument of given type \p Ty. 885 ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const { 886 if (isAggregateTypeForABI(Ty)) { 887 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 888 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 889 return getNaturalAlignIndirect(Ty); 890 } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) { 891 // Treat an enum type as its underlying type. 892 Ty = EnumTy->getDecl()->getIntegerType(); 893 } else if (Ty->isFloatingType()) { 894 // Floating-point types don't go inreg. 895 return ABIArgInfo::getDirect(); 896 } 897 898 return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty) 899 : ABIArgInfo::getDirect()); 900 } 901 902 ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const { 903 if (RetTy->isVoidType()) 904 return ABIArgInfo::getIgnore(); 905 906 // In the PNaCl ABI we always return records/structures on the stack. 907 if (isAggregateTypeForABI(RetTy)) 908 return getNaturalAlignIndirect(RetTy); 909 910 // Treat an enum type as its underlying type. 911 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 912 RetTy = EnumTy->getDecl()->getIntegerType(); 913 914 return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy) 915 : ABIArgInfo::getDirect()); 916 } 917 918 /// IsX86_MMXType - Return true if this is an MMX type. 919 bool IsX86_MMXType(llvm::Type *IRType) { 920 // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>. 921 return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 && 922 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() && 923 IRType->getScalarSizeInBits() != 64; 924 } 925 926 static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF, 927 StringRef Constraint, 928 llvm::Type* Ty) { 929 bool IsMMXCons = llvm::StringSwitch<bool>(Constraint) 930 .Cases("y", "&y", "^Ym", true) 931 .Default(false); 932 if (IsMMXCons && Ty->isVectorTy()) { 933 if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) { 934 // Invalid MMX constraint 935 return nullptr; 936 } 937 938 return llvm::Type::getX86_MMXTy(CGF.getLLVMContext()); 939 } 940 941 // No operation needed 942 return Ty; 943 } 944 945 /// Returns true if this type can be passed in SSE registers with the 946 /// X86_VectorCall calling convention. Shared between x86_32 and x86_64. 947 static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) { 948 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 949 if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) { 950 if (BT->getKind() == BuiltinType::LongDouble) { 951 if (&Context.getTargetInfo().getLongDoubleFormat() == 952 &llvm::APFloat::x87DoubleExtended()) 953 return false; 954 } 955 return true; 956 } 957 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 958 // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX 959 // registers specially. 
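    // XMM, YMM and ZMM registers hold 128-, 256- and 512-bit vectors,
    // respectively.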
960 unsigned VecSize = Context.getTypeSize(VT); 961 if (VecSize == 128 || VecSize == 256 || VecSize == 512) 962 return true; 963 } 964 return false; 965 } 966 967 /// Returns true if this aggregate is small enough to be passed in SSE registers 968 /// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64. 969 static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) { 970 return NumMembers <= 4; 971 } 972 973 /// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86. 974 static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) { 975 auto AI = ABIArgInfo::getDirect(T); 976 AI.setInReg(true); 977 AI.setCanBeFlattened(false); 978 return AI; 979 } 980 981 //===----------------------------------------------------------------------===// 982 // X86-32 ABI Implementation 983 //===----------------------------------------------------------------------===// 984 985 /// Similar to llvm::CCState, but for Clang. 986 struct CCState { 987 CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {} 988 989 unsigned CC; 990 unsigned FreeRegs; 991 unsigned FreeSSERegs; 992 }; 993 994 enum { 995 // Vectorcall only allows the first 6 parameters to be passed in registers. 996 VectorcallMaxParamNumAsReg = 6 997 }; 998 999 /// X86_32ABIInfo - The X86-32 ABI information. 1000 class X86_32ABIInfo : public SwiftABIInfo { 1001 enum Class { 1002 Integer, 1003 Float 1004 }; 1005 1006 static const unsigned MinABIStackAlignInBytes = 4; 1007 1008 bool IsDarwinVectorABI; 1009 bool IsRetSmallStructInRegABI; 1010 bool IsWin32StructABI; 1011 bool IsSoftFloatABI; 1012 bool IsMCUABI; 1013 bool IsLinuxABI; 1014 unsigned DefaultNumRegisterParameters; 1015 1016 static bool isRegisterSize(unsigned Size) { 1017 return (Size == 8 || Size == 16 || Size == 32 || Size == 64); 1018 } 1019 1020 bool isHomogeneousAggregateBaseType(QualType Ty) const override { 1021 // FIXME: Assumes vectorcall is in use. 1022 return isX86VectorTypeForVectorCall(getContext(), Ty); 1023 } 1024 1025 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 1026 uint64_t NumMembers) const override { 1027 // FIXME: Assumes vectorcall is in use. 1028 return isX86VectorCallAggregateSmallEnough(NumMembers); 1029 } 1030 1031 bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const; 1032 1033 /// getIndirectResult - Give a source type \arg Ty, return a suitable result 1034 /// such that the argument will be passed in memory. 1035 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const; 1036 1037 ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const; 1038 1039 /// Return the alignment to use for the given type on the stack. 1040 unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const; 1041 1042 Class classify(QualType Ty) const; 1043 ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const; 1044 ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const; 1045 1046 /// Updates the number of available free registers, returns 1047 /// true if any registers were allocated. 1048 bool updateFreeRegs(QualType Ty, CCState &State) const; 1049 1050 bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg, 1051 bool &NeedsPadding) const; 1052 bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const; 1053 1054 bool canExpandIndirectArgument(QualType Ty) const; 1055 1056 /// Rewrite the function info so that all memory arguments use 1057 /// inalloca. 
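  /// This rewrite is used for the Microsoft x86 ABI, where such arguments are
  /// laid out in a single caller-allocated argument block.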
1058 void rewriteWithInAlloca(CGFunctionInfo &FI) const; 1059 1060 void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields, 1061 CharUnits &StackOffset, ABIArgInfo &Info, 1062 QualType Type) const; 1063 void computeVectorCallArgs(CGFunctionInfo &FI, CCState &State, 1064 bool &UsedInAlloca) const; 1065 1066 public: 1067 1068 void computeInfo(CGFunctionInfo &FI) const override; 1069 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 1070 QualType Ty) const override; 1071 1072 X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI, 1073 bool RetSmallStructInRegABI, bool Win32StructABI, 1074 unsigned NumRegisterParameters, bool SoftFloatABI) 1075 : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI), 1076 IsRetSmallStructInRegABI(RetSmallStructInRegABI), 1077 IsWin32StructABI(Win32StructABI), 1078 IsSoftFloatABI(SoftFloatABI), 1079 IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()), 1080 IsLinuxABI(CGT.getTarget().getTriple().isOSLinux()), 1081 DefaultNumRegisterParameters(NumRegisterParameters) {} 1082 1083 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, 1084 bool asReturnValue) const override { 1085 // LLVM's x86-32 lowering currently only assigns up to three 1086 // integer registers and three fp registers. Oddly, it'll use up to 1087 // four vector registers for vectors, but those can overlap with the 1088 // scalar registers. 1089 return occupiesMoreThan(CGT, scalars, /*total*/ 3); 1090 } 1091 1092 bool isSwiftErrorInRegister() const override { 1093 // x86-32 lowering does not support passing swifterror in a register. 1094 return false; 1095 } 1096 }; 1097 1098 class X86_32TargetCodeGenInfo : public TargetCodeGenInfo { 1099 public: 1100 X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI, 1101 bool RetSmallStructInRegABI, bool Win32StructABI, 1102 unsigned NumRegisterParameters, bool SoftFloatABI) 1103 : TargetCodeGenInfo(new X86_32ABIInfo( 1104 CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI, 1105 NumRegisterParameters, SoftFloatABI)) {} 1106 1107 static bool isStructReturnInRegABI( 1108 const llvm::Triple &Triple, const CodeGenOptions &Opts); 1109 1110 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 1111 CodeGen::CodeGenModule &CGM) const override; 1112 1113 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { 1114 // Darwin uses different dwarf register numbers for EH. 
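    // Darwin's x86-32 DWARF numbering swaps ESP and EBP relative to the
    // generic numbering, so the stack pointer is register 5 there and 4
    // elsewhere.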
1115 if (CGM.getTarget().getTriple().isOSDarwin()) return 5; 1116 return 4; 1117 } 1118 1119 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 1120 llvm::Value *Address) const override; 1121 1122 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF, 1123 StringRef Constraint, 1124 llvm::Type* Ty) const override { 1125 return X86AdjustInlineAsmType(CGF, Constraint, Ty); 1126 } 1127 1128 void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue, 1129 std::string &Constraints, 1130 std::vector<llvm::Type *> &ResultRegTypes, 1131 std::vector<llvm::Type *> &ResultTruncRegTypes, 1132 std::vector<LValue> &ResultRegDests, 1133 std::string &AsmString, 1134 unsigned NumOutputs) const override; 1135 1136 llvm::Constant * 1137 getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override { 1138 unsigned Sig = (0xeb << 0) | // jmp rel8 1139 (0x06 << 8) | // .+0x08 1140 ('v' << 16) | 1141 ('2' << 24); 1142 return llvm::ConstantInt::get(CGM.Int32Ty, Sig); 1143 } 1144 1145 StringRef getARCRetainAutoreleasedReturnValueMarker() const override { 1146 return "movl\t%ebp, %ebp" 1147 "\t\t// marker for objc_retainAutoreleaseReturnValue"; 1148 } 1149 }; 1150 1151 } 1152 1153 /// Rewrite input constraint references after adding some output constraints. 1154 /// In the case where there is one output and one input and we add one output, 1155 /// we need to replace all operand references greater than or equal to 1: 1156 /// mov $0, $1 1157 /// mov eax, $1 1158 /// The result will be: 1159 /// mov $0, $2 1160 /// mov eax, $2 1161 static void rewriteInputConstraintReferences(unsigned FirstIn, 1162 unsigned NumNewOuts, 1163 std::string &AsmString) { 1164 std::string Buf; 1165 llvm::raw_string_ostream OS(Buf); 1166 size_t Pos = 0; 1167 while (Pos < AsmString.size()) { 1168 size_t DollarStart = AsmString.find('$', Pos); 1169 if (DollarStart == std::string::npos) 1170 DollarStart = AsmString.size(); 1171 size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart); 1172 if (DollarEnd == std::string::npos) 1173 DollarEnd = AsmString.size(); 1174 OS << StringRef(&AsmString[Pos], DollarEnd - Pos); 1175 Pos = DollarEnd; 1176 size_t NumDollars = DollarEnd - DollarStart; 1177 if (NumDollars % 2 != 0 && Pos < AsmString.size()) { 1178 // We have an operand reference. 1179 size_t DigitStart = Pos; 1180 size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart); 1181 if (DigitEnd == std::string::npos) 1182 DigitEnd = AsmString.size(); 1183 StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart); 1184 unsigned OperandIndex; 1185 if (!OperandStr.getAsInteger(10, OperandIndex)) { 1186 if (OperandIndex >= FirstIn) 1187 OperandIndex += NumNewOuts; 1188 OS << OperandIndex; 1189 } else { 1190 OS << OperandStr; 1191 } 1192 Pos = DigitEnd; 1193 } 1194 } 1195 AsmString = std::move(OS.str()); 1196 } 1197 1198 /// Add output constraints for EAX:EDX because they are return registers. 1199 void X86_32TargetCodeGenInfo::addReturnRegisterOutputs( 1200 CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints, 1201 std::vector<llvm::Type *> &ResultRegTypes, 1202 std::vector<llvm::Type *> &ResultTruncRegTypes, 1203 std::vector<LValue> &ResultRegDests, std::string &AsmString, 1204 unsigned NumOutputs) const { 1205 uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType()); 1206 1207 // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is 1208 // larger. 
1209 if (!Constraints.empty()) 1210 Constraints += ','; 1211 if (RetWidth <= 32) { 1212 Constraints += "={eax}"; 1213 ResultRegTypes.push_back(CGF.Int32Ty); 1214 } else { 1215 // Use the 'A' constraint for EAX:EDX. 1216 Constraints += "=A"; 1217 ResultRegTypes.push_back(CGF.Int64Ty); 1218 } 1219 1220 // Truncate EAX or EAX:EDX to an integer of the appropriate size. 1221 llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth); 1222 ResultTruncRegTypes.push_back(CoerceTy); 1223 1224 // Coerce the integer by bitcasting the return slot pointer. 1225 ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(), 1226 CoerceTy->getPointerTo())); 1227 ResultRegDests.push_back(ReturnSlot); 1228 1229 rewriteInputConstraintReferences(NumOutputs, 1, AsmString); 1230 } 1231 1232 /// shouldReturnTypeInRegister - Determine if the given type should be 1233 /// returned in a register (for the Darwin and MCU ABI). 1234 bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty, 1235 ASTContext &Context) const { 1236 uint64_t Size = Context.getTypeSize(Ty); 1237 1238 // For i386, type must be register sized. 1239 // For the MCU ABI, it only needs to be <= 8-byte 1240 if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size))) 1241 return false; 1242 1243 if (Ty->isVectorType()) { 1244 // 64- and 128- bit vectors inside structures are not returned in 1245 // registers. 1246 if (Size == 64 || Size == 128) 1247 return false; 1248 1249 return true; 1250 } 1251 1252 // If this is a builtin, pointer, enum, complex type, member pointer, or 1253 // member function pointer it is ok. 1254 if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() || 1255 Ty->isAnyComplexType() || Ty->isEnumeralType() || 1256 Ty->isBlockPointerType() || Ty->isMemberPointerType()) 1257 return true; 1258 1259 // Arrays are treated like records. 1260 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) 1261 return shouldReturnTypeInRegister(AT->getElementType(), Context); 1262 1263 // Otherwise, it must be a record type. 1264 const RecordType *RT = Ty->getAs<RecordType>(); 1265 if (!RT) return false; 1266 1267 // FIXME: Traverse bases here too. 1268 1269 // Structure types are passed in register if all fields would be 1270 // passed in a register. 1271 for (const auto *FD : RT->getDecl()->fields()) { 1272 // Empty fields are ignored. 1273 if (isEmptyField(Context, FD, true)) 1274 continue; 1275 1276 // Check fields recursively. 1277 if (!shouldReturnTypeInRegister(FD->getType(), Context)) 1278 return false; 1279 } 1280 return true; 1281 } 1282 1283 static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) { 1284 // Treat complex types as the element type. 1285 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) 1286 Ty = CTy->getElementType(); 1287 1288 // Check for a type which we know has a simple scalar argument-passing 1289 // convention without any padding. (We're specifically looking for 32 1290 // and 64-bit integer and integer-equivalents, float, and double.) 1291 if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() && 1292 !Ty->isEnumeralType() && !Ty->isBlockPointerType()) 1293 return false; 1294 1295 uint64_t Size = Context.getTypeSize(Ty); 1296 return Size == 32 || Size == 64; 1297 } 1298 1299 static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD, 1300 uint64_t &Size) { 1301 for (const auto *FD : RD->fields()) { 1302 // Scalar arguments on the stack get 4 byte alignment on x86. 
If the 1303 // argument is smaller than 32-bits, expanding the struct will create 1304 // alignment padding. 1305 if (!is32Or64BitBasicType(FD->getType(), Context)) 1306 return false; 1307 1308 // FIXME: Reject bit-fields wholesale; there are two problems, we don't know 1309 // how to expand them yet, and the predicate for telling if a bitfield still 1310 // counts as "basic" is more complicated than what we were doing previously. 1311 if (FD->isBitField()) 1312 return false; 1313 1314 Size += Context.getTypeSize(FD->getType()); 1315 } 1316 return true; 1317 } 1318 1319 static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD, 1320 uint64_t &Size) { 1321 // Don't do this if there are any non-empty bases. 1322 for (const CXXBaseSpecifier &Base : RD->bases()) { 1323 if (!addBaseAndFieldSizes(Context, Base.getType()->getAsCXXRecordDecl(), 1324 Size)) 1325 return false; 1326 } 1327 if (!addFieldSizes(Context, RD, Size)) 1328 return false; 1329 return true; 1330 } 1331 1332 /// Test whether an argument type which is to be passed indirectly (on the 1333 /// stack) would have the equivalent layout if it was expanded into separate 1334 /// arguments. If so, we prefer to do the latter to avoid inhibiting 1335 /// optimizations. 1336 bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const { 1337 // We can only expand structure types. 1338 const RecordType *RT = Ty->getAs<RecordType>(); 1339 if (!RT) 1340 return false; 1341 const RecordDecl *RD = RT->getDecl(); 1342 uint64_t Size = 0; 1343 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 1344 if (!IsWin32StructABI) { 1345 // On non-Windows, we have to conservatively match our old bitcode 1346 // prototypes in order to be ABI-compatible at the bitcode level. 1347 if (!CXXRD->isCLike()) 1348 return false; 1349 } else { 1350 // Don't do this for dynamic classes. 1351 if (CXXRD->isDynamicClass()) 1352 return false; 1353 } 1354 if (!addBaseAndFieldSizes(getContext(), CXXRD, Size)) 1355 return false; 1356 } else { 1357 if (!addFieldSizes(getContext(), RD, Size)) 1358 return false; 1359 } 1360 1361 // We can do this if there was no alignment padding. 1362 return Size == getContext().getTypeSize(Ty); 1363 } 1364 1365 ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy, CCState &State) const { 1366 // If the return value is indirect, then the hidden argument is consuming one 1367 // integer register. 1368 if (State.FreeRegs) { 1369 --State.FreeRegs; 1370 if (!IsMCUABI) 1371 return getNaturalAlignIndirectInReg(RetTy); 1372 } 1373 return getNaturalAlignIndirect(RetTy, /*ByVal=*/false); 1374 } 1375 1376 ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy, 1377 CCState &State) const { 1378 if (RetTy->isVoidType()) 1379 return ABIArgInfo::getIgnore(); 1380 1381 const Type *Base = nullptr; 1382 uint64_t NumElts = 0; 1383 if ((State.CC == llvm::CallingConv::X86_VectorCall || 1384 State.CC == llvm::CallingConv::X86_RegCall) && 1385 isHomogeneousAggregate(RetTy, Base, NumElts)) { 1386 // The LLVM struct type for such an aggregate should lower properly. 1387 return ABIArgInfo::getDirect(); 1388 } 1389 1390 if (const VectorType *VT = RetTy->getAs<VectorType>()) { 1391 // On Darwin, some vectors are returned in registers. 1392 if (IsDarwinVectorABI) { 1393 uint64_t Size = getContext().getTypeSize(RetTy); 1394 1395 // 128-bit vectors are a special case; they are returned in 1396 // registers and we need to make sure to pick a type the LLVM 1397 // backend will like. 
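      // Use <2 x i64> so the value is returned in an XMM register regardless
      // of the source element type.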
1398 if (Size == 128) 1399 return ABIArgInfo::getDirect(llvm::VectorType::get( 1400 llvm::Type::getInt64Ty(getVMContext()), 2)); 1401 1402 // Always return in register if it fits in a general purpose 1403 // register, or if it is 64 bits and has a single element. 1404 if ((Size == 8 || Size == 16 || Size == 32) || 1405 (Size == 64 && VT->getNumElements() == 1)) 1406 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 1407 Size)); 1408 1409 return getIndirectReturnResult(RetTy, State); 1410 } 1411 1412 return ABIArgInfo::getDirect(); 1413 } 1414 1415 if (isAggregateTypeForABI(RetTy)) { 1416 if (const RecordType *RT = RetTy->getAs<RecordType>()) { 1417 // Structures with flexible arrays are always indirect. 1418 if (RT->getDecl()->hasFlexibleArrayMember()) 1419 return getIndirectReturnResult(RetTy, State); 1420 } 1421 1422 // If specified, structs and unions are always indirect. 1423 if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType()) 1424 return getIndirectReturnResult(RetTy, State); 1425 1426 // Ignore empty structs/unions. 1427 if (isEmptyRecord(getContext(), RetTy, true)) 1428 return ABIArgInfo::getIgnore(); 1429 1430 // Small structures which are register sized are generally returned 1431 // in a register. 1432 if (shouldReturnTypeInRegister(RetTy, getContext())) { 1433 uint64_t Size = getContext().getTypeSize(RetTy); 1434 1435 // As a special-case, if the struct is a "single-element" struct, and 1436 // the field is of type "float" or "double", return it in a 1437 // floating-point register. (MSVC does not apply this special case.) 1438 // We apply a similar transformation for pointer types to improve the 1439 // quality of the generated IR. 1440 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) 1441 if ((!IsWin32StructABI && SeltTy->isRealFloatingType()) 1442 || SeltTy->hasPointerRepresentation()) 1443 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); 1444 1445 // FIXME: We should be able to narrow this integer in cases with dead 1446 // padding. 1447 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size)); 1448 } 1449 1450 return getIndirectReturnResult(RetTy, State); 1451 } 1452 1453 // Treat an enum type as its underlying type. 1454 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 1455 RetTy = EnumTy->getDecl()->getIntegerType(); 1456 1457 return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy) 1458 : ABIArgInfo::getDirect()); 1459 } 1460 1461 static bool isSSEVectorType(ASTContext &Context, QualType Ty) { 1462 return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128; 1463 } 1464 1465 static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) { 1466 const RecordType *RT = Ty->getAs<RecordType>(); 1467 if (!RT) 1468 return 0; 1469 const RecordDecl *RD = RT->getDecl(); 1470 1471 // If this is a C++ record, check the bases first. 
1472 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 1473 for (const auto &I : CXXRD->bases()) 1474 if (!isRecordWithSSEVectorType(Context, I.getType())) 1475 return false; 1476 1477 for (const auto *i : RD->fields()) { 1478 QualType FT = i->getType(); 1479 1480 if (isSSEVectorType(Context, FT)) 1481 return true; 1482 1483 if (isRecordWithSSEVectorType(Context, FT)) 1484 return true; 1485 } 1486 1487 return false; 1488 } 1489 1490 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty, 1491 unsigned Align) const { 1492 // Otherwise, if the alignment is less than or equal to the minimum ABI 1493 // alignment, just use the default; the backend will handle this. 1494 if (Align <= MinABIStackAlignInBytes) 1495 return 0; // Use default alignment. 1496 1497 if (IsLinuxABI) { 1498 // i386 System V ABI 2.1: Structures and unions assume the alignment of their 1499 // most strictly aligned component. 1500 // 1501 // Exclude other System V OS (e.g Darwin, PS4 and FreeBSD) since we don't 1502 // want to spend any effort dealing with the ramifications of ABI breaks. 1503 return Align; 1504 } else if (!IsDarwinVectorABI) { 1505 // On non-Darwin and non-Linux, the stack type alignment is always 4. 1506 // Set explicit alignment, since we may need to realign the top. 1507 return MinABIStackAlignInBytes; 1508 } 1509 1510 // Otherwise, if the type contains an SSE vector type, the alignment is 16. 1511 if (Align >= 16 && (isSSEVectorType(getContext(), Ty) || 1512 isRecordWithSSEVectorType(getContext(), Ty))) 1513 return 16; 1514 1515 return MinABIStackAlignInBytes; 1516 } 1517 1518 ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal, 1519 CCState &State) const { 1520 if (!ByVal) { 1521 if (State.FreeRegs) { 1522 --State.FreeRegs; // Non-byval indirects just use one pointer. 1523 if (!IsMCUABI) 1524 return getNaturalAlignIndirectInReg(Ty); 1525 } 1526 return getNaturalAlignIndirect(Ty, false); 1527 } 1528 1529 // Compute the byval alignment. 1530 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8; 1531 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign); 1532 if (StackAlign == 0) 1533 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true); 1534 1535 // If the stack alignment is less than the type alignment, realign the 1536 // argument. 1537 bool Realign = TypeAlign > StackAlign; 1538 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign), 1539 /*ByVal=*/true, Realign); 1540 } 1541 1542 X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const { 1543 const Type *T = isSingleElementStruct(Ty, getContext()); 1544 if (!T) 1545 T = Ty.getTypePtr(); 1546 1547 if (const BuiltinType *BT = T->getAs<BuiltinType>()) { 1548 BuiltinType::Kind K = BT->getKind(); 1549 if (K == BuiltinType::Float || K == BuiltinType::Double) 1550 return Float; 1551 } 1552 return Integer; 1553 } 1554 1555 bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const { 1556 if (!IsSoftFloatABI) { 1557 Class C = classify(Ty); 1558 if (C == Float) 1559 return false; 1560 } 1561 1562 unsigned Size = getContext().getTypeSize(Ty); 1563 unsigned SizeInRegs = (Size + 31) / 32; 1564 1565 if (SizeInRegs == 0) 1566 return false; 1567 1568 if (!IsMCUABI) { 1569 if (SizeInRegs > State.FreeRegs) { 1570 State.FreeRegs = 0; 1571 return false; 1572 } 1573 } else { 1574 // The MCU psABI allows passing parameters in-reg even if there are 1575 // earlier parameters that are passed on the stack. 
Also, 1576 // it does not allow passing >8-byte structs in-register, 1577 // even if there are 3 free registers available. 1578 if (SizeInRegs > State.FreeRegs || SizeInRegs > 2) 1579 return false; 1580 } 1581 1582 State.FreeRegs -= SizeInRegs; 1583 return true; 1584 } 1585 1586 bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State, 1587 bool &InReg, 1588 bool &NeedsPadding) const { 1589 // On Windows, aggregates other than HFAs are never passed in registers, and 1590 // they do not consume register slots. Homogenous floating-point aggregates 1591 // (HFAs) have already been dealt with at this point. 1592 if (IsWin32StructABI && isAggregateTypeForABI(Ty)) 1593 return false; 1594 1595 NeedsPadding = false; 1596 InReg = !IsMCUABI; 1597 1598 if (!updateFreeRegs(Ty, State)) 1599 return false; 1600 1601 if (IsMCUABI) 1602 return true; 1603 1604 if (State.CC == llvm::CallingConv::X86_FastCall || 1605 State.CC == llvm::CallingConv::X86_VectorCall || 1606 State.CC == llvm::CallingConv::X86_RegCall) { 1607 if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs) 1608 NeedsPadding = true; 1609 1610 return false; 1611 } 1612 1613 return true; 1614 } 1615 1616 bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const { 1617 if (!updateFreeRegs(Ty, State)) 1618 return false; 1619 1620 if (IsMCUABI) 1621 return false; 1622 1623 if (State.CC == llvm::CallingConv::X86_FastCall || 1624 State.CC == llvm::CallingConv::X86_VectorCall || 1625 State.CC == llvm::CallingConv::X86_RegCall) { 1626 if (getContext().getTypeSize(Ty) > 32) 1627 return false; 1628 1629 return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() || 1630 Ty->isReferenceType()); 1631 } 1632 1633 return true; 1634 } 1635 1636 ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, 1637 CCState &State) const { 1638 // FIXME: Set alignment on indirect arguments. 1639 1640 Ty = useFirstFieldIfTransparentUnion(Ty); 1641 1642 // Check with the C++ ABI first. 1643 const RecordType *RT = Ty->getAs<RecordType>(); 1644 if (RT) { 1645 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()); 1646 if (RAA == CGCXXABI::RAA_Indirect) { 1647 return getIndirectResult(Ty, false, State); 1648 } else if (RAA == CGCXXABI::RAA_DirectInMemory) { 1649 // The field index doesn't matter, we'll fix it up later. 1650 return ABIArgInfo::getInAlloca(/*FieldIndex=*/0); 1651 } 1652 } 1653 1654 // Regcall uses the concept of a homogenous vector aggregate, similar 1655 // to other targets. 1656 const Type *Base = nullptr; 1657 uint64_t NumElts = 0; 1658 if (State.CC == llvm::CallingConv::X86_RegCall && 1659 isHomogeneousAggregate(Ty, Base, NumElts)) { 1660 1661 if (State.FreeSSERegs >= NumElts) { 1662 State.FreeSSERegs -= NumElts; 1663 if (Ty->isBuiltinType() || Ty->isVectorType()) 1664 return ABIArgInfo::getDirect(); 1665 return ABIArgInfo::getExpand(); 1666 } 1667 return getIndirectResult(Ty, /*ByVal=*/false, State); 1668 } 1669 1670 if (isAggregateTypeForABI(Ty)) { 1671 // Structures with flexible arrays are always indirect. 1672 // FIXME: This should not be byval! 1673 if (RT && RT->getDecl()->hasFlexibleArrayMember()) 1674 return getIndirectResult(Ty, true, State); 1675 1676 // Ignore empty structs/unions on non-Windows. 
1677 if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true)) 1678 return ABIArgInfo::getIgnore(); 1679 1680 llvm::LLVMContext &LLVMContext = getVMContext(); 1681 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext); 1682 bool NeedsPadding = false; 1683 bool InReg; 1684 if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) { 1685 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32; 1686 SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32); 1687 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements); 1688 if (InReg) 1689 return ABIArgInfo::getDirectInReg(Result); 1690 else 1691 return ABIArgInfo::getDirect(Result); 1692 } 1693 llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr; 1694 1695 // Expand small (<= 128-bit) record types when we know that the stack layout 1696 // of those arguments will match the struct. This is important because the 1697 // LLVM backend isn't smart enough to remove byval, which inhibits many 1698 // optimizations. 1699 // Don't do this for the MCU if there are still free integer registers 1700 // (see X86_64 ABI for full explanation). 1701 if (getContext().getTypeSize(Ty) <= 4 * 32 && 1702 (!IsMCUABI || State.FreeRegs == 0) && canExpandIndirectArgument(Ty)) 1703 return ABIArgInfo::getExpandWithPadding( 1704 State.CC == llvm::CallingConv::X86_FastCall || 1705 State.CC == llvm::CallingConv::X86_VectorCall || 1706 State.CC == llvm::CallingConv::X86_RegCall, 1707 PaddingType); 1708 1709 return getIndirectResult(Ty, true, State); 1710 } 1711 1712 if (const VectorType *VT = Ty->getAs<VectorType>()) { 1713 // On Darwin, some vectors are passed in memory, we handle this by passing 1714 // it as an i8/i16/i32/i64. 1715 if (IsDarwinVectorABI) { 1716 uint64_t Size = getContext().getTypeSize(Ty); 1717 if ((Size == 8 || Size == 16 || Size == 32) || 1718 (Size == 64 && VT->getNumElements() == 1)) 1719 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 1720 Size)); 1721 } 1722 1723 if (IsX86_MMXType(CGT.ConvertType(Ty))) 1724 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64)); 1725 1726 return ABIArgInfo::getDirect(); 1727 } 1728 1729 1730 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 1731 Ty = EnumTy->getDecl()->getIntegerType(); 1732 1733 bool InReg = shouldPrimitiveUseInReg(Ty, State); 1734 1735 if (Ty->isPromotableIntegerType()) { 1736 if (InReg) 1737 return ABIArgInfo::getExtendInReg(Ty); 1738 return ABIArgInfo::getExtend(Ty); 1739 } 1740 1741 if (InReg) 1742 return ABIArgInfo::getDirectInReg(); 1743 return ABIArgInfo::getDirect(); 1744 } 1745 1746 void X86_32ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI, CCState &State, 1747 bool &UsedInAlloca) const { 1748 // Vectorcall x86 works subtly different than in x64, so the format is 1749 // a bit different than the x64 version. First, all vector types (not HVAs) 1750 // are assigned, with the first 6 ending up in the YMM0-5 or XMM0-5 registers. 1751 // This differs from the x64 implementation, where the first 6 by INDEX get 1752 // registers. 1753 // After that, integers AND HVAs are assigned Left to Right in the same pass. 1754 // Integers are passed as ECX/EDX if one is available (in order). HVAs will 1755 // first take up the remaining YMM/XMM registers. If insufficient registers 1756 // remain but an integer register (ECX/EDX) is available, it will be passed 1757 // in that, else, on the stack. 1758 for (auto &I : FI.arguments()) { 1759 // First pass do all the vector types. 
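    // Illustrative example (not taken from the ABI document): for
    //   void __vectorcall f(__m128 a, int b, __m128 c);
    // this first pass assigns 'a' and 'c' to XMM registers; 'b' is skipped
    // here and is classified as an ordinary integer argument in the second
    // pass below.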
1760 const Type *Base = nullptr; 1761 uint64_t NumElts = 0; 1762 const QualType& Ty = I.type; 1763 if ((Ty->isVectorType() || Ty->isBuiltinType()) && 1764 isHomogeneousAggregate(Ty, Base, NumElts)) { 1765 if (State.FreeSSERegs >= NumElts) { 1766 State.FreeSSERegs -= NumElts; 1767 I.info = ABIArgInfo::getDirect(); 1768 } else { 1769 I.info = classifyArgumentType(Ty, State); 1770 } 1771 UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca); 1772 } 1773 } 1774 1775 for (auto &I : FI.arguments()) { 1776 // Second pass, do the rest! 1777 const Type *Base = nullptr; 1778 uint64_t NumElts = 0; 1779 const QualType& Ty = I.type; 1780 bool IsHva = isHomogeneousAggregate(Ty, Base, NumElts); 1781 1782 if (IsHva && !Ty->isVectorType() && !Ty->isBuiltinType()) { 1783 // Assign true HVAs (non vector/native FP types). 1784 if (State.FreeSSERegs >= NumElts) { 1785 State.FreeSSERegs -= NumElts; 1786 I.info = getDirectX86Hva(); 1787 } else { 1788 I.info = getIndirectResult(Ty, /*ByVal=*/false, State); 1789 } 1790 } else if (!IsHva) { 1791 // Assign all Non-HVAs, so this will exclude Vector/FP args. 1792 I.info = classifyArgumentType(Ty, State); 1793 UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca); 1794 } 1795 } 1796 } 1797 1798 void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const { 1799 CCState State(FI.getCallingConvention()); 1800 if (IsMCUABI) 1801 State.FreeRegs = 3; 1802 else if (State.CC == llvm::CallingConv::X86_FastCall) 1803 State.FreeRegs = 2; 1804 else if (State.CC == llvm::CallingConv::X86_VectorCall) { 1805 State.FreeRegs = 2; 1806 State.FreeSSERegs = 6; 1807 } else if (FI.getHasRegParm()) 1808 State.FreeRegs = FI.getRegParm(); 1809 else if (State.CC == llvm::CallingConv::X86_RegCall) { 1810 State.FreeRegs = 5; 1811 State.FreeSSERegs = 8; 1812 } else 1813 State.FreeRegs = DefaultNumRegisterParameters; 1814 1815 if (!::classifyReturnType(getCXXABI(), FI, *this)) { 1816 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State); 1817 } else if (FI.getReturnInfo().isIndirect()) { 1818 // The C++ ABI is not aware of register usage, so we have to check if the 1819 // return value was sret and put it in a register ourselves if appropriate. 1820 if (State.FreeRegs) { 1821 --State.FreeRegs; // The sret parameter consumes a register. 1822 if (!IsMCUABI) 1823 FI.getReturnInfo().setInReg(true); 1824 } 1825 } 1826 1827 // The chain argument effectively gives us another free register. 1828 if (FI.isChainCall()) 1829 ++State.FreeRegs; 1830 1831 bool UsedInAlloca = false; 1832 if (State.CC == llvm::CallingConv::X86_VectorCall) { 1833 computeVectorCallArgs(FI, State, UsedInAlloca); 1834 } else { 1835 // If not vectorcall, revert to normal behavior. 1836 for (auto &I : FI.arguments()) { 1837 I.info = classifyArgumentType(I.type, State); 1838 UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca); 1839 } 1840 } 1841 1842 // If we needed to use inalloca for any argument, do a second pass and rewrite 1843 // all the memory arguments to use inalloca. 1844 if (UsedInAlloca) 1845 rewriteWithInAlloca(FI); 1846 } 1847 1848 void 1849 X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields, 1850 CharUnits &StackOffset, ABIArgInfo &Info, 1851 QualType Type) const { 1852 // Arguments are always 4-byte-aligned. 
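  // (Illustrative: a 6-byte struct appended here is followed by a [2 x i8]
  // padding field so that the next argument starts on a 4-byte boundary.)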
1853 CharUnits FieldAlign = CharUnits::fromQuantity(4); 1854 1855 assert(StackOffset.isMultipleOf(FieldAlign) && "unaligned inalloca struct"); 1856 Info = ABIArgInfo::getInAlloca(FrameFields.size()); 1857 FrameFields.push_back(CGT.ConvertTypeForMem(Type)); 1858 StackOffset += getContext().getTypeSizeInChars(Type); 1859 1860 // Insert padding bytes to respect alignment. 1861 CharUnits FieldEnd = StackOffset; 1862 StackOffset = FieldEnd.alignTo(FieldAlign); 1863 if (StackOffset != FieldEnd) { 1864 CharUnits NumBytes = StackOffset - FieldEnd; 1865 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext()); 1866 Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity()); 1867 FrameFields.push_back(Ty); 1868 } 1869 } 1870 1871 static bool isArgInAlloca(const ABIArgInfo &Info) { 1872 // Leave ignored and inreg arguments alone. 1873 switch (Info.getKind()) { 1874 case ABIArgInfo::InAlloca: 1875 return true; 1876 case ABIArgInfo::Indirect: 1877 assert(Info.getIndirectByVal()); 1878 return true; 1879 case ABIArgInfo::Ignore: 1880 return false; 1881 case ABIArgInfo::Direct: 1882 case ABIArgInfo::Extend: 1883 if (Info.getInReg()) 1884 return false; 1885 return true; 1886 case ABIArgInfo::Expand: 1887 case ABIArgInfo::CoerceAndExpand: 1888 // These are aggregate types which are never passed in registers when 1889 // inalloca is involved. 1890 return true; 1891 } 1892 llvm_unreachable("invalid enum"); 1893 } 1894 1895 void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const { 1896 assert(IsWin32StructABI && "inalloca only supported on win32"); 1897 1898 // Build a packed struct type for all of the arguments in memory. 1899 SmallVector<llvm::Type *, 6> FrameFields; 1900 1901 // The stack alignment is always 4. 1902 CharUnits StackAlign = CharUnits::fromQuantity(4); 1903 1904 CharUnits StackOffset; 1905 CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end(); 1906 1907 // Put 'this' into the struct before 'sret', if necessary. 1908 bool IsThisCall = 1909 FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall; 1910 ABIArgInfo &Ret = FI.getReturnInfo(); 1911 if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall && 1912 isArgInAlloca(I->info)) { 1913 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type); 1914 ++I; 1915 } 1916 1917 // Put the sret parameter into the inalloca struct if it's in memory. 1918 if (Ret.isIndirect() && !Ret.getInReg()) { 1919 CanQualType PtrTy = getContext().getPointerType(FI.getReturnType()); 1920 addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy); 1921 // On Windows, the hidden sret parameter is always returned in eax. 1922 Ret.setInAllocaSRet(IsWin32StructABI); 1923 } 1924 1925 // Skip the 'this' parameter in ecx. 1926 if (IsThisCall) 1927 ++I; 1928 1929 // Put arguments passed in memory into the struct. 1930 for (; I != E; ++I) { 1931 if (isArgInAlloca(I->info)) 1932 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type); 1933 } 1934 1935 FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields, 1936 /*isPacked=*/true), 1937 StackAlign); 1938 } 1939 1940 Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF, 1941 Address VAListAddr, QualType Ty) const { 1942 1943 auto TypeInfo = getContext().getTypeInfoInChars(Ty); 1944 1945 // x86-32 changes the alignment of certain arguments on the stack. 1946 // 1947 // Just messing with TypeInfo like this works because we never pass 1948 // anything indirectly. 
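  // For example (illustrative), on Darwin a 16-byte vector argument is
  // realigned to 16 bytes in the va_arg area, while most other types keep
  // the default 4-byte stack slot alignment.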
1949 TypeInfo.second = CharUnits::fromQuantity( 1950 getTypeStackAlignInBytes(Ty, TypeInfo.second.getQuantity())); 1951 1952 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, 1953 TypeInfo, CharUnits::fromQuantity(4), 1954 /*AllowHigherAlign*/ true); 1955 } 1956 1957 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI( 1958 const llvm::Triple &Triple, const CodeGenOptions &Opts) { 1959 assert(Triple.getArch() == llvm::Triple::x86); 1960 1961 switch (Opts.getStructReturnConvention()) { 1962 case CodeGenOptions::SRCK_Default: 1963 break; 1964 case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return 1965 return false; 1966 case CodeGenOptions::SRCK_InRegs: // -freg-struct-return 1967 return true; 1968 } 1969 1970 if (Triple.isOSDarwin() || Triple.isOSIAMCU()) 1971 return true; 1972 1973 switch (Triple.getOS()) { 1974 case llvm::Triple::DragonFly: 1975 case llvm::Triple::FreeBSD: 1976 case llvm::Triple::OpenBSD: 1977 case llvm::Triple::Win32: 1978 return true; 1979 default: 1980 return false; 1981 } 1982 } 1983 1984 void X86_32TargetCodeGenInfo::setTargetAttributes( 1985 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { 1986 if (GV->isDeclaration()) 1987 return; 1988 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 1989 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { 1990 llvm::Function *Fn = cast<llvm::Function>(GV); 1991 Fn->addFnAttr("stackrealign"); 1992 } 1993 if (FD->hasAttr<AnyX86InterruptAttr>()) { 1994 llvm::Function *Fn = cast<llvm::Function>(GV); 1995 Fn->setCallingConv(llvm::CallingConv::X86_INTR); 1996 } 1997 } 1998 } 1999 2000 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable( 2001 CodeGen::CodeGenFunction &CGF, 2002 llvm::Value *Address) const { 2003 CodeGen::CGBuilderTy &Builder = CGF.Builder; 2004 2005 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 2006 2007 // 0-7 are the eight integer registers; the order is different 2008 // on Darwin (for EH), but the range is the same. 2009 // 8 is %eip. 2010 AssignToArrayRange(Builder, Address, Four8, 0, 8); 2011 2012 if (CGF.CGM.getTarget().getTriple().isOSDarwin()) { 2013 // 12-16 are st(0..4). Not sure why we stop at 4. 2014 // These have size 16, which is sizeof(long double) on 2015 // platforms with 8-byte alignment for that type. 2016 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16); 2017 AssignToArrayRange(Builder, Address, Sixteen8, 12, 16); 2018 2019 } else { 2020 // 9 is %eflags, which doesn't get a size on Darwin for some 2021 // reason. 2022 Builder.CreateAlignedStore( 2023 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9), 2024 CharUnits::One()); 2025 2026 // 11-16 are st(0..5). Not sure why we stop at 5. 2027 // These have size 12, which is sizeof(long double) on 2028 // platforms with 4-byte alignment for that type. 2029 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12); 2030 AssignToArrayRange(Builder, Address, Twelve8, 11, 16); 2031 } 2032 2033 return false; 2034 } 2035 2036 //===----------------------------------------------------------------------===// 2037 // X86-64 ABI Implementation 2038 //===----------------------------------------------------------------------===// 2039 2040 2041 namespace { 2042 /// The AVX ABI level for X86 targets. 2043 enum class X86AVXABILevel { 2044 None, 2045 AVX, 2046 AVX512 2047 }; 2048 2049 /// \p returns the size in bits of the largest (native) vector for \p AVXLevel. 
2050 static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) { 2051 switch (AVXLevel) { 2052 case X86AVXABILevel::AVX512: 2053 return 512; 2054 case X86AVXABILevel::AVX: 2055 return 256; 2056 case X86AVXABILevel::None: 2057 return 128; 2058 } 2059 llvm_unreachable("Unknown AVXLevel"); 2060 } 2061 2062 /// X86_64ABIInfo - The X86_64 ABI information. 2063 class X86_64ABIInfo : public SwiftABIInfo { 2064 enum Class { 2065 Integer = 0, 2066 SSE, 2067 SSEUp, 2068 X87, 2069 X87Up, 2070 ComplexX87, 2071 NoClass, 2072 Memory 2073 }; 2074 2075 /// merge - Implement the X86_64 ABI merging algorithm. 2076 /// 2077 /// Merge an accumulating classification \arg Accum with a field 2078 /// classification \arg Field. 2079 /// 2080 /// \param Accum - The accumulating classification. This should 2081 /// always be either NoClass or the result of a previous merge 2082 /// call. In addition, this should never be Memory (the caller 2083 /// should just return Memory for the aggregate). 2084 static Class merge(Class Accum, Class Field); 2085 2086 /// postMerge - Implement the X86_64 ABI post merging algorithm. 2087 /// 2088 /// Post merger cleanup, reduces a malformed Hi and Lo pair to 2089 /// final MEMORY or SSE classes when necessary. 2090 /// 2091 /// \param AggregateSize - The size of the current aggregate in 2092 /// the classification process. 2093 /// 2094 /// \param Lo - The classification for the parts of the type 2095 /// residing in the low word of the containing object. 2096 /// 2097 /// \param Hi - The classification for the parts of the type 2098 /// residing in the higher words of the containing object. 2099 /// 2100 void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const; 2101 2102 /// classify - Determine the x86_64 register classes in which the 2103 /// given type T should be passed. 2104 /// 2105 /// \param Lo - The classification for the parts of the type 2106 /// residing in the low word of the containing object. 2107 /// 2108 /// \param Hi - The classification for the parts of the type 2109 /// residing in the high word of the containing object. 2110 /// 2111 /// \param OffsetBase - The bit offset of this type in the 2112 /// containing object. Some parameters are classified different 2113 /// depending on whether they straddle an eightbyte boundary. 2114 /// 2115 /// \param isNamedArg - Whether the argument in question is a "named" 2116 /// argument, as used in AMD64-ABI 3.5.7. 2117 /// 2118 /// If a word is unused its result will be NoClass; if a type should 2119 /// be passed in Memory then at least the classification of \arg Lo 2120 /// will be Memory. 2121 /// 2122 /// The \arg Lo class will be NoClass iff the argument is ignored. 2123 /// 2124 /// If the \arg Lo class is ComplexX87, then the \arg Hi class will 2125 /// also be ComplexX87. 2126 void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi, 2127 bool isNamedArg) const; 2128 2129 llvm::Type *GetByteVectorType(QualType Ty) const; 2130 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType, 2131 unsigned IROffset, QualType SourceTy, 2132 unsigned SourceOffset) const; 2133 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType, 2134 unsigned IROffset, QualType SourceTy, 2135 unsigned SourceOffset) const; 2136 2137 /// getIndirectResult - Give a source type \arg Ty, return a suitable result 2138 /// such that the argument will be returned in memory. 
2139 ABIArgInfo getIndirectReturnResult(QualType Ty) const; 2140 2141 /// getIndirectResult - Give a source type \arg Ty, return a suitable result 2142 /// such that the argument will be passed in memory. 2143 /// 2144 /// \param freeIntRegs - The number of free integer registers remaining 2145 /// available. 2146 ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const; 2147 2148 ABIArgInfo classifyReturnType(QualType RetTy) const; 2149 2150 ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs, 2151 unsigned &neededInt, unsigned &neededSSE, 2152 bool isNamedArg) const; 2153 2154 ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt, 2155 unsigned &NeededSSE) const; 2156 2157 ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt, 2158 unsigned &NeededSSE) const; 2159 2160 bool IsIllegalVectorType(QualType Ty) const; 2161 2162 /// The 0.98 ABI revision clarified a lot of ambiguities, 2163 /// unfortunately in ways that were not always consistent with 2164 /// certain previous compilers. In particular, platforms which 2165 /// required strict binary compatibility with older versions of GCC 2166 /// may need to exempt themselves. 2167 bool honorsRevision0_98() const { 2168 return !getTarget().getTriple().isOSDarwin(); 2169 } 2170 2171 /// GCC classifies <1 x long long> as SSE but some platform ABIs choose to 2172 /// classify it as INTEGER (for compatibility with older clang compilers). 2173 bool classifyIntegerMMXAsSSE() const { 2174 // Clang <= 3.8 did not do this. 2175 if (getContext().getLangOpts().getClangABICompat() <= 2176 LangOptions::ClangABI::Ver3_8) 2177 return false; 2178 2179 const llvm::Triple &Triple = getTarget().getTriple(); 2180 if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4) 2181 return false; 2182 if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10) 2183 return false; 2184 return true; 2185 } 2186 2187 X86AVXABILevel AVXLevel; 2188 // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on 2189 // 64-bit hardware. 2190 bool Has64BitPointers; 2191 2192 public: 2193 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) : 2194 SwiftABIInfo(CGT), AVXLevel(AVXLevel), 2195 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) { 2196 } 2197 2198 bool isPassedUsingAVXType(QualType type) const { 2199 unsigned neededInt, neededSSE; 2200 // The freeIntRegs argument doesn't matter here. 2201 ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE, 2202 /*isNamedArg*/true); 2203 if (info.isDirect()) { 2204 llvm::Type *ty = info.getCoerceToType(); 2205 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty)) 2206 return (vectorTy->getBitWidth() > 128); 2207 } 2208 return false; 2209 } 2210 2211 void computeInfo(CGFunctionInfo &FI) const override; 2212 2213 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 2214 QualType Ty) const override; 2215 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, 2216 QualType Ty) const override; 2217 2218 bool has64BitPointers() const { 2219 return Has64BitPointers; 2220 } 2221 2222 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, 2223 bool asReturnValue) const override { 2224 return occupiesMoreThan(CGT, scalars, /*total*/ 4); 2225 } 2226 bool isSwiftErrorInRegister() const override { 2227 return true; 2228 } 2229 }; 2230 2231 /// WinX86_64ABIInfo - The Windows X86_64 ABI information. 
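/// As a summary (not normative): unlike the System V ABI, Win64 never splits
/// an argument across registers; aggregates of 1, 2, 4 or 8 bytes are passed
/// directly, and everything else is passed by pointer.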
2232 class WinX86_64ABIInfo : public SwiftABIInfo {
2233 public:
2234   WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT)
2235       : SwiftABIInfo(CGT),
2236         IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
2237 
2238   void computeInfo(CGFunctionInfo &FI) const override;
2239 
2240   Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2241                     QualType Ty) const override;
2242 
2243   bool isHomogeneousAggregateBaseType(QualType Ty) const override {
2244     // FIXME: Assumes vectorcall is in use.
2245     return isX86VectorTypeForVectorCall(getContext(), Ty);
2246   }
2247 
2248   bool isHomogeneousAggregateSmallEnough(const Type *Ty,
2249                                          uint64_t NumMembers) const override {
2250     // FIXME: Assumes vectorcall is in use.
2251     return isX86VectorCallAggregateSmallEnough(NumMembers);
2252   }
2253 
2254   bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type *> scalars,
2255                                     bool asReturnValue) const override {
2256     return occupiesMoreThan(CGT, scalars, /*total*/ 4);
2257   }
2258 
2259   bool isSwiftErrorInRegister() const override {
2260     return true;
2261   }
2262 
2263 private:
2264   ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,
2265                       bool IsVectorCall, bool IsRegCall) const;
2266   ABIArgInfo reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,
2267                                   const ABIArgInfo &current) const;
2268   void computeVectorCallArgs(CGFunctionInfo &FI, unsigned FreeSSERegs,
2269                              bool IsVectorCall, bool IsRegCall) const;
2270 
2271   bool IsMingw64;
2272 };
2273 
2274 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2275 public:
2276   X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2277       : TargetCodeGenInfo(new X86_64ABIInfo(CGT, AVXLevel)) {}
2278 
2279   const X86_64ABIInfo &getABIInfo() const {
2280     return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
2281   }
2282 
2283   /// Disable tail call on x86-64. The epilogue code before the tail jump blocks
2284   /// the autoreleaseRV/retainRV optimization.
2285   bool shouldSuppressTailCallsOfRetainAutoreleasedReturnValue() const override {
2286     return true;
2287   }
2288 
2289   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2290     return 7;
2291   }
2292 
2293   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2294                                llvm::Value *Address) const override {
2295     llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2296 
2297     // 0-15 are the 16 integer registers.
2298     // 16 is %rip.
2299     AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2300     return false;
2301   }
2302 
2303   llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
2304                                   StringRef Constraint,
2305                                   llvm::Type* Ty) const override {
2306     return X86AdjustInlineAsmType(CGF, Constraint, Ty);
2307   }
2308 
2309   bool isNoProtoCallVariadic(const CallArgList &args,
2310                              const FunctionNoProtoType *fnType) const override {
2311     // The default CC on x86-64 sets %al to the number of SSE
2312     // registers used, and GCC sets this when calling an unprototyped
2313     // function, so we override the default behavior. However, don't do
2314     // that when AVX types are involved: the ABI explicitly states it is
2315     // undefined, and it doesn't work in practice because of how the ABI
2316     // defines varargs anyway.
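    // (Illustrative: for a K&R-style 'void f();' called as 'f(1, 2.5)', we
    // return true so the call is lowered as variadic and %al is set; if any
    // argument would be passed in a YMM/ZMM register we fall back to the
    // default behavior below.)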
2317 if (fnType->getCallConv() == CC_C) { 2318 bool HasAVXType = false; 2319 for (CallArgList::const_iterator 2320 it = args.begin(), ie = args.end(); it != ie; ++it) { 2321 if (getABIInfo().isPassedUsingAVXType(it->Ty)) { 2322 HasAVXType = true; 2323 break; 2324 } 2325 } 2326 2327 if (!HasAVXType) 2328 return true; 2329 } 2330 2331 return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType); 2332 } 2333 2334 llvm::Constant * 2335 getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override { 2336 unsigned Sig = (0xeb << 0) | // jmp rel8 2337 (0x06 << 8) | // .+0x08 2338 ('v' << 16) | 2339 ('2' << 24); 2340 return llvm::ConstantInt::get(CGM.Int32Ty, Sig); 2341 } 2342 2343 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2344 CodeGen::CodeGenModule &CGM) const override { 2345 if (GV->isDeclaration()) 2346 return; 2347 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 2348 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { 2349 llvm::Function *Fn = cast<llvm::Function>(GV); 2350 Fn->addFnAttr("stackrealign"); 2351 } 2352 if (FD->hasAttr<AnyX86InterruptAttr>()) { 2353 llvm::Function *Fn = cast<llvm::Function>(GV); 2354 Fn->setCallingConv(llvm::CallingConv::X86_INTR); 2355 } 2356 } 2357 } 2358 }; 2359 2360 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) { 2361 // If the argument does not end in .lib, automatically add the suffix. 2362 // If the argument contains a space, enclose it in quotes. 2363 // This matches the behavior of MSVC. 2364 bool Quote = (Lib.find(" ") != StringRef::npos); 2365 std::string ArgStr = Quote ? "\"" : ""; 2366 ArgStr += Lib; 2367 if (!Lib.endswith_lower(".lib") && !Lib.endswith_lower(".a")) 2368 ArgStr += ".lib"; 2369 ArgStr += Quote ? "\"" : ""; 2370 return ArgStr; 2371 } 2372 2373 class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo { 2374 public: 2375 WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, 2376 bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI, 2377 unsigned NumRegisterParameters) 2378 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI, 2379 Win32StructABI, NumRegisterParameters, false) {} 2380 2381 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2382 CodeGen::CodeGenModule &CGM) const override; 2383 2384 void getDependentLibraryOption(llvm::StringRef Lib, 2385 llvm::SmallString<24> &Opt) const override { 2386 Opt = "/DEFAULTLIB:"; 2387 Opt += qualifyWindowsLibrary(Lib); 2388 } 2389 2390 void getDetectMismatchOption(llvm::StringRef Name, 2391 llvm::StringRef Value, 2392 llvm::SmallString<32> &Opt) const override { 2393 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 2394 } 2395 }; 2396 2397 static void addStackProbeTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2398 CodeGen::CodeGenModule &CGM) { 2399 if (llvm::Function *Fn = dyn_cast_or_null<llvm::Function>(GV)) { 2400 2401 if (CGM.getCodeGenOpts().StackProbeSize != 4096) 2402 Fn->addFnAttr("stack-probe-size", 2403 llvm::utostr(CGM.getCodeGenOpts().StackProbeSize)); 2404 if (CGM.getCodeGenOpts().NoStackArgProbe) 2405 Fn->addFnAttr("no-stack-arg-probe"); 2406 } 2407 } 2408 2409 void WinX86_32TargetCodeGenInfo::setTargetAttributes( 2410 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { 2411 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); 2412 if (GV->isDeclaration()) 2413 return; 2414 addStackProbeTargetAttributes(D, GV, CGM); 2415 } 2416 2417 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo { 2418 public: 
2419 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, 2420 X86AVXABILevel AVXLevel) 2421 : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {} 2422 2423 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2424 CodeGen::CodeGenModule &CGM) const override; 2425 2426 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { 2427 return 7; 2428 } 2429 2430 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2431 llvm::Value *Address) const override { 2432 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8); 2433 2434 // 0-15 are the 16 integer registers. 2435 // 16 is %rip. 2436 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16); 2437 return false; 2438 } 2439 2440 void getDependentLibraryOption(llvm::StringRef Lib, 2441 llvm::SmallString<24> &Opt) const override { 2442 Opt = "/DEFAULTLIB:"; 2443 Opt += qualifyWindowsLibrary(Lib); 2444 } 2445 2446 void getDetectMismatchOption(llvm::StringRef Name, 2447 llvm::StringRef Value, 2448 llvm::SmallString<32> &Opt) const override { 2449 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 2450 } 2451 }; 2452 2453 void WinX86_64TargetCodeGenInfo::setTargetAttributes( 2454 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { 2455 TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); 2456 if (GV->isDeclaration()) 2457 return; 2458 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 2459 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { 2460 llvm::Function *Fn = cast<llvm::Function>(GV); 2461 Fn->addFnAttr("stackrealign"); 2462 } 2463 if (FD->hasAttr<AnyX86InterruptAttr>()) { 2464 llvm::Function *Fn = cast<llvm::Function>(GV); 2465 Fn->setCallingConv(llvm::CallingConv::X86_INTR); 2466 } 2467 } 2468 2469 addStackProbeTargetAttributes(D, GV, CGM); 2470 } 2471 } 2472 2473 void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo, 2474 Class &Hi) const { 2475 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done: 2476 // 2477 // (a) If one of the classes is Memory, the whole argument is passed in 2478 // memory. 2479 // 2480 // (b) If X87UP is not preceded by X87, the whole argument is passed in 2481 // memory. 2482 // 2483 // (c) If the size of the aggregate exceeds two eightbytes and the first 2484 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole 2485 // argument is passed in memory. NOTE: This is necessary to keep the 2486 // ABI working for processors that don't support the __m256 type. 2487 // 2488 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE. 2489 // 2490 // Some of these are enforced by the merging logic. Others can arise 2491 // only with unions; for example: 2492 // union { _Complex double; unsigned; } 2493 // 2494 // Note that clauses (b) and (c) were added in 0.98. 2495 // 2496 if (Hi == Memory) 2497 Lo = Memory; 2498 if (Hi == X87Up && Lo != X87 && honorsRevision0_98()) 2499 Lo = Memory; 2500 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp)) 2501 Lo = Memory; 2502 if (Hi == SSEUp && Lo != SSE) 2503 Hi = SSE; 2504 } 2505 2506 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) { 2507 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is 2508 // classified recursively so that always two fields are 2509 // considered. The resulting class is calculated according to 2510 // the classes of the fields in the eightbyte: 2511 // 2512 // (a) If both classes are equal, this is the resulting class. 
2513 // 2514 // (b) If one of the classes is NO_CLASS, the resulting class is 2515 // the other class. 2516 // 2517 // (c) If one of the classes is MEMORY, the result is the MEMORY 2518 // class. 2519 // 2520 // (d) If one of the classes is INTEGER, the result is the 2521 // INTEGER. 2522 // 2523 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class, 2524 // MEMORY is used as class. 2525 // 2526 // (f) Otherwise class SSE is used. 2527 2528 // Accum should never be memory (we should have returned) or 2529 // ComplexX87 (because this cannot be passed in a structure). 2530 assert((Accum != Memory && Accum != ComplexX87) && 2531 "Invalid accumulated classification during merge."); 2532 if (Accum == Field || Field == NoClass) 2533 return Accum; 2534 if (Field == Memory) 2535 return Memory; 2536 if (Accum == NoClass) 2537 return Field; 2538 if (Accum == Integer || Field == Integer) 2539 return Integer; 2540 if (Field == X87 || Field == X87Up || Field == ComplexX87 || 2541 Accum == X87 || Accum == X87Up) 2542 return Memory; 2543 return SSE; 2544 } 2545 2546 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, 2547 Class &Lo, Class &Hi, bool isNamedArg) const { 2548 // FIXME: This code can be simplified by introducing a simple value class for 2549 // Class pairs with appropriate constructor methods for the various 2550 // situations. 2551 2552 // FIXME: Some of the split computations are wrong; unaligned vectors 2553 // shouldn't be passed in registers for example, so there is no chance they 2554 // can straddle an eightbyte. Verify & simplify. 2555 2556 Lo = Hi = NoClass; 2557 2558 Class &Current = OffsetBase < 64 ? Lo : Hi; 2559 Current = Memory; 2560 2561 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 2562 BuiltinType::Kind k = BT->getKind(); 2563 2564 if (k == BuiltinType::Void) { 2565 Current = NoClass; 2566 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) { 2567 Lo = Integer; 2568 Hi = Integer; 2569 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) { 2570 Current = Integer; 2571 } else if (k == BuiltinType::Float || k == BuiltinType::Double) { 2572 Current = SSE; 2573 } else if (k == BuiltinType::LongDouble) { 2574 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); 2575 if (LDF == &llvm::APFloat::IEEEquad()) { 2576 Lo = SSE; 2577 Hi = SSEUp; 2578 } else if (LDF == &llvm::APFloat::x87DoubleExtended()) { 2579 Lo = X87; 2580 Hi = X87Up; 2581 } else if (LDF == &llvm::APFloat::IEEEdouble()) { 2582 Current = SSE; 2583 } else 2584 llvm_unreachable("unexpected long double representation!"); 2585 } 2586 // FIXME: _Decimal32 and _Decimal64 are SSE. 2587 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). 2588 return; 2589 } 2590 2591 if (const EnumType *ET = Ty->getAs<EnumType>()) { 2592 // Classify the underlying integer type. 2593 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg); 2594 return; 2595 } 2596 2597 if (Ty->hasPointerRepresentation()) { 2598 Current = Integer; 2599 return; 2600 } 2601 2602 if (Ty->isMemberPointerType()) { 2603 if (Ty->isMemberFunctionPointerType()) { 2604 if (Has64BitPointers) { 2605 // If Has64BitPointers, this is an {i64, i64}, so classify both 2606 // Lo and Hi now. 2607 Lo = Hi = Integer; 2608 } else { 2609 // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that 2610 // straddles an eightbyte boundary, Hi should be classified as well. 
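          // (Illustrative: a member function pointer at bit offset 32 of an
          // enclosing struct occupies bits [32, 96) and so crosses the first
          // eightbyte boundary; both halves end up classified as INTEGER.)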
2611 uint64_t EB_FuncPtr = (OffsetBase) / 64; 2612 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64; 2613 if (EB_FuncPtr != EB_ThisAdj) { 2614 Lo = Hi = Integer; 2615 } else { 2616 Current = Integer; 2617 } 2618 } 2619 } else { 2620 Current = Integer; 2621 } 2622 return; 2623 } 2624 2625 if (const VectorType *VT = Ty->getAs<VectorType>()) { 2626 uint64_t Size = getContext().getTypeSize(VT); 2627 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) { 2628 // gcc passes the following as integer: 2629 // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float> 2630 // 2 bytes - <2 x char>, <1 x short> 2631 // 1 byte - <1 x char> 2632 Current = Integer; 2633 2634 // If this type crosses an eightbyte boundary, it should be 2635 // split. 2636 uint64_t EB_Lo = (OffsetBase) / 64; 2637 uint64_t EB_Hi = (OffsetBase + Size - 1) / 64; 2638 if (EB_Lo != EB_Hi) 2639 Hi = Lo; 2640 } else if (Size == 64) { 2641 QualType ElementType = VT->getElementType(); 2642 2643 // gcc passes <1 x double> in memory. :( 2644 if (ElementType->isSpecificBuiltinType(BuiltinType::Double)) 2645 return; 2646 2647 // gcc passes <1 x long long> as SSE but clang used to unconditionally 2648 // pass them as integer. For platforms where clang is the de facto 2649 // platform compiler, we must continue to use integer. 2650 if (!classifyIntegerMMXAsSSE() && 2651 (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) || 2652 ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) || 2653 ElementType->isSpecificBuiltinType(BuiltinType::Long) || 2654 ElementType->isSpecificBuiltinType(BuiltinType::ULong))) 2655 Current = Integer; 2656 else 2657 Current = SSE; 2658 2659 // If this type crosses an eightbyte boundary, it should be 2660 // split. 2661 if (OffsetBase && OffsetBase != 64) 2662 Hi = Lo; 2663 } else if (Size == 128 || 2664 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) { 2665 // Arguments of 256-bits are split into four eightbyte chunks. The 2666 // least significant one belongs to class SSE and all the others to class 2667 // SSEUP. The original Lo and Hi design considers that types can't be 2668 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense. 2669 // This design isn't correct for 256-bits, but since there're no cases 2670 // where the upper parts would need to be inspected, avoid adding 2671 // complexity and just consider Hi to match the 64-256 part. 2672 // 2673 // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in 2674 // registers if they are "named", i.e. not part of the "..." of a 2675 // variadic function. 2676 // 2677 // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are 2678 // split into eight eightbyte chunks, one SSE and seven SSEUP. 
2679 Lo = SSE; 2680 Hi = SSEUp; 2681 } 2682 return; 2683 } 2684 2685 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 2686 QualType ET = getContext().getCanonicalType(CT->getElementType()); 2687 2688 uint64_t Size = getContext().getTypeSize(Ty); 2689 if (ET->isIntegralOrEnumerationType()) { 2690 if (Size <= 64) 2691 Current = Integer; 2692 else if (Size <= 128) 2693 Lo = Hi = Integer; 2694 } else if (ET == getContext().FloatTy) { 2695 Current = SSE; 2696 } else if (ET == getContext().DoubleTy) { 2697 Lo = Hi = SSE; 2698 } else if (ET == getContext().LongDoubleTy) { 2699 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); 2700 if (LDF == &llvm::APFloat::IEEEquad()) 2701 Current = Memory; 2702 else if (LDF == &llvm::APFloat::x87DoubleExtended()) 2703 Current = ComplexX87; 2704 else if (LDF == &llvm::APFloat::IEEEdouble()) 2705 Lo = Hi = SSE; 2706 else 2707 llvm_unreachable("unexpected long double representation!"); 2708 } 2709 2710 // If this complex type crosses an eightbyte boundary then it 2711 // should be split. 2712 uint64_t EB_Real = (OffsetBase) / 64; 2713 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64; 2714 if (Hi == NoClass && EB_Real != EB_Imag) 2715 Hi = Lo; 2716 2717 return; 2718 } 2719 2720 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 2721 // Arrays are treated like structures. 2722 2723 uint64_t Size = getContext().getTypeSize(Ty); 2724 2725 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 2726 // than eight eightbytes, ..., it has class MEMORY. 2727 if (Size > 512) 2728 return; 2729 2730 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned 2731 // fields, it has class MEMORY. 2732 // 2733 // Only need to check alignment of array base. 2734 if (OffsetBase % getContext().getTypeAlign(AT->getElementType())) 2735 return; 2736 2737 // Otherwise implement simplified merge. We could be smarter about 2738 // this, but it isn't worth it and would be harder to verify. 2739 Current = NoClass; 2740 uint64_t EltSize = getContext().getTypeSize(AT->getElementType()); 2741 uint64_t ArraySize = AT->getSize().getZExtValue(); 2742 2743 // The only case a 256-bit wide vector could be used is when the array 2744 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 2745 // to work for sizes wider than 128, early check and fallback to memory. 2746 // 2747 if (Size > 128 && 2748 (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel))) 2749 return; 2750 2751 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) { 2752 Class FieldLo, FieldHi; 2753 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg); 2754 Lo = merge(Lo, FieldLo); 2755 Hi = merge(Hi, FieldHi); 2756 if (Lo == Memory || Hi == Memory) 2757 break; 2758 } 2759 2760 postMerge(Size, Lo, Hi); 2761 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification."); 2762 return; 2763 } 2764 2765 if (const RecordType *RT = Ty->getAs<RecordType>()) { 2766 uint64_t Size = getContext().getTypeSize(Ty); 2767 2768 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 2769 // than eight eightbytes, ..., it has class MEMORY. 2770 if (Size > 512) 2771 return; 2772 2773 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial 2774 // copy constructor or a non-trivial destructor, it is passed by invisible 2775 // reference. 
2776 if (getRecordArgABI(RT, getCXXABI())) 2777 return; 2778 2779 const RecordDecl *RD = RT->getDecl(); 2780 2781 // Assume variable sized types are passed in memory. 2782 if (RD->hasFlexibleArrayMember()) 2783 return; 2784 2785 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 2786 2787 // Reset Lo class, this will be recomputed. 2788 Current = NoClass; 2789 2790 // If this is a C++ record, classify the bases first. 2791 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 2792 for (const auto &I : CXXRD->bases()) { 2793 assert(!I.isVirtual() && !I.getType()->isDependentType() && 2794 "Unexpected base class!"); 2795 const CXXRecordDecl *Base = 2796 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl()); 2797 2798 // Classify this field. 2799 // 2800 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a 2801 // single eightbyte, each is classified separately. Each eightbyte gets 2802 // initialized to class NO_CLASS. 2803 Class FieldLo, FieldHi; 2804 uint64_t Offset = 2805 OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base)); 2806 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg); 2807 Lo = merge(Lo, FieldLo); 2808 Hi = merge(Hi, FieldHi); 2809 if (Lo == Memory || Hi == Memory) { 2810 postMerge(Size, Lo, Hi); 2811 return; 2812 } 2813 } 2814 } 2815 2816 // Classify the fields one at a time, merging the results. 2817 unsigned idx = 0; 2818 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 2819 i != e; ++i, ++idx) { 2820 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 2821 bool BitField = i->isBitField(); 2822 2823 // Ignore padding bit-fields. 2824 if (BitField && i->isUnnamedBitfield()) 2825 continue; 2826 2827 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than 2828 // four eightbytes, or it contains unaligned fields, it has class MEMORY. 2829 // 2830 // The only case a 256-bit wide vector could be used is when the struct 2831 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 2832 // to work for sizes wider than 128, early check and fallback to memory. 2833 // 2834 if (Size > 128 && (Size != getContext().getTypeSize(i->getType()) || 2835 Size > getNativeVectorSizeForAVXABI(AVXLevel))) { 2836 Lo = Memory; 2837 postMerge(Size, Lo, Hi); 2838 return; 2839 } 2840 // Note, skip this test for bit-fields, see below. 2841 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) { 2842 Lo = Memory; 2843 postMerge(Size, Lo, Hi); 2844 return; 2845 } 2846 2847 // Classify this field. 2848 // 2849 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate 2850 // exceeds a single eightbyte, each is classified 2851 // separately. Each eightbyte gets initialized to class 2852 // NO_CLASS. 2853 Class FieldLo, FieldHi; 2854 2855 // Bit-fields require special handling, they do not force the 2856 // structure to be passed in memory even if unaligned, and 2857 // therefore they can straddle an eightbyte. 2858 if (BitField) { 2859 assert(!i->isUnnamedBitfield()); 2860 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 2861 uint64_t Size = i->getBitWidthValue(getContext()); 2862 2863 uint64_t EB_Lo = Offset / 64; 2864 uint64_t EB_Hi = (Offset + Size - 1) / 64; 2865 2866 if (EB_Lo) { 2867 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes."); 2868 FieldLo = NoClass; 2869 FieldHi = Integer; 2870 } else { 2871 FieldLo = Integer; 2872 FieldHi = EB_Hi ? 
Integer : NoClass; 2873 } 2874 } else 2875 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg); 2876 Lo = merge(Lo, FieldLo); 2877 Hi = merge(Hi, FieldHi); 2878 if (Lo == Memory || Hi == Memory) 2879 break; 2880 } 2881 2882 postMerge(Size, Lo, Hi); 2883 } 2884 } 2885 2886 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const { 2887 // If this is a scalar LLVM value then assume LLVM will pass it in the right 2888 // place naturally. 2889 if (!isAggregateTypeForABI(Ty)) { 2890 // Treat an enum type as its underlying type. 2891 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2892 Ty = EnumTy->getDecl()->getIntegerType(); 2893 2894 return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty) 2895 : ABIArgInfo::getDirect()); 2896 } 2897 2898 return getNaturalAlignIndirect(Ty); 2899 } 2900 2901 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const { 2902 if (const VectorType *VecTy = Ty->getAs<VectorType>()) { 2903 uint64_t Size = getContext().getTypeSize(VecTy); 2904 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel); 2905 if (Size <= 64 || Size > LargestVector) 2906 return true; 2907 } 2908 2909 return false; 2910 } 2911 2912 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty, 2913 unsigned freeIntRegs) const { 2914 // If this is a scalar LLVM value then assume LLVM will pass it in the right 2915 // place naturally. 2916 // 2917 // This assumption is optimistic, as there could be free registers available 2918 // when we need to pass this argument in memory, and LLVM could try to pass 2919 // the argument in the free register. This does not seem to happen currently, 2920 // but this code would be much safer if we could mark the argument with 2921 // 'onstack'. See PR12193. 2922 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) { 2923 // Treat an enum type as its underlying type. 2924 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2925 Ty = EnumTy->getDecl()->getIntegerType(); 2926 2927 return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty) 2928 : ABIArgInfo::getDirect()); 2929 } 2930 2931 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 2932 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 2933 2934 // Compute the byval alignment. We specify the alignment of the byval in all 2935 // cases so that the mid-level optimizer knows the alignment of the byval. 2936 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U); 2937 2938 // Attempt to avoid passing indirect results using byval when possible. This 2939 // is important for good codegen. 2940 // 2941 // We do this by coercing the value into a scalar type which the backend can 2942 // handle naturally (i.e., without using byval). 2943 // 2944 // For simplicity, we currently only do this when we have exhausted all of the 2945 // free integer registers. Doing this when there are free integer registers 2946 // would require more care, as we would have to ensure that the coerced value 2947 // did not claim the unused register. That would require either reording the 2948 // arguments to the function (so that any subsequent inreg values came first), 2949 // or only doing this optimization when there were no following arguments that 2950 // might be inreg. 
2951 // 2952 // We currently expect it to be rare (particularly in well written code) for 2953 // arguments to be passed on the stack when there are still free integer 2954 // registers available (this would typically imply large structs being passed 2955 // by value), so this seems like a fair tradeoff for now. 2956 // 2957 // We can revisit this if the backend grows support for 'onstack' parameter 2958 // attributes. See PR12193. 2959 if (freeIntRegs == 0) { 2960 uint64_t Size = getContext().getTypeSize(Ty); 2961 2962 // If this type fits in an eightbyte, coerce it into the matching integral 2963 // type, which will end up on the stack (with alignment 8). 2964 if (Align == 8 && Size <= 64) 2965 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2966 Size)); 2967 } 2968 2969 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align)); 2970 } 2971 2972 /// The ABI specifies that a value should be passed in a full vector XMM/YMM 2973 /// register. Pick an LLVM IR type that will be passed as a vector register. 2974 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const { 2975 // Wrapper structs/arrays that only contain vectors are passed just like 2976 // vectors; strip them off if present. 2977 if (const Type *InnerTy = isSingleElementStruct(Ty, getContext())) 2978 Ty = QualType(InnerTy, 0); 2979 2980 llvm::Type *IRType = CGT.ConvertType(Ty); 2981 if (isa<llvm::VectorType>(IRType) || 2982 IRType->getTypeID() == llvm::Type::FP128TyID) 2983 return IRType; 2984 2985 // We couldn't find the preferred IR vector type for 'Ty'. 2986 uint64_t Size = getContext().getTypeSize(Ty); 2987 assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!"); 2988 2989 // Return a LLVM IR vector type based on the size of 'Ty'. 2990 return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2991 Size / 64); 2992 } 2993 2994 /// BitsContainNoUserData - Return true if the specified [start,end) bit range 2995 /// is known to either be off the end of the specified type or being in 2996 /// alignment padding. The user type specified is known to be at most 128 bits 2997 /// in size, and have passed through X86_64ABIInfo::classify with a successful 2998 /// classification that put one of the two halves in the INTEGER class. 2999 /// 3000 /// It is conservatively correct to return false. 3001 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, 3002 unsigned EndBit, ASTContext &Context) { 3003 // If the bytes being queried are off the end of the type, there is no user 3004 // data hiding here. This handles analysis of builtins, vectors and other 3005 // types that don't contain interesting padding. 3006 unsigned TySize = (unsigned)Context.getTypeSize(Ty); 3007 if (TySize <= StartBit) 3008 return true; 3009 3010 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 3011 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType()); 3012 unsigned NumElts = (unsigned)AT->getSize().getZExtValue(); 3013 3014 // Check each element to see if the element overlaps with the queried range. 3015 for (unsigned i = 0; i != NumElts; ++i) { 3016 // If the element is after the span we care about, then we're done.. 3017 unsigned EltOffset = i*EltSize; 3018 if (EltOffset >= EndBit) break; 3019 3020 unsigned EltStart = EltOffset < StartBit ? 
StartBit-EltOffset :0; 3021 if (!BitsContainNoUserData(AT->getElementType(), EltStart, 3022 EndBit-EltOffset, Context)) 3023 return false; 3024 } 3025 // If it overlaps no elements, then it is safe to process as padding. 3026 return true; 3027 } 3028 3029 if (const RecordType *RT = Ty->getAs<RecordType>()) { 3030 const RecordDecl *RD = RT->getDecl(); 3031 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 3032 3033 // If this is a C++ record, check the bases first. 3034 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 3035 for (const auto &I : CXXRD->bases()) { 3036 assert(!I.isVirtual() && !I.getType()->isDependentType() && 3037 "Unexpected base class!"); 3038 const CXXRecordDecl *Base = 3039 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl()); 3040 3041 // If the base is after the span we care about, ignore it. 3042 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base)); 3043 if (BaseOffset >= EndBit) continue; 3044 3045 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0; 3046 if (!BitsContainNoUserData(I.getType(), BaseStart, 3047 EndBit-BaseOffset, Context)) 3048 return false; 3049 } 3050 } 3051 3052 // Verify that no field has data that overlaps the region of interest. Yes 3053 // this could be sped up a lot by being smarter about queried fields, 3054 // however we're only looking at structs up to 16 bytes, so we don't care 3055 // much. 3056 unsigned idx = 0; 3057 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 3058 i != e; ++i, ++idx) { 3059 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx); 3060 3061 // If we found a field after the region we care about, then we're done. 3062 if (FieldOffset >= EndBit) break; 3063 3064 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0; 3065 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset, 3066 Context)) 3067 return false; 3068 } 3069 3070 // If nothing in this record overlapped the area of interest, then we're 3071 // clean. 3072 return true; 3073 } 3074 3075 return false; 3076 } 3077 3078 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a 3079 /// float member at the specified offset. For example, {int,{float}} has a 3080 /// float at offset 4. It is conservatively correct for this routine to return 3081 /// false. 3082 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, 3083 const llvm::DataLayout &TD) { 3084 // Base case if we find a float. 3085 if (IROffset == 0 && IRType->isFloatTy()) 3086 return true; 3087 3088 // If this is a struct, recurse into the field at the specified offset. 3089 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 3090 const llvm::StructLayout *SL = TD.getStructLayout(STy); 3091 unsigned Elt = SL->getElementContainingOffset(IROffset); 3092 IROffset -= SL->getElementOffset(Elt); 3093 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD); 3094 } 3095 3096 // If this is an array, recurse into the field at the specified offset. 3097 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 3098 llvm::Type *EltTy = ATy->getElementType(); 3099 unsigned EltSize = TD.getTypeAllocSize(EltTy); 3100 IROffset -= IROffset/EltSize*EltSize; 3101 return ContainsFloatAtOffset(EltTy, IROffset, TD); 3102 } 3103 3104 return false; 3105 } 3106 3107 3108 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the 3109 /// low 8 bytes of an XMM register, corresponding to the SSE class. 
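/// For example (illustrative): 'struct { float a, b; }' is passed as
/// <2 x float>, a plain 'double' as double, and the second eightbyte of
/// 'struct { float a, b, c; }' as a single float, since its upper four bytes
/// are just padding.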
3110 llvm::Type *X86_64ABIInfo::
3111 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
3112 QualType SourceTy, unsigned SourceOffset) const {
3113 // The only three choices we have are double, <2 x float>, or float. We
3114 // pass as float if the last 4 bytes are just padding. This happens for
3115 // structs that contain 3 floats.
3116 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
3117 SourceOffset*8+64, getContext()))
3118 return llvm::Type::getFloatTy(getVMContext());
3119
3120 // We want to pass as <2 x float> if the LLVM IR type contains a float at
3121 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
3122 // case.
3123 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
3124 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
3125 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
3126
3127 return llvm::Type::getDoubleTy(getVMContext());
3128 }
3129
3130
3131 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
3132 /// an 8-byte GPR. This means that we either have a scalar or we are talking
3133 /// about the high or low part of an up-to-16-byte struct. This routine picks
3134 /// the best LLVM IR type to represent this, which may be i64 or may be anything
3135 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
3136 /// etc.).
3137 ///
3138 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
3139 /// the source type. IROffset is an offset in bytes into the LLVM IR type that
3140 /// the 8-byte value references. PrefType may be null.
3141 ///
3142 /// SourceTy is the source-level type for the entire argument. SourceOffset is
3143 /// an offset into this that we're processing (which is always either 0 or 8).
3144 ///
3145 llvm::Type *X86_64ABIInfo::
3146 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
3147 QualType SourceTy, unsigned SourceOffset) const {
3148 // If we're dealing with an un-offset LLVM IR type, then it means that we're
3149 // returning an 8-byte unit starting with it. See if we can safely use it.
3150 if (IROffset == 0) {
3151 // Pointers and int64s always fill the 8-byte unit.
3152 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
3153 IRType->isIntegerTy(64))
3154 return IRType;
3155
3156 // If we have a 1/2/4-byte integer, we can use it only if the rest of the
3157 // goodness in the source type is just tail padding. This is allowed to
3158 // kick in for struct {double,int} on the int, but not on
3159 // struct {double,int,int} because we wouldn't return the second int. We
3160 // have to do this analysis on the source type because we can't depend on
3161 // unions being lowered a specific way etc.
3162 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
3163 IRType->isIntegerTy(32) ||
3164 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
3165 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
3166 cast<llvm::IntegerType>(IRType)->getBitWidth();
3167
3168 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
3169 SourceOffset*8+64, getContext()))
3170 return IRType;
3171 }
3172 }
3173
3174 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3175 // If this is a struct, recurse into the field at the specified offset.
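// Illustrative example (hypothetical IR type, added for exposition): given
// { { i8* }, i64 } and IROffset == 0, we recurse into the inner { i8* } and
// then into the i8* itself, which fills the first eightbyte on targets with
// 64-bit pointers.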
3176 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
3177 if (IROffset < SL->getSizeInBytes()) {
3178 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
3179 IROffset -= SL->getElementOffset(FieldIdx);
3180
3181 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
3182 SourceTy, SourceOffset);
3183 }
3184 }
3185
3186 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3187 llvm::Type *EltTy = ATy->getElementType();
3188 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
3189 unsigned EltOffset = IROffset/EltSize*EltSize;
3190 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
3191 SourceOffset);
3192 }
3193
3194 // Okay, we don't have any better idea of what to pass, so we pass this in an
3195 // integer register that is no bigger than what remains of the struct.
3196 unsigned TySizeInBytes =
3197 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
3198
3199 assert(TySizeInBytes != SourceOffset && "Empty field?");
3200
3201 // It is always safe to classify this as an integer type up to i64 that
3202 // isn't larger than the structure.
3203 return llvm::IntegerType::get(getVMContext(),
3204 std::min(TySizeInBytes-SourceOffset, 8U)*8);
3205 }
3206
3207
3208 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
3209 /// be used as elements of a two-register pair to pass or return, return a
3210 /// first-class aggregate to represent them. For example, if the low part of
3211 /// a by-value argument should be passed as i32* and the high part as float,
3212 /// return {i32*, float}.
3213 static llvm::Type *
3214 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
3215 const llvm::DataLayout &TD) {
3216 // In order to correctly satisfy the ABI, we need the high part to start
3217 // at offset 8. If the high and low parts we inferred are both 4-byte types
3218 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
3219 // the second element at offset 8. Check for this:
3220 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
3221 unsigned HiAlign = TD.getABITypeAlignment(Hi);
3222 unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
3223 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
3224
3225 // To handle this, we have to increase the size of the low part so that the
3226 // second element will start at an 8-byte offset. We can't increase the size
3227 // of the second element because it might make us access off the end of the
3228 // struct.
3229 if (HiStart != 8) {
3230 // There are usually two sorts of types the ABI generation code can produce
3231 // for the low part of a pair that aren't 8 bytes in size: float or
3232 // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and
3233 // NaCl).
3234 // Promote these to a larger type.
3235 if (Lo->isFloatTy())
3236 Lo = llvm::Type::getDoubleTy(Lo->getContext());
3237 else {
3238 assert((Lo->isIntegerTy() || Lo->isPointerTy())
3239 && "Invalid/unknown lo type");
3240 Lo = llvm::Type::getInt64Ty(Lo->getContext());
3241 }
3242 }
3243
3244 llvm::StructType *Result = llvm::StructType::get(Lo, Hi);
3245
3246 // Verify that the second element is at an 8-byte offset.
3247 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
3248 "Invalid x86-64 argument pair!");
3249 return Result;
3250 }
3251
3252 ABIArgInfo X86_64ABIInfo::
3253 classifyReturnType(QualType RetTy) const {
3254 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
3255 // classification algorithm.
3256 X86_64ABIInfo::Class Lo, Hi;
3257 classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
3258
3259 // Check some invariants.
3260 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
3261 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
3262
3263 llvm::Type *ResType = nullptr;
3264 switch (Lo) {
3265 case NoClass:
3266 if (Hi == NoClass)
3267 return ABIArgInfo::getIgnore();
3268 // If the low part is just padding, it takes no register; leave ResType
3269 // null.
3270 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3271 "Unknown missing lo part");
3272 break;
3273
3274 case SSEUp:
3275 case X87Up:
3276 llvm_unreachable("Invalid classification for lo word.");
3277
3278 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
3279 // hidden argument.
3280 case Memory:
3281 return getIndirectReturnResult(RetTy);
3282
3283 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
3284 // available register of the sequence %rax, %rdx is used.
3285 case Integer:
3286 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
3287
3288 // If we have a sign- or zero-extended integer, make sure to return Extend
3289 // so that the parameter gets the right LLVM IR attributes.
3290 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3291 // Treat an enum type as its underlying type.
3292 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
3293 RetTy = EnumTy->getDecl()->getIntegerType();
3294
3295 if (RetTy->isIntegralOrEnumerationType() &&
3296 RetTy->isPromotableIntegerType())
3297 return ABIArgInfo::getExtend(RetTy);
3298 }
3299 break;
3300
3301 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
3302 // available SSE register of the sequence %xmm0, %xmm1 is used.
3303 case SSE:
3304 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
3305 break;
3306
3307 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
3308 // returned on the X87 stack in %st0 as an 80-bit x87 number.
3309 case X87:
3310 ResType = llvm::Type::getX86_FP80Ty(getVMContext());
3311 break;
3312
3313 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
3314 // part of the value is returned in %st0 and the imaginary part in
3315 // %st1.
3316 case ComplexX87:
3317 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
3318 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
3319 llvm::Type::getX86_FP80Ty(getVMContext()));
3320 break;
3321 }
3322
3323 llvm::Type *HighPart = nullptr;
3324 switch (Hi) {
3325 // Memory was handled previously and X87 should
3326 // never occur as a hi class.
3327 case Memory:
3328 case X87:
3329 llvm_unreachable("Invalid classification for hi word.");
3330
3331 case ComplexX87: // Previously handled.
3332 case NoClass:
3333 break;
3334
3335 case Integer:
3336 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3337 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3338 return ABIArgInfo::getDirect(HighPart, 8);
3339 break;
3340 case SSE:
3341 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3342 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3343 return ABIArgInfo::getDirect(HighPart, 8);
3344 break;
3345
3346 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
3347 // is passed in the next available eightbyte chunk of the last used
3348 // vector register.
3349 // 3350 // SSEUP should always be preceded by SSE, just widen. 3351 case SSEUp: 3352 assert(Lo == SSE && "Unexpected SSEUp classification."); 3353 ResType = GetByteVectorType(RetTy); 3354 break; 3355 3356 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is 3357 // returned together with the previous X87 value in %st0. 3358 case X87Up: 3359 // If X87Up is preceded by X87, we don't need to do 3360 // anything. However, in some cases with unions it may not be 3361 // preceded by X87. In such situations we follow gcc and pass the 3362 // extra bits in an SSE reg. 3363 if (Lo != X87) { 3364 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 3365 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 3366 return ABIArgInfo::getDirect(HighPart, 8); 3367 } 3368 break; 3369 } 3370 3371 // If a high part was specified, merge it together with the low part. It is 3372 // known to pass in the high eightbyte of the result. We do this by forming a 3373 // first class struct aggregate with the high and low part: {low, high} 3374 if (HighPart) 3375 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 3376 3377 return ABIArgInfo::getDirect(ResType); 3378 } 3379 3380 ABIArgInfo X86_64ABIInfo::classifyArgumentType( 3381 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE, 3382 bool isNamedArg) 3383 const 3384 { 3385 Ty = useFirstFieldIfTransparentUnion(Ty); 3386 3387 X86_64ABIInfo::Class Lo, Hi; 3388 classify(Ty, 0, Lo, Hi, isNamedArg); 3389 3390 // Check some invariants. 3391 // FIXME: Enforce these by construction. 3392 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 3393 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 3394 3395 neededInt = 0; 3396 neededSSE = 0; 3397 llvm::Type *ResType = nullptr; 3398 switch (Lo) { 3399 case NoClass: 3400 if (Hi == NoClass) 3401 return ABIArgInfo::getIgnore(); 3402 // If the low part is just padding, it takes no register, leave ResType 3403 // null. 3404 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 3405 "Unknown missing lo part"); 3406 break; 3407 3408 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument 3409 // on the stack. 3410 case Memory: 3411 3412 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or 3413 // COMPLEX_X87, it is passed in memory. 3414 case X87: 3415 case ComplexX87: 3416 if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect) 3417 ++neededInt; 3418 return getIndirectResult(Ty, freeIntRegs); 3419 3420 case SSEUp: 3421 case X87Up: 3422 llvm_unreachable("Invalid classification for lo word."); 3423 3424 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next 3425 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 3426 // and %r9 is used. 3427 case Integer: 3428 ++neededInt; 3429 3430 // Pick an 8-byte type based on the preferred type. 3431 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); 3432 3433 // If we have a sign or zero extended integer, make sure to return Extend 3434 // so that the parameter gets the right LLVM IR attributes. 3435 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 3436 // Treat an enum type as its underlying type. 3437 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3438 Ty = EnumTy->getDecl()->getIntegerType(); 3439 3440 if (Ty->isIntegralOrEnumerationType() && 3441 Ty->isPromotableIntegerType()) 3442 return ABIArgInfo::getExtend(Ty); 3443 } 3444 3445 break; 3446 3447 // AMD64-ABI 3.2.3p3: Rule 3. 
If the class is SSE, the next 3448 // available SSE register is used, the registers are taken in the 3449 // order from %xmm0 to %xmm7. 3450 case SSE: { 3451 llvm::Type *IRType = CGT.ConvertType(Ty); 3452 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0); 3453 ++neededSSE; 3454 break; 3455 } 3456 } 3457 3458 llvm::Type *HighPart = nullptr; 3459 switch (Hi) { 3460 // Memory was handled previously, ComplexX87 and X87 should 3461 // never occur as hi classes, and X87Up must be preceded by X87, 3462 // which is passed in memory. 3463 case Memory: 3464 case X87: 3465 case ComplexX87: 3466 llvm_unreachable("Invalid classification for hi word."); 3467 3468 case NoClass: break; 3469 3470 case Integer: 3471 ++neededInt; 3472 // Pick an 8-byte type based on the preferred type. 3473 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 3474 3475 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 3476 return ABIArgInfo::getDirect(HighPart, 8); 3477 break; 3478 3479 // X87Up generally doesn't occur here (long double is passed in 3480 // memory), except in situations involving unions. 3481 case X87Up: 3482 case SSE: 3483 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 3484 3485 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 3486 return ABIArgInfo::getDirect(HighPart, 8); 3487 3488 ++neededSSE; 3489 break; 3490 3491 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the 3492 // eightbyte is passed in the upper half of the last used SSE 3493 // register. This only happens when 128-bit vectors are passed. 3494 case SSEUp: 3495 assert(Lo == SSE && "Unexpected SSEUp classification"); 3496 ResType = GetByteVectorType(Ty); 3497 break; 3498 } 3499 3500 // If a high part was specified, merge it together with the low part. It is 3501 // known to pass in the high eightbyte of the result. 
We do this by forming a
3502 // first-class struct aggregate with the high and low part: {low, high}
3503 if (HighPart)
3504 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
3505
3506 return ABIArgInfo::getDirect(ResType);
3507 }
3508
3509 ABIArgInfo
3510 X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
3511 unsigned &NeededSSE) const {
3512 auto RT = Ty->getAs<RecordType>();
3513 assert(RT && "classifyRegCallStructType only valid with struct types");
3514
3515 if (RT->getDecl()->hasFlexibleArrayMember())
3516 return getIndirectReturnResult(Ty);
3517
3518 // Sum up bases
3519 if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
3520 if (CXXRD->isDynamicClass()) {
3521 NeededInt = NeededSSE = 0;
3522 return getIndirectReturnResult(Ty);
3523 }
3524
3525 for (const auto &I : CXXRD->bases())
3526 if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE)
3527 .isIndirect()) {
3528 NeededInt = NeededSSE = 0;
3529 return getIndirectReturnResult(Ty);
3530 }
3531 }
3532
3533 // Sum up members
3534 for (const auto *FD : RT->getDecl()->fields()) {
3535 if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) {
3536 if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE)
3537 .isIndirect()) {
3538 NeededInt = NeededSSE = 0;
3539 return getIndirectReturnResult(Ty);
3540 }
3541 } else {
3542 unsigned LocalNeededInt, LocalNeededSSE;
3543 if (classifyArgumentType(FD->getType(), UINT_MAX, LocalNeededInt,
3544 LocalNeededSSE, true)
3545 .isIndirect()) {
3546 NeededInt = NeededSSE = 0;
3547 return getIndirectReturnResult(Ty);
3548 }
3549 NeededInt += LocalNeededInt;
3550 NeededSSE += LocalNeededSSE;
3551 }
3552 }
3553
3554 return ABIArgInfo::getDirect();
3555 }
3556
3557 ABIArgInfo X86_64ABIInfo::classifyRegCallStructType(QualType Ty,
3558 unsigned &NeededInt,
3559 unsigned &NeededSSE) const {
3560
3561 NeededInt = 0;
3562 NeededSSE = 0;
3563
3564 return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE);
3565 }
3566
3567 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3568
3569 const unsigned CallingConv = FI.getCallingConvention();
3570 // It is possible to force the Win64 calling convention on any x86_64 target
3571 // by using __attribute__((ms_abi)). In such a case, delegate this call to
3572 // WinX86_64ABIInfo::computeInfo so that Win64-compatible code is emitted.
3573 if (CallingConv == llvm::CallingConv::Win64) {
3574 WinX86_64ABIInfo Win64ABIInfo(CGT);
3575 Win64ABIInfo.computeInfo(FI);
3576 return;
3577 }
3578
3579 bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;
3580
3581 // Keep track of the number of assigned registers.
3582 unsigned FreeIntRegs = IsRegCall ? 11 : 6;
3583 unsigned FreeSSERegs = IsRegCall ? 16 : 8;
3584 unsigned NeededInt, NeededSSE;
3585
3586 if (!::classifyReturnType(getCXXABI(), FI, *this)) {
3587 if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() &&
3588 !FI.getReturnType()->getTypePtr()->isUnionType()) {
3589 FI.getReturnInfo() =
3590 classifyRegCallStructType(FI.getReturnType(), NeededInt, NeededSSE);
3591 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3592 FreeIntRegs -= NeededInt;
3593 FreeSSERegs -= NeededSSE;
3594 } else {
3595 FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
3596 }
3597 } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>()) {
3598 // A complex long double is returned in memory when the regcall
3599 // calling convention is used.
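// Illustrative example (hypothetical declaration, added for exposition):
//   __attribute__((regcall)) _Complex long double f(void);
// returns its value through a hidden pointer rather than on the x87 stack.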
3600 const ComplexType *CT = FI.getReturnType()->getAs<ComplexType>(); 3601 if (getContext().getCanonicalType(CT->getElementType()) == 3602 getContext().LongDoubleTy) 3603 FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType()); 3604 } else 3605 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3606 } 3607 3608 // If the return value is indirect, then the hidden argument is consuming one 3609 // integer register. 3610 if (FI.getReturnInfo().isIndirect()) 3611 --FreeIntRegs; 3612 3613 // The chain argument effectively gives us another free register. 3614 if (FI.isChainCall()) 3615 ++FreeIntRegs; 3616 3617 unsigned NumRequiredArgs = FI.getNumRequiredArgs(); 3618 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers 3619 // get assigned (in left-to-right order) for passing as follows... 3620 unsigned ArgNo = 0; 3621 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3622 it != ie; ++it, ++ArgNo) { 3623 bool IsNamedArg = ArgNo < NumRequiredArgs; 3624 3625 if (IsRegCall && it->type->isStructureOrClassType()) 3626 it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE); 3627 else 3628 it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt, 3629 NeededSSE, IsNamedArg); 3630 3631 // AMD64-ABI 3.2.3p3: If there are no registers available for any 3632 // eightbyte of an argument, the whole argument is passed on the 3633 // stack. If registers have already been assigned for some 3634 // eightbytes of such an argument, the assignments get reverted. 3635 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) { 3636 FreeIntRegs -= NeededInt; 3637 FreeSSERegs -= NeededSSE; 3638 } else { 3639 it->info = getIndirectResult(it->type, FreeIntRegs); 3640 } 3641 } 3642 } 3643 3644 static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF, 3645 Address VAListAddr, QualType Ty) { 3646 Address overflow_arg_area_p = 3647 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p"); 3648 llvm::Value *overflow_arg_area = 3649 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); 3650 3651 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 3652 // byte boundary if alignment needed by type exceeds 8 byte boundary. 3653 // It isn't stated explicitly in the standard, but in practice we use 3654 // alignment greater than 16 where necessary. 3655 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty); 3656 if (Align > CharUnits::fromQuantity(8)) { 3657 overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area, 3658 Align); 3659 } 3660 3661 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. 3662 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 3663 llvm::Value *Res = 3664 CGF.Builder.CreateBitCast(overflow_arg_area, 3665 llvm::PointerType::getUnqual(LTy)); 3666 3667 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: 3668 // l->overflow_arg_area + sizeof(type). 3669 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to 3670 // an 8 byte boundary. 3671 3672 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8; 3673 llvm::Value *Offset = 3674 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7); 3675 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset, 3676 "overflow_arg_area.next"); 3677 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p); 3678 3679 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type. 
3680 return Address(Res, Align);
3681 }
3682
3683 Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3684 QualType Ty) const {
3685 // Assume that the va_list type is correct; it should be a pointer to this LLVM type:
3686 // struct {
3687 // i32 gp_offset;
3688 // i32 fp_offset;
3689 // i8* overflow_arg_area;
3690 // i8* reg_save_area;
3691 // };
3692 unsigned neededInt, neededSSE;
3693
3694 Ty = getContext().getCanonicalType(Ty);
3695 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
3696 /*isNamedArg*/false);
3697
3698 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
3699 // in the registers. If not go to step 7.
3700 if (!neededInt && !neededSSE)
3701 return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3702
3703 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
3704 // general purpose registers needed to pass type and num_fp to hold
3705 // the number of floating point registers needed.
3706
3707 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
3708 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
3709 // l->fp_offset > 304 - num_fp * 16 go to step 7.
3710 //
3711 // NOTE: 304 is a typo; there are (6 * 8 + 8 * 16) = 176 bytes of
3712 // register save space.
3713
3714 llvm::Value *InRegs = nullptr;
3715 Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
3716 llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
3717 if (neededInt) {
3718 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
3719 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
3720 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
3721 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
3722 }
3723
3724 if (neededSSE) {
3725 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
3726 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
3727 llvm::Value *FitsInFP =
3728 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
3729 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
3730 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
3731 }
3732
3733 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
3734 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
3735 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
3736 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
3737
3738 // Emit code to load the value if it was passed in registers.
3739
3740 CGF.EmitBlock(InRegBlock);
3741
3742 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
3743 // an offset of l->gp_offset and/or l->fp_offset. This may require
3744 // copying to a temporary location in case the parameter is passed
3745 // in different register classes or requires an alignment greater
3746 // than 8 for general purpose registers and 16 for XMM registers.
3747 //
3748 // FIXME: This really results in shameful code when we end up needing to
3749 // collect arguments from different places; often what should result in a
3750 // simple assembling of a structure from scattered addresses has many more
3751 // loads than necessary. Can we clean this up?
3752 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
3753 llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
3754 CGF.Builder.CreateStructGEP(VAListAddr, 3), "reg_save_area");
3755
3756 Address RegAddr = Address::invalid();
3757 if (neededInt && neededSSE) {
3758 // FIXME: Cleanup.
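// Illustrative case (added for exposition): an argument such as
//   struct S { double d; long l; };
// needs one SSE register and one GPR, so its two eightbytes live in different
// parts of the register save area and are reassembled here in a temporary.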
3759 assert(AI.isDirect() && "Unexpected ABI info for mixed regs"); 3760 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType()); 3761 Address Tmp = CGF.CreateMemTemp(Ty); 3762 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST); 3763 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs"); 3764 llvm::Type *TyLo = ST->getElementType(0); 3765 llvm::Type *TyHi = ST->getElementType(1); 3766 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) && 3767 "Unexpected ABI info for mixed regs"); 3768 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo); 3769 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi); 3770 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegSaveArea, gp_offset); 3771 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegSaveArea, fp_offset); 3772 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr; 3773 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr; 3774 3775 // Copy the first element. 3776 // FIXME: Our choice of alignment here and below is probably pessimistic. 3777 llvm::Value *V = CGF.Builder.CreateAlignedLoad( 3778 TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo), 3779 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyLo))); 3780 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); 3781 3782 // Copy the second element. 3783 V = CGF.Builder.CreateAlignedLoad( 3784 TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi), 3785 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyHi))); 3786 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); 3787 3788 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy); 3789 } else if (neededInt) { 3790 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, gp_offset), 3791 CharUnits::fromQuantity(8)); 3792 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy); 3793 3794 // Copy to a temporary if necessary to ensure the appropriate alignment. 3795 std::pair<CharUnits, CharUnits> SizeAlign = 3796 getContext().getTypeInfoInChars(Ty); 3797 uint64_t TySize = SizeAlign.first.getQuantity(); 3798 CharUnits TyAlign = SizeAlign.second; 3799 3800 // Copy into a temporary if the type is more aligned than the 3801 // register save area. 3802 if (TyAlign.getQuantity() > 8) { 3803 Address Tmp = CGF.CreateMemTemp(Ty); 3804 CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false); 3805 RegAddr = Tmp; 3806 } 3807 3808 } else if (neededSSE == 1) { 3809 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset), 3810 CharUnits::fromQuantity(16)); 3811 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy); 3812 } else { 3813 assert(neededSSE == 2 && "Invalid number of needed registers!"); 3814 // SSE registers are spaced 16 bytes apart in the register save 3815 // area, we need to collect the two eightbytes together. 3816 // The ABI isn't explicit about this, but it seems reasonable 3817 // to assume that the slots are 16-byte aligned, since the stack is 3818 // naturally 16-byte aligned and the prologue is expected to store 3819 // all the SSE registers to the RSA. 3820 Address RegAddrLo = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset), 3821 CharUnits::fromQuantity(16)); 3822 Address RegAddrHi = 3823 CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo, 3824 CharUnits::fromQuantity(16)); 3825 llvm::Type *ST = AI.canHaveCoerceToType() 3826 ? 
AI.getCoerceToType()
3827 : llvm::StructType::get(CGF.DoubleTy, CGF.DoubleTy);
3828 llvm::Value *V;
3829 Address Tmp = CGF.CreateMemTemp(Ty);
3830 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
3831 V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
3832 RegAddrLo, ST->getStructElementType(0)));
3833 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
3834 V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
3835 RegAddrHi, ST->getStructElementType(1)));
3836 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
3837
3838 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
3839 }
3840
3841 // AMD64-ABI 3.5.7p5: Step 5. Set:
3842 // l->gp_offset = l->gp_offset + num_gp * 8
3843 // l->fp_offset = l->fp_offset + num_fp * 16.
3844 if (neededInt) {
3845 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
3846 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
3847 gp_offset_p);
3848 }
3849 if (neededSSE) {
3850 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
3851 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
3852 fp_offset_p);
3853 }
3854 CGF.EmitBranch(ContBlock);
3855
3856 // Emit code to load the value if it was passed in memory.
3857
3858 CGF.EmitBlock(InMemBlock);
3859 Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3860
3861 // Return the appropriate result.
3862
3863 CGF.EmitBlock(ContBlock);
3864 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
3865 "vaarg.addr");
3866 return ResAddr;
3867 }
3868
3869 Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
3870 QualType Ty) const {
3871 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
3872 CGF.getContext().getTypeInfoInChars(Ty),
3873 CharUnits::fromQuantity(8),
3874 /*allowHigherAlign*/ false);
3875 }
3876
3877 ABIArgInfo
3878 WinX86_64ABIInfo::reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,
3879 const ABIArgInfo &current) const {
3880 // Assumes vectorcall calling convention.
3881 const Type *Base = nullptr;
3882 uint64_t NumElts = 0;
3883
3884 if (!Ty->isBuiltinType() && !Ty->isVectorType() &&
3885 isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
3886 FreeSSERegs -= NumElts;
3887 return getDirectX86Hva();
3888 }
3889 return current;
3890 }
3891
3892 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
3893 bool IsReturnType, bool IsVectorCall,
3894 bool IsRegCall) const {
3895
3896 if (Ty->isVoidType())
3897 return ABIArgInfo::getIgnore();
3898
3899 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3900 Ty = EnumTy->getDecl()->getIntegerType();
3901
3902 TypeInfo Info = getContext().getTypeInfo(Ty);
3903 uint64_t Width = Info.Width;
3904 CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);
3905
3906 const RecordType *RT = Ty->getAs<RecordType>();
3907 if (RT) {
3908 if (!IsReturnType) {
3909 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
3910 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
3911 }
3912
3913 if (RT->getDecl()->hasFlexibleArrayMember())
3914 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
3915
3916 }
3917
3918 const Type *Base = nullptr;
3919 uint64_t NumElts = 0;
3920 // vectorcall adds the concept of a homogeneous vector aggregate, similar to
3921 // other targets.
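// Illustrative example (hypothetical type, added for exposition): under
// vectorcall a struct such as
//   struct HVA2 { __m128 x, y; };
// is a homogeneous vector aggregate of two <4 x float> members; when enough
// XMM registers are free it ends up in registers after the second (HVA)
// reclassification pass.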
3922 if ((IsVectorCall || IsRegCall) && 3923 isHomogeneousAggregate(Ty, Base, NumElts)) { 3924 if (IsRegCall) { 3925 if (FreeSSERegs >= NumElts) { 3926 FreeSSERegs -= NumElts; 3927 if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType()) 3928 return ABIArgInfo::getDirect(); 3929 return ABIArgInfo::getExpand(); 3930 } 3931 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false); 3932 } else if (IsVectorCall) { 3933 if (FreeSSERegs >= NumElts && 3934 (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) { 3935 FreeSSERegs -= NumElts; 3936 return ABIArgInfo::getDirect(); 3937 } else if (IsReturnType) { 3938 return ABIArgInfo::getExpand(); 3939 } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) { 3940 // HVAs are delayed and reclassified in the 2nd step. 3941 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false); 3942 } 3943 } 3944 } 3945 3946 if (Ty->isMemberPointerType()) { 3947 // If the member pointer is represented by an LLVM int or ptr, pass it 3948 // directly. 3949 llvm::Type *LLTy = CGT.ConvertType(Ty); 3950 if (LLTy->isPointerTy() || LLTy->isIntegerTy()) 3951 return ABIArgInfo::getDirect(); 3952 } 3953 3954 if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) { 3955 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is 3956 // not 1, 2, 4, or 8 bytes, must be passed by reference." 3957 if (Width > 64 || !llvm::isPowerOf2_64(Width)) 3958 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 3959 3960 // Otherwise, coerce it to a small integer. 3961 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width)); 3962 } 3963 3964 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 3965 switch (BT->getKind()) { 3966 case BuiltinType::Bool: 3967 // Bool type is always extended to the ABI, other builtin types are not 3968 // extended. 3969 return ABIArgInfo::getExtend(Ty); 3970 3971 case BuiltinType::LongDouble: 3972 // Mingw64 GCC uses the old 80 bit extended precision floating point 3973 // unit. It passes them indirectly through memory. 3974 if (IsMingw64) { 3975 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); 3976 if (LDF == &llvm::APFloat::x87DoubleExtended()) 3977 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false); 3978 } 3979 break; 3980 3981 case BuiltinType::Int128: 3982 case BuiltinType::UInt128: 3983 // If it's a parameter type, the normal ABI rule is that arguments larger 3984 // than 8 bytes are passed indirectly. GCC follows it. We follow it too, 3985 // even though it isn't particularly efficient. 3986 if (!IsReturnType) 3987 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false); 3988 3989 // Mingw64 GCC returns i128 in XMM0. Coerce to v2i64 to handle that. 3990 // Clang matches them for compatibility. 3991 return ABIArgInfo::getDirect( 3992 llvm::VectorType::get(llvm::Type::getInt64Ty(getVMContext()), 2)); 3993 3994 default: 3995 break; 3996 } 3997 } 3998 3999 return ABIArgInfo::getDirect(); 4000 } 4001 4002 void WinX86_64ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI, 4003 unsigned FreeSSERegs, 4004 bool IsVectorCall, 4005 bool IsRegCall) const { 4006 unsigned Count = 0; 4007 for (auto &I : FI.arguments()) { 4008 // Vectorcall in x64 only permits the first 6 arguments to be passed 4009 // as XMM/YMM registers. 4010 if (Count < VectorcallMaxParamNumAsReg) 4011 I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall); 4012 else { 4013 // Since these cannot be passed in registers, pretend no registers 4014 // are left. 
4015 unsigned ZeroSSERegsAvail = 0; 4016 I.info = classify(I.type, /*FreeSSERegs=*/ZeroSSERegsAvail, false, 4017 IsVectorCall, IsRegCall); 4018 } 4019 ++Count; 4020 } 4021 4022 for (auto &I : FI.arguments()) { 4023 I.info = reclassifyHvaArgType(I.type, FreeSSERegs, I.info); 4024 } 4025 } 4026 4027 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 4028 bool IsVectorCall = 4029 FI.getCallingConvention() == llvm::CallingConv::X86_VectorCall; 4030 bool IsRegCall = FI.getCallingConvention() == llvm::CallingConv::X86_RegCall; 4031 4032 unsigned FreeSSERegs = 0; 4033 if (IsVectorCall) { 4034 // We can use up to 4 SSE return registers with vectorcall. 4035 FreeSSERegs = 4; 4036 } else if (IsRegCall) { 4037 // RegCall gives us 16 SSE registers. 4038 FreeSSERegs = 16; 4039 } 4040 4041 if (!getCXXABI().classifyReturnType(FI)) 4042 FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true, 4043 IsVectorCall, IsRegCall); 4044 4045 if (IsVectorCall) { 4046 // We can use up to 6 SSE register parameters with vectorcall. 4047 FreeSSERegs = 6; 4048 } else if (IsRegCall) { 4049 // RegCall gives us 16 SSE registers, we can reuse the return registers. 4050 FreeSSERegs = 16; 4051 } 4052 4053 if (IsVectorCall) { 4054 computeVectorCallArgs(FI, FreeSSERegs, IsVectorCall, IsRegCall); 4055 } else { 4056 for (auto &I : FI.arguments()) 4057 I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall); 4058 } 4059 4060 } 4061 4062 Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 4063 QualType Ty) const { 4064 4065 bool IsIndirect = false; 4066 4067 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is 4068 // not 1, 2, 4, or 8 bytes, must be passed by reference." 4069 if (isAggregateTypeForABI(Ty) || Ty->isMemberPointerType()) { 4070 uint64_t Width = getContext().getTypeSize(Ty); 4071 IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width); 4072 } 4073 4074 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, 4075 CGF.getContext().getTypeInfoInChars(Ty), 4076 CharUnits::fromQuantity(8), 4077 /*allowHigherAlign*/ false); 4078 } 4079 4080 // PowerPC-32 4081 namespace { 4082 /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information. 4083 class PPC32_SVR4_ABIInfo : public DefaultABIInfo { 4084 bool IsSoftFloatABI; 4085 4086 CharUnits getParamTypeAlignment(QualType Ty) const; 4087 4088 public: 4089 PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI) 4090 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI) {} 4091 4092 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 4093 QualType Ty) const override; 4094 }; 4095 4096 class PPC32TargetCodeGenInfo : public TargetCodeGenInfo { 4097 public: 4098 PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI) 4099 : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT, SoftFloatABI)) {} 4100 4101 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 4102 // This is recovered from gcc output. 4103 return 1; // r1 is the dedicated stack pointer 4104 } 4105 4106 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4107 llvm::Value *Address) const override; 4108 }; 4109 } 4110 4111 CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const { 4112 // Complex types are passed just like their elements 4113 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) 4114 Ty = CTy->getElementType(); 4115 4116 if (Ty->isVectorType()) 4117 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 
16 4118 : 4); 4119 4120 // For single-element float/vector structs, we consider the whole type 4121 // to have the same alignment requirements as its single element. 4122 const Type *AlignTy = nullptr; 4123 if (const Type *EltType = isSingleElementStruct(Ty, getContext())) { 4124 const BuiltinType *BT = EltType->getAs<BuiltinType>(); 4125 if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) || 4126 (BT && BT->isFloatingPoint())) 4127 AlignTy = EltType; 4128 } 4129 4130 if (AlignTy) 4131 return CharUnits::fromQuantity(AlignTy->isVectorType() ? 16 : 4); 4132 return CharUnits::fromQuantity(4); 4133 } 4134 4135 // TODO: this implementation is now likely redundant with 4136 // DefaultABIInfo::EmitVAArg. 4137 Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList, 4138 QualType Ty) const { 4139 if (getTarget().getTriple().isOSDarwin()) { 4140 auto TI = getContext().getTypeInfoInChars(Ty); 4141 TI.second = getParamTypeAlignment(Ty); 4142 4143 CharUnits SlotSize = CharUnits::fromQuantity(4); 4144 return emitVoidPtrVAArg(CGF, VAList, Ty, 4145 classifyArgumentType(Ty).isIndirect(), TI, SlotSize, 4146 /*AllowHigherAlign=*/true); 4147 } 4148 4149 const unsigned OverflowLimit = 8; 4150 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { 4151 // TODO: Implement this. For now ignore. 4152 (void)CTy; 4153 return Address::invalid(); // FIXME? 4154 } 4155 4156 // struct __va_list_tag { 4157 // unsigned char gpr; 4158 // unsigned char fpr; 4159 // unsigned short reserved; 4160 // void *overflow_arg_area; 4161 // void *reg_save_area; 4162 // }; 4163 4164 bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64; 4165 bool isInt = 4166 Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType(); 4167 bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64; 4168 4169 // All aggregates are passed indirectly? That doesn't seem consistent 4170 // with the argument-lowering code. 4171 bool isIndirect = Ty->isAggregateType(); 4172 4173 CGBuilderTy &Builder = CGF.Builder; 4174 4175 // The calling convention either uses 1-2 GPRs or 1 FPR. 4176 Address NumRegsAddr = Address::invalid(); 4177 if (isInt || IsSoftFloatABI) { 4178 NumRegsAddr = Builder.CreateStructGEP(VAList, 0, "gpr"); 4179 } else { 4180 NumRegsAddr = Builder.CreateStructGEP(VAList, 1, "fpr"); 4181 } 4182 4183 llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs"); 4184 4185 // "Align" the register count when TY is i64. 4186 if (isI64 || (isF64 && IsSoftFloatABI)) { 4187 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1)); 4188 NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U)); 4189 } 4190 4191 llvm::Value *CC = 4192 Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond"); 4193 4194 llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs"); 4195 llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow"); 4196 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont"); 4197 4198 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow); 4199 4200 llvm::Type *DirectTy = CGF.ConvertType(Ty); 4201 if (isIndirect) DirectTy = DirectTy->getPointerTo(0); 4202 4203 // Case 1: consume registers. 
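// Illustrative note (added for exposition): a 32-bit int argument consumes a
// single GPR here, while a 64-bit integer (or a double under the soft-float
// ABI) consumes an aligned pair of GPRs, which is why NumRegs was rounded up
// to an even value above.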
4204 Address RegAddr = Address::invalid();
4205 {
4206 CGF.EmitBlock(UsingRegs);
4207
4208 Address RegSaveAreaPtr = Builder.CreateStructGEP(VAList, 4);
4209 RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr),
4210 CharUnits::fromQuantity(8));
4211 assert(RegAddr.getElementType() == CGF.Int8Ty);
4212
4213 // Floating-point registers start after the general-purpose registers.
4214 if (!(isInt || IsSoftFloatABI)) {
4215 RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
4216 CharUnits::fromQuantity(32));
4217 }
4218
4219 // Get the address of the saved value by scaling the number of
4220 // registers we've used by the size of each register slot (4 bytes for GPRs, 8 for FPRs).
4221 CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
4222 llvm::Value *RegOffset =
4223 Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
4224 RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty,
4225 RegAddr.getPointer(), RegOffset),
4226 RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
4227 RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);
4228
4229 // Increase the used-register count.
4230 NumRegs =
4231 Builder.CreateAdd(NumRegs,
4232 Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
4233 Builder.CreateStore(NumRegs, NumRegsAddr);
4234
4235 CGF.EmitBranch(Cont);
4236 }
4237
4238 // Case 2: consume space in the overflow area.
4239 Address MemAddr = Address::invalid();
4240 {
4241 CGF.EmitBlock(UsingOverflow);
4242
4243 Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
4244
4245 // Everything in the overflow area is rounded up to a size of at least 4.
4246 CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);
4247
4248 CharUnits Size;
4249 if (!isIndirect) {
4250 auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
4251 Size = TypeInfo.first.alignTo(OverflowAreaAlign);
4252 } else {
4253 Size = CGF.getPointerSize();
4254 }
4255
4256 Address OverflowAreaAddr = Builder.CreateStructGEP(VAList, 3);
4257 Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"),
4258 OverflowAreaAlign);
4259 // Round up the address of the argument to its required alignment.
4260 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
4261 if (Align > OverflowAreaAlign) {
4262 llvm::Value *Ptr = OverflowArea.getPointer();
4263 OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
4264 Align);
4265 }
4266
4267 MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy);
4268
4269 // Increase the overflow area.
4270 OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
4271 Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
4272 CGF.EmitBranch(Cont);
4273 }
4274
4275 CGF.EmitBlock(Cont);
4276
4277 // Merge the cases with a phi.
4278 Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
4279 "vaarg.addr");
4280
4281 // Load the pointer if the argument was passed indirectly.
4282 if (isIndirect) {
4283 Result = Address(Builder.CreateLoad(Result, "aggr"),
4284 getContext().getTypeAlignInChars(Ty));
4285 }
4286
4287 return Result;
4288 }
4289
4290 bool
4291 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4292 llvm::Value *Address) const {
4293 // This is calculated from the LLVM and GCC tables and verified
4294 // against gcc output. AFAIK all ABIs use the same encoding.
4295 4296 CodeGen::CGBuilderTy &Builder = CGF.Builder; 4297 4298 llvm::IntegerType *i8 = CGF.Int8Ty; 4299 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 4300 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 4301 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 4302 4303 // 0-31: r0-31, the 4-byte general-purpose registers 4304 AssignToArrayRange(Builder, Address, Four8, 0, 31); 4305 4306 // 32-63: fp0-31, the 8-byte floating-point registers 4307 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 4308 4309 // 64-76 are various 4-byte special-purpose registers: 4310 // 64: mq 4311 // 65: lr 4312 // 66: ctr 4313 // 67: ap 4314 // 68-75 cr0-7 4315 // 76: xer 4316 AssignToArrayRange(Builder, Address, Four8, 64, 76); 4317 4318 // 77-108: v0-31, the 16-byte vector registers 4319 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 4320 4321 // 109: vrsave 4322 // 110: vscr 4323 // 111: spe_acc 4324 // 112: spefscr 4325 // 113: sfp 4326 AssignToArrayRange(Builder, Address, Four8, 109, 113); 4327 4328 return false; 4329 } 4330 4331 // PowerPC-64 4332 4333 namespace { 4334 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information. 4335 class PPC64_SVR4_ABIInfo : public SwiftABIInfo { 4336 public: 4337 enum ABIKind { 4338 ELFv1 = 0, 4339 ELFv2 4340 }; 4341 4342 private: 4343 static const unsigned GPRBits = 64; 4344 ABIKind Kind; 4345 bool HasQPX; 4346 bool IsSoftFloatABI; 4347 4348 // A vector of float or double will be promoted to <4 x f32> or <4 x f64> and 4349 // will be passed in a QPX register. 4350 bool IsQPXVectorTy(const Type *Ty) const { 4351 if (!HasQPX) 4352 return false; 4353 4354 if (const VectorType *VT = Ty->getAs<VectorType>()) { 4355 unsigned NumElements = VT->getNumElements(); 4356 if (NumElements == 1) 4357 return false; 4358 4359 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) { 4360 if (getContext().getTypeSize(Ty) <= 256) 4361 return true; 4362 } else if (VT->getElementType()-> 4363 isSpecificBuiltinType(BuiltinType::Float)) { 4364 if (getContext().getTypeSize(Ty) <= 128) 4365 return true; 4366 } 4367 } 4368 4369 return false; 4370 } 4371 4372 bool IsQPXVectorTy(QualType Ty) const { 4373 return IsQPXVectorTy(Ty.getTypePtr()); 4374 } 4375 4376 public: 4377 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX, 4378 bool SoftFloatABI) 4379 : SwiftABIInfo(CGT), Kind(Kind), HasQPX(HasQPX), 4380 IsSoftFloatABI(SoftFloatABI) {} 4381 4382 bool isPromotableTypeForABI(QualType Ty) const; 4383 CharUnits getParamTypeAlignment(QualType Ty) const; 4384 4385 ABIArgInfo classifyReturnType(QualType RetTy) const; 4386 ABIArgInfo classifyArgumentType(QualType Ty) const; 4387 4388 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 4389 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 4390 uint64_t Members) const override; 4391 4392 // TODO: We can add more logic to computeInfo to improve performance. 4393 // Example: For aggregate arguments that fit in a register, we could 4394 // use getDirectInReg (as is done below for structs containing a single 4395 // floating-point value) to avoid pushing them to memory on function 4396 // entry. This would require changing the logic in PPCISelLowering 4397 // when lowering the parameters in the caller and args in the callee. 
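// Illustrative example (hypothetical type, added for exposition): a
// single-element struct such as
//   struct Wrap { double d; };
// is given getDirectInReg in computeInfo below so that it travels in a
// floating-point register instead of being forced onto the stack.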
4398 void computeInfo(CGFunctionInfo &FI) const override {
4399 if (!getCXXABI().classifyReturnType(FI))
4400 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4401 for (auto &I : FI.arguments()) {
4402 // We rely on the default argument classification for the most part.
4403 // One exception: An aggregate containing a single floating-point
4404 // or vector item must be passed in a register if one is available.
4405 const Type *T = isSingleElementStruct(I.type, getContext());
4406 if (T) {
4407 const BuiltinType *BT = T->getAs<BuiltinType>();
4408 if (IsQPXVectorTy(T) ||
4409 (T->isVectorType() && getContext().getTypeSize(T) == 128) ||
4410 (BT && BT->isFloatingPoint())) {
4411 QualType QT(T, 0);
4412 I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
4413 continue;
4414 }
4415 }
4416 I.info = classifyArgumentType(I.type);
4417 }
4418 }
4419
4420 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4421 QualType Ty) const override;
4422
4423 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
4424 bool asReturnValue) const override {
4425 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
4426 }
4427
4428 bool isSwiftErrorInRegister() const override {
4429 return false;
4430 }
4431 };
4432
4433 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
4434
4435 public:
4436 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
4437 PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX,
4438 bool SoftFloatABI)
4439 : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind, HasQPX,
4440 SoftFloatABI)) {}
4441
4442 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4443 // This is recovered from gcc output.
4444 return 1; // r1 is the dedicated stack pointer
4445 }
4446
4447 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4448 llvm::Value *Address) const override;
4449 };
4450
4451 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
4452 public:
4453 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
4454
4455 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4456 // This is recovered from gcc output.
4457 return 1; // r1 is the dedicated stack pointer
4458 }
4459
4460 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4461 llvm::Value *Address) const override;
4462 };
4463
4464 }
4465
4466 // Return true if the ABI requires Ty to be passed sign- or zero-
4467 // extended to 64 bits.
4468 bool
4469 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
4470 // Treat an enum type as its underlying type.
4471 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4472 Ty = EnumTy->getDecl()->getIntegerType();
4473
4474 // Promotable integer types are required to be promoted by the ABI.
4475 if (Ty->isPromotableIntegerType())
4476 return true;
4477
4478 // In addition to the usual promotable integer types, we also need to
4479 // extend all 32-bit types, since the ABI requires promotion to 64 bits.
4480 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4481 switch (BT->getKind()) {
4482 case BuiltinType::Int:
4483 case BuiltinType::UInt:
4484 return true;
4485 default:
4486 break;
4487 }
4488
4489 return false;
4490 }
4491
4492 /// getParamTypeAlignment - Determine the alignment a type requires in the
4493 /// parameter area. Always returns at least 8.
4494 CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
4495 // Complex types are passed just like their elements.
4496 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) 4497 Ty = CTy->getElementType(); 4498 4499 // Only vector types of size 16 bytes need alignment (larger types are 4500 // passed via reference, smaller types are not aligned). 4501 if (IsQPXVectorTy(Ty)) { 4502 if (getContext().getTypeSize(Ty) > 128) 4503 return CharUnits::fromQuantity(32); 4504 4505 return CharUnits::fromQuantity(16); 4506 } else if (Ty->isVectorType()) { 4507 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8); 4508 } 4509 4510 // For single-element float/vector structs, we consider the whole type 4511 // to have the same alignment requirements as its single element. 4512 const Type *AlignAsType = nullptr; 4513 const Type *EltType = isSingleElementStruct(Ty, getContext()); 4514 if (EltType) { 4515 const BuiltinType *BT = EltType->getAs<BuiltinType>(); 4516 if (IsQPXVectorTy(EltType) || (EltType->isVectorType() && 4517 getContext().getTypeSize(EltType) == 128) || 4518 (BT && BT->isFloatingPoint())) 4519 AlignAsType = EltType; 4520 } 4521 4522 // Likewise for ELFv2 homogeneous aggregates. 4523 const Type *Base = nullptr; 4524 uint64_t Members = 0; 4525 if (!AlignAsType && Kind == ELFv2 && 4526 isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members)) 4527 AlignAsType = Base; 4528 4529 // With special case aggregates, only vector base types need alignment. 4530 if (AlignAsType && IsQPXVectorTy(AlignAsType)) { 4531 if (getContext().getTypeSize(AlignAsType) > 128) 4532 return CharUnits::fromQuantity(32); 4533 4534 return CharUnits::fromQuantity(16); 4535 } else if (AlignAsType) { 4536 return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8); 4537 } 4538 4539 // Otherwise, we only need alignment for any aggregate type that 4540 // has an alignment requirement of >= 16 bytes. 4541 if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) { 4542 if (HasQPX && getContext().getTypeAlign(Ty) >= 256) 4543 return CharUnits::fromQuantity(32); 4544 return CharUnits::fromQuantity(16); 4545 } 4546 4547 return CharUnits::fromQuantity(8); 4548 } 4549 4550 /// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous 4551 /// aggregate. Base is set to the base element type, and Members is set 4552 /// to the number of base elements. 4553 bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base, 4554 uint64_t &Members) const { 4555 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 4556 uint64_t NElements = AT->getSize().getZExtValue(); 4557 if (NElements == 0) 4558 return false; 4559 if (!isHomogeneousAggregate(AT->getElementType(), Base, Members)) 4560 return false; 4561 Members *= NElements; 4562 } else if (const RecordType *RT = Ty->getAs<RecordType>()) { 4563 const RecordDecl *RD = RT->getDecl(); 4564 if (RD->hasFlexibleArrayMember()) 4565 return false; 4566 4567 Members = 0; 4568 4569 // If this is a C++ record, check the bases first. 4570 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 4571 for (const auto &I : CXXRD->bases()) { 4572 // Ignore empty records. 4573 if (isEmptyRecord(getContext(), I.getType(), true)) 4574 continue; 4575 4576 uint64_t FldMembers; 4577 if (!isHomogeneousAggregate(I.getType(), Base, FldMembers)) 4578 return false; 4579 4580 Members += FldMembers; 4581 } 4582 } 4583 4584 for (const auto *FD : RD->fields()) { 4585 // Ignore (non-zero arrays of) empty records. 
4586 QualType FT = FD->getType(); 4587 while (const ConstantArrayType *AT = 4588 getContext().getAsConstantArrayType(FT)) { 4589 if (AT->getSize().getZExtValue() == 0) 4590 return false; 4591 FT = AT->getElementType(); 4592 } 4593 if (isEmptyRecord(getContext(), FT, true)) 4594 continue; 4595 4596 // For compatibility with GCC, ignore empty bitfields in C++ mode. 4597 if (getContext().getLangOpts().CPlusPlus && 4598 FD->isZeroLengthBitField(getContext())) 4599 continue; 4600 4601 uint64_t FldMembers; 4602 if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers)) 4603 return false; 4604 4605 Members = (RD->isUnion() ? 4606 std::max(Members, FldMembers) : Members + FldMembers); 4607 } 4608 4609 if (!Base) 4610 return false; 4611 4612 // Ensure there is no padding. 4613 if (getContext().getTypeSize(Base) * Members != 4614 getContext().getTypeSize(Ty)) 4615 return false; 4616 } else { 4617 Members = 1; 4618 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 4619 Members = 2; 4620 Ty = CT->getElementType(); 4621 } 4622 4623 // Most ABIs only support float, double, and some vector type widths. 4624 if (!isHomogeneousAggregateBaseType(Ty)) 4625 return false; 4626 4627 // The base type must be the same for all members. Types that 4628 // agree in both total size and mode (float vs. vector) are 4629 // treated as being equivalent here. 4630 const Type *TyPtr = Ty.getTypePtr(); 4631 if (!Base) { 4632 Base = TyPtr; 4633 // If it's a non-power-of-2 vector, its size is already a power-of-2, 4634 // so make sure to widen it explicitly. 4635 if (const VectorType *VT = Base->getAs<VectorType>()) { 4636 QualType EltTy = VT->getElementType(); 4637 unsigned NumElements = 4638 getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy); 4639 Base = getContext() 4640 .getVectorType(EltTy, NumElements, VT->getVectorKind()) 4641 .getTypePtr(); 4642 } 4643 } 4644 4645 if (Base->isVectorType() != TyPtr->isVectorType() || 4646 getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr)) 4647 return false; 4648 } 4649 return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members); 4650 } 4651 4652 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 4653 // Homogeneous aggregates for ELFv2 must have base types of float, 4654 // double, long double, or 128-bit vectors. 4655 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 4656 if (BT->getKind() == BuiltinType::Float || 4657 BT->getKind() == BuiltinType::Double || 4658 BT->getKind() == BuiltinType::LongDouble || 4659 (getContext().getTargetInfo().hasFloat128Type() && 4660 (BT->getKind() == BuiltinType::Float128))) { 4661 if (IsSoftFloatABI) 4662 return false; 4663 return true; 4664 } 4665 } 4666 if (const VectorType *VT = Ty->getAs<VectorType>()) { 4667 if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty)) 4668 return true; 4669 } 4670 return false; 4671 } 4672 4673 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough( 4674 const Type *Base, uint64_t Members) const { 4675 // Vector and fp128 types require one register, other floating point types 4676 // require one or two registers depending on their size. 4677 uint32_t NumRegs = 4678 ((getContext().getTargetInfo().hasFloat128Type() && 4679 Base->isFloat128Type()) || 4680 Base->isVectorType()) ? 1 4681 : (getContext().getTypeSize(Base) + 63) / 64; 4682 4683 // Homogeneous Aggregates may occupy at most 8 registers. 
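// Illustrative examples (hypothetical types, added for exposition):
//   struct { double d[8]; } uses eight registers and still qualifies, while
//   struct { double d[9]; } needs nine and is rejected here.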
4684 return Members * NumRegs <= 8; 4685 } 4686 4687 ABIArgInfo 4688 PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const { 4689 Ty = useFirstFieldIfTransparentUnion(Ty); 4690 4691 if (Ty->isAnyComplexType()) 4692 return ABIArgInfo::getDirect(); 4693 4694 // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes) 4695 // or via reference (larger than 16 bytes). 4696 if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) { 4697 uint64_t Size = getContext().getTypeSize(Ty); 4698 if (Size > 128) 4699 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 4700 else if (Size < 128) { 4701 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size); 4702 return ABIArgInfo::getDirect(CoerceTy); 4703 } 4704 } 4705 4706 if (isAggregateTypeForABI(Ty)) { 4707 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 4708 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 4709 4710 uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity(); 4711 uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity(); 4712 4713 // ELFv2 homogeneous aggregates are passed as array types. 4714 const Type *Base = nullptr; 4715 uint64_t Members = 0; 4716 if (Kind == ELFv2 && 4717 isHomogeneousAggregate(Ty, Base, Members)) { 4718 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0)); 4719 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members); 4720 return ABIArgInfo::getDirect(CoerceTy); 4721 } 4722 4723 // If an aggregate may end up fully in registers, we do not 4724 // use the ByVal method, but pass the aggregate as array. 4725 // This is usually beneficial since we avoid forcing the 4726 // back-end to store the argument to memory. 4727 uint64_t Bits = getContext().getTypeSize(Ty); 4728 if (Bits > 0 && Bits <= 8 * GPRBits) { 4729 llvm::Type *CoerceTy; 4730 4731 // Types up to 8 bytes are passed as integer type (which will be 4732 // properly aligned in the argument save area doubleword). 4733 if (Bits <= GPRBits) 4734 CoerceTy = 4735 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8)); 4736 // Larger types are passed as arrays, with the base type selected 4737 // according to the required alignment in the save area. 4738 else { 4739 uint64_t RegBits = ABIAlign * 8; 4740 uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits; 4741 llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits); 4742 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs); 4743 } 4744 4745 return ABIArgInfo::getDirect(CoerceTy); 4746 } 4747 4748 // All other aggregates are passed ByVal. 4749 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign), 4750 /*ByVal=*/true, 4751 /*Realign=*/TyAlign > ABIAlign); 4752 } 4753 4754 return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty) 4755 : ABIArgInfo::getDirect()); 4756 } 4757 4758 ABIArgInfo 4759 PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const { 4760 if (RetTy->isVoidType()) 4761 return ABIArgInfo::getIgnore(); 4762 4763 if (RetTy->isAnyComplexType()) 4764 return ABIArgInfo::getDirect(); 4765 4766 // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes) 4767 // or via reference (larger than 16 bytes). 
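  // For example, a generic 4-byte vector (declared with vector_size(4)) is
  // returned directly as an i32, and a 32-byte generic vector is returned via
  // reference; a 16-byte (Altivec-sized) vector takes neither branch and is
  // returned directly.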
4768 if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) { 4769 uint64_t Size = getContext().getTypeSize(RetTy); 4770 if (Size > 128) 4771 return getNaturalAlignIndirect(RetTy); 4772 else if (Size < 128) { 4773 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size); 4774 return ABIArgInfo::getDirect(CoerceTy); 4775 } 4776 } 4777 4778 if (isAggregateTypeForABI(RetTy)) { 4779 // ELFv2 homogeneous aggregates are returned as array types. 4780 const Type *Base = nullptr; 4781 uint64_t Members = 0; 4782 if (Kind == ELFv2 && 4783 isHomogeneousAggregate(RetTy, Base, Members)) { 4784 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0)); 4785 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members); 4786 return ABIArgInfo::getDirect(CoerceTy); 4787 } 4788 4789 // ELFv2 small aggregates are returned in up to two registers. 4790 uint64_t Bits = getContext().getTypeSize(RetTy); 4791 if (Kind == ELFv2 && Bits <= 2 * GPRBits) { 4792 if (Bits == 0) 4793 return ABIArgInfo::getIgnore(); 4794 4795 llvm::Type *CoerceTy; 4796 if (Bits > GPRBits) { 4797 CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits); 4798 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy); 4799 } else 4800 CoerceTy = 4801 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8)); 4802 return ABIArgInfo::getDirect(CoerceTy); 4803 } 4804 4805 // All other aggregates are returned indirectly. 4806 return getNaturalAlignIndirect(RetTy); 4807 } 4808 4809 return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy) 4810 : ABIArgInfo::getDirect()); 4811 } 4812 4813 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine. 4814 Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 4815 QualType Ty) const { 4816 auto TypeInfo = getContext().getTypeInfoInChars(Ty); 4817 TypeInfo.second = getParamTypeAlignment(Ty); 4818 4819 CharUnits SlotSize = CharUnits::fromQuantity(8); 4820 4821 // If we have a complex type and the base type is smaller than 8 bytes, 4822 // the ABI calls for the real and imaginary parts to be right-adjusted 4823 // in separate doublewords. However, Clang expects us to produce a 4824 // pointer to a structure with the two parts packed tightly. So generate 4825 // loads of the real and imaginary parts relative to the va_list pointer, 4826 // and store them to a temporary structure. 
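  // For a "_Complex float" argument, for instance, the two 4-byte parts each
  // occupy their own 8-byte doubleword: on a big-endian target the real part
  // is read at offset 4 and the imaginary part at offset 12 from the current
  // va_list position, and both are then copied into the packed temporary.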
4827 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { 4828 CharUnits EltSize = TypeInfo.first / 2; 4829 if (EltSize < SlotSize) { 4830 Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty, 4831 SlotSize * 2, SlotSize, 4832 SlotSize, /*AllowHigher*/ true); 4833 4834 Address RealAddr = Addr; 4835 Address ImagAddr = RealAddr; 4836 if (CGF.CGM.getDataLayout().isBigEndian()) { 4837 RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, 4838 SlotSize - EltSize); 4839 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr, 4840 2 * SlotSize - EltSize); 4841 } else { 4842 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize); 4843 } 4844 4845 llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType()); 4846 RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy); 4847 ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy); 4848 llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal"); 4849 llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag"); 4850 4851 Address Temp = CGF.CreateMemTemp(Ty, "vacplx"); 4852 CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty), 4853 /*init*/ true); 4854 return Temp; 4855 } 4856 } 4857 4858 // Otherwise, just use the general rule. 4859 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, 4860 TypeInfo, SlotSize, /*AllowHigher*/ true); 4861 } 4862 4863 static bool 4864 PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4865 llvm::Value *Address) { 4866 // This is calculated from the LLVM and GCC tables and verified 4867 // against gcc output. AFAIK all ABIs use the same encoding. 4868 4869 CodeGen::CGBuilderTy &Builder = CGF.Builder; 4870 4871 llvm::IntegerType *i8 = CGF.Int8Ty; 4872 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 4873 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 4874 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 4875 4876 // 0-31: r0-31, the 8-byte general-purpose registers 4877 AssignToArrayRange(Builder, Address, Eight8, 0, 31); 4878 4879 // 32-63: fp0-31, the 8-byte floating-point registers 4880 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 4881 4882 // 64-67 are various 8-byte special-purpose registers: 4883 // 64: mq 4884 // 65: lr 4885 // 66: ctr 4886 // 67: ap 4887 AssignToArrayRange(Builder, Address, Eight8, 64, 67); 4888 4889 // 68-76 are various 4-byte special-purpose registers: 4890 // 68-75 cr0-7 4891 // 76: xer 4892 AssignToArrayRange(Builder, Address, Four8, 68, 76); 4893 4894 // 77-108: v0-31, the 16-byte vector registers 4895 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 4896 4897 // 109: vrsave 4898 // 110: vscr 4899 // 111: spe_acc 4900 // 112: spefscr 4901 // 113: sfp 4902 // 114: tfhar 4903 // 115: tfiar 4904 // 116: texasr 4905 AssignToArrayRange(Builder, Address, Eight8, 109, 116); 4906 4907 return false; 4908 } 4909 4910 bool 4911 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable( 4912 CodeGen::CodeGenFunction &CGF, 4913 llvm::Value *Address) const { 4914 4915 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 4916 } 4917 4918 bool 4919 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4920 llvm::Value *Address) const { 4921 4922 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 4923 } 4924 4925 //===----------------------------------------------------------------------===// 4926 // AArch64 ABI Implementation 4927 //===----------------------------------------------------------------------===// 4928 4929 namespace { 4930 4931 class 
AArch64ABIInfo : public SwiftABIInfo { 4932 public: 4933 enum ABIKind { 4934 AAPCS = 0, 4935 DarwinPCS, 4936 Win64 4937 }; 4938 4939 private: 4940 ABIKind Kind; 4941 4942 public: 4943 AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) 4944 : SwiftABIInfo(CGT), Kind(Kind) {} 4945 4946 private: 4947 ABIKind getABIKind() const { return Kind; } 4948 bool isDarwinPCS() const { return Kind == DarwinPCS; } 4949 4950 ABIArgInfo classifyReturnType(QualType RetTy) const; 4951 ABIArgInfo classifyArgumentType(QualType RetTy) const; 4952 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 4953 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 4954 uint64_t Members) const override; 4955 4956 bool isIllegalVectorType(QualType Ty) const; 4957 4958 void computeInfo(CGFunctionInfo &FI) const override { 4959 if (!::classifyReturnType(getCXXABI(), FI, *this)) 4960 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 4961 4962 for (auto &it : FI.arguments()) 4963 it.info = classifyArgumentType(it.type); 4964 } 4965 4966 Address EmitDarwinVAArg(Address VAListAddr, QualType Ty, 4967 CodeGenFunction &CGF) const; 4968 4969 Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty, 4970 CodeGenFunction &CGF) const; 4971 4972 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 4973 QualType Ty) const override { 4974 return Kind == Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty) 4975 : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF) 4976 : EmitAAPCSVAArg(VAListAddr, Ty, CGF); 4977 } 4978 4979 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, 4980 QualType Ty) const override; 4981 4982 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, 4983 bool asReturnValue) const override { 4984 return occupiesMoreThan(CGT, scalars, /*total*/ 4); 4985 } 4986 bool isSwiftErrorInRegister() const override { 4987 return true; 4988 } 4989 4990 bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy, 4991 unsigned elts) const override; 4992 }; 4993 4994 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo { 4995 public: 4996 AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind) 4997 : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {} 4998 4999 StringRef getARCRetainAutoreleasedReturnValueMarker() const override { 5000 return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue"; 5001 } 5002 5003 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 5004 return 31; 5005 } 5006 5007 bool doesReturnSlotInterfereWithArgs() const override { return false; } 5008 5009 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 5010 CodeGen::CodeGenModule &CGM) const override { 5011 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 5012 if (!FD) 5013 return; 5014 llvm::Function *Fn = cast<llvm::Function>(GV); 5015 5016 auto Kind = CGM.getCodeGenOpts().getSignReturnAddress(); 5017 if (Kind != CodeGenOptions::SignReturnAddressScope::None) { 5018 Fn->addFnAttr("sign-return-address", 5019 Kind == CodeGenOptions::SignReturnAddressScope::All 5020 ? "all" 5021 : "non-leaf"); 5022 5023 auto Key = CGM.getCodeGenOpts().getSignReturnAddressKey(); 5024 Fn->addFnAttr("sign-return-address-key", 5025 Key == CodeGenOptions::SignReturnAddressKeyValue::AKey 5026 ? 
"a_key" 5027 : "b_key"); 5028 } 5029 5030 if (CGM.getCodeGenOpts().BranchTargetEnforcement) 5031 Fn->addFnAttr("branch-target-enforcement"); 5032 } 5033 }; 5034 5035 class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo { 5036 public: 5037 WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind K) 5038 : AArch64TargetCodeGenInfo(CGT, K) {} 5039 5040 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 5041 CodeGen::CodeGenModule &CGM) const override; 5042 5043 void getDependentLibraryOption(llvm::StringRef Lib, 5044 llvm::SmallString<24> &Opt) const override { 5045 Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib); 5046 } 5047 5048 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value, 5049 llvm::SmallString<32> &Opt) const override { 5050 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 5051 } 5052 }; 5053 5054 void WindowsAArch64TargetCodeGenInfo::setTargetAttributes( 5055 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { 5056 AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); 5057 if (GV->isDeclaration()) 5058 return; 5059 addStackProbeTargetAttributes(D, GV, CGM); 5060 } 5061 } 5062 5063 ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const { 5064 Ty = useFirstFieldIfTransparentUnion(Ty); 5065 5066 // Handle illegal vector types here. 5067 if (isIllegalVectorType(Ty)) { 5068 uint64_t Size = getContext().getTypeSize(Ty); 5069 // Android promotes <2 x i8> to i16, not i32 5070 if (isAndroid() && (Size <= 16)) { 5071 llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext()); 5072 return ABIArgInfo::getDirect(ResType); 5073 } 5074 if (Size <= 32) { 5075 llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext()); 5076 return ABIArgInfo::getDirect(ResType); 5077 } 5078 if (Size == 64) { 5079 llvm::Type *ResType = 5080 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2); 5081 return ABIArgInfo::getDirect(ResType); 5082 } 5083 if (Size == 128) { 5084 llvm::Type *ResType = 5085 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4); 5086 return ABIArgInfo::getDirect(ResType); 5087 } 5088 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 5089 } 5090 5091 if (!isAggregateTypeForABI(Ty)) { 5092 // Treat an enum type as its underlying type. 5093 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 5094 Ty = EnumTy->getDecl()->getIntegerType(); 5095 5096 return (Ty->isPromotableIntegerType() && isDarwinPCS() 5097 ? ABIArgInfo::getExtend(Ty) 5098 : ABIArgInfo::getDirect()); 5099 } 5100 5101 // Structures with either a non-trivial destructor or a non-trivial 5102 // copy constructor are always indirect. 5103 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 5104 return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA == 5105 CGCXXABI::RAA_DirectInMemory); 5106 } 5107 5108 // Empty records are always ignored on Darwin, but actually passed in C++ mode 5109 // elsewhere for GNU compatibility. 5110 uint64_t Size = getContext().getTypeSize(Ty); 5111 bool IsEmpty = isEmptyRecord(getContext(), Ty, true); 5112 if (IsEmpty || Size == 0) { 5113 if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS()) 5114 return ABIArgInfo::getIgnore(); 5115 5116 // GNU C mode. The only argument that gets ignored is an empty one with size 5117 // 0. 
5118 if (IsEmpty && Size == 0) 5119 return ABIArgInfo::getIgnore(); 5120 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 5121 } 5122 5123 // Homogeneous Floating-point Aggregates (HFAs) need to be expanded. 5124 const Type *Base = nullptr; 5125 uint64_t Members = 0; 5126 if (isHomogeneousAggregate(Ty, Base, Members)) { 5127 return ABIArgInfo::getDirect( 5128 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members)); 5129 } 5130 5131 // Aggregates <= 16 bytes are passed directly in registers or on the stack. 5132 if (Size <= 128) { 5133 // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of 5134 // same size and alignment. 5135 if (getTarget().isRenderScriptTarget()) { 5136 return coerceToIntArray(Ty, getContext(), getVMContext()); 5137 } 5138 unsigned Alignment; 5139 if (Kind == AArch64ABIInfo::AAPCS) { 5140 Alignment = getContext().getTypeUnadjustedAlign(Ty); 5141 Alignment = Alignment < 128 ? 64 : 128; 5142 } else { 5143 Alignment = getContext().getTypeAlign(Ty); 5144 } 5145 Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes 5146 5147 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment. 5148 // For aggregates with 16-byte alignment, we use i128. 5149 if (Alignment < 128 && Size == 128) { 5150 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext()); 5151 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64)); 5152 } 5153 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size)); 5154 } 5155 5156 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 5157 } 5158 5159 ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const { 5160 if (RetTy->isVoidType()) 5161 return ABIArgInfo::getIgnore(); 5162 5163 // Large vector types should be returned via memory. 5164 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) 5165 return getNaturalAlignIndirect(RetTy); 5166 5167 if (!isAggregateTypeForABI(RetTy)) { 5168 // Treat an enum type as its underlying type. 5169 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 5170 RetTy = EnumTy->getDecl()->getIntegerType(); 5171 5172 return (RetTy->isPromotableIntegerType() && isDarwinPCS() 5173 ? ABIArgInfo::getExtend(RetTy) 5174 : ABIArgInfo::getDirect()); 5175 } 5176 5177 uint64_t Size = getContext().getTypeSize(RetTy); 5178 if (isEmptyRecord(getContext(), RetTy, true) || Size == 0) 5179 return ABIArgInfo::getIgnore(); 5180 5181 const Type *Base = nullptr; 5182 uint64_t Members = 0; 5183 if (isHomogeneousAggregate(RetTy, Base, Members)) 5184 // Homogeneous Floating-point Aggregates (HFAs) are returned directly. 5185 return ABIArgInfo::getDirect(); 5186 5187 // Aggregates <= 16 bytes are returned directly in registers or on the stack. 5188 if (Size <= 128) { 5189 // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of 5190 // same size and alignment. 5191 if (getTarget().isRenderScriptTarget()) { 5192 return coerceToIntArray(RetTy, getContext(), getVMContext()); 5193 } 5194 unsigned Alignment = getContext().getTypeAlign(RetTy); 5195 Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes 5196 5197 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment. 5198 // For aggregates with 16-byte alignment, we use i128. 
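  // For example, "struct { long a; long b; }" (8-byte aligned) is returned as
  // [2 x i64], whereas "struct { __int128 a; }" is returned as a single i128.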
5199 if (Alignment < 128 && Size == 128) { 5200 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext()); 5201 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64)); 5202 } 5203 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size)); 5204 } 5205 5206 return getNaturalAlignIndirect(RetTy); 5207 } 5208 5209 /// isIllegalVectorType - check whether the vector type is legal for AArch64. 5210 bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const { 5211 if (const VectorType *VT = Ty->getAs<VectorType>()) { 5212 // Check whether VT is legal. 5213 unsigned NumElements = VT->getNumElements(); 5214 uint64_t Size = getContext().getTypeSize(VT); 5215 // NumElements should be power of 2. 5216 if (!llvm::isPowerOf2_32(NumElements)) 5217 return true; 5218 return Size != 64 && (Size != 128 || NumElements == 1); 5219 } 5220 return false; 5221 } 5222 5223 bool AArch64ABIInfo::isLegalVectorTypeForSwift(CharUnits totalSize, 5224 llvm::Type *eltTy, 5225 unsigned elts) const { 5226 if (!llvm::isPowerOf2_32(elts)) 5227 return false; 5228 if (totalSize.getQuantity() != 8 && 5229 (totalSize.getQuantity() != 16 || elts == 1)) 5230 return false; 5231 return true; 5232 } 5233 5234 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 5235 // Homogeneous aggregates for AAPCS64 must have base types of a floating 5236 // point type or a short-vector type. This is the same as the 32-bit ABI, 5237 // but with the difference that any floating-point type is allowed, 5238 // including __fp16. 5239 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 5240 if (BT->isFloatingPoint()) 5241 return true; 5242 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 5243 unsigned VecSize = getContext().getTypeSize(VT); 5244 if (VecSize == 64 || VecSize == 128) 5245 return true; 5246 } 5247 return false; 5248 } 5249 5250 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, 5251 uint64_t Members) const { 5252 return Members <= 4; 5253 } 5254 5255 Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, 5256 QualType Ty, 5257 CodeGenFunction &CGF) const { 5258 ABIArgInfo AI = classifyArgumentType(Ty); 5259 bool IsIndirect = AI.isIndirect(); 5260 5261 llvm::Type *BaseTy = CGF.ConvertType(Ty); 5262 if (IsIndirect) 5263 BaseTy = llvm::PointerType::getUnqual(BaseTy); 5264 else if (AI.getCoerceToType()) 5265 BaseTy = AI.getCoerceToType(); 5266 5267 unsigned NumRegs = 1; 5268 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) { 5269 BaseTy = ArrTy->getElementType(); 5270 NumRegs = ArrTy->getNumElements(); 5271 } 5272 bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy(); 5273 5274 // The AArch64 va_list type and handling is specified in the Procedure Call 5275 // Standard, section B.4: 5276 // 5277 // struct { 5278 // void *__stack; 5279 // void *__gr_top; 5280 // void *__vr_top; 5281 // int __gr_offs; 5282 // int __vr_offs; 5283 // }; 5284 5285 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg"); 5286 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 5287 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack"); 5288 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 5289 5290 CharUnits TySize = getContext().getTypeSizeInChars(Ty); 5291 CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty); 5292 5293 Address reg_offs_p = Address::invalid(); 5294 llvm::Value *reg_offs = nullptr; 5295 int reg_top_index; 5296 int RegSize = IsIndirect ? 
8 : TySize.getQuantity(); 5297 if (!IsFPR) { 5298 // 3 is the field number of __gr_offs 5299 reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p"); 5300 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs"); 5301 reg_top_index = 1; // field number for __gr_top 5302 RegSize = llvm::alignTo(RegSize, 8); 5303 } else { 5304 // 4 is the field number of __vr_offs. 5305 reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p"); 5306 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs"); 5307 reg_top_index = 2; // field number for __vr_top 5308 RegSize = 16 * NumRegs; 5309 } 5310 5311 //======================================= 5312 // Find out where argument was passed 5313 //======================================= 5314 5315 // If reg_offs >= 0 we're already using the stack for this type of 5316 // argument. We don't want to keep updating reg_offs (in case it overflows, 5317 // though anyone passing 2GB of arguments, each at most 16 bytes, deserves 5318 // whatever they get). 5319 llvm::Value *UsingStack = nullptr; 5320 UsingStack = CGF.Builder.CreateICmpSGE( 5321 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0)); 5322 5323 CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock); 5324 5325 // Otherwise, at least some kind of argument could go in these registers, the 5326 // question is whether this particular type is too big. 5327 CGF.EmitBlock(MaybeRegBlock); 5328 5329 // Integer arguments may need to correct register alignment (for example a 5330 // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we 5331 // align __gr_offs to calculate the potential address. 5332 if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) { 5333 int Align = TyAlign.getQuantity(); 5334 5335 reg_offs = CGF.Builder.CreateAdd( 5336 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1), 5337 "align_regoffs"); 5338 reg_offs = CGF.Builder.CreateAnd( 5339 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align), 5340 "aligned_regoffs"); 5341 } 5342 5343 // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list. 5344 // The fact that this is done unconditionally reflects the fact that 5345 // allocating an argument to the stack also uses up all the remaining 5346 // registers of the appropriate kind. 5347 llvm::Value *NewOffset = nullptr; 5348 NewOffset = CGF.Builder.CreateAdd( 5349 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs"); 5350 CGF.Builder.CreateStore(NewOffset, reg_offs_p); 5351 5352 // Now we're in a position to decide whether this argument really was in 5353 // registers or not. 5354 llvm::Value *InRegs = nullptr; 5355 InRegs = CGF.Builder.CreateICmpSLE( 5356 NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg"); 5357 5358 CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock); 5359 5360 //======================================= 5361 // Argument was in registers 5362 //======================================= 5363 5364 // Now we emit the code for if the argument was originally passed in 5365 // registers. First start the appropriate block: 5366 CGF.EmitBlock(InRegBlock); 5367 5368 llvm::Value *reg_top = nullptr; 5369 Address reg_top_p = 5370 CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p"); 5371 reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top"); 5372 Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs), 5373 CharUnits::fromQuantity(IsFPR ? 
16 : 8)); 5374 Address RegAddr = Address::invalid(); 5375 llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty); 5376 5377 if (IsIndirect) { 5378 // If it's been passed indirectly (actually a struct), whatever we find from 5379 // stored registers or on the stack will actually be a struct **. 5380 MemTy = llvm::PointerType::getUnqual(MemTy); 5381 } 5382 5383 const Type *Base = nullptr; 5384 uint64_t NumMembers = 0; 5385 bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers); 5386 if (IsHFA && NumMembers > 1) { 5387 // Homogeneous aggregates passed in registers will have their elements split 5388 // and stored 16-bytes apart regardless of size (they're notionally in qN, 5389 // qN+1, ...). We reload and store into a temporary local variable 5390 // contiguously. 5391 assert(!IsIndirect && "Homogeneous aggregates should be passed directly"); 5392 auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0)); 5393 llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0)); 5394 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers); 5395 Address Tmp = CGF.CreateTempAlloca(HFATy, 5396 std::max(TyAlign, BaseTyInfo.second)); 5397 5398 // On big-endian platforms, the value will be right-aligned in its slot. 5399 int Offset = 0; 5400 if (CGF.CGM.getDataLayout().isBigEndian() && 5401 BaseTyInfo.first.getQuantity() < 16) 5402 Offset = 16 - BaseTyInfo.first.getQuantity(); 5403 5404 for (unsigned i = 0; i < NumMembers; ++i) { 5405 CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset); 5406 Address LoadAddr = 5407 CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset); 5408 LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy); 5409 5410 Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i); 5411 5412 llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr); 5413 CGF.Builder.CreateStore(Elem, StoreAddr); 5414 } 5415 5416 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy); 5417 } else { 5418 // Otherwise the object is contiguous in memory. 5419 5420 // It might be right-aligned in its slot. 5421 CharUnits SlotSize = BaseAddr.getAlignment(); 5422 if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect && 5423 (IsHFA || !isAggregateTypeForABI(Ty)) && 5424 TySize < SlotSize) { 5425 CharUnits Offset = SlotSize - TySize; 5426 BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset); 5427 } 5428 5429 RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy); 5430 } 5431 5432 CGF.EmitBranch(ContBlock); 5433 5434 //======================================= 5435 // Argument was on the stack 5436 //======================================= 5437 CGF.EmitBlock(OnStackBlock); 5438 5439 Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p"); 5440 llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack"); 5441 5442 // Again, stack arguments may need realignment. In this case both integer and 5443 // floating-point ones might be affected. 
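  // The rounding below is the usual "ptr = (ptr + align - 1) & -align"
  // sequence; e.g. for a 16-byte-aligned argument, a __stack value ending in
  // 0x18 is bumped to the next address ending in 0x20.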
5444 if (!IsIndirect && TyAlign.getQuantity() > 8) { 5445 int Align = TyAlign.getQuantity(); 5446 5447 OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty); 5448 5449 OnStackPtr = CGF.Builder.CreateAdd( 5450 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1), 5451 "align_stack"); 5452 OnStackPtr = CGF.Builder.CreateAnd( 5453 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align), 5454 "align_stack"); 5455 5456 OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy); 5457 } 5458 Address OnStackAddr(OnStackPtr, 5459 std::max(CharUnits::fromQuantity(8), TyAlign)); 5460 5461 // All stack slots are multiples of 8 bytes. 5462 CharUnits StackSlotSize = CharUnits::fromQuantity(8); 5463 CharUnits StackSize; 5464 if (IsIndirect) 5465 StackSize = StackSlotSize; 5466 else 5467 StackSize = TySize.alignTo(StackSlotSize); 5468 5469 llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize); 5470 llvm::Value *NewStack = 5471 CGF.Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC, "new_stack"); 5472 5473 // Write the new value of __stack for the next call to va_arg 5474 CGF.Builder.CreateStore(NewStack, stack_p); 5475 5476 if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) && 5477 TySize < StackSlotSize) { 5478 CharUnits Offset = StackSlotSize - TySize; 5479 OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset); 5480 } 5481 5482 OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy); 5483 5484 CGF.EmitBranch(ContBlock); 5485 5486 //======================================= 5487 // Tidy up 5488 //======================================= 5489 CGF.EmitBlock(ContBlock); 5490 5491 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, 5492 OnStackAddr, OnStackBlock, "vaargs.addr"); 5493 5494 if (IsIndirect) 5495 return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"), 5496 TyAlign); 5497 5498 return ResAddr; 5499 } 5500 5501 Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty, 5502 CodeGenFunction &CGF) const { 5503 // The backend's lowering doesn't support va_arg for aggregates or 5504 // illegal vector types. Lower VAArg here for these cases and use 5505 // the LLVM va_arg instruction for everything else. 5506 if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty)) 5507 return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect()); 5508 5509 CharUnits SlotSize = CharUnits::fromQuantity(8); 5510 5511 // Empty records are ignored for parameter passing purposes. 5512 if (isEmptyRecord(getContext(), Ty, true)) { 5513 Address Addr(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize); 5514 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty)); 5515 return Addr; 5516 } 5517 5518 // The size of the actual thing passed, which might end up just 5519 // being a pointer for indirect types. 5520 auto TyInfo = getContext().getTypeInfoInChars(Ty); 5521 5522 // Arguments bigger than 16 bytes which aren't homogeneous 5523 // aggregates should be passed indirectly. 
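  // For instance, "struct { double d[4]; }" is 32 bytes but is a homogeneous
  // aggregate, so it is still read directly from the va_list area, whereas a
  // 32-byte struct mixing ints and doubles is read through a pointer stored
  // in the 8-byte slot.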
5524 bool IsIndirect = false; 5525 if (TyInfo.first.getQuantity() > 16) { 5526 const Type *Base = nullptr; 5527 uint64_t Members = 0; 5528 IsIndirect = !isHomogeneousAggregate(Ty, Base, Members); 5529 } 5530 5531 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, 5532 TyInfo, SlotSize, /*AllowHigherAlign*/ true); 5533 } 5534 5535 Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, 5536 QualType Ty) const { 5537 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false, 5538 CGF.getContext().getTypeInfoInChars(Ty), 5539 CharUnits::fromQuantity(8), 5540 /*allowHigherAlign*/ false); 5541 } 5542 5543 //===----------------------------------------------------------------------===// 5544 // ARM ABI Implementation 5545 //===----------------------------------------------------------------------===// 5546 5547 namespace { 5548 5549 class ARMABIInfo : public SwiftABIInfo { 5550 public: 5551 enum ABIKind { 5552 APCS = 0, 5553 AAPCS = 1, 5554 AAPCS_VFP = 2, 5555 AAPCS16_VFP = 3, 5556 }; 5557 5558 private: 5559 ABIKind Kind; 5560 5561 public: 5562 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) 5563 : SwiftABIInfo(CGT), Kind(_Kind) { 5564 setCCs(); 5565 } 5566 5567 bool isEABI() const { 5568 switch (getTarget().getTriple().getEnvironment()) { 5569 case llvm::Triple::Android: 5570 case llvm::Triple::EABI: 5571 case llvm::Triple::EABIHF: 5572 case llvm::Triple::GNUEABI: 5573 case llvm::Triple::GNUEABIHF: 5574 case llvm::Triple::MuslEABI: 5575 case llvm::Triple::MuslEABIHF: 5576 return true; 5577 default: 5578 return false; 5579 } 5580 } 5581 5582 bool isEABIHF() const { 5583 switch (getTarget().getTriple().getEnvironment()) { 5584 case llvm::Triple::EABIHF: 5585 case llvm::Triple::GNUEABIHF: 5586 case llvm::Triple::MuslEABIHF: 5587 return true; 5588 default: 5589 return false; 5590 } 5591 } 5592 5593 ABIKind getABIKind() const { return Kind; } 5594 5595 private: 5596 ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic, 5597 unsigned functionCallConv) const; 5598 ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic, 5599 unsigned functionCallConv) const; 5600 ABIArgInfo classifyHomogeneousAggregate(QualType Ty, const Type *Base, 5601 uint64_t Members) const; 5602 ABIArgInfo coerceIllegalVector(QualType Ty) const; 5603 bool isIllegalVectorType(QualType Ty) const; 5604 5605 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 5606 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 5607 uint64_t Members) const override; 5608 5609 bool isEffectivelyAAPCS_VFP(unsigned callConvention, bool acceptHalf) const; 5610 5611 void computeInfo(CGFunctionInfo &FI) const override; 5612 5613 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 5614 QualType Ty) const override; 5615 5616 llvm::CallingConv::ID getLLVMDefaultCC() const; 5617 llvm::CallingConv::ID getABIDefaultCC() const; 5618 void setCCs(); 5619 5620 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, 5621 bool asReturnValue) const override { 5622 return occupiesMoreThan(CGT, scalars, /*total*/ 4); 5623 } 5624 bool isSwiftErrorInRegister() const override { 5625 return true; 5626 } 5627 bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy, 5628 unsigned elts) const override; 5629 }; 5630 5631 class ARMTargetCodeGenInfo : public TargetCodeGenInfo { 5632 public: 5633 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 5634 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {} 5635 5636 const ARMABIInfo &getABIInfo() const { 5637 return static_cast<const 
ARMABIInfo&>(TargetCodeGenInfo::getABIInfo()); 5638 } 5639 5640 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 5641 return 13; 5642 } 5643 5644 StringRef getARCRetainAutoreleasedReturnValueMarker() const override { 5645 return "mov\tr7, r7\t\t// marker for objc_retainAutoreleaseReturnValue"; 5646 } 5647 5648 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 5649 llvm::Value *Address) const override { 5650 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 5651 5652 // 0-15 are the 16 integer registers. 5653 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15); 5654 return false; 5655 } 5656 5657 unsigned getSizeOfUnwindException() const override { 5658 if (getABIInfo().isEABI()) return 88; 5659 return TargetCodeGenInfo::getSizeOfUnwindException(); 5660 } 5661 5662 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 5663 CodeGen::CodeGenModule &CGM) const override { 5664 if (GV->isDeclaration()) 5665 return; 5666 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 5667 if (!FD) 5668 return; 5669 5670 const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>(); 5671 if (!Attr) 5672 return; 5673 5674 const char *Kind; 5675 switch (Attr->getInterrupt()) { 5676 case ARMInterruptAttr::Generic: Kind = ""; break; 5677 case ARMInterruptAttr::IRQ: Kind = "IRQ"; break; 5678 case ARMInterruptAttr::FIQ: Kind = "FIQ"; break; 5679 case ARMInterruptAttr::SWI: Kind = "SWI"; break; 5680 case ARMInterruptAttr::ABORT: Kind = "ABORT"; break; 5681 case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break; 5682 } 5683 5684 llvm::Function *Fn = cast<llvm::Function>(GV); 5685 5686 Fn->addFnAttr("interrupt", Kind); 5687 5688 ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind(); 5689 if (ABI == ARMABIInfo::APCS) 5690 return; 5691 5692 // AAPCS guarantees that sp will be 8-byte aligned on any public interface, 5693 // however this is not necessarily true on taking any interrupt. Instruct 5694 // the backend to perform a realignment as part of the function prologue. 
5695 llvm::AttrBuilder B; 5696 B.addStackAlignmentAttr(8); 5697 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B); 5698 } 5699 }; 5700 5701 class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo { 5702 public: 5703 WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 5704 : ARMTargetCodeGenInfo(CGT, K) {} 5705 5706 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 5707 CodeGen::CodeGenModule &CGM) const override; 5708 5709 void getDependentLibraryOption(llvm::StringRef Lib, 5710 llvm::SmallString<24> &Opt) const override { 5711 Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib); 5712 } 5713 5714 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value, 5715 llvm::SmallString<32> &Opt) const override { 5716 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 5717 } 5718 }; 5719 5720 void WindowsARMTargetCodeGenInfo::setTargetAttributes( 5721 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { 5722 ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM); 5723 if (GV->isDeclaration()) 5724 return; 5725 addStackProbeTargetAttributes(D, GV, CGM); 5726 } 5727 } 5728 5729 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const { 5730 if (!::classifyReturnType(getCXXABI(), FI, *this)) 5731 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic(), 5732 FI.getCallingConvention()); 5733 5734 for (auto &I : FI.arguments()) 5735 I.info = classifyArgumentType(I.type, FI.isVariadic(), 5736 FI.getCallingConvention()); 5737 5738 5739 // Always honor user-specified calling convention. 5740 if (FI.getCallingConvention() != llvm::CallingConv::C) 5741 return; 5742 5743 llvm::CallingConv::ID cc = getRuntimeCC(); 5744 if (cc != llvm::CallingConv::C) 5745 FI.setEffectiveCallingConvention(cc); 5746 } 5747 5748 /// Return the default calling convention that LLVM will use. 5749 llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const { 5750 // The default calling convention that LLVM will infer. 5751 if (isEABIHF() || getTarget().getTriple().isWatchABI()) 5752 return llvm::CallingConv::ARM_AAPCS_VFP; 5753 else if (isEABI()) 5754 return llvm::CallingConv::ARM_AAPCS; 5755 else 5756 return llvm::CallingConv::ARM_APCS; 5757 } 5758 5759 /// Return the calling convention that our ABI would like us to use 5760 /// as the C calling convention. 5761 llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const { 5762 switch (getABIKind()) { 5763 case APCS: return llvm::CallingConv::ARM_APCS; 5764 case AAPCS: return llvm::CallingConv::ARM_AAPCS; 5765 case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; 5766 case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; 5767 } 5768 llvm_unreachable("bad ABI kind"); 5769 } 5770 5771 void ARMABIInfo::setCCs() { 5772 assert(getRuntimeCC() == llvm::CallingConv::C); 5773 5774 // Don't muddy up the IR with a ton of explicit annotations if 5775 // they'd just match what LLVM will infer from the triple. 
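  // For example, on a *-gnueabihf triple with the AAPCS_VFP ABI kind both
  // defaults below are ARM_AAPCS_VFP, so RuntimeCC stays the plain C
  // convention; with the same ABI kind on a plain *-gnueabi triple the
  // defaults differ and the runtime CC is recorded explicitly.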
5776 llvm::CallingConv::ID abiCC = getABIDefaultCC(); 5777 if (abiCC != getLLVMDefaultCC()) 5778 RuntimeCC = abiCC; 5779 } 5780 5781 ABIArgInfo ARMABIInfo::coerceIllegalVector(QualType Ty) const { 5782 uint64_t Size = getContext().getTypeSize(Ty); 5783 if (Size <= 32) { 5784 llvm::Type *ResType = 5785 llvm::Type::getInt32Ty(getVMContext()); 5786 return ABIArgInfo::getDirect(ResType); 5787 } 5788 if (Size == 64 || Size == 128) { 5789 llvm::Type *ResType = llvm::VectorType::get( 5790 llvm::Type::getInt32Ty(getVMContext()), Size / 32); 5791 return ABIArgInfo::getDirect(ResType); 5792 } 5793 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 5794 } 5795 5796 ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty, 5797 const Type *Base, 5798 uint64_t Members) const { 5799 assert(Base && "Base class should be set for homogeneous aggregate"); 5800 // Base can be a floating-point or a vector. 5801 if (const VectorType *VT = Base->getAs<VectorType>()) { 5802 // FP16 vectors should be converted to integer vectors 5803 if (!getTarget().hasLegalHalfType() && 5804 (VT->getElementType()->isFloat16Type() || 5805 VT->getElementType()->isHalfType())) { 5806 uint64_t Size = getContext().getTypeSize(VT); 5807 llvm::Type *NewVecTy = llvm::VectorType::get( 5808 llvm::Type::getInt32Ty(getVMContext()), Size / 32); 5809 llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members); 5810 return ABIArgInfo::getDirect(Ty, 0, nullptr, false); 5811 } 5812 } 5813 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false); 5814 } 5815 5816 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic, 5817 unsigned functionCallConv) const { 5818 // 6.1.2.1 The following argument types are VFP CPRCs: 5819 // A single-precision floating-point type (including promoted 5820 // half-precision types); A double-precision floating-point type; 5821 // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate 5822 // with a Base Type of a single- or double-precision floating-point type, 5823 // 64-bit containerized vectors or 128-bit containerized vectors with one 5824 // to four Elements. 5825 // Variadic functions should always marshal to the base standard. 5826 bool IsAAPCS_VFP = 5827 !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ false); 5828 5829 Ty = useFirstFieldIfTransparentUnion(Ty); 5830 5831 // Handle illegal vector types here. 5832 if (isIllegalVectorType(Ty)) 5833 return coerceIllegalVector(Ty); 5834 5835 // _Float16 and __fp16 get passed as if it were an int or float, but with 5836 // the top 16 bits unspecified. This is not done for OpenCL as it handles the 5837 // half type natively, and does not need to interwork with AAPCS code. 5838 if ((Ty->isFloat16Type() || Ty->isHalfType()) && 5839 !getContext().getLangOpts().NativeHalfArgsAndReturns) { 5840 llvm::Type *ResType = IsAAPCS_VFP ? 5841 llvm::Type::getFloatTy(getVMContext()) : 5842 llvm::Type::getInt32Ty(getVMContext()); 5843 return ABIArgInfo::getDirect(ResType); 5844 } 5845 5846 if (!isAggregateTypeForABI(Ty)) { 5847 // Treat an enum type as its underlying type. 5848 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) { 5849 Ty = EnumTy->getDecl()->getIntegerType(); 5850 } 5851 5852 return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty) 5853 : ABIArgInfo::getDirect()); 5854 } 5855 5856 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 5857 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 5858 } 5859 5860 // Ignore empty records. 
5861 if (isEmptyRecord(getContext(), Ty, true)) 5862 return ABIArgInfo::getIgnore(); 5863 5864 if (IsAAPCS_VFP) { 5865 // Homogeneous Aggregates need to be expanded when we can fit the aggregate 5866 // into VFP registers. 5867 const Type *Base = nullptr; 5868 uint64_t Members = 0; 5869 if (isHomogeneousAggregate(Ty, Base, Members)) 5870 return classifyHomogeneousAggregate(Ty, Base, Members); 5871 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) { 5872 // WatchOS does have homogeneous aggregates. Note that we intentionally use 5873 // this convention even for a variadic function: the backend will use GPRs 5874 // if needed. 5875 const Type *Base = nullptr; 5876 uint64_t Members = 0; 5877 if (isHomogeneousAggregate(Ty, Base, Members)) { 5878 assert(Base && Members <= 4 && "unexpected homogeneous aggregate"); 5879 llvm::Type *Ty = 5880 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members); 5881 return ABIArgInfo::getDirect(Ty, 0, nullptr, false); 5882 } 5883 } 5884 5885 if (getABIKind() == ARMABIInfo::AAPCS16_VFP && 5886 getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) { 5887 // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're 5888 // bigger than 128-bits, they get placed in space allocated by the caller, 5889 // and a pointer is passed. 5890 return ABIArgInfo::getIndirect( 5891 CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false); 5892 } 5893 5894 // Support byval for ARM. 5895 // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at 5896 // most 8-byte. We realign the indirect argument if type alignment is bigger 5897 // than ABI alignment. 5898 uint64_t ABIAlign = 4; 5899 uint64_t TyAlign; 5900 if (getABIKind() == ARMABIInfo::AAPCS_VFP || 5901 getABIKind() == ARMABIInfo::AAPCS) { 5902 TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity(); 5903 ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8); 5904 } else { 5905 TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity(); 5906 } 5907 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) { 5908 assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval"); 5909 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign), 5910 /*ByVal=*/true, 5911 /*Realign=*/TyAlign > ABIAlign); 5912 } 5913 5914 // On RenderScript, coerce Aggregates <= 64 bytes to an integer array of 5915 // same size and alignment. 5916 if (getTarget().isRenderScriptTarget()) { 5917 return coerceToIntArray(Ty, getContext(), getVMContext()); 5918 } 5919 5920 // Otherwise, pass by coercing to a structure of the appropriate size. 5921 llvm::Type* ElemTy; 5922 unsigned SizeRegs; 5923 // FIXME: Try to match the types of the arguments more accurately where 5924 // we can. 5925 if (TyAlign <= 4) { 5926 ElemTy = llvm::Type::getInt32Ty(getVMContext()); 5927 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32; 5928 } else { 5929 ElemTy = llvm::Type::getInt64Ty(getVMContext()); 5930 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64; 5931 } 5932 5933 return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs)); 5934 } 5935 5936 static bool isIntegerLikeType(QualType Ty, ASTContext &Context, 5937 llvm::LLVMContext &VMContext) { 5938 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure 5939 // is called integer-like if its size is less than or equal to one word, and 5940 // the offset of each of its addressable sub-fields is zero. 
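  // For example, "struct { short s; }" and "union { int i; char c; }" are
  // integer-like, while "struct { float f; }" (floating point) and
  // "struct { char a; char b; }" (second field at a nonzero offset) are not.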
5941 5942 uint64_t Size = Context.getTypeSize(Ty); 5943 5944 // Check that the type fits in a word. 5945 if (Size > 32) 5946 return false; 5947 5948 // FIXME: Handle vector types! 5949 if (Ty->isVectorType()) 5950 return false; 5951 5952 // Float types are never treated as "integer like". 5953 if (Ty->isRealFloatingType()) 5954 return false; 5955 5956 // If this is a builtin or pointer type then it is ok. 5957 if (Ty->getAs<BuiltinType>() || Ty->isPointerType()) 5958 return true; 5959 5960 // Small complex integer types are "integer like". 5961 if (const ComplexType *CT = Ty->getAs<ComplexType>()) 5962 return isIntegerLikeType(CT->getElementType(), Context, VMContext); 5963 5964 // Single element and zero sized arrays should be allowed, by the definition 5965 // above, but they are not. 5966 5967 // Otherwise, it must be a record type. 5968 const RecordType *RT = Ty->getAs<RecordType>(); 5969 if (!RT) return false; 5970 5971 // Ignore records with flexible arrays. 5972 const RecordDecl *RD = RT->getDecl(); 5973 if (RD->hasFlexibleArrayMember()) 5974 return false; 5975 5976 // Check that all sub-fields are at offset 0, and are themselves "integer 5977 // like". 5978 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 5979 5980 bool HadField = false; 5981 unsigned idx = 0; 5982 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 5983 i != e; ++i, ++idx) { 5984 const FieldDecl *FD = *i; 5985 5986 // Bit-fields are not addressable, we only need to verify they are "integer 5987 // like". We still have to disallow a subsequent non-bitfield, for example: 5988 // struct { int : 0; int x } 5989 // is non-integer like according to gcc. 5990 if (FD->isBitField()) { 5991 if (!RD->isUnion()) 5992 HadField = true; 5993 5994 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 5995 return false; 5996 5997 continue; 5998 } 5999 6000 // Check if this field is at offset 0. 6001 if (Layout.getFieldOffset(idx) != 0) 6002 return false; 6003 6004 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 6005 return false; 6006 6007 // Only allow at most one field in a structure. This doesn't match the 6008 // wording above, but follows gcc in situations with a field following an 6009 // empty structure. 6010 if (!RD->isUnion()) { 6011 if (HadField) 6012 return false; 6013 6014 HadField = true; 6015 } 6016 } 6017 6018 return true; 6019 } 6020 6021 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic, 6022 unsigned functionCallConv) const { 6023 6024 // Variadic functions should always marshal to the base standard. 6025 bool IsAAPCS_VFP = 6026 !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ true); 6027 6028 if (RetTy->isVoidType()) 6029 return ABIArgInfo::getIgnore(); 6030 6031 if (const VectorType *VT = RetTy->getAs<VectorType>()) { 6032 // Large vector types should be returned via memory. 6033 if (getContext().getTypeSize(RetTy) > 128) 6034 return getNaturalAlignIndirect(RetTy); 6035 // FP16 vectors should be converted to integer vectors 6036 if (!getTarget().hasLegalHalfType() && 6037 (VT->getElementType()->isFloat16Type() || 6038 VT->getElementType()->isHalfType())) 6039 return coerceIllegalVector(RetTy); 6040 } 6041 6042 // _Float16 and __fp16 get returned as if it were an int or float, but with 6043 // the top 16 bits unspecified. This is not done for OpenCL as it handles the 6044 // half type natively, and does not need to interwork with AAPCS code. 
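  // So a function returning __fp16 is given a float return type under
  // AAPCS-VFP and an i32 return type otherwise, with only the low 16 bits
  // carrying the half-precision value.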
6045 if ((RetTy->isFloat16Type() || RetTy->isHalfType()) && 6046 !getContext().getLangOpts().NativeHalfArgsAndReturns) { 6047 llvm::Type *ResType = IsAAPCS_VFP ? 6048 llvm::Type::getFloatTy(getVMContext()) : 6049 llvm::Type::getInt32Ty(getVMContext()); 6050 return ABIArgInfo::getDirect(ResType); 6051 } 6052 6053 if (!isAggregateTypeForABI(RetTy)) { 6054 // Treat an enum type as its underlying type. 6055 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 6056 RetTy = EnumTy->getDecl()->getIntegerType(); 6057 6058 return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy) 6059 : ABIArgInfo::getDirect(); 6060 } 6061 6062 // Are we following APCS? 6063 if (getABIKind() == APCS) { 6064 if (isEmptyRecord(getContext(), RetTy, false)) 6065 return ABIArgInfo::getIgnore(); 6066 6067 // Complex types are all returned as packed integers. 6068 // 6069 // FIXME: Consider using 2 x vector types if the back end handles them 6070 // correctly. 6071 if (RetTy->isAnyComplexType()) 6072 return ABIArgInfo::getDirect(llvm::IntegerType::get( 6073 getVMContext(), getContext().getTypeSize(RetTy))); 6074 6075 // Integer like structures are returned in r0. 6076 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) { 6077 // Return in the smallest viable integer type. 6078 uint64_t Size = getContext().getTypeSize(RetTy); 6079 if (Size <= 8) 6080 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 6081 if (Size <= 16) 6082 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 6083 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 6084 } 6085 6086 // Otherwise return in memory. 6087 return getNaturalAlignIndirect(RetTy); 6088 } 6089 6090 // Otherwise this is an AAPCS variant. 6091 6092 if (isEmptyRecord(getContext(), RetTy, true)) 6093 return ABIArgInfo::getIgnore(); 6094 6095 // Check for homogeneous aggregates with AAPCS-VFP. 6096 if (IsAAPCS_VFP) { 6097 const Type *Base = nullptr; 6098 uint64_t Members = 0; 6099 if (isHomogeneousAggregate(RetTy, Base, Members)) 6100 return classifyHomogeneousAggregate(RetTy, Base, Members); 6101 } 6102 6103 // Aggregates <= 4 bytes are returned in r0; other aggregates 6104 // are returned indirectly. 6105 uint64_t Size = getContext().getTypeSize(RetTy); 6106 if (Size <= 32) { 6107 // On RenderScript, coerce Aggregates <= 4 bytes to an integer array of 6108 // same size and alignment. 6109 if (getTarget().isRenderScriptTarget()) { 6110 return coerceToIntArray(RetTy, getContext(), getVMContext()); 6111 } 6112 if (getDataLayout().isBigEndian()) 6113 // Return in 32 bit integer integer type (as if loaded by LDR, AAPCS 5.4) 6114 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 6115 6116 // Return in the smallest viable integer type. 6117 if (Size <= 8) 6118 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 6119 if (Size <= 16) 6120 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 6121 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 6122 } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) { 6123 llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext()); 6124 llvm::Type *CoerceTy = 6125 llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32); 6126 return ABIArgInfo::getDirect(CoerceTy); 6127 } 6128 6129 return getNaturalAlignIndirect(RetTy); 6130 } 6131 6132 /// isIllegalVector - check whether Ty is an illegal vector type. 
6133 bool ARMABIInfo::isIllegalVectorType(QualType Ty) const { 6134 if (const VectorType *VT = Ty->getAs<VectorType> ()) { 6135 // On targets that don't support FP16, FP16 is expanded into float, and we 6136 // don't want the ABI to depend on whether or not FP16 is supported in 6137 // hardware. Thus return false to coerce FP16 vectors into integer vectors. 6138 if (!getTarget().hasLegalHalfType() && 6139 (VT->getElementType()->isFloat16Type() || 6140 VT->getElementType()->isHalfType())) 6141 return true; 6142 if (isAndroid()) { 6143 // Android shipped using Clang 3.1, which supported a slightly different 6144 // vector ABI. The primary differences were that 3-element vector types 6145 // were legal, and so were sub 32-bit vectors (i.e. <2 x i8>). This path 6146 // accepts that legacy behavior for Android only. 6147 // Check whether VT is legal. 6148 unsigned NumElements = VT->getNumElements(); 6149 // NumElements should be power of 2 or equal to 3. 6150 if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3) 6151 return true; 6152 } else { 6153 // Check whether VT is legal. 6154 unsigned NumElements = VT->getNumElements(); 6155 uint64_t Size = getContext().getTypeSize(VT); 6156 // NumElements should be power of 2. 6157 if (!llvm::isPowerOf2_32(NumElements)) 6158 return true; 6159 // Size should be greater than 32 bits. 6160 return Size <= 32; 6161 } 6162 } 6163 return false; 6164 } 6165 6166 bool ARMABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize, 6167 llvm::Type *eltTy, 6168 unsigned numElts) const { 6169 if (!llvm::isPowerOf2_32(numElts)) 6170 return false; 6171 unsigned size = getDataLayout().getTypeStoreSizeInBits(eltTy); 6172 if (size > 64) 6173 return false; 6174 if (vectorSize.getQuantity() != 8 && 6175 (vectorSize.getQuantity() != 16 || numElts == 1)) 6176 return false; 6177 return true; 6178 } 6179 6180 bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 6181 // Homogeneous aggregates for AAPCS-VFP must have base types of float, 6182 // double, or 64-bit or 128-bit vectors. 6183 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 6184 if (BT->getKind() == BuiltinType::Float || 6185 BT->getKind() == BuiltinType::Double || 6186 BT->getKind() == BuiltinType::LongDouble) 6187 return true; 6188 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 6189 unsigned VecSize = getContext().getTypeSize(VT); 6190 if (VecSize == 64 || VecSize == 128) 6191 return true; 6192 } 6193 return false; 6194 } 6195 6196 bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, 6197 uint64_t Members) const { 6198 return Members <= 4; 6199 } 6200 6201 bool ARMABIInfo::isEffectivelyAAPCS_VFP(unsigned callConvention, 6202 bool acceptHalf) const { 6203 // Give precedence to user-specified calling conventions. 6204 if (callConvention != llvm::CallingConv::C) 6205 return (callConvention == llvm::CallingConv::ARM_AAPCS_VFP); 6206 else 6207 return (getABIKind() == AAPCS_VFP) || 6208 (acceptHalf && (getABIKind() == AAPCS16_VFP)); 6209 } 6210 6211 Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6212 QualType Ty) const { 6213 CharUnits SlotSize = CharUnits::fromQuantity(4); 6214 6215 // Empty records are ignored for parameter passing purposes. 
6216 if (isEmptyRecord(getContext(), Ty, true)) { 6217 Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize); 6218 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty)); 6219 return Addr; 6220 } 6221 6222 CharUnits TySize = getContext().getTypeSizeInChars(Ty); 6223 CharUnits TyAlignForABI = getContext().getTypeUnadjustedAlignInChars(Ty); 6224 6225 // Use indirect if size of the illegal vector is bigger than 16 bytes. 6226 bool IsIndirect = false; 6227 const Type *Base = nullptr; 6228 uint64_t Members = 0; 6229 if (TySize > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) { 6230 IsIndirect = true; 6231 6232 // ARMv7k passes structs bigger than 16 bytes indirectly, in space 6233 // allocated by the caller. 6234 } else if (TySize > CharUnits::fromQuantity(16) && 6235 getABIKind() == ARMABIInfo::AAPCS16_VFP && 6236 !isHomogeneousAggregate(Ty, Base, Members)) { 6237 IsIndirect = true; 6238 6239 // Otherwise, bound the type's ABI alignment. 6240 // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for 6241 // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte. 6242 // Our callers should be prepared to handle an under-aligned address. 6243 } else if (getABIKind() == ARMABIInfo::AAPCS_VFP || 6244 getABIKind() == ARMABIInfo::AAPCS) { 6245 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4)); 6246 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8)); 6247 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) { 6248 // ARMv7k allows type alignment up to 16 bytes. 6249 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4)); 6250 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16)); 6251 } else { 6252 TyAlignForABI = CharUnits::fromQuantity(4); 6253 } 6254 6255 std::pair<CharUnits, CharUnits> TyInfo = { TySize, TyAlignForABI }; 6256 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo, 6257 SlotSize, /*AllowHigherAlign*/ true); 6258 } 6259 6260 //===----------------------------------------------------------------------===// 6261 // NVPTX ABI Implementation 6262 //===----------------------------------------------------------------------===// 6263 6264 namespace { 6265 6266 class NVPTXABIInfo : public ABIInfo { 6267 public: 6268 NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 6269 6270 ABIArgInfo classifyReturnType(QualType RetTy) const; 6271 ABIArgInfo classifyArgumentType(QualType Ty) const; 6272 6273 void computeInfo(CGFunctionInfo &FI) const override; 6274 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6275 QualType Ty) const override; 6276 }; 6277 6278 class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo { 6279 public: 6280 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT) 6281 : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {} 6282 6283 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 6284 CodeGen::CodeGenModule &M) const override; 6285 bool shouldEmitStaticExternCAliases() const override; 6286 6287 private: 6288 // Adds a NamedMDNode with F, Name, and Operand as operands, and adds the 6289 // resulting MDNode to the nvvm.annotations MDNode. 6290 static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand); 6291 }; 6292 6293 /// Checks if the type is unsupported directly by the current target. 
6294 static bool isUnsupportedType(ASTContext &Context, QualType T) { 6295 if (!Context.getTargetInfo().hasFloat16Type() && T->isFloat16Type()) 6296 return true; 6297 if (!Context.getTargetInfo().hasFloat128Type() && T->isFloat128Type()) 6298 return true; 6299 if (!Context.getTargetInfo().hasInt128Type() && T->isIntegerType() && 6300 Context.getTypeSize(T) > 64) 6301 return true; 6302 if (const auto *AT = T->getAsArrayTypeUnsafe()) 6303 return isUnsupportedType(Context, AT->getElementType()); 6304 const auto *RT = T->getAs<RecordType>(); 6305 if (!RT) 6306 return false; 6307 const RecordDecl *RD = RT->getDecl(); 6308 6309 // If this is a C++ record, check the bases first. 6310 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 6311 for (const CXXBaseSpecifier &I : CXXRD->bases()) 6312 if (isUnsupportedType(Context, I.getType())) 6313 return true; 6314 6315 for (const FieldDecl *I : RD->fields()) 6316 if (isUnsupportedType(Context, I->getType())) 6317 return true; 6318 return false; 6319 } 6320 6321 /// Coerce the given type into an array with maximum allowed size of elements. 6322 static ABIArgInfo coerceToIntArrayWithLimit(QualType Ty, ASTContext &Context, 6323 llvm::LLVMContext &LLVMContext, 6324 unsigned MaxSize) { 6325 // Alignment and Size are measured in bits. 6326 const uint64_t Size = Context.getTypeSize(Ty); 6327 const uint64_t Alignment = Context.getTypeAlign(Ty); 6328 const unsigned Div = std::min<unsigned>(MaxSize, Alignment); 6329 llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Div); 6330 const uint64_t NumElements = (Size + Div - 1) / Div; 6331 return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements)); 6332 } 6333 6334 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const { 6335 if (RetTy->isVoidType()) 6336 return ABIArgInfo::getIgnore(); 6337 6338 if (getContext().getLangOpts().OpenMP && 6339 getContext().getLangOpts().OpenMPIsDevice && 6340 isUnsupportedType(getContext(), RetTy)) 6341 return coerceToIntArrayWithLimit(RetTy, getContext(), getVMContext(), 64); 6342 6343 // note: this is different from default ABI 6344 if (!RetTy->isScalarType()) 6345 return ABIArgInfo::getDirect(); 6346 6347 // Treat an enum type as its underlying type. 6348 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 6349 RetTy = EnumTy->getDecl()->getIntegerType(); 6350 6351 return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy) 6352 : ABIArgInfo::getDirect()); 6353 } 6354 6355 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const { 6356 // Treat an enum type as its underlying type. 6357 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 6358 Ty = EnumTy->getDecl()->getIntegerType(); 6359 6360 // Return aggregates type as indirect by value 6361 if (isAggregateTypeForABI(Ty)) 6362 return getNaturalAlignIndirect(Ty, /* byval */ true); 6363 6364 return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty) 6365 : ABIArgInfo::getDirect()); 6366 } 6367 6368 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const { 6369 if (!getCXXABI().classifyReturnType(FI)) 6370 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 6371 for (auto &I : FI.arguments()) 6372 I.info = classifyArgumentType(I.type); 6373 6374 // Always honor user-specified calling convention. 
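// (Sketch of the intent: only functions still carrying the default C
// convention are retargeted to the NVPTX runtime calling convention below; an
// explicitly requested convention is left untouched.)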
6375 if (FI.getCallingConvention() != llvm::CallingConv::C) 6376 return; 6377 6378 FI.setEffectiveCallingConvention(getRuntimeCC()); 6379 } 6380 6381 Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6382 QualType Ty) const { 6383 llvm_unreachable("NVPTX does not support varargs"); 6384 } 6385 6386 void NVPTXTargetCodeGenInfo::setTargetAttributes( 6387 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { 6388 if (GV->isDeclaration()) 6389 return; 6390 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 6391 if (!FD) return; 6392 6393 llvm::Function *F = cast<llvm::Function>(GV); 6394 6395 // Perform special handling in OpenCL mode 6396 if (M.getLangOpts().OpenCL) { 6397 // Use OpenCL function attributes to check for kernel functions 6398 // By default, all functions are device functions 6399 if (FD->hasAttr<OpenCLKernelAttr>()) { 6400 // OpenCL __kernel functions get kernel metadata 6401 // Create !{<func-ref>, metadata !"kernel", i32 1} node 6402 addNVVMMetadata(F, "kernel", 1); 6403 // And kernel functions are not subject to inlining 6404 F->addFnAttr(llvm::Attribute::NoInline); 6405 } 6406 } 6407 6408 // Perform special handling in CUDA mode. 6409 if (M.getLangOpts().CUDA) { 6410 // CUDA __global__ functions get a kernel metadata entry. Since 6411 // __global__ functions cannot be called from the device, we do not 6412 // need to set the noinline attribute. 6413 if (FD->hasAttr<CUDAGlobalAttr>()) { 6414 // Create !{<func-ref>, metadata !"kernel", i32 1} node 6415 addNVVMMetadata(F, "kernel", 1); 6416 } 6417 if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) { 6418 // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node 6419 llvm::APSInt MaxThreads(32); 6420 MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext()); 6421 if (MaxThreads > 0) 6422 addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue()); 6423 6424 // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was 6425 // not specified in __launch_bounds__ or if the user specified a 0 value, 6426 // we don't have to add a PTX directive. 
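// Illustrative mapping, assuming standard CUDA launch-bounds semantics rather
// than anything specific to this file: __launch_bounds__(256) yields only
// !"maxntidx" = 256, while __launch_bounds__(256, 4) additionally yields
// !"minctasm" = 4 via the branch below.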
6427 if (Attr->getMinBlocks()) { 6428 llvm::APSInt MinBlocks(32); 6429 MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext()); 6430 if (MinBlocks > 0) 6431 // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node 6432 addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue()); 6433 } 6434 } 6435 } 6436 } 6437 6438 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name, 6439 int Operand) { 6440 llvm::Module *M = F->getParent(); 6441 llvm::LLVMContext &Ctx = M->getContext(); 6442 6443 // Get "nvvm.annotations" metadata node 6444 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations"); 6445 6446 llvm::Metadata *MDVals[] = { 6447 llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name), 6448 llvm::ConstantAsMetadata::get( 6449 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))}; 6450 // Append metadata to nvvm.annotations 6451 MD->addOperand(llvm::MDNode::get(Ctx, MDVals)); 6452 } 6453 6454 bool NVPTXTargetCodeGenInfo::shouldEmitStaticExternCAliases() const { 6455 return false; 6456 } 6457 } 6458 6459 //===----------------------------------------------------------------------===// 6460 // SystemZ ABI Implementation 6461 //===----------------------------------------------------------------------===// 6462 6463 namespace { 6464 6465 class SystemZABIInfo : public SwiftABIInfo { 6466 bool HasVector; 6467 6468 public: 6469 SystemZABIInfo(CodeGenTypes &CGT, bool HV) 6470 : SwiftABIInfo(CGT), HasVector(HV) {} 6471 6472 bool isPromotableIntegerType(QualType Ty) const; 6473 bool isCompoundType(QualType Ty) const; 6474 bool isVectorArgumentType(QualType Ty) const; 6475 bool isFPArgumentType(QualType Ty) const; 6476 QualType GetSingleElementType(QualType Ty) const; 6477 6478 ABIArgInfo classifyReturnType(QualType RetTy) const; 6479 ABIArgInfo classifyArgumentType(QualType ArgTy) const; 6480 6481 void computeInfo(CGFunctionInfo &FI) const override { 6482 if (!getCXXABI().classifyReturnType(FI)) 6483 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 6484 for (auto &I : FI.arguments()) 6485 I.info = classifyArgumentType(I.type); 6486 } 6487 6488 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6489 QualType Ty) const override; 6490 6491 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars, 6492 bool asReturnValue) const override { 6493 return occupiesMoreThan(CGT, scalars, /*total*/ 4); 6494 } 6495 bool isSwiftErrorInRegister() const override { 6496 return false; 6497 } 6498 }; 6499 6500 class SystemZTargetCodeGenInfo : public TargetCodeGenInfo { 6501 public: 6502 SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector) 6503 : TargetCodeGenInfo(new SystemZABIInfo(CGT, HasVector)) {} 6504 }; 6505 6506 } 6507 6508 bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const { 6509 // Treat an enum type as its underlying type. 6510 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 6511 Ty = EnumTy->getDecl()->getIntegerType(); 6512 6513 // Promotable integer types are required to be promoted by the ABI. 6514 if (Ty->isPromotableIntegerType()) 6515 return true; 6516 6517 // 32-bit values must also be promoted. 
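// For example (illustrative, not quoted from the ABI document): a plain `int`
// or `unsigned int` parameter is widened to a full 64-bit register value,
// which is why Int and UInt are reported as promotable below.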
6518 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 6519 switch (BT->getKind()) { 6520 case BuiltinType::Int: 6521 case BuiltinType::UInt: 6522 return true; 6523 default: 6524 return false; 6525 } 6526 return false; 6527 } 6528 6529 bool SystemZABIInfo::isCompoundType(QualType Ty) const { 6530 return (Ty->isAnyComplexType() || 6531 Ty->isVectorType() || 6532 isAggregateTypeForABI(Ty)); 6533 } 6534 6535 bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const { 6536 return (HasVector && 6537 Ty->isVectorType() && 6538 getContext().getTypeSize(Ty) <= 128); 6539 } 6540 6541 bool SystemZABIInfo::isFPArgumentType(QualType Ty) const { 6542 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 6543 switch (BT->getKind()) { 6544 case BuiltinType::Float: 6545 case BuiltinType::Double: 6546 return true; 6547 default: 6548 return false; 6549 } 6550 6551 return false; 6552 } 6553 6554 QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const { 6555 if (const RecordType *RT = Ty->getAsStructureType()) { 6556 const RecordDecl *RD = RT->getDecl(); 6557 QualType Found; 6558 6559 // If this is a C++ record, check the bases first. 6560 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 6561 for (const auto &I : CXXRD->bases()) { 6562 QualType Base = I.getType(); 6563 6564 // Empty bases don't affect things either way. 6565 if (isEmptyRecord(getContext(), Base, true)) 6566 continue; 6567 6568 if (!Found.isNull()) 6569 return Ty; 6570 Found = GetSingleElementType(Base); 6571 } 6572 6573 // Check the fields. 6574 for (const auto *FD : RD->fields()) { 6575 // For compatibility with GCC, ignore empty bitfields in C++ mode. 6576 // Unlike isSingleElementStruct(), empty structure and array fields 6577 // do count. So do anonymous bitfields that aren't zero-sized. 6578 if (getContext().getLangOpts().CPlusPlus && 6579 FD->isZeroLengthBitField(getContext())) 6580 continue; 6581 6582 // Unlike isSingleElementStruct(), arrays do not count. 6583 // Nested structures still do though. 6584 if (!Found.isNull()) 6585 return Ty; 6586 Found = GetSingleElementType(FD->getType()); 6587 } 6588 6589 // Unlike isSingleElementStruct(), trailing padding is allowed. 6590 // An 8-byte aligned struct s { float f; } is passed as a double. 6591 if (!Found.isNull()) 6592 return Found; 6593 } 6594 6595 return Ty; 6596 } 6597 6598 Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6599 QualType Ty) const { 6600 // Assume that va_list type is correct; should be pointer to LLVM type: 6601 // struct { 6602 // i64 __gpr; 6603 // i64 __fpr; 6604 // i8 *__overflow_arg_area; 6605 // i8 *__reg_save_area; 6606 // }; 6607 6608 // Every non-vector argument occupies 8 bytes and is passed by preference 6609 // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are 6610 // always passed on the stack. 
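// Rough shape of the lowering below (a sketch, not normative): indirect values
// and vectors are fetched from the overflow area; everything else compares the
// saved GPR/FPR count against its maximum and reads either the register save
// area or the overflow area, merging the two addresses with a phi at the end.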
6611 Ty = getContext().getCanonicalType(Ty); 6612 auto TyInfo = getContext().getTypeInfoInChars(Ty); 6613 llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty); 6614 llvm::Type *DirectTy = ArgTy; 6615 ABIArgInfo AI = classifyArgumentType(Ty); 6616 bool IsIndirect = AI.isIndirect(); 6617 bool InFPRs = false; 6618 bool IsVector = false; 6619 CharUnits UnpaddedSize; 6620 CharUnits DirectAlign; 6621 if (IsIndirect) { 6622 DirectTy = llvm::PointerType::getUnqual(DirectTy); 6623 UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8); 6624 } else { 6625 if (AI.getCoerceToType()) 6626 ArgTy = AI.getCoerceToType(); 6627 InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy(); 6628 IsVector = ArgTy->isVectorTy(); 6629 UnpaddedSize = TyInfo.first; 6630 DirectAlign = TyInfo.second; 6631 } 6632 CharUnits PaddedSize = CharUnits::fromQuantity(8); 6633 if (IsVector && UnpaddedSize > PaddedSize) 6634 PaddedSize = CharUnits::fromQuantity(16); 6635 assert((UnpaddedSize <= PaddedSize) && "Invalid argument size."); 6636 6637 CharUnits Padding = (PaddedSize - UnpaddedSize); 6638 6639 llvm::Type *IndexTy = CGF.Int64Ty; 6640 llvm::Value *PaddedSizeV = 6641 llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity()); 6642 6643 if (IsVector) { 6644 // Work out the address of a vector argument on the stack. 6645 // Vector arguments are always passed in the high bits of a 6646 // single (8 byte) or double (16 byte) stack slot. 6647 Address OverflowArgAreaPtr = 6648 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr"); 6649 Address OverflowArgArea = 6650 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"), 6651 TyInfo.second); 6652 Address MemAddr = 6653 CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr"); 6654 6655 // Update overflow_arg_area_ptr pointer 6656 llvm::Value *NewOverflowArgArea = 6657 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV, 6658 "overflow_arg_area"); 6659 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr); 6660 6661 return MemAddr; 6662 } 6663 6664 assert(PaddedSize.getQuantity() == 8); 6665 6666 unsigned MaxRegs, RegCountField, RegSaveIndex; 6667 CharUnits RegPadding; 6668 if (InFPRs) { 6669 MaxRegs = 4; // Maximum of 4 FPR arguments 6670 RegCountField = 1; // __fpr 6671 RegSaveIndex = 16; // save offset for f0 6672 RegPadding = CharUnits(); // floats are passed in the high bits of an FPR 6673 } else { 6674 MaxRegs = 5; // Maximum of 5 GPR arguments 6675 RegCountField = 0; // __gpr 6676 RegSaveIndex = 2; // save offset for r2 6677 RegPadding = Padding; // values are passed in the low bits of a GPR 6678 } 6679 6680 Address RegCountPtr = 6681 CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr"); 6682 llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count"); 6683 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs); 6684 llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV, 6685 "fits_in_regs"); 6686 6687 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 6688 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); 6689 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 6690 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); 6691 6692 // Emit code to load the value if it was passed in registers. 6693 CGF.EmitBlock(InRegBlock); 6694 6695 // Work out the address of an argument register. 
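// Sketch of the address computed below:
//   reg_save_area + (RegSaveIndex + reg_count) * 8 + RegPadding
// i.e. the save slot of the next unused GPR/FPR, with RegPadding shifting
// sub-8-byte values into the low bytes of a GPR slot.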
6696 llvm::Value *ScaledRegCount = 6697 CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count"); 6698 llvm::Value *RegBase = 6699 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity() 6700 + RegPadding.getQuantity()); 6701 llvm::Value *RegOffset = 6702 CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset"); 6703 Address RegSaveAreaPtr = 6704 CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr"); 6705 llvm::Value *RegSaveArea = 6706 CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area"); 6707 Address RawRegAddr(CGF.Builder.CreateGEP(RegSaveArea, RegOffset, 6708 "raw_reg_addr"), 6709 PaddedSize); 6710 Address RegAddr = 6711 CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr"); 6712 6713 // Update the register count 6714 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1); 6715 llvm::Value *NewRegCount = 6716 CGF.Builder.CreateAdd(RegCount, One, "reg_count"); 6717 CGF.Builder.CreateStore(NewRegCount, RegCountPtr); 6718 CGF.EmitBranch(ContBlock); 6719 6720 // Emit code to load the value if it was passed in memory. 6721 CGF.EmitBlock(InMemBlock); 6722 6723 // Work out the address of a stack argument. 6724 Address OverflowArgAreaPtr = 6725 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr"); 6726 Address OverflowArgArea = 6727 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"), 6728 PaddedSize); 6729 Address RawMemAddr = 6730 CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr"); 6731 Address MemAddr = 6732 CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr"); 6733 6734 // Update overflow_arg_area_ptr pointer 6735 llvm::Value *NewOverflowArgArea = 6736 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV, 6737 "overflow_arg_area"); 6738 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr); 6739 CGF.EmitBranch(ContBlock); 6740 6741 // Return the appropriate result. 6742 CGF.EmitBlock(ContBlock); 6743 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, 6744 MemAddr, InMemBlock, "va_arg.addr"); 6745 6746 if (IsIndirect) 6747 ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"), 6748 TyInfo.second); 6749 6750 return ResAddr; 6751 } 6752 6753 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const { 6754 if (RetTy->isVoidType()) 6755 return ABIArgInfo::getIgnore(); 6756 if (isVectorArgumentType(RetTy)) 6757 return ABIArgInfo::getDirect(); 6758 if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64) 6759 return getNaturalAlignIndirect(RetTy); 6760 return (isPromotableIntegerType(RetTy) ? ABIArgInfo::getExtend(RetTy) 6761 : ABIArgInfo::getDirect()); 6762 } 6763 6764 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const { 6765 // Handle the generic C++ ABI. 6766 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 6767 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 6768 6769 // Integers and enums are extended to full register width. 6770 if (isPromotableIntegerType(Ty)) 6771 return ABIArgInfo::getExtend(Ty); 6772 6773 // Handle vector types and vector-like structure types. Note that 6774 // as opposed to float-like structure types, we do not allow any 6775 // padding for vector-like structures, so verify the sizes match. 
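// For example (hypothetical types): a struct whose only member is a 16-byte
// vector is passed directly as that vector, whereas a wrapper that carries any
// extra tail padding fails the size check and falls through to the integer or
// indirect handling below.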
6776 uint64_t Size = getContext().getTypeSize(Ty); 6777 QualType SingleElementTy = GetSingleElementType(Ty); 6778 if (isVectorArgumentType(SingleElementTy) && 6779 getContext().getTypeSize(SingleElementTy) == Size) 6780 return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy)); 6781 6782 // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly. 6783 if (Size != 8 && Size != 16 && Size != 32 && Size != 64) 6784 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 6785 6786 // Handle small structures. 6787 if (const RecordType *RT = Ty->getAs<RecordType>()) { 6788 // Structures with flexible arrays have variable length, so really 6789 // fail the size test above. 6790 const RecordDecl *RD = RT->getDecl(); 6791 if (RD->hasFlexibleArrayMember()) 6792 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 6793 6794 // The structure is passed as an unextended integer, a float, or a double. 6795 llvm::Type *PassTy; 6796 if (isFPArgumentType(SingleElementTy)) { 6797 assert(Size == 32 || Size == 64); 6798 if (Size == 32) 6799 PassTy = llvm::Type::getFloatTy(getVMContext()); 6800 else 6801 PassTy = llvm::Type::getDoubleTy(getVMContext()); 6802 } else 6803 PassTy = llvm::IntegerType::get(getVMContext(), Size); 6804 return ABIArgInfo::getDirect(PassTy); 6805 } 6806 6807 // Non-structure compounds are passed indirectly. 6808 if (isCompoundType(Ty)) 6809 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 6810 6811 return ABIArgInfo::getDirect(nullptr); 6812 } 6813 6814 //===----------------------------------------------------------------------===// 6815 // MSP430 ABI Implementation 6816 //===----------------------------------------------------------------------===// 6817 6818 namespace { 6819 6820 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo { 6821 public: 6822 MSP430TargetCodeGenInfo(CodeGenTypes &CGT) 6823 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 6824 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 6825 CodeGen::CodeGenModule &M) const override; 6826 }; 6827 6828 } 6829 6830 void MSP430TargetCodeGenInfo::setTargetAttributes( 6831 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { 6832 if (GV->isDeclaration()) 6833 return; 6834 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 6835 const auto *InterruptAttr = FD->getAttr<MSP430InterruptAttr>(); 6836 if (!InterruptAttr) 6837 return; 6838 6839 // Handle 'interrupt' attribute: 6840 llvm::Function *F = cast<llvm::Function>(GV); 6841 6842 // Step 1: Set ISR calling convention. 6843 F->setCallingConv(llvm::CallingConv::MSP430_INTR); 6844 6845 // Step 2: Add attributes goodness. 6846 F->addFnAttr(llvm::Attribute::NoInline); 6847 F->addFnAttr("interrupt", llvm::utostr(InterruptAttr->getNumber())); 6848 } 6849 } 6850 6851 //===----------------------------------------------------------------------===// 6852 // MIPS ABI Implementation. This works for both little-endian and 6853 // big-endian variants. 
6854 //===----------------------------------------------------------------------===// 6855 6856 namespace { 6857 class MipsABIInfo : public ABIInfo { 6858 bool IsO32; 6859 unsigned MinABIStackAlignInBytes, StackAlignInBytes; 6860 void CoerceToIntArgs(uint64_t TySize, 6861 SmallVectorImpl<llvm::Type *> &ArgList) const; 6862 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const; 6863 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const; 6864 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const; 6865 public: 6866 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) : 6867 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8), 6868 StackAlignInBytes(IsO32 ? 8 : 16) {} 6869 6870 ABIArgInfo classifyReturnType(QualType RetTy) const; 6871 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const; 6872 void computeInfo(CGFunctionInfo &FI) const override; 6873 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6874 QualType Ty) const override; 6875 ABIArgInfo extendType(QualType Ty) const; 6876 }; 6877 6878 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo { 6879 unsigned SizeOfUnwindException; 6880 public: 6881 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32) 6882 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)), 6883 SizeOfUnwindException(IsO32 ? 24 : 32) {} 6884 6885 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { 6886 return 29; 6887 } 6888 6889 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 6890 CodeGen::CodeGenModule &CGM) const override { 6891 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 6892 if (!FD) return; 6893 llvm::Function *Fn = cast<llvm::Function>(GV); 6894 6895 if (FD->hasAttr<MipsLongCallAttr>()) 6896 Fn->addFnAttr("long-call"); 6897 else if (FD->hasAttr<MipsShortCallAttr>()) 6898 Fn->addFnAttr("short-call"); 6899 6900 // Other attributes do not have a meaning for declarations. 
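// (Note: long-call/short-call above are applied even to bare declarations;
// the attributes handled below require a definition.)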
6901 if (GV->isDeclaration()) 6902 return; 6903 6904 if (FD->hasAttr<Mips16Attr>()) { 6905 Fn->addFnAttr("mips16"); 6906 } 6907 else if (FD->hasAttr<NoMips16Attr>()) { 6908 Fn->addFnAttr("nomips16"); 6909 } 6910 6911 if (FD->hasAttr<MicroMipsAttr>()) 6912 Fn->addFnAttr("micromips"); 6913 else if (FD->hasAttr<NoMicroMipsAttr>()) 6914 Fn->addFnAttr("nomicromips"); 6915 6916 const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>(); 6917 if (!Attr) 6918 return; 6919 6920 const char *Kind; 6921 switch (Attr->getInterrupt()) { 6922 case MipsInterruptAttr::eic: Kind = "eic"; break; 6923 case MipsInterruptAttr::sw0: Kind = "sw0"; break; 6924 case MipsInterruptAttr::sw1: Kind = "sw1"; break; 6925 case MipsInterruptAttr::hw0: Kind = "hw0"; break; 6926 case MipsInterruptAttr::hw1: Kind = "hw1"; break; 6927 case MipsInterruptAttr::hw2: Kind = "hw2"; break; 6928 case MipsInterruptAttr::hw3: Kind = "hw3"; break; 6929 case MipsInterruptAttr::hw4: Kind = "hw4"; break; 6930 case MipsInterruptAttr::hw5: Kind = "hw5"; break; 6931 } 6932 6933 Fn->addFnAttr("interrupt", Kind); 6934 6935 } 6936 6937 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 6938 llvm::Value *Address) const override; 6939 6940 unsigned getSizeOfUnwindException() const override { 6941 return SizeOfUnwindException; 6942 } 6943 }; 6944 } 6945 6946 void MipsABIInfo::CoerceToIntArgs( 6947 uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const { 6948 llvm::IntegerType *IntTy = 6949 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8); 6950 6951 // Add (TySize / MinABIStackAlignInBytes) args of IntTy. 6952 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N) 6953 ArgList.push_back(IntTy); 6954 6955 // If necessary, add one more integer type to ArgList. 6956 unsigned R = TySize % (MinABIStackAlignInBytes * 8); 6957 6958 if (R) 6959 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R)); 6960 } 6961 6962 // In N32/64, an aligned double precision floating point field is passed in 6963 // a register. 6964 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const { 6965 SmallVector<llvm::Type*, 8> ArgList, IntArgList; 6966 6967 if (IsO32) { 6968 CoerceToIntArgs(TySize, ArgList); 6969 return llvm::StructType::get(getVMContext(), ArgList); 6970 } 6971 6972 if (Ty->isComplexType()) 6973 return CGT.ConvertType(Ty); 6974 6975 const RecordType *RT = Ty->getAs<RecordType>(); 6976 6977 // Unions/vectors are passed in integer registers. 6978 if (!RT || !RT->isStructureOrClassType()) { 6979 CoerceToIntArgs(TySize, ArgList); 6980 return llvm::StructType::get(getVMContext(), ArgList); 6981 } 6982 6983 const RecordDecl *RD = RT->getDecl(); 6984 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 6985 assert(!(TySize % 8) && "Size of structure must be multiple of 8."); 6986 6987 uint64_t LastOffset = 0; 6988 unsigned idx = 0; 6989 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64); 6990 6991 // Iterate over fields in the struct/class and check if there are any aligned 6992 // double fields. 6993 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 6994 i != e; ++i, ++idx) { 6995 const QualType Ty = i->getType(); 6996 const BuiltinType *BT = Ty->getAs<BuiltinType>(); 6997 6998 if (!BT || BT->getKind() != BuiltinType::Double) 6999 continue; 7000 7001 uint64_t Offset = Layout.getFieldOffset(idx); 7002 if (Offset % 64) // Ignore doubles that are not aligned. 
7003 continue; 7004 7005 // Add ((Offset - LastOffset) / 64) args of type i64. 7006 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j) 7007 ArgList.push_back(I64); 7008 7009 // Add double type. 7010 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext())); 7011 LastOffset = Offset + 64; 7012 } 7013 7014 CoerceToIntArgs(TySize - LastOffset, IntArgList); 7015 ArgList.append(IntArgList.begin(), IntArgList.end()); 7016 7017 return llvm::StructType::get(getVMContext(), ArgList); 7018 } 7019 7020 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset, 7021 uint64_t Offset) const { 7022 if (OrigOffset + MinABIStackAlignInBytes > Offset) 7023 return nullptr; 7024 7025 return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8); 7026 } 7027 7028 ABIArgInfo 7029 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const { 7030 Ty = useFirstFieldIfTransparentUnion(Ty); 7031 7032 uint64_t OrigOffset = Offset; 7033 uint64_t TySize = getContext().getTypeSize(Ty); 7034 uint64_t Align = getContext().getTypeAlign(Ty) / 8; 7035 7036 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes), 7037 (uint64_t)StackAlignInBytes); 7038 unsigned CurrOffset = llvm::alignTo(Offset, Align); 7039 Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8; 7040 7041 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) { 7042 // Ignore empty aggregates. 7043 if (TySize == 0) 7044 return ABIArgInfo::getIgnore(); 7045 7046 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 7047 Offset = OrigOffset + MinABIStackAlignInBytes; 7048 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 7049 } 7050 7051 // If we have reached here, aggregates are passed directly by coercing to 7052 // another structure type. Padding is inserted if the offset of the 7053 // aggregate is unaligned. 7054 ABIArgInfo ArgInfo = 7055 ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0, 7056 getPaddingType(OrigOffset, CurrOffset)); 7057 ArgInfo.setInReg(true); 7058 return ArgInfo; 7059 } 7060 7061 // Treat an enum type as its underlying type. 7062 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 7063 Ty = EnumTy->getDecl()->getIntegerType(); 7064 7065 // All integral types are promoted to the GPR width. 7066 if (Ty->isIntegralOrEnumerationType()) 7067 return extendType(Ty); 7068 7069 return ABIArgInfo::getDirect( 7070 nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset)); 7071 } 7072 7073 llvm::Type* 7074 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const { 7075 const RecordType *RT = RetTy->getAs<RecordType>(); 7076 SmallVector<llvm::Type*, 8> RTList; 7077 7078 if (RT && RT->isStructureOrClassType()) { 7079 const RecordDecl *RD = RT->getDecl(); 7080 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 7081 unsigned FieldCnt = Layout.getFieldCount(); 7082 7083 // N32/64 returns struct/classes in floating point registers if the 7084 // following conditions are met: 7085 // 1. The size of the struct/class is no larger than 128-bit. 7086 // 2. The struct/class has one or two fields all of which are floating 7087 // point types. 7088 // 3. The offset of the first field is zero (this follows what gcc does). 7089 // 7090 // Any other composite results are returned in integer registers. 
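// Illustration (not taken from the ABI text): on N64 a hypothetical
// `struct { float f; double d; }` meets all three conditions and is returned
// in FPRs, while `struct { float f; int i; }` fails condition 2 and falls back
// to the integer-register path.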
7091 // 7092 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) { 7093 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end(); 7094 for (; b != e; ++b) { 7095 const BuiltinType *BT = b->getType()->getAs<BuiltinType>(); 7096 7097 if (!BT || !BT->isFloatingPoint()) 7098 break; 7099 7100 RTList.push_back(CGT.ConvertType(b->getType())); 7101 } 7102 7103 if (b == e) 7104 return llvm::StructType::get(getVMContext(), RTList, 7105 RD->hasAttr<PackedAttr>()); 7106 7107 RTList.clear(); 7108 } 7109 } 7110 7111 CoerceToIntArgs(Size, RTList); 7112 return llvm::StructType::get(getVMContext(), RTList); 7113 } 7114 7115 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const { 7116 uint64_t Size = getContext().getTypeSize(RetTy); 7117 7118 if (RetTy->isVoidType()) 7119 return ABIArgInfo::getIgnore(); 7120 7121 // O32 doesn't treat zero-sized structs differently from other structs. 7122 // However, N32/N64 ignores zero sized return values. 7123 if (!IsO32 && Size == 0) 7124 return ABIArgInfo::getIgnore(); 7125 7126 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) { 7127 if (Size <= 128) { 7128 if (RetTy->isAnyComplexType()) 7129 return ABIArgInfo::getDirect(); 7130 7131 // O32 returns integer vectors in registers and N32/N64 returns all small 7132 // aggregates in registers. 7133 if (!IsO32 || 7134 (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) { 7135 ABIArgInfo ArgInfo = 7136 ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); 7137 ArgInfo.setInReg(true); 7138 return ArgInfo; 7139 } 7140 } 7141 7142 return getNaturalAlignIndirect(RetTy); 7143 } 7144 7145 // Treat an enum type as its underlying type. 7146 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 7147 RetTy = EnumTy->getDecl()->getIntegerType(); 7148 7149 if (RetTy->isPromotableIntegerType()) 7150 return ABIArgInfo::getExtend(RetTy); 7151 7152 if ((RetTy->isUnsignedIntegerOrEnumerationType() || 7153 RetTy->isSignedIntegerOrEnumerationType()) && Size == 32 && !IsO32) 7154 return ABIArgInfo::getSignExtend(RetTy); 7155 7156 return ABIArgInfo::getDirect(); 7157 } 7158 7159 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const { 7160 ABIArgInfo &RetInfo = FI.getReturnInfo(); 7161 if (!getCXXABI().classifyReturnType(FI)) 7162 RetInfo = classifyReturnType(FI.getReturnType()); 7163 7164 // Check if a pointer to an aggregate is passed as a hidden argument. 7165 uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0; 7166 7167 for (auto &I : FI.arguments()) 7168 I.info = classifyArgumentType(I.type, Offset); 7169 } 7170 7171 Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 7172 QualType OrigTy) const { 7173 QualType Ty = OrigTy; 7174 7175 // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64. 7176 // Pointers are also promoted in the same way but this only matters for N32. 7177 unsigned SlotSizeInBits = IsO32 ? 32 : 64; 7178 unsigned PtrWidth = getTarget().getPointerWidth(0); 7179 bool DidPromote = false; 7180 if ((Ty->isIntegerType() && 7181 getContext().getIntWidth(Ty) < SlotSizeInBits) || 7182 (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) { 7183 DidPromote = true; 7184 Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits, 7185 Ty->isSignedIntegerType()); 7186 } 7187 7188 auto TyInfo = getContext().getTypeInfoInChars(Ty); 7189 7190 // The alignment of things in the argument area is never larger than 7191 // StackAlignInBytes. 
7192 TyInfo.second = 7193 std::min(TyInfo.second, CharUnits::fromQuantity(StackAlignInBytes)); 7194 7195 // MinABIStackAlignInBytes is the size of argument slots on the stack. 7196 CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes); 7197 7198 Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false, 7199 TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true); 7200 7201 7202 // If there was a promotion, "unpromote" into a temporary. 7203 // TODO: can we just use a pointer into a subset of the original slot? 7204 if (DidPromote) { 7205 Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp"); 7206 llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr); 7207 7208 // Truncate down to the right width. 7209 llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType() 7210 : CGF.IntPtrTy); 7211 llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy); 7212 if (OrigTy->isPointerType()) 7213 V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType()); 7214 7215 CGF.Builder.CreateStore(V, Temp); 7216 Addr = Temp; 7217 } 7218 7219 return Addr; 7220 } 7221 7222 ABIArgInfo MipsABIInfo::extendType(QualType Ty) const { 7223 int TySize = getContext().getTypeSize(Ty); 7224 7225 // The MIPS64 ABI requires unsigned 32-bit integers to be sign extended. 7226 if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32) 7227 return ABIArgInfo::getSignExtend(Ty); 7228 7229 return ABIArgInfo::getExtend(Ty); 7230 } 7231 7232 bool 7233 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 7234 llvm::Value *Address) const { 7235 // This information comes from gcc's implementation, which seems to 7236 // be as canonical as it gets. 7237 7238 // Everything on MIPS is 4 bytes. Double-precision FP registers 7239 // are aliased to pairs of single-precision FP registers. 7240 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 7241 7242 // 0-31 are the general purpose registers, $0 - $31. 7243 // 32-63 are the floating-point registers, $f0 - $f31. 7244 // 64 and 65 are the multiply/divide registers, $hi and $lo. 7245 // 66 is the (notional, I think) register for signal-handler return. 7246 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65); 7247 7248 // 67-74 are the floating-point status registers, $fcc0 - $fcc7. 7249 // They are one bit wide and ignored here. 7250 7251 // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31. 7252 // (coprocessor 1 is the FP unit) 7253 // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31. 7254 // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31. 7255 // 176-181 are the DSP accumulator registers. 7256 AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181); 7257 return false; 7258 } 7259 7260 //===----------------------------------------------------------------------===// 7261 // AVR ABI Implementation.
7262 //===----------------------------------------------------------------------===// 7263 7264 namespace { 7265 class AVRTargetCodeGenInfo : public TargetCodeGenInfo { 7266 public: 7267 AVRTargetCodeGenInfo(CodeGenTypes &CGT) 7268 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) { } 7269 7270 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 7271 CodeGen::CodeGenModule &CGM) const override { 7272 if (GV->isDeclaration()) 7273 return; 7274 const auto *FD = dyn_cast_or_null<FunctionDecl>(D); 7275 if (!FD) return; 7276 auto *Fn = cast<llvm::Function>(GV); 7277 7278 if (FD->getAttr<AVRInterruptAttr>()) 7279 Fn->addFnAttr("interrupt"); 7280 7281 if (FD->getAttr<AVRSignalAttr>()) 7282 Fn->addFnAttr("signal"); 7283 } 7284 }; 7285 } 7286 7287 //===----------------------------------------------------------------------===// 7288 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults. 7289 // Currently subclassed only to implement custom OpenCL C function attribute 7290 // handling. 7291 //===----------------------------------------------------------------------===// 7292 7293 namespace { 7294 7295 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo { 7296 public: 7297 TCETargetCodeGenInfo(CodeGenTypes &CGT) 7298 : DefaultTargetCodeGenInfo(CGT) {} 7299 7300 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 7301 CodeGen::CodeGenModule &M) const override; 7302 }; 7303 7304 void TCETargetCodeGenInfo::setTargetAttributes( 7305 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { 7306 if (GV->isDeclaration()) 7307 return; 7308 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 7309 if (!FD) return; 7310 7311 llvm::Function *F = cast<llvm::Function>(GV); 7312 7313 if (M.getLangOpts().OpenCL) { 7314 if (FD->hasAttr<OpenCLKernelAttr>()) { 7315 // OpenCL C Kernel functions are not subject to inlining 7316 F->addFnAttr(llvm::Attribute::NoInline); 7317 const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>(); 7318 if (Attr) { 7319 // Convert the reqd_work_group_size() attributes to metadata. 7320 llvm::LLVMContext &Context = F->getContext(); 7321 llvm::NamedMDNode *OpenCLMetadata = 7322 M.getModule().getOrInsertNamedMetadata( 7323 "opencl.kernel_wg_size_info"); 7324 7325 SmallVector<llvm::Metadata *, 5> Operands; 7326 Operands.push_back(llvm::ConstantAsMetadata::get(F)); 7327 7328 Operands.push_back( 7329 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 7330 M.Int32Ty, llvm::APInt(32, Attr->getXDim())))); 7331 Operands.push_back( 7332 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 7333 M.Int32Ty, llvm::APInt(32, Attr->getYDim())))); 7334 Operands.push_back( 7335 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 7336 M.Int32Ty, llvm::APInt(32, Attr->getZDim())))); 7337 7338 // Add a boolean constant operand for "required" (true) or "hint" 7339 // (false) for implementing the work_group_size_hint attr later. 7340 // Currently always true as the hint is not yet implemented. 
7341 Operands.push_back( 7342 llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context))); 7343 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands)); 7344 } 7345 } 7346 } 7347 } 7348 7349 } 7350 7351 //===----------------------------------------------------------------------===// 7352 // Hexagon ABI Implementation 7353 //===----------------------------------------------------------------------===// 7354 7355 namespace { 7356 7357 class HexagonABIInfo : public ABIInfo { 7358 7359 7360 public: 7361 HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 7362 7363 private: 7364 7365 ABIArgInfo classifyReturnType(QualType RetTy) const; 7366 ABIArgInfo classifyArgumentType(QualType RetTy) const; 7367 7368 void computeInfo(CGFunctionInfo &FI) const override; 7369 7370 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 7371 QualType Ty) const override; 7372 }; 7373 7374 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo { 7375 public: 7376 HexagonTargetCodeGenInfo(CodeGenTypes &CGT) 7377 :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {} 7378 7379 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 7380 return 29; 7381 } 7382 }; 7383 7384 } 7385 7386 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const { 7387 if (!getCXXABI().classifyReturnType(FI)) 7388 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 7389 for (auto &I : FI.arguments()) 7390 I.info = classifyArgumentType(I.type); 7391 } 7392 7393 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const { 7394 if (!isAggregateTypeForABI(Ty)) { 7395 // Treat an enum type as its underlying type. 7396 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 7397 Ty = EnumTy->getDecl()->getIntegerType(); 7398 7399 return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty) 7400 : ABIArgInfo::getDirect()); 7401 } 7402 7403 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 7404 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 7405 7406 // Ignore empty records. 7407 if (isEmptyRecord(getContext(), Ty, true)) 7408 return ABIArgInfo::getIgnore(); 7409 7410 uint64_t Size = getContext().getTypeSize(Ty); 7411 if (Size > 64) 7412 return getNaturalAlignIndirect(Ty, /*ByVal=*/true); 7413 // Pass in the smallest viable integer type. 7414 else if (Size > 32) 7415 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext())); 7416 else if (Size > 16) 7417 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 7418 else if (Size > 8) 7419 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 7420 else 7421 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 7422 } 7423 7424 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const { 7425 if (RetTy->isVoidType()) 7426 return ABIArgInfo::getIgnore(); 7427 7428 // Large vector types should be returned via memory. 7429 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64) 7430 return getNaturalAlignIndirect(RetTy); 7431 7432 if (!isAggregateTypeForABI(RetTy)) { 7433 // Treat an enum type as its underlying type. 7434 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 7435 RetTy = EnumTy->getDecl()->getIntegerType(); 7436 7437 return (RetTy->isPromotableIntegerType() ? 
ABIArgInfo::getExtend(RetTy) 7438 : ABIArgInfo::getDirect()); 7439 } 7440 7441 if (isEmptyRecord(getContext(), RetTy, true)) 7442 return ABIArgInfo::getIgnore(); 7443 7444 // Aggregates <= 8 bytes are returned in r0; other aggregates 7445 // are returned indirectly. 7446 uint64_t Size = getContext().getTypeSize(RetTy); 7447 if (Size <= 64) { 7448 // Return in the smallest viable integer type. 7449 if (Size <= 8) 7450 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 7451 if (Size <= 16) 7452 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 7453 if (Size <= 32) 7454 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 7455 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext())); 7456 } 7457 7458 return getNaturalAlignIndirect(RetTy, /*ByVal=*/true); 7459 } 7460 7461 Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 7462 QualType Ty) const { 7463 // FIXME: Someone needs to audit that this handle alignment correctly. 7464 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false, 7465 getContext().getTypeInfoInChars(Ty), 7466 CharUnits::fromQuantity(4), 7467 /*AllowHigherAlign*/ true); 7468 } 7469 7470 //===----------------------------------------------------------------------===// 7471 // Lanai ABI Implementation 7472 //===----------------------------------------------------------------------===// 7473 7474 namespace { 7475 class LanaiABIInfo : public DefaultABIInfo { 7476 public: 7477 LanaiABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} 7478 7479 bool shouldUseInReg(QualType Ty, CCState &State) const; 7480 7481 void computeInfo(CGFunctionInfo &FI) const override { 7482 CCState State(FI.getCallingConvention()); 7483 // Lanai uses 4 registers to pass arguments unless the function has the 7484 // regparm attribute set. 7485 if (FI.getHasRegParm()) { 7486 State.FreeRegs = FI.getRegParm(); 7487 } else { 7488 State.FreeRegs = 4; 7489 } 7490 7491 if (!getCXXABI().classifyReturnType(FI)) 7492 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 7493 for (auto &I : FI.arguments()) 7494 I.info = classifyArgumentType(I.type, State); 7495 } 7496 7497 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const; 7498 ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const; 7499 }; 7500 } // end anonymous namespace 7501 7502 bool LanaiABIInfo::shouldUseInReg(QualType Ty, CCState &State) const { 7503 unsigned Size = getContext().getTypeSize(Ty); 7504 unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U; 7505 7506 if (SizeInRegs == 0) 7507 return false; 7508 7509 if (SizeInRegs > State.FreeRegs) { 7510 State.FreeRegs = 0; 7511 return false; 7512 } 7513 7514 State.FreeRegs -= SizeInRegs; 7515 7516 return true; 7517 } 7518 7519 ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal, 7520 CCState &State) const { 7521 if (!ByVal) { 7522 if (State.FreeRegs) { 7523 --State.FreeRegs; // Non-byval indirects just use one pointer. 7524 return getNaturalAlignIndirectInReg(Ty); 7525 } 7526 return getNaturalAlignIndirect(Ty, false); 7527 } 7528 7529 // Compute the byval alignment. 
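// (Realignment is requested only when the type's natural alignment exceeds
// the 4-byte minimum stack alignment assumed below.)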
7530 const unsigned MinABIStackAlignInBytes = 4; 7531 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8; 7532 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true, 7533 /*Realign=*/TypeAlign > 7534 MinABIStackAlignInBytes); 7535 } 7536 7537 ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty, 7538 CCState &State) const { 7539 // Check with the C++ ABI first. 7540 const RecordType *RT = Ty->getAs<RecordType>(); 7541 if (RT) { 7542 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()); 7543 if (RAA == CGCXXABI::RAA_Indirect) { 7544 return getIndirectResult(Ty, /*ByVal=*/false, State); 7545 } else if (RAA == CGCXXABI::RAA_DirectInMemory) { 7546 return getNaturalAlignIndirect(Ty, /*ByRef=*/true); 7547 } 7548 } 7549 7550 if (isAggregateTypeForABI(Ty)) { 7551 // Structures with flexible arrays are always indirect. 7552 if (RT && RT->getDecl()->hasFlexibleArrayMember()) 7553 return getIndirectResult(Ty, /*ByVal=*/true, State); 7554 7555 // Ignore empty structs/unions. 7556 if (isEmptyRecord(getContext(), Ty, true)) 7557 return ABIArgInfo::getIgnore(); 7558 7559 llvm::LLVMContext &LLVMContext = getVMContext(); 7560 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32; 7561 if (SizeInRegs <= State.FreeRegs) { 7562 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext); 7563 SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32); 7564 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements); 7565 State.FreeRegs -= SizeInRegs; 7566 return ABIArgInfo::getDirectInReg(Result); 7567 } else { 7568 State.FreeRegs = 0; 7569 } 7570 return getIndirectResult(Ty, true, State); 7571 } 7572 7573 // Treat an enum type as its underlying type. 7574 if (const auto *EnumTy = Ty->getAs<EnumType>()) 7575 Ty = EnumTy->getDecl()->getIntegerType(); 7576 7577 bool InReg = shouldUseInReg(Ty, State); 7578 if (Ty->isPromotableIntegerType()) { 7579 if (InReg) 7580 return ABIArgInfo::getDirectInReg(); 7581 return ABIArgInfo::getExtend(Ty); 7582 } 7583 if (InReg) 7584 return ABIArgInfo::getDirectInReg(); 7585 return ABIArgInfo::getDirect(); 7586 } 7587 7588 namespace { 7589 class LanaiTargetCodeGenInfo : public TargetCodeGenInfo { 7590 public: 7591 LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 7592 : TargetCodeGenInfo(new LanaiABIInfo(CGT)) {} 7593 }; 7594 } 7595 7596 //===----------------------------------------------------------------------===// 7597 // AMDGPU ABI Implementation 7598 //===----------------------------------------------------------------------===// 7599 7600 namespace { 7601 7602 class AMDGPUABIInfo final : public DefaultABIInfo { 7603 private: 7604 static const unsigned MaxNumRegsForArgsRet = 16; 7605 7606 unsigned numRegsForType(QualType Ty) const; 7607 7608 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 7609 bool isHomogeneousAggregateSmallEnough(const Type *Base, 7610 uint64_t Members) const override; 7611 7612 public: 7613 explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) : 7614 DefaultABIInfo(CGT) {} 7615 7616 ABIArgInfo classifyReturnType(QualType RetTy) const; 7617 ABIArgInfo classifyKernelArgumentType(QualType Ty) const; 7618 ABIArgInfo classifyArgumentType(QualType Ty, unsigned &NumRegsLeft) const; 7619 7620 void computeInfo(CGFunctionInfo &FI) const override; 7621 }; 7622 7623 bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 7624 return true; 7625 } 7626 7627 bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough( 7628 const Type *Base, uint64_t Members) const { 7629 uint32_t NumRegs = 
(getContext().getTypeSize(Base) + 31) / 32; 7630 7631 // Homogeneous Aggregates may occupy at most 16 registers. 7632 return Members * NumRegs <= MaxNumRegsForArgsRet; 7633 } 7634 7635 /// Estimate number of registers the type will use when passed in registers. 7636 unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const { 7637 unsigned NumRegs = 0; 7638 7639 if (const VectorType *VT = Ty->getAs<VectorType>()) { 7640 // Compute from the number of elements. The reported size is based on the 7641 // in-memory size, which includes the padding 4th element for 3-vectors. 7642 QualType EltTy = VT->getElementType(); 7643 unsigned EltSize = getContext().getTypeSize(EltTy); 7644 7645 // 16-bit element vectors should be passed as packed. 7646 if (EltSize == 16) 7647 return (VT->getNumElements() + 1) / 2; 7648 7649 unsigned EltNumRegs = (EltSize + 31) / 32; 7650 return EltNumRegs * VT->getNumElements(); 7651 } 7652 7653 if (const RecordType *RT = Ty->getAs<RecordType>()) { 7654 const RecordDecl *RD = RT->getDecl(); 7655 assert(!RD->hasFlexibleArrayMember()); 7656 7657 for (const FieldDecl *Field : RD->fields()) { 7658 QualType FieldTy = Field->getType(); 7659 NumRegs += numRegsForType(FieldTy); 7660 } 7661 7662 return NumRegs; 7663 } 7664 7665 return (getContext().getTypeSize(Ty) + 31) / 32; 7666 } 7667 7668 void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const { 7669 llvm::CallingConv::ID CC = FI.getCallingConvention(); 7670 7671 if (!getCXXABI().classifyReturnType(FI)) 7672 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 7673 7674 unsigned NumRegsLeft = MaxNumRegsForArgsRet; 7675 for (auto &Arg : FI.arguments()) { 7676 if (CC == llvm::CallingConv::AMDGPU_KERNEL) { 7677 Arg.info = classifyKernelArgumentType(Arg.type); 7678 } else { 7679 Arg.info = classifyArgumentType(Arg.type, NumRegsLeft); 7680 } 7681 } 7682 } 7683 7684 ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const { 7685 if (isAggregateTypeForABI(RetTy)) { 7686 // Records with non-trivial destructors/copy-constructors should not be 7687 // returned by value. 7688 if (!getRecordArgABI(RetTy, getCXXABI())) { 7689 // Ignore empty structs/unions. 7690 if (isEmptyRecord(getContext(), RetTy, true)) 7691 return ABIArgInfo::getIgnore(); 7692 7693 // Lower single-element structs to just return a regular value. 7694 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) 7695 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); 7696 7697 if (const RecordType *RT = RetTy->getAs<RecordType>()) { 7698 const RecordDecl *RD = RT->getDecl(); 7699 if (RD->hasFlexibleArrayMember()) 7700 return DefaultABIInfo::classifyReturnType(RetTy); 7701 } 7702 7703 // Pack aggregates <= 4 bytes into single VGPR or pair. 7704 uint64_t Size = getContext().getTypeSize(RetTy); 7705 if (Size <= 16) 7706 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 7707 7708 if (Size <= 32) 7709 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 7710 7711 if (Size <= 64) { 7712 llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext()); 7713 return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2)); 7714 } 7715 7716 if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet) 7717 return ABIArgInfo::getDirect(); 7718 } 7719 } 7720 7721 // Otherwise just do the default thing. 7722 return DefaultABIInfo::classifyReturnType(RetTy); 7723 } 7724 7725 /// For kernels all parameters are really passed in a special buffer. 
It doesn't 7726 /// make sense to pass anything byval, so everything must be direct. 7727 ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const { 7728 Ty = useFirstFieldIfTransparentUnion(Ty); 7729 7730 // TODO: Can we omit empty structs? 7731 7732 // Coerce single element structs to its element. 7733 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext())) 7734 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); 7735 7736 // If we set CanBeFlattened to true, CodeGen will expand the struct to its 7737 // individual elements, which confuses the Clover OpenCL backend; therefore we 7738 // have to set it to false here. Other args of getDirect() are just defaults. 7739 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false); 7740 } 7741 7742 ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty, 7743 unsigned &NumRegsLeft) const { 7744 assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow"); 7745 7746 Ty = useFirstFieldIfTransparentUnion(Ty); 7747 7748 if (isAggregateTypeForABI(Ty)) { 7749 // Records with non-trivial destructors/copy-constructors should not be 7750 // passed by value. 7751 if (auto RAA = getRecordArgABI(Ty, getCXXABI())) 7752 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 7753 7754 // Ignore empty structs/unions. 7755 if (isEmptyRecord(getContext(), Ty, true)) 7756 return ABIArgInfo::getIgnore(); 7757 7758 // Lower single-element structs to just pass a regular value. TODO: We 7759 // could do reasonable-size multiple-element structs too, using getExpand(), 7760 // though watch out for things like bitfields. 7761 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext())) 7762 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); 7763 7764 if (const RecordType *RT = Ty->getAs<RecordType>()) { 7765 const RecordDecl *RD = RT->getDecl(); 7766 if (RD->hasFlexibleArrayMember()) 7767 return DefaultABIInfo::classifyArgumentType(Ty); 7768 } 7769 7770 // Pack aggregates <= 8 bytes into single VGPR or pair. 7771 uint64_t Size = getContext().getTypeSize(Ty); 7772 if (Size <= 64) { 7773 unsigned NumRegs = (Size + 31) / 32; 7774 NumRegsLeft -= std::min(NumRegsLeft, NumRegs); 7775 7776 if (Size <= 16) 7777 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 7778 7779 if (Size <= 32) 7780 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 7781 7782 // XXX: Should this be i64 instead, and should the limit increase? 7783 llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext()); 7784 return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2)); 7785 } 7786 7787 if (NumRegsLeft > 0) { 7788 unsigned NumRegs = numRegsForType(Ty); 7789 if (NumRegsLeft >= NumRegs) { 7790 NumRegsLeft -= NumRegs; 7791 return ABIArgInfo::getDirect(); 7792 } 7793 } 7794 } 7795 7796 // Otherwise just do the default thing. 
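// ...while still debiting the shared register budget for whatever the default
// lowering consumes, so that later arguments see an accurate NumRegsLeft.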
7797 ABIArgInfo ArgInfo = DefaultABIInfo::classifyArgumentType(Ty); 7798 if (!ArgInfo.isIndirect()) { 7799 unsigned NumRegs = numRegsForType(Ty); 7800 NumRegsLeft -= std::min(NumRegs, NumRegsLeft); 7801 } 7802 7803 return ArgInfo; 7804 } 7805 7806 class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo { 7807 public: 7808 AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT) 7809 : TargetCodeGenInfo(new AMDGPUABIInfo(CGT)) {} 7810 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 7811 CodeGen::CodeGenModule &M) const override; 7812 unsigned getOpenCLKernelCallingConv() const override; 7813 7814 llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM, 7815 llvm::PointerType *T, QualType QT) const override; 7816 7817 LangAS getASTAllocaAddressSpace() const override { 7818 return getLangASFromTargetAS( 7819 getABIInfo().getDataLayout().getAllocaAddrSpace()); 7820 } 7821 LangAS getGlobalVarAddressSpace(CodeGenModule &CGM, 7822 const VarDecl *D) const override; 7823 llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts, 7824 SyncScope Scope, 7825 llvm::AtomicOrdering Ordering, 7826 llvm::LLVMContext &Ctx) const override; 7827 llvm::Function * 7828 createEnqueuedBlockKernel(CodeGenFunction &CGF, 7829 llvm::Function *BlockInvokeFunc, 7830 llvm::Value *BlockLiteral) const override; 7831 bool shouldEmitStaticExternCAliases() const override; 7832 void setCUDAKernelCallingConvention(const FunctionType *&FT) const override; 7833 }; 7834 } 7835 7836 static bool requiresAMDGPUProtectedVisibility(const Decl *D, 7837 llvm::GlobalValue *GV) { 7838 if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility) 7839 return false; 7840 7841 return D->hasAttr<OpenCLKernelAttr>() || 7842 (isa<FunctionDecl>(D) && D->hasAttr<CUDAGlobalAttr>()) || 7843 (isa<VarDecl>(D) && 7844 (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>())); 7845 } 7846 7847 void AMDGPUTargetCodeGenInfo::setTargetAttributes( 7848 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { 7849 if (requiresAMDGPUProtectedVisibility(D, GV)) { 7850 GV->setVisibility(llvm::GlobalValue::ProtectedVisibility); 7851 GV->setDSOLocal(true); 7852 } 7853 7854 if (GV->isDeclaration()) 7855 return; 7856 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 7857 if (!FD) 7858 return; 7859 7860 llvm::Function *F = cast<llvm::Function>(GV); 7861 7862 const auto *ReqdWGS = M.getLangOpts().OpenCL ? 
7863 FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr; 7864 7865 if (M.getLangOpts().OpenCL && FD->hasAttr<OpenCLKernelAttr>() && 7866 (M.getTriple().getOS() == llvm::Triple::AMDHSA)) 7867 F->addFnAttr("amdgpu-implicitarg-num-bytes", "48"); 7868 7869 const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>(); 7870 if (ReqdWGS || FlatWGS) { 7871 unsigned Min = 0; 7872 unsigned Max = 0; 7873 if (FlatWGS) { 7874 Min = FlatWGS->getMin() 7875 ->EvaluateKnownConstInt(M.getContext()) 7876 .getExtValue(); 7877 Max = FlatWGS->getMax() 7878 ->EvaluateKnownConstInt(M.getContext()) 7879 .getExtValue(); 7880 } 7881 if (ReqdWGS && Min == 0 && Max == 0) 7882 Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim(); 7883 7884 if (Min != 0) { 7885 assert(Min <= Max && "Min must be less than or equal Max"); 7886 7887 std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max); 7888 F->addFnAttr("amdgpu-flat-work-group-size", AttrVal); 7889 } else 7890 assert(Max == 0 && "Max must be zero"); 7891 } 7892 7893 if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>()) { 7894 unsigned Min = 7895 Attr->getMin()->EvaluateKnownConstInt(M.getContext()).getExtValue(); 7896 unsigned Max = Attr->getMax() ? Attr->getMax() 7897 ->EvaluateKnownConstInt(M.getContext()) 7898 .getExtValue() 7899 : 0; 7900 7901 if (Min != 0) { 7902 assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max"); 7903 7904 std::string AttrVal = llvm::utostr(Min); 7905 if (Max != 0) 7906 AttrVal = AttrVal + "," + llvm::utostr(Max); 7907 F->addFnAttr("amdgpu-waves-per-eu", AttrVal); 7908 } else 7909 assert(Max == 0 && "Max must be zero"); 7910 } 7911 7912 if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) { 7913 unsigned NumSGPR = Attr->getNumSGPR(); 7914 7915 if (NumSGPR != 0) 7916 F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR)); 7917 } 7918 7919 if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) { 7920 uint32_t NumVGPR = Attr->getNumVGPR(); 7921 7922 if (NumVGPR != 0) 7923 F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR)); 7924 } 7925 } 7926 7927 unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const { 7928 return llvm::CallingConv::AMDGPU_KERNEL; 7929 } 7930 7931 // Currently LLVM assumes null pointers always have value 0, 7932 // which results in incorrectly transformed IR. Therefore, instead of 7933 // emitting null pointers in private and local address spaces, a null 7934 // pointer in generic address space is emitted which is casted to a 7935 // pointer in local or private address space. 
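// As an illustrative sketch (the address-space numbering comes from the
// target's data layout and is not guaranteed here), a null pointer in the
// private address space would be emitted as something like
//   addrspacecast (i8* null to i8 addrspace(5)*)
// rather than 'i8 addrspace(5)* null'.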
7936 llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer( 7937 const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT, 7938 QualType QT) const { 7939 if (CGM.getContext().getTargetNullPointerValue(QT) == 0) 7940 return llvm::ConstantPointerNull::get(PT); 7941 7942 auto &Ctx = CGM.getContext(); 7943 auto NPT = llvm::PointerType::get(PT->getElementType(), 7944 Ctx.getTargetAddressSpace(LangAS::opencl_generic)); 7945 return llvm::ConstantExpr::getAddrSpaceCast( 7946 llvm::ConstantPointerNull::get(NPT), PT); 7947 } 7948 7949 LangAS 7950 AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM, 7951 const VarDecl *D) const { 7952 assert(!CGM.getLangOpts().OpenCL && 7953 !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) && 7954 "Address space agnostic languages only"); 7955 LangAS DefaultGlobalAS = getLangASFromTargetAS( 7956 CGM.getContext().getTargetAddressSpace(LangAS::opencl_global)); 7957 if (!D) 7958 return DefaultGlobalAS; 7959 7960 LangAS AddrSpace = D->getType().getAddressSpace(); 7961 assert(AddrSpace == LangAS::Default || isTargetAddressSpace(AddrSpace)); 7962 if (AddrSpace != LangAS::Default) 7963 return AddrSpace; 7964 7965 if (CGM.isTypeConstant(D->getType(), false)) { 7966 if (auto ConstAS = CGM.getTarget().getConstantAddressSpace()) 7967 return ConstAS.getValue(); 7968 } 7969 return DefaultGlobalAS; 7970 } 7971 7972 llvm::SyncScope::ID 7973 AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts, 7974 SyncScope Scope, 7975 llvm::AtomicOrdering Ordering, 7976 llvm::LLVMContext &Ctx) const { 7977 std::string Name; 7978 switch (Scope) { 7979 case SyncScope::OpenCLWorkGroup: 7980 Name = "workgroup"; 7981 break; 7982 case SyncScope::OpenCLDevice: 7983 Name = "agent"; 7984 break; 7985 case SyncScope::OpenCLAllSVMDevices: 7986 Name = ""; 7987 break; 7988 case SyncScope::OpenCLSubGroup: 7989 Name = "wavefront"; 7990 } 7991 7992 if (Ordering != llvm::AtomicOrdering::SequentiallyConsistent) { 7993 if (!Name.empty()) 7994 Name = Twine(Twine(Name) + Twine("-")).str(); 7995 7996 Name = Twine(Twine(Name) + Twine("one-as")).str(); 7997 } 7998 7999 return Ctx.getOrInsertSyncScopeID(Name); 8000 } 8001 8002 bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const { 8003 return false; 8004 } 8005 8006 void AMDGPUTargetCodeGenInfo::setCUDAKernelCallingConvention( 8007 const FunctionType *&FT) const { 8008 FT = getABIInfo().getContext().adjustFunctionType( 8009 FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel)); 8010 } 8011 8012 //===----------------------------------------------------------------------===// 8013 // SPARC v8 ABI Implementation. 8014 // Based on the SPARC Compliance Definition version 2.4.1. 8015 // 8016 // Ensures that complex values are passed in registers. 
8017 // 8018 namespace { 8019 class SparcV8ABIInfo : public DefaultABIInfo { 8020 public: 8021 SparcV8ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} 8022 8023 private: 8024 ABIArgInfo classifyReturnType(QualType RetTy) const; 8025 void computeInfo(CGFunctionInfo &FI) const override; 8026 }; 8027 } // end anonymous namespace 8028 8029 8030 ABIArgInfo 8031 SparcV8ABIInfo::classifyReturnType(QualType Ty) const { 8032 if (Ty->isAnyComplexType()) { 8033 return ABIArgInfo::getDirect(); 8034 } 8035 else { 8036 return DefaultABIInfo::classifyReturnType(Ty); 8037 } 8038 } 8039 8040 void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const { 8041 8042 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 8043 for (auto &Arg : FI.arguments()) 8044 Arg.info = classifyArgumentType(Arg.type); 8045 } 8046 8047 namespace { 8048 class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo { 8049 public: 8050 SparcV8TargetCodeGenInfo(CodeGenTypes &CGT) 8051 : TargetCodeGenInfo(new SparcV8ABIInfo(CGT)) {} 8052 }; 8053 } // end anonymous namespace 8054 8055 //===----------------------------------------------------------------------===// 8056 // SPARC v9 ABI Implementation. 8057 // Based on the SPARC Compliance Definition version 2.4.1. 8058 // 8059 // Function arguments a mapped to a nominal "parameter array" and promoted to 8060 // registers depending on their type. Each argument occupies 8 or 16 bytes in 8061 // the array, structs larger than 16 bytes are passed indirectly. 8062 // 8063 // One case requires special care: 8064 // 8065 // struct mixed { 8066 // int i; 8067 // float f; 8068 // }; 8069 // 8070 // When a struct mixed is passed by value, it only occupies 8 bytes in the 8071 // parameter array, but the int is passed in an integer register, and the float 8072 // is passed in a floating point register. This is represented as two arguments 8073 // with the LLVM IR inreg attribute: 8074 // 8075 // declare void f(i32 inreg %i, float inreg %f) 8076 // 8077 // The code generator will only allocate 4 bytes from the parameter array for 8078 // the inreg arguments. All other arguments are allocated a multiple of 8 8079 // bytes. 8080 // 8081 namespace { 8082 class SparcV9ABIInfo : public ABIInfo { 8083 public: 8084 SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 8085 8086 private: 8087 ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const; 8088 void computeInfo(CGFunctionInfo &FI) const override; 8089 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 8090 QualType Ty) const override; 8091 8092 // Coercion type builder for structs passed in registers. The coercion type 8093 // serves two purposes: 8094 // 8095 // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned' 8096 // in registers. 8097 // 2. Expose aligned floating point elements as first-level elements, so the 8098 // code generator knows to pass them in floating point registers. 8099 // 8100 // We also compute the InReg flag which indicates that the struct contains 8101 // aligned 32-bit floats. 8102 // 8103 struct CoerceBuilder { 8104 llvm::LLVMContext &Context; 8105 const llvm::DataLayout &DL; 8106 SmallVector<llvm::Type*, 8> Elems; 8107 uint64_t Size; 8108 bool InReg; 8109 8110 CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl) 8111 : Context(c), DL(dl), Size(0), InReg(false) {} 8112 8113 // Pad Elems with integers until Size is ToSize. 
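// As a worked example of the algorithm below: padding from Size = 32 to
// ToSize = 192 appends an i32 to finish the current 64-bit word and then two
// full i64 words, leaving no trailing in-word padding.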
8114 void pad(uint64_t ToSize) { 8115 assert(ToSize >= Size && "Cannot remove elements"); 8116 if (ToSize == Size) 8117 return; 8118 8119 // Finish the current 64-bit word. 8120 uint64_t Aligned = llvm::alignTo(Size, 64); 8121 if (Aligned > Size && Aligned <= ToSize) { 8122 Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size)); 8123 Size = Aligned; 8124 } 8125 8126 // Add whole 64-bit words. 8127 while (Size + 64 <= ToSize) { 8128 Elems.push_back(llvm::Type::getInt64Ty(Context)); 8129 Size += 64; 8130 } 8131 8132 // Final in-word padding. 8133 if (Size < ToSize) { 8134 Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size)); 8135 Size = ToSize; 8136 } 8137 } 8138 8139 // Add a floating point element at Offset. 8140 void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) { 8141 // Unaligned floats are treated as integers. 8142 if (Offset % Bits) 8143 return; 8144 // The InReg flag is only required if there are any floats < 64 bits. 8145 if (Bits < 64) 8146 InReg = true; 8147 pad(Offset); 8148 Elems.push_back(Ty); 8149 Size = Offset + Bits; 8150 } 8151 8152 // Add a struct type to the coercion type, starting at Offset (in bits). 8153 void addStruct(uint64_t Offset, llvm::StructType *StrTy) { 8154 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy); 8155 for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) { 8156 llvm::Type *ElemTy = StrTy->getElementType(i); 8157 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i); 8158 switch (ElemTy->getTypeID()) { 8159 case llvm::Type::StructTyID: 8160 addStruct(ElemOffset, cast<llvm::StructType>(ElemTy)); 8161 break; 8162 case llvm::Type::FloatTyID: 8163 addFloat(ElemOffset, ElemTy, 32); 8164 break; 8165 case llvm::Type::DoubleTyID: 8166 addFloat(ElemOffset, ElemTy, 64); 8167 break; 8168 case llvm::Type::FP128TyID: 8169 addFloat(ElemOffset, ElemTy, 128); 8170 break; 8171 case llvm::Type::PointerTyID: 8172 if (ElemOffset % 64 == 0) { 8173 pad(ElemOffset); 8174 Elems.push_back(ElemTy); 8175 Size += 64; 8176 } 8177 break; 8178 default: 8179 break; 8180 } 8181 } 8182 } 8183 8184 // Check if Ty is a usable substitute for the coercion type. 8185 bool isUsableType(llvm::StructType *Ty) const { 8186 return llvm::makeArrayRef(Elems) == Ty->elements(); 8187 } 8188 8189 // Get the coercion type as a literal struct type. 8190 llvm::Type *getType() const { 8191 if (Elems.size() == 1) 8192 return Elems.front(); 8193 else 8194 return llvm::StructType::get(Context, Elems); 8195 } 8196 }; 8197 }; 8198 } // end anonymous namespace 8199 8200 ABIArgInfo 8201 SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const { 8202 if (Ty->isVoidType()) 8203 return ABIArgInfo::getIgnore(); 8204 8205 uint64_t Size = getContext().getTypeSize(Ty); 8206 8207 // Anything too big to fit in registers is passed with an explicit indirect 8208 // pointer / sret pointer. 8209 if (Size > SizeLimit) 8210 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 8211 8212 // Treat an enum type as its underlying type. 8213 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 8214 Ty = EnumTy->getDecl()->getIntegerType(); 8215 8216 // Integer types smaller than a register are extended. 8217 if (Size < 64 && Ty->isIntegerType()) 8218 return ABIArgInfo::getExtend(Ty); 8219 8220 // Other non-aggregates go in registers. 
8221 if (!isAggregateTypeForABI(Ty)) 8222 return ABIArgInfo::getDirect(); 8223 8224 // If a C++ object has either a non-trivial copy constructor or a non-trivial 8225 // destructor, it is passed with an explicit indirect pointer / sret pointer. 8226 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 8227 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 8228 8229 // This is a small aggregate type that should be passed in registers. 8230 // Build a coercion type from the LLVM struct type. 8231 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty)); 8232 if (!StrTy) 8233 return ABIArgInfo::getDirect(); 8234 8235 CoerceBuilder CB(getVMContext(), getDataLayout()); 8236 CB.addStruct(0, StrTy); 8237 CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64)); 8238 8239 // Try to use the original type for coercion. 8240 llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType(); 8241 8242 if (CB.InReg) 8243 return ABIArgInfo::getDirectInReg(CoerceTy); 8244 else 8245 return ABIArgInfo::getDirect(CoerceTy); 8246 } 8247 8248 Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 8249 QualType Ty) const { 8250 ABIArgInfo AI = classifyType(Ty, 16 * 8); 8251 llvm::Type *ArgTy = CGT.ConvertType(Ty); 8252 if (AI.canHaveCoerceToType() && !AI.getCoerceToType()) 8253 AI.setCoerceToType(ArgTy); 8254 8255 CharUnits SlotSize = CharUnits::fromQuantity(8); 8256 8257 CGBuilderTy &Builder = CGF.Builder; 8258 Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize); 8259 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy); 8260 8261 auto TypeInfo = getContext().getTypeInfoInChars(Ty); 8262 8263 Address ArgAddr = Address::invalid(); 8264 CharUnits Stride; 8265 switch (AI.getKind()) { 8266 case ABIArgInfo::Expand: 8267 case ABIArgInfo::CoerceAndExpand: 8268 case ABIArgInfo::InAlloca: 8269 llvm_unreachable("Unsupported ABI kind for va_arg"); 8270 8271 case ABIArgInfo::Extend: { 8272 Stride = SlotSize; 8273 CharUnits Offset = SlotSize - TypeInfo.first; 8274 ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend"); 8275 break; 8276 } 8277 8278 case ABIArgInfo::Direct: { 8279 auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType()); 8280 Stride = CharUnits::fromQuantity(AllocSize).alignTo(SlotSize); 8281 ArgAddr = Addr; 8282 break; 8283 } 8284 8285 case ABIArgInfo::Indirect: 8286 Stride = SlotSize; 8287 ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect"); 8288 ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"), 8289 TypeInfo.second); 8290 break; 8291 8292 case ABIArgInfo::Ignore: 8293 return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.second); 8294 } 8295 8296 // Update VAList. 
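// The SPARC V9 va_list is a single pointer, so updating it is just a byte
// GEP by the Stride computed above (always a multiple of the 8-byte slot
// size), stored back through VAListAddr.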
8297 Address NextPtr = Builder.CreateConstInBoundsByteGEP(Addr, Stride, "ap.next"); 8298 Builder.CreateStore(NextPtr.getPointer(), VAListAddr); 8299 8300 return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr"); 8301 } 8302 8303 void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const { 8304 FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8); 8305 for (auto &I : FI.arguments()) 8306 I.info = classifyType(I.type, 16 * 8); 8307 } 8308 8309 namespace { 8310 class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo { 8311 public: 8312 SparcV9TargetCodeGenInfo(CodeGenTypes &CGT) 8313 : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {} 8314 8315 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 8316 return 14; 8317 } 8318 8319 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 8320 llvm::Value *Address) const override; 8321 }; 8322 } // end anonymous namespace 8323 8324 bool 8325 SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 8326 llvm::Value *Address) const { 8327 // This is calculated from the LLVM and GCC tables and verified 8328 // against gcc output. AFAIK all ABIs use the same encoding. 8329 8330 CodeGen::CGBuilderTy &Builder = CGF.Builder; 8331 8332 llvm::IntegerType *i8 = CGF.Int8Ty; 8333 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 8334 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 8335 8336 // 0-31: the 8-byte general-purpose registers 8337 AssignToArrayRange(Builder, Address, Eight8, 0, 31); 8338 8339 // 32-63: f0-31, the 4-byte floating-point registers 8340 AssignToArrayRange(Builder, Address, Four8, 32, 63); 8341 8342 // Y = 64 8343 // PSR = 65 8344 // WIM = 66 8345 // TBR = 67 8346 // PC = 68 8347 // NPC = 69 8348 // FSR = 70 8349 // CSR = 71 8350 AssignToArrayRange(Builder, Address, Eight8, 64, 71); 8351 8352 // 72-87: d0-15, the 8-byte floating-point registers 8353 AssignToArrayRange(Builder, Address, Eight8, 72, 87); 8354 8355 return false; 8356 } 8357 8358 // ARC ABI implementation. 8359 namespace { 8360 8361 class ARCABIInfo : public DefaultABIInfo { 8362 public: 8363 using DefaultABIInfo::DefaultABIInfo; 8364 8365 private: 8366 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 8367 QualType Ty) const override; 8368 8369 void updateState(const ABIArgInfo &Info, QualType Ty, CCState &State) const { 8370 if (!State.FreeRegs) 8371 return; 8372 if (Info.isIndirect() && Info.getInReg()) 8373 State.FreeRegs--; 8374 else if (Info.isDirect() && Info.getInReg()) { 8375 unsigned sz = (getContext().getTypeSize(Ty) + 31) / 32; 8376 if (sz < State.FreeRegs) 8377 State.FreeRegs -= sz; 8378 else 8379 State.FreeRegs = 0; 8380 } 8381 } 8382 8383 void computeInfo(CGFunctionInfo &FI) const override { 8384 CCState State(FI.getCallingConvention()); 8385 // ARC uses 8 registers to pass arguments. 
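// (Nominally r0-r7. Note that updateState() above also debits this budget
// for a return value that lands in registers, before any argument is
// classified.)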
8386 State.FreeRegs = 8; 8387 8388 if (!getCXXABI().classifyReturnType(FI)) 8389 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 8390 updateState(FI.getReturnInfo(), FI.getReturnType(), State); 8391 for (auto &I : FI.arguments()) { 8392 I.info = classifyArgumentType(I.type, State.FreeRegs); 8393 updateState(I.info, I.type, State); 8394 } 8395 } 8396 8397 ABIArgInfo getIndirectByRef(QualType Ty, bool HasFreeRegs) const; 8398 ABIArgInfo getIndirectByValue(QualType Ty) const; 8399 ABIArgInfo classifyArgumentType(QualType Ty, uint8_t FreeRegs) const; 8400 ABIArgInfo classifyReturnType(QualType RetTy) const; 8401 }; 8402 8403 class ARCTargetCodeGenInfo : public TargetCodeGenInfo { 8404 public: 8405 ARCTargetCodeGenInfo(CodeGenTypes &CGT) 8406 : TargetCodeGenInfo(new ARCABIInfo(CGT)) {} 8407 }; 8408 8409 8410 ABIArgInfo ARCABIInfo::getIndirectByRef(QualType Ty, bool HasFreeRegs) const { 8411 return HasFreeRegs ? getNaturalAlignIndirectInReg(Ty) : 8412 getNaturalAlignIndirect(Ty, false); 8413 } 8414 8415 ABIArgInfo ARCABIInfo::getIndirectByValue(QualType Ty) const { 8416 // Compute the byval alignment. 8417 const unsigned MinABIStackAlignInBytes = 4; 8418 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8; 8419 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true, 8420 TypeAlign > MinABIStackAlignInBytes); 8421 } 8422 8423 Address ARCABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 8424 QualType Ty) const { 8425 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false, 8426 getContext().getTypeInfoInChars(Ty), 8427 CharUnits::fromQuantity(4), true); 8428 } 8429 8430 ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty, 8431 uint8_t FreeRegs) const { 8432 // Handle the generic C++ ABI. 8433 const RecordType *RT = Ty->getAs<RecordType>(); 8434 if (RT) { 8435 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()); 8436 if (RAA == CGCXXABI::RAA_Indirect) 8437 return getIndirectByRef(Ty, FreeRegs > 0); 8438 8439 if (RAA == CGCXXABI::RAA_DirectInMemory) 8440 return getIndirectByValue(Ty); 8441 } 8442 8443 // Treat an enum type as its underlying type. 8444 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 8445 Ty = EnumTy->getDecl()->getIntegerType(); 8446 8447 auto SizeInRegs = llvm::alignTo(getContext().getTypeSize(Ty), 32) / 32; 8448 8449 if (isAggregateTypeForABI(Ty)) { 8450 // Structures with flexible arrays are always indirect. 8451 if (RT && RT->getDecl()->hasFlexibleArrayMember()) 8452 return getIndirectByValue(Ty); 8453 8454 // Ignore empty structs/unions. 8455 if (isEmptyRecord(getContext(), Ty, true)) 8456 return ABIArgInfo::getIgnore(); 8457 8458 llvm::LLVMContext &LLVMContext = getVMContext(); 8459 8460 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext); 8461 SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32); 8462 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements); 8463 8464 return FreeRegs >= SizeInRegs ? 8465 ABIArgInfo::getDirectInReg(Result) : 8466 ABIArgInfo::getDirect(Result, 0, nullptr, false); 8467 } 8468 8469 return Ty->isPromotableIntegerType() ? 8470 (FreeRegs >= SizeInRegs ? ABIArgInfo::getExtendInReg(Ty) : 8471 ABIArgInfo::getExtend(Ty)) : 8472 (FreeRegs >= SizeInRegs ? ABIArgInfo::getDirectInReg() : 8473 ABIArgInfo::getDirect()); 8474 } 8475 8476 ABIArgInfo ARCABIInfo::classifyReturnType(QualType RetTy) const { 8477 if (RetTy->isAnyComplexType()) 8478 return ABIArgInfo::getDirectInReg(); 8479 8480 // Arguments of size > 4 registers are indirect. 
8481 auto RetSize = llvm::alignTo(getContext().getTypeSize(RetTy), 32) / 32; 8482 if (RetSize > 4) 8483 return getIndirectByRef(RetTy, /*HasFreeRegs*/ true); 8484 8485 return DefaultABIInfo::classifyReturnType(RetTy); 8486 } 8487 8488 } // End anonymous namespace. 8489 8490 //===----------------------------------------------------------------------===// 8491 // XCore ABI Implementation 8492 //===----------------------------------------------------------------------===// 8493 8494 namespace { 8495 8496 /// A SmallStringEnc instance is used to build up the TypeString by passing 8497 /// it by reference between functions that append to it. 8498 typedef llvm::SmallString<128> SmallStringEnc; 8499 8500 /// TypeStringCache caches the meta encodings of Types. 8501 /// 8502 /// The reason for caching TypeStrings is two fold: 8503 /// 1. To cache a type's encoding for later uses; 8504 /// 2. As a means to break recursive member type inclusion. 8505 /// 8506 /// A cache Entry can have a Status of: 8507 /// NonRecursive: The type encoding is not recursive; 8508 /// Recursive: The type encoding is recursive; 8509 /// Incomplete: An incomplete TypeString; 8510 /// IncompleteUsed: An incomplete TypeString that has been used in a 8511 /// Recursive type encoding. 8512 /// 8513 /// A NonRecursive entry will have all of its sub-members expanded as fully 8514 /// as possible. Whilst it may contain types which are recursive, the type 8515 /// itself is not recursive and thus its encoding may be safely used whenever 8516 /// the type is encountered. 8517 /// 8518 /// A Recursive entry will have all of its sub-members expanded as fully as 8519 /// possible. The type itself is recursive and it may contain other types which 8520 /// are recursive. The Recursive encoding must not be used during the expansion 8521 /// of a recursive type's recursive branch. For simplicity the code uses 8522 /// IncompleteCount to reject all usage of Recursive encodings for member types. 8523 /// 8524 /// An Incomplete entry is always a RecordType and only encodes its 8525 /// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and 8526 /// are placed into the cache during type expansion as a means to identify and 8527 /// handle recursive inclusion of types as sub-members. If there is recursion 8528 /// the entry becomes IncompleteUsed. 8529 /// 8530 /// During the expansion of a RecordType's members: 8531 /// 8532 /// If the cache contains a NonRecursive encoding for the member type, the 8533 /// cached encoding is used; 8534 /// 8535 /// If the cache contains a Recursive encoding for the member type, the 8536 /// cached encoding is 'Swapped' out, as it may be incorrect, and... 8537 /// 8538 /// If the member is a RecordType, an Incomplete encoding is placed into the 8539 /// cache to break potential recursive inclusion of itself as a sub-member; 8540 /// 8541 /// Once a member RecordType has been expanded, its temporary incomplete 8542 /// entry is removed from the cache. If a Recursive encoding was swapped out 8543 /// it is swapped back in; 8544 /// 8545 /// If an incomplete entry is used to expand a sub-member, the incomplete 8546 /// entry is marked as IncompleteUsed. The cache keeps count of how many 8547 /// IncompleteUsed entries it currently contains in IncompleteUsedCount; 8548 /// 8549 /// If a member's encoding is found to be a NonRecursive or Recursive viz: 8550 /// IncompleteUsedCount==0, the member's encoding is added to the cache. 
8551 /// Else the member is part of a recursive type and thus the recursion has 8552 /// been exited too soon for the encoding to be correct for the member. 8553 /// 8554 class TypeStringCache { 8555 enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed}; 8556 struct Entry { 8557 std::string Str; // The encoded TypeString for the type. 8558 enum Status State; // Information about the encoding in 'Str'. 8559 std::string Swapped; // A temporary place holder for a Recursive encoding 8560 // during the expansion of RecordType's members. 8561 }; 8562 std::map<const IdentifierInfo *, struct Entry> Map; 8563 unsigned IncompleteCount; // Number of Incomplete entries in the Map. 8564 unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map. 8565 public: 8566 TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {} 8567 void addIncomplete(const IdentifierInfo *ID, std::string StubEnc); 8568 bool removeIncomplete(const IdentifierInfo *ID); 8569 void addIfComplete(const IdentifierInfo *ID, StringRef Str, 8570 bool IsRecursive); 8571 StringRef lookupStr(const IdentifierInfo *ID); 8572 }; 8573 8574 /// TypeString encodings for enum & union fields must be order. 8575 /// FieldEncoding is a helper for this ordering process. 8576 class FieldEncoding { 8577 bool HasName; 8578 std::string Enc; 8579 public: 8580 FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {} 8581 StringRef str() { return Enc; } 8582 bool operator<(const FieldEncoding &rhs) const { 8583 if (HasName != rhs.HasName) return HasName; 8584 return Enc < rhs.Enc; 8585 } 8586 }; 8587 8588 class XCoreABIInfo : public DefaultABIInfo { 8589 public: 8590 XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} 8591 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 8592 QualType Ty) const override; 8593 }; 8594 8595 class XCoreTargetCodeGenInfo : public TargetCodeGenInfo { 8596 mutable TypeStringCache TSC; 8597 public: 8598 XCoreTargetCodeGenInfo(CodeGenTypes &CGT) 8599 :TargetCodeGenInfo(new XCoreABIInfo(CGT)) {} 8600 void emitTargetMD(const Decl *D, llvm::GlobalValue *GV, 8601 CodeGen::CodeGenModule &M) const override; 8602 }; 8603 8604 } // End anonymous namespace. 8605 8606 // TODO: this implementation is likely now redundant with the default 8607 // EmitVAArg. 8608 Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 8609 QualType Ty) const { 8610 CGBuilderTy &Builder = CGF.Builder; 8611 8612 // Get the VAList. 8613 CharUnits SlotSize = CharUnits::fromQuantity(4); 8614 Address AP(Builder.CreateLoad(VAListAddr), SlotSize); 8615 8616 // Handle the argument. 
8617 ABIArgInfo AI = classifyArgumentType(Ty); 8618 CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty); 8619 llvm::Type *ArgTy = CGT.ConvertType(Ty); 8620 if (AI.canHaveCoerceToType() && !AI.getCoerceToType()) 8621 AI.setCoerceToType(ArgTy); 8622 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy); 8623 8624 Address Val = Address::invalid(); 8625 CharUnits ArgSize = CharUnits::Zero(); 8626 switch (AI.getKind()) { 8627 case ABIArgInfo::Expand: 8628 case ABIArgInfo::CoerceAndExpand: 8629 case ABIArgInfo::InAlloca: 8630 llvm_unreachable("Unsupported ABI kind for va_arg"); 8631 case ABIArgInfo::Ignore: 8632 Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign); 8633 ArgSize = CharUnits::Zero(); 8634 break; 8635 case ABIArgInfo::Extend: 8636 case ABIArgInfo::Direct: 8637 Val = Builder.CreateBitCast(AP, ArgPtrTy); 8638 ArgSize = CharUnits::fromQuantity( 8639 getDataLayout().getTypeAllocSize(AI.getCoerceToType())); 8640 ArgSize = ArgSize.alignTo(SlotSize); 8641 break; 8642 case ABIArgInfo::Indirect: 8643 Val = Builder.CreateElementBitCast(AP, ArgPtrTy); 8644 Val = Address(Builder.CreateLoad(Val), TypeAlign); 8645 ArgSize = SlotSize; 8646 break; 8647 } 8648 8649 // Increment the VAList. 8650 if (!ArgSize.isZero()) { 8651 Address APN = Builder.CreateConstInBoundsByteGEP(AP, ArgSize); 8652 Builder.CreateStore(APN.getPointer(), VAListAddr); 8653 } 8654 8655 return Val; 8656 } 8657 8658 /// During the expansion of a RecordType, an incomplete TypeString is placed 8659 /// into the cache as a means to identify and break recursion. 8660 /// If there is a Recursive encoding in the cache, it is swapped out and will 8661 /// be reinserted by removeIncomplete(). 8662 /// All other types of encoding should have been used rather than arriving here. 8663 void TypeStringCache::addIncomplete(const IdentifierInfo *ID, 8664 std::string StubEnc) { 8665 if (!ID) 8666 return; 8667 Entry &E = Map[ID]; 8668 assert( (E.Str.empty() || E.State == Recursive) && 8669 "Incorrectly use of addIncomplete"); 8670 assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()"); 8671 E.Swapped.swap(E.Str); // swap out the Recursive 8672 E.Str.swap(StubEnc); 8673 E.State = Incomplete; 8674 ++IncompleteCount; 8675 } 8676 8677 /// Once the RecordType has been expanded, the temporary incomplete TypeString 8678 /// must be removed from the cache. 8679 /// If a Recursive was swapped out by addIncomplete(), it will be replaced. 8680 /// Returns true if the RecordType was defined recursively. 8681 bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) { 8682 if (!ID) 8683 return false; 8684 auto I = Map.find(ID); 8685 assert(I != Map.end() && "Entry not present"); 8686 Entry &E = I->second; 8687 assert( (E.State == Incomplete || 8688 E.State == IncompleteUsed) && 8689 "Entry must be an incomplete type"); 8690 bool IsRecursive = false; 8691 if (E.State == IncompleteUsed) { 8692 // We made use of our Incomplete encoding, thus we are recursive. 8693 IsRecursive = true; 8694 --IncompleteUsedCount; 8695 } 8696 if (E.Swapped.empty()) 8697 Map.erase(I); 8698 else { 8699 // Swap the Recursive back. 8700 E.Swapped.swap(E.Str); 8701 E.Swapped.clear(); 8702 E.State = Recursive; 8703 } 8704 --IncompleteCount; 8705 return IsRecursive; 8706 } 8707 8708 /// Add the encoded TypeString to the cache only if it is NonRecursive or 8709 /// Recursive (viz: all sub-members were expanded as fully as possible). 
8710 void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str, 8711 bool IsRecursive) { 8712 if (!ID || IncompleteUsedCount) 8713 return; // No key or it is is an incomplete sub-type so don't add. 8714 Entry &E = Map[ID]; 8715 if (IsRecursive && !E.Str.empty()) { 8716 assert(E.State==Recursive && E.Str.size() == Str.size() && 8717 "This is not the same Recursive entry"); 8718 // The parent container was not recursive after all, so we could have used 8719 // this Recursive sub-member entry after all, but we assumed the worse when 8720 // we started viz: IncompleteCount!=0. 8721 return; 8722 } 8723 assert(E.Str.empty() && "Entry already present"); 8724 E.Str = Str.str(); 8725 E.State = IsRecursive? Recursive : NonRecursive; 8726 } 8727 8728 /// Return a cached TypeString encoding for the ID. If there isn't one, or we 8729 /// are recursively expanding a type (IncompleteCount != 0) and the cached 8730 /// encoding is Recursive, return an empty StringRef. 8731 StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) { 8732 if (!ID) 8733 return StringRef(); // We have no key. 8734 auto I = Map.find(ID); 8735 if (I == Map.end()) 8736 return StringRef(); // We have no encoding. 8737 Entry &E = I->second; 8738 if (E.State == Recursive && IncompleteCount) 8739 return StringRef(); // We don't use Recursive encodings for member types. 8740 8741 if (E.State == Incomplete) { 8742 // The incomplete type is being used to break out of recursion. 8743 E.State = IncompleteUsed; 8744 ++IncompleteUsedCount; 8745 } 8746 return E.Str; 8747 } 8748 8749 /// The XCore ABI includes a type information section that communicates symbol 8750 /// type information to the linker. The linker uses this information to verify 8751 /// safety/correctness of things such as array bound and pointers et al. 8752 /// The ABI only requires C (and XC) language modules to emit TypeStrings. 8753 /// This type information (TypeString) is emitted into meta data for all global 8754 /// symbols: definitions, declarations, functions & variables. 8755 /// 8756 /// The TypeString carries type, qualifier, name, size & value details. 8757 /// Please see 'Tools Development Guide' section 2.16.2 for format details: 8758 /// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf 8759 /// The output is tested by test/CodeGen/xcore-stringtype.c. 8760 /// 8761 static bool getTypeString(SmallStringEnc &Enc, const Decl *D, 8762 CodeGen::CodeGenModule &CGM, TypeStringCache &TSC); 8763 8764 /// XCore uses emitTargetMD to emit TypeString metadata for global symbols. 
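/// A rough sketch of the resulting module-level metadata (the exact IR
/// spelling depends on the pointer types involved): for a C function
/// 'void f(int)' this adds an operand to !xcore.typestrings pairing @f with
/// the string "f{0}(si)".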
8765 void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV, 8766 CodeGen::CodeGenModule &CGM) const { 8767 SmallStringEnc Enc; 8768 if (getTypeString(Enc, D, CGM, TSC)) { 8769 llvm::LLVMContext &Ctx = CGM.getModule().getContext(); 8770 llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV), 8771 llvm::MDString::get(Ctx, Enc.str())}; 8772 llvm::NamedMDNode *MD = 8773 CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings"); 8774 MD->addOperand(llvm::MDNode::get(Ctx, MDVals)); 8775 } 8776 } 8777 8778 //===----------------------------------------------------------------------===// 8779 // SPIR ABI Implementation 8780 //===----------------------------------------------------------------------===// 8781 8782 namespace { 8783 class SPIRTargetCodeGenInfo : public TargetCodeGenInfo { 8784 public: 8785 SPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 8786 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 8787 unsigned getOpenCLKernelCallingConv() const override; 8788 }; 8789 8790 } // End anonymous namespace. 8791 8792 namespace clang { 8793 namespace CodeGen { 8794 void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) { 8795 DefaultABIInfo SPIRABI(CGM.getTypes()); 8796 SPIRABI.computeInfo(FI); 8797 } 8798 } 8799 } 8800 8801 unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const { 8802 return llvm::CallingConv::SPIR_KERNEL; 8803 } 8804 8805 static bool appendType(SmallStringEnc &Enc, QualType QType, 8806 const CodeGen::CodeGenModule &CGM, 8807 TypeStringCache &TSC); 8808 8809 /// Helper function for appendRecordType(). 8810 /// Builds a SmallVector containing the encoded field types in declaration 8811 /// order. 8812 static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE, 8813 const RecordDecl *RD, 8814 const CodeGen::CodeGenModule &CGM, 8815 TypeStringCache &TSC) { 8816 for (const auto *Field : RD->fields()) { 8817 SmallStringEnc Enc; 8818 Enc += "m("; 8819 Enc += Field->getName(); 8820 Enc += "){"; 8821 if (Field->isBitField()) { 8822 Enc += "b("; 8823 llvm::raw_svector_ostream OS(Enc); 8824 OS << Field->getBitWidthValue(CGM.getContext()); 8825 Enc += ':'; 8826 } 8827 if (!appendType(Enc, Field->getType(), CGM, TSC)) 8828 return false; 8829 if (Field->isBitField()) 8830 Enc += ')'; 8831 Enc += '}'; 8832 FE.emplace_back(!Field->getName().empty(), Enc); 8833 } 8834 return true; 8835 } 8836 8837 /// Appends structure and union types to Enc and adds encoding to cache. 8838 /// Recursively calls appendType (via extractFieldType) for each field. 8839 /// Union types have their fields ordered according to the ABI. 8840 static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT, 8841 const CodeGen::CodeGenModule &CGM, 8842 TypeStringCache &TSC, const IdentifierInfo *ID) { 8843 // Append the cached TypeString if we have one. 8844 StringRef TypeString = TSC.lookupStr(ID); 8845 if (!TypeString.empty()) { 8846 Enc += TypeString; 8847 return true; 8848 } 8849 8850 // Start to emit an incomplete TypeString. 8851 size_t Start = Enc.size(); 8852 Enc += (RT->isUnionType()? 'u' : 's'); 8853 Enc += '('; 8854 if (ID) 8855 Enc += ID->getName(); 8856 Enc += "){"; 8857 8858 // We collect all encoded fields and order as necessary. 
8859 bool IsRecursive = false; 8860 const RecordDecl *RD = RT->getDecl()->getDefinition(); 8861 if (RD && !RD->field_empty()) { 8862 // An incomplete TypeString stub is placed in the cache for this RecordType 8863 // so that recursive calls to this RecordType will use it whilst building a 8864 // complete TypeString for this RecordType. 8865 SmallVector<FieldEncoding, 16> FE; 8866 std::string StubEnc(Enc.substr(Start).str()); 8867 StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString. 8868 TSC.addIncomplete(ID, std::move(StubEnc)); 8869 if (!extractFieldType(FE, RD, CGM, TSC)) { 8870 (void) TSC.removeIncomplete(ID); 8871 return false; 8872 } 8873 IsRecursive = TSC.removeIncomplete(ID); 8874 // The ABI requires unions to be sorted but not structures. 8875 // See FieldEncoding::operator< for sort algorithm. 8876 if (RT->isUnionType()) 8877 llvm::sort(FE); 8878 // We can now complete the TypeString. 8879 unsigned E = FE.size(); 8880 for (unsigned I = 0; I != E; ++I) { 8881 if (I) 8882 Enc += ','; 8883 Enc += FE[I].str(); 8884 } 8885 } 8886 Enc += '}'; 8887 TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive); 8888 return true; 8889 } 8890 8891 /// Appends enum types to Enc and adds the encoding to the cache. 8892 static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET, 8893 TypeStringCache &TSC, 8894 const IdentifierInfo *ID) { 8895 // Append the cached TypeString if we have one. 8896 StringRef TypeString = TSC.lookupStr(ID); 8897 if (!TypeString.empty()) { 8898 Enc += TypeString; 8899 return true; 8900 } 8901 8902 size_t Start = Enc.size(); 8903 Enc += "e("; 8904 if (ID) 8905 Enc += ID->getName(); 8906 Enc += "){"; 8907 8908 // We collect all encoded enumerations and order them alphanumerically. 8909 if (const EnumDecl *ED = ET->getDecl()->getDefinition()) { 8910 SmallVector<FieldEncoding, 16> FE; 8911 for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E; 8912 ++I) { 8913 SmallStringEnc EnumEnc; 8914 EnumEnc += "m("; 8915 EnumEnc += I->getName(); 8916 EnumEnc += "){"; 8917 I->getInitVal().toString(EnumEnc); 8918 EnumEnc += '}'; 8919 FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc)); 8920 } 8921 llvm::sort(FE); 8922 unsigned E = FE.size(); 8923 for (unsigned I = 0; I != E; ++I) { 8924 if (I) 8925 Enc += ','; 8926 Enc += FE[I].str(); 8927 } 8928 } 8929 Enc += '}'; 8930 TSC.addIfComplete(ID, Enc.substr(Start), false); 8931 return true; 8932 } 8933 8934 /// Appends type's qualifier to Enc. 8935 /// This is done prior to appending the type's encoding. 8936 static void appendQualifier(SmallStringEnc &Enc, QualType QT) { 8937 // Qualifiers are emitted in alphabetical order. 8938 static const char *const Table[]={"","c:","r:","cr:","v:","cv:","rv:","crv:"}; 8939 int Lookup = 0; 8940 if (QT.isConstQualified()) 8941 Lookup += 1<<0; 8942 if (QT.isRestrictQualified()) 8943 Lookup += 1<<1; 8944 if (QT.isVolatileQualified()) 8945 Lookup += 1<<2; 8946 Enc += Table[Lookup]; 8947 } 8948 8949 /// Appends built-in types to Enc. 
8950 static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) { 8951 const char *EncType; 8952 switch (BT->getKind()) { 8953 case BuiltinType::Void: 8954 EncType = "0"; 8955 break; 8956 case BuiltinType::Bool: 8957 EncType = "b"; 8958 break; 8959 case BuiltinType::Char_U: 8960 EncType = "uc"; 8961 break; 8962 case BuiltinType::UChar: 8963 EncType = "uc"; 8964 break; 8965 case BuiltinType::SChar: 8966 EncType = "sc"; 8967 break; 8968 case BuiltinType::UShort: 8969 EncType = "us"; 8970 break; 8971 case BuiltinType::Short: 8972 EncType = "ss"; 8973 break; 8974 case BuiltinType::UInt: 8975 EncType = "ui"; 8976 break; 8977 case BuiltinType::Int: 8978 EncType = "si"; 8979 break; 8980 case BuiltinType::ULong: 8981 EncType = "ul"; 8982 break; 8983 case BuiltinType::Long: 8984 EncType = "sl"; 8985 break; 8986 case BuiltinType::ULongLong: 8987 EncType = "ull"; 8988 break; 8989 case BuiltinType::LongLong: 8990 EncType = "sll"; 8991 break; 8992 case BuiltinType::Float: 8993 EncType = "ft"; 8994 break; 8995 case BuiltinType::Double: 8996 EncType = "d"; 8997 break; 8998 case BuiltinType::LongDouble: 8999 EncType = "ld"; 9000 break; 9001 default: 9002 return false; 9003 } 9004 Enc += EncType; 9005 return true; 9006 } 9007 9008 /// Appends a pointer encoding to Enc before calling appendType for the pointee. 9009 static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT, 9010 const CodeGen::CodeGenModule &CGM, 9011 TypeStringCache &TSC) { 9012 Enc += "p("; 9013 if (!appendType(Enc, PT->getPointeeType(), CGM, TSC)) 9014 return false; 9015 Enc += ')'; 9016 return true; 9017 } 9018 9019 /// Appends array encoding to Enc before calling appendType for the element. 9020 static bool appendArrayType(SmallStringEnc &Enc, QualType QT, 9021 const ArrayType *AT, 9022 const CodeGen::CodeGenModule &CGM, 9023 TypeStringCache &TSC, StringRef NoSizeEnc) { 9024 if (AT->getSizeModifier() != ArrayType::Normal) 9025 return false; 9026 Enc += "a("; 9027 if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) 9028 CAT->getSize().toStringUnsigned(Enc); 9029 else 9030 Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "". 9031 Enc += ':'; 9032 // The Qualifiers should be attached to the type rather than the array. 9033 appendQualifier(Enc, QT); 9034 if (!appendType(Enc, AT->getElementType(), CGM, TSC)) 9035 return false; 9036 Enc += ')'; 9037 return true; 9038 } 9039 9040 /// Appends a function encoding to Enc, calling appendType for the return type 9041 /// and the arguments. 9042 static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT, 9043 const CodeGen::CodeGenModule &CGM, 9044 TypeStringCache &TSC) { 9045 Enc += "f{"; 9046 if (!appendType(Enc, FT->getReturnType(), CGM, TSC)) 9047 return false; 9048 Enc += "}("; 9049 if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) { 9050 // N.B. we are only interested in the adjusted param types. 9051 auto I = FPT->param_type_begin(); 9052 auto E = FPT->param_type_end(); 9053 if (I != E) { 9054 do { 9055 if (!appendType(Enc, *I, CGM, TSC)) 9056 return false; 9057 ++I; 9058 if (I != E) 9059 Enc += ','; 9060 } while (I != E); 9061 if (FPT->isVariadic()) 9062 Enc += ",va"; 9063 } else { 9064 if (FPT->isVariadic()) 9065 Enc += "va"; 9066 else 9067 Enc += '0'; 9068 } 9069 } 9070 Enc += ')'; 9071 return true; 9072 } 9073 9074 /// Handles the type's qualifier before dispatching a call to handle specific 9075 /// type encodings. 
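/// For example (derived from the helpers above): 'const int *' encodes as
/// "p(c:si)" and a global 'int a[10]' encodes as "a(10:si)".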
9076 static bool appendType(SmallStringEnc &Enc, QualType QType, 9077 const CodeGen::CodeGenModule &CGM, 9078 TypeStringCache &TSC) { 9079 9080 QualType QT = QType.getCanonicalType(); 9081 9082 if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) 9083 // The Qualifiers should be attached to the type rather than the array. 9084 // Thus we don't call appendQualifier() here. 9085 return appendArrayType(Enc, QT, AT, CGM, TSC, ""); 9086 9087 appendQualifier(Enc, QT); 9088 9089 if (const BuiltinType *BT = QT->getAs<BuiltinType>()) 9090 return appendBuiltinType(Enc, BT); 9091 9092 if (const PointerType *PT = QT->getAs<PointerType>()) 9093 return appendPointerType(Enc, PT, CGM, TSC); 9094 9095 if (const EnumType *ET = QT->getAs<EnumType>()) 9096 return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier()); 9097 9098 if (const RecordType *RT = QT->getAsStructureType()) 9099 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier()); 9100 9101 if (const RecordType *RT = QT->getAsUnionType()) 9102 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier()); 9103 9104 if (const FunctionType *FT = QT->getAs<FunctionType>()) 9105 return appendFunctionType(Enc, FT, CGM, TSC); 9106 9107 return false; 9108 } 9109 9110 static bool getTypeString(SmallStringEnc &Enc, const Decl *D, 9111 CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) { 9112 if (!D) 9113 return false; 9114 9115 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 9116 if (FD->getLanguageLinkage() != CLanguageLinkage) 9117 return false; 9118 return appendType(Enc, FD->getType(), CGM, TSC); 9119 } 9120 9121 if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { 9122 if (VD->getLanguageLinkage() != CLanguageLinkage) 9123 return false; 9124 QualType QT = VD->getType().getCanonicalType(); 9125 if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) { 9126 // Global ArrayTypes are given a size of '*' if the size is unknown. 9127 // The Qualifiers should be attached to the type rather than the array. 9128 // Thus we don't call appendQualifier() here. 9129 return appendArrayType(Enc, QT, AT, CGM, TSC, "*"); 9130 } 9131 return appendType(Enc, QT, CGM, TSC); 9132 } 9133 return false; 9134 } 9135 9136 //===----------------------------------------------------------------------===// 9137 // RISCV ABI Implementation 9138 //===----------------------------------------------------------------------===// 9139 9140 namespace { 9141 class RISCVABIInfo : public DefaultABIInfo { 9142 private: 9143 unsigned XLen; // Size of the integer ('x') registers in bits. 9144 static const int NumArgGPRs = 8; 9145 9146 public: 9147 RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen) 9148 : DefaultABIInfo(CGT), XLen(XLen) {} 9149 9150 // DefaultABIInfo's classifyReturnType and classifyArgumentType are 9151 // non-virtual, but computeInfo is virtual, so we overload it. 
9152 void computeInfo(CGFunctionInfo &FI) const override; 9153 9154 ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, 9155 int &ArgGPRsLeft) const; 9156 ABIArgInfo classifyReturnType(QualType RetTy) const; 9157 9158 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 9159 QualType Ty) const override; 9160 9161 ABIArgInfo extendType(QualType Ty) const; 9162 }; 9163 } // end anonymous namespace 9164 9165 void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const { 9166 QualType RetTy = FI.getReturnType(); 9167 if (!getCXXABI().classifyReturnType(FI)) 9168 FI.getReturnInfo() = classifyReturnType(RetTy); 9169 9170 // IsRetIndirect is true if classifyArgumentType indicated the value should 9171 // be passed indirect or if the type size is greater than 2*xlen. e.g. fp128 9172 // is passed direct in LLVM IR, relying on the backend lowering code to 9173 // rewrite the argument list and pass indirectly on RV32. 9174 bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect || 9175 getContext().getTypeSize(RetTy) > (2 * XLen); 9176 9177 // We must track the number of GPRs used in order to conform to the RISC-V 9178 // ABI, as integer scalars passed in registers should have signext/zeroext 9179 // when promoted, but are anyext if passed on the stack. As GPR usage is 9180 // different for variadic arguments, we must also track whether we are 9181 // examining a vararg or not. 9182 int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs; 9183 int NumFixedArgs = FI.getNumRequiredArgs(); 9184 9185 int ArgNum = 0; 9186 for (auto &ArgInfo : FI.arguments()) { 9187 bool IsFixed = ArgNum < NumFixedArgs; 9188 ArgInfo.info = classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft); 9189 ArgNum++; 9190 } 9191 } 9192 9193 ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed, 9194 int &ArgGPRsLeft) const { 9195 assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow"); 9196 Ty = useFirstFieldIfTransparentUnion(Ty); 9197 9198 // Structures with either a non-trivial destructor or a non-trivial 9199 // copy constructor are always passed indirectly. 9200 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 9201 if (ArgGPRsLeft) 9202 ArgGPRsLeft -= 1; 9203 return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA == 9204 CGCXXABI::RAA_DirectInMemory); 9205 } 9206 9207 // Ignore empty structs/unions. 9208 if (isEmptyRecord(getContext(), Ty, true)) 9209 return ABIArgInfo::getIgnore(); 9210 9211 uint64_t Size = getContext().getTypeSize(Ty); 9212 uint64_t NeededAlign = getContext().getTypeAlign(Ty); 9213 bool MustUseStack = false; 9214 // Determine the number of GPRs needed to pass the current argument 9215 // according to the ABI. 2*XLen-aligned varargs are passed in "aligned" 9216 // register pairs, so may consume 3 registers. 9217 int NeededArgGPRs = 1; 9218 if (!IsFixed && NeededAlign == 2 * XLen) 9219 NeededArgGPRs = 2 + (ArgGPRsLeft % 2); 9220 else if (Size > XLen && Size <= 2 * XLen) 9221 NeededArgGPRs = 2; 9222 9223 if (NeededArgGPRs > ArgGPRsLeft) { 9224 MustUseStack = true; 9225 NeededArgGPRs = ArgGPRsLeft; 9226 } 9227 9228 ArgGPRsLeft -= NeededArgGPRs; 9229 9230 if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) { 9231 // Treat an enum type as its underlying type. 9232 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 9233 Ty = EnumTy->getDecl()->getIntegerType(); 9234 9235 // All integral types are promoted to XLen width, unless passed on the 9236 // stack. 
9237 if (Size < XLen && Ty->isIntegralOrEnumerationType() && !MustUseStack) { 9238 return extendType(Ty); 9239 } 9240 9241 return ABIArgInfo::getDirect(); 9242 } 9243 9244 // Aggregates which are <= 2*XLen will be passed in registers if possible, 9245 // so coerce to integers. 9246 if (Size <= 2 * XLen) { 9247 unsigned Alignment = getContext().getTypeAlign(Ty); 9248 9249 // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is 9250 // required, and a 2-element XLen array if only XLen alignment is required. 9251 if (Size <= XLen) { 9252 return ABIArgInfo::getDirect( 9253 llvm::IntegerType::get(getVMContext(), XLen)); 9254 } else if (Alignment == 2 * XLen) { 9255 return ABIArgInfo::getDirect( 9256 llvm::IntegerType::get(getVMContext(), 2 * XLen)); 9257 } else { 9258 return ABIArgInfo::getDirect(llvm::ArrayType::get( 9259 llvm::IntegerType::get(getVMContext(), XLen), 2)); 9260 } 9261 } 9262 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 9263 } 9264 9265 ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const { 9266 if (RetTy->isVoidType()) 9267 return ABIArgInfo::getIgnore(); 9268 9269 int ArgGPRsLeft = 2; 9270 9271 // The rules for return and argument types are the same, so defer to 9272 // classifyArgumentType. 9273 return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft); 9274 } 9275 9276 Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 9277 QualType Ty) const { 9278 CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8); 9279 9280 // Empty records are ignored for parameter passing purposes. 9281 if (isEmptyRecord(getContext(), Ty, true)) { 9282 Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize); 9283 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty)); 9284 return Addr; 9285 } 9286 9287 std::pair<CharUnits, CharUnits> SizeAndAlign = 9288 getContext().getTypeInfoInChars(Ty); 9289 9290 // Arguments bigger than 2*Xlen bytes are passed indirectly. 9291 bool IsIndirect = SizeAndAlign.first > 2 * SlotSize; 9292 9293 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, SizeAndAlign, 9294 SlotSize, /*AllowHigherAlign=*/true); 9295 } 9296 9297 ABIArgInfo RISCVABIInfo::extendType(QualType Ty) const { 9298 int TySize = getContext().getTypeSize(Ty); 9299 // RV64 ABI requires unsigned 32 bit integers to be sign extended. 
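// (The psABI keeps 32-bit values sign-extended to 64 bits in registers,
// matching how RV64's *W instructions produce results, so 'unsigned int'
// gets signext rather than zeroext here.)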
9300 if (XLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32) 9301 return ABIArgInfo::getSignExtend(Ty); 9302 return ABIArgInfo::getExtend(Ty); 9303 } 9304 9305 namespace { 9306 class RISCVTargetCodeGenInfo : public TargetCodeGenInfo { 9307 public: 9308 RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen) 9309 : TargetCodeGenInfo(new RISCVABIInfo(CGT, XLen)) {} 9310 9311 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 9312 CodeGen::CodeGenModule &CGM) const override { 9313 const auto *FD = dyn_cast_or_null<FunctionDecl>(D); 9314 if (!FD) return; 9315 9316 const auto *Attr = FD->getAttr<RISCVInterruptAttr>(); 9317 if (!Attr) 9318 return; 9319 9320 const char *Kind; 9321 switch (Attr->getInterrupt()) { 9322 case RISCVInterruptAttr::user: Kind = "user"; break; 9323 case RISCVInterruptAttr::supervisor: Kind = "supervisor"; break; 9324 case RISCVInterruptAttr::machine: Kind = "machine"; break; 9325 } 9326 9327 auto *Fn = cast<llvm::Function>(GV); 9328 9329 Fn->addFnAttr("interrupt", Kind); 9330 } 9331 }; 9332 } // namespace 9333 9334 //===----------------------------------------------------------------------===// 9335 // Driver code 9336 //===----------------------------------------------------------------------===// 9337 9338 bool CodeGenModule::supportsCOMDAT() const { 9339 return getTriple().supportsCOMDAT(); 9340 } 9341 9342 const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() { 9343 if (TheTargetCodeGenInfo) 9344 return *TheTargetCodeGenInfo; 9345 9346 // Helper to set the unique_ptr while still keeping the return value. 9347 auto SetCGInfo = [&](TargetCodeGenInfo *P) -> const TargetCodeGenInfo & { 9348 this->TheTargetCodeGenInfo.reset(P); 9349 return *P; 9350 }; 9351 9352 const llvm::Triple &Triple = getTarget().getTriple(); 9353 switch (Triple.getArch()) { 9354 default: 9355 return SetCGInfo(new DefaultTargetCodeGenInfo(Types)); 9356 9357 case llvm::Triple::le32: 9358 return SetCGInfo(new PNaClTargetCodeGenInfo(Types)); 9359 case llvm::Triple::mips: 9360 case llvm::Triple::mipsel: 9361 if (Triple.getOS() == llvm::Triple::NaCl) 9362 return SetCGInfo(new PNaClTargetCodeGenInfo(Types)); 9363 return SetCGInfo(new MIPSTargetCodeGenInfo(Types, true)); 9364 9365 case llvm::Triple::mips64: 9366 case llvm::Triple::mips64el: 9367 return SetCGInfo(new MIPSTargetCodeGenInfo(Types, false)); 9368 9369 case llvm::Triple::avr: 9370 return SetCGInfo(new AVRTargetCodeGenInfo(Types)); 9371 9372 case llvm::Triple::aarch64: 9373 case llvm::Triple::aarch64_be: { 9374 AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS; 9375 if (getTarget().getABI() == "darwinpcs") 9376 Kind = AArch64ABIInfo::DarwinPCS; 9377 else if (Triple.isOSWindows()) 9378 return SetCGInfo( 9379 new WindowsAArch64TargetCodeGenInfo(Types, AArch64ABIInfo::Win64)); 9380 9381 return SetCGInfo(new AArch64TargetCodeGenInfo(Types, Kind)); 9382 } 9383 9384 case llvm::Triple::wasm32: 9385 case llvm::Triple::wasm64: 9386 return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types)); 9387 9388 case llvm::Triple::arm: 9389 case llvm::Triple::armeb: 9390 case llvm::Triple::thumb: 9391 case llvm::Triple::thumbeb: { 9392 if (Triple.getOS() == llvm::Triple::Win32) { 9393 return SetCGInfo( 9394 new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP)); 9395 } 9396 9397 ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS; 9398 StringRef ABIStr = getTarget().getABI(); 9399 if (ABIStr == "apcs-gnu") 9400 Kind = ARMABIInfo::APCS; 9401 else if (ABIStr == "aapcs16") 9402 Kind = ARMABIInfo::AAPCS16_VFP; 9403 
else if (CodeGenOpts.FloatABI == "hard" || 9404 (CodeGenOpts.FloatABI != "soft" && 9405 (Triple.getEnvironment() == llvm::Triple::GNUEABIHF || 9406 Triple.getEnvironment() == llvm::Triple::MuslEABIHF || 9407 Triple.getEnvironment() == llvm::Triple::EABIHF))) 9408 Kind = ARMABIInfo::AAPCS_VFP; 9409 9410 return SetCGInfo(new ARMTargetCodeGenInfo(Types, Kind)); 9411 } 9412 9413 case llvm::Triple::ppc: 9414 return SetCGInfo( 9415 new PPC32TargetCodeGenInfo(Types, CodeGenOpts.FloatABI == "soft")); 9416 case llvm::Triple::ppc64: 9417 if (Triple.isOSBinFormatELF()) { 9418 PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1; 9419 if (getTarget().getABI() == "elfv2") 9420 Kind = PPC64_SVR4_ABIInfo::ELFv2; 9421 bool HasQPX = getTarget().getABI() == "elfv1-qpx"; 9422 bool IsSoftFloat = CodeGenOpts.FloatABI == "soft"; 9423 9424 return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX, 9425 IsSoftFloat)); 9426 } else 9427 return SetCGInfo(new PPC64TargetCodeGenInfo(Types)); 9428 case llvm::Triple::ppc64le: { 9429 assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!"); 9430 PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2; 9431 if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx") 9432 Kind = PPC64_SVR4_ABIInfo::ELFv1; 9433 bool HasQPX = getTarget().getABI() == "elfv1-qpx"; 9434 bool IsSoftFloat = CodeGenOpts.FloatABI == "soft"; 9435 9436 return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX, 9437 IsSoftFloat)); 9438 } 9439 9440 case llvm::Triple::nvptx: 9441 case llvm::Triple::nvptx64: 9442 return SetCGInfo(new NVPTXTargetCodeGenInfo(Types)); 9443 9444 case llvm::Triple::msp430: 9445 return SetCGInfo(new MSP430TargetCodeGenInfo(Types)); 9446 9447 case llvm::Triple::riscv32: 9448 return SetCGInfo(new RISCVTargetCodeGenInfo(Types, 32)); 9449 case llvm::Triple::riscv64: 9450 return SetCGInfo(new RISCVTargetCodeGenInfo(Types, 64)); 9451 9452 case llvm::Triple::systemz: { 9453 bool HasVector = getTarget().getABI() == "vector"; 9454 return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector)); 9455 } 9456 9457 case llvm::Triple::tce: 9458 case llvm::Triple::tcele: 9459 return SetCGInfo(new TCETargetCodeGenInfo(Types)); 9460 9461 case llvm::Triple::x86: { 9462 bool IsDarwinVectorABI = Triple.isOSDarwin(); 9463 bool RetSmallStructInRegABI = 9464 X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts); 9465 bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing(); 9466 9467 if (Triple.getOS() == llvm::Triple::Win32) { 9468 return SetCGInfo(new WinX86_32TargetCodeGenInfo( 9469 Types, IsDarwinVectorABI, RetSmallStructInRegABI, 9470 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters)); 9471 } else { 9472 return SetCGInfo(new X86_32TargetCodeGenInfo( 9473 Types, IsDarwinVectorABI, RetSmallStructInRegABI, 9474 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters, 9475 CodeGenOpts.FloatABI == "soft")); 9476 } 9477 } 9478 9479 case llvm::Triple::x86_64: { 9480 StringRef ABI = getTarget().getABI(); 9481 X86AVXABILevel AVXLevel = 9482 (ABI == "avx512" 9483 ? X86AVXABILevel::AVX512 9484 : ABI == "avx" ? 
X86AVXABILevel::AVX : X86AVXABILevel::None); 9485 9486 switch (Triple.getOS()) { 9487 case llvm::Triple::Win32: 9488 return SetCGInfo(new WinX86_64TargetCodeGenInfo(Types, AVXLevel)); 9489 default: 9490 return SetCGInfo(new X86_64TargetCodeGenInfo(Types, AVXLevel)); 9491 } 9492 } 9493 case llvm::Triple::hexagon: 9494 return SetCGInfo(new HexagonTargetCodeGenInfo(Types)); 9495 case llvm::Triple::lanai: 9496 return SetCGInfo(new LanaiTargetCodeGenInfo(Types)); 9497 case llvm::Triple::r600: 9498 return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types)); 9499 case llvm::Triple::amdgcn: 9500 return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types)); 9501 case llvm::Triple::sparc: 9502 return SetCGInfo(new SparcV8TargetCodeGenInfo(Types)); 9503 case llvm::Triple::sparcv9: 9504 return SetCGInfo(new SparcV9TargetCodeGenInfo(Types)); 9505 case llvm::Triple::xcore: 9506 return SetCGInfo(new XCoreTargetCodeGenInfo(Types)); 9507 case llvm::Triple::arc: 9508 return SetCGInfo(new ARCTargetCodeGenInfo(Types)); 9509 case llvm::Triple::spir: 9510 case llvm::Triple::spir64: 9511 return SetCGInfo(new SPIRTargetCodeGenInfo(Types)); 9512 } 9513 } 9514 9515 /// Create an OpenCL kernel for an enqueued block. 9516 /// 9517 /// The kernel has the same function type as the block invoke function. Its 9518 /// name is the name of the block invoke function postfixed with "_kernel". 9519 /// It simply calls the block invoke function then returns. 9520 llvm::Function * 9521 TargetCodeGenInfo::createEnqueuedBlockKernel(CodeGenFunction &CGF, 9522 llvm::Function *Invoke, 9523 llvm::Value *BlockLiteral) const { 9524 auto *InvokeFT = Invoke->getFunctionType(); 9525 llvm::SmallVector<llvm::Type *, 2> ArgTys; 9526 for (auto &P : InvokeFT->params()) 9527 ArgTys.push_back(P); 9528 auto &C = CGF.getLLVMContext(); 9529 std::string Name = Invoke->getName().str() + "_kernel"; 9530 auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false); 9531 auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name, 9532 &CGF.CGM.getModule()); 9533 auto IP = CGF.Builder.saveIP(); 9534 auto *BB = llvm::BasicBlock::Create(C, "entry", F); 9535 auto &Builder = CGF.Builder; 9536 Builder.SetInsertPoint(BB); 9537 llvm::SmallVector<llvm::Value *, 2> Args; 9538 for (auto &A : F->args()) 9539 Args.push_back(&A); 9540 Builder.CreateCall(Invoke, Args); 9541 Builder.CreateRetVoid(); 9542 Builder.restoreIP(IP); 9543 return F; 9544 } 9545 9546 /// Create an OpenCL kernel for an enqueued block. 9547 /// 9548 /// The type of the first argument (the block literal) is the struct type 9549 /// of the block literal instead of a pointer type. The first argument 9550 /// (block literal) is passed directly by value to the kernel. The kernel 9551 /// allocates the same type of struct on stack and stores the block literal 9552 /// to it and passes its pointer to the block invoke function. The kernel 9553 /// has "enqueued-block" function attribute and kernel argument metadata. 
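//
// As an illustrative sketch (the function name and struct type below are
// hypothetical, and any additional pointer arguments of the invoke function
// are simply forwarded), for a block invoke function
//   define internal void @foo_block_invoke(i8* %.block_descriptor)
// whose block literal has struct type %block.literal, the kernel built here
// looks roughly like:
//   define internal void @foo_block_invoke_kernel(%block.literal %literal) {
//   entry:
//     %block = alloca %block.literal
//     store %block.literal %literal, %block.literal* %block
//     %cast = bitcast %block.literal* %block to i8*
//     call void @foo_block_invoke(i8* %cast)
//     ret void
//   }
// with the "enqueued-block" function attribute and kernel_arg_* metadata
// attached to it.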
9554 llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel( 9555 CodeGenFunction &CGF, llvm::Function *Invoke, 9556 llvm::Value *BlockLiteral) const { 9557 auto &Builder = CGF.Builder; 9558 auto &C = CGF.getLLVMContext(); 9559 9560 auto *BlockTy = BlockLiteral->getType()->getPointerElementType(); 9561 auto *InvokeFT = Invoke->getFunctionType(); 9562 llvm::SmallVector<llvm::Type *, 2> ArgTys; 9563 llvm::SmallVector<llvm::Metadata *, 8> AddressQuals; 9564 llvm::SmallVector<llvm::Metadata *, 8> AccessQuals; 9565 llvm::SmallVector<llvm::Metadata *, 8> ArgTypeNames; 9566 llvm::SmallVector<llvm::Metadata *, 8> ArgBaseTypeNames; 9567 llvm::SmallVector<llvm::Metadata *, 8> ArgTypeQuals; 9568 llvm::SmallVector<llvm::Metadata *, 8> ArgNames; 9569 9570 ArgTys.push_back(BlockTy); 9571 ArgTypeNames.push_back(llvm::MDString::get(C, "__block_literal")); 9572 AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0))); 9573 ArgBaseTypeNames.push_back(llvm::MDString::get(C, "__block_literal")); 9574 ArgTypeQuals.push_back(llvm::MDString::get(C, "")); 9575 AccessQuals.push_back(llvm::MDString::get(C, "none")); 9576 ArgNames.push_back(llvm::MDString::get(C, "block_literal")); 9577 for (unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) { 9578 ArgTys.push_back(InvokeFT->getParamType(I)); 9579 ArgTypeNames.push_back(llvm::MDString::get(C, "void*")); 9580 AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3))); 9581 AccessQuals.push_back(llvm::MDString::get(C, "none")); 9582 ArgBaseTypeNames.push_back(llvm::MDString::get(C, "void*")); 9583 ArgTypeQuals.push_back(llvm::MDString::get(C, "")); 9584 ArgNames.push_back( 9585 llvm::MDString::get(C, (Twine("local_arg") + Twine(I)).str())); 9586 } 9587 std::string Name = Invoke->getName().str() + "_kernel"; 9588 auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false); 9589 auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name, 9590 &CGF.CGM.getModule()); 9591 F->addFnAttr("enqueued-block"); 9592 auto IP = CGF.Builder.saveIP(); 9593 auto *BB = llvm::BasicBlock::Create(C, "entry", F); 9594 Builder.SetInsertPoint(BB); 9595 unsigned BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(BlockTy); 9596 auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr); 9597 BlockPtr->setAlignment(BlockAlign); 9598 Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign); 9599 auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0)); 9600 llvm::SmallVector<llvm::Value *, 2> Args; 9601 Args.push_back(Cast); 9602 for (auto I = F->arg_begin() + 1, E = F->arg_end(); I != E; ++I) 9603 Args.push_back(I); 9604 Builder.CreateCall(Invoke, Args); 9605 Builder.CreateRetVoid(); 9606 Builder.restoreIP(IP); 9607 9608 F->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals)); 9609 F->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals)); 9610 F->setMetadata("kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames)); 9611 F->setMetadata("kernel_arg_base_type", 9612 llvm::MDNode::get(C, ArgBaseTypeNames)); 9613 F->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals)); 9614 if (CGF.CGM.getCodeGenOpts().EmitOpenCLArgMetadata) 9615 F->setMetadata("kernel_arg_name", llvm::MDNode::get(C, ArgNames)); 9616 9617 return F; 9618 } 9619