1 //===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // These classes wrap the information about a call or function 11 // definition used to handle ABI compliancy. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "TargetInfo.h" 16 #include "ABIInfo.h" 17 #include "CGCXXABI.h" 18 #include "CGValue.h" 19 #include "CodeGenFunction.h" 20 #include "clang/AST/RecordLayout.h" 21 #include "clang/CodeGen/CGFunctionInfo.h" 22 #include "clang/CodeGen/SwiftCallingConv.h" 23 #include "clang/Frontend/CodeGenOptions.h" 24 #include "llvm/ADT/StringExtras.h" 25 #include "llvm/ADT/StringSwitch.h" 26 #include "llvm/ADT/Triple.h" 27 #include "llvm/IR/DataLayout.h" 28 #include "llvm/IR/Type.h" 29 #include "llvm/Support/raw_ostream.h" 30 #include <algorithm> // std::sort 31 32 using namespace clang; 33 using namespace CodeGen; 34 35 // Helper for coercing an aggregate argument or return value into an integer 36 // array of the same size (including padding) and alignment. This alternate 37 // coercion happens only for the RenderScript ABI and can be removed after 38 // runtimes that rely on it are no longer supported. 39 // 40 // RenderScript assumes that the size of the argument / return value in the IR 41 // is the same as the size of the corresponding qualified type. This helper 42 // coerces the aggregate type into an array of the same size (including 43 // padding). This coercion is used in lieu of expansion of struct members or 44 // other canonical coercions that return a coerced-type of larger size. 45 // 46 // Ty - The argument / return value type 47 // Context - The associated ASTContext 48 // LLVMContext - The associated LLVMContext 49 static ABIArgInfo coerceToIntArray(QualType Ty, 50 ASTContext &Context, 51 llvm::LLVMContext &LLVMContext) { 52 // Alignment and Size are measured in bits. 53 const uint64_t Size = Context.getTypeSize(Ty); 54 const uint64_t Alignment = Context.getTypeAlign(Ty); 55 llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment); 56 const uint64_t NumElements = (Size + Alignment - 1) / Alignment; 57 return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements)); 58 } 59 60 static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, 61 llvm::Value *Array, 62 llvm::Value *Value, 63 unsigned FirstIndex, 64 unsigned LastIndex) { 65 // Alternatively, we could emit this as a loop in the source. 
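// Illustrative sketch only (the value names are invented): assuming, as the
// callers in this file arrange, that Array points at an i8 buffer and Value
// is an i8 constant, a call with FirstIndex = 0 and LastIndex = 2 unrolls the
// compile-time loop below into IR roughly like
//   %cell.0 = getelementptr inbounds i8, i8* %array, i32 0
//   store i8 %value, i8* %cell.0, align 1
//   %cell.1 = getelementptr inbounds i8, i8* %array, i32 1
//   store i8 %value, i8* %cell.1, align 1
//   %cell.2 = getelementptr inbounds i8, i8* %array, i32 2
//   store i8 %value, i8* %cell.2, align 1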
66 for (unsigned I = FirstIndex; I <= LastIndex; ++I) { 67 llvm::Value *Cell = 68 Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I); 69 Builder.CreateAlignedStore(Value, Cell, CharUnits::One()); 70 } 71 } 72 73 static bool isAggregateTypeForABI(QualType T) { 74 return !CodeGenFunction::hasScalarEvaluationKind(T) || 75 T->isMemberFunctionPointerType(); 76 } 77 78 ABIArgInfo 79 ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByRef, bool Realign, 80 llvm::Type *Padding) const { 81 return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty), 82 ByRef, Realign, Padding); 83 } 84 85 ABIArgInfo 86 ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const { 87 return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty), 88 /*ByRef*/ false, Realign); 89 } 90 91 Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, 92 QualType Ty) const { 93 return Address::invalid(); 94 } 95 96 ABIInfo::~ABIInfo() {} 97 98 /// Does the given lowering require more than the given number of 99 /// registers when expanded? 100 /// 101 /// This is intended to be the basis of a reasonable basic implementation 102 /// of should{Pass,Return}IndirectlyForSwift. 103 /// 104 /// For most targets, a limit of four total registers is reasonable; this 105 /// limits the amount of code required in order to move around the value 106 /// in case it wasn't produced immediately prior to the call by the caller 107 /// (or wasn't produced in exactly the right registers) or isn't used 108 /// immediately within the callee. But some targets may need to further 109 /// limit the register count due to an inability to support that many 110 /// return registers. 111 static bool occupiesMoreThan(CodeGenTypes &cgt, 112 ArrayRef<llvm::Type*> scalarTypes, 113 unsigned maxAllRegisters) { 114 unsigned intCount = 0, fpCount = 0; 115 for (llvm::Type *type : scalarTypes) { 116 if (type->isPointerTy()) { 117 intCount++; 118 } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) { 119 auto ptrWidth = cgt.getTarget().getPointerWidth(0); 120 intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth; 121 } else { 122 assert(type->isVectorTy() || type->isFloatingPointTy()); 123 fpCount++; 124 } 125 } 126 127 return (intCount + fpCount > maxAllRegisters); 128 } 129 130 bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize, 131 llvm::Type *eltTy, 132 unsigned numElts) const { 133 // The default implementation of this assumes that the target guarantees 134 // 128-bit SIMD support but nothing more. 135 return (vectorSize.getQuantity() > 8 && vectorSize.getQuantity() <= 16); 136 } 137 138 static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, 139 CGCXXABI &CXXABI) { 140 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); 141 if (!RD) 142 return CGCXXABI::RAA_Default; 143 return CXXABI.getRecordArgABI(RD); 144 } 145 146 static CGCXXABI::RecordArgABI getRecordArgABI(QualType T, 147 CGCXXABI &CXXABI) { 148 const RecordType *RT = T->getAs<RecordType>(); 149 if (!RT) 150 return CGCXXABI::RAA_Default; 151 return getRecordArgABI(RT, CXXABI); 152 } 153 154 /// Pass transparent unions as if they were the type of the first element. Sema 155 /// should ensure that all elements of the union have the same "machine type". 
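/// A minimal sketch of the kind of declaration this handles (illustrative
/// only, using the GNU attribute spelling that Clang accepts):
///
///   typedef union {
///     int *ip;
///     volatile int *vip;
///   } int_ptr_u __attribute__((transparent_union));
///
/// An argument declared as int_ptr_u is passed exactly as its first field
/// (here, int *) would be.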
156 static QualType useFirstFieldIfTransparentUnion(QualType Ty) { 157 if (const RecordType *UT = Ty->getAsUnionType()) { 158 const RecordDecl *UD = UT->getDecl(); 159 if (UD->hasAttr<TransparentUnionAttr>()) { 160 assert(!UD->field_empty() && "sema created an empty transparent union"); 161 return UD->field_begin()->getType(); 162 } 163 } 164 return Ty; 165 } 166 167 CGCXXABI &ABIInfo::getCXXABI() const { 168 return CGT.getCXXABI(); 169 } 170 171 ASTContext &ABIInfo::getContext() const { 172 return CGT.getContext(); 173 } 174 175 llvm::LLVMContext &ABIInfo::getVMContext() const { 176 return CGT.getLLVMContext(); 177 } 178 179 const llvm::DataLayout &ABIInfo::getDataLayout() const { 180 return CGT.getDataLayout(); 181 } 182 183 const TargetInfo &ABIInfo::getTarget() const { 184 return CGT.getTarget(); 185 } 186 187 const CodeGenOptions &ABIInfo::getCodeGenOpts() const { 188 return CGT.getCodeGenOpts(); 189 } 190 191 bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); } 192 193 bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 194 return false; 195 } 196 197 bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, 198 uint64_t Members) const { 199 return false; 200 } 201 202 bool ABIInfo::shouldSignExtUnsignedType(QualType Ty) const { 203 return false; 204 } 205 206 LLVM_DUMP_METHOD void ABIArgInfo::dump() const { 207 raw_ostream &OS = llvm::errs(); 208 OS << "(ABIArgInfo Kind="; 209 switch (TheKind) { 210 case Direct: 211 OS << "Direct Type="; 212 if (llvm::Type *Ty = getCoerceToType()) 213 Ty->print(OS); 214 else 215 OS << "null"; 216 break; 217 case Extend: 218 OS << "Extend"; 219 break; 220 case Ignore: 221 OS << "Ignore"; 222 break; 223 case InAlloca: 224 OS << "InAlloca Offset=" << getInAllocaFieldIndex(); 225 break; 226 case Indirect: 227 OS << "Indirect Align=" << getIndirectAlign().getQuantity() 228 << " ByVal=" << getIndirectByVal() 229 << " Realign=" << getIndirectRealign(); 230 break; 231 case Expand: 232 OS << "Expand"; 233 break; 234 case CoerceAndExpand: 235 OS << "CoerceAndExpand Type="; 236 getCoerceAndExpandType()->print(OS); 237 break; 238 } 239 OS << ")\n"; 240 } 241 242 // Dynamically round a pointer up to a multiple of the given alignment. 243 static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF, 244 llvm::Value *Ptr, 245 CharUnits Align) { 246 llvm::Value *PtrAsInt = Ptr; 247 // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align; 248 PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy); 249 PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt, 250 llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1)); 251 PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt, 252 llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity())); 253 PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt, 254 Ptr->getType(), 255 Ptr->getName() + ".aligned"); 256 return PtrAsInt; 257 } 258 259 /// Emit va_arg for a platform using the common void* representation, 260 /// where arguments are simply emitted in an array of slots on the stack. 261 /// 262 /// This version implements the core direct-value passing rules. 263 /// 264 /// \param SlotSize - The size and alignment of a stack slot. 265 /// Each argument will be allocated to a multiple of this number of 266 /// slots, and all the slots will be aligned to this value. 
267 /// \param AllowHigherAlign - The slot alignment is not a cap; 268 /// an argument type with an alignment greater than the slot size 269 /// will be emitted on a higher-alignment address, potentially 270 /// leaving one or more empty slots behind as padding. If this 271 /// is false, the returned address might be less-aligned than 272 /// DirectAlign. 273 static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF, 274 Address VAListAddr, 275 llvm::Type *DirectTy, 276 CharUnits DirectSize, 277 CharUnits DirectAlign, 278 CharUnits SlotSize, 279 bool AllowHigherAlign) { 280 // Cast the element type to i8* if necessary. Some platforms define 281 // va_list as a struct containing an i8* instead of just an i8*. 282 if (VAListAddr.getElementType() != CGF.Int8PtrTy) 283 VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy); 284 285 llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur"); 286 287 // If the CC aligns values higher than the slot size, do so if needed. 288 Address Addr = Address::invalid(); 289 if (AllowHigherAlign && DirectAlign > SlotSize) { 290 Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign), 291 DirectAlign); 292 } else { 293 Addr = Address(Ptr, SlotSize); 294 } 295 296 // Advance the pointer past the argument, then store that back. 297 CharUnits FullDirectSize = DirectSize.alignTo(SlotSize); 298 llvm::Value *NextPtr = 299 CGF.Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), FullDirectSize, 300 "argp.next"); 301 CGF.Builder.CreateStore(NextPtr, VAListAddr); 302 303 // If the argument is smaller than a slot, and this is a big-endian 304 // target, the argument will be right-adjusted in its slot. 305 if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() && 306 !DirectTy->isStructTy()) { 307 Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize); 308 } 309 310 Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy); 311 return Addr; 312 } 313 314 /// Emit va_arg for a platform using the common void* representation, 315 /// where arguments are simply emitted in an array of slots on the stack. 316 /// 317 /// \param IsIndirect - Values of this type are passed indirectly. 318 /// \param ValueInfo - The size and alignment of this type, generally 319 /// computed with getContext().getTypeInfoInChars(ValueTy). 320 /// \param SlotSizeAndAlign - The size and alignment of a stack slot. 321 /// Each argument will be allocated to a multiple of this number of 322 /// slots, and all the slots will be aligned to this value. 323 /// \param AllowHigherAlign - The slot alignment is not a cap; 324 /// an argument type with an alignment greater than the slot size 325 /// will be emitted on a higher-alignment address, potentially 326 /// leaving one or more empty slots behind as padding. 327 static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, 328 QualType ValueTy, bool IsIndirect, 329 std::pair<CharUnits, CharUnits> ValueInfo, 330 CharUnits SlotSizeAndAlign, 331 bool AllowHigherAlign) { 332 // The size and alignment of the value that was passed directly. 333 CharUnits DirectSize, DirectAlign; 334 if (IsIndirect) { 335 DirectSize = CGF.getPointerSize(); 336 DirectAlign = CGF.getPointerAlign(); 337 } else { 338 DirectSize = ValueInfo.first; 339 DirectAlign = ValueInfo.second; 340 } 341 342 // Cast the address we've calculated to the right type. 
343 llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy); 344 if (IsIndirect) 345 DirectTy = DirectTy->getPointerTo(0); 346 347 Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy, 348 DirectSize, DirectAlign, 349 SlotSizeAndAlign, 350 AllowHigherAlign); 351 352 if (IsIndirect) { 353 Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.second); 354 } 355 356 return Addr; 357 358 } 359 360 static Address emitMergePHI(CodeGenFunction &CGF, 361 Address Addr1, llvm::BasicBlock *Block1, 362 Address Addr2, llvm::BasicBlock *Block2, 363 const llvm::Twine &Name = "") { 364 assert(Addr1.getType() == Addr2.getType()); 365 llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name); 366 PHI->addIncoming(Addr1.getPointer(), Block1); 367 PHI->addIncoming(Addr2.getPointer(), Block2); 368 CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment()); 369 return Address(PHI, Align); 370 } 371 372 TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; } 373 374 // If someone can figure out a general rule for this, that would be great. 375 // It's probably just doomed to be platform-dependent, though. 376 unsigned TargetCodeGenInfo::getSizeOfUnwindException() const { 377 // Verified for: 378 // x86-64 FreeBSD, Linux, Darwin 379 // x86-32 FreeBSD, Linux, Darwin 380 // PowerPC Linux, Darwin 381 // ARM Darwin (*not* EABI) 382 // AArch64 Linux 383 return 32; 384 } 385 386 bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args, 387 const FunctionNoProtoType *fnType) const { 388 // The following conventions are known to require this to be false: 389 // x86_stdcall 390 // MIPS 391 // For everything else, we just prefer false unless we opt out. 392 return false; 393 } 394 395 void 396 TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib, 397 llvm::SmallString<24> &Opt) const { 398 // This assumes the user is passing a library name like "rt" instead of a 399 // filename like "librt.a/so", and that they don't care whether it's static or 400 // dynamic. 401 Opt = "-l"; 402 Opt += Lib; 403 } 404 405 unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const { 406 // OpenCL kernels are called via an explicit runtime API with arguments 407 // set with clSetKernelArg(), not as normal sub-functions. 408 // Return SPIR_KERNEL by default as the kernel calling convention to 409 // ensure the fingerprint is fixed such way that each OpenCL argument 410 // gets one matching argument in the produced kernel function argument 411 // list to enable feasible implementation of clSetKernelArg() with 412 // aggregates etc. In case we would use the default C calling conv here, 413 // clSetKernelArg() might break depending on the target-specific 414 // conventions; different targets might split structs passed as values 415 // to multiple function arguments etc. 416 return llvm::CallingConv::SPIR_KERNEL; 417 } 418 419 llvm::Constant *TargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM, 420 llvm::PointerType *T, QualType QT) const { 421 return llvm::ConstantPointerNull::get(T); 422 } 423 424 unsigned TargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM, 425 const VarDecl *D) const { 426 assert(!CGM.getLangOpts().OpenCL && 427 !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) && 428 "Address space agnostic languages only"); 429 return D ? 
D->getType().getAddressSpace() 430 : static_cast<unsigned>(LangAS::Default); 431 } 432 433 llvm::Value *TargetCodeGenInfo::performAddrSpaceCast( 434 CodeGen::CodeGenFunction &CGF, llvm::Value *Src, unsigned SrcAddr, 435 unsigned DestAddr, llvm::Type *DestTy, bool isNonNull) const { 436 // Since the target may map different address spaces in the AST to the same address 437 // space, an address space conversion may end up as a bitcast. 438 if (auto *C = dyn_cast<llvm::Constant>(Src)) 439 return performAddrSpaceCast(CGF.CGM, C, SrcAddr, DestAddr, DestTy); 440 return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DestTy); 441 } 442 443 llvm::Constant * 444 TargetCodeGenInfo::performAddrSpaceCast(CodeGenModule &CGM, llvm::Constant *Src, 445 unsigned SrcAddr, unsigned DestAddr, 446 llvm::Type *DestTy) const { 447 // Since the target may map different address spaces in the AST to the same address 448 // space, an address space conversion may end up as a bitcast. 449 return llvm::ConstantExpr::getPointerCast(Src, DestTy); 450 } 451 452 llvm::SyncScope::ID 453 TargetCodeGenInfo::getLLVMSyncScopeID(SyncScope S, llvm::LLVMContext &C) const { 454 return C.getOrInsertSyncScopeID(""); /* default sync scope */ 455 } 456 457 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays); 458 459 /// isEmptyField - Return true iff the field is "empty", that is, it 460 /// is an unnamed bit-field or an (array of) empty record(s). 461 static bool isEmptyField(ASTContext &Context, const FieldDecl *FD, 462 bool AllowArrays) { 463 if (FD->isUnnamedBitfield()) 464 return true; 465 466 QualType FT = FD->getType(); 467 468 // Constant arrays of empty records count as empty, strip them off. 469 // Constant arrays of zero length always count as empty. 470 if (AllowArrays) 471 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) { 472 if (AT->getSize() == 0) 473 return true; 474 FT = AT->getElementType(); 475 } 476 477 const RecordType *RT = FT->getAs<RecordType>(); 478 if (!RT) 479 return false; 480 481 // C++ record fields are never empty, at least in the Itanium ABI. 482 // 483 // FIXME: We should use a predicate for whether this behavior is true in the 484 // current ABI. 485 if (isa<CXXRecordDecl>(RT->getDecl())) 486 return false; 487 488 return isEmptyRecord(Context, FT, AllowArrays); 489 } 490 491 /// isEmptyRecord - Return true iff a structure contains only empty 492 /// fields. Note that a structure with a flexible array member is not 493 /// considered empty. 494 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) { 495 const RecordType *RT = T->getAs<RecordType>(); 496 if (!RT) 497 return false; 498 const RecordDecl *RD = RT->getDecl(); 499 if (RD->hasFlexibleArrayMember()) 500 return false; 501 502 // If this is a C++ record, check the bases first. 503 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 504 for (const auto &I : CXXRD->bases()) 505 if (!isEmptyRecord(Context, I.getType(), true)) 506 return false; 507 508 for (const auto *I : RD->fields()) 509 if (!isEmptyField(Context, I, AllowArrays)) 510 return false; 511 return true; 512 } 513 514 /// isSingleElementStruct - Determine if a structure is a "single 515 /// element struct", i.e. it has exactly one non-empty field or 516 /// exactly one field which is itself a single element 517 /// struct. Structures with flexible array members are never 518 /// considered single element structs. 519 /// 520 /// \return The type of the single non-empty field, if 521 /// it exists.
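///
/// A few illustrative cases (sketch only, assuming a 32-bit 'int'):
///   struct A { int x; };          // single-element struct, element type int
///   struct B { struct A a; };     // likewise, found by recursion
///   struct C { int x[1]; };       // likewise, the one-element array is peeled
///   struct D { int x; char c; };  // not single-element: two non-empty fields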
522 static const Type *isSingleElementStruct(QualType T, ASTContext &Context) { 523 const RecordType *RT = T->getAs<RecordType>(); 524 if (!RT) 525 return nullptr; 526 527 const RecordDecl *RD = RT->getDecl(); 528 if (RD->hasFlexibleArrayMember()) 529 return nullptr; 530 531 const Type *Found = nullptr; 532 533 // If this is a C++ record, check the bases first. 534 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 535 for (const auto &I : CXXRD->bases()) { 536 // Ignore empty records. 537 if (isEmptyRecord(Context, I.getType(), true)) 538 continue; 539 540 // If we already found an element then this isn't a single-element struct. 541 if (Found) 542 return nullptr; 543 544 // If this is non-empty and not a single element struct, the composite 545 // cannot be a single element struct. 546 Found = isSingleElementStruct(I.getType(), Context); 547 if (!Found) 548 return nullptr; 549 } 550 } 551 552 // Check for single element. 553 for (const auto *FD : RD->fields()) { 554 QualType FT = FD->getType(); 555 556 // Ignore empty fields. 557 if (isEmptyField(Context, FD, true)) 558 continue; 559 560 // If we already found an element then this isn't a single-element 561 // struct. 562 if (Found) 563 return nullptr; 564 565 // Treat single element arrays as the element. 566 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) { 567 if (AT->getSize().getZExtValue() != 1) 568 break; 569 FT = AT->getElementType(); 570 } 571 572 if (!isAggregateTypeForABI(FT)) { 573 Found = FT.getTypePtr(); 574 } else { 575 Found = isSingleElementStruct(FT, Context); 576 if (!Found) 577 return nullptr; 578 } 579 } 580 581 // We don't consider a struct a single-element struct if it has 582 // padding beyond the element type. 583 if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T)) 584 return nullptr; 585 586 return Found; 587 } 588 589 namespace { 590 Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty, 591 const ABIArgInfo &AI) { 592 // This default implementation defers to the llvm backend's va_arg 593 // instruction. It can handle only passing arguments directly 594 // (typically only handled in the backend for primitive types), or 595 // aggregates passed indirectly by pointer (NOTE: if the "byval" 596 // flag has ABI impact in the callee, this implementation cannot 597 // work.) 598 599 // Only a few cases are covered here at the moment -- those needed 600 // by the default abi. 
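  // Illustrative sketch of the two paths (value names invented, and assuming
  // a char*-style va_list): for a direct 'int' argument the code below emits
  // roughly
  //   %varet = alloca i32
  //   %v = va_arg i8** %ap, i32
  //   store i32 %v, i32* %varet
  // and returns %varet as the result address, while the indirect path emits
  // 'va_arg i8** %ap, i32*' and uses the resulting pointer directly as the
  // argument's address.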
601 llvm::Value *Val; 602 603 if (AI.isIndirect()) { 604 assert(!AI.getPaddingType() && 605 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!"); 606 assert( 607 !AI.getIndirectRealign() && 608 "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!"); 609 610 auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty); 611 CharUnits TyAlignForABI = TyInfo.second; 612 613 llvm::Type *BaseTy = 614 llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty)); 615 llvm::Value *Addr = 616 CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy); 617 return Address(Addr, TyAlignForABI); 618 } else { 619 assert((AI.isDirect() || AI.isExtend()) && 620 "Unexpected ArgInfo Kind in generic VAArg emitter!"); 621 622 assert(!AI.getInReg() && 623 "Unexpected InReg seen in arginfo in generic VAArg emitter!"); 624 assert(!AI.getPaddingType() && 625 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!"); 626 assert(!AI.getDirectOffset() && 627 "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!"); 628 assert(!AI.getCoerceToType() && 629 "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!"); 630 631 Address Temp = CGF.CreateMemTemp(Ty, "varet"); 632 Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(), CGF.ConvertType(Ty)); 633 CGF.Builder.CreateStore(Val, Temp); 634 return Temp; 635 } 636 } 637 638 /// DefaultABIInfo - The default implementation for ABI specific 639 /// details. This implementation provides information which results in 640 /// self-consistent and sensible LLVM IR generation, but does not 641 /// conform to any particular ABI. 642 class DefaultABIInfo : public ABIInfo { 643 public: 644 DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} 645 646 ABIArgInfo classifyReturnType(QualType RetTy) const; 647 ABIArgInfo classifyArgumentType(QualType RetTy) const; 648 649 void computeInfo(CGFunctionInfo &FI) const override { 650 if (!getCXXABI().classifyReturnType(FI)) 651 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 652 for (auto &I : FI.arguments()) 653 I.info = classifyArgumentType(I.type); 654 } 655 656 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 657 QualType Ty) const override { 658 return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty)); 659 } 660 }; 661 662 class DefaultTargetCodeGenInfo : public TargetCodeGenInfo { 663 public: 664 DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 665 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 666 }; 667 668 ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const { 669 Ty = useFirstFieldIfTransparentUnion(Ty); 670 671 if (isAggregateTypeForABI(Ty)) { 672 // Records with non-trivial destructors/copy-constructors should not be 673 // passed by value. 674 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 675 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 676 677 return getNaturalAlignIndirect(Ty); 678 } 679 680 // Treat an enum type as its underlying type. 681 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 682 Ty = EnumTy->getDecl()->getIntegerType(); 683 684 return (Ty->isPromotableIntegerType() ? 685 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 686 } 687 688 ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const { 689 if (RetTy->isVoidType()) 690 return ABIArgInfo::getIgnore(); 691 692 if (isAggregateTypeForABI(RetTy)) 693 return getNaturalAlignIndirect(RetTy); 694 695 // Treat an enum type as its underlying type. 
696 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 697 RetTy = EnumTy->getDecl()->getIntegerType(); 698 699 return (RetTy->isPromotableIntegerType() ? 700 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 701 } 702 703 //===----------------------------------------------------------------------===// 704 // WebAssembly ABI Implementation 705 // 706 // This is a very simple ABI that relies a lot on DefaultABIInfo. 707 //===----------------------------------------------------------------------===// 708 709 class WebAssemblyABIInfo final : public DefaultABIInfo { 710 public: 711 explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT) 712 : DefaultABIInfo(CGT) {} 713 714 private: 715 ABIArgInfo classifyReturnType(QualType RetTy) const; 716 ABIArgInfo classifyArgumentType(QualType Ty) const; 717 718 // DefaultABIInfo's classifyReturnType and classifyArgumentType are 719 // non-virtual, but computeInfo and EmitVAArg are virtual, so we 720 // overload them. 721 void computeInfo(CGFunctionInfo &FI) const override { 722 if (!getCXXABI().classifyReturnType(FI)) 723 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 724 for (auto &Arg : FI.arguments()) 725 Arg.info = classifyArgumentType(Arg.type); 726 } 727 728 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 729 QualType Ty) const override; 730 }; 731 732 class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo { 733 public: 734 explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 735 : TargetCodeGenInfo(new WebAssemblyABIInfo(CGT)) {} 736 }; 737 738 /// \brief Classify argument of given type \p Ty. 739 ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const { 740 Ty = useFirstFieldIfTransparentUnion(Ty); 741 742 if (isAggregateTypeForABI(Ty)) { 743 // Records with non-trivial destructors/copy-constructors should not be 744 // passed by value. 745 if (auto RAA = getRecordArgABI(Ty, getCXXABI())) 746 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 747 // Ignore empty structs/unions. 748 if (isEmptyRecord(getContext(), Ty, true)) 749 return ABIArgInfo::getIgnore(); 750 // Lower single-element structs to just pass a regular value. TODO: We 751 // could do reasonable-size multiple-element structs too, using getExpand(), 752 // though watch out for things like bitfields. 753 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext())) 754 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); 755 } 756 757 // Otherwise just do the default thing. 758 return DefaultABIInfo::classifyArgumentType(Ty); 759 } 760 761 ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const { 762 if (isAggregateTypeForABI(RetTy)) { 763 // Records with non-trivial destructors/copy-constructors should not be 764 // returned by value. 765 if (!getRecordArgABI(RetTy, getCXXABI())) { 766 // Ignore empty structs/unions. 767 if (isEmptyRecord(getContext(), RetTy, true)) 768 return ABIArgInfo::getIgnore(); 769 // Lower single-element structs to just return a regular value. TODO: We 770 // could do reasonable-size multiple-element structs too, using 771 // ABIArgInfo::getDirect(). 772 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) 773 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); 774 } 775 } 776 777 // Otherwise just do the default thing. 
778 return DefaultABIInfo::classifyReturnType(RetTy); 779 } 780 781 Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 782 QualType Ty) const { 783 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect=*/ false, 784 getContext().getTypeInfoInChars(Ty), 785 CharUnits::fromQuantity(4), 786 /*AllowHigherAlign=*/ true); 787 } 788 789 //===----------------------------------------------------------------------===// 790 // le32/PNaCl bitcode ABI Implementation 791 // 792 // This is a simplified version of the x86_32 ABI. Arguments and return values 793 // are always passed on the stack. 794 //===----------------------------------------------------------------------===// 795 796 class PNaClABIInfo : public ABIInfo { 797 public: 798 PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} 799 800 ABIArgInfo classifyReturnType(QualType RetTy) const; 801 ABIArgInfo classifyArgumentType(QualType RetTy) const; 802 803 void computeInfo(CGFunctionInfo &FI) const override; 804 Address EmitVAArg(CodeGenFunction &CGF, 805 Address VAListAddr, QualType Ty) const override; 806 }; 807 808 class PNaClTargetCodeGenInfo : public TargetCodeGenInfo { 809 public: 810 PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 811 : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {} 812 }; 813 814 void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const { 815 if (!getCXXABI().classifyReturnType(FI)) 816 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 817 818 for (auto &I : FI.arguments()) 819 I.info = classifyArgumentType(I.type); 820 } 821 822 Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 823 QualType Ty) const { 824 // The PNaCl ABI is a bit odd, in that varargs don't use normal 825 // function classification. Structs get passed directly for varargs 826 // functions, through a rewriting transform in 827 // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows 828 // this target to actually support va_arg instructions with an 829 // aggregate type, unlike other targets. 830 return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect()); 831 } 832 833 /// \brief Classify argument of given type \p Ty. 834 ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const { 835 if (isAggregateTypeForABI(Ty)) { 836 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 837 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 838 return getNaturalAlignIndirect(Ty); 839 } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) { 840 // Treat an enum type as its underlying type. 841 Ty = EnumTy->getDecl()->getIntegerType(); 842 } else if (Ty->isFloatingType()) { 843 // Floating-point types don't go inreg. 844 return ABIArgInfo::getDirect(); 845 } 846 847 return (Ty->isPromotableIntegerType() ? 848 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 849 } 850 851 ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const { 852 if (RetTy->isVoidType()) 853 return ABIArgInfo::getIgnore(); 854 855 // In the PNaCl ABI we always return records/structures on the stack. 856 if (isAggregateTypeForABI(RetTy)) 857 return getNaturalAlignIndirect(RetTy); 858 859 // Treat an enum type as its underlying type. 860 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 861 RetTy = EnumTy->getDecl()->getIntegerType(); 862 863 return (RetTy->isPromotableIntegerType() ? 864 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 865 } 866 867 /// IsX86_MMXType - Return true if this is an MMX type.
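/// For instance (a sketch of the boundary cases): <2 x i32>, <4 x i16>, and
/// <8 x i8> satisfy the predicate below, while <1 x i64> fails the
/// scalar-size check and <2 x float> fails the integer-element check.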
868 bool IsX86_MMXType(llvm::Type *IRType) { 869 // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>. 870 return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 && 871 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() && 872 IRType->getScalarSizeInBits() != 64; 873 } 874 875 static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF, 876 StringRef Constraint, 877 llvm::Type* Ty) { 878 bool IsMMXCons = llvm::StringSwitch<bool>(Constraint) 879 .Cases("y", "&y", "^Ym", true) 880 .Default(false); 881 if (IsMMXCons && Ty->isVectorTy()) { 882 if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) { 883 // Invalid MMX constraint 884 return nullptr; 885 } 886 887 return llvm::Type::getX86_MMXTy(CGF.getLLVMContext()); 888 } 889 890 // No operation needed 891 return Ty; 892 } 893 894 /// Returns true if this type can be passed in SSE registers with the 895 /// X86_VectorCall calling convention. Shared between x86_32 and x86_64. 896 static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) { 897 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 898 if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) { 899 if (BT->getKind() == BuiltinType::LongDouble) { 900 if (&Context.getTargetInfo().getLongDoubleFormat() == 901 &llvm::APFloat::x87DoubleExtended()) 902 return false; 903 } 904 return true; 905 } 906 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 907 // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX 908 // registers specially. 909 unsigned VecSize = Context.getTypeSize(VT); 910 if (VecSize == 128 || VecSize == 256 || VecSize == 512) 911 return true; 912 } 913 return false; 914 } 915 916 /// Returns true if this aggregate is small enough to be passed in SSE registers 917 /// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64. 918 static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) { 919 return NumMembers <= 4; 920 } 921 922 /// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86. 923 static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) { 924 auto AI = ABIArgInfo::getDirect(T); 925 AI.setInReg(true); 926 AI.setCanBeFlattened(false); 927 return AI; 928 } 929 930 //===----------------------------------------------------------------------===// 931 // X86-32 ABI Implementation 932 //===----------------------------------------------------------------------===// 933 934 /// \brief Similar to llvm::CCState, but for Clang. 935 struct CCState { 936 CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {} 937 938 unsigned CC; 939 unsigned FreeRegs; 940 unsigned FreeSSERegs; 941 }; 942 943 enum { 944 // Vectorcall only allows the first 6 parameters to be passed in registers. 945 VectorcallMaxParamNumAsReg = 6 946 }; 947 948 /// X86_32ABIInfo - The X86-32 ABI information. 949 class X86_32ABIInfo : public SwiftABIInfo { 950 enum Class { 951 Integer, 952 Float 953 }; 954 955 static const unsigned MinABIStackAlignInBytes = 4; 956 957 bool IsDarwinVectorABI; 958 bool IsRetSmallStructInRegABI; 959 bool IsWin32StructABI; 960 bool IsSoftFloatABI; 961 bool IsMCUABI; 962 unsigned DefaultNumRegisterParameters; 963 964 static bool isRegisterSize(unsigned Size) { 965 return (Size == 8 || Size == 16 || Size == 32 || Size == 64); 966 } 967 968 bool isHomogeneousAggregateBaseType(QualType Ty) const override { 969 // FIXME: Assumes vectorcall is in use. 
970 return isX86VectorTypeForVectorCall(getContext(), Ty); 971 } 972 973 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 974 uint64_t NumMembers) const override { 975 // FIXME: Assumes vectorcall is in use. 976 return isX86VectorCallAggregateSmallEnough(NumMembers); 977 } 978 979 bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const; 980 981 /// getIndirectResult - Give a source type \arg Ty, return a suitable result 982 /// such that the argument will be passed in memory. 983 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const; 984 985 ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const; 986 987 /// \brief Return the alignment to use for the given type on the stack. 988 unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const; 989 990 Class classify(QualType Ty) const; 991 ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const; 992 ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const; 993 994 /// \brief Updates the number of available free registers, returns 995 /// true if any registers were allocated. 996 bool updateFreeRegs(QualType Ty, CCState &State) const; 997 998 bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg, 999 bool &NeedsPadding) const; 1000 bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const; 1001 1002 bool canExpandIndirectArgument(QualType Ty) const; 1003 1004 /// \brief Rewrite the function info so that all memory arguments use 1005 /// inalloca. 1006 void rewriteWithInAlloca(CGFunctionInfo &FI) const; 1007 1008 void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields, 1009 CharUnits &StackOffset, ABIArgInfo &Info, 1010 QualType Type) const; 1011 void computeVectorCallArgs(CGFunctionInfo &FI, CCState &State, 1012 bool &UsedInAlloca) const; 1013 1014 public: 1015 1016 void computeInfo(CGFunctionInfo &FI) const override; 1017 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 1018 QualType Ty) const override; 1019 1020 X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI, 1021 bool RetSmallStructInRegABI, bool Win32StructABI, 1022 unsigned NumRegisterParameters, bool SoftFloatABI) 1023 : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI), 1024 IsRetSmallStructInRegABI(RetSmallStructInRegABI), 1025 IsWin32StructABI(Win32StructABI), 1026 IsSoftFloatABI(SoftFloatABI), 1027 IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()), 1028 DefaultNumRegisterParameters(NumRegisterParameters) {} 1029 1030 bool shouldPassIndirectlyForSwift(CharUnits totalSize, 1031 ArrayRef<llvm::Type*> scalars, 1032 bool asReturnValue) const override { 1033 // LLVM's x86-32 lowering currently only assigns up to three 1034 // integer registers and three fp registers. Oddly, it'll use up to 1035 // four vector registers for vectors, but those can overlap with the 1036 // scalar registers. 1037 return occupiesMoreThan(CGT, scalars, /*total*/ 3); 1038 } 1039 1040 bool isSwiftErrorInRegister() const override { 1041 // x86-32 lowering does not support passing swifterror in a register. 
1042 return false; 1043 } 1044 }; 1045 1046 class X86_32TargetCodeGenInfo : public TargetCodeGenInfo { 1047 public: 1048 X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI, 1049 bool RetSmallStructInRegABI, bool Win32StructABI, 1050 unsigned NumRegisterParameters, bool SoftFloatABI) 1051 : TargetCodeGenInfo(new X86_32ABIInfo( 1052 CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI, 1053 NumRegisterParameters, SoftFloatABI)) {} 1054 1055 static bool isStructReturnInRegABI( 1056 const llvm::Triple &Triple, const CodeGenOptions &Opts); 1057 1058 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 1059 CodeGen::CodeGenModule &CGM, 1060 ForDefinition_t IsForDefinition) const override; 1061 1062 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { 1063 // Darwin uses different dwarf register numbers for EH. 1064 if (CGM.getTarget().getTriple().isOSDarwin()) return 5; 1065 return 4; 1066 } 1067 1068 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 1069 llvm::Value *Address) const override; 1070 1071 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF, 1072 StringRef Constraint, 1073 llvm::Type* Ty) const override { 1074 return X86AdjustInlineAsmType(CGF, Constraint, Ty); 1075 } 1076 1077 void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue, 1078 std::string &Constraints, 1079 std::vector<llvm::Type *> &ResultRegTypes, 1080 std::vector<llvm::Type *> &ResultTruncRegTypes, 1081 std::vector<LValue> &ResultRegDests, 1082 std::string &AsmString, 1083 unsigned NumOutputs) const override; 1084 1085 llvm::Constant * 1086 getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override { 1087 unsigned Sig = (0xeb << 0) | // jmp rel8 1088 (0x06 << 8) | // .+0x08 1089 ('F' << 16) | 1090 ('T' << 24); 1091 return llvm::ConstantInt::get(CGM.Int32Ty, Sig); 1092 } 1093 1094 StringRef getARCRetainAutoreleasedReturnValueMarker() const override { 1095 return "movl\t%ebp, %ebp" 1096 "\t\t// marker for objc_retainAutoreleaseReturnValue"; 1097 } 1098 }; 1099 1100 } 1101 1102 /// Rewrite input constraint references after adding some output constraints. 1103 /// In the case where there is one output and one input and we add one output, 1104 /// we need to replace all operand references greater than or equal to 1: 1105 /// mov $0, $1 1106 /// mov eax, $1 1107 /// The result will be: 1108 /// mov $0, $2 1109 /// mov eax, $2 1110 static void rewriteInputConstraintReferences(unsigned FirstIn, 1111 unsigned NumNewOuts, 1112 std::string &AsmString) { 1113 std::string Buf; 1114 llvm::raw_string_ostream OS(Buf); 1115 size_t Pos = 0; 1116 while (Pos < AsmString.size()) { 1117 size_t DollarStart = AsmString.find('$', Pos); 1118 if (DollarStart == std::string::npos) 1119 DollarStart = AsmString.size(); 1120 size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart); 1121 if (DollarEnd == std::string::npos) 1122 DollarEnd = AsmString.size(); 1123 OS << StringRef(&AsmString[Pos], DollarEnd - Pos); 1124 Pos = DollarEnd; 1125 size_t NumDollars = DollarEnd - DollarStart; 1126 if (NumDollars % 2 != 0 && Pos < AsmString.size()) { 1127 // We have an operand reference. 
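      // ("$$" escapes a literal '$' in an LLVM inline-asm string, so only an
      // odd-length run of dollars is followed by an operand number. For
      // example, in "movl $$1, $0" the "$$1" is the immediate 1 and only the
      // "$0" is treated as an operand reference.)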
1128 size_t DigitStart = Pos; 1129 size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart); 1130 if (DigitEnd == std::string::npos) 1131 DigitEnd = AsmString.size(); 1132 StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart); 1133 unsigned OperandIndex; 1134 if (!OperandStr.getAsInteger(10, OperandIndex)) { 1135 if (OperandIndex >= FirstIn) 1136 OperandIndex += NumNewOuts; 1137 OS << OperandIndex; 1138 } else { 1139 OS << OperandStr; 1140 } 1141 Pos = DigitEnd; 1142 } 1143 } 1144 AsmString = std::move(OS.str()); 1145 } 1146 1147 /// Add output constraints for EAX:EDX because they are return registers. 1148 void X86_32TargetCodeGenInfo::addReturnRegisterOutputs( 1149 CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints, 1150 std::vector<llvm::Type *> &ResultRegTypes, 1151 std::vector<llvm::Type *> &ResultTruncRegTypes, 1152 std::vector<LValue> &ResultRegDests, std::string &AsmString, 1153 unsigned NumOutputs) const { 1154 uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType()); 1155 1156 // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is 1157 // larger. 1158 if (!Constraints.empty()) 1159 Constraints += ','; 1160 if (RetWidth <= 32) { 1161 Constraints += "={eax}"; 1162 ResultRegTypes.push_back(CGF.Int32Ty); 1163 } else { 1164 // Use the 'A' constraint for EAX:EDX. 1165 Constraints += "=A"; 1166 ResultRegTypes.push_back(CGF.Int64Ty); 1167 } 1168 1169 // Truncate EAX or EAX:EDX to an integer of the appropriate size. 1170 llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth); 1171 ResultTruncRegTypes.push_back(CoerceTy); 1172 1173 // Coerce the integer by bitcasting the return slot pointer. 1174 ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(), 1175 CoerceTy->getPointerTo())); 1176 ResultRegDests.push_back(ReturnSlot); 1177 1178 rewriteInputConstraintReferences(NumOutputs, 1, AsmString); 1179 } 1180 1181 /// shouldReturnTypeInRegister - Determine if the given type should be 1182 /// returned in a register (for the Darwin and MCU ABI). 1183 bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty, 1184 ASTContext &Context) const { 1185 uint64_t Size = Context.getTypeSize(Ty); 1186 1187 // For i386, type must be register sized. 1188 // For the MCU ABI, it only needs to be <= 8-byte 1189 if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size))) 1190 return false; 1191 1192 if (Ty->isVectorType()) { 1193 // 64- and 128- bit vectors inside structures are not returned in 1194 // registers. 1195 if (Size == 64 || Size == 128) 1196 return false; 1197 1198 return true; 1199 } 1200 1201 // If this is a builtin, pointer, enum, complex type, member pointer, or 1202 // member function pointer it is ok. 1203 if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() || 1204 Ty->isAnyComplexType() || Ty->isEnumeralType() || 1205 Ty->isBlockPointerType() || Ty->isMemberPointerType()) 1206 return true; 1207 1208 // Arrays are treated like records. 1209 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) 1210 return shouldReturnTypeInRegister(AT->getElementType(), Context); 1211 1212 // Otherwise, it must be a record type. 1213 const RecordType *RT = Ty->getAs<RecordType>(); 1214 if (!RT) return false; 1215 1216 // FIXME: Traverse bases here too. 1217 1218 // Structure types are passed in register if all fields would be 1219 // passed in a register. 1220 for (const auto *FD : RT->getDecl()->fields()) { 1221 // Empty fields are ignored. 
1222 if (isEmptyField(Context, FD, true)) 1223 continue; 1224 1225 // Check fields recursively. 1226 if (!shouldReturnTypeInRegister(FD->getType(), Context)) 1227 return false; 1228 } 1229 return true; 1230 } 1231 1232 static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) { 1233 // Treat complex types as the element type. 1234 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) 1235 Ty = CTy->getElementType(); 1236 1237 // Check for a type which we know has a simple scalar argument-passing 1238 // convention without any padding. (We're specifically looking for 32 1239 // and 64-bit integer and integer-equivalents, float, and double.) 1240 if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() && 1241 !Ty->isEnumeralType() && !Ty->isBlockPointerType()) 1242 return false; 1243 1244 uint64_t Size = Context.getTypeSize(Ty); 1245 return Size == 32 || Size == 64; 1246 } 1247 1248 static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD, 1249 uint64_t &Size) { 1250 for (const auto *FD : RD->fields()) { 1251 // Scalar arguments on the stack get 4 byte alignment on x86. If the 1252 // argument is smaller than 32-bits, expanding the struct will create 1253 // alignment padding. 1254 if (!is32Or64BitBasicType(FD->getType(), Context)) 1255 return false; 1256 1257 // FIXME: Reject bit-fields wholesale; there are two problems, we don't know 1258 // how to expand them yet, and the predicate for telling if a bitfield still 1259 // counts as "basic" is more complicated than what we were doing previously. 1260 if (FD->isBitField()) 1261 return false; 1262 1263 Size += Context.getTypeSize(FD->getType()); 1264 } 1265 return true; 1266 } 1267 1268 static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD, 1269 uint64_t &Size) { 1270 // Don't do this if there are any non-empty bases. 1271 for (const CXXBaseSpecifier &Base : RD->bases()) { 1272 if (!addBaseAndFieldSizes(Context, Base.getType()->getAsCXXRecordDecl(), 1273 Size)) 1274 return false; 1275 } 1276 if (!addFieldSizes(Context, RD, Size)) 1277 return false; 1278 return true; 1279 } 1280 1281 /// Test whether an argument type which is to be passed indirectly (on the 1282 /// stack) would have the equivalent layout if it was expanded into separate 1283 /// arguments. If so, we prefer to do the latter to avoid inhibiting 1284 /// optimizations. 1285 bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const { 1286 // We can only expand structure types. 1287 const RecordType *RT = Ty->getAs<RecordType>(); 1288 if (!RT) 1289 return false; 1290 const RecordDecl *RD = RT->getDecl(); 1291 uint64_t Size = 0; 1292 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 1293 if (!IsWin32StructABI) { 1294 // On non-Windows, we have to conservatively match our old bitcode 1295 // prototypes in order to be ABI-compatible at the bitcode level. 1296 if (!CXXRD->isCLike()) 1297 return false; 1298 } else { 1299 // Don't do this for dynamic classes. 1300 if (CXXRD->isDynamicClass()) 1301 return false; 1302 } 1303 if (!addBaseAndFieldSizes(getContext(), CXXRD, Size)) 1304 return false; 1305 } else { 1306 if (!addFieldSizes(getContext(), RD, Size)) 1307 return false; 1308 } 1309 1310 // We can do this if there was no alignment padding. 1311 return Size == getContext().getTypeSize(Ty); 1312 } 1313 1314 ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy, CCState &State) const { 1315 // If the return value is indirect, then the hidden argument is consuming one 1316 // integer register. 
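  // (Illustrative note: under a register-parameter convention such as
  // fastcall, the hidden sret pointer itself can be placed in a register when
  // one is still free, which is what the InReg variant below expresses.)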
1317 if (State.FreeRegs) { 1318 --State.FreeRegs; 1319 if (!IsMCUABI) 1320 return getNaturalAlignIndirectInReg(RetTy); 1321 } 1322 return getNaturalAlignIndirect(RetTy, /*ByVal=*/false); 1323 } 1324 1325 ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy, 1326 CCState &State) const { 1327 if (RetTy->isVoidType()) 1328 return ABIArgInfo::getIgnore(); 1329 1330 const Type *Base = nullptr; 1331 uint64_t NumElts = 0; 1332 if ((State.CC == llvm::CallingConv::X86_VectorCall || 1333 State.CC == llvm::CallingConv::X86_RegCall) && 1334 isHomogeneousAggregate(RetTy, Base, NumElts)) { 1335 // The LLVM struct type for such an aggregate should lower properly. 1336 return ABIArgInfo::getDirect(); 1337 } 1338 1339 if (const VectorType *VT = RetTy->getAs<VectorType>()) { 1340 // On Darwin, some vectors are returned in registers. 1341 if (IsDarwinVectorABI) { 1342 uint64_t Size = getContext().getTypeSize(RetTy); 1343 1344 // 128-bit vectors are a special case; they are returned in 1345 // registers and we need to make sure to pick a type the LLVM 1346 // backend will like. 1347 if (Size == 128) 1348 return ABIArgInfo::getDirect(llvm::VectorType::get( 1349 llvm::Type::getInt64Ty(getVMContext()), 2)); 1350 1351 // Always return in register if it fits in a general purpose 1352 // register, or if it is 64 bits and has a single element. 1353 if ((Size == 8 || Size == 16 || Size == 32) || 1354 (Size == 64 && VT->getNumElements() == 1)) 1355 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 1356 Size)); 1357 1358 return getIndirectReturnResult(RetTy, State); 1359 } 1360 1361 return ABIArgInfo::getDirect(); 1362 } 1363 1364 if (isAggregateTypeForABI(RetTy)) { 1365 if (const RecordType *RT = RetTy->getAs<RecordType>()) { 1366 // Structures with flexible arrays are always indirect. 1367 if (RT->getDecl()->hasFlexibleArrayMember()) 1368 return getIndirectReturnResult(RetTy, State); 1369 } 1370 1371 // If specified, structs and unions are always indirect. 1372 if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType()) 1373 return getIndirectReturnResult(RetTy, State); 1374 1375 // Ignore empty structs/unions. 1376 if (isEmptyRecord(getContext(), RetTy, true)) 1377 return ABIArgInfo::getIgnore(); 1378 1379 // Small structures which are register sized are generally returned 1380 // in a register. 1381 if (shouldReturnTypeInRegister(RetTy, getContext())) { 1382 uint64_t Size = getContext().getTypeSize(RetTy); 1383 1384 // As a special-case, if the struct is a "single-element" struct, and 1385 // the field is of type "float" or "double", return it in a 1386 // floating-point register. (MSVC does not apply this special case.) 1387 // We apply a similar transformation for pointer types to improve the 1388 // quality of the generated IR. 1389 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) 1390 if ((!IsWin32StructABI && SeltTy->isRealFloatingType()) 1391 || SeltTy->hasPointerRepresentation()) 1392 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); 1393 1394 // FIXME: We should be able to narrow this integer in cases with dead 1395 // padding. 1396 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size)); 1397 } 1398 1399 return getIndirectReturnResult(RetTy, State); 1400 } 1401 1402 // Treat an enum type as its underlying type. 1403 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 1404 RetTy = EnumTy->getDecl()->getIntegerType(); 1405 1406 return (RetTy->isPromotableIntegerType() ? 
1407 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 1408 } 1409 1410 static bool isSSEVectorType(ASTContext &Context, QualType Ty) { 1411 return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128; 1412 } 1413 1414 static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) { 1415 const RecordType *RT = Ty->getAs<RecordType>(); 1416 if (!RT) 1417 return 0; 1418 const RecordDecl *RD = RT->getDecl(); 1419 1420 // If this is a C++ record, check the bases first. 1421 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 1422 for (const auto &I : CXXRD->bases()) 1423 if (!isRecordWithSSEVectorType(Context, I.getType())) 1424 return false; 1425 1426 for (const auto *i : RD->fields()) { 1427 QualType FT = i->getType(); 1428 1429 if (isSSEVectorType(Context, FT)) 1430 return true; 1431 1432 if (isRecordWithSSEVectorType(Context, FT)) 1433 return true; 1434 } 1435 1436 return false; 1437 } 1438 1439 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty, 1440 unsigned Align) const { 1441 // Otherwise, if the alignment is less than or equal to the minimum ABI 1442 // alignment, just use the default; the backend will handle this. 1443 if (Align <= MinABIStackAlignInBytes) 1444 return 0; // Use default alignment. 1445 1446 // On non-Darwin, the stack type alignment is always 4. 1447 if (!IsDarwinVectorABI) { 1448 // Set explicit alignment, since we may need to realign the top. 1449 return MinABIStackAlignInBytes; 1450 } 1451 1452 // Otherwise, if the type contains an SSE vector type, the alignment is 16. 1453 if (Align >= 16 && (isSSEVectorType(getContext(), Ty) || 1454 isRecordWithSSEVectorType(getContext(), Ty))) 1455 return 16; 1456 1457 return MinABIStackAlignInBytes; 1458 } 1459 1460 ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal, 1461 CCState &State) const { 1462 if (!ByVal) { 1463 if (State.FreeRegs) { 1464 --State.FreeRegs; // Non-byval indirects just use one pointer. 1465 if (!IsMCUABI) 1466 return getNaturalAlignIndirectInReg(Ty); 1467 } 1468 return getNaturalAlignIndirect(Ty, false); 1469 } 1470 1471 // Compute the byval alignment. 1472 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8; 1473 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign); 1474 if (StackAlign == 0) 1475 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true); 1476 1477 // If the stack alignment is less than the type alignment, realign the 1478 // argument. 
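  // (Sketch: a struct over-aligned with alignas(32) reaches this point with
  // TypeAlign == 32 but a StackAlign of at most 16, so Realign becomes true;
  // types whose alignment does not exceed 4 bytes already returned above with
  // the default byval treatment.)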
1479 bool Realign = TypeAlign > StackAlign; 1480 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign), 1481 /*ByVal=*/true, Realign); 1482 } 1483 1484 X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const { 1485 const Type *T = isSingleElementStruct(Ty, getContext()); 1486 if (!T) 1487 T = Ty.getTypePtr(); 1488 1489 if (const BuiltinType *BT = T->getAs<BuiltinType>()) { 1490 BuiltinType::Kind K = BT->getKind(); 1491 if (K == BuiltinType::Float || K == BuiltinType::Double) 1492 return Float; 1493 } 1494 return Integer; 1495 } 1496 1497 bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const { 1498 if (!IsSoftFloatABI) { 1499 Class C = classify(Ty); 1500 if (C == Float) 1501 return false; 1502 } 1503 1504 unsigned Size = getContext().getTypeSize(Ty); 1505 unsigned SizeInRegs = (Size + 31) / 32; 1506 1507 if (SizeInRegs == 0) 1508 return false; 1509 1510 if (!IsMCUABI) { 1511 if (SizeInRegs > State.FreeRegs) { 1512 State.FreeRegs = 0; 1513 return false; 1514 } 1515 } else { 1516 // The MCU psABI allows passing parameters in-reg even if there are 1517 // earlier parameters that are passed on the stack. Also, 1518 // it does not allow passing >8-byte structs in-register, 1519 // even if there are 3 free registers available. 1520 if (SizeInRegs > State.FreeRegs || SizeInRegs > 2) 1521 return false; 1522 } 1523 1524 State.FreeRegs -= SizeInRegs; 1525 return true; 1526 } 1527 1528 bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State, 1529 bool &InReg, 1530 bool &NeedsPadding) const { 1531 // On Windows, aggregates other than HFAs are never passed in registers, and 1532 // they do not consume register slots. Homogenous floating-point aggregates 1533 // (HFAs) have already been dealt with at this point. 1534 if (IsWin32StructABI && isAggregateTypeForABI(Ty)) 1535 return false; 1536 1537 NeedsPadding = false; 1538 InReg = !IsMCUABI; 1539 1540 if (!updateFreeRegs(Ty, State)) 1541 return false; 1542 1543 if (IsMCUABI) 1544 return true; 1545 1546 if (State.CC == llvm::CallingConv::X86_FastCall || 1547 State.CC == llvm::CallingConv::X86_VectorCall || 1548 State.CC == llvm::CallingConv::X86_RegCall) { 1549 if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs) 1550 NeedsPadding = true; 1551 1552 return false; 1553 } 1554 1555 return true; 1556 } 1557 1558 bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const { 1559 if (!updateFreeRegs(Ty, State)) 1560 return false; 1561 1562 if (IsMCUABI) 1563 return false; 1564 1565 if (State.CC == llvm::CallingConv::X86_FastCall || 1566 State.CC == llvm::CallingConv::X86_VectorCall || 1567 State.CC == llvm::CallingConv::X86_RegCall) { 1568 if (getContext().getTypeSize(Ty) > 32) 1569 return false; 1570 1571 return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() || 1572 Ty->isReferenceType()); 1573 } 1574 1575 return true; 1576 } 1577 1578 ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, 1579 CCState &State) const { 1580 // FIXME: Set alignment on indirect arguments. 1581 1582 Ty = useFirstFieldIfTransparentUnion(Ty); 1583 1584 // Check with the C++ ABI first. 1585 const RecordType *RT = Ty->getAs<RecordType>(); 1586 if (RT) { 1587 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()); 1588 if (RAA == CGCXXABI::RAA_Indirect) { 1589 return getIndirectResult(Ty, false, State); 1590 } else if (RAA == CGCXXABI::RAA_DirectInMemory) { 1591 // The field index doesn't matter, we'll fix it up later. 
1592 return ABIArgInfo::getInAlloca(/*FieldIndex=*/0); 1593 } 1594 } 1595 1596 // Regcall uses the concept of a homogenous vector aggregate, similar 1597 // to other targets. 1598 const Type *Base = nullptr; 1599 uint64_t NumElts = 0; 1600 if (State.CC == llvm::CallingConv::X86_RegCall && 1601 isHomogeneousAggregate(Ty, Base, NumElts)) { 1602 1603 if (State.FreeSSERegs >= NumElts) { 1604 State.FreeSSERegs -= NumElts; 1605 if (Ty->isBuiltinType() || Ty->isVectorType()) 1606 return ABIArgInfo::getDirect(); 1607 return ABIArgInfo::getExpand(); 1608 } 1609 return getIndirectResult(Ty, /*ByVal=*/false, State); 1610 } 1611 1612 if (isAggregateTypeForABI(Ty)) { 1613 // Structures with flexible arrays are always indirect. 1614 // FIXME: This should not be byval! 1615 if (RT && RT->getDecl()->hasFlexibleArrayMember()) 1616 return getIndirectResult(Ty, true, State); 1617 1618 // Ignore empty structs/unions on non-Windows. 1619 if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true)) 1620 return ABIArgInfo::getIgnore(); 1621 1622 llvm::LLVMContext &LLVMContext = getVMContext(); 1623 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext); 1624 bool NeedsPadding = false; 1625 bool InReg; 1626 if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) { 1627 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32; 1628 SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32); 1629 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements); 1630 if (InReg) 1631 return ABIArgInfo::getDirectInReg(Result); 1632 else 1633 return ABIArgInfo::getDirect(Result); 1634 } 1635 llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr; 1636 1637 // Expand small (<= 128-bit) record types when we know that the stack layout 1638 // of those arguments will match the struct. This is important because the 1639 // LLVM backend isn't smart enough to remove byval, which inhibits many 1640 // optimizations. 1641 // Don't do this for the MCU if there are still free integer registers 1642 // (see X86_64 ABI for full explanation). 1643 if (getContext().getTypeSize(Ty) <= 4 * 32 && 1644 (!IsMCUABI || State.FreeRegs == 0) && canExpandIndirectArgument(Ty)) 1645 return ABIArgInfo::getExpandWithPadding( 1646 State.CC == llvm::CallingConv::X86_FastCall || 1647 State.CC == llvm::CallingConv::X86_VectorCall || 1648 State.CC == llvm::CallingConv::X86_RegCall, 1649 PaddingType); 1650 1651 return getIndirectResult(Ty, true, State); 1652 } 1653 1654 if (const VectorType *VT = Ty->getAs<VectorType>()) { 1655 // On Darwin, some vectors are passed in memory, we handle this by passing 1656 // it as an i8/i16/i32/i64. 
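  // Illustrative example (added comment, not from the original source):
  // under the Darwin vector ABI a 4-byte vector such as <4 x i8> hits the
  // Size == 32 case below and is coerced to a plain i32, while a 128-bit
  // vector like <4 x float> falls through and is still passed as a vector.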
1657 if (IsDarwinVectorABI) { 1658 uint64_t Size = getContext().getTypeSize(Ty); 1659 if ((Size == 8 || Size == 16 || Size == 32) || 1660 (Size == 64 && VT->getNumElements() == 1)) 1661 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 1662 Size)); 1663 } 1664 1665 if (IsX86_MMXType(CGT.ConvertType(Ty))) 1666 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64)); 1667 1668 return ABIArgInfo::getDirect(); 1669 } 1670 1671 1672 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 1673 Ty = EnumTy->getDecl()->getIntegerType(); 1674 1675 bool InReg = shouldPrimitiveUseInReg(Ty, State); 1676 1677 if (Ty->isPromotableIntegerType()) { 1678 if (InReg) 1679 return ABIArgInfo::getExtendInReg(); 1680 return ABIArgInfo::getExtend(); 1681 } 1682 1683 if (InReg) 1684 return ABIArgInfo::getDirectInReg(); 1685 return ABIArgInfo::getDirect(); 1686 } 1687 1688 void X86_32ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI, CCState &State, 1689 bool &UsedInAlloca) const { 1690 // Vectorcall x86 works subtly different than in x64, so the format is 1691 // a bit different than the x64 version. First, all vector types (not HVAs) 1692 // are assigned, with the first 6 ending up in the YMM0-5 or XMM0-5 registers. 1693 // This differs from the x64 implementation, where the first 6 by INDEX get 1694 // registers. 1695 // After that, integers AND HVAs are assigned Left to Right in the same pass. 1696 // Integers are passed as ECX/EDX if one is available (in order). HVAs will 1697 // first take up the remaining YMM/XMM registers. If insufficient registers 1698 // remain but an integer register (ECX/EDX) is available, it will be passed 1699 // in that, else, on the stack. 1700 for (auto &I : FI.arguments()) { 1701 // First pass do all the vector types. 1702 const Type *Base = nullptr; 1703 uint64_t NumElts = 0; 1704 const QualType& Ty = I.type; 1705 if ((Ty->isVectorType() || Ty->isBuiltinType()) && 1706 isHomogeneousAggregate(Ty, Base, NumElts)) { 1707 if (State.FreeSSERegs >= NumElts) { 1708 State.FreeSSERegs -= NumElts; 1709 I.info = ABIArgInfo::getDirect(); 1710 } else { 1711 I.info = classifyArgumentType(Ty, State); 1712 } 1713 UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca); 1714 } 1715 } 1716 1717 for (auto &I : FI.arguments()) { 1718 // Second pass, do the rest! 1719 const Type *Base = nullptr; 1720 uint64_t NumElts = 0; 1721 const QualType& Ty = I.type; 1722 bool IsHva = isHomogeneousAggregate(Ty, Base, NumElts); 1723 1724 if (IsHva && !Ty->isVectorType() && !Ty->isBuiltinType()) { 1725 // Assign true HVAs (non vector/native FP types). 1726 if (State.FreeSSERegs >= NumElts) { 1727 State.FreeSSERegs -= NumElts; 1728 I.info = getDirectX86Hva(); 1729 } else { 1730 I.info = getIndirectResult(Ty, /*ByVal=*/false, State); 1731 } 1732 } else if (!IsHva) { 1733 // Assign all Non-HVAs, so this will exclude Vector/FP args. 
1734 I.info = classifyArgumentType(Ty, State); 1735 UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca); 1736 } 1737 } 1738 } 1739 1740 void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const { 1741 CCState State(FI.getCallingConvention()); 1742 if (IsMCUABI) 1743 State.FreeRegs = 3; 1744 else if (State.CC == llvm::CallingConv::X86_FastCall) 1745 State.FreeRegs = 2; 1746 else if (State.CC == llvm::CallingConv::X86_VectorCall) { 1747 State.FreeRegs = 2; 1748 State.FreeSSERegs = 6; 1749 } else if (FI.getHasRegParm()) 1750 State.FreeRegs = FI.getRegParm(); 1751 else if (State.CC == llvm::CallingConv::X86_RegCall) { 1752 State.FreeRegs = 5; 1753 State.FreeSSERegs = 8; 1754 } else 1755 State.FreeRegs = DefaultNumRegisterParameters; 1756 1757 if (!getCXXABI().classifyReturnType(FI)) { 1758 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State); 1759 } else if (FI.getReturnInfo().isIndirect()) { 1760 // The C++ ABI is not aware of register usage, so we have to check if the 1761 // return value was sret and put it in a register ourselves if appropriate. 1762 if (State.FreeRegs) { 1763 --State.FreeRegs; // The sret parameter consumes a register. 1764 if (!IsMCUABI) 1765 FI.getReturnInfo().setInReg(true); 1766 } 1767 } 1768 1769 // The chain argument effectively gives us another free register. 1770 if (FI.isChainCall()) 1771 ++State.FreeRegs; 1772 1773 bool UsedInAlloca = false; 1774 if (State.CC == llvm::CallingConv::X86_VectorCall) { 1775 computeVectorCallArgs(FI, State, UsedInAlloca); 1776 } else { 1777 // If not vectorcall, revert to normal behavior. 1778 for (auto &I : FI.arguments()) { 1779 I.info = classifyArgumentType(I.type, State); 1780 UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca); 1781 } 1782 } 1783 1784 // If we needed to use inalloca for any argument, do a second pass and rewrite 1785 // all the memory arguments to use inalloca. 1786 if (UsedInAlloca) 1787 rewriteWithInAlloca(FI); 1788 } 1789 1790 void 1791 X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields, 1792 CharUnits &StackOffset, ABIArgInfo &Info, 1793 QualType Type) const { 1794 // Arguments are always 4-byte-aligned. 1795 CharUnits FieldAlign = CharUnits::fromQuantity(4); 1796 1797 assert(StackOffset.isMultipleOf(FieldAlign) && "unaligned inalloca struct"); 1798 Info = ABIArgInfo::getInAlloca(FrameFields.size()); 1799 FrameFields.push_back(CGT.ConvertTypeForMem(Type)); 1800 StackOffset += getContext().getTypeSizeInChars(Type); 1801 1802 // Insert padding bytes to respect alignment. 1803 CharUnits FieldEnd = StackOffset; 1804 StackOffset = FieldEnd.alignTo(FieldAlign); 1805 if (StackOffset != FieldEnd) { 1806 CharUnits NumBytes = StackOffset - FieldEnd; 1807 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext()); 1808 Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity()); 1809 FrameFields.push_back(Ty); 1810 } 1811 } 1812 1813 static bool isArgInAlloca(const ABIArgInfo &Info) { 1814 // Leave ignored and inreg arguments alone. 1815 switch (Info.getKind()) { 1816 case ABIArgInfo::InAlloca: 1817 return true; 1818 case ABIArgInfo::Indirect: 1819 assert(Info.getIndirectByVal()); 1820 return true; 1821 case ABIArgInfo::Ignore: 1822 return false; 1823 case ABIArgInfo::Direct: 1824 case ABIArgInfo::Extend: 1825 if (Info.getInReg()) 1826 return false; 1827 return true; 1828 case ABIArgInfo::Expand: 1829 case ABIArgInfo::CoerceAndExpand: 1830 // These are aggregate types which are never passed in registers when 1831 // inalloca is involved. 
1832 return true; 1833 } 1834 llvm_unreachable("invalid enum"); 1835 } 1836 1837 void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const { 1838 assert(IsWin32StructABI && "inalloca only supported on win32"); 1839 1840 // Build a packed struct type for all of the arguments in memory. 1841 SmallVector<llvm::Type *, 6> FrameFields; 1842 1843 // The stack alignment is always 4. 1844 CharUnits StackAlign = CharUnits::fromQuantity(4); 1845 1846 CharUnits StackOffset; 1847 CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end(); 1848 1849 // Put 'this' into the struct before 'sret', if necessary. 1850 bool IsThisCall = 1851 FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall; 1852 ABIArgInfo &Ret = FI.getReturnInfo(); 1853 if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall && 1854 isArgInAlloca(I->info)) { 1855 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type); 1856 ++I; 1857 } 1858 1859 // Put the sret parameter into the inalloca struct if it's in memory. 1860 if (Ret.isIndirect() && !Ret.getInReg()) { 1861 CanQualType PtrTy = getContext().getPointerType(FI.getReturnType()); 1862 addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy); 1863 // On Windows, the hidden sret parameter is always returned in eax. 1864 Ret.setInAllocaSRet(IsWin32StructABI); 1865 } 1866 1867 // Skip the 'this' parameter in ecx. 1868 if (IsThisCall) 1869 ++I; 1870 1871 // Put arguments passed in memory into the struct. 1872 for (; I != E; ++I) { 1873 if (isArgInAlloca(I->info)) 1874 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type); 1875 } 1876 1877 FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields, 1878 /*isPacked=*/true), 1879 StackAlign); 1880 } 1881 1882 Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF, 1883 Address VAListAddr, QualType Ty) const { 1884 1885 auto TypeInfo = getContext().getTypeInfoInChars(Ty); 1886 1887 // x86-32 changes the alignment of certain arguments on the stack. 1888 // 1889 // Just messing with TypeInfo like this works because we never pass 1890 // anything indirectly. 1891 TypeInfo.second = CharUnits::fromQuantity( 1892 getTypeStackAlignInBytes(Ty, TypeInfo.second.getQuantity())); 1893 1894 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, 1895 TypeInfo, CharUnits::fromQuantity(4), 1896 /*AllowHigherAlign*/ true); 1897 } 1898 1899 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI( 1900 const llvm::Triple &Triple, const CodeGenOptions &Opts) { 1901 assert(Triple.getArch() == llvm::Triple::x86); 1902 1903 switch (Opts.getStructReturnConvention()) { 1904 case CodeGenOptions::SRCK_Default: 1905 break; 1906 case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return 1907 return false; 1908 case CodeGenOptions::SRCK_InRegs: // -freg-struct-return 1909 return true; 1910 } 1911 1912 if (Triple.isOSDarwin() || Triple.isOSIAMCU()) 1913 return true; 1914 1915 switch (Triple.getOS()) { 1916 case llvm::Triple::DragonFly: 1917 case llvm::Triple::FreeBSD: 1918 case llvm::Triple::OpenBSD: 1919 case llvm::Triple::Win32: 1920 return true; 1921 default: 1922 return false; 1923 } 1924 } 1925 1926 void X86_32TargetCodeGenInfo::setTargetAttributes( 1927 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM, 1928 ForDefinition_t IsForDefinition) const { 1929 if (!IsForDefinition) 1930 return; 1931 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 1932 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { 1933 // Get the LLVM function. 
1934 llvm::Function *Fn = cast<llvm::Function>(GV); 1935 1936 // Now add the 'alignstack' attribute with a value of 16. 1937 llvm::AttrBuilder B; 1938 B.addStackAlignmentAttr(16); 1939 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B); 1940 } 1941 if (FD->hasAttr<AnyX86InterruptAttr>()) { 1942 llvm::Function *Fn = cast<llvm::Function>(GV); 1943 Fn->setCallingConv(llvm::CallingConv::X86_INTR); 1944 } 1945 } 1946 } 1947 1948 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable( 1949 CodeGen::CodeGenFunction &CGF, 1950 llvm::Value *Address) const { 1951 CodeGen::CGBuilderTy &Builder = CGF.Builder; 1952 1953 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 1954 1955 // 0-7 are the eight integer registers; the order is different 1956 // on Darwin (for EH), but the range is the same. 1957 // 8 is %eip. 1958 AssignToArrayRange(Builder, Address, Four8, 0, 8); 1959 1960 if (CGF.CGM.getTarget().getTriple().isOSDarwin()) { 1961 // 12-16 are st(0..4). Not sure why we stop at 4. 1962 // These have size 16, which is sizeof(long double) on 1963 // platforms with 8-byte alignment for that type. 1964 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16); 1965 AssignToArrayRange(Builder, Address, Sixteen8, 12, 16); 1966 1967 } else { 1968 // 9 is %eflags, which doesn't get a size on Darwin for some 1969 // reason. 1970 Builder.CreateAlignedStore( 1971 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9), 1972 CharUnits::One()); 1973 1974 // 11-16 are st(0..5). Not sure why we stop at 5. 1975 // These have size 12, which is sizeof(long double) on 1976 // platforms with 4-byte alignment for that type. 1977 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12); 1978 AssignToArrayRange(Builder, Address, Twelve8, 11, 16); 1979 } 1980 1981 return false; 1982 } 1983 1984 //===----------------------------------------------------------------------===// 1985 // X86-64 ABI Implementation 1986 //===----------------------------------------------------------------------===// 1987 1988 1989 namespace { 1990 /// The AVX ABI level for X86 targets. 1991 enum class X86AVXABILevel { 1992 None, 1993 AVX, 1994 AVX512 1995 }; 1996 1997 /// \p returns the size in bits of the largest (native) vector for \p AVXLevel. 1998 static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) { 1999 switch (AVXLevel) { 2000 case X86AVXABILevel::AVX512: 2001 return 512; 2002 case X86AVXABILevel::AVX: 2003 return 256; 2004 case X86AVXABILevel::None: 2005 return 128; 2006 } 2007 llvm_unreachable("Unknown AVXLevel"); 2008 } 2009 2010 /// X86_64ABIInfo - The X86_64 ABI information. 2011 class X86_64ABIInfo : public SwiftABIInfo { 2012 enum Class { 2013 Integer = 0, 2014 SSE, 2015 SSEUp, 2016 X87, 2017 X87Up, 2018 ComplexX87, 2019 NoClass, 2020 Memory 2021 }; 2022 2023 /// merge - Implement the X86_64 ABI merging algorithm. 2024 /// 2025 /// Merge an accumulating classification \arg Accum with a field 2026 /// classification \arg Field. 2027 /// 2028 /// \param Accum - The accumulating classification. This should 2029 /// always be either NoClass or the result of a previous merge 2030 /// call. In addition, this should never be Memory (the caller 2031 /// should just return Memory for the aggregate). 2032 static Class merge(Class Accum, Class Field); 2033 2034 /// postMerge - Implement the X86_64 ABI post merging algorithm. 2035 /// 2036 /// Post merger cleanup, reduces a malformed Hi and Lo pair to 2037 /// final MEMORY or SSE classes when necessary. 
2038   ///
2039   /// \param AggregateSize - The size of the current aggregate in
2040   /// the classification process.
2041   ///
2042   /// \param Lo - The classification for the parts of the type
2043   /// residing in the low word of the containing object.
2044   ///
2045   /// \param Hi - The classification for the parts of the type
2046   /// residing in the higher words of the containing object.
2047   ///
2048   void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
2049 
2050   /// classify - Determine the x86_64 register classes in which the
2051   /// given type T should be passed.
2052   ///
2053   /// \param Lo - The classification for the parts of the type
2054   /// residing in the low word of the containing object.
2055   ///
2056   /// \param Hi - The classification for the parts of the type
2057   /// residing in the high word of the containing object.
2058   ///
2059   /// \param OffsetBase - The bit offset of this type in the
2060   /// containing object. Some parameters are classified differently
2061   /// depending on whether they straddle an eightbyte boundary.
2062   ///
2063   /// \param isNamedArg - Whether the argument in question is a "named"
2064   /// argument, as used in AMD64-ABI 3.5.7.
2065   ///
2066   /// If a word is unused its result will be NoClass; if a type should
2067   /// be passed in Memory then at least the classification of \arg Lo
2068   /// will be Memory.
2069   ///
2070   /// The \arg Lo class will be NoClass iff the argument is ignored.
2071   ///
2072   /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
2073   /// also be ComplexX87.
2074   void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
2075                 bool isNamedArg) const;
2076 
2077   llvm::Type *GetByteVectorType(QualType Ty) const;
2078   llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
2079                                  unsigned IROffset, QualType SourceTy,
2080                                  unsigned SourceOffset) const;
2081   llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
2082                                      unsigned IROffset, QualType SourceTy,
2083                                      unsigned SourceOffset) const;
2084 
2085   /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
2086   /// result such that the value will be returned in memory.
2087   ABIArgInfo getIndirectReturnResult(QualType Ty) const;
2088 
2089   /// getIndirectResult - Given a source type \arg Ty, return a suitable result
2090   /// such that the argument will be passed in memory.
2091   ///
2092   /// \param freeIntRegs - The number of free integer registers remaining
2093   /// available.
2094   ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
2095 
2096   ABIArgInfo classifyReturnType(QualType RetTy) const;
2097 
2098   ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
2099                                   unsigned &neededInt, unsigned &neededSSE,
2100                                   bool isNamedArg) const;
2101 
2102   ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
2103                                        unsigned &NeededSSE) const;
2104 
2105   ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
2106                                            unsigned &NeededSSE) const;
2107 
2108   bool IsIllegalVectorType(QualType Ty) const;
2109 
2110   /// The 0.98 ABI revision clarified a lot of ambiguities,
2111   /// unfortunately in ways that were not always consistent with
2112   /// certain previous compilers. In particular, platforms which
2113   /// required strict binary compatibility with older versions of GCC
2114   /// may need to exempt themselves.
2115 bool honorsRevision0_98() const { 2116 return !getTarget().getTriple().isOSDarwin(); 2117 } 2118 2119 /// GCC classifies <1 x long long> as SSE but some platform ABIs choose to 2120 /// classify it as INTEGER (for compatibility with older clang compilers). 2121 bool classifyIntegerMMXAsSSE() const { 2122 // Clang <= 3.8 did not do this. 2123 if (getCodeGenOpts().getClangABICompat() <= 2124 CodeGenOptions::ClangABI::Ver3_8) 2125 return false; 2126 2127 const llvm::Triple &Triple = getTarget().getTriple(); 2128 if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4) 2129 return false; 2130 if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10) 2131 return false; 2132 return true; 2133 } 2134 2135 X86AVXABILevel AVXLevel; 2136 // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on 2137 // 64-bit hardware. 2138 bool Has64BitPointers; 2139 2140 public: 2141 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) : 2142 SwiftABIInfo(CGT), AVXLevel(AVXLevel), 2143 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) { 2144 } 2145 2146 bool isPassedUsingAVXType(QualType type) const { 2147 unsigned neededInt, neededSSE; 2148 // The freeIntRegs argument doesn't matter here. 2149 ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE, 2150 /*isNamedArg*/true); 2151 if (info.isDirect()) { 2152 llvm::Type *ty = info.getCoerceToType(); 2153 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty)) 2154 return (vectorTy->getBitWidth() > 128); 2155 } 2156 return false; 2157 } 2158 2159 void computeInfo(CGFunctionInfo &FI) const override; 2160 2161 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 2162 QualType Ty) const override; 2163 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, 2164 QualType Ty) const override; 2165 2166 bool has64BitPointers() const { 2167 return Has64BitPointers; 2168 } 2169 2170 bool shouldPassIndirectlyForSwift(CharUnits totalSize, 2171 ArrayRef<llvm::Type*> scalars, 2172 bool asReturnValue) const override { 2173 return occupiesMoreThan(CGT, scalars, /*total*/ 4); 2174 } 2175 bool isSwiftErrorInRegister() const override { 2176 return true; 2177 } 2178 }; 2179 2180 /// WinX86_64ABIInfo - The Windows X86_64 ABI information. 2181 class WinX86_64ABIInfo : public SwiftABIInfo { 2182 public: 2183 WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) 2184 : SwiftABIInfo(CGT), 2185 IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {} 2186 2187 void computeInfo(CGFunctionInfo &FI) const override; 2188 2189 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 2190 QualType Ty) const override; 2191 2192 bool isHomogeneousAggregateBaseType(QualType Ty) const override { 2193 // FIXME: Assumes vectorcall is in use. 2194 return isX86VectorTypeForVectorCall(getContext(), Ty); 2195 } 2196 2197 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 2198 uint64_t NumMembers) const override { 2199 // FIXME: Assumes vectorcall is in use. 
2200     return isX86VectorCallAggregateSmallEnough(NumMembers);
2201   }
2202 
2203   bool shouldPassIndirectlyForSwift(CharUnits totalSize,
2204                                     ArrayRef<llvm::Type *> scalars,
2205                                     bool asReturnValue) const override {
2206     return occupiesMoreThan(CGT, scalars, /*total*/ 4);
2207   }
2208 
2209   bool isSwiftErrorInRegister() const override {
2210     return true;
2211   }
2212 
2213 private:
2214   ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,
2215                       bool IsVectorCall, bool IsRegCall) const;
2216   ABIArgInfo reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,
2217                                   const ABIArgInfo &current) const;
2218   void computeVectorCallArgs(CGFunctionInfo &FI, unsigned FreeSSERegs,
2219                              bool IsVectorCall, bool IsRegCall) const;
2220 
2221   bool IsMingw64;
2222 };
2223 
2224 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2225 public:
2226   X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2227       : TargetCodeGenInfo(new X86_64ABIInfo(CGT, AVXLevel)) {}
2228 
2229   const X86_64ABIInfo &getABIInfo() const {
2230     return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
2231   }
2232 
2233   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2234     return 7;
2235   }
2236 
2237   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2238                                llvm::Value *Address) const override {
2239     llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2240 
2241     // 0-15 are the 16 integer registers.
2242     // 16 is %rip.
2243     AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2244     return false;
2245   }
2246 
2247   llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
2248                                   StringRef Constraint,
2249                                   llvm::Type* Ty) const override {
2250     return X86AdjustInlineAsmType(CGF, Constraint, Ty);
2251   }
2252 
2253   bool isNoProtoCallVariadic(const CallArgList &args,
2254                              const FunctionNoProtoType *fnType) const override {
2255     // The default CC on x86-64 sets %al to the number of SSE
2256     // registers used, and GCC sets this when calling an unprototyped
2257     // function, so we override the default behavior. However, don't do
2258     // that when AVX types are involved: the ABI explicitly states it is
2259     // undefined, and it doesn't work in practice because of how the ABI
2260     // defines varargs anyway.
2261     if (fnType->getCallConv() == CC_C) {
2262       bool HasAVXType = false;
2263       for (CallArgList::const_iterator
2264              it = args.begin(), ie = args.end(); it != ie; ++it) {
2265         if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
2266           HasAVXType = true;
2267           break;
2268         }
2269       }
2270 
2271       if (!HasAVXType)
2272         return true;
2273     }
2274 
2275     return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
2276   }
2277 
2278   llvm::Constant *
2279   getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
2280     unsigned Sig;
2281     if (getABIInfo().has64BitPointers())
2282       Sig = (0xeb << 0) | // jmp rel8
2283             (0x0a << 8) | // .+0x0c
2284             ('F' << 16) |
2285             ('T' << 24);
2286     else
2287       Sig = (0xeb << 0) | // jmp rel8
2288             (0x06 << 8) | // .+0x08
2289             ('F' << 16) |
2290             ('T' << 24);
2291     return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
2292   }
2293 
2294   void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2295                            CodeGen::CodeGenModule &CGM,
2296                            ForDefinition_t IsForDefinition) const override {
2297     if (!IsForDefinition)
2298       return;
2299     if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2300       if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2301         // Get the LLVM function.
2302 auto *Fn = cast<llvm::Function>(GV); 2303 2304 // Now add the 'alignstack' attribute with a value of 16. 2305 llvm::AttrBuilder B; 2306 B.addStackAlignmentAttr(16); 2307 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B); 2308 } 2309 if (FD->hasAttr<AnyX86InterruptAttr>()) { 2310 llvm::Function *Fn = cast<llvm::Function>(GV); 2311 Fn->setCallingConv(llvm::CallingConv::X86_INTR); 2312 } 2313 } 2314 } 2315 }; 2316 2317 class PS4TargetCodeGenInfo : public X86_64TargetCodeGenInfo { 2318 public: 2319 PS4TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) 2320 : X86_64TargetCodeGenInfo(CGT, AVXLevel) {} 2321 2322 void getDependentLibraryOption(llvm::StringRef Lib, 2323 llvm::SmallString<24> &Opt) const override { 2324 Opt = "\01"; 2325 // If the argument contains a space, enclose it in quotes. 2326 if (Lib.find(" ") != StringRef::npos) 2327 Opt += "\"" + Lib.str() + "\""; 2328 else 2329 Opt += Lib; 2330 } 2331 }; 2332 2333 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) { 2334 // If the argument does not end in .lib, automatically add the suffix. 2335 // If the argument contains a space, enclose it in quotes. 2336 // This matches the behavior of MSVC. 2337 bool Quote = (Lib.find(" ") != StringRef::npos); 2338 std::string ArgStr = Quote ? "\"" : ""; 2339 ArgStr += Lib; 2340 if (!Lib.endswith_lower(".lib")) 2341 ArgStr += ".lib"; 2342 ArgStr += Quote ? "\"" : ""; 2343 return ArgStr; 2344 } 2345 2346 class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo { 2347 public: 2348 WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, 2349 bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI, 2350 unsigned NumRegisterParameters) 2351 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI, 2352 Win32StructABI, NumRegisterParameters, false) {} 2353 2354 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2355 CodeGen::CodeGenModule &CGM, 2356 ForDefinition_t IsForDefinition) const override; 2357 2358 void getDependentLibraryOption(llvm::StringRef Lib, 2359 llvm::SmallString<24> &Opt) const override { 2360 Opt = "/DEFAULTLIB:"; 2361 Opt += qualifyWindowsLibrary(Lib); 2362 } 2363 2364 void getDetectMismatchOption(llvm::StringRef Name, 2365 llvm::StringRef Value, 2366 llvm::SmallString<32> &Opt) const override { 2367 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 2368 } 2369 }; 2370 2371 static void addStackProbeSizeTargetAttribute(const Decl *D, 2372 llvm::GlobalValue *GV, 2373 CodeGen::CodeGenModule &CGM) { 2374 if (D && isa<FunctionDecl>(D)) { 2375 if (CGM.getCodeGenOpts().StackProbeSize != 4096) { 2376 llvm::Function *Fn = cast<llvm::Function>(GV); 2377 2378 Fn->addFnAttr("stack-probe-size", 2379 llvm::utostr(CGM.getCodeGenOpts().StackProbeSize)); 2380 } 2381 } 2382 } 2383 2384 void WinX86_32TargetCodeGenInfo::setTargetAttributes( 2385 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM, 2386 ForDefinition_t IsForDefinition) const { 2387 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM, IsForDefinition); 2388 if (!IsForDefinition) 2389 return; 2390 addStackProbeSizeTargetAttribute(D, GV, CGM); 2391 } 2392 2393 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo { 2394 public: 2395 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, 2396 X86AVXABILevel AVXLevel) 2397 : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {} 2398 2399 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2400 CodeGen::CodeGenModule &CGM, 2401 ForDefinition_t 
IsForDefinition) const override; 2402 2403 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { 2404 return 7; 2405 } 2406 2407 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2408 llvm::Value *Address) const override { 2409 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8); 2410 2411 // 0-15 are the 16 integer registers. 2412 // 16 is %rip. 2413 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16); 2414 return false; 2415 } 2416 2417 void getDependentLibraryOption(llvm::StringRef Lib, 2418 llvm::SmallString<24> &Opt) const override { 2419 Opt = "/DEFAULTLIB:"; 2420 Opt += qualifyWindowsLibrary(Lib); 2421 } 2422 2423 void getDetectMismatchOption(llvm::StringRef Name, 2424 llvm::StringRef Value, 2425 llvm::SmallString<32> &Opt) const override { 2426 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 2427 } 2428 }; 2429 2430 void WinX86_64TargetCodeGenInfo::setTargetAttributes( 2431 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM, 2432 ForDefinition_t IsForDefinition) const { 2433 TargetCodeGenInfo::setTargetAttributes(D, GV, CGM, IsForDefinition); 2434 if (!IsForDefinition) 2435 return; 2436 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 2437 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { 2438 // Get the LLVM function. 2439 auto *Fn = cast<llvm::Function>(GV); 2440 2441 // Now add the 'alignstack' attribute with a value of 16. 2442 llvm::AttrBuilder B; 2443 B.addStackAlignmentAttr(16); 2444 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B); 2445 } 2446 if (FD->hasAttr<AnyX86InterruptAttr>()) { 2447 llvm::Function *Fn = cast<llvm::Function>(GV); 2448 Fn->setCallingConv(llvm::CallingConv::X86_INTR); 2449 } 2450 } 2451 2452 addStackProbeSizeTargetAttribute(D, GV, CGM); 2453 } 2454 } 2455 2456 void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo, 2457 Class &Hi) const { 2458 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done: 2459 // 2460 // (a) If one of the classes is Memory, the whole argument is passed in 2461 // memory. 2462 // 2463 // (b) If X87UP is not preceded by X87, the whole argument is passed in 2464 // memory. 2465 // 2466 // (c) If the size of the aggregate exceeds two eightbytes and the first 2467 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole 2468 // argument is passed in memory. NOTE: This is necessary to keep the 2469 // ABI working for processors that don't support the __m256 type. 2470 // 2471 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE. 2472 // 2473 // Some of these are enforced by the merging logic. Others can arise 2474 // only with unions; for example: 2475 // union { _Complex double; unsigned; } 2476 // 2477 // Note that clauses (b) and (c) were added in 0.98. 2478 // 2479 if (Hi == Memory) 2480 Lo = Memory; 2481 if (Hi == X87Up && Lo != X87 && honorsRevision0_98()) 2482 Lo = Memory; 2483 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp)) 2484 Lo = Memory; 2485 if (Hi == SSEUp && Lo != SSE) 2486 Hi = SSE; 2487 } 2488 2489 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) { 2490 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is 2491 // classified recursively so that always two fields are 2492 // considered. The resulting class is calculated according to 2493 // the classes of the fields in the eightbyte: 2494 // 2495 // (a) If both classes are equal, this is the resulting class. 
2496 // 2497 // (b) If one of the classes is NO_CLASS, the resulting class is 2498 // the other class. 2499 // 2500 // (c) If one of the classes is MEMORY, the result is the MEMORY 2501 // class. 2502 // 2503 // (d) If one of the classes is INTEGER, the result is the 2504 // INTEGER. 2505 // 2506 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class, 2507 // MEMORY is used as class. 2508 // 2509 // (f) Otherwise class SSE is used. 2510 2511 // Accum should never be memory (we should have returned) or 2512 // ComplexX87 (because this cannot be passed in a structure). 2513 assert((Accum != Memory && Accum != ComplexX87) && 2514 "Invalid accumulated classification during merge."); 2515 if (Accum == Field || Field == NoClass) 2516 return Accum; 2517 if (Field == Memory) 2518 return Memory; 2519 if (Accum == NoClass) 2520 return Field; 2521 if (Accum == Integer || Field == Integer) 2522 return Integer; 2523 if (Field == X87 || Field == X87Up || Field == ComplexX87 || 2524 Accum == X87 || Accum == X87Up) 2525 return Memory; 2526 return SSE; 2527 } 2528 2529 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, 2530 Class &Lo, Class &Hi, bool isNamedArg) const { 2531 // FIXME: This code can be simplified by introducing a simple value class for 2532 // Class pairs with appropriate constructor methods for the various 2533 // situations. 2534 2535 // FIXME: Some of the split computations are wrong; unaligned vectors 2536 // shouldn't be passed in registers for example, so there is no chance they 2537 // can straddle an eightbyte. Verify & simplify. 2538 2539 Lo = Hi = NoClass; 2540 2541 Class &Current = OffsetBase < 64 ? Lo : Hi; 2542 Current = Memory; 2543 2544 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 2545 BuiltinType::Kind k = BT->getKind(); 2546 2547 if (k == BuiltinType::Void) { 2548 Current = NoClass; 2549 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) { 2550 Lo = Integer; 2551 Hi = Integer; 2552 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) { 2553 Current = Integer; 2554 } else if (k == BuiltinType::Float || k == BuiltinType::Double) { 2555 Current = SSE; 2556 } else if (k == BuiltinType::LongDouble) { 2557 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); 2558 if (LDF == &llvm::APFloat::IEEEquad()) { 2559 Lo = SSE; 2560 Hi = SSEUp; 2561 } else if (LDF == &llvm::APFloat::x87DoubleExtended()) { 2562 Lo = X87; 2563 Hi = X87Up; 2564 } else if (LDF == &llvm::APFloat::IEEEdouble()) { 2565 Current = SSE; 2566 } else 2567 llvm_unreachable("unexpected long double representation!"); 2568 } 2569 // FIXME: _Decimal32 and _Decimal64 are SSE. 2570 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). 2571 return; 2572 } 2573 2574 if (const EnumType *ET = Ty->getAs<EnumType>()) { 2575 // Classify the underlying integer type. 2576 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg); 2577 return; 2578 } 2579 2580 if (Ty->hasPointerRepresentation()) { 2581 Current = Integer; 2582 return; 2583 } 2584 2585 if (Ty->isMemberPointerType()) { 2586 if (Ty->isMemberFunctionPointerType()) { 2587 if (Has64BitPointers) { 2588 // If Has64BitPointers, this is an {i64, i64}, so classify both 2589 // Lo and Hi now. 2590 Lo = Hi = Integer; 2591 } else { 2592 // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that 2593 // straddles an eightbyte boundary, Hi should be classified as well. 
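        // Worked example (added comment, not from the original source): with
        // 32-bit pointers, a member function pointer at OffsetBase 32 occupies
        // bits [32, 96), so EB_FuncPtr below is 0 while EB_ThisAdj is 1; the
        // pair straddles an eightbyte boundary and both halves become Integer.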
2594 uint64_t EB_FuncPtr = (OffsetBase) / 64; 2595 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64; 2596 if (EB_FuncPtr != EB_ThisAdj) { 2597 Lo = Hi = Integer; 2598 } else { 2599 Current = Integer; 2600 } 2601 } 2602 } else { 2603 Current = Integer; 2604 } 2605 return; 2606 } 2607 2608 if (const VectorType *VT = Ty->getAs<VectorType>()) { 2609 uint64_t Size = getContext().getTypeSize(VT); 2610 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) { 2611 // gcc passes the following as integer: 2612 // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float> 2613 // 2 bytes - <2 x char>, <1 x short> 2614 // 1 byte - <1 x char> 2615 Current = Integer; 2616 2617 // If this type crosses an eightbyte boundary, it should be 2618 // split. 2619 uint64_t EB_Lo = (OffsetBase) / 64; 2620 uint64_t EB_Hi = (OffsetBase + Size - 1) / 64; 2621 if (EB_Lo != EB_Hi) 2622 Hi = Lo; 2623 } else if (Size == 64) { 2624 QualType ElementType = VT->getElementType(); 2625 2626 // gcc passes <1 x double> in memory. :( 2627 if (ElementType->isSpecificBuiltinType(BuiltinType::Double)) 2628 return; 2629 2630 // gcc passes <1 x long long> as SSE but clang used to unconditionally 2631 // pass them as integer. For platforms where clang is the de facto 2632 // platform compiler, we must continue to use integer. 2633 if (!classifyIntegerMMXAsSSE() && 2634 (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) || 2635 ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) || 2636 ElementType->isSpecificBuiltinType(BuiltinType::Long) || 2637 ElementType->isSpecificBuiltinType(BuiltinType::ULong))) 2638 Current = Integer; 2639 else 2640 Current = SSE; 2641 2642 // If this type crosses an eightbyte boundary, it should be 2643 // split. 2644 if (OffsetBase && OffsetBase != 64) 2645 Hi = Lo; 2646 } else if (Size == 128 || 2647 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) { 2648 // Arguments of 256-bits are split into four eightbyte chunks. The 2649 // least significant one belongs to class SSE and all the others to class 2650 // SSEUP. The original Lo and Hi design considers that types can't be 2651 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense. 2652 // This design isn't correct for 256-bits, but since there're no cases 2653 // where the upper parts would need to be inspected, avoid adding 2654 // complexity and just consider Hi to match the 64-256 part. 2655 // 2656 // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in 2657 // registers if they are "named", i.e. not part of the "..." of a 2658 // variadic function. 2659 // 2660 // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are 2661 // split into eight eightbyte chunks, one SSE and seven SSEUP. 
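      // Illustrative example (added comment, not from the original source):
      // assuming an AVX ABI level of at least AVX, a named __m256 argument is
      // classified below as Lo = SSE, Hi = SSEUp and, when vector registers
      // are still available, is later passed directly in a single YMM register
      // rather than in memory.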
2662 Lo = SSE; 2663 Hi = SSEUp; 2664 } 2665 return; 2666 } 2667 2668 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 2669 QualType ET = getContext().getCanonicalType(CT->getElementType()); 2670 2671 uint64_t Size = getContext().getTypeSize(Ty); 2672 if (ET->isIntegralOrEnumerationType()) { 2673 if (Size <= 64) 2674 Current = Integer; 2675 else if (Size <= 128) 2676 Lo = Hi = Integer; 2677 } else if (ET == getContext().FloatTy) { 2678 Current = SSE; 2679 } else if (ET == getContext().DoubleTy) { 2680 Lo = Hi = SSE; 2681 } else if (ET == getContext().LongDoubleTy) { 2682 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); 2683 if (LDF == &llvm::APFloat::IEEEquad()) 2684 Current = Memory; 2685 else if (LDF == &llvm::APFloat::x87DoubleExtended()) 2686 Current = ComplexX87; 2687 else if (LDF == &llvm::APFloat::IEEEdouble()) 2688 Lo = Hi = SSE; 2689 else 2690 llvm_unreachable("unexpected long double representation!"); 2691 } 2692 2693 // If this complex type crosses an eightbyte boundary then it 2694 // should be split. 2695 uint64_t EB_Real = (OffsetBase) / 64; 2696 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64; 2697 if (Hi == NoClass && EB_Real != EB_Imag) 2698 Hi = Lo; 2699 2700 return; 2701 } 2702 2703 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 2704 // Arrays are treated like structures. 2705 2706 uint64_t Size = getContext().getTypeSize(Ty); 2707 2708 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 2709 // than eight eightbytes, ..., it has class MEMORY. 2710 if (Size > 512) 2711 return; 2712 2713 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned 2714 // fields, it has class MEMORY. 2715 // 2716 // Only need to check alignment of array base. 2717 if (OffsetBase % getContext().getTypeAlign(AT->getElementType())) 2718 return; 2719 2720 // Otherwise implement simplified merge. We could be smarter about 2721 // this, but it isn't worth it and would be harder to verify. 2722 Current = NoClass; 2723 uint64_t EltSize = getContext().getTypeSize(AT->getElementType()); 2724 uint64_t ArraySize = AT->getSize().getZExtValue(); 2725 2726 // The only case a 256-bit wide vector could be used is when the array 2727 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 2728 // to work for sizes wider than 128, early check and fallback to memory. 2729 // 2730 if (Size > 128 && 2731 (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel))) 2732 return; 2733 2734 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) { 2735 Class FieldLo, FieldHi; 2736 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg); 2737 Lo = merge(Lo, FieldLo); 2738 Hi = merge(Hi, FieldHi); 2739 if (Lo == Memory || Hi == Memory) 2740 break; 2741 } 2742 2743 postMerge(Size, Lo, Hi); 2744 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification."); 2745 return; 2746 } 2747 2748 if (const RecordType *RT = Ty->getAs<RecordType>()) { 2749 uint64_t Size = getContext().getTypeSize(Ty); 2750 2751 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 2752 // than eight eightbytes, ..., it has class MEMORY. 2753 if (Size > 512) 2754 return; 2755 2756 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial 2757 // copy constructor or a non-trivial destructor, it is passed by invisible 2758 // reference. 
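    // Illustrative example (added comment, not from the original source):
    //   struct S { S(const S &); int X; };
    // has a non-trivial copy constructor, so under the Itanium C++ ABI the
    // check below fires and classification returns with the eightbyte still
    // marked Memory, i.e. the object is passed by invisible reference.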
2759 if (getRecordArgABI(RT, getCXXABI())) 2760 return; 2761 2762 const RecordDecl *RD = RT->getDecl(); 2763 2764 // Assume variable sized types are passed in memory. 2765 if (RD->hasFlexibleArrayMember()) 2766 return; 2767 2768 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 2769 2770 // Reset Lo class, this will be recomputed. 2771 Current = NoClass; 2772 2773 // If this is a C++ record, classify the bases first. 2774 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 2775 for (const auto &I : CXXRD->bases()) { 2776 assert(!I.isVirtual() && !I.getType()->isDependentType() && 2777 "Unexpected base class!"); 2778 const CXXRecordDecl *Base = 2779 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl()); 2780 2781 // Classify this field. 2782 // 2783 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a 2784 // single eightbyte, each is classified separately. Each eightbyte gets 2785 // initialized to class NO_CLASS. 2786 Class FieldLo, FieldHi; 2787 uint64_t Offset = 2788 OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base)); 2789 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg); 2790 Lo = merge(Lo, FieldLo); 2791 Hi = merge(Hi, FieldHi); 2792 if (Lo == Memory || Hi == Memory) { 2793 postMerge(Size, Lo, Hi); 2794 return; 2795 } 2796 } 2797 } 2798 2799 // Classify the fields one at a time, merging the results. 2800 unsigned idx = 0; 2801 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 2802 i != e; ++i, ++idx) { 2803 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 2804 bool BitField = i->isBitField(); 2805 2806 // Ignore padding bit-fields. 2807 if (BitField && i->isUnnamedBitfield()) 2808 continue; 2809 2810 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than 2811 // four eightbytes, or it contains unaligned fields, it has class MEMORY. 2812 // 2813 // The only case a 256-bit wide vector could be used is when the struct 2814 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 2815 // to work for sizes wider than 128, early check and fallback to memory. 2816 // 2817 if (Size > 128 && (Size != getContext().getTypeSize(i->getType()) || 2818 Size > getNativeVectorSizeForAVXABI(AVXLevel))) { 2819 Lo = Memory; 2820 postMerge(Size, Lo, Hi); 2821 return; 2822 } 2823 // Note, skip this test for bit-fields, see below. 2824 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) { 2825 Lo = Memory; 2826 postMerge(Size, Lo, Hi); 2827 return; 2828 } 2829 2830 // Classify this field. 2831 // 2832 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate 2833 // exceeds a single eightbyte, each is classified 2834 // separately. Each eightbyte gets initialized to class 2835 // NO_CLASS. 2836 Class FieldLo, FieldHi; 2837 2838 // Bit-fields require special handling, they do not force the 2839 // structure to be passed in memory even if unaligned, and 2840 // therefore they can straddle an eightbyte. 2841 if (BitField) { 2842 assert(!i->isUnnamedBitfield()); 2843 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 2844 uint64_t Size = i->getBitWidthValue(getContext()); 2845 2846 uint64_t EB_Lo = Offset / 64; 2847 uint64_t EB_Hi = (Offset + Size - 1) / 64; 2848 2849 if (EB_Lo) { 2850 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes."); 2851 FieldLo = NoClass; 2852 FieldHi = Integer; 2853 } else { 2854 FieldLo = Integer; 2855 FieldHi = EB_Hi ? 
Integer : NoClass; 2856 } 2857 } else 2858 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg); 2859 Lo = merge(Lo, FieldLo); 2860 Hi = merge(Hi, FieldHi); 2861 if (Lo == Memory || Hi == Memory) 2862 break; 2863 } 2864 2865 postMerge(Size, Lo, Hi); 2866 } 2867 } 2868 2869 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const { 2870 // If this is a scalar LLVM value then assume LLVM will pass it in the right 2871 // place naturally. 2872 if (!isAggregateTypeForABI(Ty)) { 2873 // Treat an enum type as its underlying type. 2874 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2875 Ty = EnumTy->getDecl()->getIntegerType(); 2876 2877 return (Ty->isPromotableIntegerType() ? 2878 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2879 } 2880 2881 return getNaturalAlignIndirect(Ty); 2882 } 2883 2884 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const { 2885 if (const VectorType *VecTy = Ty->getAs<VectorType>()) { 2886 uint64_t Size = getContext().getTypeSize(VecTy); 2887 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel); 2888 if (Size <= 64 || Size > LargestVector) 2889 return true; 2890 } 2891 2892 return false; 2893 } 2894 2895 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty, 2896 unsigned freeIntRegs) const { 2897 // If this is a scalar LLVM value then assume LLVM will pass it in the right 2898 // place naturally. 2899 // 2900 // This assumption is optimistic, as there could be free registers available 2901 // when we need to pass this argument in memory, and LLVM could try to pass 2902 // the argument in the free register. This does not seem to happen currently, 2903 // but this code would be much safer if we could mark the argument with 2904 // 'onstack'. See PR12193. 2905 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) { 2906 // Treat an enum type as its underlying type. 2907 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2908 Ty = EnumTy->getDecl()->getIntegerType(); 2909 2910 return (Ty->isPromotableIntegerType() ? 2911 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2912 } 2913 2914 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 2915 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 2916 2917 // Compute the byval alignment. We specify the alignment of the byval in all 2918 // cases so that the mid-level optimizer knows the alignment of the byval. 2919 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U); 2920 2921 // Attempt to avoid passing indirect results using byval when possible. This 2922 // is important for good codegen. 2923 // 2924 // We do this by coercing the value into a scalar type which the backend can 2925 // handle naturally (i.e., without using byval). 2926 // 2927 // For simplicity, we currently only do this when we have exhausted all of the 2928 // free integer registers. Doing this when there are free integer registers 2929 // would require more care, as we would have to ensure that the coerced value 2930 // did not claim the unused register. That would require either reording the 2931 // arguments to the function (so that any subsequent inreg values came first), 2932 // or only doing this optimization when there were no following arguments that 2933 // might be inreg. 
2934 // 2935 // We currently expect it to be rare (particularly in well written code) for 2936 // arguments to be passed on the stack when there are still free integer 2937 // registers available (this would typically imply large structs being passed 2938 // by value), so this seems like a fair tradeoff for now. 2939 // 2940 // We can revisit this if the backend grows support for 'onstack' parameter 2941 // attributes. See PR12193. 2942 if (freeIntRegs == 0) { 2943 uint64_t Size = getContext().getTypeSize(Ty); 2944 2945 // If this type fits in an eightbyte, coerce it into the matching integral 2946 // type, which will end up on the stack (with alignment 8). 2947 if (Align == 8 && Size <= 64) 2948 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2949 Size)); 2950 } 2951 2952 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align)); 2953 } 2954 2955 /// The ABI specifies that a value should be passed in a full vector XMM/YMM 2956 /// register. Pick an LLVM IR type that will be passed as a vector register. 2957 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const { 2958 // Wrapper structs/arrays that only contain vectors are passed just like 2959 // vectors; strip them off if present. 2960 if (const Type *InnerTy = isSingleElementStruct(Ty, getContext())) 2961 Ty = QualType(InnerTy, 0); 2962 2963 llvm::Type *IRType = CGT.ConvertType(Ty); 2964 if (isa<llvm::VectorType>(IRType) || 2965 IRType->getTypeID() == llvm::Type::FP128TyID) 2966 return IRType; 2967 2968 // We couldn't find the preferred IR vector type for 'Ty'. 2969 uint64_t Size = getContext().getTypeSize(Ty); 2970 assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!"); 2971 2972 // Return a LLVM IR vector type based on the size of 'Ty'. 2973 return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2974 Size / 64); 2975 } 2976 2977 /// BitsContainNoUserData - Return true if the specified [start,end) bit range 2978 /// is known to either be off the end of the specified type or being in 2979 /// alignment padding. The user type specified is known to be at most 128 bits 2980 /// in size, and have passed through X86_64ABIInfo::classify with a successful 2981 /// classification that put one of the two halves in the INTEGER class. 2982 /// 2983 /// It is conservatively correct to return false. 2984 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, 2985 unsigned EndBit, ASTContext &Context) { 2986 // If the bytes being queried are off the end of the type, there is no user 2987 // data hiding here. This handles analysis of builtins, vectors and other 2988 // types that don't contain interesting padding. 2989 unsigned TySize = (unsigned)Context.getTypeSize(Ty); 2990 if (TySize <= StartBit) 2991 return true; 2992 2993 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 2994 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType()); 2995 unsigned NumElts = (unsigned)AT->getSize().getZExtValue(); 2996 2997 // Check each element to see if the element overlaps with the queried range. 2998 for (unsigned i = 0; i != NumElts; ++i) { 2999 // If the element is after the span we care about, then we're done.. 3000 unsigned EltOffset = i*EltSize; 3001 if (EltOffset >= EndBit) break; 3002 3003 unsigned EltStart = EltOffset < StartBit ? 
StartBit-EltOffset :0; 3004 if (!BitsContainNoUserData(AT->getElementType(), EltStart, 3005 EndBit-EltOffset, Context)) 3006 return false; 3007 } 3008 // If it overlaps no elements, then it is safe to process as padding. 3009 return true; 3010 } 3011 3012 if (const RecordType *RT = Ty->getAs<RecordType>()) { 3013 const RecordDecl *RD = RT->getDecl(); 3014 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 3015 3016 // If this is a C++ record, check the bases first. 3017 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 3018 for (const auto &I : CXXRD->bases()) { 3019 assert(!I.isVirtual() && !I.getType()->isDependentType() && 3020 "Unexpected base class!"); 3021 const CXXRecordDecl *Base = 3022 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl()); 3023 3024 // If the base is after the span we care about, ignore it. 3025 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base)); 3026 if (BaseOffset >= EndBit) continue; 3027 3028 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0; 3029 if (!BitsContainNoUserData(I.getType(), BaseStart, 3030 EndBit-BaseOffset, Context)) 3031 return false; 3032 } 3033 } 3034 3035 // Verify that no field has data that overlaps the region of interest. Yes 3036 // this could be sped up a lot by being smarter about queried fields, 3037 // however we're only looking at structs up to 16 bytes, so we don't care 3038 // much. 3039 unsigned idx = 0; 3040 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 3041 i != e; ++i, ++idx) { 3042 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx); 3043 3044 // If we found a field after the region we care about, then we're done. 3045 if (FieldOffset >= EndBit) break; 3046 3047 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0; 3048 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset, 3049 Context)) 3050 return false; 3051 } 3052 3053 // If nothing in this record overlapped the area of interest, then we're 3054 // clean. 3055 return true; 3056 } 3057 3058 return false; 3059 } 3060 3061 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a 3062 /// float member at the specified offset. For example, {int,{float}} has a 3063 /// float at offset 4. It is conservatively correct for this routine to return 3064 /// false. 3065 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, 3066 const llvm::DataLayout &TD) { 3067 // Base case if we find a float. 3068 if (IROffset == 0 && IRType->isFloatTy()) 3069 return true; 3070 3071 // If this is a struct, recurse into the field at the specified offset. 3072 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 3073 const llvm::StructLayout *SL = TD.getStructLayout(STy); 3074 unsigned Elt = SL->getElementContainingOffset(IROffset); 3075 IROffset -= SL->getElementOffset(Elt); 3076 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD); 3077 } 3078 3079 // If this is an array, recurse into the field at the specified offset. 3080 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 3081 llvm::Type *EltTy = ATy->getElementType(); 3082 unsigned EltSize = TD.getTypeAllocSize(EltTy); 3083 IROffset -= IROffset/EltSize*EltSize; 3084 return ContainsFloatAtOffset(EltTy, IROffset, TD); 3085 } 3086 3087 return false; 3088 } 3089 3090 3091 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the 3092 /// low 8 bytes of an XMM register, corresponding to the SSE class. 
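/// For example (added illustration, not part of the original comment): for
/// struct { float a, b; } this returns <2 x float>; for a struct whose last
/// four bytes in the eightbyte are only padding it returns float; and for
/// struct { double d; } it returns double.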
3093 llvm::Type *X86_64ABIInfo:: 3094 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset, 3095 QualType SourceTy, unsigned SourceOffset) const { 3096 // The only three choices we have are either double, <2 x float>, or float. We 3097 // pass as float if the last 4 bytes is just padding. This happens for 3098 // structs that contain 3 floats. 3099 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32, 3100 SourceOffset*8+64, getContext())) 3101 return llvm::Type::getFloatTy(getVMContext()); 3102 3103 // We want to pass as <2 x float> if the LLVM IR type contains a float at 3104 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the 3105 // case. 3106 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) && 3107 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout())) 3108 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2); 3109 3110 return llvm::Type::getDoubleTy(getVMContext()); 3111 } 3112 3113 3114 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in 3115 /// an 8-byte GPR. This means that we either have a scalar or we are talking 3116 /// about the high or low part of an up-to-16-byte struct. This routine picks 3117 /// the best LLVM IR type to represent this, which may be i64 or may be anything 3118 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*, 3119 /// etc). 3120 /// 3121 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for 3122 /// the source type. IROffset is an offset in bytes into the LLVM IR type that 3123 /// the 8-byte value references. PrefType may be null. 3124 /// 3125 /// SourceTy is the source-level type for the entire argument. SourceOffset is 3126 /// an offset into this that we're processing (which is always either 0 or 8). 3127 /// 3128 llvm::Type *X86_64ABIInfo:: 3129 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset, 3130 QualType SourceTy, unsigned SourceOffset) const { 3131 // If we're dealing with an un-offset LLVM IR type, then it means that we're 3132 // returning an 8-byte unit starting with it. See if we can safely use it. 3133 if (IROffset == 0) { 3134 // Pointers and int64's always fill the 8-byte unit. 3135 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) || 3136 IRType->isIntegerTy(64)) 3137 return IRType; 3138 3139 // If we have a 1/2/4-byte integer, we can use it only if the rest of the 3140 // goodness in the source type is just tail padding. This is allowed to 3141 // kick in for struct {double,int} on the int, but not on 3142 // struct{double,int,int} because we wouldn't return the second int. We 3143 // have to do this analysis on the source type because we can't depend on 3144 // unions being lowered a specific way etc. 3145 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) || 3146 IRType->isIntegerTy(32) || 3147 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) { 3148 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 : 3149 cast<llvm::IntegerType>(IRType)->getBitWidth(); 3150 3151 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth, 3152 SourceOffset*8+64, getContext())) 3153 return IRType; 3154 } 3155 } 3156 3157 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 3158 // If this is a struct, recurse into the field at the specified offset. 
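    // Illustrative example (added comment, not from the original source): for
    // struct { double D; int I; } the high part is processed with
    // IROffset == 8; the struct element containing byte 8 is the i32 field, so
    // the recursion below returns i32 for the second eightbyte (the same
    // struct {double,int} case mentioned above).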
3159 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy); 3160 if (IROffset < SL->getSizeInBytes()) { 3161 unsigned FieldIdx = SL->getElementContainingOffset(IROffset); 3162 IROffset -= SL->getElementOffset(FieldIdx); 3163 3164 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset, 3165 SourceTy, SourceOffset); 3166 } 3167 } 3168 3169 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 3170 llvm::Type *EltTy = ATy->getElementType(); 3171 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy); 3172 unsigned EltOffset = IROffset/EltSize*EltSize; 3173 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy, 3174 SourceOffset); 3175 } 3176 3177 // Okay, we don't have any better idea of what to pass, so we pass this in an 3178 // integer register that isn't too big to fit the rest of the struct. 3179 unsigned TySizeInBytes = 3180 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity(); 3181 3182 assert(TySizeInBytes != SourceOffset && "Empty field?"); 3183 3184 // It is always safe to classify this as an integer type up to i64 that 3185 // isn't larger than the structure. 3186 return llvm::IntegerType::get(getVMContext(), 3187 std::min(TySizeInBytes-SourceOffset, 8U)*8); 3188 } 3189 3190 3191 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally 3192 /// be used as elements of a two register pair to pass or return, return a 3193 /// first class aggregate to represent them. For example, if the low part of 3194 /// a by-value argument should be passed as i32* and the high part as float, 3195 /// return {i32*, float}. 3196 static llvm::Type * 3197 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, 3198 const llvm::DataLayout &TD) { 3199 // In order to correctly satisfy the ABI, we need to the high part to start 3200 // at offset 8. If the high and low parts we inferred are both 4-byte types 3201 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have 3202 // the second element at offset 8. Check for this: 3203 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo); 3204 unsigned HiAlign = TD.getABITypeAlignment(Hi); 3205 unsigned HiStart = llvm::alignTo(LoSize, HiAlign); 3206 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!"); 3207 3208 // To handle this, we have to increase the size of the low part so that the 3209 // second element will start at an 8 byte offset. We can't increase the size 3210 // of the second element because it might make us access off the end of the 3211 // struct. 3212 if (HiStart != 8) { 3213 // There are usually two sorts of types the ABI generation code can produce 3214 // for the low part of a pair that aren't 8 bytes in size: float or 3215 // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and 3216 // NaCl). 3217 // Promote these to a larger type. 3218 if (Lo->isFloatTy()) 3219 Lo = llvm::Type::getDoubleTy(Lo->getContext()); 3220 else { 3221 assert((Lo->isIntegerTy() || Lo->isPointerTy()) 3222 && "Invalid/unknown lo type"); 3223 Lo = llvm::Type::getInt64Ty(Lo->getContext()); 3224 } 3225 } 3226 3227 llvm::StructType *Result = llvm::StructType::get(Lo, Hi); 3228 3229 // Verify that the second element is at an 8-byte offset. 3230 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 && 3231 "Invalid x86-64 argument pair!"); 3232 return Result; 3233 } 3234 3235 ABIArgInfo X86_64ABIInfo:: 3236 classifyReturnType(QualType RetTy) const { 3237 // AMD64-ABI 3.2.3p4: Rule 1. 
Classify the return type with the 3238 // classification algorithm. 3239 X86_64ABIInfo::Class Lo, Hi; 3240 classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true); 3241 3242 // Check some invariants. 3243 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 3244 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 3245 3246 llvm::Type *ResType = nullptr; 3247 switch (Lo) { 3248 case NoClass: 3249 if (Hi == NoClass) 3250 return ABIArgInfo::getIgnore(); 3251 // If the low part is just padding, it takes no register, leave ResType 3252 // null. 3253 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 3254 "Unknown missing lo part"); 3255 break; 3256 3257 case SSEUp: 3258 case X87Up: 3259 llvm_unreachable("Invalid classification for lo word."); 3260 3261 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via 3262 // hidden argument. 3263 case Memory: 3264 return getIndirectReturnResult(RetTy); 3265 3266 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next 3267 // available register of the sequence %rax, %rdx is used. 3268 case Integer: 3269 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 3270 3271 // If we have a sign or zero extended integer, make sure to return Extend 3272 // so that the parameter gets the right LLVM IR attributes. 3273 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 3274 // Treat an enum type as its underlying type. 3275 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 3276 RetTy = EnumTy->getDecl()->getIntegerType(); 3277 3278 if (RetTy->isIntegralOrEnumerationType() && 3279 RetTy->isPromotableIntegerType()) 3280 return ABIArgInfo::getExtend(); 3281 } 3282 break; 3283 3284 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next 3285 // available SSE register of the sequence %xmm0, %xmm1 is used. 3286 case SSE: 3287 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 3288 break; 3289 3290 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is 3291 // returned on the X87 stack in %st0 as 80-bit x87 number. 3292 case X87: 3293 ResType = llvm::Type::getX86_FP80Ty(getVMContext()); 3294 break; 3295 3296 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real 3297 // part of the value is returned in %st0 and the imaginary part in 3298 // %st1. 3299 case ComplexX87: 3300 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification."); 3301 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()), 3302 llvm::Type::getX86_FP80Ty(getVMContext())); 3303 break; 3304 } 3305 3306 llvm::Type *HighPart = nullptr; 3307 switch (Hi) { 3308 // Memory was handled previously and X87 should 3309 // never occur as a hi class. 3310 case Memory: 3311 case X87: 3312 llvm_unreachable("Invalid classification for hi word."); 3313 3314 case ComplexX87: // Previously handled. 3315 case NoClass: 3316 break; 3317 3318 case Integer: 3319 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 3320 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 3321 return ABIArgInfo::getDirect(HighPart, 8); 3322 break; 3323 case SSE: 3324 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 3325 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 3326 return ABIArgInfo::getDirect(HighPart, 8); 3327 break; 3328 3329 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte 3330 // is passed in the next available eightbyte chunk if the last used 3331 // vector register. 
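  // (For example, with AVX a 256-bit vector return classifies as SSE for its
  // first eightbyte and SSEUP for the remaining ones, and is widened back to
  // the full vector type below.)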
3332 // 3333 // SSEUP should always be preceded by SSE, just widen. 3334 case SSEUp: 3335 assert(Lo == SSE && "Unexpected SSEUp classification."); 3336 ResType = GetByteVectorType(RetTy); 3337 break; 3338 3339 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is 3340 // returned together with the previous X87 value in %st0. 3341 case X87Up: 3342 // If X87Up is preceded by X87, we don't need to do 3343 // anything. However, in some cases with unions it may not be 3344 // preceded by X87. In such situations we follow gcc and pass the 3345 // extra bits in an SSE reg. 3346 if (Lo != X87) { 3347 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 3348 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 3349 return ABIArgInfo::getDirect(HighPart, 8); 3350 } 3351 break; 3352 } 3353 3354 // If a high part was specified, merge it together with the low part. It is 3355 // known to pass in the high eightbyte of the result. We do this by forming a 3356 // first class struct aggregate with the high and low part: {low, high} 3357 if (HighPart) 3358 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 3359 3360 return ABIArgInfo::getDirect(ResType); 3361 } 3362 3363 ABIArgInfo X86_64ABIInfo::classifyArgumentType( 3364 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE, 3365 bool isNamedArg) 3366 const 3367 { 3368 Ty = useFirstFieldIfTransparentUnion(Ty); 3369 3370 X86_64ABIInfo::Class Lo, Hi; 3371 classify(Ty, 0, Lo, Hi, isNamedArg); 3372 3373 // Check some invariants. 3374 // FIXME: Enforce these by construction. 3375 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 3376 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 3377 3378 neededInt = 0; 3379 neededSSE = 0; 3380 llvm::Type *ResType = nullptr; 3381 switch (Lo) { 3382 case NoClass: 3383 if (Hi == NoClass) 3384 return ABIArgInfo::getIgnore(); 3385 // If the low part is just padding, it takes no register, leave ResType 3386 // null. 3387 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 3388 "Unknown missing lo part"); 3389 break; 3390 3391 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument 3392 // on the stack. 3393 case Memory: 3394 3395 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or 3396 // COMPLEX_X87, it is passed in memory. 3397 case X87: 3398 case ComplexX87: 3399 if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect) 3400 ++neededInt; 3401 return getIndirectResult(Ty, freeIntRegs); 3402 3403 case SSEUp: 3404 case X87Up: 3405 llvm_unreachable("Invalid classification for lo word."); 3406 3407 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next 3408 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 3409 // and %r9 is used. 3410 case Integer: 3411 ++neededInt; 3412 3413 // Pick an 8-byte type based on the preferred type. 3414 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); 3415 3416 // If we have a sign or zero extended integer, make sure to return Extend 3417 // so that the parameter gets the right LLVM IR attributes. 3418 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 3419 // Treat an enum type as its underlying type. 3420 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3421 Ty = EnumTy->getDecl()->getIntegerType(); 3422 3423 if (Ty->isIntegralOrEnumerationType() && 3424 Ty->isPromotableIntegerType()) 3425 return ABIArgInfo::getExtend(); 3426 } 3427 3428 break; 3429 3430 // AMD64-ABI 3.2.3p3: Rule 3. 
If the class is SSE, the next 3431 // available SSE register is used, the registers are taken in the 3432 // order from %xmm0 to %xmm7. 3433 case SSE: { 3434 llvm::Type *IRType = CGT.ConvertType(Ty); 3435 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0); 3436 ++neededSSE; 3437 break; 3438 } 3439 } 3440 3441 llvm::Type *HighPart = nullptr; 3442 switch (Hi) { 3443 // Memory was handled previously, ComplexX87 and X87 should 3444 // never occur as hi classes, and X87Up must be preceded by X87, 3445 // which is passed in memory. 3446 case Memory: 3447 case X87: 3448 case ComplexX87: 3449 llvm_unreachable("Invalid classification for hi word."); 3450 3451 case NoClass: break; 3452 3453 case Integer: 3454 ++neededInt; 3455 // Pick an 8-byte type based on the preferred type. 3456 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 3457 3458 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 3459 return ABIArgInfo::getDirect(HighPart, 8); 3460 break; 3461 3462 // X87Up generally doesn't occur here (long double is passed in 3463 // memory), except in situations involving unions. 3464 case X87Up: 3465 case SSE: 3466 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 3467 3468 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 3469 return ABIArgInfo::getDirect(HighPart, 8); 3470 3471 ++neededSSE; 3472 break; 3473 3474 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the 3475 // eightbyte is passed in the upper half of the last used SSE 3476 // register. This only happens when 128-bit vectors are passed. 3477 case SSEUp: 3478 assert(Lo == SSE && "Unexpected SSEUp classification"); 3479 ResType = GetByteVectorType(Ty); 3480 break; 3481 } 3482 3483 // If a high part was specified, merge it together with the low part. It is 3484 // known to pass in the high eightbyte of the result. 
We do this by forming a 3485 // first class struct aggregate with the high and low part: {low, high} 3486 if (HighPart) 3487 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 3488 3489 return ABIArgInfo::getDirect(ResType); 3490 } 3491 3492 ABIArgInfo 3493 X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt, 3494 unsigned &NeededSSE) const { 3495 auto RT = Ty->getAs<RecordType>(); 3496 assert(RT && "classifyRegCallStructType only valid with struct types"); 3497 3498 if (RT->getDecl()->hasFlexibleArrayMember()) 3499 return getIndirectReturnResult(Ty); 3500 3501 // Sum up bases 3502 if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) { 3503 if (CXXRD->isDynamicClass()) { 3504 NeededInt = NeededSSE = 0; 3505 return getIndirectReturnResult(Ty); 3506 } 3507 3508 for (const auto &I : CXXRD->bases()) 3509 if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE) 3510 .isIndirect()) { 3511 NeededInt = NeededSSE = 0; 3512 return getIndirectReturnResult(Ty); 3513 } 3514 } 3515 3516 // Sum up members 3517 for (const auto *FD : RT->getDecl()->fields()) { 3518 if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) { 3519 if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE) 3520 .isIndirect()) { 3521 NeededInt = NeededSSE = 0; 3522 return getIndirectReturnResult(Ty); 3523 } 3524 } else { 3525 unsigned LocalNeededInt, LocalNeededSSE; 3526 if (classifyArgumentType(FD->getType(), UINT_MAX, LocalNeededInt, 3527 LocalNeededSSE, true) 3528 .isIndirect()) { 3529 NeededInt = NeededSSE = 0; 3530 return getIndirectReturnResult(Ty); 3531 } 3532 NeededInt += LocalNeededInt; 3533 NeededSSE += LocalNeededSSE; 3534 } 3535 } 3536 3537 return ABIArgInfo::getDirect(); 3538 } 3539 3540 ABIArgInfo X86_64ABIInfo::classifyRegCallStructType(QualType Ty, 3541 unsigned &NeededInt, 3542 unsigned &NeededSSE) const { 3543 3544 NeededInt = 0; 3545 NeededSSE = 0; 3546 3547 return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE); 3548 } 3549 3550 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 3551 3552 bool IsRegCall = FI.getCallingConvention() == llvm::CallingConv::X86_RegCall; 3553 3554 // Keep track of the number of assigned registers. 3555 unsigned FreeIntRegs = IsRegCall ? 11 : 6; 3556 unsigned FreeSSERegs = IsRegCall ? 16 : 8; 3557 unsigned NeededInt, NeededSSE; 3558 3559 if (!getCXXABI().classifyReturnType(FI)) { 3560 if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() && 3561 !FI.getReturnType()->getTypePtr()->isUnionType()) { 3562 FI.getReturnInfo() = 3563 classifyRegCallStructType(FI.getReturnType(), NeededInt, NeededSSE); 3564 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) { 3565 FreeIntRegs -= NeededInt; 3566 FreeSSERegs -= NeededSSE; 3567 } else { 3568 FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType()); 3569 } 3570 } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>()) { 3571 // Complex Long Double Type is passed in Memory when Regcall 3572 // calling convention is used. 3573 const ComplexType *CT = FI.getReturnType()->getAs<ComplexType>(); 3574 if (getContext().getCanonicalType(CT->getElementType()) == 3575 getContext().LongDoubleTy) 3576 FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType()); 3577 } else 3578 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3579 } 3580 3581 // If the return value is indirect, then the hidden argument is consuming one 3582 // integer register. 
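  // (The hidden return pointer is passed in %rdi, so one fewer GPR is
  // available for the explicit arguments.)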
3583 if (FI.getReturnInfo().isIndirect()) 3584 --FreeIntRegs; 3585 3586 // The chain argument effectively gives us another free register. 3587 if (FI.isChainCall()) 3588 ++FreeIntRegs; 3589 3590 unsigned NumRequiredArgs = FI.getNumRequiredArgs(); 3591 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers 3592 // get assigned (in left-to-right order) for passing as follows... 3593 unsigned ArgNo = 0; 3594 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3595 it != ie; ++it, ++ArgNo) { 3596 bool IsNamedArg = ArgNo < NumRequiredArgs; 3597 3598 if (IsRegCall && it->type->isStructureOrClassType()) 3599 it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE); 3600 else 3601 it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt, 3602 NeededSSE, IsNamedArg); 3603 3604 // AMD64-ABI 3.2.3p3: If there are no registers available for any 3605 // eightbyte of an argument, the whole argument is passed on the 3606 // stack. If registers have already been assigned for some 3607 // eightbytes of such an argument, the assignments get reverted. 3608 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) { 3609 FreeIntRegs -= NeededInt; 3610 FreeSSERegs -= NeededSSE; 3611 } else { 3612 it->info = getIndirectResult(it->type, FreeIntRegs); 3613 } 3614 } 3615 } 3616 3617 static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF, 3618 Address VAListAddr, QualType Ty) { 3619 Address overflow_arg_area_p = CGF.Builder.CreateStructGEP( 3620 VAListAddr, 2, CharUnits::fromQuantity(8), "overflow_arg_area_p"); 3621 llvm::Value *overflow_arg_area = 3622 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); 3623 3624 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 3625 // byte boundary if alignment needed by type exceeds 8 byte boundary. 3626 // It isn't stated explicitly in the standard, but in practice we use 3627 // alignment greater than 16 where necessary. 3628 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty); 3629 if (Align > CharUnits::fromQuantity(8)) { 3630 overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area, 3631 Align); 3632 } 3633 3634 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. 3635 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 3636 llvm::Value *Res = 3637 CGF.Builder.CreateBitCast(overflow_arg_area, 3638 llvm::PointerType::getUnqual(LTy)); 3639 3640 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: 3641 // l->overflow_arg_area + sizeof(type). 3642 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to 3643 // an 8 byte boundary. 3644 3645 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8; 3646 llvm::Value *Offset = 3647 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7); 3648 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset, 3649 "overflow_arg_area.next"); 3650 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p); 3651 3652 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type. 
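  // (Illustrative example: for a 12-byte struct, Res points at the current
  // overflow_arg_area and the pointer has just been advanced by 16, i.e. the
  // size rounded up to an 8 byte multiple.)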
3653 return Address(Res, Align); 3654 } 3655 3656 Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 3657 QualType Ty) const { 3658 // Assume that va_list type is correct; should be pointer to LLVM type: 3659 // struct { 3660 // i32 gp_offset; 3661 // i32 fp_offset; 3662 // i8* overflow_arg_area; 3663 // i8* reg_save_area; 3664 // }; 3665 unsigned neededInt, neededSSE; 3666 3667 Ty = getContext().getCanonicalType(Ty); 3668 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE, 3669 /*isNamedArg*/false); 3670 3671 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed 3672 // in the registers. If not go to step 7. 3673 if (!neededInt && !neededSSE) 3674 return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty); 3675 3676 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of 3677 // general purpose registers needed to pass type and num_fp to hold 3678 // the number of floating point registers needed. 3679 3680 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into 3681 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or 3682 // l->fp_offset > 304 - num_fp * 16 go to step 7. 3683 // 3684 // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of 3685 // register save space). 3686 3687 llvm::Value *InRegs = nullptr; 3688 Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid(); 3689 llvm::Value *gp_offset = nullptr, *fp_offset = nullptr; 3690 if (neededInt) { 3691 gp_offset_p = 3692 CGF.Builder.CreateStructGEP(VAListAddr, 0, CharUnits::Zero(), 3693 "gp_offset_p"); 3694 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset"); 3695 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8); 3696 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp"); 3697 } 3698 3699 if (neededSSE) { 3700 fp_offset_p = 3701 CGF.Builder.CreateStructGEP(VAListAddr, 1, CharUnits::fromQuantity(4), 3702 "fp_offset_p"); 3703 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset"); 3704 llvm::Value *FitsInFP = 3705 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16); 3706 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp"); 3707 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP; 3708 } 3709 3710 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 3711 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); 3712 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 3713 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); 3714 3715 // Emit code to load the value if it was passed in registers. 3716 3717 CGF.EmitBlock(InRegBlock); 3718 3719 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with 3720 // an offset of l->gp_offset and/or l->fp_offset. This may require 3721 // copying to a temporary location in case the parameter is passed 3722 // in different register classes or requires an alignment greater 3723 // than 8 for general purpose registers and 16 for XMM registers. 3724 // 3725 // FIXME: This really results in shameful code when we end up needing to 3726 // collect arguments from different places; often what should result in a 3727 // simple assembling of a structure from scattered addresses has many more 3728 // loads than necessary. Can we clean this up? 
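  // (For example, a struct { int i; double d; } argument classified
  // INTEGER + SSE is reassembled below with one load from the GP register
  // save area and one from the FP register save area into a temporary.)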
3729 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 3730 llvm::Value *RegSaveArea = CGF.Builder.CreateLoad( 3731 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(16)), 3732 "reg_save_area"); 3733 3734 Address RegAddr = Address::invalid(); 3735 if (neededInt && neededSSE) { 3736 // FIXME: Cleanup. 3737 assert(AI.isDirect() && "Unexpected ABI info for mixed regs"); 3738 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType()); 3739 Address Tmp = CGF.CreateMemTemp(Ty); 3740 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST); 3741 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs"); 3742 llvm::Type *TyLo = ST->getElementType(0); 3743 llvm::Type *TyHi = ST->getElementType(1); 3744 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) && 3745 "Unexpected ABI info for mixed regs"); 3746 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo); 3747 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi); 3748 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegSaveArea, gp_offset); 3749 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegSaveArea, fp_offset); 3750 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr; 3751 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr; 3752 3753 // Copy the first element. 3754 // FIXME: Our choice of alignment here and below is probably pessimistic. 3755 llvm::Value *V = CGF.Builder.CreateAlignedLoad( 3756 TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo), 3757 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyLo))); 3758 CGF.Builder.CreateStore(V, 3759 CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero())); 3760 3761 // Copy the second element. 3762 V = CGF.Builder.CreateAlignedLoad( 3763 TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi), 3764 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyHi))); 3765 CharUnits Offset = CharUnits::fromQuantity( 3766 getDataLayout().getStructLayout(ST)->getElementOffset(1)); 3767 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1, Offset)); 3768 3769 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy); 3770 } else if (neededInt) { 3771 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, gp_offset), 3772 CharUnits::fromQuantity(8)); 3773 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy); 3774 3775 // Copy to a temporary if necessary to ensure the appropriate alignment. 3776 std::pair<CharUnits, CharUnits> SizeAlign = 3777 getContext().getTypeInfoInChars(Ty); 3778 uint64_t TySize = SizeAlign.first.getQuantity(); 3779 CharUnits TyAlign = SizeAlign.second; 3780 3781 // Copy into a temporary if the type is more aligned than the 3782 // register save area. 3783 if (TyAlign.getQuantity() > 8) { 3784 Address Tmp = CGF.CreateMemTemp(Ty); 3785 CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false); 3786 RegAddr = Tmp; 3787 } 3788 3789 } else if (neededSSE == 1) { 3790 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset), 3791 CharUnits::fromQuantity(16)); 3792 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy); 3793 } else { 3794 assert(neededSSE == 2 && "Invalid number of needed registers!"); 3795 // SSE registers are spaced 16 bytes apart in the register save 3796 // area, we need to collect the two eightbytes together. 3797 // The ABI isn't explicit about this, but it seems reasonable 3798 // to assume that the slots are 16-byte aligned, since the stack is 3799 // naturally 16-byte aligned and the prologue is expected to store 3800 // all the SSE registers to the RSA. 
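    // (For a struct { double a, b; } argument, for instance, the two
    // eightbytes are expected at fp_offset and fp_offset + 16, since each XMM
    // register gets its own 16-byte slot in the save area.)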
3801 Address RegAddrLo = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset), 3802 CharUnits::fromQuantity(16)); 3803 Address RegAddrHi = 3804 CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo, 3805 CharUnits::fromQuantity(16)); 3806 llvm::Type *DoubleTy = CGF.DoubleTy; 3807 llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy); 3808 llvm::Value *V; 3809 Address Tmp = CGF.CreateMemTemp(Ty); 3810 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST); 3811 V = CGF.Builder.CreateLoad( 3812 CGF.Builder.CreateElementBitCast(RegAddrLo, DoubleTy)); 3813 CGF.Builder.CreateStore(V, 3814 CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero())); 3815 V = CGF.Builder.CreateLoad( 3816 CGF.Builder.CreateElementBitCast(RegAddrHi, DoubleTy)); 3817 CGF.Builder.CreateStore(V, 3818 CGF.Builder.CreateStructGEP(Tmp, 1, CharUnits::fromQuantity(8))); 3819 3820 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy); 3821 } 3822 3823 // AMD64-ABI 3.5.7p5: Step 5. Set: 3824 // l->gp_offset = l->gp_offset + num_gp * 8 3825 // l->fp_offset = l->fp_offset + num_fp * 16. 3826 if (neededInt) { 3827 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8); 3828 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), 3829 gp_offset_p); 3830 } 3831 if (neededSSE) { 3832 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16); 3833 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), 3834 fp_offset_p); 3835 } 3836 CGF.EmitBranch(ContBlock); 3837 3838 // Emit code to load the value if it was passed in memory. 3839 3840 CGF.EmitBlock(InMemBlock); 3841 Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty); 3842 3843 // Return the appropriate result. 3844 3845 CGF.EmitBlock(ContBlock); 3846 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock, 3847 "vaarg.addr"); 3848 return ResAddr; 3849 } 3850 3851 Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, 3852 QualType Ty) const { 3853 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false, 3854 CGF.getContext().getTypeInfoInChars(Ty), 3855 CharUnits::fromQuantity(8), 3856 /*allowHigherAlign*/ false); 3857 } 3858 3859 ABIArgInfo 3860 WinX86_64ABIInfo::reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs, 3861 const ABIArgInfo ¤t) const { 3862 // Assumes vectorCall calling convention. 
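  // (Illustrative case: a struct containing two __m128 fields is a
  // homogeneous vector aggregate; if at least two XMM registers are still
  // free, it is reclassified here to be passed directly.)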
3863 const Type *Base = nullptr;
3864 uint64_t NumElts = 0;
3865
3866 if (!Ty->isBuiltinType() && !Ty->isVectorType() &&
3867 isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
3868 FreeSSERegs -= NumElts;
3869 return getDirectX86Hva();
3870 }
3871 return current;
3872 }
3873
3874 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
3875 bool IsReturnType, bool IsVectorCall,
3876 bool IsRegCall) const {
3877
3878 if (Ty->isVoidType())
3879 return ABIArgInfo::getIgnore();
3880
3881 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3882 Ty = EnumTy->getDecl()->getIntegerType();
3883
3884 TypeInfo Info = getContext().getTypeInfo(Ty);
3885 uint64_t Width = Info.Width;
3886 CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);
3887
3888 const RecordType *RT = Ty->getAs<RecordType>();
3889 if (RT) {
3890 if (!IsReturnType) {
3891 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
3892 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
3893 }
3894
3895 if (RT->getDecl()->hasFlexibleArrayMember())
3896 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
3897
3898 }
3899
3900 const Type *Base = nullptr;
3901 uint64_t NumElts = 0;
3902 // vectorcall adds the concept of a homogeneous vector aggregate, similar to
3903 // other targets.
3904 if ((IsVectorCall || IsRegCall) &&
3905 isHomogeneousAggregate(Ty, Base, NumElts)) {
3906 if (IsRegCall) {
3907 if (FreeSSERegs >= NumElts) {
3908 FreeSSERegs -= NumElts;
3909 if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
3910 return ABIArgInfo::getDirect();
3911 return ABIArgInfo::getExpand();
3912 }
3913 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3914 } else if (IsVectorCall) {
3915 if (FreeSSERegs >= NumElts &&
3916 (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) {
3917 FreeSSERegs -= NumElts;
3918 return ABIArgInfo::getDirect();
3919 } else if (IsReturnType) {
3920 return ABIArgInfo::getExpand();
3921 } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) {
3922 // HVAs are delayed and reclassified in the 2nd step.
3923 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3924 }
3925 }
3926 }
3927
3928 if (Ty->isMemberPointerType()) {
3929 // If the member pointer is represented by an LLVM int or ptr, pass it
3930 // directly.
3931 llvm::Type *LLTy = CGT.ConvertType(Ty);
3932 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
3933 return ABIArgInfo::getDirect();
3934 }
3935
3936 if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
3937 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
3938 // not 1, 2, 4, or 8 bytes, must be passed by reference."
3939 if (Width > 64 || !llvm::isPowerOf2_64(Width))
3940 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
3941
3942 // Otherwise, coerce it to a small integer.
3943 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
3944 }
3945
3946 // Per the MS ABI, bool is always extended; other builtin types are not
3947 // extended.
3948 const BuiltinType *BT = Ty->getAs<BuiltinType>();
3949 if (BT && BT->getKind() == BuiltinType::Bool)
3950 return ABIArgInfo::getExtend();
3951
3952 // Mingw64 GCC uses the old 80-bit extended precision floating point unit and
3953 // passes long doubles indirectly through memory.
3954 if (IsMingw64 && BT && BT->getKind() == BuiltinType::LongDouble) { 3955 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); 3956 if (LDF == &llvm::APFloat::x87DoubleExtended()) 3957 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false); 3958 } 3959 3960 return ABIArgInfo::getDirect(); 3961 } 3962 3963 void WinX86_64ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI, 3964 unsigned FreeSSERegs, 3965 bool IsVectorCall, 3966 bool IsRegCall) const { 3967 unsigned Count = 0; 3968 for (auto &I : FI.arguments()) { 3969 // Vectorcall in x64 only permits the first 6 arguments to be passed 3970 // as XMM/YMM registers. 3971 if (Count < VectorcallMaxParamNumAsReg) 3972 I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall); 3973 else { 3974 // Since these cannot be passed in registers, pretend no registers 3975 // are left. 3976 unsigned ZeroSSERegsAvail = 0; 3977 I.info = classify(I.type, /*FreeSSERegs=*/ZeroSSERegsAvail, false, 3978 IsVectorCall, IsRegCall); 3979 } 3980 ++Count; 3981 } 3982 3983 for (auto &I : FI.arguments()) { 3984 I.info = reclassifyHvaArgType(I.type, FreeSSERegs, I.info); 3985 } 3986 } 3987 3988 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 3989 bool IsVectorCall = 3990 FI.getCallingConvention() == llvm::CallingConv::X86_VectorCall; 3991 bool IsRegCall = FI.getCallingConvention() == llvm::CallingConv::X86_RegCall; 3992 3993 unsigned FreeSSERegs = 0; 3994 if (IsVectorCall) { 3995 // We can use up to 4 SSE return registers with vectorcall. 3996 FreeSSERegs = 4; 3997 } else if (IsRegCall) { 3998 // RegCall gives us 16 SSE registers. 3999 FreeSSERegs = 16; 4000 } 4001 4002 if (!getCXXABI().classifyReturnType(FI)) 4003 FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true, 4004 IsVectorCall, IsRegCall); 4005 4006 if (IsVectorCall) { 4007 // We can use up to 6 SSE register parameters with vectorcall. 4008 FreeSSERegs = 6; 4009 } else if (IsRegCall) { 4010 // RegCall gives us 16 SSE registers, we can reuse the return registers. 4011 FreeSSERegs = 16; 4012 } 4013 4014 if (IsVectorCall) { 4015 computeVectorCallArgs(FI, FreeSSERegs, IsVectorCall, IsRegCall); 4016 } else { 4017 for (auto &I : FI.arguments()) 4018 I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall); 4019 } 4020 4021 } 4022 4023 Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 4024 QualType Ty) const { 4025 4026 bool IsIndirect = false; 4027 4028 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is 4029 // not 1, 2, 4, or 8 bytes, must be passed by reference." 4030 if (isAggregateTypeForABI(Ty) || Ty->isMemberPointerType()) { 4031 uint64_t Width = getContext().getTypeSize(Ty); 4032 IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width); 4033 } 4034 4035 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, 4036 CGF.getContext().getTypeInfoInChars(Ty), 4037 CharUnits::fromQuantity(8), 4038 /*allowHigherAlign*/ false); 4039 } 4040 4041 // PowerPC-32 4042 namespace { 4043 /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information. 
4044 class PPC32_SVR4_ABIInfo : public DefaultABIInfo { 4045 bool IsSoftFloatABI; 4046 public: 4047 PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI) 4048 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI) {} 4049 4050 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 4051 QualType Ty) const override; 4052 }; 4053 4054 class PPC32TargetCodeGenInfo : public TargetCodeGenInfo { 4055 public: 4056 PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI) 4057 : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT, SoftFloatABI)) {} 4058 4059 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 4060 // This is recovered from gcc output. 4061 return 1; // r1 is the dedicated stack pointer 4062 } 4063 4064 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4065 llvm::Value *Address) const override; 4066 }; 4067 4068 } 4069 4070 // TODO: this implementation is now likely redundant with 4071 // DefaultABIInfo::EmitVAArg. 4072 Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList, 4073 QualType Ty) const { 4074 const unsigned OverflowLimit = 8; 4075 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { 4076 // TODO: Implement this. For now ignore. 4077 (void)CTy; 4078 return Address::invalid(); // FIXME? 4079 } 4080 4081 // struct __va_list_tag { 4082 // unsigned char gpr; 4083 // unsigned char fpr; 4084 // unsigned short reserved; 4085 // void *overflow_arg_area; 4086 // void *reg_save_area; 4087 // }; 4088 4089 bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64; 4090 bool isInt = 4091 Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType(); 4092 bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64; 4093 4094 // All aggregates are passed indirectly? That doesn't seem consistent 4095 // with the argument-lowering code. 4096 bool isIndirect = Ty->isAggregateType(); 4097 4098 CGBuilderTy &Builder = CGF.Builder; 4099 4100 // The calling convention either uses 1-2 GPRs or 1 FPR. 4101 Address NumRegsAddr = Address::invalid(); 4102 if (isInt || IsSoftFloatABI) { 4103 NumRegsAddr = Builder.CreateStructGEP(VAList, 0, CharUnits::Zero(), "gpr"); 4104 } else { 4105 NumRegsAddr = Builder.CreateStructGEP(VAList, 1, CharUnits::One(), "fpr"); 4106 } 4107 4108 llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs"); 4109 4110 // "Align" the register count when TY is i64. 4111 if (isI64 || (isF64 && IsSoftFloatABI)) { 4112 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1)); 4113 NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U)); 4114 } 4115 4116 llvm::Value *CC = 4117 Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond"); 4118 4119 llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs"); 4120 llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow"); 4121 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont"); 4122 4123 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow); 4124 4125 llvm::Type *DirectTy = CGF.ConvertType(Ty); 4126 if (isIndirect) DirectTy = DirectTy->getPointerTo(0); 4127 4128 // Case 1: consume registers. 
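  // (Sketch of the arithmetic below: GPR slots are 4 bytes and FPR slots are
  // 8 bytes, so after two integer arguments gpr == 2 and the next value is
  // read from reg_save_area + 8; FP values start 32 bytes into the save
  // area.)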
4129 Address RegAddr = Address::invalid(); 4130 { 4131 CGF.EmitBlock(UsingRegs); 4132 4133 Address RegSaveAreaPtr = 4134 Builder.CreateStructGEP(VAList, 4, CharUnits::fromQuantity(8)); 4135 RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr), 4136 CharUnits::fromQuantity(8)); 4137 assert(RegAddr.getElementType() == CGF.Int8Ty); 4138 4139 // Floating-point registers start after the general-purpose registers. 4140 if (!(isInt || IsSoftFloatABI)) { 4141 RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr, 4142 CharUnits::fromQuantity(32)); 4143 } 4144 4145 // Get the address of the saved value by scaling the number of 4146 // registers we've used by the number of 4147 CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8); 4148 llvm::Value *RegOffset = 4149 Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity())); 4150 RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty, 4151 RegAddr.getPointer(), RegOffset), 4152 RegAddr.getAlignment().alignmentOfArrayElement(RegSize)); 4153 RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy); 4154 4155 // Increase the used-register count. 4156 NumRegs = 4157 Builder.CreateAdd(NumRegs, 4158 Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1)); 4159 Builder.CreateStore(NumRegs, NumRegsAddr); 4160 4161 CGF.EmitBranch(Cont); 4162 } 4163 4164 // Case 2: consume space in the overflow area. 4165 Address MemAddr = Address::invalid(); 4166 { 4167 CGF.EmitBlock(UsingOverflow); 4168 4169 Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr); 4170 4171 // Everything in the overflow area is rounded up to a size of at least 4. 4172 CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4); 4173 4174 CharUnits Size; 4175 if (!isIndirect) { 4176 auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty); 4177 Size = TypeInfo.first.alignTo(OverflowAreaAlign); 4178 } else { 4179 Size = CGF.getPointerSize(); 4180 } 4181 4182 Address OverflowAreaAddr = 4183 Builder.CreateStructGEP(VAList, 3, CharUnits::fromQuantity(4)); 4184 Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"), 4185 OverflowAreaAlign); 4186 // Round up address of argument to alignment 4187 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty); 4188 if (Align > OverflowAreaAlign) { 4189 llvm::Value *Ptr = OverflowArea.getPointer(); 4190 OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align), 4191 Align); 4192 } 4193 4194 MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy); 4195 4196 // Increase the overflow area. 4197 OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size); 4198 Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr); 4199 CGF.EmitBranch(Cont); 4200 } 4201 4202 CGF.EmitBlock(Cont); 4203 4204 // Merge the cases with a phi. 4205 Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow, 4206 "vaarg.addr"); 4207 4208 // Load the pointer if the argument was passed indirectly. 4209 if (isIndirect) { 4210 Result = Address(Builder.CreateLoad(Result, "aggr"), 4211 getContext().getTypeAlignInChars(Ty)); 4212 } 4213 4214 return Result; 4215 } 4216 4217 bool 4218 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4219 llvm::Value *Address) const { 4220 // This is calculated from the LLVM and GCC tables and verified 4221 // against gcc output. AFAIK all ABIs use the same encoding. 
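  // (The table maps each DWARF register number to that register's size in
  // bytes; for example, entries 0-31 are set to 4 below because the PPC32
  // general-purpose registers are 4 bytes wide.)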
4222 4223 CodeGen::CGBuilderTy &Builder = CGF.Builder; 4224 4225 llvm::IntegerType *i8 = CGF.Int8Ty; 4226 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 4227 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 4228 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 4229 4230 // 0-31: r0-31, the 4-byte general-purpose registers 4231 AssignToArrayRange(Builder, Address, Four8, 0, 31); 4232 4233 // 32-63: fp0-31, the 8-byte floating-point registers 4234 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 4235 4236 // 64-76 are various 4-byte special-purpose registers: 4237 // 64: mq 4238 // 65: lr 4239 // 66: ctr 4240 // 67: ap 4241 // 68-75 cr0-7 4242 // 76: xer 4243 AssignToArrayRange(Builder, Address, Four8, 64, 76); 4244 4245 // 77-108: v0-31, the 16-byte vector registers 4246 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 4247 4248 // 109: vrsave 4249 // 110: vscr 4250 // 111: spe_acc 4251 // 112: spefscr 4252 // 113: sfp 4253 AssignToArrayRange(Builder, Address, Four8, 109, 113); 4254 4255 return false; 4256 } 4257 4258 // PowerPC-64 4259 4260 namespace { 4261 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information. 4262 class PPC64_SVR4_ABIInfo : public ABIInfo { 4263 public: 4264 enum ABIKind { 4265 ELFv1 = 0, 4266 ELFv2 4267 }; 4268 4269 private: 4270 static const unsigned GPRBits = 64; 4271 ABIKind Kind; 4272 bool HasQPX; 4273 bool IsSoftFloatABI; 4274 4275 // A vector of float or double will be promoted to <4 x f32> or <4 x f64> and 4276 // will be passed in a QPX register. 4277 bool IsQPXVectorTy(const Type *Ty) const { 4278 if (!HasQPX) 4279 return false; 4280 4281 if (const VectorType *VT = Ty->getAs<VectorType>()) { 4282 unsigned NumElements = VT->getNumElements(); 4283 if (NumElements == 1) 4284 return false; 4285 4286 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) { 4287 if (getContext().getTypeSize(Ty) <= 256) 4288 return true; 4289 } else if (VT->getElementType()-> 4290 isSpecificBuiltinType(BuiltinType::Float)) { 4291 if (getContext().getTypeSize(Ty) <= 128) 4292 return true; 4293 } 4294 } 4295 4296 return false; 4297 } 4298 4299 bool IsQPXVectorTy(QualType Ty) const { 4300 return IsQPXVectorTy(Ty.getTypePtr()); 4301 } 4302 4303 public: 4304 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX, 4305 bool SoftFloatABI) 4306 : ABIInfo(CGT), Kind(Kind), HasQPX(HasQPX), 4307 IsSoftFloatABI(SoftFloatABI) {} 4308 4309 bool isPromotableTypeForABI(QualType Ty) const; 4310 CharUnits getParamTypeAlignment(QualType Ty) const; 4311 4312 ABIArgInfo classifyReturnType(QualType RetTy) const; 4313 ABIArgInfo classifyArgumentType(QualType Ty) const; 4314 4315 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 4316 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 4317 uint64_t Members) const override; 4318 4319 // TODO: We can add more logic to computeInfo to improve performance. 4320 // Example: For aggregate arguments that fit in a register, we could 4321 // use getDirectInReg (as is done below for structs containing a single 4322 // floating-point value) to avoid pushing them to memory on function 4323 // entry. This would require changing the logic in PPCISelLowering 4324 // when lowering the parameters in the caller and args in the callee. 
4325 void computeInfo(CGFunctionInfo &FI) const override { 4326 if (!getCXXABI().classifyReturnType(FI)) 4327 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 4328 for (auto &I : FI.arguments()) { 4329 // We rely on the default argument classification for the most part. 4330 // One exception: An aggregate containing a single floating-point 4331 // or vector item must be passed in a register if one is available. 4332 const Type *T = isSingleElementStruct(I.type, getContext()); 4333 if (T) { 4334 const BuiltinType *BT = T->getAs<BuiltinType>(); 4335 if (IsQPXVectorTy(T) || 4336 (T->isVectorType() && getContext().getTypeSize(T) == 128) || 4337 (BT && BT->isFloatingPoint())) { 4338 QualType QT(T, 0); 4339 I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT)); 4340 continue; 4341 } 4342 } 4343 I.info = classifyArgumentType(I.type); 4344 } 4345 } 4346 4347 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 4348 QualType Ty) const override; 4349 }; 4350 4351 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo { 4352 4353 public: 4354 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT, 4355 PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX, 4356 bool SoftFloatABI) 4357 : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind, HasQPX, 4358 SoftFloatABI)) {} 4359 4360 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 4361 // This is recovered from gcc output. 4362 return 1; // r1 is the dedicated stack pointer 4363 } 4364 4365 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4366 llvm::Value *Address) const override; 4367 }; 4368 4369 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo { 4370 public: 4371 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} 4372 4373 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 4374 // This is recovered from gcc output. 4375 return 1; // r1 is the dedicated stack pointer 4376 } 4377 4378 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4379 llvm::Value *Address) const override; 4380 }; 4381 4382 } 4383 4384 // Return true if the ABI requires Ty to be passed sign- or zero- 4385 // extended to 64 bits. 4386 bool 4387 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const { 4388 // Treat an enum type as its underlying type. 4389 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 4390 Ty = EnumTy->getDecl()->getIntegerType(); 4391 4392 // Promotable integer types are required to be promoted by the ABI. 4393 if (Ty->isPromotableIntegerType()) 4394 return true; 4395 4396 // In addition to the usual promotable integer types, we also need to 4397 // extend all 32-bit types, since the ABI requires promotion to 64 bits. 4398 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 4399 switch (BT->getKind()) { 4400 case BuiltinType::Int: 4401 case BuiltinType::UInt: 4402 return true; 4403 default: 4404 break; 4405 } 4406 4407 return false; 4408 } 4409 4410 /// isAlignedParamType - Determine whether a type requires 16-byte or 4411 /// higher alignment in the parameter area. Always returns at least 8. 4412 CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const { 4413 // Complex types are passed just like their elements. 4414 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) 4415 Ty = CTy->getElementType(); 4416 4417 // Only vector types of size 16 bytes need alignment (larger types are 4418 // passed via reference, smaller types are not aligned). 
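  // (For example, a 16-byte Altivec vector parameter gets 16-byte alignment
  // in the parameter save area, an 8-byte vector keeps the default 8-byte
  // alignment, and QPX vectors larger than 16 bytes get 32.)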
4419 if (IsQPXVectorTy(Ty)) { 4420 if (getContext().getTypeSize(Ty) > 128) 4421 return CharUnits::fromQuantity(32); 4422 4423 return CharUnits::fromQuantity(16); 4424 } else if (Ty->isVectorType()) { 4425 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8); 4426 } 4427 4428 // For single-element float/vector structs, we consider the whole type 4429 // to have the same alignment requirements as its single element. 4430 const Type *AlignAsType = nullptr; 4431 const Type *EltType = isSingleElementStruct(Ty, getContext()); 4432 if (EltType) { 4433 const BuiltinType *BT = EltType->getAs<BuiltinType>(); 4434 if (IsQPXVectorTy(EltType) || (EltType->isVectorType() && 4435 getContext().getTypeSize(EltType) == 128) || 4436 (BT && BT->isFloatingPoint())) 4437 AlignAsType = EltType; 4438 } 4439 4440 // Likewise for ELFv2 homogeneous aggregates. 4441 const Type *Base = nullptr; 4442 uint64_t Members = 0; 4443 if (!AlignAsType && Kind == ELFv2 && 4444 isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members)) 4445 AlignAsType = Base; 4446 4447 // With special case aggregates, only vector base types need alignment. 4448 if (AlignAsType && IsQPXVectorTy(AlignAsType)) { 4449 if (getContext().getTypeSize(AlignAsType) > 128) 4450 return CharUnits::fromQuantity(32); 4451 4452 return CharUnits::fromQuantity(16); 4453 } else if (AlignAsType) { 4454 return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8); 4455 } 4456 4457 // Otherwise, we only need alignment for any aggregate type that 4458 // has an alignment requirement of >= 16 bytes. 4459 if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) { 4460 if (HasQPX && getContext().getTypeAlign(Ty) >= 256) 4461 return CharUnits::fromQuantity(32); 4462 return CharUnits::fromQuantity(16); 4463 } 4464 4465 return CharUnits::fromQuantity(8); 4466 } 4467 4468 /// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous 4469 /// aggregate. Base is set to the base element type, and Members is set 4470 /// to the number of base elements. 4471 bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base, 4472 uint64_t &Members) const { 4473 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 4474 uint64_t NElements = AT->getSize().getZExtValue(); 4475 if (NElements == 0) 4476 return false; 4477 if (!isHomogeneousAggregate(AT->getElementType(), Base, Members)) 4478 return false; 4479 Members *= NElements; 4480 } else if (const RecordType *RT = Ty->getAs<RecordType>()) { 4481 const RecordDecl *RD = RT->getDecl(); 4482 if (RD->hasFlexibleArrayMember()) 4483 return false; 4484 4485 Members = 0; 4486 4487 // If this is a C++ record, check the bases first. 4488 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 4489 for (const auto &I : CXXRD->bases()) { 4490 // Ignore empty records. 4491 if (isEmptyRecord(getContext(), I.getType(), true)) 4492 continue; 4493 4494 uint64_t FldMembers; 4495 if (!isHomogeneousAggregate(I.getType(), Base, FldMembers)) 4496 return false; 4497 4498 Members += FldMembers; 4499 } 4500 } 4501 4502 for (const auto *FD : RD->fields()) { 4503 // Ignore (non-zero arrays of) empty records. 
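      // (Illustrative case: in struct { struct Empty {} e[4]; float f, g; },
      // the array of empty records is skipped and only the two floats count;
      // a zero-length array member disqualifies the whole type.)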
4504 QualType FT = FD->getType(); 4505 while (const ConstantArrayType *AT = 4506 getContext().getAsConstantArrayType(FT)) { 4507 if (AT->getSize().getZExtValue() == 0) 4508 return false; 4509 FT = AT->getElementType(); 4510 } 4511 if (isEmptyRecord(getContext(), FT, true)) 4512 continue; 4513 4514 // For compatibility with GCC, ignore empty bitfields in C++ mode. 4515 if (getContext().getLangOpts().CPlusPlus && 4516 FD->isBitField() && FD->getBitWidthValue(getContext()) == 0) 4517 continue; 4518 4519 uint64_t FldMembers; 4520 if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers)) 4521 return false; 4522 4523 Members = (RD->isUnion() ? 4524 std::max(Members, FldMembers) : Members + FldMembers); 4525 } 4526 4527 if (!Base) 4528 return false; 4529 4530 // Ensure there is no padding. 4531 if (getContext().getTypeSize(Base) * Members != 4532 getContext().getTypeSize(Ty)) 4533 return false; 4534 } else { 4535 Members = 1; 4536 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 4537 Members = 2; 4538 Ty = CT->getElementType(); 4539 } 4540 4541 // Most ABIs only support float, double, and some vector type widths. 4542 if (!isHomogeneousAggregateBaseType(Ty)) 4543 return false; 4544 4545 // The base type must be the same for all members. Types that 4546 // agree in both total size and mode (float vs. vector) are 4547 // treated as being equivalent here. 4548 const Type *TyPtr = Ty.getTypePtr(); 4549 if (!Base) { 4550 Base = TyPtr; 4551 // If it's a non-power-of-2 vector, its size is already a power-of-2, 4552 // so make sure to widen it explicitly. 4553 if (const VectorType *VT = Base->getAs<VectorType>()) { 4554 QualType EltTy = VT->getElementType(); 4555 unsigned NumElements = 4556 getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy); 4557 Base = getContext() 4558 .getVectorType(EltTy, NumElements, VT->getVectorKind()) 4559 .getTypePtr(); 4560 } 4561 } 4562 4563 if (Base->isVectorType() != TyPtr->isVectorType() || 4564 getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr)) 4565 return false; 4566 } 4567 return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members); 4568 } 4569 4570 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 4571 // Homogeneous aggregates for ELFv2 must have base types of float, 4572 // double, long double, or 128-bit vectors. 4573 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 4574 if (BT->getKind() == BuiltinType::Float || 4575 BT->getKind() == BuiltinType::Double || 4576 BT->getKind() == BuiltinType::LongDouble) { 4577 if (IsSoftFloatABI) 4578 return false; 4579 return true; 4580 } 4581 } 4582 if (const VectorType *VT = Ty->getAs<VectorType>()) { 4583 if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty)) 4584 return true; 4585 } 4586 return false; 4587 } 4588 4589 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough( 4590 const Type *Base, uint64_t Members) const { 4591 // Vector types require one register, floating point types require one 4592 // or two registers depending on their size. 4593 uint32_t NumRegs = 4594 Base->isVectorType() ? 1 : (getContext().getTypeSize(Base) + 63) / 64; 4595 4596 // Homogeneous Aggregates may occupy at most 8 registers. 
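  // (For example, eight float or eight double members (one register each)
  // still qualify, while nine do not; a 16-byte member counts as two
  // registers.)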
4597 return Members * NumRegs <= 8; 4598 } 4599 4600 ABIArgInfo 4601 PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const { 4602 Ty = useFirstFieldIfTransparentUnion(Ty); 4603 4604 if (Ty->isAnyComplexType()) 4605 return ABIArgInfo::getDirect(); 4606 4607 // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes) 4608 // or via reference (larger than 16 bytes). 4609 if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) { 4610 uint64_t Size = getContext().getTypeSize(Ty); 4611 if (Size > 128) 4612 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 4613 else if (Size < 128) { 4614 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size); 4615 return ABIArgInfo::getDirect(CoerceTy); 4616 } 4617 } 4618 4619 if (isAggregateTypeForABI(Ty)) { 4620 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 4621 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 4622 4623 uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity(); 4624 uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity(); 4625 4626 // ELFv2 homogeneous aggregates are passed as array types. 4627 const Type *Base = nullptr; 4628 uint64_t Members = 0; 4629 if (Kind == ELFv2 && 4630 isHomogeneousAggregate(Ty, Base, Members)) { 4631 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0)); 4632 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members); 4633 return ABIArgInfo::getDirect(CoerceTy); 4634 } 4635 4636 // If an aggregate may end up fully in registers, we do not 4637 // use the ByVal method, but pass the aggregate as array. 4638 // This is usually beneficial since we avoid forcing the 4639 // back-end to store the argument to memory. 4640 uint64_t Bits = getContext().getTypeSize(Ty); 4641 if (Bits > 0 && Bits <= 8 * GPRBits) { 4642 llvm::Type *CoerceTy; 4643 4644 // Types up to 8 bytes are passed as integer type (which will be 4645 // properly aligned in the argument save area doubleword). 4646 if (Bits <= GPRBits) 4647 CoerceTy = 4648 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8)); 4649 // Larger types are passed as arrays, with the base type selected 4650 // according to the required alignment in the save area. 4651 else { 4652 uint64_t RegBits = ABIAlign * 8; 4653 uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits; 4654 llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits); 4655 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs); 4656 } 4657 4658 return ABIArgInfo::getDirect(CoerceTy); 4659 } 4660 4661 // All other aggregates are passed ByVal. 4662 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign), 4663 /*ByVal=*/true, 4664 /*Realign=*/TyAlign > ABIAlign); 4665 } 4666 4667 return (isPromotableTypeForABI(Ty) ? 4668 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 4669 } 4670 4671 ABIArgInfo 4672 PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const { 4673 if (RetTy->isVoidType()) 4674 return ABIArgInfo::getIgnore(); 4675 4676 if (RetTy->isAnyComplexType()) 4677 return ABIArgInfo::getDirect(); 4678 4679 // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes) 4680 // or via reference (larger than 16 bytes). 
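  // (For example, an 8-byte vector is returned coerced to i64 in a GPR, while
  // a 32-byte vector is returned via an implicit reference.)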
4681 if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) { 4682 uint64_t Size = getContext().getTypeSize(RetTy); 4683 if (Size > 128) 4684 return getNaturalAlignIndirect(RetTy); 4685 else if (Size < 128) { 4686 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size); 4687 return ABIArgInfo::getDirect(CoerceTy); 4688 } 4689 } 4690 4691 if (isAggregateTypeForABI(RetTy)) { 4692 // ELFv2 homogeneous aggregates are returned as array types. 4693 const Type *Base = nullptr; 4694 uint64_t Members = 0; 4695 if (Kind == ELFv2 && 4696 isHomogeneousAggregate(RetTy, Base, Members)) { 4697 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0)); 4698 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members); 4699 return ABIArgInfo::getDirect(CoerceTy); 4700 } 4701 4702 // ELFv2 small aggregates are returned in up to two registers. 4703 uint64_t Bits = getContext().getTypeSize(RetTy); 4704 if (Kind == ELFv2 && Bits <= 2 * GPRBits) { 4705 if (Bits == 0) 4706 return ABIArgInfo::getIgnore(); 4707 4708 llvm::Type *CoerceTy; 4709 if (Bits > GPRBits) { 4710 CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits); 4711 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy); 4712 } else 4713 CoerceTy = 4714 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8)); 4715 return ABIArgInfo::getDirect(CoerceTy); 4716 } 4717 4718 // All other aggregates are returned indirectly. 4719 return getNaturalAlignIndirect(RetTy); 4720 } 4721 4722 return (isPromotableTypeForABI(RetTy) ? 4723 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 4724 } 4725 4726 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine. 4727 Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 4728 QualType Ty) const { 4729 auto TypeInfo = getContext().getTypeInfoInChars(Ty); 4730 TypeInfo.second = getParamTypeAlignment(Ty); 4731 4732 CharUnits SlotSize = CharUnits::fromQuantity(8); 4733 4734 // If we have a complex type and the base type is smaller than 8 bytes, 4735 // the ABI calls for the real and imaginary parts to be right-adjusted 4736 // in separate doublewords. However, Clang expects us to produce a 4737 // pointer to a structure with the two parts packed tightly. So generate 4738 // loads of the real and imaginary parts relative to the va_list pointer, 4739 // and store them to a temporary structure. 
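  // For example (illustrative, assuming a big-endian ELFv2 target): for a
  // "_Complex float" argument the 4-byte real part is right-adjusted at
  // offset 4 of the first doubleword and the imaginary part at offset 12 of
  // the second, which is what the byte-GEP offsets below compute.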
4740 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { 4741 CharUnits EltSize = TypeInfo.first / 2; 4742 if (EltSize < SlotSize) { 4743 Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty, 4744 SlotSize * 2, SlotSize, 4745 SlotSize, /*AllowHigher*/ true); 4746 4747 Address RealAddr = Addr; 4748 Address ImagAddr = RealAddr; 4749 if (CGF.CGM.getDataLayout().isBigEndian()) { 4750 RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, 4751 SlotSize - EltSize); 4752 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr, 4753 2 * SlotSize - EltSize); 4754 } else { 4755 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize); 4756 } 4757 4758 llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType()); 4759 RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy); 4760 ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy); 4761 llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal"); 4762 llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag"); 4763 4764 Address Temp = CGF.CreateMemTemp(Ty, "vacplx"); 4765 CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty), 4766 /*init*/ true); 4767 return Temp; 4768 } 4769 } 4770 4771 // Otherwise, just use the general rule. 4772 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, 4773 TypeInfo, SlotSize, /*AllowHigher*/ true); 4774 } 4775 4776 static bool 4777 PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4778 llvm::Value *Address) { 4779 // This is calculated from the LLVM and GCC tables and verified 4780 // against gcc output. AFAIK all ABIs use the same encoding. 4781 4782 CodeGen::CGBuilderTy &Builder = CGF.Builder; 4783 4784 llvm::IntegerType *i8 = CGF.Int8Ty; 4785 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 4786 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 4787 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 4788 4789 // 0-31: r0-31, the 8-byte general-purpose registers 4790 AssignToArrayRange(Builder, Address, Eight8, 0, 31); 4791 4792 // 32-63: fp0-31, the 8-byte floating-point registers 4793 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 4794 4795 // 64-67 are various 8-byte special-purpose registers: 4796 // 64: mq 4797 // 65: lr 4798 // 66: ctr 4799 // 67: ap 4800 AssignToArrayRange(Builder, Address, Eight8, 64, 67); 4801 4802 // 68-76 are various 4-byte special-purpose registers: 4803 // 68-75 cr0-7 4804 // 76: xer 4805 AssignToArrayRange(Builder, Address, Four8, 68, 76); 4806 4807 // 77-108: v0-31, the 16-byte vector registers 4808 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 4809 4810 // 109: vrsave 4811 // 110: vscr 4812 // 111: spe_acc 4813 // 112: spefscr 4814 // 113: sfp 4815 // 114: tfhar 4816 // 115: tfiar 4817 // 116: texasr 4818 AssignToArrayRange(Builder, Address, Eight8, 109, 116); 4819 4820 return false; 4821 } 4822 4823 bool 4824 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable( 4825 CodeGen::CodeGenFunction &CGF, 4826 llvm::Value *Address) const { 4827 4828 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 4829 } 4830 4831 bool 4832 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4833 llvm::Value *Address) const { 4834 4835 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 4836 } 4837 4838 //===----------------------------------------------------------------------===// 4839 // AArch64 ABI Implementation 4840 //===----------------------------------------------------------------------===// 4841 4842 namespace { 4843 4844 class 
AArch64ABIInfo : public SwiftABIInfo { 4845 public: 4846 enum ABIKind { 4847 AAPCS = 0, 4848 DarwinPCS, 4849 Win64 4850 }; 4851 4852 private: 4853 ABIKind Kind; 4854 4855 public: 4856 AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) 4857 : SwiftABIInfo(CGT), Kind(Kind) {} 4858 4859 private: 4860 ABIKind getABIKind() const { return Kind; } 4861 bool isDarwinPCS() const { return Kind == DarwinPCS; } 4862 4863 ABIArgInfo classifyReturnType(QualType RetTy) const; 4864 ABIArgInfo classifyArgumentType(QualType RetTy) const; 4865 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 4866 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 4867 uint64_t Members) const override; 4868 4869 bool isIllegalVectorType(QualType Ty) const; 4870 4871 void computeInfo(CGFunctionInfo &FI) const override { 4872 if (!getCXXABI().classifyReturnType(FI)) 4873 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 4874 4875 for (auto &it : FI.arguments()) 4876 it.info = classifyArgumentType(it.type); 4877 } 4878 4879 Address EmitDarwinVAArg(Address VAListAddr, QualType Ty, 4880 CodeGenFunction &CGF) const; 4881 4882 Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty, 4883 CodeGenFunction &CGF) const; 4884 4885 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 4886 QualType Ty) const override { 4887 return Kind == Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty) 4888 : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF) 4889 : EmitAAPCSVAArg(VAListAddr, Ty, CGF); 4890 } 4891 4892 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, 4893 QualType Ty) const override; 4894 4895 bool shouldPassIndirectlyForSwift(CharUnits totalSize, 4896 ArrayRef<llvm::Type*> scalars, 4897 bool asReturnValue) const override { 4898 return occupiesMoreThan(CGT, scalars, /*total*/ 4); 4899 } 4900 bool isSwiftErrorInRegister() const override { 4901 return true; 4902 } 4903 4904 bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy, 4905 unsigned elts) const override; 4906 }; 4907 4908 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo { 4909 public: 4910 AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind) 4911 : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {} 4912 4913 StringRef getARCRetainAutoreleasedReturnValueMarker() const override { 4914 return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue"; 4915 } 4916 4917 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 4918 return 31; 4919 } 4920 4921 bool doesReturnSlotInterfereWithArgs() const override { return false; } 4922 }; 4923 4924 class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo { 4925 public: 4926 WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind K) 4927 : AArch64TargetCodeGenInfo(CGT, K) {} 4928 4929 void getDependentLibraryOption(llvm::StringRef Lib, 4930 llvm::SmallString<24> &Opt) const override { 4931 Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib); 4932 } 4933 4934 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value, 4935 llvm::SmallString<32> &Opt) const override { 4936 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 4937 } 4938 }; 4939 } 4940 4941 ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const { 4942 Ty = useFirstFieldIfTransparentUnion(Ty); 4943 4944 // Handle illegal vector types here. 
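  // For illustration (hypothetical GCC-vector types, not from this file):
  // a "<2 x i8>" vector (16 bits) is illegal for the AArch64 PCS and is
  // coerced below to i16 on Android or i32 elsewhere, while a 256-bit
  // vector is too large for any coercion and is passed indirectly.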
4945 if (isIllegalVectorType(Ty)) { 4946 uint64_t Size = getContext().getTypeSize(Ty); 4947 // Android promotes <2 x i8> to i16, not i32 4948 if (isAndroid() && (Size <= 16)) { 4949 llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext()); 4950 return ABIArgInfo::getDirect(ResType); 4951 } 4952 if (Size <= 32) { 4953 llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext()); 4954 return ABIArgInfo::getDirect(ResType); 4955 } 4956 if (Size == 64) { 4957 llvm::Type *ResType = 4958 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2); 4959 return ABIArgInfo::getDirect(ResType); 4960 } 4961 if (Size == 128) { 4962 llvm::Type *ResType = 4963 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4); 4964 return ABIArgInfo::getDirect(ResType); 4965 } 4966 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 4967 } 4968 4969 if (!isAggregateTypeForABI(Ty)) { 4970 // Treat an enum type as its underlying type. 4971 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 4972 Ty = EnumTy->getDecl()->getIntegerType(); 4973 4974 return (Ty->isPromotableIntegerType() && isDarwinPCS() 4975 ? ABIArgInfo::getExtend() 4976 : ABIArgInfo::getDirect()); 4977 } 4978 4979 // Structures with either a non-trivial destructor or a non-trivial 4980 // copy constructor are always indirect. 4981 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 4982 return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA == 4983 CGCXXABI::RAA_DirectInMemory); 4984 } 4985 4986 // Empty records are always ignored on Darwin, but actually passed in C++ mode 4987 // elsewhere for GNU compatibility. 4988 uint64_t Size = getContext().getTypeSize(Ty); 4989 bool IsEmpty = isEmptyRecord(getContext(), Ty, true); 4990 if (IsEmpty || Size == 0) { 4991 if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS()) 4992 return ABIArgInfo::getIgnore(); 4993 4994 // GNU C mode. The only argument that gets ignored is an empty one with size 4995 // 0. 4996 if (IsEmpty && Size == 0) 4997 return ABIArgInfo::getIgnore(); 4998 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 4999 } 5000 5001 // Homogeneous Floating-point Aggregates (HFAs) need to be expanded. 5002 const Type *Base = nullptr; 5003 uint64_t Members = 0; 5004 if (isHomogeneousAggregate(Ty, Base, Members)) { 5005 return ABIArgInfo::getDirect( 5006 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members)); 5007 } 5008 5009 // Aggregates <= 16 bytes are passed directly in registers or on the stack. 5010 if (Size <= 128) { 5011 // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of 5012 // same size and alignment. 5013 if (getTarget().isRenderScriptTarget()) { 5014 return coerceToIntArray(Ty, getContext(), getVMContext()); 5015 } 5016 unsigned Alignment = getContext().getTypeAlign(Ty); 5017 Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes 5018 5019 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment. 5020 // For aggregates with 16-byte alignment, we use i128. 
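    // For example (illustrative): "struct { long long a, b; }" (16 bytes,
    // 8-byte aligned) is coerced to [2 x i64], whereas
    // "struct { __int128 x; }" (16-byte aligned) becomes a single i128.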
5021 if (Alignment < 128 && Size == 128) { 5022 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext()); 5023 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64)); 5024 } 5025 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size)); 5026 } 5027 5028 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 5029 } 5030 5031 ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const { 5032 if (RetTy->isVoidType()) 5033 return ABIArgInfo::getIgnore(); 5034 5035 // Large vector types should be returned via memory. 5036 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) 5037 return getNaturalAlignIndirect(RetTy); 5038 5039 if (!isAggregateTypeForABI(RetTy)) { 5040 // Treat an enum type as its underlying type. 5041 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 5042 RetTy = EnumTy->getDecl()->getIntegerType(); 5043 5044 return (RetTy->isPromotableIntegerType() && isDarwinPCS() 5045 ? ABIArgInfo::getExtend() 5046 : ABIArgInfo::getDirect()); 5047 } 5048 5049 uint64_t Size = getContext().getTypeSize(RetTy); 5050 if (isEmptyRecord(getContext(), RetTy, true) || Size == 0) 5051 return ABIArgInfo::getIgnore(); 5052 5053 const Type *Base = nullptr; 5054 uint64_t Members = 0; 5055 if (isHomogeneousAggregate(RetTy, Base, Members)) 5056 // Homogeneous Floating-point Aggregates (HFAs) are returned directly. 5057 return ABIArgInfo::getDirect(); 5058 5059 // Aggregates <= 16 bytes are returned directly in registers or on the stack. 5060 if (Size <= 128) { 5061 // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of 5062 // same size and alignment. 5063 if (getTarget().isRenderScriptTarget()) { 5064 return coerceToIntArray(RetTy, getContext(), getVMContext()); 5065 } 5066 unsigned Alignment = getContext().getTypeAlign(RetTy); 5067 Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes 5068 5069 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment. 5070 // For aggregates with 16-byte alignment, we use i128. 5071 if (Alignment < 128 && Size == 128) { 5072 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext()); 5073 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64)); 5074 } 5075 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size)); 5076 } 5077 5078 return getNaturalAlignIndirect(RetTy); 5079 } 5080 5081 /// isIllegalVectorType - check whether the vector type is legal for AArch64. 5082 bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const { 5083 if (const VectorType *VT = Ty->getAs<VectorType>()) { 5084 // Check whether VT is legal. 5085 unsigned NumElements = VT->getNumElements(); 5086 uint64_t Size = getContext().getTypeSize(VT); 5087 // NumElements should be power of 2. 5088 if (!llvm::isPowerOf2_32(NumElements)) 5089 return true; 5090 return Size != 64 && (Size != 128 || NumElements == 1); 5091 } 5092 return false; 5093 } 5094 5095 bool AArch64ABIInfo::isLegalVectorTypeForSwift(CharUnits totalSize, 5096 llvm::Type *eltTy, 5097 unsigned elts) const { 5098 if (!llvm::isPowerOf2_32(elts)) 5099 return false; 5100 if (totalSize.getQuantity() != 8 && 5101 (totalSize.getQuantity() != 16 || elts == 1)) 5102 return false; 5103 return true; 5104 } 5105 5106 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 5107 // Homogeneous aggregates for AAPCS64 must have base types of a floating 5108 // point type or a short-vector type. 
This is the same as the 32-bit ABI, 5109 // but with the difference that any floating-point type is allowed, 5110 // including __fp16. 5111 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 5112 if (BT->isFloatingPoint()) 5113 return true; 5114 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 5115 unsigned VecSize = getContext().getTypeSize(VT); 5116 if (VecSize == 64 || VecSize == 128) 5117 return true; 5118 } 5119 return false; 5120 } 5121 5122 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, 5123 uint64_t Members) const { 5124 return Members <= 4; 5125 } 5126 5127 Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, 5128 QualType Ty, 5129 CodeGenFunction &CGF) const { 5130 ABIArgInfo AI = classifyArgumentType(Ty); 5131 bool IsIndirect = AI.isIndirect(); 5132 5133 llvm::Type *BaseTy = CGF.ConvertType(Ty); 5134 if (IsIndirect) 5135 BaseTy = llvm::PointerType::getUnqual(BaseTy); 5136 else if (AI.getCoerceToType()) 5137 BaseTy = AI.getCoerceToType(); 5138 5139 unsigned NumRegs = 1; 5140 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) { 5141 BaseTy = ArrTy->getElementType(); 5142 NumRegs = ArrTy->getNumElements(); 5143 } 5144 bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy(); 5145 5146 // The AArch64 va_list type and handling is specified in the Procedure Call 5147 // Standard, section B.4: 5148 // 5149 // struct { 5150 // void *__stack; 5151 // void *__gr_top; 5152 // void *__vr_top; 5153 // int __gr_offs; 5154 // int __vr_offs; 5155 // }; 5156 5157 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg"); 5158 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 5159 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack"); 5160 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 5161 5162 auto TyInfo = getContext().getTypeInfoInChars(Ty); 5163 CharUnits TyAlign = TyInfo.second; 5164 5165 Address reg_offs_p = Address::invalid(); 5166 llvm::Value *reg_offs = nullptr; 5167 int reg_top_index; 5168 CharUnits reg_top_offset; 5169 int RegSize = IsIndirect ? 8 : TyInfo.first.getQuantity(); 5170 if (!IsFPR) { 5171 // 3 is the field number of __gr_offs 5172 reg_offs_p = 5173 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24), 5174 "gr_offs_p"); 5175 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs"); 5176 reg_top_index = 1; // field number for __gr_top 5177 reg_top_offset = CharUnits::fromQuantity(8); 5178 RegSize = llvm::alignTo(RegSize, 8); 5179 } else { 5180 // 4 is the field number of __vr_offs. 5181 reg_offs_p = 5182 CGF.Builder.CreateStructGEP(VAListAddr, 4, CharUnits::fromQuantity(28), 5183 "vr_offs_p"); 5184 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs"); 5185 reg_top_index = 2; // field number for __vr_top 5186 reg_top_offset = CharUnits::fromQuantity(16); 5187 RegSize = 16 * NumRegs; 5188 } 5189 5190 //======================================= 5191 // Find out where argument was passed 5192 //======================================= 5193 5194 // If reg_offs >= 0 we're already using the stack for this type of 5195 // argument. We don't want to keep updating reg_offs (in case it overflows, 5196 // though anyone passing 2GB of arguments, each at most 16 bytes, deserves 5197 // whatever they get). 
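  // Recap of the AAPCS64 convention assumed here: __gr_offs / __vr_offs are
  // negative offsets from __gr_top / __vr_top while register save slots
  // remain, and become non-negative once the registers are exhausted, which
  // is exactly what the signed >= 0 comparison below tests.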
5198 llvm::Value *UsingStack = nullptr; 5199 UsingStack = CGF.Builder.CreateICmpSGE( 5200 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0)); 5201 5202 CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock); 5203 5204 // Otherwise, at least some kind of argument could go in these registers, the 5205 // question is whether this particular type is too big. 5206 CGF.EmitBlock(MaybeRegBlock); 5207 5208 // Integer arguments may need to correct register alignment (for example a 5209 // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we 5210 // align __gr_offs to calculate the potential address. 5211 if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) { 5212 int Align = TyAlign.getQuantity(); 5213 5214 reg_offs = CGF.Builder.CreateAdd( 5215 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1), 5216 "align_regoffs"); 5217 reg_offs = CGF.Builder.CreateAnd( 5218 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align), 5219 "aligned_regoffs"); 5220 } 5221 5222 // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list. 5223 // The fact that this is done unconditionally reflects the fact that 5224 // allocating an argument to the stack also uses up all the remaining 5225 // registers of the appropriate kind. 5226 llvm::Value *NewOffset = nullptr; 5227 NewOffset = CGF.Builder.CreateAdd( 5228 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs"); 5229 CGF.Builder.CreateStore(NewOffset, reg_offs_p); 5230 5231 // Now we're in a position to decide whether this argument really was in 5232 // registers or not. 5233 llvm::Value *InRegs = nullptr; 5234 InRegs = CGF.Builder.CreateICmpSLE( 5235 NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg"); 5236 5237 CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock); 5238 5239 //======================================= 5240 // Argument was in registers 5241 //======================================= 5242 5243 // Now we emit the code for if the argument was originally passed in 5244 // registers. First start the appropriate block: 5245 CGF.EmitBlock(InRegBlock); 5246 5247 llvm::Value *reg_top = nullptr; 5248 Address reg_top_p = CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, 5249 reg_top_offset, "reg_top_p"); 5250 reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top"); 5251 Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs), 5252 CharUnits::fromQuantity(IsFPR ? 16 : 8)); 5253 Address RegAddr = Address::invalid(); 5254 llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty); 5255 5256 if (IsIndirect) { 5257 // If it's been passed indirectly (actually a struct), whatever we find from 5258 // stored registers or on the stack will actually be a struct **. 5259 MemTy = llvm::PointerType::getUnqual(MemTy); 5260 } 5261 5262 const Type *Base = nullptr; 5263 uint64_t NumMembers = 0; 5264 bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers); 5265 if (IsHFA && NumMembers > 1) { 5266 // Homogeneous aggregates passed in registers will have their elements split 5267 // and stored 16-bytes apart regardless of size (they're notionally in qN, 5268 // qN+1, ...). We reload and store into a temporary local variable 5269 // contiguously. 
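    // For example (illustrative): an HFA such as "struct { float r, g, b, a; }"
    // passed in s0-s3 has its four floats saved 16 bytes apart in the vector
    // save area, so the loop below gathers them into one packed temporary.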
5270 assert(!IsIndirect && "Homogeneous aggregates should be passed directly"); 5271 auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0)); 5272 llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0)); 5273 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers); 5274 Address Tmp = CGF.CreateTempAlloca(HFATy, 5275 std::max(TyAlign, BaseTyInfo.second)); 5276 5277 // On big-endian platforms, the value will be right-aligned in its slot. 5278 int Offset = 0; 5279 if (CGF.CGM.getDataLayout().isBigEndian() && 5280 BaseTyInfo.first.getQuantity() < 16) 5281 Offset = 16 - BaseTyInfo.first.getQuantity(); 5282 5283 for (unsigned i = 0; i < NumMembers; ++i) { 5284 CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset); 5285 Address LoadAddr = 5286 CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset); 5287 LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy); 5288 5289 Address StoreAddr = 5290 CGF.Builder.CreateConstArrayGEP(Tmp, i, BaseTyInfo.first); 5291 5292 llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr); 5293 CGF.Builder.CreateStore(Elem, StoreAddr); 5294 } 5295 5296 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy); 5297 } else { 5298 // Otherwise the object is contiguous in memory. 5299 5300 // It might be right-aligned in its slot. 5301 CharUnits SlotSize = BaseAddr.getAlignment(); 5302 if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect && 5303 (IsHFA || !isAggregateTypeForABI(Ty)) && 5304 TyInfo.first < SlotSize) { 5305 CharUnits Offset = SlotSize - TyInfo.first; 5306 BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset); 5307 } 5308 5309 RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy); 5310 } 5311 5312 CGF.EmitBranch(ContBlock); 5313 5314 //======================================= 5315 // Argument was on the stack 5316 //======================================= 5317 CGF.EmitBlock(OnStackBlock); 5318 5319 Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, 5320 CharUnits::Zero(), "stack_p"); 5321 llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack"); 5322 5323 // Again, stack arguments may need realignment. In this case both integer and 5324 // floating-point ones might be affected. 5325 if (!IsIndirect && TyAlign.getQuantity() > 8) { 5326 int Align = TyAlign.getQuantity(); 5327 5328 OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty); 5329 5330 OnStackPtr = CGF.Builder.CreateAdd( 5331 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1), 5332 "align_stack"); 5333 OnStackPtr = CGF.Builder.CreateAnd( 5334 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align), 5335 "align_stack"); 5336 5337 OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy); 5338 } 5339 Address OnStackAddr(OnStackPtr, 5340 std::max(CharUnits::fromQuantity(8), TyAlign)); 5341 5342 // All stack slots are multiples of 8 bytes. 
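  // For example (illustrative): a 12-byte struct taken from the stack
  // advances __stack by 16 bytes, while an indirectly passed argument only
  // consumes the 8-byte pointer slot.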
5343 CharUnits StackSlotSize = CharUnits::fromQuantity(8); 5344 CharUnits StackSize; 5345 if (IsIndirect) 5346 StackSize = StackSlotSize; 5347 else 5348 StackSize = TyInfo.first.alignTo(StackSlotSize); 5349 5350 llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize); 5351 llvm::Value *NewStack = 5352 CGF.Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC, "new_stack"); 5353 5354 // Write the new value of __stack for the next call to va_arg 5355 CGF.Builder.CreateStore(NewStack, stack_p); 5356 5357 if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) && 5358 TyInfo.first < StackSlotSize) { 5359 CharUnits Offset = StackSlotSize - TyInfo.first; 5360 OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset); 5361 } 5362 5363 OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy); 5364 5365 CGF.EmitBranch(ContBlock); 5366 5367 //======================================= 5368 // Tidy up 5369 //======================================= 5370 CGF.EmitBlock(ContBlock); 5371 5372 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, 5373 OnStackAddr, OnStackBlock, "vaargs.addr"); 5374 5375 if (IsIndirect) 5376 return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"), 5377 TyInfo.second); 5378 5379 return ResAddr; 5380 } 5381 5382 Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty, 5383 CodeGenFunction &CGF) const { 5384 // The backend's lowering doesn't support va_arg for aggregates or 5385 // illegal vector types. Lower VAArg here for these cases and use 5386 // the LLVM va_arg instruction for everything else. 5387 if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty)) 5388 return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect()); 5389 5390 CharUnits SlotSize = CharUnits::fromQuantity(8); 5391 5392 // Empty records are ignored for parameter passing purposes. 5393 if (isEmptyRecord(getContext(), Ty, true)) { 5394 Address Addr(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize); 5395 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty)); 5396 return Addr; 5397 } 5398 5399 // The size of the actual thing passed, which might end up just 5400 // being a pointer for indirect types. 5401 auto TyInfo = getContext().getTypeInfoInChars(Ty); 5402 5403 // Arguments bigger than 16 bytes which aren't homogeneous 5404 // aggregates should be passed indirectly. 
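  // For example (illustrative): "struct { char buf[32]; }" (32 bytes, not a
  // homogeneous aggregate) is passed indirectly, while "struct { double d[4]; }"
  // (also 32 bytes, but an HFA of four doubles) stays direct.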
5405 bool IsIndirect = false; 5406 if (TyInfo.first.getQuantity() > 16) { 5407 const Type *Base = nullptr; 5408 uint64_t Members = 0; 5409 IsIndirect = !isHomogeneousAggregate(Ty, Base, Members); 5410 } 5411 5412 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, 5413 TyInfo, SlotSize, /*AllowHigherAlign*/ true); 5414 } 5415 5416 Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, 5417 QualType Ty) const { 5418 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false, 5419 CGF.getContext().getTypeInfoInChars(Ty), 5420 CharUnits::fromQuantity(8), 5421 /*allowHigherAlign*/ false); 5422 } 5423 5424 //===----------------------------------------------------------------------===// 5425 // ARM ABI Implementation 5426 //===----------------------------------------------------------------------===// 5427 5428 namespace { 5429 5430 class ARMABIInfo : public SwiftABIInfo { 5431 public: 5432 enum ABIKind { 5433 APCS = 0, 5434 AAPCS = 1, 5435 AAPCS_VFP = 2, 5436 AAPCS16_VFP = 3, 5437 }; 5438 5439 private: 5440 ABIKind Kind; 5441 5442 public: 5443 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) 5444 : SwiftABIInfo(CGT), Kind(_Kind) { 5445 setCCs(); 5446 } 5447 5448 bool isEABI() const { 5449 switch (getTarget().getTriple().getEnvironment()) { 5450 case llvm::Triple::Android: 5451 case llvm::Triple::EABI: 5452 case llvm::Triple::EABIHF: 5453 case llvm::Triple::GNUEABI: 5454 case llvm::Triple::GNUEABIHF: 5455 case llvm::Triple::MuslEABI: 5456 case llvm::Triple::MuslEABIHF: 5457 return true; 5458 default: 5459 return false; 5460 } 5461 } 5462 5463 bool isEABIHF() const { 5464 switch (getTarget().getTriple().getEnvironment()) { 5465 case llvm::Triple::EABIHF: 5466 case llvm::Triple::GNUEABIHF: 5467 case llvm::Triple::MuslEABIHF: 5468 return true; 5469 default: 5470 return false; 5471 } 5472 } 5473 5474 ABIKind getABIKind() const { return Kind; } 5475 5476 private: 5477 ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const; 5478 ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic) const; 5479 bool isIllegalVectorType(QualType Ty) const; 5480 5481 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 5482 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 5483 uint64_t Members) const override; 5484 5485 void computeInfo(CGFunctionInfo &FI) const override; 5486 5487 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 5488 QualType Ty) const override; 5489 5490 llvm::CallingConv::ID getLLVMDefaultCC() const; 5491 llvm::CallingConv::ID getABIDefaultCC() const; 5492 void setCCs(); 5493 5494 bool shouldPassIndirectlyForSwift(CharUnits totalSize, 5495 ArrayRef<llvm::Type*> scalars, 5496 bool asReturnValue) const override { 5497 return occupiesMoreThan(CGT, scalars, /*total*/ 4); 5498 } 5499 bool isSwiftErrorInRegister() const override { 5500 return true; 5501 } 5502 bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy, 5503 unsigned elts) const override; 5504 }; 5505 5506 class ARMTargetCodeGenInfo : public TargetCodeGenInfo { 5507 public: 5508 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 5509 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {} 5510 5511 const ARMABIInfo &getABIInfo() const { 5512 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo()); 5513 } 5514 5515 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 5516 return 13; 5517 } 5518 5519 StringRef getARCRetainAutoreleasedReturnValueMarker() const override { 5520 return "mov\tr7, r7\t\t// marker for 
objc_retainAutoreleaseReturnValue"; 5521 } 5522 5523 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 5524 llvm::Value *Address) const override { 5525 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 5526 5527 // 0-15 are the 16 integer registers. 5528 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15); 5529 return false; 5530 } 5531 5532 unsigned getSizeOfUnwindException() const override { 5533 if (getABIInfo().isEABI()) return 88; 5534 return TargetCodeGenInfo::getSizeOfUnwindException(); 5535 } 5536 5537 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 5538 CodeGen::CodeGenModule &CGM, 5539 ForDefinition_t IsForDefinition) const override { 5540 if (!IsForDefinition) 5541 return; 5542 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 5543 if (!FD) 5544 return; 5545 5546 const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>(); 5547 if (!Attr) 5548 return; 5549 5550 const char *Kind; 5551 switch (Attr->getInterrupt()) { 5552 case ARMInterruptAttr::Generic: Kind = ""; break; 5553 case ARMInterruptAttr::IRQ: Kind = "IRQ"; break; 5554 case ARMInterruptAttr::FIQ: Kind = "FIQ"; break; 5555 case ARMInterruptAttr::SWI: Kind = "SWI"; break; 5556 case ARMInterruptAttr::ABORT: Kind = "ABORT"; break; 5557 case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break; 5558 } 5559 5560 llvm::Function *Fn = cast<llvm::Function>(GV); 5561 5562 Fn->addFnAttr("interrupt", Kind); 5563 5564 ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind(); 5565 if (ABI == ARMABIInfo::APCS) 5566 return; 5567 5568 // AAPCS guarantees that sp will be 8-byte aligned on any public interface, 5569 // however this is not necessarily true on taking any interrupt. Instruct 5570 // the backend to perform a realignment as part of the function prologue. 5571 llvm::AttrBuilder B; 5572 B.addStackAlignmentAttr(8); 5573 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B); 5574 } 5575 }; 5576 5577 class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo { 5578 public: 5579 WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 5580 : ARMTargetCodeGenInfo(CGT, K) {} 5581 5582 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 5583 CodeGen::CodeGenModule &CGM, 5584 ForDefinition_t IsForDefinition) const override; 5585 5586 void getDependentLibraryOption(llvm::StringRef Lib, 5587 llvm::SmallString<24> &Opt) const override { 5588 Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib); 5589 } 5590 5591 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value, 5592 llvm::SmallString<32> &Opt) const override { 5593 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 5594 } 5595 }; 5596 5597 void WindowsARMTargetCodeGenInfo::setTargetAttributes( 5598 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM, 5599 ForDefinition_t IsForDefinition) const { 5600 ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM, IsForDefinition); 5601 if (!IsForDefinition) 5602 return; 5603 addStackProbeSizeTargetAttribute(D, GV, CGM); 5604 } 5605 } 5606 5607 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const { 5608 if (!getCXXABI().classifyReturnType(FI)) 5609 FI.getReturnInfo() = 5610 classifyReturnType(FI.getReturnType(), FI.isVariadic()); 5611 5612 for (auto &I : FI.arguments()) 5613 I.info = classifyArgumentType(I.type, FI.isVariadic()); 5614 5615 // Always honor user-specified calling convention. 
5616 if (FI.getCallingConvention() != llvm::CallingConv::C) 5617 return; 5618 5619 llvm::CallingConv::ID cc = getRuntimeCC(); 5620 if (cc != llvm::CallingConv::C) 5621 FI.setEffectiveCallingConvention(cc); 5622 } 5623 5624 /// Return the default calling convention that LLVM will use. 5625 llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const { 5626 // The default calling convention that LLVM will infer. 5627 if (isEABIHF() || getTarget().getTriple().isWatchABI()) 5628 return llvm::CallingConv::ARM_AAPCS_VFP; 5629 else if (isEABI()) 5630 return llvm::CallingConv::ARM_AAPCS; 5631 else 5632 return llvm::CallingConv::ARM_APCS; 5633 } 5634 5635 /// Return the calling convention that our ABI would like us to use 5636 /// as the C calling convention. 5637 llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const { 5638 switch (getABIKind()) { 5639 case APCS: return llvm::CallingConv::ARM_APCS; 5640 case AAPCS: return llvm::CallingConv::ARM_AAPCS; 5641 case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; 5642 case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; 5643 } 5644 llvm_unreachable("bad ABI kind"); 5645 } 5646 5647 void ARMABIInfo::setCCs() { 5648 assert(getRuntimeCC() == llvm::CallingConv::C); 5649 5650 // Don't muddy up the IR with a ton of explicit annotations if 5651 // they'd just match what LLVM will infer from the triple. 5652 llvm::CallingConv::ID abiCC = getABIDefaultCC(); 5653 if (abiCC != getLLVMDefaultCC()) 5654 RuntimeCC = abiCC; 5655 5656 // AAPCS apparently requires runtime support functions to be soft-float, but 5657 // that's almost certainly for historic reasons (Thumb1 not supporting VFP 5658 // most likely). It's more convenient for AAPCS16_VFP to be hard-float. 5659 5660 // The Run-time ABI for the ARM Architecture section 4.1.2 requires 5661 // AEABI-complying FP helper functions to use the base AAPCS. 5662 // These AEABI functions are expanded in the ARM llvm backend, all the builtin 5663 // support functions emitted by clang such as the _Complex helpers follow the 5664 // abiCC. 5665 if (abiCC != getLLVMDefaultCC()) 5666 BuiltinCC = abiCC; 5667 } 5668 5669 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, 5670 bool isVariadic) const { 5671 // 6.1.2.1 The following argument types are VFP CPRCs: 5672 // A single-precision floating-point type (including promoted 5673 // half-precision types); A double-precision floating-point type; 5674 // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate 5675 // with a Base Type of a single- or double-precision floating-point type, 5676 // 64-bit containerized vectors or 128-bit containerized vectors with one 5677 // to four Elements. 5678 bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic; 5679 5680 Ty = useFirstFieldIfTransparentUnion(Ty); 5681 5682 // Handle illegal vector types here. 
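  // For illustration (hypothetical GCC-vector types, not from this file):
  // on non-Android targets a "<2 x i8>" vector (16 bits) is illegal and is
  // coerced below to i32; Android keeps its legacy acceptance of 3-element
  // and sub-32-bit vectors, as described in isIllegalVectorType().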
5683 if (isIllegalVectorType(Ty)) { 5684 uint64_t Size = getContext().getTypeSize(Ty); 5685 if (Size <= 32) { 5686 llvm::Type *ResType = 5687 llvm::Type::getInt32Ty(getVMContext()); 5688 return ABIArgInfo::getDirect(ResType); 5689 } 5690 if (Size == 64) { 5691 llvm::Type *ResType = llvm::VectorType::get( 5692 llvm::Type::getInt32Ty(getVMContext()), 2); 5693 return ABIArgInfo::getDirect(ResType); 5694 } 5695 if (Size == 128) { 5696 llvm::Type *ResType = llvm::VectorType::get( 5697 llvm::Type::getInt32Ty(getVMContext()), 4); 5698 return ABIArgInfo::getDirect(ResType); 5699 } 5700 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 5701 } 5702 5703 // __fp16 gets passed as if it were an int or float, but with the top 16 bits 5704 // unspecified. This is not done for OpenCL as it handles the half type 5705 // natively, and does not need to interwork with AAPCS code. 5706 if (Ty->isHalfType() && !getContext().getLangOpts().NativeHalfArgsAndReturns) { 5707 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ? 5708 llvm::Type::getFloatTy(getVMContext()) : 5709 llvm::Type::getInt32Ty(getVMContext()); 5710 return ABIArgInfo::getDirect(ResType); 5711 } 5712 5713 if (!isAggregateTypeForABI(Ty)) { 5714 // Treat an enum type as its underlying type. 5715 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) { 5716 Ty = EnumTy->getDecl()->getIntegerType(); 5717 } 5718 5719 return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend() 5720 : ABIArgInfo::getDirect()); 5721 } 5722 5723 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 5724 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 5725 } 5726 5727 // Ignore empty records. 5728 if (isEmptyRecord(getContext(), Ty, true)) 5729 return ABIArgInfo::getIgnore(); 5730 5731 if (IsEffectivelyAAPCS_VFP) { 5732 // Homogeneous Aggregates need to be expanded when we can fit the aggregate 5733 // into VFP registers. 5734 const Type *Base = nullptr; 5735 uint64_t Members = 0; 5736 if (isHomogeneousAggregate(Ty, Base, Members)) { 5737 assert(Base && "Base class should be set for homogeneous aggregate"); 5738 // Base can be a floating-point or a vector. 5739 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false); 5740 } 5741 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) { 5742 // WatchOS does have homogeneous aggregates. Note that we intentionally use 5743 // this convention even for a variadic function: the backend will use GPRs 5744 // if needed. 5745 const Type *Base = nullptr; 5746 uint64_t Members = 0; 5747 if (isHomogeneousAggregate(Ty, Base, Members)) { 5748 assert(Base && Members <= 4 && "unexpected homogeneous aggregate"); 5749 llvm::Type *Ty = 5750 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members); 5751 return ABIArgInfo::getDirect(Ty, 0, nullptr, false); 5752 } 5753 } 5754 5755 if (getABIKind() == ARMABIInfo::AAPCS16_VFP && 5756 getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) { 5757 // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're 5758 // bigger than 128-bits, they get placed in space allocated by the caller, 5759 // and a pointer is passed. 5760 return ABIArgInfo::getIndirect( 5761 CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false); 5762 } 5763 5764 // Support byval for ARM. 5765 // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at 5766 // most 8-byte. We realign the indirect argument if type alignment is bigger 5767 // than ABI alignment. 
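  // For example (illustrative): a struct larger than 64 bytes declared with
  // __attribute__((aligned(16))) gets ABIAlign clamped to 8 under AAPCS, so
  // it is passed byval with 8-byte alignment and Realign set; under APCS the
  // ABI alignment stays at 4.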
5768 uint64_t ABIAlign = 4; 5769 uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8; 5770 if (getABIKind() == ARMABIInfo::AAPCS_VFP || 5771 getABIKind() == ARMABIInfo::AAPCS) 5772 ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8); 5773 5774 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) { 5775 assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval"); 5776 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign), 5777 /*ByVal=*/true, 5778 /*Realign=*/TyAlign > ABIAlign); 5779 } 5780 5781 // On RenderScript, coerce Aggregates <= 64 bytes to an integer array of 5782 // same size and alignment. 5783 if (getTarget().isRenderScriptTarget()) { 5784 return coerceToIntArray(Ty, getContext(), getVMContext()); 5785 } 5786 5787 // Otherwise, pass by coercing to a structure of the appropriate size. 5788 llvm::Type* ElemTy; 5789 unsigned SizeRegs; 5790 // FIXME: Try to match the types of the arguments more accurately where 5791 // we can. 5792 if (getContext().getTypeAlign(Ty) <= 32) { 5793 ElemTy = llvm::Type::getInt32Ty(getVMContext()); 5794 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32; 5795 } else { 5796 ElemTy = llvm::Type::getInt64Ty(getVMContext()); 5797 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64; 5798 } 5799 5800 return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs)); 5801 } 5802 5803 static bool isIntegerLikeType(QualType Ty, ASTContext &Context, 5804 llvm::LLVMContext &VMContext) { 5805 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure 5806 // is called integer-like if its size is less than or equal to one word, and 5807 // the offset of each of its addressable sub-fields is zero. 5808 5809 uint64_t Size = Context.getTypeSize(Ty); 5810 5811 // Check that the type fits in a word. 5812 if (Size > 32) 5813 return false; 5814 5815 // FIXME: Handle vector types! 5816 if (Ty->isVectorType()) 5817 return false; 5818 5819 // Float types are never treated as "integer like". 5820 if (Ty->isRealFloatingType()) 5821 return false; 5822 5823 // If this is a builtin or pointer type then it is ok. 5824 if (Ty->getAs<BuiltinType>() || Ty->isPointerType()) 5825 return true; 5826 5827 // Small complex integer types are "integer like". 5828 if (const ComplexType *CT = Ty->getAs<ComplexType>()) 5829 return isIntegerLikeType(CT->getElementType(), Context, VMContext); 5830 5831 // Single element and zero sized arrays should be allowed, by the definition 5832 // above, but they are not. 5833 5834 // Otherwise, it must be a record type. 5835 const RecordType *RT = Ty->getAs<RecordType>(); 5836 if (!RT) return false; 5837 5838 // Ignore records with flexible arrays. 5839 const RecordDecl *RD = RT->getDecl(); 5840 if (RD->hasFlexibleArrayMember()) 5841 return false; 5842 5843 // Check that all sub-fields are at offset 0, and are themselves "integer 5844 // like". 5845 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 5846 5847 bool HadField = false; 5848 unsigned idx = 0; 5849 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 5850 i != e; ++i, ++idx) { 5851 const FieldDecl *FD = *i; 5852 5853 // Bit-fields are not addressable, we only need to verify they are "integer 5854 // like". We still have to disallow a subsequent non-bitfield, for example: 5855 // struct { int : 0; int x } 5856 // is non-integer like according to gcc. 
5857 if (FD->isBitField()) { 5858 if (!RD->isUnion()) 5859 HadField = true; 5860 5861 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 5862 return false; 5863 5864 continue; 5865 } 5866 5867 // Check if this field is at offset 0. 5868 if (Layout.getFieldOffset(idx) != 0) 5869 return false; 5870 5871 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 5872 return false; 5873 5874 // Only allow at most one field in a structure. This doesn't match the 5875 // wording above, but follows gcc in situations with a field following an 5876 // empty structure. 5877 if (!RD->isUnion()) { 5878 if (HadField) 5879 return false; 5880 5881 HadField = true; 5882 } 5883 } 5884 5885 return true; 5886 } 5887 5888 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, 5889 bool isVariadic) const { 5890 bool IsEffectivelyAAPCS_VFP = 5891 (getABIKind() == AAPCS_VFP || getABIKind() == AAPCS16_VFP) && !isVariadic; 5892 5893 if (RetTy->isVoidType()) 5894 return ABIArgInfo::getIgnore(); 5895 5896 // Large vector types should be returned via memory. 5897 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) { 5898 return getNaturalAlignIndirect(RetTy); 5899 } 5900 5901 // __fp16 gets returned as if it were an int or float, but with the top 16 5902 // bits unspecified. This is not done for OpenCL as it handles the half type 5903 // natively, and does not need to interwork with AAPCS code. 5904 if (RetTy->isHalfType() && !getContext().getLangOpts().NativeHalfArgsAndReturns) { 5905 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ? 5906 llvm::Type::getFloatTy(getVMContext()) : 5907 llvm::Type::getInt32Ty(getVMContext()); 5908 return ABIArgInfo::getDirect(ResType); 5909 } 5910 5911 if (!isAggregateTypeForABI(RetTy)) { 5912 // Treat an enum type as its underlying type. 5913 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 5914 RetTy = EnumTy->getDecl()->getIntegerType(); 5915 5916 return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend() 5917 : ABIArgInfo::getDirect(); 5918 } 5919 5920 // Are we following APCS? 5921 if (getABIKind() == APCS) { 5922 if (isEmptyRecord(getContext(), RetTy, false)) 5923 return ABIArgInfo::getIgnore(); 5924 5925 // Complex types are all returned as packed integers. 5926 // 5927 // FIXME: Consider using 2 x vector types if the back end handles them 5928 // correctly. 5929 if (RetTy->isAnyComplexType()) 5930 return ABIArgInfo::getDirect(llvm::IntegerType::get( 5931 getVMContext(), getContext().getTypeSize(RetTy))); 5932 5933 // Integer like structures are returned in r0. 5934 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) { 5935 // Return in the smallest viable integer type. 5936 uint64_t Size = getContext().getTypeSize(RetTy); 5937 if (Size <= 8) 5938 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 5939 if (Size <= 16) 5940 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 5941 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 5942 } 5943 5944 // Otherwise return in memory. 5945 return getNaturalAlignIndirect(RetTy); 5946 } 5947 5948 // Otherwise this is an AAPCS variant. 5949 5950 if (isEmptyRecord(getContext(), RetTy, true)) 5951 return ABIArgInfo::getIgnore(); 5952 5953 // Check for homogeneous aggregates with AAPCS-VFP. 
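  // For example (illustrative): with AAPCS-VFP and a non-variadic prototype,
  // "struct { float x, y; }" is a homogeneous aggregate of two floats and is
  // returned directly in VFP registers; on a variadic signature the same
  // struct (8 bytes) falls through to the indirect return path below.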
  if (IsEffectivelyAAPCS_VFP) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(RetTy, Base, Members)) {
      assert(Base && "Base class should be set for homogeneous aggregate");
      // Homogeneous Aggregates are returned directly.
      return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
    }
  }

  // Aggregates <= 4 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 32) {
    // On RenderScript, coerce Aggregates <= 4 bytes to an integer array of
    // same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(RetTy, getContext(), getVMContext());
    }
    if (getDataLayout().isBigEndian())
      // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4).
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
    llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
    llvm::Type *CoerceTy =
        llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
    return ABIArgInfo::getDirect(CoerceTy);
  }

  return getNaturalAlignIndirect(RetTy);
}

/// isIllegalVectorType - check whether Ty is an illegal vector type.
bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    if (isAndroid()) {
      // Android shipped using Clang 3.1, which supported a slightly different
      // vector ABI. The primary differences were that 3-element vector types
      // were legal, and so were sub 32-bit vectors (i.e. <2 x i8>). This path
      // accepts that legacy behavior for Android only.
      // Check whether VT is legal.
      unsigned NumElements = VT->getNumElements();
      // NumElements should be a power of 2 or equal to 3.
      if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
        return true;
    } else {
      // Check whether VT is legal.
      unsigned NumElements = VT->getNumElements();
      uint64_t Size = getContext().getTypeSize(VT);
      // NumElements should be a power of 2.
      if (!llvm::isPowerOf2_32(NumElements))
        return true;
      // Size should be greater than 32 bits.
      return Size <= 32;
    }
  }
  return false;
}

bool ARMABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
                                           llvm::Type *eltTy,
                                           unsigned numElts) const {
  if (!llvm::isPowerOf2_32(numElts))
    return false;
  unsigned size = getDataLayout().getTypeStoreSizeInBits(eltTy);
  if (size > 64)
    return false;
  if (vectorSize.getQuantity() != 8 &&
      (vectorSize.getQuantity() != 16 || numElts == 1))
    return false;
  return true;
}

bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS-VFP must have base types of float,
  // double, or 64-bit or 128-bit vectors.
6037 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 6038 if (BT->getKind() == BuiltinType::Float || 6039 BT->getKind() == BuiltinType::Double || 6040 BT->getKind() == BuiltinType::LongDouble) 6041 return true; 6042 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 6043 unsigned VecSize = getContext().getTypeSize(VT); 6044 if (VecSize == 64 || VecSize == 128) 6045 return true; 6046 } 6047 return false; 6048 } 6049 6050 bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, 6051 uint64_t Members) const { 6052 return Members <= 4; 6053 } 6054 6055 Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6056 QualType Ty) const { 6057 CharUnits SlotSize = CharUnits::fromQuantity(4); 6058 6059 // Empty records are ignored for parameter passing purposes. 6060 if (isEmptyRecord(getContext(), Ty, true)) { 6061 Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize); 6062 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty)); 6063 return Addr; 6064 } 6065 6066 auto TyInfo = getContext().getTypeInfoInChars(Ty); 6067 CharUnits TyAlignForABI = TyInfo.second; 6068 6069 // Use indirect if size of the illegal vector is bigger than 16 bytes. 6070 bool IsIndirect = false; 6071 const Type *Base = nullptr; 6072 uint64_t Members = 0; 6073 if (TyInfo.first > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) { 6074 IsIndirect = true; 6075 6076 // ARMv7k passes structs bigger than 16 bytes indirectly, in space 6077 // allocated by the caller. 6078 } else if (TyInfo.first > CharUnits::fromQuantity(16) && 6079 getABIKind() == ARMABIInfo::AAPCS16_VFP && 6080 !isHomogeneousAggregate(Ty, Base, Members)) { 6081 IsIndirect = true; 6082 6083 // Otherwise, bound the type's ABI alignment. 6084 // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for 6085 // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte. 6086 // Our callers should be prepared to handle an under-aligned address. 6087 } else if (getABIKind() == ARMABIInfo::AAPCS_VFP || 6088 getABIKind() == ARMABIInfo::AAPCS) { 6089 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4)); 6090 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8)); 6091 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) { 6092 // ARMv7k allows type alignment up to 16 bytes. 
    TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
    TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16));
  } else {
    TyAlignForABI = CharUnits::fromQuantity(4);
  }
  TyInfo.second = TyAlignForABI;

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
                          SlotSize, /*AllowHigherAlign*/ true);
}

//===----------------------------------------------------------------------===//
// NVPTX ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class NVPTXABIInfo : public ABIInfo {
public:
  NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M,
                           ForDefinition_t IsForDefinition) const override;

private:
  // Adds a NamedMDNode with F, Name, and Operand as operands, and adds the
  // resulting MDNode to the nvvm.annotations MDNode.
  static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
};

ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Note: this is different from the default ABI.
  if (!RetTy->isScalarType())
    return ABIArgInfo::getDirect();

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Return aggregate types indirectly, by value.
  if (isAggregateTypeForABI(Ty))
    return getNaturalAlignIndirect(Ty, /* byval */ true);

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);

  // Always honor user-specified calling convention.
6173 if (FI.getCallingConvention() != llvm::CallingConv::C) 6174 return; 6175 6176 FI.setEffectiveCallingConvention(getRuntimeCC()); 6177 } 6178 6179 Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6180 QualType Ty) const { 6181 llvm_unreachable("NVPTX does not support varargs"); 6182 } 6183 6184 void NVPTXTargetCodeGenInfo::setTargetAttributes( 6185 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M, 6186 ForDefinition_t IsForDefinition) const { 6187 if (!IsForDefinition) 6188 return; 6189 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 6190 if (!FD) return; 6191 6192 llvm::Function *F = cast<llvm::Function>(GV); 6193 6194 // Perform special handling in OpenCL mode 6195 if (M.getLangOpts().OpenCL) { 6196 // Use OpenCL function attributes to check for kernel functions 6197 // By default, all functions are device functions 6198 if (FD->hasAttr<OpenCLKernelAttr>()) { 6199 // OpenCL __kernel functions get kernel metadata 6200 // Create !{<func-ref>, metadata !"kernel", i32 1} node 6201 addNVVMMetadata(F, "kernel", 1); 6202 // And kernel functions are not subject to inlining 6203 F->addFnAttr(llvm::Attribute::NoInline); 6204 } 6205 } 6206 6207 // Perform special handling in CUDA mode. 6208 if (M.getLangOpts().CUDA) { 6209 // CUDA __global__ functions get a kernel metadata entry. Since 6210 // __global__ functions cannot be called from the device, we do not 6211 // need to set the noinline attribute. 6212 if (FD->hasAttr<CUDAGlobalAttr>()) { 6213 // Create !{<func-ref>, metadata !"kernel", i32 1} node 6214 addNVVMMetadata(F, "kernel", 1); 6215 } 6216 if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) { 6217 // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node 6218 llvm::APSInt MaxThreads(32); 6219 MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext()); 6220 if (MaxThreads > 0) 6221 addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue()); 6222 6223 // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was 6224 // not specified in __launch_bounds__ or if the user specified a 0 value, 6225 // we don't have to add a PTX directive. 
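      // For example (illustrative CUDA source, not from this file):
      //   __global__ void __launch_bounds__(256, 2) kern(...) { ... }
      // yields nvvm.annotations entries "maxntidx" = 256 and "minctasm" = 2,
      // while __launch_bounds__(256) alone emits only the "maxntidx" entry.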
6226 if (Attr->getMinBlocks()) { 6227 llvm::APSInt MinBlocks(32); 6228 MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext()); 6229 if (MinBlocks > 0) 6230 // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node 6231 addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue()); 6232 } 6233 } 6234 } 6235 } 6236 6237 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name, 6238 int Operand) { 6239 llvm::Module *M = F->getParent(); 6240 llvm::LLVMContext &Ctx = M->getContext(); 6241 6242 // Get "nvvm.annotations" metadata node 6243 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations"); 6244 6245 llvm::Metadata *MDVals[] = { 6246 llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name), 6247 llvm::ConstantAsMetadata::get( 6248 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))}; 6249 // Append metadata to nvvm.annotations 6250 MD->addOperand(llvm::MDNode::get(Ctx, MDVals)); 6251 } 6252 } 6253 6254 //===----------------------------------------------------------------------===// 6255 // SystemZ ABI Implementation 6256 //===----------------------------------------------------------------------===// 6257 6258 namespace { 6259 6260 class SystemZABIInfo : public SwiftABIInfo { 6261 bool HasVector; 6262 6263 public: 6264 SystemZABIInfo(CodeGenTypes &CGT, bool HV) 6265 : SwiftABIInfo(CGT), HasVector(HV) {} 6266 6267 bool isPromotableIntegerType(QualType Ty) const; 6268 bool isCompoundType(QualType Ty) const; 6269 bool isVectorArgumentType(QualType Ty) const; 6270 bool isFPArgumentType(QualType Ty) const; 6271 QualType GetSingleElementType(QualType Ty) const; 6272 6273 ABIArgInfo classifyReturnType(QualType RetTy) const; 6274 ABIArgInfo classifyArgumentType(QualType ArgTy) const; 6275 6276 void computeInfo(CGFunctionInfo &FI) const override { 6277 if (!getCXXABI().classifyReturnType(FI)) 6278 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 6279 for (auto &I : FI.arguments()) 6280 I.info = classifyArgumentType(I.type); 6281 } 6282 6283 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6284 QualType Ty) const override; 6285 6286 bool shouldPassIndirectlyForSwift(CharUnits totalSize, 6287 ArrayRef<llvm::Type*> scalars, 6288 bool asReturnValue) const override { 6289 return occupiesMoreThan(CGT, scalars, /*total*/ 4); 6290 } 6291 bool isSwiftErrorInRegister() const override { 6292 return true; 6293 } 6294 }; 6295 6296 class SystemZTargetCodeGenInfo : public TargetCodeGenInfo { 6297 public: 6298 SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector) 6299 : TargetCodeGenInfo(new SystemZABIInfo(CGT, HasVector)) {} 6300 }; 6301 6302 } 6303 6304 bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const { 6305 // Treat an enum type as its underlying type. 6306 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 6307 Ty = EnumTy->getDecl()->getIntegerType(); 6308 6309 // Promotable integer types are required to be promoted by the ABI. 6310 if (Ty->isPromotableIntegerType()) 6311 return true; 6312 6313 // 32-bit values must also be promoted. 
6314 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 6315 switch (BT->getKind()) { 6316 case BuiltinType::Int: 6317 case BuiltinType::UInt: 6318 return true; 6319 default: 6320 return false; 6321 } 6322 return false; 6323 } 6324 6325 bool SystemZABIInfo::isCompoundType(QualType Ty) const { 6326 return (Ty->isAnyComplexType() || 6327 Ty->isVectorType() || 6328 isAggregateTypeForABI(Ty)); 6329 } 6330 6331 bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const { 6332 return (HasVector && 6333 Ty->isVectorType() && 6334 getContext().getTypeSize(Ty) <= 128); 6335 } 6336 6337 bool SystemZABIInfo::isFPArgumentType(QualType Ty) const { 6338 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 6339 switch (BT->getKind()) { 6340 case BuiltinType::Float: 6341 case BuiltinType::Double: 6342 return true; 6343 default: 6344 return false; 6345 } 6346 6347 return false; 6348 } 6349 6350 QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const { 6351 if (const RecordType *RT = Ty->getAsStructureType()) { 6352 const RecordDecl *RD = RT->getDecl(); 6353 QualType Found; 6354 6355 // If this is a C++ record, check the bases first. 6356 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 6357 for (const auto &I : CXXRD->bases()) { 6358 QualType Base = I.getType(); 6359 6360 // Empty bases don't affect things either way. 6361 if (isEmptyRecord(getContext(), Base, true)) 6362 continue; 6363 6364 if (!Found.isNull()) 6365 return Ty; 6366 Found = GetSingleElementType(Base); 6367 } 6368 6369 // Check the fields. 6370 for (const auto *FD : RD->fields()) { 6371 // For compatibility with GCC, ignore empty bitfields in C++ mode. 6372 // Unlike isSingleElementStruct(), empty structure and array fields 6373 // do count. So do anonymous bitfields that aren't zero-sized. 6374 if (getContext().getLangOpts().CPlusPlus && 6375 FD->isBitField() && FD->getBitWidthValue(getContext()) == 0) 6376 continue; 6377 6378 // Unlike isSingleElementStruct(), arrays do not count. 6379 // Nested structures still do though. 6380 if (!Found.isNull()) 6381 return Ty; 6382 Found = GetSingleElementType(FD->getType()); 6383 } 6384 6385 // Unlike isSingleElementStruct(), trailing padding is allowed. 6386 // An 8-byte aligned struct s { float f; } is passed as a double. 6387 if (!Found.isNull()) 6388 return Found; 6389 } 6390 6391 return Ty; 6392 } 6393 6394 Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6395 QualType Ty) const { 6396 // Assume that va_list type is correct; should be pointer to LLVM type: 6397 // struct { 6398 // i64 __gpr; 6399 // i64 __fpr; 6400 // i8 *__overflow_arg_area; 6401 // i8 *__reg_save_area; 6402 // }; 6403 6404 // Every non-vector argument occupies 8 bytes and is passed by preference 6405 // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are 6406 // always passed on the stack. 
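// Illustrative walk-through for va_arg(ap, int): the code below compares
// __gpr against the 5 GPR argument slots; if the value was passed in a
// register it is loaded from __reg_save_area + 16 + __gpr * 8 + 4 (an int
// occupies the last 4 bytes of its 8-byte slot), otherwise from
// __overflow_arg_area + 4, and the corresponding counter/pointer is bumped.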
6407 Ty = getContext().getCanonicalType(Ty); 6408 auto TyInfo = getContext().getTypeInfoInChars(Ty); 6409 llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty); 6410 llvm::Type *DirectTy = ArgTy; 6411 ABIArgInfo AI = classifyArgumentType(Ty); 6412 bool IsIndirect = AI.isIndirect(); 6413 bool InFPRs = false; 6414 bool IsVector = false; 6415 CharUnits UnpaddedSize; 6416 CharUnits DirectAlign; 6417 if (IsIndirect) { 6418 DirectTy = llvm::PointerType::getUnqual(DirectTy); 6419 UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8); 6420 } else { 6421 if (AI.getCoerceToType()) 6422 ArgTy = AI.getCoerceToType(); 6423 InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy(); 6424 IsVector = ArgTy->isVectorTy(); 6425 UnpaddedSize = TyInfo.first; 6426 DirectAlign = TyInfo.second; 6427 } 6428 CharUnits PaddedSize = CharUnits::fromQuantity(8); 6429 if (IsVector && UnpaddedSize > PaddedSize) 6430 PaddedSize = CharUnits::fromQuantity(16); 6431 assert((UnpaddedSize <= PaddedSize) && "Invalid argument size."); 6432 6433 CharUnits Padding = (PaddedSize - UnpaddedSize); 6434 6435 llvm::Type *IndexTy = CGF.Int64Ty; 6436 llvm::Value *PaddedSizeV = 6437 llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity()); 6438 6439 if (IsVector) { 6440 // Work out the address of a vector argument on the stack. 6441 // Vector arguments are always passed in the high bits of a 6442 // single (8 byte) or double (16 byte) stack slot. 6443 Address OverflowArgAreaPtr = 6444 CGF.Builder.CreateStructGEP(VAListAddr, 2, CharUnits::fromQuantity(16), 6445 "overflow_arg_area_ptr"); 6446 Address OverflowArgArea = 6447 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"), 6448 TyInfo.second); 6449 Address MemAddr = 6450 CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr"); 6451 6452 // Update overflow_arg_area_ptr pointer 6453 llvm::Value *NewOverflowArgArea = 6454 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV, 6455 "overflow_arg_area"); 6456 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr); 6457 6458 return MemAddr; 6459 } 6460 6461 assert(PaddedSize.getQuantity() == 8); 6462 6463 unsigned MaxRegs, RegCountField, RegSaveIndex; 6464 CharUnits RegPadding; 6465 if (InFPRs) { 6466 MaxRegs = 4; // Maximum of 4 FPR arguments 6467 RegCountField = 1; // __fpr 6468 RegSaveIndex = 16; // save offset for f0 6469 RegPadding = CharUnits(); // floats are passed in the high bits of an FPR 6470 } else { 6471 MaxRegs = 5; // Maximum of 5 GPR arguments 6472 RegCountField = 0; // __gpr 6473 RegSaveIndex = 2; // save offset for r2 6474 RegPadding = Padding; // values are passed in the low bits of a GPR 6475 } 6476 6477 Address RegCountPtr = CGF.Builder.CreateStructGEP( 6478 VAListAddr, RegCountField, RegCountField * CharUnits::fromQuantity(8), 6479 "reg_count_ptr"); 6480 llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count"); 6481 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs); 6482 llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV, 6483 "fits_in_regs"); 6484 6485 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 6486 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); 6487 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 6488 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); 6489 6490 // Emit code to load the value if it was passed in registers. 6491 CGF.EmitBlock(InRegBlock); 6492 6493 // Work out the address of an argument register. 
6494 llvm::Value *ScaledRegCount = 6495 CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count"); 6496 llvm::Value *RegBase = 6497 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity() 6498 + RegPadding.getQuantity()); 6499 llvm::Value *RegOffset = 6500 CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset"); 6501 Address RegSaveAreaPtr = 6502 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24), 6503 "reg_save_area_ptr"); 6504 llvm::Value *RegSaveArea = 6505 CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area"); 6506 Address RawRegAddr(CGF.Builder.CreateGEP(RegSaveArea, RegOffset, 6507 "raw_reg_addr"), 6508 PaddedSize); 6509 Address RegAddr = 6510 CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr"); 6511 6512 // Update the register count 6513 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1); 6514 llvm::Value *NewRegCount = 6515 CGF.Builder.CreateAdd(RegCount, One, "reg_count"); 6516 CGF.Builder.CreateStore(NewRegCount, RegCountPtr); 6517 CGF.EmitBranch(ContBlock); 6518 6519 // Emit code to load the value if it was passed in memory. 6520 CGF.EmitBlock(InMemBlock); 6521 6522 // Work out the address of a stack argument. 6523 Address OverflowArgAreaPtr = CGF.Builder.CreateStructGEP( 6524 VAListAddr, 2, CharUnits::fromQuantity(16), "overflow_arg_area_ptr"); 6525 Address OverflowArgArea = 6526 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"), 6527 PaddedSize); 6528 Address RawMemAddr = 6529 CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr"); 6530 Address MemAddr = 6531 CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr"); 6532 6533 // Update overflow_arg_area_ptr pointer 6534 llvm::Value *NewOverflowArgArea = 6535 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV, 6536 "overflow_arg_area"); 6537 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr); 6538 CGF.EmitBranch(ContBlock); 6539 6540 // Return the appropriate result. 6541 CGF.EmitBlock(ContBlock); 6542 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, 6543 MemAddr, InMemBlock, "va_arg.addr"); 6544 6545 if (IsIndirect) 6546 ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"), 6547 TyInfo.second); 6548 6549 return ResAddr; 6550 } 6551 6552 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const { 6553 if (RetTy->isVoidType()) 6554 return ABIArgInfo::getIgnore(); 6555 if (isVectorArgumentType(RetTy)) 6556 return ABIArgInfo::getDirect(); 6557 if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64) 6558 return getNaturalAlignIndirect(RetTy); 6559 return (isPromotableIntegerType(RetTy) ? 6560 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 6561 } 6562 6563 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const { 6564 // Handle the generic C++ ABI. 6565 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 6566 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 6567 6568 // Integers and enums are extended to full register width. 6569 if (isPromotableIntegerType(Ty)) 6570 return ABIArgInfo::getExtend(); 6571 6572 // Handle vector types and vector-like structure types. Note that 6573 // as opposed to float-like structure types, we do not allow any 6574 // padding for vector-like structures, so verify the sizes match. 
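// E.g. (illustrative) a struct whose only member is a 16-byte vector, such
// as struct S { __attribute__((vector_size(16))) int v; }, is passed
// directly as <4 x i32> when vector support is available, just like a bare
// vector argument.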
6575 uint64_t Size = getContext().getTypeSize(Ty); 6576 QualType SingleElementTy = GetSingleElementType(Ty); 6577 if (isVectorArgumentType(SingleElementTy) && 6578 getContext().getTypeSize(SingleElementTy) == Size) 6579 return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy)); 6580 6581 // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly. 6582 if (Size != 8 && Size != 16 && Size != 32 && Size != 64) 6583 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 6584 6585 // Handle small structures. 6586 if (const RecordType *RT = Ty->getAs<RecordType>()) { 6587 // Structures with flexible arrays have variable length, so really 6588 // fail the size test above. 6589 const RecordDecl *RD = RT->getDecl(); 6590 if (RD->hasFlexibleArrayMember()) 6591 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 6592 6593 // The structure is passed as an unextended integer, a float, or a double. 6594 llvm::Type *PassTy; 6595 if (isFPArgumentType(SingleElementTy)) { 6596 assert(Size == 32 || Size == 64); 6597 if (Size == 32) 6598 PassTy = llvm::Type::getFloatTy(getVMContext()); 6599 else 6600 PassTy = llvm::Type::getDoubleTy(getVMContext()); 6601 } else 6602 PassTy = llvm::IntegerType::get(getVMContext(), Size); 6603 return ABIArgInfo::getDirect(PassTy); 6604 } 6605 6606 // Non-structure compounds are passed indirectly. 6607 if (isCompoundType(Ty)) 6608 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 6609 6610 return ABIArgInfo::getDirect(nullptr); 6611 } 6612 6613 //===----------------------------------------------------------------------===// 6614 // MSP430 ABI Implementation 6615 //===----------------------------------------------------------------------===// 6616 6617 namespace { 6618 6619 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo { 6620 public: 6621 MSP430TargetCodeGenInfo(CodeGenTypes &CGT) 6622 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 6623 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 6624 CodeGen::CodeGenModule &M, 6625 ForDefinition_t IsForDefinition) const override; 6626 }; 6627 6628 } 6629 6630 void MSP430TargetCodeGenInfo::setTargetAttributes( 6631 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M, 6632 ForDefinition_t IsForDefinition) const { 6633 if (!IsForDefinition) 6634 return; 6635 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 6636 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) { 6637 // Handle 'interrupt' attribute: 6638 llvm::Function *F = cast<llvm::Function>(GV); 6639 6640 // Step 1: Set ISR calling convention. 6641 F->setCallingConv(llvm::CallingConv::MSP430_INTR); 6642 6643 // Step 2: Add attributes goodness. 6644 F->addFnAttr(llvm::Attribute::NoInline); 6645 6646 // Step 3: Emit ISR vector alias. 6647 unsigned Num = attr->getNumber() / 2; 6648 llvm::GlobalAlias::create(llvm::Function::ExternalLinkage, 6649 "__isr_" + Twine(Num), F); 6650 } 6651 } 6652 } 6653 6654 //===----------------------------------------------------------------------===// 6655 // MIPS ABI Implementation. This works for both little-endian and 6656 // big-endian variants. 
6657 //===----------------------------------------------------------------------===// 6658 6659 namespace { 6660 class MipsABIInfo : public ABIInfo { 6661 bool IsO32; 6662 unsigned MinABIStackAlignInBytes, StackAlignInBytes; 6663 void CoerceToIntArgs(uint64_t TySize, 6664 SmallVectorImpl<llvm::Type *> &ArgList) const; 6665 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const; 6666 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const; 6667 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const; 6668 public: 6669 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) : 6670 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8), 6671 StackAlignInBytes(IsO32 ? 8 : 16) {} 6672 6673 ABIArgInfo classifyReturnType(QualType RetTy) const; 6674 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const; 6675 void computeInfo(CGFunctionInfo &FI) const override; 6676 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6677 QualType Ty) const override; 6678 bool shouldSignExtUnsignedType(QualType Ty) const override; 6679 }; 6680 6681 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo { 6682 unsigned SizeOfUnwindException; 6683 public: 6684 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32) 6685 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)), 6686 SizeOfUnwindException(IsO32 ? 24 : 32) {} 6687 6688 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { 6689 return 29; 6690 } 6691 6692 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 6693 CodeGen::CodeGenModule &CGM, 6694 ForDefinition_t IsForDefinition) const override { 6695 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 6696 if (!FD) return; 6697 llvm::Function *Fn = cast<llvm::Function>(GV); 6698 6699 if (FD->hasAttr<MipsLongCallAttr>()) 6700 Fn->addFnAttr("long-call"); 6701 else if (FD->hasAttr<MipsShortCallAttr>()) 6702 Fn->addFnAttr("short-call"); 6703 6704 // Other attributes do not have a meaning for declarations. 
6705 if (!IsForDefinition) 6706 return; 6707 6708 if (FD->hasAttr<Mips16Attr>()) { 6709 Fn->addFnAttr("mips16"); 6710 } 6711 else if (FD->hasAttr<NoMips16Attr>()) { 6712 Fn->addFnAttr("nomips16"); 6713 } 6714 6715 if (FD->hasAttr<MicroMipsAttr>()) 6716 Fn->addFnAttr("micromips"); 6717 else if (FD->hasAttr<NoMicroMipsAttr>()) 6718 Fn->addFnAttr("nomicromips"); 6719 6720 const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>(); 6721 if (!Attr) 6722 return; 6723 6724 const char *Kind; 6725 switch (Attr->getInterrupt()) { 6726 case MipsInterruptAttr::eic: Kind = "eic"; break; 6727 case MipsInterruptAttr::sw0: Kind = "sw0"; break; 6728 case MipsInterruptAttr::sw1: Kind = "sw1"; break; 6729 case MipsInterruptAttr::hw0: Kind = "hw0"; break; 6730 case MipsInterruptAttr::hw1: Kind = "hw1"; break; 6731 case MipsInterruptAttr::hw2: Kind = "hw2"; break; 6732 case MipsInterruptAttr::hw3: Kind = "hw3"; break; 6733 case MipsInterruptAttr::hw4: Kind = "hw4"; break; 6734 case MipsInterruptAttr::hw5: Kind = "hw5"; break; 6735 } 6736 6737 Fn->addFnAttr("interrupt", Kind); 6738 6739 } 6740 6741 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 6742 llvm::Value *Address) const override; 6743 6744 unsigned getSizeOfUnwindException() const override { 6745 return SizeOfUnwindException; 6746 } 6747 }; 6748 } 6749 6750 void MipsABIInfo::CoerceToIntArgs( 6751 uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const { 6752 llvm::IntegerType *IntTy = 6753 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8); 6754 6755 // Add (TySize / MinABIStackAlignInBytes) args of IntTy. 6756 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N) 6757 ArgList.push_back(IntTy); 6758 6759 // If necessary, add one more integer type to ArgList. 6760 unsigned R = TySize % (MinABIStackAlignInBytes * 8); 6761 6762 if (R) 6763 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R)); 6764 } 6765 6766 // In N32/64, an aligned double precision floating point field is passed in 6767 // a register. 6768 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const { 6769 SmallVector<llvm::Type*, 8> ArgList, IntArgList; 6770 6771 if (IsO32) { 6772 CoerceToIntArgs(TySize, ArgList); 6773 return llvm::StructType::get(getVMContext(), ArgList); 6774 } 6775 6776 if (Ty->isComplexType()) 6777 return CGT.ConvertType(Ty); 6778 6779 const RecordType *RT = Ty->getAs<RecordType>(); 6780 6781 // Unions/vectors are passed in integer registers. 6782 if (!RT || !RT->isStructureOrClassType()) { 6783 CoerceToIntArgs(TySize, ArgList); 6784 return llvm::StructType::get(getVMContext(), ArgList); 6785 } 6786 6787 const RecordDecl *RD = RT->getDecl(); 6788 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 6789 assert(!(TySize % 8) && "Size of structure must be multiple of 8."); 6790 6791 uint64_t LastOffset = 0; 6792 unsigned idx = 0; 6793 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64); 6794 6795 // Iterate over fields in the struct/class and check if there are any aligned 6796 // double fields. 6797 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 6798 i != e; ++i, ++idx) { 6799 const QualType Ty = i->getType(); 6800 const BuiltinType *BT = Ty->getAs<BuiltinType>(); 6801 6802 if (!BT || BT->getKind() != BuiltinType::Double) 6803 continue; 6804 6805 uint64_t Offset = Layout.getFieldOffset(idx); 6806 if (Offset % 64) // Ignore doubles that are not aligned. 6807 continue; 6808 6809 // Add ((Offset - LastOffset) / 64) args of type i64. 
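// (Illustrative: for struct { int i; double d; } under N64 the double sits
// at offset 64, so one i64 covers the leading int plus padding and the
// resulting coercion type is { i64, double }.)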
6810 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j) 6811 ArgList.push_back(I64); 6812 6813 // Add double type. 6814 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext())); 6815 LastOffset = Offset + 64; 6816 } 6817 6818 CoerceToIntArgs(TySize - LastOffset, IntArgList); 6819 ArgList.append(IntArgList.begin(), IntArgList.end()); 6820 6821 return llvm::StructType::get(getVMContext(), ArgList); 6822 } 6823 6824 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset, 6825 uint64_t Offset) const { 6826 if (OrigOffset + MinABIStackAlignInBytes > Offset) 6827 return nullptr; 6828 6829 return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8); 6830 } 6831 6832 ABIArgInfo 6833 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const { 6834 Ty = useFirstFieldIfTransparentUnion(Ty); 6835 6836 uint64_t OrigOffset = Offset; 6837 uint64_t TySize = getContext().getTypeSize(Ty); 6838 uint64_t Align = getContext().getTypeAlign(Ty) / 8; 6839 6840 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes), 6841 (uint64_t)StackAlignInBytes); 6842 unsigned CurrOffset = llvm::alignTo(Offset, Align); 6843 Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8; 6844 6845 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) { 6846 // Ignore empty aggregates. 6847 if (TySize == 0) 6848 return ABIArgInfo::getIgnore(); 6849 6850 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 6851 Offset = OrigOffset + MinABIStackAlignInBytes; 6852 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 6853 } 6854 6855 // If we have reached here, aggregates are passed directly by coercing to 6856 // another structure type. Padding is inserted if the offset of the 6857 // aggregate is unaligned. 6858 ABIArgInfo ArgInfo = 6859 ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0, 6860 getPaddingType(OrigOffset, CurrOffset)); 6861 ArgInfo.setInReg(true); 6862 return ArgInfo; 6863 } 6864 6865 // Treat an enum type as its underlying type. 6866 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 6867 Ty = EnumTy->getDecl()->getIntegerType(); 6868 6869 // All integral types are promoted to the GPR width. 6870 if (Ty->isIntegralOrEnumerationType()) 6871 return ABIArgInfo::getExtend(); 6872 6873 return ABIArgInfo::getDirect( 6874 nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset)); 6875 } 6876 6877 llvm::Type* 6878 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const { 6879 const RecordType *RT = RetTy->getAs<RecordType>(); 6880 SmallVector<llvm::Type*, 8> RTList; 6881 6882 if (RT && RT->isStructureOrClassType()) { 6883 const RecordDecl *RD = RT->getDecl(); 6884 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 6885 unsigned FieldCnt = Layout.getFieldCount(); 6886 6887 // N32/64 returns struct/classes in floating point registers if the 6888 // following conditions are met: 6889 // 1. The size of the struct/class is no larger than 128-bit. 6890 // 2. The struct/class has one or two fields all of which are floating 6891 // point types. 6892 // 3. The offset of the first field is zero (this follows what gcc does). 6893 // 6894 // Any other composite results are returned in integer registers. 
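// For instance (illustrative), struct { float f; double d; } meets all three
// conditions and is returned directly as the struct type { float, double },
// which the backend can assign to FP registers.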
6895 // 6896 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) { 6897 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end(); 6898 for (; b != e; ++b) { 6899 const BuiltinType *BT = b->getType()->getAs<BuiltinType>(); 6900 6901 if (!BT || !BT->isFloatingPoint()) 6902 break; 6903 6904 RTList.push_back(CGT.ConvertType(b->getType())); 6905 } 6906 6907 if (b == e) 6908 return llvm::StructType::get(getVMContext(), RTList, 6909 RD->hasAttr<PackedAttr>()); 6910 6911 RTList.clear(); 6912 } 6913 } 6914 6915 CoerceToIntArgs(Size, RTList); 6916 return llvm::StructType::get(getVMContext(), RTList); 6917 } 6918 6919 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const { 6920 uint64_t Size = getContext().getTypeSize(RetTy); 6921 6922 if (RetTy->isVoidType()) 6923 return ABIArgInfo::getIgnore(); 6924 6925 // O32 doesn't treat zero-sized structs differently from other structs. 6926 // However, N32/N64 ignores zero sized return values. 6927 if (!IsO32 && Size == 0) 6928 return ABIArgInfo::getIgnore(); 6929 6930 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) { 6931 if (Size <= 128) { 6932 if (RetTy->isAnyComplexType()) 6933 return ABIArgInfo::getDirect(); 6934 6935 // O32 returns integer vectors in registers and N32/N64 returns all small 6936 // aggregates in registers. 6937 if (!IsO32 || 6938 (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) { 6939 ABIArgInfo ArgInfo = 6940 ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); 6941 ArgInfo.setInReg(true); 6942 return ArgInfo; 6943 } 6944 } 6945 6946 return getNaturalAlignIndirect(RetTy); 6947 } 6948 6949 // Treat an enum type as its underlying type. 6950 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 6951 RetTy = EnumTy->getDecl()->getIntegerType(); 6952 6953 return (RetTy->isPromotableIntegerType() ? 6954 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 6955 } 6956 6957 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const { 6958 ABIArgInfo &RetInfo = FI.getReturnInfo(); 6959 if (!getCXXABI().classifyReturnType(FI)) 6960 RetInfo = classifyReturnType(FI.getReturnType()); 6961 6962 // Check if a pointer to an aggregate is passed as a hidden argument. 6963 uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0; 6964 6965 for (auto &I : FI.arguments()) 6966 I.info = classifyArgumentType(I.type, Offset); 6967 } 6968 6969 Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6970 QualType OrigTy) const { 6971 QualType Ty = OrigTy; 6972 6973 // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64. 6974 // Pointers are also promoted in the same way but this only matters for N32. 6975 unsigned SlotSizeInBits = IsO32 ? 32 : 64; 6976 unsigned PtrWidth = getTarget().getPointerWidth(0); 6977 bool DidPromote = false; 6978 if ((Ty->isIntegerType() && 6979 getContext().getIntWidth(Ty) < SlotSizeInBits) || 6980 (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) { 6981 DidPromote = true; 6982 Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits, 6983 Ty->isSignedIntegerType()); 6984 } 6985 6986 auto TyInfo = getContext().getTypeInfoInChars(Ty); 6987 6988 // The alignment of things in the argument area is never larger than 6989 // StackAlignInBytes. 6990 TyInfo.second = 6991 std::min(TyInfo.second, CharUnits::fromQuantity(StackAlignInBytes)); 6992 6993 // MinABIStackAlignInBytes is the size of argument slots on the stack. 
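// (Illustrative: for va_arg(ap, int) under N64 the 4-byte int was promoted
// above to a full 8-byte slot; after emitVoidPtrVAArg the promoted value is
// loaded and truncated back to i32 into a temporary below.)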
6994 CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);
6995
6996 Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
6997 TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true);
6998
6999
7000 // If there was a promotion, "unpromote" into a temporary.
7001 // TODO: can we just use a pointer into a subset of the original slot?
7002 if (DidPromote) {
7003 Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp");
7004 llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr);
7005
7006 // Truncate down to the right width.
7007 llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType()
7008 : CGF.IntPtrTy);
7009 llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy);
7010 if (OrigTy->isPointerType())
7011 V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType());
7012
7013 CGF.Builder.CreateStore(V, Temp);
7014 Addr = Temp;
7015 }
7016
7017 return Addr;
7018 }
7019
7020 bool MipsABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
7021 int TySize = getContext().getTypeSize(Ty);
7022
7023 // MIPS64 ABI requires unsigned 32 bit integers to be sign extended.
7024 if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
7025 return true;
7026
7027 return false;
7028 }
7029
7030 bool
7031 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
7032 llvm::Value *Address) const {
7033 // This information comes from gcc's implementation, which seems to be
7034 // as canonical as it gets.
7035
7036 // Everything on MIPS is 4 bytes. Double-precision FP registers
7037 // are aliased to pairs of single-precision FP registers.
7038 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
7039
7040 // 0-31 are the general purpose registers, $0 - $31.
7041 // 32-63 are the floating-point registers, $f0 - $f31.
7042 // 64 and 65 are the multiply/divide registers, $hi and $lo.
7043 // 66 is the (notional, I think) register for signal-handler return.
7044 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
7045
7046 // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
7047 // They are one bit wide and ignored here.
7048
7049 // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
7050 // (coprocessor 1 is the FP unit)
7051 // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
7052 // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
7053 // 176-181 are the DSP accumulator registers.
7054 AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
7055 return false;
7056 }
7057
7058 //===----------------------------------------------------------------------===//
7059 // AVR ABI Implementation.
7060 //===----------------------------------------------------------------------===// 7061 7062 namespace { 7063 class AVRTargetCodeGenInfo : public TargetCodeGenInfo { 7064 public: 7065 AVRTargetCodeGenInfo(CodeGenTypes &CGT) 7066 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) { } 7067 7068 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 7069 CodeGen::CodeGenModule &CGM, 7070 ForDefinition_t IsForDefinition) const override { 7071 if (!IsForDefinition) 7072 return; 7073 const auto *FD = dyn_cast_or_null<FunctionDecl>(D); 7074 if (!FD) return; 7075 auto *Fn = cast<llvm::Function>(GV); 7076 7077 if (FD->getAttr<AVRInterruptAttr>()) 7078 Fn->addFnAttr("interrupt"); 7079 7080 if (FD->getAttr<AVRSignalAttr>()) 7081 Fn->addFnAttr("signal"); 7082 } 7083 }; 7084 } 7085 7086 //===----------------------------------------------------------------------===// 7087 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults. 7088 // Currently subclassed only to implement custom OpenCL C function attribute 7089 // handling. 7090 //===----------------------------------------------------------------------===// 7091 7092 namespace { 7093 7094 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo { 7095 public: 7096 TCETargetCodeGenInfo(CodeGenTypes &CGT) 7097 : DefaultTargetCodeGenInfo(CGT) {} 7098 7099 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 7100 CodeGen::CodeGenModule &M, 7101 ForDefinition_t IsForDefinition) const override; 7102 }; 7103 7104 void TCETargetCodeGenInfo::setTargetAttributes( 7105 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M, 7106 ForDefinition_t IsForDefinition) const { 7107 if (!IsForDefinition) 7108 return; 7109 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 7110 if (!FD) return; 7111 7112 llvm::Function *F = cast<llvm::Function>(GV); 7113 7114 if (M.getLangOpts().OpenCL) { 7115 if (FD->hasAttr<OpenCLKernelAttr>()) { 7116 // OpenCL C Kernel functions are not subject to inlining 7117 F->addFnAttr(llvm::Attribute::NoInline); 7118 const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>(); 7119 if (Attr) { 7120 // Convert the reqd_work_group_size() attributes to metadata. 7121 llvm::LLVMContext &Context = F->getContext(); 7122 llvm::NamedMDNode *OpenCLMetadata = 7123 M.getModule().getOrInsertNamedMetadata( 7124 "opencl.kernel_wg_size_info"); 7125 7126 SmallVector<llvm::Metadata *, 5> Operands; 7127 Operands.push_back(llvm::ConstantAsMetadata::get(F)); 7128 7129 Operands.push_back( 7130 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 7131 M.Int32Ty, llvm::APInt(32, Attr->getXDim())))); 7132 Operands.push_back( 7133 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 7134 M.Int32Ty, llvm::APInt(32, Attr->getYDim())))); 7135 Operands.push_back( 7136 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 7137 M.Int32Ty, llvm::APInt(32, Attr->getZDim())))); 7138 7139 // Add a boolean constant operand for "required" (true) or "hint" 7140 // (false) for implementing the work_group_size_hint attr later. 7141 // Currently always true as the hint is not yet implemented. 
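// The node added below is, schematically,
// !{<func-ref>, i32 <X>, i32 <Y>, i32 <Z>, i1 true}, appended to the
// opencl.kernel_wg_size_info named metadata.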
7142 Operands.push_back( 7143 llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context))); 7144 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands)); 7145 } 7146 } 7147 } 7148 } 7149 7150 } 7151 7152 //===----------------------------------------------------------------------===// 7153 // Hexagon ABI Implementation 7154 //===----------------------------------------------------------------------===// 7155 7156 namespace { 7157 7158 class HexagonABIInfo : public ABIInfo { 7159 7160 7161 public: 7162 HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 7163 7164 private: 7165 7166 ABIArgInfo classifyReturnType(QualType RetTy) const; 7167 ABIArgInfo classifyArgumentType(QualType RetTy) const; 7168 7169 void computeInfo(CGFunctionInfo &FI) const override; 7170 7171 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 7172 QualType Ty) const override; 7173 }; 7174 7175 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo { 7176 public: 7177 HexagonTargetCodeGenInfo(CodeGenTypes &CGT) 7178 :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {} 7179 7180 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 7181 return 29; 7182 } 7183 }; 7184 7185 } 7186 7187 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const { 7188 if (!getCXXABI().classifyReturnType(FI)) 7189 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 7190 for (auto &I : FI.arguments()) 7191 I.info = classifyArgumentType(I.type); 7192 } 7193 7194 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const { 7195 if (!isAggregateTypeForABI(Ty)) { 7196 // Treat an enum type as its underlying type. 7197 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 7198 Ty = EnumTy->getDecl()->getIntegerType(); 7199 7200 return (Ty->isPromotableIntegerType() ? 7201 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 7202 } 7203 7204 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 7205 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 7206 7207 // Ignore empty records. 7208 if (isEmptyRecord(getContext(), Ty, true)) 7209 return ABIArgInfo::getIgnore(); 7210 7211 uint64_t Size = getContext().getTypeSize(Ty); 7212 if (Size > 64) 7213 return getNaturalAlignIndirect(Ty, /*ByVal=*/true); 7214 // Pass in the smallest viable integer type. 7215 else if (Size > 32) 7216 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext())); 7217 else if (Size > 16) 7218 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 7219 else if (Size > 8) 7220 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 7221 else 7222 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 7223 } 7224 7225 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const { 7226 if (RetTy->isVoidType()) 7227 return ABIArgInfo::getIgnore(); 7228 7229 // Large vector types should be returned via memory. 7230 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64) 7231 return getNaturalAlignIndirect(RetTy); 7232 7233 if (!isAggregateTypeForABI(RetTy)) { 7234 // Treat an enum type as its underlying type. 7235 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 7236 RetTy = EnumTy->getDecl()->getIntegerType(); 7237 7238 return (RetTy->isPromotableIntegerType() ? 7239 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 7240 } 7241 7242 if (isEmptyRecord(getContext(), RetTy, true)) 7243 return ABIArgInfo::getIgnore(); 7244 7245 // Aggregates <= 8 bytes are returned in r0; other aggregates 7246 // are returned indirectly. 
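// E.g. (illustrative) a 6-byte struct is rounded up and returned as an i64,
// while a 3-byte struct comes back as an i32.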
7247 uint64_t Size = getContext().getTypeSize(RetTy);
7248 if (Size <= 64) {
7249 // Return in the smallest viable integer type.
7250 if (Size <= 8)
7251 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
7252 if (Size <= 16)
7253 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
7254 if (Size <= 32)
7255 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
7256 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
7257 }
7258
7259 return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
7260 }
7261
7262 Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7263 QualType Ty) const {
7264 // FIXME: Someone needs to audit that this handles alignment correctly.
7265 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
7266 getContext().getTypeInfoInChars(Ty),
7267 CharUnits::fromQuantity(4),
7268 /*AllowHigherAlign*/ true);
7269 }
7270
7271 //===----------------------------------------------------------------------===//
7272 // Lanai ABI Implementation
7273 //===----------------------------------------------------------------------===//
7274
7275 namespace {
7276 class LanaiABIInfo : public DefaultABIInfo {
7277 public:
7278 LanaiABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
7279
7280 bool shouldUseInReg(QualType Ty, CCState &State) const;
7281
7282 void computeInfo(CGFunctionInfo &FI) const override {
7283 CCState State(FI.getCallingConvention());
7284 // Lanai uses 4 registers to pass arguments unless the function has the
7285 // regparm attribute set.
7286 if (FI.getHasRegParm()) {
7287 State.FreeRegs = FI.getRegParm();
7288 } else {
7289 State.FreeRegs = 4;
7290 }
7291
7292 if (!getCXXABI().classifyReturnType(FI))
7293 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7294 for (auto &I : FI.arguments())
7295 I.info = classifyArgumentType(I.type, State);
7296 }
7297
7298 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
7299 ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
7300 };
7301 } // end anonymous namespace
7302
7303 bool LanaiABIInfo::shouldUseInReg(QualType Ty, CCState &State) const {
7304 unsigned Size = getContext().getTypeSize(Ty);
7305 unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U;
7306
7307 if (SizeInRegs == 0)
7308 return false;
7309
7310 if (SizeInRegs > State.FreeRegs) {
7311 State.FreeRegs = 0;
7312 return false;
7313 }
7314
7315 State.FreeRegs -= SizeInRegs;
7316
7317 return true;
7318 }
7319
7320 ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal,
7321 CCState &State) const {
7322 if (!ByVal) {
7323 if (State.FreeRegs) {
7324 --State.FreeRegs; // Non-byval indirects just use one pointer.
7325 return getNaturalAlignIndirectInReg(Ty);
7326 }
7327 return getNaturalAlignIndirect(Ty, false);
7328 }
7329
7330 // Compute the byval alignment.
7331 const unsigned MinABIStackAlignInBytes = 4;
7332 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
7333 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
7334 /*Realign=*/TypeAlign >
7335 MinABIStackAlignInBytes);
7336 }
7337
7338 ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
7339 CCState &State) const {
7340 // Check with the C++ ABI first.
7341 const RecordType *RT = Ty->getAs<RecordType>(); 7342 if (RT) { 7343 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()); 7344 if (RAA == CGCXXABI::RAA_Indirect) { 7345 return getIndirectResult(Ty, /*ByVal=*/false, State); 7346 } else if (RAA == CGCXXABI::RAA_DirectInMemory) { 7347 return getNaturalAlignIndirect(Ty, /*ByRef=*/true); 7348 } 7349 } 7350 7351 if (isAggregateTypeForABI(Ty)) { 7352 // Structures with flexible arrays are always indirect. 7353 if (RT && RT->getDecl()->hasFlexibleArrayMember()) 7354 return getIndirectResult(Ty, /*ByVal=*/true, State); 7355 7356 // Ignore empty structs/unions. 7357 if (isEmptyRecord(getContext(), Ty, true)) 7358 return ABIArgInfo::getIgnore(); 7359 7360 llvm::LLVMContext &LLVMContext = getVMContext(); 7361 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32; 7362 if (SizeInRegs <= State.FreeRegs) { 7363 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext); 7364 SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32); 7365 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements); 7366 State.FreeRegs -= SizeInRegs; 7367 return ABIArgInfo::getDirectInReg(Result); 7368 } else { 7369 State.FreeRegs = 0; 7370 } 7371 return getIndirectResult(Ty, true, State); 7372 } 7373 7374 // Treat an enum type as its underlying type. 7375 if (const auto *EnumTy = Ty->getAs<EnumType>()) 7376 Ty = EnumTy->getDecl()->getIntegerType(); 7377 7378 bool InReg = shouldUseInReg(Ty, State); 7379 if (Ty->isPromotableIntegerType()) { 7380 if (InReg) 7381 return ABIArgInfo::getDirectInReg(); 7382 return ABIArgInfo::getExtend(); 7383 } 7384 if (InReg) 7385 return ABIArgInfo::getDirectInReg(); 7386 return ABIArgInfo::getDirect(); 7387 } 7388 7389 namespace { 7390 class LanaiTargetCodeGenInfo : public TargetCodeGenInfo { 7391 public: 7392 LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 7393 : TargetCodeGenInfo(new LanaiABIInfo(CGT)) {} 7394 }; 7395 } 7396 7397 //===----------------------------------------------------------------------===// 7398 // AMDGPU ABI Implementation 7399 //===----------------------------------------------------------------------===// 7400 7401 namespace { 7402 7403 class AMDGPUABIInfo final : public DefaultABIInfo { 7404 private: 7405 static const unsigned MaxNumRegsForArgsRet = 16; 7406 7407 unsigned numRegsForType(QualType Ty) const; 7408 7409 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 7410 bool isHomogeneousAggregateSmallEnough(const Type *Base, 7411 uint64_t Members) const override; 7412 7413 public: 7414 explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) : 7415 DefaultABIInfo(CGT) {} 7416 7417 ABIArgInfo classifyReturnType(QualType RetTy) const; 7418 ABIArgInfo classifyKernelArgumentType(QualType Ty) const; 7419 ABIArgInfo classifyArgumentType(QualType Ty, unsigned &NumRegsLeft) const; 7420 7421 void computeInfo(CGFunctionInfo &FI) const override; 7422 }; 7423 7424 bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 7425 return true; 7426 } 7427 7428 bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough( 7429 const Type *Base, uint64_t Members) const { 7430 uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32; 7431 7432 // Homogeneous Aggregates may occupy at most 16 registers. 7433 return Members * NumRegs <= MaxNumRegsForArgsRet; 7434 } 7435 7436 /// Estimate number of registers the type will use when passed in registers. 
7437 unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const { 7438 unsigned NumRegs = 0; 7439 7440 if (const VectorType *VT = Ty->getAs<VectorType>()) { 7441 // Compute from the number of elements. The reported size is based on the 7442 // in-memory size, which includes the padding 4th element for 3-vectors. 7443 QualType EltTy = VT->getElementType(); 7444 unsigned EltSize = getContext().getTypeSize(EltTy); 7445 7446 // 16-bit element vectors should be passed as packed. 7447 if (EltSize == 16) 7448 return (VT->getNumElements() + 1) / 2; 7449 7450 unsigned EltNumRegs = (EltSize + 31) / 32; 7451 return EltNumRegs * VT->getNumElements(); 7452 } 7453 7454 if (const RecordType *RT = Ty->getAs<RecordType>()) { 7455 const RecordDecl *RD = RT->getDecl(); 7456 assert(!RD->hasFlexibleArrayMember()); 7457 7458 for (const FieldDecl *Field : RD->fields()) { 7459 QualType FieldTy = Field->getType(); 7460 NumRegs += numRegsForType(FieldTy); 7461 } 7462 7463 return NumRegs; 7464 } 7465 7466 return (getContext().getTypeSize(Ty) + 31) / 32; 7467 } 7468 7469 void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const { 7470 llvm::CallingConv::ID CC = FI.getCallingConvention(); 7471 7472 if (!getCXXABI().classifyReturnType(FI)) 7473 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 7474 7475 unsigned NumRegsLeft = MaxNumRegsForArgsRet; 7476 for (auto &Arg : FI.arguments()) { 7477 if (CC == llvm::CallingConv::AMDGPU_KERNEL) { 7478 Arg.info = classifyKernelArgumentType(Arg.type); 7479 } else { 7480 Arg.info = classifyArgumentType(Arg.type, NumRegsLeft); 7481 } 7482 } 7483 } 7484 7485 ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const { 7486 if (isAggregateTypeForABI(RetTy)) { 7487 // Records with non-trivial destructors/copy-constructors should not be 7488 // returned by value. 7489 if (!getRecordArgABI(RetTy, getCXXABI())) { 7490 // Ignore empty structs/unions. 7491 if (isEmptyRecord(getContext(), RetTy, true)) 7492 return ABIArgInfo::getIgnore(); 7493 7494 // Lower single-element structs to just return a regular value. 7495 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) 7496 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); 7497 7498 if (const RecordType *RT = RetTy->getAs<RecordType>()) { 7499 const RecordDecl *RD = RT->getDecl(); 7500 if (RD->hasFlexibleArrayMember()) 7501 return DefaultABIInfo::classifyReturnType(RetTy); 7502 } 7503 7504 // Pack aggregates <= 4 bytes into single VGPR or pair. 7505 uint64_t Size = getContext().getTypeSize(RetTy); 7506 if (Size <= 16) 7507 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 7508 7509 if (Size <= 32) 7510 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 7511 7512 if (Size <= 64) { 7513 llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext()); 7514 return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2)); 7515 } 7516 7517 if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet) 7518 return ABIArgInfo::getDirect(); 7519 } 7520 } 7521 7522 // Otherwise just do the default thing. 7523 return DefaultABIInfo::classifyReturnType(RetTy); 7524 } 7525 7526 /// For kernels all parameters are really passed in a special buffer. It doesn't 7527 /// make sense to pass anything byval, so everything must be direct. 7528 ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const { 7529 Ty = useFirstFieldIfTransparentUnion(Ty); 7530 7531 // TODO: Can we omit empty structs? 7532 7533 // Coerce single element structs to its element. 
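// E.g. (illustrative) a kernel argument declared as struct { float x; } is
// lowered to a plain float parameter.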
7534 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext())) 7535 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); 7536 7537 // If we set CanBeFlattened to true, CodeGen will expand the struct to its 7538 // individual elements, which confuses the Clover OpenCL backend; therefore we 7539 // have to set it to false here. Other args of getDirect() are just defaults. 7540 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false); 7541 } 7542 7543 ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty, 7544 unsigned &NumRegsLeft) const { 7545 assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow"); 7546 7547 Ty = useFirstFieldIfTransparentUnion(Ty); 7548 7549 if (isAggregateTypeForABI(Ty)) { 7550 // Records with non-trivial destructors/copy-constructors should not be 7551 // passed by value. 7552 if (auto RAA = getRecordArgABI(Ty, getCXXABI())) 7553 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 7554 7555 // Ignore empty structs/unions. 7556 if (isEmptyRecord(getContext(), Ty, true)) 7557 return ABIArgInfo::getIgnore(); 7558 7559 // Lower single-element structs to just pass a regular value. TODO: We 7560 // could do reasonable-size multiple-element structs too, using getExpand(), 7561 // though watch out for things like bitfields. 7562 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext())) 7563 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0))); 7564 7565 if (const RecordType *RT = Ty->getAs<RecordType>()) { 7566 const RecordDecl *RD = RT->getDecl(); 7567 if (RD->hasFlexibleArrayMember()) 7568 return DefaultABIInfo::classifyArgumentType(Ty); 7569 } 7570 7571 // Pack aggregates <= 8 bytes into single VGPR or pair. 7572 uint64_t Size = getContext().getTypeSize(Ty); 7573 if (Size <= 64) { 7574 unsigned NumRegs = (Size + 31) / 32; 7575 NumRegsLeft -= std::min(NumRegsLeft, NumRegs); 7576 7577 if (Size <= 16) 7578 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 7579 7580 if (Size <= 32) 7581 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 7582 7583 // XXX: Should this be i64 instead, and should the limit increase? 7584 llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext()); 7585 return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2)); 7586 } 7587 7588 if (NumRegsLeft > 0) { 7589 unsigned NumRegs = numRegsForType(Ty); 7590 if (NumRegsLeft >= NumRegs) { 7591 NumRegsLeft -= NumRegs; 7592 return ABIArgInfo::getDirect(); 7593 } 7594 } 7595 } 7596 7597 // Otherwise just do the default thing. 
7598 ABIArgInfo ArgInfo = DefaultABIInfo::classifyArgumentType(Ty); 7599 if (!ArgInfo.isIndirect()) { 7600 unsigned NumRegs = numRegsForType(Ty); 7601 NumRegsLeft -= std::min(NumRegs, NumRegsLeft); 7602 } 7603 7604 return ArgInfo; 7605 } 7606 7607 class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo { 7608 public: 7609 AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT) 7610 : TargetCodeGenInfo(new AMDGPUABIInfo(CGT)) {} 7611 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 7612 CodeGen::CodeGenModule &M, 7613 ForDefinition_t IsForDefinition) const override; 7614 unsigned getOpenCLKernelCallingConv() const override; 7615 7616 llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM, 7617 llvm::PointerType *T, QualType QT) const override; 7618 7619 unsigned getASTAllocaAddressSpace() const override { 7620 return LangAS::FirstTargetAddressSpace + 7621 getABIInfo().getDataLayout().getAllocaAddrSpace(); 7622 } 7623 unsigned getGlobalVarAddressSpace(CodeGenModule &CGM, 7624 const VarDecl *D) const override; 7625 llvm::SyncScope::ID getLLVMSyncScopeID(SyncScope S, 7626 llvm::LLVMContext &C) const override; 7627 }; 7628 } 7629 7630 void AMDGPUTargetCodeGenInfo::setTargetAttributes( 7631 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M, 7632 ForDefinition_t IsForDefinition) const { 7633 if (!IsForDefinition) 7634 return; 7635 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 7636 if (!FD) 7637 return; 7638 7639 llvm::Function *F = cast<llvm::Function>(GV); 7640 7641 const auto *ReqdWGS = M.getLangOpts().OpenCL ? 7642 FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr; 7643 const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>(); 7644 if (ReqdWGS || FlatWGS) { 7645 unsigned Min = FlatWGS ? FlatWGS->getMin() : 0; 7646 unsigned Max = FlatWGS ? FlatWGS->getMax() : 0; 7647 if (ReqdWGS && Min == 0 && Max == 0) 7648 Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim(); 7649 7650 if (Min != 0) { 7651 assert(Min <= Max && "Min must be less than or equal Max"); 7652 7653 std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max); 7654 F->addFnAttr("amdgpu-flat-work-group-size", AttrVal); 7655 } else 7656 assert(Max == 0 && "Max must be zero"); 7657 } 7658 7659 if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>()) { 7660 unsigned Min = Attr->getMin(); 7661 unsigned Max = Attr->getMax(); 7662 7663 if (Min != 0) { 7664 assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max"); 7665 7666 std::string AttrVal = llvm::utostr(Min); 7667 if (Max != 0) 7668 AttrVal = AttrVal + "," + llvm::utostr(Max); 7669 F->addFnAttr("amdgpu-waves-per-eu", AttrVal); 7670 } else 7671 assert(Max == 0 && "Max must be zero"); 7672 } 7673 7674 if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) { 7675 unsigned NumSGPR = Attr->getNumSGPR(); 7676 7677 if (NumSGPR != 0) 7678 F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR)); 7679 } 7680 7681 if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) { 7682 uint32_t NumVGPR = Attr->getNumVGPR(); 7683 7684 if (NumVGPR != 0) 7685 F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR)); 7686 } 7687 } 7688 7689 unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const { 7690 return llvm::CallingConv::AMDGPU_KERNEL; 7691 } 7692 7693 // Currently LLVM assumes null pointers always have value 0, 7694 // which results in incorrectly transformed IR. 
Therefore, instead of
7695 // emitting null pointers in the private and local address spaces, a null
7696 // pointer in the generic address space is emitted and then cast to a
7697 // pointer in the local or private address space.
7698 llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
7699 const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
7700 QualType QT) const {
7701 if (CGM.getContext().getTargetNullPointerValue(QT) == 0)
7702 return llvm::ConstantPointerNull::get(PT);
7703
7704 auto &Ctx = CGM.getContext();
7705 auto NPT = llvm::PointerType::get(PT->getElementType(),
7706 Ctx.getTargetAddressSpace(LangAS::opencl_generic));
7707 return llvm::ConstantExpr::getAddrSpaceCast(
7708 llvm::ConstantPointerNull::get(NPT), PT);
7709 }
7710
7711 unsigned
7712 AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
7713 const VarDecl *D) const {
7714 assert(!CGM.getLangOpts().OpenCL &&
7715 !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
7716 "Address space agnostic languages only");
7717 unsigned DefaultGlobalAS =
7718 LangAS::FirstTargetAddressSpace +
7719 CGM.getContext().getTargetAddressSpace(LangAS::opencl_global);
7720 if (!D)
7721 return DefaultGlobalAS;
7722
7723 unsigned AddrSpace = D->getType().getAddressSpace();
7724 assert(AddrSpace == LangAS::Default ||
7725 AddrSpace >= LangAS::FirstTargetAddressSpace);
7726 if (AddrSpace != LangAS::Default)
7727 return AddrSpace;
7728
7729 if (CGM.isTypeConstant(D->getType(), false)) {
7730 if (auto ConstAS = CGM.getTarget().getConstantAddressSpace())
7731 return ConstAS.getValue();
7732 }
7733 return DefaultGlobalAS;
7734 }
7735
7736 llvm::SyncScope::ID
7737 AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(SyncScope S,
7738 llvm::LLVMContext &C) const {
7739 StringRef Name;
7740 switch (S) {
7741 case SyncScope::OpenCLWorkGroup:
7742 Name = "workgroup";
7743 break;
7744 case SyncScope::OpenCLDevice:
7745 Name = "agent";
7746 break;
7747 case SyncScope::OpenCLAllSVMDevices:
7748 Name = "";
7749 break;
7750 case SyncScope::OpenCLSubGroup:
7751 Name = "subgroup";
7752 }
7753 return C.getOrInsertSyncScopeID(Name);
7754 }
7755
7756 //===----------------------------------------------------------------------===//
7757 // SPARC v8 ABI Implementation.
7758 // Based on the SPARC Compliance Definition version 2.4.1.
7759 //
7760 // Ensures that complex values are passed in registers.
7761 //
7762 namespace {
7763 class SparcV8ABIInfo : public DefaultABIInfo {
7764 public:
7765 SparcV8ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
7766
7767 private:
7768 ABIArgInfo classifyReturnType(QualType RetTy) const;
7769 void computeInfo(CGFunctionInfo &FI) const override;
7770 };
7771 } // end anonymous namespace
7772
7773
7774 ABIArgInfo
7775 SparcV8ABIInfo::classifyReturnType(QualType Ty) const {
7776 if (Ty->isAnyComplexType()) {
7777 return ABIArgInfo::getDirect();
7778 }
7779 else {
7780 return DefaultABIInfo::classifyReturnType(Ty);
7781 }
7782 }
7783
7784 void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const {
7785
7786 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7787 for (auto &Arg : FI.arguments())
7788 Arg.info = classifyArgumentType(Arg.type);
7789 }
7790
7791 namespace {
7792 class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo {
7793 public:
7794 SparcV8TargetCodeGenInfo(CodeGenTypes &CGT)
7795 : TargetCodeGenInfo(new SparcV8ABIInfo(CGT)) {}
7796 };
7797 } // end anonymous namespace
7798
7799 //===----------------------------------------------------------------------===//
7800 // SPARC v9 ABI Implementation.
7801 // Based on the SPARC Compliance Definition version 2.4.1.
7802 //
7803 // Function arguments are mapped to a nominal "parameter array" and promoted to
7804 // registers depending on their type. Each argument occupies 8 or 16 bytes in
7805 // the array; structs larger than 16 bytes are passed indirectly.
7806 //
7807 // One case requires special care:
7808 //
7809 // struct mixed {
7810 // int i;
7811 // float f;
7812 // };
7813 //
7814 // When a struct mixed is passed by value, it only occupies 8 bytes in the
7815 // parameter array, but the int is passed in an integer register, and the float
7816 // is passed in a floating point register. This is represented as two arguments
7817 // with the LLVM IR inreg attribute:
7818 //
7819 // declare void f(i32 inreg %i, float inreg %f)
7820 //
7821 // The code generator will only allocate 4 bytes from the parameter array for
7822 // the inreg arguments. All other arguments are allocated a multiple of 8
7823 // bytes.
7824 //
7825 namespace {
7826 class SparcV9ABIInfo : public ABIInfo {
7827 public:
7828 SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
7829
7830 private:
7831 ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
7832 void computeInfo(CGFunctionInfo &FI) const override;
7833 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7834 QualType Ty) const override;
7835
7836 // Coercion type builder for structs passed in registers. The coercion type
7837 // serves two purposes:
7838 //
7839 // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
7840 // in registers.
7841 // 2. Expose aligned floating point elements as first-level elements, so the
7842 // code generator knows to pass them in floating point registers.
7843 //
7844 // We also compute the InReg flag which indicates that the struct contains
7845 // aligned 32-bit floats.
7846 //
7847 struct CoerceBuilder {
7848 llvm::LLVMContext &Context;
7849 const llvm::DataLayout &DL;
7850 SmallVector<llvm::Type*, 8> Elems;
7851 uint64_t Size;
7852 bool InReg;
7853
7854 CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
7855 : Context(c), DL(dl), Size(0), InReg(false) {}
7856
7857 // Pad Elems with integers until Size is ToSize.
7858 void pad(uint64_t ToSize) { 7859 assert(ToSize >= Size && "Cannot remove elements"); 7860 if (ToSize == Size) 7861 return; 7862 7863 // Finish the current 64-bit word. 7864 uint64_t Aligned = llvm::alignTo(Size, 64); 7865 if (Aligned > Size && Aligned <= ToSize) { 7866 Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size)); 7867 Size = Aligned; 7868 } 7869 7870 // Add whole 64-bit words. 7871 while (Size + 64 <= ToSize) { 7872 Elems.push_back(llvm::Type::getInt64Ty(Context)); 7873 Size += 64; 7874 } 7875 7876 // Final in-word padding. 7877 if (Size < ToSize) { 7878 Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size)); 7879 Size = ToSize; 7880 } 7881 } 7882 7883 // Add a floating point element at Offset. 7884 void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) { 7885 // Unaligned floats are treated as integers. 7886 if (Offset % Bits) 7887 return; 7888 // The InReg flag is only required if there are any floats < 64 bits. 7889 if (Bits < 64) 7890 InReg = true; 7891 pad(Offset); 7892 Elems.push_back(Ty); 7893 Size = Offset + Bits; 7894 } 7895 7896 // Add a struct type to the coercion type, starting at Offset (in bits). 7897 void addStruct(uint64_t Offset, llvm::StructType *StrTy) { 7898 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy); 7899 for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) { 7900 llvm::Type *ElemTy = StrTy->getElementType(i); 7901 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i); 7902 switch (ElemTy->getTypeID()) { 7903 case llvm::Type::StructTyID: 7904 addStruct(ElemOffset, cast<llvm::StructType>(ElemTy)); 7905 break; 7906 case llvm::Type::FloatTyID: 7907 addFloat(ElemOffset, ElemTy, 32); 7908 break; 7909 case llvm::Type::DoubleTyID: 7910 addFloat(ElemOffset, ElemTy, 64); 7911 break; 7912 case llvm::Type::FP128TyID: 7913 addFloat(ElemOffset, ElemTy, 128); 7914 break; 7915 case llvm::Type::PointerTyID: 7916 if (ElemOffset % 64 == 0) { 7917 pad(ElemOffset); 7918 Elems.push_back(ElemTy); 7919 Size += 64; 7920 } 7921 break; 7922 default: 7923 break; 7924 } 7925 } 7926 } 7927 7928 // Check if Ty is a usable substitute for the coercion type. 7929 bool isUsableType(llvm::StructType *Ty) const { 7930 return llvm::makeArrayRef(Elems) == Ty->elements(); 7931 } 7932 7933 // Get the coercion type as a literal struct type. 7934 llvm::Type *getType() const { 7935 if (Elems.size() == 1) 7936 return Elems.front(); 7937 else 7938 return llvm::StructType::get(Context, Elems); 7939 } 7940 }; 7941 }; 7942 } // end anonymous namespace 7943 7944 ABIArgInfo 7945 SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const { 7946 if (Ty->isVoidType()) 7947 return ABIArgInfo::getIgnore(); 7948 7949 uint64_t Size = getContext().getTypeSize(Ty); 7950 7951 // Anything too big to fit in registers is passed with an explicit indirect 7952 // pointer / sret pointer. 7953 if (Size > SizeLimit) 7954 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 7955 7956 // Treat an enum type as its underlying type. 7957 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 7958 Ty = EnumTy->getDecl()->getIntegerType(); 7959 7960 // Integer types smaller than a register are extended. 7961 if (Size < 64 && Ty->isIntegerType()) 7962 return ABIArgInfo::getExtend(); 7963 7964 // Other non-aggregates go in registers. 
7965 if (!isAggregateTypeForABI(Ty)) 7966 return ABIArgInfo::getDirect(); 7967 7968 // If a C++ object has either a non-trivial copy constructor or a non-trivial 7969 // destructor, it is passed with an explicit indirect pointer / sret pointer. 7970 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 7971 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 7972 7973 // This is a small aggregate type that should be passed in registers. 7974 // Build a coercion type from the LLVM struct type. 7975 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty)); 7976 if (!StrTy) 7977 return ABIArgInfo::getDirect(); 7978 7979 CoerceBuilder CB(getVMContext(), getDataLayout()); 7980 CB.addStruct(0, StrTy); 7981 CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64)); 7982 7983 // Try to use the original type for coercion. 7984 llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType(); 7985 7986 if (CB.InReg) 7987 return ABIArgInfo::getDirectInReg(CoerceTy); 7988 else 7989 return ABIArgInfo::getDirect(CoerceTy); 7990 } 7991 7992 Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 7993 QualType Ty) const { 7994 ABIArgInfo AI = classifyType(Ty, 16 * 8); 7995 llvm::Type *ArgTy = CGT.ConvertType(Ty); 7996 if (AI.canHaveCoerceToType() && !AI.getCoerceToType()) 7997 AI.setCoerceToType(ArgTy); 7998 7999 CharUnits SlotSize = CharUnits::fromQuantity(8); 8000 8001 CGBuilderTy &Builder = CGF.Builder; 8002 Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize); 8003 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy); 8004 8005 auto TypeInfo = getContext().getTypeInfoInChars(Ty); 8006 8007 Address ArgAddr = Address::invalid(); 8008 CharUnits Stride; 8009 switch (AI.getKind()) { 8010 case ABIArgInfo::Expand: 8011 case ABIArgInfo::CoerceAndExpand: 8012 case ABIArgInfo::InAlloca: 8013 llvm_unreachable("Unsupported ABI kind for va_arg"); 8014 8015 case ABIArgInfo::Extend: { 8016 Stride = SlotSize; 8017 CharUnits Offset = SlotSize - TypeInfo.first; 8018 ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend"); 8019 break; 8020 } 8021 8022 case ABIArgInfo::Direct: { 8023 auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType()); 8024 Stride = CharUnits::fromQuantity(AllocSize).alignTo(SlotSize); 8025 ArgAddr = Addr; 8026 break; 8027 } 8028 8029 case ABIArgInfo::Indirect: 8030 Stride = SlotSize; 8031 ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect"); 8032 ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"), 8033 TypeInfo.second); 8034 break; 8035 8036 case ABIArgInfo::Ignore: 8037 return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.second); 8038 } 8039 8040 // Update VAList. 
8041 llvm::Value *NextPtr = 8042 Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), Stride, "ap.next"); 8043 Builder.CreateStore(NextPtr, VAListAddr); 8044 8045 return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr"); 8046 } 8047 8048 void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const { 8049 FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8); 8050 for (auto &I : FI.arguments()) 8051 I.info = classifyType(I.type, 16 * 8); 8052 } 8053 8054 namespace { 8055 class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo { 8056 public: 8057 SparcV9TargetCodeGenInfo(CodeGenTypes &CGT) 8058 : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {} 8059 8060 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 8061 return 14; 8062 } 8063 8064 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 8065 llvm::Value *Address) const override; 8066 }; 8067 } // end anonymous namespace 8068 8069 bool 8070 SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 8071 llvm::Value *Address) const { 8072 // This is calculated from the LLVM and GCC tables and verified 8073 // against gcc output. AFAIK all ABIs use the same encoding. 8074 8075 CodeGen::CGBuilderTy &Builder = CGF.Builder; 8076 8077 llvm::IntegerType *i8 = CGF.Int8Ty; 8078 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 8079 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 8080 8081 // 0-31: the 8-byte general-purpose registers 8082 AssignToArrayRange(Builder, Address, Eight8, 0, 31); 8083 8084 // 32-63: f0-31, the 4-byte floating-point registers 8085 AssignToArrayRange(Builder, Address, Four8, 32, 63); 8086 8087 // Y = 64 8088 // PSR = 65 8089 // WIM = 66 8090 // TBR = 67 8091 // PC = 68 8092 // NPC = 69 8093 // FSR = 70 8094 // CSR = 71 8095 AssignToArrayRange(Builder, Address, Eight8, 64, 71); 8096 8097 // 72-87: d0-15, the 8-byte floating-point registers 8098 AssignToArrayRange(Builder, Address, Eight8, 72, 87); 8099 8100 return false; 8101 } 8102 8103 8104 //===----------------------------------------------------------------------===// 8105 // XCore ABI Implementation 8106 //===----------------------------------------------------------------------===// 8107 8108 namespace { 8109 8110 /// A SmallStringEnc instance is used to build up the TypeString by passing 8111 /// it by reference between functions that append to it. 8112 typedef llvm::SmallString<128> SmallStringEnc; 8113 8114 /// TypeStringCache caches the meta encodings of Types. 8115 /// 8116 /// The reason for caching TypeStrings is two fold: 8117 /// 1. To cache a type's encoding for later uses; 8118 /// 2. As a means to break recursive member type inclusion. 8119 /// 8120 /// A cache Entry can have a Status of: 8121 /// NonRecursive: The type encoding is not recursive; 8122 /// Recursive: The type encoding is recursive; 8123 /// Incomplete: An incomplete TypeString; 8124 /// IncompleteUsed: An incomplete TypeString that has been used in a 8125 /// Recursive type encoding. 8126 /// 8127 /// A NonRecursive entry will have all of its sub-members expanded as fully 8128 /// as possible. Whilst it may contain types which are recursive, the type 8129 /// itself is not recursive and thus its encoding may be safely used whenever 8130 /// the type is encountered. 8131 /// 8132 /// A Recursive entry will have all of its sub-members expanded as fully as 8133 /// possible. The type itself is recursive and it may contain other types which 8134 /// are recursive. 
The Recursive encoding must not be used during the expansion
8135 /// of a recursive type's recursive branch. For simplicity the code uses
8136 /// IncompleteCount to reject all usage of Recursive encodings for member types.
8137 ///
8138 /// An Incomplete entry is always a RecordType and only encodes its
8139 /// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
8140 /// are placed into the cache during type expansion as a means to identify and
8141 /// handle recursive inclusion of types as sub-members. If there is recursion
8142 /// the entry becomes IncompleteUsed.
8143 ///
8144 /// During the expansion of a RecordType's members:
8145 ///
8146 /// If the cache contains a NonRecursive encoding for the member type, the
8147 /// cached encoding is used;
8148 ///
8149 /// If the cache contains a Recursive encoding for the member type, the
8150 /// cached encoding is 'Swapped' out, as it may be incorrect, and...
8151 ///
8152 /// If the member is a RecordType, an Incomplete encoding is placed into the
8153 /// cache to break potential recursive inclusion of itself as a sub-member;
8154 ///
8155 /// Once a member RecordType has been expanded, its temporary incomplete
8156 /// entry is removed from the cache. If a Recursive encoding was swapped out
8157 /// it is swapped back in;
8158 ///
8159 /// If an incomplete entry is used to expand a sub-member, the incomplete
8160 /// entry is marked as IncompleteUsed. The cache keeps count of how many
8161 /// IncompleteUsed entries it currently contains in IncompleteUsedCount;
8162 ///
8163 /// If a member's encoding is found to be NonRecursive or Recursive (viz:
8164 /// IncompleteUsedCount==0), the member's encoding is added to the cache.
8165 /// Else the member is part of a recursive type and thus the recursion has
8166 /// been exited too soon for the encoding to be correct for the member.
8167 ///
8168 class TypeStringCache {
8169 enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
8170 struct Entry {
8171 std::string Str; // The encoded TypeString for the type.
8172 enum Status State; // Information about the encoding in 'Str'.
8173 std::string Swapped; // A temporary placeholder for a Recursive encoding
8174 // during the expansion of RecordType's members.
8175 };
8176 std::map<const IdentifierInfo *, struct Entry> Map;
8177 unsigned IncompleteCount; // Number of Incomplete entries in the Map.
8178 unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
8179 public:
8180 TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
8181 void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
8182 bool removeIncomplete(const IdentifierInfo *ID);
8183 void addIfComplete(const IdentifierInfo *ID, StringRef Str,
8184 bool IsRecursive);
8185 StringRef lookupStr(const IdentifierInfo *ID);
8186 };
8187
8188 /// TypeString encodings for enum & union fields must be ordered.
8189 /// FieldEncoding is a helper for this ordering process.
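/// Illustrative example (derived from FieldEncoding::operator< below and from
/// appendEnumType() later in this file): entries with a name sort before
/// unnamed ones, and ties are broken by comparing the encoded strings, so a
/// hypothetical
///
///   enum E { Z = 1, A = 2 };
///
/// is emitted with its members in alphanumeric order as "e(E){m(A){2},m(Z){1}}".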
8190 class FieldEncoding { 8191 bool HasName; 8192 std::string Enc; 8193 public: 8194 FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {} 8195 StringRef str() { return Enc; } 8196 bool operator<(const FieldEncoding &rhs) const { 8197 if (HasName != rhs.HasName) return HasName; 8198 return Enc < rhs.Enc; 8199 } 8200 }; 8201 8202 class XCoreABIInfo : public DefaultABIInfo { 8203 public: 8204 XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} 8205 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 8206 QualType Ty) const override; 8207 }; 8208 8209 class XCoreTargetCodeGenInfo : public TargetCodeGenInfo { 8210 mutable TypeStringCache TSC; 8211 public: 8212 XCoreTargetCodeGenInfo(CodeGenTypes &CGT) 8213 :TargetCodeGenInfo(new XCoreABIInfo(CGT)) {} 8214 void emitTargetMD(const Decl *D, llvm::GlobalValue *GV, 8215 CodeGen::CodeGenModule &M) const override; 8216 }; 8217 8218 } // End anonymous namespace. 8219 8220 // TODO: this implementation is likely now redundant with the default 8221 // EmitVAArg. 8222 Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 8223 QualType Ty) const { 8224 CGBuilderTy &Builder = CGF.Builder; 8225 8226 // Get the VAList. 8227 CharUnits SlotSize = CharUnits::fromQuantity(4); 8228 Address AP(Builder.CreateLoad(VAListAddr), SlotSize); 8229 8230 // Handle the argument. 8231 ABIArgInfo AI = classifyArgumentType(Ty); 8232 CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty); 8233 llvm::Type *ArgTy = CGT.ConvertType(Ty); 8234 if (AI.canHaveCoerceToType() && !AI.getCoerceToType()) 8235 AI.setCoerceToType(ArgTy); 8236 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy); 8237 8238 Address Val = Address::invalid(); 8239 CharUnits ArgSize = CharUnits::Zero(); 8240 switch (AI.getKind()) { 8241 case ABIArgInfo::Expand: 8242 case ABIArgInfo::CoerceAndExpand: 8243 case ABIArgInfo::InAlloca: 8244 llvm_unreachable("Unsupported ABI kind for va_arg"); 8245 case ABIArgInfo::Ignore: 8246 Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign); 8247 ArgSize = CharUnits::Zero(); 8248 break; 8249 case ABIArgInfo::Extend: 8250 case ABIArgInfo::Direct: 8251 Val = Builder.CreateBitCast(AP, ArgPtrTy); 8252 ArgSize = CharUnits::fromQuantity( 8253 getDataLayout().getTypeAllocSize(AI.getCoerceToType())); 8254 ArgSize = ArgSize.alignTo(SlotSize); 8255 break; 8256 case ABIArgInfo::Indirect: 8257 Val = Builder.CreateElementBitCast(AP, ArgPtrTy); 8258 Val = Address(Builder.CreateLoad(Val), TypeAlign); 8259 ArgSize = SlotSize; 8260 break; 8261 } 8262 8263 // Increment the VAList. 8264 if (!ArgSize.isZero()) { 8265 llvm::Value *APN = 8266 Builder.CreateConstInBoundsByteGEP(AP.getPointer(), ArgSize); 8267 Builder.CreateStore(APN, VAListAddr); 8268 } 8269 8270 return Val; 8271 } 8272 8273 /// During the expansion of a RecordType, an incomplete TypeString is placed 8274 /// into the cache as a means to identify and break recursion. 8275 /// If there is a Recursive encoding in the cache, it is swapped out and will 8276 /// be reinserted by removeIncomplete(). 8277 /// All other types of encoding should have been used rather than arriving here. 
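/// Illustrative example (derived from the cache operations below and from
/// appendRecordType() later in this file): while the members of a hypothetical
///
///   struct S { struct S *next; };
///
/// are being expanded, the stub "s(S){}" is placed in the cache, so the
/// self-reference encodes against the stub and the final TypeString becomes
/// "s(S){m(next){p(s(S){})}}".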
8278 void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
8279 std::string StubEnc) {
8280 if (!ID)
8281 return;
8282 Entry &E = Map[ID];
8283 assert( (E.Str.empty() || E.State == Recursive) &&
8284 "Incorrect use of addIncomplete");
8285 assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
8286 E.Swapped.swap(E.Str); // swap out the Recursive
8287 E.Str.swap(StubEnc);
8288 E.State = Incomplete;
8289 ++IncompleteCount;
8290 }
8291
8292 /// Once the RecordType has been expanded, the temporary incomplete TypeString
8293 /// must be removed from the cache.
8294 /// If a Recursive was swapped out by addIncomplete(), it will be replaced.
8295 /// Returns true if the RecordType was defined recursively.
8296 bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
8297 if (!ID)
8298 return false;
8299 auto I = Map.find(ID);
8300 assert(I != Map.end() && "Entry not present");
8301 Entry &E = I->second;
8302 assert( (E.State == Incomplete ||
8303 E.State == IncompleteUsed) &&
8304 "Entry must be an incomplete type");
8305 bool IsRecursive = false;
8306 if (E.State == IncompleteUsed) {
8307 // We made use of our Incomplete encoding, thus we are recursive.
8308 IsRecursive = true;
8309 --IncompleteUsedCount;
8310 }
8311 if (E.Swapped.empty())
8312 Map.erase(I);
8313 else {
8314 // Swap the Recursive back.
8315 E.Swapped.swap(E.Str);
8316 E.Swapped.clear();
8317 E.State = Recursive;
8318 }
8319 --IncompleteCount;
8320 return IsRecursive;
8321 }
8322
8323 /// Add the encoded TypeString to the cache only if it is NonRecursive or
8324 /// Recursive (viz: all sub-members were expanded as fully as possible).
8325 void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
8326 bool IsRecursive) {
8327 if (!ID || IncompleteUsedCount)
8328 return; // No key or it is an incomplete sub-type, so don't add.
8329 Entry &E = Map[ID];
8330 if (IsRecursive && !E.Str.empty()) {
8331 assert(E.State==Recursive && E.Str.size() == Str.size() &&
8332 "This is not the same Recursive entry");
8333 // The parent container was not recursive after all, so we could have used
8334 // this Recursive sub-member entry, but we assumed the worst when we
8335 // started (viz: IncompleteCount != 0).
8336 return;
8337 }
8338 assert(E.Str.empty() && "Entry already present");
8339 E.Str = Str.str();
8340 E.State = IsRecursive? Recursive : NonRecursive;
8341 }
8342
8343 /// Return a cached TypeString encoding for the ID. If there isn't one, or we
8344 /// are recursively expanding a type (IncompleteCount != 0) and the cached
8345 /// encoding is Recursive, return an empty StringRef.
8346 StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
8347 if (!ID)
8348 return StringRef(); // We have no key.
8349 auto I = Map.find(ID);
8350 if (I == Map.end())
8351 return StringRef(); // We have no encoding.
8352 Entry &E = I->second;
8353 if (E.State == Recursive && IncompleteCount)
8354 return StringRef(); // We don't use Recursive encodings for member types.
8355
8356 if (E.State == Incomplete) {
8357 // The incomplete type is being used to break out of recursion.
8358 E.State = IncompleteUsed;
8359 ++IncompleteUsedCount;
8360 }
8361 return E.Str;
8362 }
8363
8364 /// The XCore ABI includes a type information section that communicates symbol
8365 /// type information to the linker. The linker uses this information to verify
8366 /// safety/correctness of things such as array bounds and pointers.
8367 /// The ABI only requires C (and XC) language modules to emit TypeStrings. 8368 /// This type information (TypeString) is emitted into meta data for all global 8369 /// symbols: definitions, declarations, functions & variables. 8370 /// 8371 /// The TypeString carries type, qualifier, name, size & value details. 8372 /// Please see 'Tools Development Guide' section 2.16.2 for format details: 8373 /// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf 8374 /// The output is tested by test/CodeGen/xcore-stringtype.c. 8375 /// 8376 static bool getTypeString(SmallStringEnc &Enc, const Decl *D, 8377 CodeGen::CodeGenModule &CGM, TypeStringCache &TSC); 8378 8379 /// XCore uses emitTargetMD to emit TypeString metadata for global symbols. 8380 void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV, 8381 CodeGen::CodeGenModule &CGM) const { 8382 SmallStringEnc Enc; 8383 if (getTypeString(Enc, D, CGM, TSC)) { 8384 llvm::LLVMContext &Ctx = CGM.getModule().getContext(); 8385 llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV), 8386 llvm::MDString::get(Ctx, Enc.str())}; 8387 llvm::NamedMDNode *MD = 8388 CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings"); 8389 MD->addOperand(llvm::MDNode::get(Ctx, MDVals)); 8390 } 8391 } 8392 8393 //===----------------------------------------------------------------------===// 8394 // SPIR ABI Implementation 8395 //===----------------------------------------------------------------------===// 8396 8397 namespace { 8398 class SPIRTargetCodeGenInfo : public TargetCodeGenInfo { 8399 public: 8400 SPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 8401 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 8402 unsigned getOpenCLKernelCallingConv() const override; 8403 }; 8404 8405 } // End anonymous namespace. 8406 8407 namespace clang { 8408 namespace CodeGen { 8409 void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) { 8410 DefaultABIInfo SPIRABI(CGM.getTypes()); 8411 SPIRABI.computeInfo(FI); 8412 } 8413 } 8414 } 8415 8416 unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const { 8417 return llvm::CallingConv::SPIR_KERNEL; 8418 } 8419 8420 static bool appendType(SmallStringEnc &Enc, QualType QType, 8421 const CodeGen::CodeGenModule &CGM, 8422 TypeStringCache &TSC); 8423 8424 /// Helper function for appendRecordType(). 8425 /// Builds a SmallVector containing the encoded field types in declaration 8426 /// order. 8427 static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE, 8428 const RecordDecl *RD, 8429 const CodeGen::CodeGenModule &CGM, 8430 TypeStringCache &TSC) { 8431 for (const auto *Field : RD->fields()) { 8432 SmallStringEnc Enc; 8433 Enc += "m("; 8434 Enc += Field->getName(); 8435 Enc += "){"; 8436 if (Field->isBitField()) { 8437 Enc += "b("; 8438 llvm::raw_svector_ostream OS(Enc); 8439 OS << Field->getBitWidthValue(CGM.getContext()); 8440 Enc += ':'; 8441 } 8442 if (!appendType(Enc, Field->getType(), CGM, TSC)) 8443 return false; 8444 if (Field->isBitField()) 8445 Enc += ')'; 8446 Enc += '}'; 8447 FE.emplace_back(!Field->getName().empty(), Enc); 8448 } 8449 return true; 8450 } 8451 8452 /// Appends structure and union types to Enc and adds encoding to cache. 8453 /// Recursively calls appendType (via extractFieldType) for each field. 8454 /// Union types have their fields ordered according to the ABI. 
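/// Illustrative example (derived from extractFieldType() above and the record
/// encoding assembled below): a hypothetical
///
///   struct S { int a; unsigned char b; };
///
/// is encoded as "s(S){m(a){si},m(b){uc}}"; for a union the same fields would
/// be emitted inside "u(...){...}" with the members sorted as described above.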
8455 static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT, 8456 const CodeGen::CodeGenModule &CGM, 8457 TypeStringCache &TSC, const IdentifierInfo *ID) { 8458 // Append the cached TypeString if we have one. 8459 StringRef TypeString = TSC.lookupStr(ID); 8460 if (!TypeString.empty()) { 8461 Enc += TypeString; 8462 return true; 8463 } 8464 8465 // Start to emit an incomplete TypeString. 8466 size_t Start = Enc.size(); 8467 Enc += (RT->isUnionType()? 'u' : 's'); 8468 Enc += '('; 8469 if (ID) 8470 Enc += ID->getName(); 8471 Enc += "){"; 8472 8473 // We collect all encoded fields and order as necessary. 8474 bool IsRecursive = false; 8475 const RecordDecl *RD = RT->getDecl()->getDefinition(); 8476 if (RD && !RD->field_empty()) { 8477 // An incomplete TypeString stub is placed in the cache for this RecordType 8478 // so that recursive calls to this RecordType will use it whilst building a 8479 // complete TypeString for this RecordType. 8480 SmallVector<FieldEncoding, 16> FE; 8481 std::string StubEnc(Enc.substr(Start).str()); 8482 StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString. 8483 TSC.addIncomplete(ID, std::move(StubEnc)); 8484 if (!extractFieldType(FE, RD, CGM, TSC)) { 8485 (void) TSC.removeIncomplete(ID); 8486 return false; 8487 } 8488 IsRecursive = TSC.removeIncomplete(ID); 8489 // The ABI requires unions to be sorted but not structures. 8490 // See FieldEncoding::operator< for sort algorithm. 8491 if (RT->isUnionType()) 8492 std::sort(FE.begin(), FE.end()); 8493 // We can now complete the TypeString. 8494 unsigned E = FE.size(); 8495 for (unsigned I = 0; I != E; ++I) { 8496 if (I) 8497 Enc += ','; 8498 Enc += FE[I].str(); 8499 } 8500 } 8501 Enc += '}'; 8502 TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive); 8503 return true; 8504 } 8505 8506 /// Appends enum types to Enc and adds the encoding to the cache. 8507 static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET, 8508 TypeStringCache &TSC, 8509 const IdentifierInfo *ID) { 8510 // Append the cached TypeString if we have one. 8511 StringRef TypeString = TSC.lookupStr(ID); 8512 if (!TypeString.empty()) { 8513 Enc += TypeString; 8514 return true; 8515 } 8516 8517 size_t Start = Enc.size(); 8518 Enc += "e("; 8519 if (ID) 8520 Enc += ID->getName(); 8521 Enc += "){"; 8522 8523 // We collect all encoded enumerations and order them alphanumerically. 8524 if (const EnumDecl *ED = ET->getDecl()->getDefinition()) { 8525 SmallVector<FieldEncoding, 16> FE; 8526 for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E; 8527 ++I) { 8528 SmallStringEnc EnumEnc; 8529 EnumEnc += "m("; 8530 EnumEnc += I->getName(); 8531 EnumEnc += "){"; 8532 I->getInitVal().toString(EnumEnc); 8533 EnumEnc += '}'; 8534 FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc)); 8535 } 8536 std::sort(FE.begin(), FE.end()); 8537 unsigned E = FE.size(); 8538 for (unsigned I = 0; I != E; ++I) { 8539 if (I) 8540 Enc += ','; 8541 Enc += FE[I].str(); 8542 } 8543 } 8544 Enc += '}'; 8545 TSC.addIfComplete(ID, Enc.substr(Start), false); 8546 return true; 8547 } 8548 8549 /// Appends type's qualifier to Enc. 8550 /// This is done prior to appending the type's encoding. 8551 static void appendQualifier(SmallStringEnc &Enc, QualType QT) { 8552 // Qualifiers are emitted in alphabetical order. 
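// Illustrative example (derived from the table and bit tests below): for a
// 'const volatile int', Lookup becomes 1 + 4 == 5, so "cv:" is emitted ahead
// of the type's own encoding, giving "cv:si".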
8553 static const char *const Table[]={"","c:","r:","cr:","v:","cv:","rv:","crv:"}; 8554 int Lookup = 0; 8555 if (QT.isConstQualified()) 8556 Lookup += 1<<0; 8557 if (QT.isRestrictQualified()) 8558 Lookup += 1<<1; 8559 if (QT.isVolatileQualified()) 8560 Lookup += 1<<2; 8561 Enc += Table[Lookup]; 8562 } 8563 8564 /// Appends built-in types to Enc. 8565 static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) { 8566 const char *EncType; 8567 switch (BT->getKind()) { 8568 case BuiltinType::Void: 8569 EncType = "0"; 8570 break; 8571 case BuiltinType::Bool: 8572 EncType = "b"; 8573 break; 8574 case BuiltinType::Char_U: 8575 EncType = "uc"; 8576 break; 8577 case BuiltinType::UChar: 8578 EncType = "uc"; 8579 break; 8580 case BuiltinType::SChar: 8581 EncType = "sc"; 8582 break; 8583 case BuiltinType::UShort: 8584 EncType = "us"; 8585 break; 8586 case BuiltinType::Short: 8587 EncType = "ss"; 8588 break; 8589 case BuiltinType::UInt: 8590 EncType = "ui"; 8591 break; 8592 case BuiltinType::Int: 8593 EncType = "si"; 8594 break; 8595 case BuiltinType::ULong: 8596 EncType = "ul"; 8597 break; 8598 case BuiltinType::Long: 8599 EncType = "sl"; 8600 break; 8601 case BuiltinType::ULongLong: 8602 EncType = "ull"; 8603 break; 8604 case BuiltinType::LongLong: 8605 EncType = "sll"; 8606 break; 8607 case BuiltinType::Float: 8608 EncType = "ft"; 8609 break; 8610 case BuiltinType::Double: 8611 EncType = "d"; 8612 break; 8613 case BuiltinType::LongDouble: 8614 EncType = "ld"; 8615 break; 8616 default: 8617 return false; 8618 } 8619 Enc += EncType; 8620 return true; 8621 } 8622 8623 /// Appends a pointer encoding to Enc before calling appendType for the pointee. 8624 static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT, 8625 const CodeGen::CodeGenModule &CGM, 8626 TypeStringCache &TSC) { 8627 Enc += "p("; 8628 if (!appendType(Enc, PT->getPointeeType(), CGM, TSC)) 8629 return false; 8630 Enc += ')'; 8631 return true; 8632 } 8633 8634 /// Appends array encoding to Enc before calling appendType for the element. 8635 static bool appendArrayType(SmallStringEnc &Enc, QualType QT, 8636 const ArrayType *AT, 8637 const CodeGen::CodeGenModule &CGM, 8638 TypeStringCache &TSC, StringRef NoSizeEnc) { 8639 if (AT->getSizeModifier() != ArrayType::Normal) 8640 return false; 8641 Enc += "a("; 8642 if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) 8643 CAT->getSize().toStringUnsigned(Enc); 8644 else 8645 Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "". 8646 Enc += ':'; 8647 // The Qualifiers should be attached to the type rather than the array. 8648 appendQualifier(Enc, QT); 8649 if (!appendType(Enc, AT->getElementType(), CGM, TSC)) 8650 return false; 8651 Enc += ')'; 8652 return true; 8653 } 8654 8655 /// Appends a function encoding to Enc, calling appendType for the return type 8656 /// and the arguments. 8657 static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT, 8658 const CodeGen::CodeGenModule &CGM, 8659 TypeStringCache &TSC) { 8660 Enc += "f{"; 8661 if (!appendType(Enc, FT->getReturnType(), CGM, TSC)) 8662 return false; 8663 Enc += "}("; 8664 if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) { 8665 // N.B. we are only interested in the adjusted param types. 
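// Illustrative examples (derived from the encoding assembled in this
// function): a C function 'int f(int, ...)' encodes as "f{si}(si,va)", and
// 'void g(void)' encodes as "f{0}(0)".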
8666 auto I = FPT->param_type_begin(); 8667 auto E = FPT->param_type_end(); 8668 if (I != E) { 8669 do { 8670 if (!appendType(Enc, *I, CGM, TSC)) 8671 return false; 8672 ++I; 8673 if (I != E) 8674 Enc += ','; 8675 } while (I != E); 8676 if (FPT->isVariadic()) 8677 Enc += ",va"; 8678 } else { 8679 if (FPT->isVariadic()) 8680 Enc += "va"; 8681 else 8682 Enc += '0'; 8683 } 8684 } 8685 Enc += ')'; 8686 return true; 8687 } 8688 8689 /// Handles the type's qualifier before dispatching a call to handle specific 8690 /// type encodings. 8691 static bool appendType(SmallStringEnc &Enc, QualType QType, 8692 const CodeGen::CodeGenModule &CGM, 8693 TypeStringCache &TSC) { 8694 8695 QualType QT = QType.getCanonicalType(); 8696 8697 if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) 8698 // The Qualifiers should be attached to the type rather than the array. 8699 // Thus we don't call appendQualifier() here. 8700 return appendArrayType(Enc, QT, AT, CGM, TSC, ""); 8701 8702 appendQualifier(Enc, QT); 8703 8704 if (const BuiltinType *BT = QT->getAs<BuiltinType>()) 8705 return appendBuiltinType(Enc, BT); 8706 8707 if (const PointerType *PT = QT->getAs<PointerType>()) 8708 return appendPointerType(Enc, PT, CGM, TSC); 8709 8710 if (const EnumType *ET = QT->getAs<EnumType>()) 8711 return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier()); 8712 8713 if (const RecordType *RT = QT->getAsStructureType()) 8714 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier()); 8715 8716 if (const RecordType *RT = QT->getAsUnionType()) 8717 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier()); 8718 8719 if (const FunctionType *FT = QT->getAs<FunctionType>()) 8720 return appendFunctionType(Enc, FT, CGM, TSC); 8721 8722 return false; 8723 } 8724 8725 static bool getTypeString(SmallStringEnc &Enc, const Decl *D, 8726 CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) { 8727 if (!D) 8728 return false; 8729 8730 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 8731 if (FD->getLanguageLinkage() != CLanguageLinkage) 8732 return false; 8733 return appendType(Enc, FD->getType(), CGM, TSC); 8734 } 8735 8736 if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { 8737 if (VD->getLanguageLinkage() != CLanguageLinkage) 8738 return false; 8739 QualType QT = VD->getType().getCanonicalType(); 8740 if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) { 8741 // Global ArrayTypes are given a size of '*' if the size is unknown. 8742 // The Qualifiers should be attached to the type rather than the array. 8743 // Thus we don't call appendQualifier() here. 8744 return appendArrayType(Enc, QT, AT, CGM, TSC, "*"); 8745 } 8746 return appendType(Enc, QT, CGM, TSC); 8747 } 8748 return false; 8749 } 8750 8751 8752 //===----------------------------------------------------------------------===// 8753 // Driver code 8754 //===----------------------------------------------------------------------===// 8755 8756 bool CodeGenModule::supportsCOMDAT() const { 8757 return getTriple().supportsCOMDAT(); 8758 } 8759 8760 const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() { 8761 if (TheTargetCodeGenInfo) 8762 return *TheTargetCodeGenInfo; 8763 8764 // Helper to set the unique_ptr while still keeping the return value. 
8765 auto SetCGInfo = [&](TargetCodeGenInfo *P) -> const TargetCodeGenInfo & { 8766 this->TheTargetCodeGenInfo.reset(P); 8767 return *P; 8768 }; 8769 8770 const llvm::Triple &Triple = getTarget().getTriple(); 8771 switch (Triple.getArch()) { 8772 default: 8773 return SetCGInfo(new DefaultTargetCodeGenInfo(Types)); 8774 8775 case llvm::Triple::le32: 8776 return SetCGInfo(new PNaClTargetCodeGenInfo(Types)); 8777 case llvm::Triple::mips: 8778 case llvm::Triple::mipsel: 8779 if (Triple.getOS() == llvm::Triple::NaCl) 8780 return SetCGInfo(new PNaClTargetCodeGenInfo(Types)); 8781 return SetCGInfo(new MIPSTargetCodeGenInfo(Types, true)); 8782 8783 case llvm::Triple::mips64: 8784 case llvm::Triple::mips64el: 8785 return SetCGInfo(new MIPSTargetCodeGenInfo(Types, false)); 8786 8787 case llvm::Triple::avr: 8788 return SetCGInfo(new AVRTargetCodeGenInfo(Types)); 8789 8790 case llvm::Triple::aarch64: 8791 case llvm::Triple::aarch64_be: { 8792 AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS; 8793 if (getTarget().getABI() == "darwinpcs") 8794 Kind = AArch64ABIInfo::DarwinPCS; 8795 else if (Triple.isOSWindows()) 8796 return SetCGInfo( 8797 new WindowsAArch64TargetCodeGenInfo(Types, AArch64ABIInfo::Win64)); 8798 8799 return SetCGInfo(new AArch64TargetCodeGenInfo(Types, Kind)); 8800 } 8801 8802 case llvm::Triple::wasm32: 8803 case llvm::Triple::wasm64: 8804 return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types)); 8805 8806 case llvm::Triple::arm: 8807 case llvm::Triple::armeb: 8808 case llvm::Triple::thumb: 8809 case llvm::Triple::thumbeb: { 8810 if (Triple.getOS() == llvm::Triple::Win32) { 8811 return SetCGInfo( 8812 new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP)); 8813 } 8814 8815 ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS; 8816 StringRef ABIStr = getTarget().getABI(); 8817 if (ABIStr == "apcs-gnu") 8818 Kind = ARMABIInfo::APCS; 8819 else if (ABIStr == "aapcs16") 8820 Kind = ARMABIInfo::AAPCS16_VFP; 8821 else if (CodeGenOpts.FloatABI == "hard" || 8822 (CodeGenOpts.FloatABI != "soft" && 8823 (Triple.getEnvironment() == llvm::Triple::GNUEABIHF || 8824 Triple.getEnvironment() == llvm::Triple::MuslEABIHF || 8825 Triple.getEnvironment() == llvm::Triple::EABIHF))) 8826 Kind = ARMABIInfo::AAPCS_VFP; 8827 8828 return SetCGInfo(new ARMTargetCodeGenInfo(Types, Kind)); 8829 } 8830 8831 case llvm::Triple::ppc: 8832 return SetCGInfo( 8833 new PPC32TargetCodeGenInfo(Types, CodeGenOpts.FloatABI == "soft")); 8834 case llvm::Triple::ppc64: 8835 if (Triple.isOSBinFormatELF()) { 8836 PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1; 8837 if (getTarget().getABI() == "elfv2") 8838 Kind = PPC64_SVR4_ABIInfo::ELFv2; 8839 bool HasQPX = getTarget().getABI() == "elfv1-qpx"; 8840 bool IsSoftFloat = CodeGenOpts.FloatABI == "soft"; 8841 8842 return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX, 8843 IsSoftFloat)); 8844 } else 8845 return SetCGInfo(new PPC64TargetCodeGenInfo(Types)); 8846 case llvm::Triple::ppc64le: { 8847 assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!"); 8848 PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2; 8849 if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx") 8850 Kind = PPC64_SVR4_ABIInfo::ELFv1; 8851 bool HasQPX = getTarget().getABI() == "elfv1-qpx"; 8852 bool IsSoftFloat = CodeGenOpts.FloatABI == "soft"; 8853 8854 return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX, 8855 IsSoftFloat)); 8856 } 8857 8858 case llvm::Triple::nvptx: 8859 case llvm::Triple::nvptx64: 8860 return 
SetCGInfo(new NVPTXTargetCodeGenInfo(Types)); 8861 8862 case llvm::Triple::msp430: 8863 return SetCGInfo(new MSP430TargetCodeGenInfo(Types)); 8864 8865 case llvm::Triple::systemz: { 8866 bool HasVector = getTarget().getABI() == "vector"; 8867 return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector)); 8868 } 8869 8870 case llvm::Triple::tce: 8871 case llvm::Triple::tcele: 8872 return SetCGInfo(new TCETargetCodeGenInfo(Types)); 8873 8874 case llvm::Triple::x86: { 8875 bool IsDarwinVectorABI = Triple.isOSDarwin(); 8876 bool RetSmallStructInRegABI = 8877 X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts); 8878 bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing(); 8879 8880 if (Triple.getOS() == llvm::Triple::Win32) { 8881 return SetCGInfo(new WinX86_32TargetCodeGenInfo( 8882 Types, IsDarwinVectorABI, RetSmallStructInRegABI, 8883 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters)); 8884 } else { 8885 return SetCGInfo(new X86_32TargetCodeGenInfo( 8886 Types, IsDarwinVectorABI, RetSmallStructInRegABI, 8887 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters, 8888 CodeGenOpts.FloatABI == "soft")); 8889 } 8890 } 8891 8892 case llvm::Triple::x86_64: { 8893 StringRef ABI = getTarget().getABI(); 8894 X86AVXABILevel AVXLevel = 8895 (ABI == "avx512" 8896 ? X86AVXABILevel::AVX512 8897 : ABI == "avx" ? X86AVXABILevel::AVX : X86AVXABILevel::None); 8898 8899 switch (Triple.getOS()) { 8900 case llvm::Triple::Win32: 8901 return SetCGInfo(new WinX86_64TargetCodeGenInfo(Types, AVXLevel)); 8902 case llvm::Triple::PS4: 8903 return SetCGInfo(new PS4TargetCodeGenInfo(Types, AVXLevel)); 8904 default: 8905 return SetCGInfo(new X86_64TargetCodeGenInfo(Types, AVXLevel)); 8906 } 8907 } 8908 case llvm::Triple::hexagon: 8909 return SetCGInfo(new HexagonTargetCodeGenInfo(Types)); 8910 case llvm::Triple::lanai: 8911 return SetCGInfo(new LanaiTargetCodeGenInfo(Types)); 8912 case llvm::Triple::r600: 8913 return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types)); 8914 case llvm::Triple::amdgcn: 8915 return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types)); 8916 case llvm::Triple::sparc: 8917 return SetCGInfo(new SparcV8TargetCodeGenInfo(Types)); 8918 case llvm::Triple::sparcv9: 8919 return SetCGInfo(new SparcV9TargetCodeGenInfo(Types)); 8920 case llvm::Triple::xcore: 8921 return SetCGInfo(new XCoreTargetCodeGenInfo(Types)); 8922 case llvm::Triple::spir: 8923 case llvm::Triple::spir64: 8924 return SetCGInfo(new SPIRTargetCodeGenInfo(Types)); 8925 } 8926 } 8927