//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>    // std::sort

using namespace clang;
using namespace CodeGen;

// Helper for coercing an aggregate argument or return value into an integer
// array of the same size (including padding) and alignment. This alternate
// coercion happens only for the RenderScript ABI and can be removed after
// runtimes that rely on it are no longer supported.
//
// RenderScript assumes that the size of the argument / return value in the IR
// is the same as the size of the corresponding qualified type. This helper
// coerces the aggregate type into an array of the same size (including
// padding). This coercion is used in lieu of expansion of struct members or
// other canonical coercions that return a coerced-type of larger size.
//
// Ty          - The argument / return value type
// Context     - The associated ASTContext
// LLVMContext - The associated LLVMContext
static ABIArgInfo coerceToIntArray(QualType Ty,
                                   ASTContext &Context,
                                   llvm::LLVMContext &LLVMContext) {
  // Alignment and Size are measured in bits.
  const uint64_t Size = Context.getTypeSize(Ty);
  const uint64_t Alignment = Context.getTypeAlign(Ty);
  llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
  const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
  return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
}

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
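  // For each index in [FirstIndex, LastIndex], emit a store of Value into
  // Array[I]; the result is a straight-line run of stores in the IR rather
  // than a loop.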
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell =
        Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
    Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
         T->isMemberFunctionPointerType();
}

ABIArgInfo
ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByRef, bool Realign,
                                 llvm::Type *Padding) const {
  return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty),
                                 ByRef, Realign, Padding);
}

ABIArgInfo
ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const {
  return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
                                      /*ByRef*/ false, Realign);
}

Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                             QualType Ty) const {
  return Address::invalid();
}

ABIInfo::~ABIInfo() {}

/// Does the given lowering require more than the given number of
/// registers when expanded?
///
/// This is intended to be the basis of a reasonable basic implementation
/// of should{Pass,Return}IndirectlyForSwift.
///
/// For most targets, a limit of four total registers is reasonable; this
/// limits the amount of code required in order to move around the value
/// in case it wasn't produced immediately prior to the call by the caller
/// (or wasn't produced in exactly the right registers) or isn't used
/// immediately within the callee.  But some targets may need to further
/// limit the register count due to an inability to support that many
/// return registers.
static bool occupiesMoreThan(CodeGenTypes &cgt,
                             ArrayRef<llvm::Type*> scalarTypes,
                             unsigned maxAllRegisters) {
  unsigned intCount = 0, fpCount = 0;
  for (llvm::Type *type : scalarTypes) {
    if (type->isPointerTy()) {
      intCount++;
    } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
      auto ptrWidth = cgt.getTarget().getPointerWidth(0);
      intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
    } else {
      assert(type->isVectorTy() || type->isFloatingPointTy());
      fpCount++;
    }
  }

  return (intCount + fpCount > maxAllRegisters);
}

bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
                                             llvm::Type *eltTy,
                                             unsigned numElts) const {
  // The default implementation of this assumes that the target guarantees
  // 128-bit SIMD support but nothing more.
  return (vectorSize.getQuantity() > 8 && vectorSize.getQuantity() <= 16);
}

static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
                                              CGCXXABI &CXXABI) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return CGCXXABI::RAA_Default;
  return CXXABI.getRecordArgABI(RD);
}

static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
                                              CGCXXABI &CXXABI) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return CGCXXABI::RAA_Default;
  return getRecordArgABI(RT, CXXABI);
}

/// Pass transparent unions as if they were the type of the first element. Sema
/// should ensure that all elements of the union have the same "machine type".
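///
/// For illustration (an assumed example, not from this file): a union such as
///   typedef union { int *ip; long *lp; } IntPtrs
///       __attribute__((transparent_union));
/// is passed exactly as its first field would be, i.e. as an 'int *'.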
static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
  if (const RecordType *UT = Ty->getAsUnionType()) {
    const RecordDecl *UD = UT->getDecl();
    if (UD->hasAttr<TransparentUnionAttr>()) {
      assert(!UD->field_empty() && "sema created an empty transparent union");
      return UD->field_begin()->getType();
    }
  }
  return Ty;
}

CGCXXABI &ABIInfo::getCXXABI() const {
  return CGT.getCXXABI();
}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

const TargetInfo &ABIInfo::getTarget() const {
  return CGT.getTarget();
}

bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); }

bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return false;
}

bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                uint64_t Members) const {
  return false;
}

bool ABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
  return false;
}

LLVM_DUMP_METHOD void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case InAlloca:
    OS << "InAlloca Offset=" << getInAllocaFieldIndex();
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign().getQuantity()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  case CoerceAndExpand:
    OS << "CoerceAndExpand Type=";
    getCoerceAndExpandType()->print(OS);
    break;
  }
  OS << ")\n";
}

// Dynamically round a pointer up to a multiple of the given alignment.
static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
                                                  llvm::Value *Ptr,
                                                  CharUnits Align) {
  llvm::Value *PtrAsInt = Ptr;
  // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
  PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy);
  PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt,
        llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1));
  PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt,
        llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity()));
  PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt,
                                        Ptr->getType(),
                                        Ptr->getName() + ".aligned");
  return PtrAsInt;
}

/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// This version implements the core direct-value passing rules.
///
/// \param SlotSize - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding.
///   If this is false, the returned address might be less-aligned than
///   DirectAlign.
static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
                                      Address VAListAddr,
                                      llvm::Type *DirectTy,
                                      CharUnits DirectSize,
                                      CharUnits DirectAlign,
                                      CharUnits SlotSize,
                                      bool AllowHigherAlign) {
  // Cast the element type to i8* if necessary.  Some platforms define
  // va_list as a struct containing an i8* instead of just an i8*.
  if (VAListAddr.getElementType() != CGF.Int8PtrTy)
    VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);

  llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");

  // If the CC aligns values higher than the slot size, do so if needed.
  Address Addr = Address::invalid();
  if (AllowHigherAlign && DirectAlign > SlotSize) {
    Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
                   DirectAlign);
  } else {
    Addr = Address(Ptr, SlotSize);
  }

  // Advance the pointer past the argument, then store that back.
  CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
  llvm::Value *NextPtr =
      CGF.Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), FullDirectSize,
                                             "argp.next");
  CGF.Builder.CreateStore(NextPtr, VAListAddr);

  // If the argument is smaller than a slot, and this is a big-endian
  // target, the argument will be right-adjusted in its slot.
  if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() &&
      !DirectTy->isStructTy()) {
    Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
  }

  Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy);
  return Addr;
}

/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// \param IsIndirect - Values of this type are passed indirectly.
/// \param ValueInfo - The size and alignment of this type, generally
///   computed with getContext().getTypeInfoInChars(ValueTy).
/// \param SlotSizeAndAlign - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding.
static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType ValueTy, bool IsIndirect,
                                std::pair<CharUnits, CharUnits> ValueInfo,
                                CharUnits SlotSizeAndAlign,
                                bool AllowHigherAlign) {
  // The size and alignment of the value that was passed directly.
  CharUnits DirectSize, DirectAlign;
  if (IsIndirect) {
    DirectSize = CGF.getPointerSize();
    DirectAlign = CGF.getPointerAlign();
  } else {
    DirectSize = ValueInfo.first;
    DirectAlign = ValueInfo.second;
  }

  // Cast the address we've calculated to the right type.
  llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy);
  if (IsIndirect)
    DirectTy = DirectTy->getPointerTo(0);

  Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy,
                                        DirectSize, DirectAlign,
                                        SlotSizeAndAlign,
                                        AllowHigherAlign);

  if (IsIndirect) {
    Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.second);
  }

  return Addr;
}

static Address emitMergePHI(CodeGenFunction &CGF,
                            Address Addr1, llvm::BasicBlock *Block1,
                            Address Addr2, llvm::BasicBlock *Block2,
                            const llvm::Twine &Name = "") {
  assert(Addr1.getType() == Addr2.getType());
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
  PHI->addIncoming(Addr1.getPointer(), Block1);
  PHI->addIncoming(Addr2.getPointer(), Block2);
  CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
  return Address(PHI, Align);
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  //   AArch64    Linux
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                     const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}

void
TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
                                             llvm::SmallString<24> &Opt) const {
  // This assumes the user is passing a library name like "rt" instead of a
  // filename like "librt.a/so", and that they don't care whether it's static
  // or dynamic.
  Opt = "-l";
  Opt += Lib;
}

unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  // OpenCL kernels are called via an explicit runtime API with arguments
  // set with clSetKernelArg(), not as normal sub-functions.
  // Return SPIR_KERNEL by default as the kernel calling convention to
  // ensure the fingerprint is fixed in such a way that each OpenCL argument
  // gets one matching argument in the produced kernel function argument
  // list. This enables a feasible implementation of clSetKernelArg() with
  // aggregates etc. If we used the default C calling convention here,
  // clSetKernelArg() might break depending on the target-specific
  // conventions; different targets might split structs passed as values
  // into multiple function arguments etc.
  return llvm::CallingConv::SPIR_KERNEL;
}

llvm::Constant *TargetCodeGenInfo::getNullPointer(
    const CodeGen::CodeGenModule &CGM, llvm::PointerType *T,
    QualType QT) const {
  return llvm::ConstantPointerNull::get(T);
}

unsigned TargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                     const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  return D ? D->getType().getAddressSpace()
           : static_cast<unsigned>(LangAS::Default);
}

llvm::Value *TargetCodeGenInfo::performAddrSpaceCast(
    CodeGen::CodeGenFunction &CGF, llvm::Value *Src, unsigned SrcAddr,
    unsigned DestAddr, llvm::Type *DestTy, bool isNonNull) const {
  // Since the target may map different address spaces in the AST to the same
  // address space, an address space conversion may end up as a bitcast.
  if (auto *C = dyn_cast<llvm::Constant>(Src))
    return performAddrSpaceCast(CGF.CGM, C, SrcAddr, DestAddr, DestTy);
  return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DestTy);
}

llvm::Constant *
TargetCodeGenInfo::performAddrSpaceCast(CodeGenModule &CGM, llvm::Constant *Src,
                                        unsigned SrcAddr, unsigned DestAddr,
                                        llvm::Type *DestTy) const {
  // Since the target may map different address spaces in the AST to the same
  // address space, an address space conversion may end up as a bitcast.
  return llvm::ConstantExpr::getPointerCast(Src, DestTy);
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isEmptyRecord(Context, I.getType(), true))
        return false;

  for (const auto *I : RD->fields())
    if (!isEmptyField(Context, I, AllowArrays))
      return false;
  return true;
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The field declaration for the single non-empty field, if
/// it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return nullptr;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return nullptr;

  const Type *Found = nullptr;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {
      // Ignore empty records.
      if (isEmptyRecord(Context, I.getType(), true))
        continue;

      // If we already found an element then this isn't a single-element
      // struct.
      if (Found)
        return nullptr;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(I.getType(), Context);
      if (!Found)
        return nullptr;
    }
  }

  // Check for single element.
  for (const auto *FD : RD->fields()) {
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return nullptr;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return nullptr;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return nullptr;

  return Found;
}

namespace {
Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                       const ABIArgInfo &AI) {
  // This default implementation defers to the llvm backend's va_arg
  // instruction. It can handle only passing arguments directly
  // (typically only handled in the backend for primitive types), or
  // aggregates passed indirectly by pointer (NOTE: if the "byval"
  // flag has ABI impact in the callee, this implementation cannot
  // work.)

  // Only a few cases are covered here at the moment -- those needed
  // by the default ABI.
  llvm::Value *Val;

  if (AI.isIndirect()) {
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(
        !AI.getIndirectRealign() &&
        "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");

    auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
    CharUnits TyAlignForABI = TyInfo.second;

    llvm::Type *BaseTy =
        llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
    llvm::Value *Addr =
        CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy);
    return Address(Addr, TyAlignForABI);
  } else {
    assert((AI.isDirect() || AI.isExtend()) &&
           "Unexpected ArgInfo Kind in generic VAArg emitter!");

    assert(!AI.getInReg() &&
           "Unexpected InReg seen in arginfo in generic VAArg emitter!");
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(!AI.getDirectOffset() &&
           "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
    assert(!AI.getCoerceToType() &&
           "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");

    Address Temp = CGF.CreateMemTemp(Ty, "varet");
    Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(), CGF.ConvertType(Ty));
    CGF.Builder.CreateStore(Val, Temp);
    return Temp;
  }
}

/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override {
    return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
  }
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    return getNaturalAlignIndirect(Ty);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

//===----------------------------------------------------------------------===//
// WebAssembly ABI Implementation
//
// This is a very simple ABI that relies a lot on DefaultABIInfo.
//===----------------------------------------------------------------------===//

class WebAssemblyABIInfo final : public DefaultABIInfo {
public:
  explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT)
      : DefaultABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo and EmitVAArg are virtual, so we
  // override them.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &Arg : FI.arguments())
      Arg.info = classifyArgumentType(Arg.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
public:
  explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new WebAssemblyABIInfo(CGT)) {}
};

/// \brief Classify argument of given type \p Ty.
ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();
    // Lower single-element structs to just pass a regular value. TODO: We
    // could do reasonable-size multiple-element structs too, using
    // getExpand(), though watch out for things like bitfields.
    if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
      return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
  }

  // Otherwise just do the default thing.
  return DefaultABIInfo::classifyArgumentType(Ty);
}

ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
  if (isAggregateTypeForABI(RetTy)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // returned by value.
    if (!getRecordArgABI(RetTy, getCXXABI())) {
      // Ignore empty structs/unions.
      if (isEmptyRecord(getContext(), RetTy, true))
        return ABIArgInfo::getIgnore();
      // Lower single-element structs to just return a regular value. TODO: We
      // could do reasonable-size multiple-element structs too, using
      // ABIArgInfo::getDirect().
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
    }
  }

  // Otherwise just do the default thing.
  return DefaultABIInfo::classifyReturnType(RetTy);
}

Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                      QualType Ty) const {
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect=*/ false,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4),
                          /*AllowHigherAlign=*/ true);
}

//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//
// This is a simplified version of the x86_32 ABI.  Arguments and return values
// are always passed on the stack.
//===----------------------------------------------------------------------===//

class PNaClABIInfo : public ABIInfo {
public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF,
                    Address VAListAddr, QualType Ty) const override;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
};

void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}

Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  // The PNaCl ABI is a bit odd, in that varargs don't use normal
  // function classification. Structs get passed directly for varargs
  // functions, through a rewriting transform in
  // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows
  // this target to actually support a va_arg instruction with an
  // aggregate type, unlike other targets.
  return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
}

/// \brief Classify argument of given type \p Ty.
ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    return getNaturalAlignIndirect(Ty);
  } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
    // Treat an enum type as its underlying type.
    Ty = EnumTy->getDecl()->getIntegerType();
  } else if (Ty->isFloatingType()) {
    // Floating-point types don't go inreg.
    return ABIArgInfo::getDirect();
  }

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // In the PNaCl ABI we always return records/structures on the stack.
  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

/// IsX86_MMXType - Return true if this is an MMX type.
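/// For example, <2 x i32>, <4 x i16>, and <8 x i8> qualify, while <1 x i64>
/// does not, since its scalar (element) size is 64 bits.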
bool IsX86_MMXType(llvm::Type *IRType) {
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}

static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
      // Invalid MMX constraint
      return nullptr;
    }

    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  }

  // No operation needed
  return Ty;
}

/// Returns true if this type can be passed in SSE registers with the
/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
      if (BT->getKind() == BuiltinType::LongDouble) {
        if (&Context.getTargetInfo().getLongDoubleFormat() ==
            &llvm::APFloat::x87DoubleExtended())
          return false;
      }
      return true;
    }
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
    // registers specially.
    unsigned VecSize = Context.getTypeSize(VT);
    if (VecSize == 128 || VecSize == 256 || VecSize == 512)
      return true;
  }
  return false;
}

/// Returns true if this aggregate is small enough to be passed in SSE
/// registers in the X86_VectorCall calling convention. Shared between x86_32
/// and x86_64.
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
}

/// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86.
static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) {
  auto AI = ABIArgInfo::getDirect(T);
  AI.setInReg(true);
  AI.setCanBeFlattened(false);
  return AI;
}

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// \brief Similar to llvm::CCState, but for Clang.
struct CCState {
  CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}

  unsigned CC;
  unsigned FreeRegs;
  unsigned FreeSSERegs;
};

enum {
  // Vectorcall only allows the first 6 parameters to be passed in registers.
  VectorcallMaxParamNumAsReg = 6
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public SwiftABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsRetSmallStructInRegABI;
  bool IsWin32StructABI;
  bool IsSoftFloatABI;
  bool IsMCUABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;

  ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;

  /// \brief Updates the number of available free registers, returns
  /// true if any registers were allocated.
  bool updateFreeRegs(QualType Ty, CCState &State) const;

  bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
                                bool &NeedsPadding) const;
  bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;

  bool canExpandIndirectArgument(QualType Ty) const;

  /// \brief Rewrite the function info so that all memory arguments use
  /// inalloca.
  void rewriteWithInAlloca(CGFunctionInfo &FI) const;

  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                           CharUnits &StackOffset, ABIArgInfo &Info,
                           QualType Type) const;
  void computeVectorCallArgs(CGFunctionInfo &FI, CCState &State,
                             bool &UsedInAlloca) const;

public:

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                bool RetSmallStructInRegABI, bool Win32StructABI,
                unsigned NumRegisterParameters, bool SoftFloatABI)
    : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
      IsRetSmallStructInRegABI(RetSmallStructInRegABI),
      IsWin32StructABI(Win32StructABI),
      IsSoftFloatABI(SoftFloatABI),
      IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
      DefaultNumRegisterParameters(NumRegisterParameters) {}

  bool shouldPassIndirectlyForSwift(CharUnits totalSize,
                                    ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    // LLVM's x86-32 lowering currently only assigns up to three
    // integer registers and three fp registers.  Oddly, it'll use up to
    // four vector registers for vectors, but those can overlap with the
    // scalar registers.
    return occupiesMoreThan(CGT, scalars, /*total*/ 3);
  }

  bool isSwiftErrorInRegister() const override {
    // x86-32 lowering does not support passing swifterror in a register.
    return false;
  }
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                          bool RetSmallStructInRegABI, bool Win32StructABI,
                          unsigned NumRegisterParameters, bool SoftFloatABI)
      : TargetCodeGenInfo(new X86_32ABIInfo(
            CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
            NumRegisterParameters, SoftFloatABI)) {}

  static bool isStructReturnInRegABI(
      const llvm::Triple &Triple, const CodeGenOptions &Opts);

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM,
                           ForDefinition_t IsForDefinition) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) |  // jmp rel8
                   (0x06 << 8) |  //           .+0x08
                   ('F' << 16) |
                   ('T' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "movl\t%ebp, %ebp"
           "\t\t## marker for objc_retainAutoreleaseReturnValue";
  }
};

}

/// Rewrite input constraint references after adding some output constraints.
/// In the case where there is one output and one input and we add one output,
/// we need to replace all operand references greater than or equal to 1:
///   mov $0, $1
///   mov eax, $1
/// The result will be:
///   mov $0, $2
///   mov eax, $2
static void rewriteInputConstraintReferences(unsigned FirstIn,
                                             unsigned NumNewOuts,
                                             std::string &AsmString) {
  std::string Buf;
  llvm::raw_string_ostream OS(Buf);
  size_t Pos = 0;
  while (Pos < AsmString.size()) {
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = AsmString.size();
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = AsmString.size();
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
    Pos = DollarEnd;
    size_t NumDollars = DollarEnd - DollarStart;
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
      // We have an operand reference.
      size_t DigitStart = Pos;
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
      if (DigitEnd == std::string::npos)
        DigitEnd = AsmString.size();
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
      unsigned OperandIndex;
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;
        OS << OperandIndex;
      } else {
        OS << OperandStr;
      }
      Pos = DigitEnd;
    }
  }
  AsmString = std::move(OS.str());
}

/// Add output constraints for EAX:EDX because they are return registers.
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());

  // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
  // larger.
  if (!Constraints.empty())
    Constraints += ',';
  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
  } else {
    // Use the 'A' constraint for EAX:EDX.
    Constraints += "=A";
    ResultRegTypes.push_back(CGF.Int64Ty);
  }

  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

  // Coerce the integer by bitcasting the return slot pointer.
  ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(),
                                                  CoerceTy->getPointerTo()));
  ResultRegDests.push_back(ReturnSlot);

  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin and MCU ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) const {
  uint64_t Size = Context.getTypeSize(Ty);

  // For i386, the type must be register sized.
  // For the MCU ABI, it only needs to be <= 8 bytes.
  if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // Structure types are returned in a register if all fields would be
  // returned in a register.
  for (const auto *FD : RT->getDecl()->fields()) {
    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }
  return true;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as the element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Check for a type which we know has a simple scalar argument-passing
  // convention without any padding.  (We're specifically looking for 32
  // and 64-bit integer and integer-equivalents, float, and double.)
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD,
                          uint64_t &Size) {
  for (const auto *FD : RD->fields()) {
    // Scalar arguments on the stack get 4 byte alignment on x86. If the
    // argument is smaller than 32-bits, expanding the struct will create
    // alignment padding.
    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems, we don't
    // know how to expand them yet, and the predicate for telling if a
    // bit-field still counts as "basic" is more complicated than what we were
    // doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }
  return true;
}

static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD,
                                 uint64_t &Size) {
  // Don't do this if there are any non-empty bases.
  for (const CXXBaseSpecifier &Base : RD->bases()) {
    if (!addBaseAndFieldSizes(Context, Base.getType()->getAsCXXRecordDecl(),
                              Size))
      return false;
  }
  if (!addFieldSizes(Context, RD, Size))
    return false;
  return true;
}

/// Test whether an argument type which is to be passed indirectly (on the
/// stack) would have the equivalent layout if it was expanded into separate
/// arguments. If so, we prefer to do the latter to avoid inhibiting
/// optimizations.
bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  uint64_t Size = 0;
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (!IsWin32StructABI) {
      // On non-Windows, we have to conservatively match our old bitcode
      // prototypes in order to be ABI-compatible at the bitcode level.
      if (!CXXRD->isCLike())
        return false;
    } else {
      // Don't do this for dynamic classes.
      if (CXXRD->isDynamicClass())
        return false;
    }
    if (!addBaseAndFieldSizes(getContext(), CXXRD, Size))
      return false;
  } else {
    if (!addFieldSizes(getContext(), RD, Size))
      return false;
  }

  // We can do this if there was no alignment padding.
  return Size == getContext().getTypeSize(Ty);
}

ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy,
                                                  CCState &State) const {
  // If the return value is indirect, then the hidden argument is consuming
  // one integer register.
  if (State.FreeRegs) {
    --State.FreeRegs;
    if (!IsMCUABI)
      return getNaturalAlignIndirectInReg(RetTy);
  }
  return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             CCState &State) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if ((State.CC == llvm::CallingConv::X86_VectorCall ||
       State.CC == llvm::CallingConv::X86_RegCall) &&
      isHomogeneousAggregate(RetTy, Base, NumElts)) {
    // The LLVM struct type for such an aggregate should lower properly.
    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return getIndirectReturnResult(RetTy, State);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectReturnResult(RetTy, State);
    }

    // If specified, structs and unions are always indirect.
    if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
      return getIndirectReturnResult(RetTy, State);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), RetTy, true))
      return ABIArgInfo::getIgnore();

    // Small structures which are register sized are generally returned
    // in a register.
    if (shouldReturnTypeInRegister(RetTy, getContext())) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
    }

    return getIndirectReturnResult(RetTy, State);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isRecordWithSSEVectorType(Context, I.getType()))
        return false;

  for (const auto *i : RD->fields()) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // If the alignment is less than or equal to the minimum ABI alignment,
  // just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      if (!IsMCUABI)
        return getNaturalAlignIndirectInReg(Ty);
    }
    return getNaturalAlignIndirect(Ty, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  bool Realign = TypeAlign > StackAlign;
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
                                 /*ByVal=*/true, Realign);
}

X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}

bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
  if (!IsSoftFloatABI) {
    Class C = classify(Ty);
    if (C == Float)
      return false;
  }

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)
    return false;

  if (!IsMCUABI) {
    if (SizeInRegs > State.FreeRegs) {
      State.FreeRegs = 0;
      return false;
    }
  } else {
    // The MCU psABI allows passing parameters in-reg even if there are
    // earlier parameters that are passed on the stack. Also,
    // it does not allow passing >8-byte structs in-register,
    // even if there are 3 free registers available.
    if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
      return false;
  }

  State.FreeRegs -= SizeInRegs;
  return true;
}

bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
                                             bool &InReg,
                                             bool &NeedsPadding) const {
  // On Windows, aggregates other than HFAs are never passed in registers, and
  // they do not consume register slots. Homogeneous floating-point aggregates
  // (HFAs) have already been dealt with at this point.
  if (IsWin32StructABI && isAggregateTypeForABI(Ty))
    return false;

  NeedsPadding = false;
  InReg = !IsMCUABI;

  if (!updateFreeRegs(Ty, State))
    return false;

  if (IsMCUABI)
    return true;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall ||
      State.CC == llvm::CallingConv::X86_RegCall) {
    if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
      NeedsPadding = true;

    return false;
  }

  return true;
}

bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
  if (!updateFreeRegs(Ty, State))
    return false;

  if (IsMCUABI)
    return false;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall ||
      State.CC == llvm::CallingConv::X86_RegCall) {
    if (getContext().getTypeSize(Ty) > 32)
      return false;

    return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
            Ty->isReferenceType());
  }

  return true;
}

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               CCState &State) const {
  // FIXME: Set alignment on indirect arguments.

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, false, State);
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      // The field index doesn't matter, we'll fix it up later.
1579 return ABIArgInfo::getInAlloca(/*FieldIndex=*/0); 1580 } 1581 } 1582 1583 // Regcall uses the concept of a homogenous vector aggregate, similar 1584 // to other targets. 1585 const Type *Base = nullptr; 1586 uint64_t NumElts = 0; 1587 if (State.CC == llvm::CallingConv::X86_RegCall && 1588 isHomogeneousAggregate(Ty, Base, NumElts)) { 1589 1590 if (State.FreeSSERegs >= NumElts) { 1591 State.FreeSSERegs -= NumElts; 1592 if (Ty->isBuiltinType() || Ty->isVectorType()) 1593 return ABIArgInfo::getDirect(); 1594 return ABIArgInfo::getExpand(); 1595 } 1596 return getIndirectResult(Ty, /*ByVal=*/false, State); 1597 } 1598 1599 if (isAggregateTypeForABI(Ty)) { 1600 // Structures with flexible arrays are always indirect. 1601 // FIXME: This should not be byval! 1602 if (RT && RT->getDecl()->hasFlexibleArrayMember()) 1603 return getIndirectResult(Ty, true, State); 1604 1605 // Ignore empty structs/unions on non-Windows. 1606 if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true)) 1607 return ABIArgInfo::getIgnore(); 1608 1609 llvm::LLVMContext &LLVMContext = getVMContext(); 1610 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext); 1611 bool NeedsPadding = false; 1612 bool InReg; 1613 if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) { 1614 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32; 1615 SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32); 1616 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements); 1617 if (InReg) 1618 return ABIArgInfo::getDirectInReg(Result); 1619 else 1620 return ABIArgInfo::getDirect(Result); 1621 } 1622 llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr; 1623 1624 // Expand small (<= 128-bit) record types when we know that the stack layout 1625 // of those arguments will match the struct. This is important because the 1626 // LLVM backend isn't smart enough to remove byval, which inhibits many 1627 // optimizations. 1628 // Don't do this for the MCU if there are still free integer registers 1629 // (see X86_64 ABI for full explanation). 1630 if (getContext().getTypeSize(Ty) <= 4 * 32 && 1631 (!IsMCUABI || State.FreeRegs == 0) && canExpandIndirectArgument(Ty)) 1632 return ABIArgInfo::getExpandWithPadding( 1633 State.CC == llvm::CallingConv::X86_FastCall || 1634 State.CC == llvm::CallingConv::X86_VectorCall || 1635 State.CC == llvm::CallingConv::X86_RegCall, 1636 PaddingType); 1637 1638 return getIndirectResult(Ty, true, State); 1639 } 1640 1641 if (const VectorType *VT = Ty->getAs<VectorType>()) { 1642 // On Darwin, some vectors are passed in memory, we handle this by passing 1643 // it as an i8/i16/i32/i64. 
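// For example, a 32-bit <2 x i16> vector is coerced to a single i32, and a
// one-element 64-bit vector such as <1 x i64> is coerced to an i64.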
1644 if (IsDarwinVectorABI) { 1645 uint64_t Size = getContext().getTypeSize(Ty); 1646 if ((Size == 8 || Size == 16 || Size == 32) || 1647 (Size == 64 && VT->getNumElements() == 1)) 1648 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 1649 Size)); 1650 } 1651 1652 if (IsX86_MMXType(CGT.ConvertType(Ty))) 1653 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64)); 1654 1655 return ABIArgInfo::getDirect(); 1656 } 1657 1658 1659 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 1660 Ty = EnumTy->getDecl()->getIntegerType(); 1661 1662 bool InReg = shouldPrimitiveUseInReg(Ty, State); 1663 1664 if (Ty->isPromotableIntegerType()) { 1665 if (InReg) 1666 return ABIArgInfo::getExtendInReg(); 1667 return ABIArgInfo::getExtend(); 1668 } 1669 1670 if (InReg) 1671 return ABIArgInfo::getDirectInReg(); 1672 return ABIArgInfo::getDirect(); 1673 } 1674 1675 void X86_32ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI, CCState &State, 1676 bool &UsedInAlloca) const { 1677 // Vectorcall x86 works subtly different than in x64, so the format is 1678 // a bit different than the x64 version. First, all vector types (not HVAs) 1679 // are assigned, with the first 6 ending up in the YMM0-5 or XMM0-5 registers. 1680 // This differs from the x64 implementation, where the first 6 by INDEX get 1681 // registers. 1682 // After that, integers AND HVAs are assigned Left to Right in the same pass. 1683 // Integers are passed as ECX/EDX if one is available (in order). HVAs will 1684 // first take up the remaining YMM/XMM registers. If insufficient registers 1685 // remain but an integer register (ECX/EDX) is available, it will be passed 1686 // in that, else, on the stack. 1687 for (auto &I : FI.arguments()) { 1688 // First pass do all the vector types. 1689 const Type *Base = nullptr; 1690 uint64_t NumElts = 0; 1691 const QualType& Ty = I.type; 1692 if ((Ty->isVectorType() || Ty->isBuiltinType()) && 1693 isHomogeneousAggregate(Ty, Base, NumElts)) { 1694 if (State.FreeSSERegs >= NumElts) { 1695 State.FreeSSERegs -= NumElts; 1696 I.info = ABIArgInfo::getDirect(); 1697 } else { 1698 I.info = classifyArgumentType(Ty, State); 1699 } 1700 UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca); 1701 } 1702 } 1703 1704 for (auto &I : FI.arguments()) { 1705 // Second pass, do the rest! 1706 const Type *Base = nullptr; 1707 uint64_t NumElts = 0; 1708 const QualType& Ty = I.type; 1709 bool IsHva = isHomogeneousAggregate(Ty, Base, NumElts); 1710 1711 if (IsHva && !Ty->isVectorType() && !Ty->isBuiltinType()) { 1712 // Assign true HVAs (non vector/native FP types). 1713 if (State.FreeSSERegs >= NumElts) { 1714 State.FreeSSERegs -= NumElts; 1715 I.info = getDirectX86Hva(); 1716 } else { 1717 I.info = getIndirectResult(Ty, /*ByVal=*/false, State); 1718 } 1719 } else if (!IsHva) { 1720 // Assign all Non-HVAs, so this will exclude Vector/FP args. 
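// (Vector and native FP arguments were already assigned registers in the
// first pass, and true HVAs were handled just above, so only non-HVA
// arguments (integers, pointers and ordinary aggregates) reach this point.)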
1721 I.info = classifyArgumentType(Ty, State); 1722 UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca); 1723 } 1724 } 1725 } 1726 1727 void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const { 1728 CCState State(FI.getCallingConvention()); 1729 if (IsMCUABI) 1730 State.FreeRegs = 3; 1731 else if (State.CC == llvm::CallingConv::X86_FastCall) 1732 State.FreeRegs = 2; 1733 else if (State.CC == llvm::CallingConv::X86_VectorCall) { 1734 State.FreeRegs = 2; 1735 State.FreeSSERegs = 6; 1736 } else if (FI.getHasRegParm()) 1737 State.FreeRegs = FI.getRegParm(); 1738 else if (State.CC == llvm::CallingConv::X86_RegCall) { 1739 State.FreeRegs = 5; 1740 State.FreeSSERegs = 8; 1741 } else 1742 State.FreeRegs = DefaultNumRegisterParameters; 1743 1744 if (!getCXXABI().classifyReturnType(FI)) { 1745 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State); 1746 } else if (FI.getReturnInfo().isIndirect()) { 1747 // The C++ ABI is not aware of register usage, so we have to check if the 1748 // return value was sret and put it in a register ourselves if appropriate. 1749 if (State.FreeRegs) { 1750 --State.FreeRegs; // The sret parameter consumes a register. 1751 if (!IsMCUABI) 1752 FI.getReturnInfo().setInReg(true); 1753 } 1754 } 1755 1756 // The chain argument effectively gives us another free register. 1757 if (FI.isChainCall()) 1758 ++State.FreeRegs; 1759 1760 bool UsedInAlloca = false; 1761 if (State.CC == llvm::CallingConv::X86_VectorCall) { 1762 computeVectorCallArgs(FI, State, UsedInAlloca); 1763 } else { 1764 // If not vectorcall, revert to normal behavior. 1765 for (auto &I : FI.arguments()) { 1766 I.info = classifyArgumentType(I.type, State); 1767 UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca); 1768 } 1769 } 1770 1771 // If we needed to use inalloca for any argument, do a second pass and rewrite 1772 // all the memory arguments to use inalloca. 1773 if (UsedInAlloca) 1774 rewriteWithInAlloca(FI); 1775 } 1776 1777 void 1778 X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields, 1779 CharUnits &StackOffset, ABIArgInfo &Info, 1780 QualType Type) const { 1781 // Arguments are always 4-byte-aligned. 1782 CharUnits FieldAlign = CharUnits::fromQuantity(4); 1783 1784 assert(StackOffset.isMultipleOf(FieldAlign) && "unaligned inalloca struct"); 1785 Info = ABIArgInfo::getInAlloca(FrameFields.size()); 1786 FrameFields.push_back(CGT.ConvertTypeForMem(Type)); 1787 StackOffset += getContext().getTypeSizeInChars(Type); 1788 1789 // Insert padding bytes to respect alignment. 1790 CharUnits FieldEnd = StackOffset; 1791 StackOffset = FieldEnd.alignTo(FieldAlign); 1792 if (StackOffset != FieldEnd) { 1793 CharUnits NumBytes = StackOffset - FieldEnd; 1794 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext()); 1795 Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity()); 1796 FrameFields.push_back(Ty); 1797 } 1798 } 1799 1800 static bool isArgInAlloca(const ABIArgInfo &Info) { 1801 // Leave ignored and inreg arguments alone. 1802 switch (Info.getKind()) { 1803 case ABIArgInfo::InAlloca: 1804 return true; 1805 case ABIArgInfo::Indirect: 1806 assert(Info.getIndirectByVal()); 1807 return true; 1808 case ABIArgInfo::Ignore: 1809 return false; 1810 case ABIArgInfo::Direct: 1811 case ABIArgInfo::Extend: 1812 if (Info.getInReg()) 1813 return false; 1814 return true; 1815 case ABIArgInfo::Expand: 1816 case ABIArgInfo::CoerceAndExpand: 1817 // These are aggregate types which are never passed in registers when 1818 // inalloca is involved. 
1819 return true; 1820 } 1821 llvm_unreachable("invalid enum"); 1822 } 1823 1824 void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const { 1825 assert(IsWin32StructABI && "inalloca only supported on win32"); 1826 1827 // Build a packed struct type for all of the arguments in memory. 1828 SmallVector<llvm::Type *, 6> FrameFields; 1829 1830 // The stack alignment is always 4. 1831 CharUnits StackAlign = CharUnits::fromQuantity(4); 1832 1833 CharUnits StackOffset; 1834 CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end(); 1835 1836 // Put 'this' into the struct before 'sret', if necessary. 1837 bool IsThisCall = 1838 FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall; 1839 ABIArgInfo &Ret = FI.getReturnInfo(); 1840 if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall && 1841 isArgInAlloca(I->info)) { 1842 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type); 1843 ++I; 1844 } 1845 1846 // Put the sret parameter into the inalloca struct if it's in memory. 1847 if (Ret.isIndirect() && !Ret.getInReg()) { 1848 CanQualType PtrTy = getContext().getPointerType(FI.getReturnType()); 1849 addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy); 1850 // On Windows, the hidden sret parameter is always returned in eax. 1851 Ret.setInAllocaSRet(IsWin32StructABI); 1852 } 1853 1854 // Skip the 'this' parameter in ecx. 1855 if (IsThisCall) 1856 ++I; 1857 1858 // Put arguments passed in memory into the struct. 1859 for (; I != E; ++I) { 1860 if (isArgInAlloca(I->info)) 1861 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type); 1862 } 1863 1864 FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields, 1865 /*isPacked=*/true), 1866 StackAlign); 1867 } 1868 1869 Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF, 1870 Address VAListAddr, QualType Ty) const { 1871 1872 auto TypeInfo = getContext().getTypeInfoInChars(Ty); 1873 1874 // x86-32 changes the alignment of certain arguments on the stack. 1875 // 1876 // Just messing with TypeInfo like this works because we never pass 1877 // anything indirectly. 1878 TypeInfo.second = CharUnits::fromQuantity( 1879 getTypeStackAlignInBytes(Ty, TypeInfo.second.getQuantity())); 1880 1881 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, 1882 TypeInfo, CharUnits::fromQuantity(4), 1883 /*AllowHigherAlign*/ true); 1884 } 1885 1886 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI( 1887 const llvm::Triple &Triple, const CodeGenOptions &Opts) { 1888 assert(Triple.getArch() == llvm::Triple::x86); 1889 1890 switch (Opts.getStructReturnConvention()) { 1891 case CodeGenOptions::SRCK_Default: 1892 break; 1893 case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return 1894 return false; 1895 case CodeGenOptions::SRCK_InRegs: // -freg-struct-return 1896 return true; 1897 } 1898 1899 if (Triple.isOSDarwin() || Triple.isOSIAMCU()) 1900 return true; 1901 1902 switch (Triple.getOS()) { 1903 case llvm::Triple::DragonFly: 1904 case llvm::Triple::FreeBSD: 1905 case llvm::Triple::OpenBSD: 1906 case llvm::Triple::Win32: 1907 return true; 1908 default: 1909 return false; 1910 } 1911 } 1912 1913 void X86_32TargetCodeGenInfo::setTargetAttributes( 1914 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM, 1915 ForDefinition_t IsForDefinition) const { 1916 if (!IsForDefinition) 1917 return; 1918 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 1919 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { 1920 // Get the LLVM function. 
1921 llvm::Function *Fn = cast<llvm::Function>(GV); 1922 1923 // Now add the 'alignstack' attribute with a value of 16. 1924 llvm::AttrBuilder B; 1925 B.addStackAlignmentAttr(16); 1926 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B); 1927 } 1928 if (FD->hasAttr<AnyX86InterruptAttr>()) { 1929 llvm::Function *Fn = cast<llvm::Function>(GV); 1930 Fn->setCallingConv(llvm::CallingConv::X86_INTR); 1931 } 1932 } 1933 } 1934 1935 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable( 1936 CodeGen::CodeGenFunction &CGF, 1937 llvm::Value *Address) const { 1938 CodeGen::CGBuilderTy &Builder = CGF.Builder; 1939 1940 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 1941 1942 // 0-7 are the eight integer registers; the order is different 1943 // on Darwin (for EH), but the range is the same. 1944 // 8 is %eip. 1945 AssignToArrayRange(Builder, Address, Four8, 0, 8); 1946 1947 if (CGF.CGM.getTarget().getTriple().isOSDarwin()) { 1948 // 12-16 are st(0..4). Not sure why we stop at 4. 1949 // These have size 16, which is sizeof(long double) on 1950 // platforms with 8-byte alignment for that type. 1951 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16); 1952 AssignToArrayRange(Builder, Address, Sixteen8, 12, 16); 1953 1954 } else { 1955 // 9 is %eflags, which doesn't get a size on Darwin for some 1956 // reason. 1957 Builder.CreateAlignedStore( 1958 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9), 1959 CharUnits::One()); 1960 1961 // 11-16 are st(0..5). Not sure why we stop at 5. 1962 // These have size 12, which is sizeof(long double) on 1963 // platforms with 4-byte alignment for that type. 1964 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12); 1965 AssignToArrayRange(Builder, Address, Twelve8, 11, 16); 1966 } 1967 1968 return false; 1969 } 1970 1971 //===----------------------------------------------------------------------===// 1972 // X86-64 ABI Implementation 1973 //===----------------------------------------------------------------------===// 1974 1975 1976 namespace { 1977 /// The AVX ABI level for X86 targets. 1978 enum class X86AVXABILevel { 1979 None, 1980 AVX, 1981 AVX512 1982 }; 1983 1984 /// \p returns the size in bits of the largest (native) vector for \p AVXLevel. 1985 static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) { 1986 switch (AVXLevel) { 1987 case X86AVXABILevel::AVX512: 1988 return 512; 1989 case X86AVXABILevel::AVX: 1990 return 256; 1991 case X86AVXABILevel::None: 1992 return 128; 1993 } 1994 llvm_unreachable("Unknown AVXLevel"); 1995 } 1996 1997 /// X86_64ABIInfo - The X86_64 ABI information. 1998 class X86_64ABIInfo : public SwiftABIInfo { 1999 enum Class { 2000 Integer = 0, 2001 SSE, 2002 SSEUp, 2003 X87, 2004 X87Up, 2005 ComplexX87, 2006 NoClass, 2007 Memory 2008 }; 2009 2010 /// merge - Implement the X86_64 ABI merging algorithm. 2011 /// 2012 /// Merge an accumulating classification \arg Accum with a field 2013 /// classification \arg Field. 2014 /// 2015 /// \param Accum - The accumulating classification. This should 2016 /// always be either NoClass or the result of a previous merge 2017 /// call. In addition, this should never be Memory (the caller 2018 /// should just return Memory for the aggregate). 2019 static Class merge(Class Accum, Class Field); 2020 2021 /// postMerge - Implement the X86_64 ABI post merging algorithm. 2022 /// 2023 /// Post merger cleanup, reduces a malformed Hi and Lo pair to 2024 /// final MEMORY or SSE classes when necessary. 
2025 ///
2026 /// \param AggregateSize - The size of the current aggregate in
2027 /// the classification process.
2028 ///
2029 /// \param Lo - The classification for the parts of the type
2030 /// residing in the low word of the containing object.
2031 ///
2032 /// \param Hi - The classification for the parts of the type
2033 /// residing in the higher words of the containing object.
2034 ///
2035 void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
2036
2037 /// classify - Determine the x86_64 register classes in which the
2038 /// given type T should be passed.
2039 ///
2040 /// \param Lo - The classification for the parts of the type
2041 /// residing in the low word of the containing object.
2042 ///
2043 /// \param Hi - The classification for the parts of the type
2044 /// residing in the high word of the containing object.
2045 ///
2046 /// \param OffsetBase - The bit offset of this type in the
2047 /// containing object. Some parameters are classified differently
2048 /// depending on whether they straddle an eightbyte boundary.
2049 ///
2050 /// \param isNamedArg - Whether the argument in question is a "named"
2051 /// argument, as used in AMD64-ABI 3.5.7.
2052 ///
2053 /// If a word is unused its result will be NoClass; if a type should
2054 /// be passed in Memory then at least the classification of \arg Lo
2055 /// will be Memory.
2056 ///
2057 /// The \arg Lo class will be NoClass iff the argument is ignored.
2058 ///
2059 /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
2060 /// also be ComplexX87.
2061 void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
2062 bool isNamedArg) const;
2063
2064 llvm::Type *GetByteVectorType(QualType Ty) const;
2065 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
2066 unsigned IROffset, QualType SourceTy,
2067 unsigned SourceOffset) const;
2068 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
2069 unsigned IROffset, QualType SourceTy,
2070 unsigned SourceOffset) const;
2071
2072 /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
2073 /// result such that the value will be returned in memory.
2074 ABIArgInfo getIndirectReturnResult(QualType Ty) const;
2075
2076 /// getIndirectResult - Given a source type \arg Ty, return a suitable result
2077 /// such that the argument will be passed in memory.
2078 ///
2079 /// \param freeIntRegs - The number of free integer registers remaining
2080 /// available.
2081 ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
2082
2083 ABIArgInfo classifyReturnType(QualType RetTy) const;
2084
2085 ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
2086 unsigned &neededInt, unsigned &neededSSE,
2087 bool isNamedArg) const;
2088
2089 ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
2090 unsigned &NeededSSE) const;
2091
2092 ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
2093 unsigned &NeededSSE) const;
2094
2095 bool IsIllegalVectorType(QualType Ty) const;
2096
2097 /// The 0.98 ABI revision clarified a lot of ambiguities,
2098 /// unfortunately in ways that were not always consistent with
2099 /// certain previous compilers. In particular, platforms which
2100 /// required strict binary compatibility with older versions of GCC
2101 /// may need to exempt themselves.
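/// (Darwin currently opts out, as the implementation below shows.)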
2102 bool honorsRevision0_98() const { 2103 return !getTarget().getTriple().isOSDarwin(); 2104 } 2105 2106 /// GCC classifies <1 x long long> as SSE but compatibility with older clang 2107 // compilers require us to classify it as INTEGER. 2108 bool classifyIntegerMMXAsSSE() const { 2109 const llvm::Triple &Triple = getTarget().getTriple(); 2110 if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4) 2111 return false; 2112 if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10) 2113 return false; 2114 return true; 2115 } 2116 2117 X86AVXABILevel AVXLevel; 2118 // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on 2119 // 64-bit hardware. 2120 bool Has64BitPointers; 2121 2122 public: 2123 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) : 2124 SwiftABIInfo(CGT), AVXLevel(AVXLevel), 2125 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) { 2126 } 2127 2128 bool isPassedUsingAVXType(QualType type) const { 2129 unsigned neededInt, neededSSE; 2130 // The freeIntRegs argument doesn't matter here. 2131 ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE, 2132 /*isNamedArg*/true); 2133 if (info.isDirect()) { 2134 llvm::Type *ty = info.getCoerceToType(); 2135 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty)) 2136 return (vectorTy->getBitWidth() > 128); 2137 } 2138 return false; 2139 } 2140 2141 void computeInfo(CGFunctionInfo &FI) const override; 2142 2143 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 2144 QualType Ty) const override; 2145 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, 2146 QualType Ty) const override; 2147 2148 bool has64BitPointers() const { 2149 return Has64BitPointers; 2150 } 2151 2152 bool shouldPassIndirectlyForSwift(CharUnits totalSize, 2153 ArrayRef<llvm::Type*> scalars, 2154 bool asReturnValue) const override { 2155 return occupiesMoreThan(CGT, scalars, /*total*/ 4); 2156 } 2157 bool isSwiftErrorInRegister() const override { 2158 return true; 2159 } 2160 }; 2161 2162 /// WinX86_64ABIInfo - The Windows X86_64 ABI information. 2163 class WinX86_64ABIInfo : public SwiftABIInfo { 2164 public: 2165 WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) 2166 : SwiftABIInfo(CGT), 2167 IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {} 2168 2169 void computeInfo(CGFunctionInfo &FI) const override; 2170 2171 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 2172 QualType Ty) const override; 2173 2174 bool isHomogeneousAggregateBaseType(QualType Ty) const override { 2175 // FIXME: Assumes vectorcall is in use. 2176 return isX86VectorTypeForVectorCall(getContext(), Ty); 2177 } 2178 2179 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 2180 uint64_t NumMembers) const override { 2181 // FIXME: Assumes vectorcall is in use. 
2182 return isX86VectorCallAggregateSmallEnough(NumMembers);
2183 }
2184
2185 bool shouldPassIndirectlyForSwift(CharUnits totalSize,
2186 ArrayRef<llvm::Type *> scalars,
2187 bool asReturnValue) const override {
2188 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
2189 }
2190
2191 bool isSwiftErrorInRegister() const override {
2192 return true;
2193 }
2194
2195 private:
2196 ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,
2197 bool IsVectorCall, bool IsRegCall) const;
2198 ABIArgInfo reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,
2199 const ABIArgInfo &current) const;
2200 void computeVectorCallArgs(CGFunctionInfo &FI, unsigned FreeSSERegs,
2201 bool IsVectorCall, bool IsRegCall) const;
2202
2203 bool IsMingw64;
2204 };
2205
2206 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2207 public:
2208 X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2209 : TargetCodeGenInfo(new X86_64ABIInfo(CGT, AVXLevel)) {}
2210
2211 const X86_64ABIInfo &getABIInfo() const {
2212 return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
2213 }
2214
2215 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2216 return 7;
2217 }
2218
2219 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2220 llvm::Value *Address) const override {
2221 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2222
2223 // 0-15 are the 16 integer registers.
2224 // 16 is %rip.
2225 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2226 return false;
2227 }
2228
2229 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
2230 StringRef Constraint,
2231 llvm::Type* Ty) const override {
2232 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
2233 }
2234
2235 bool isNoProtoCallVariadic(const CallArgList &args,
2236 const FunctionNoProtoType *fnType) const override {
2237 // The default CC on x86-64 sets %al to the number of SSE
2238 // registers used, and GCC sets this when calling an unprototyped
2239 // function, so we override the default behavior. However, don't do
2240 // that when AVX types are involved: the ABI explicitly states it is
2241 // undefined, and it doesn't work in practice because of how the ABI
2242 // defines varargs anyway.
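// For example, for an unprototyped 'void f(); ... f(2.0);' the double lands
// in %xmm0, so %al is expected to hold the number of SSE registers used (1);
// treating the call as variadic (returning true here) makes us emit that,
// matching GCC, unless an AVX-typed argument is involved.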
2243 if (fnType->getCallConv() == CC_C) { 2244 bool HasAVXType = false; 2245 for (CallArgList::const_iterator 2246 it = args.begin(), ie = args.end(); it != ie; ++it) { 2247 if (getABIInfo().isPassedUsingAVXType(it->Ty)) { 2248 HasAVXType = true; 2249 break; 2250 } 2251 } 2252 2253 if (!HasAVXType) 2254 return true; 2255 } 2256 2257 return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType); 2258 } 2259 2260 llvm::Constant * 2261 getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override { 2262 unsigned Sig; 2263 if (getABIInfo().has64BitPointers()) 2264 Sig = (0xeb << 0) | // jmp rel8 2265 (0x0a << 8) | // .+0x0c 2266 ('F' << 16) | 2267 ('T' << 24); 2268 else 2269 Sig = (0xeb << 0) | // jmp rel8 2270 (0x06 << 8) | // .+0x08 2271 ('F' << 16) | 2272 ('T' << 24); 2273 return llvm::ConstantInt::get(CGM.Int32Ty, Sig); 2274 } 2275 2276 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2277 CodeGen::CodeGenModule &CGM, 2278 ForDefinition_t IsForDefinition) const override { 2279 if (!IsForDefinition) 2280 return; 2281 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 2282 if (FD->hasAttr<AnyX86InterruptAttr>()) { 2283 llvm::Function *Fn = cast<llvm::Function>(GV); 2284 Fn->setCallingConv(llvm::CallingConv::X86_INTR); 2285 } 2286 } 2287 } 2288 }; 2289 2290 class PS4TargetCodeGenInfo : public X86_64TargetCodeGenInfo { 2291 public: 2292 PS4TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) 2293 : X86_64TargetCodeGenInfo(CGT, AVXLevel) {} 2294 2295 void getDependentLibraryOption(llvm::StringRef Lib, 2296 llvm::SmallString<24> &Opt) const override { 2297 Opt = "\01"; 2298 // If the argument contains a space, enclose it in quotes. 2299 if (Lib.find(" ") != StringRef::npos) 2300 Opt += "\"" + Lib.str() + "\""; 2301 else 2302 Opt += Lib; 2303 } 2304 }; 2305 2306 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) { 2307 // If the argument does not end in .lib, automatically add the suffix. 2308 // If the argument contains a space, enclose it in quotes. 2309 // This matches the behavior of MSVC. 2310 bool Quote = (Lib.find(" ") != StringRef::npos); 2311 std::string ArgStr = Quote ? "\"" : ""; 2312 ArgStr += Lib; 2313 if (!Lib.endswith_lower(".lib")) 2314 ArgStr += ".lib"; 2315 ArgStr += Quote ? 
"\"" : ""; 2316 return ArgStr; 2317 } 2318 2319 class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo { 2320 public: 2321 WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, 2322 bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI, 2323 unsigned NumRegisterParameters) 2324 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI, 2325 Win32StructABI, NumRegisterParameters, false) {} 2326 2327 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2328 CodeGen::CodeGenModule &CGM, 2329 ForDefinition_t IsForDefinition) const override; 2330 2331 void getDependentLibraryOption(llvm::StringRef Lib, 2332 llvm::SmallString<24> &Opt) const override { 2333 Opt = "/DEFAULTLIB:"; 2334 Opt += qualifyWindowsLibrary(Lib); 2335 } 2336 2337 void getDetectMismatchOption(llvm::StringRef Name, 2338 llvm::StringRef Value, 2339 llvm::SmallString<32> &Opt) const override { 2340 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 2341 } 2342 }; 2343 2344 static void addStackProbeSizeTargetAttribute(const Decl *D, 2345 llvm::GlobalValue *GV, 2346 CodeGen::CodeGenModule &CGM) { 2347 if (D && isa<FunctionDecl>(D)) { 2348 if (CGM.getCodeGenOpts().StackProbeSize != 4096) { 2349 llvm::Function *Fn = cast<llvm::Function>(GV); 2350 2351 Fn->addFnAttr("stack-probe-size", 2352 llvm::utostr(CGM.getCodeGenOpts().StackProbeSize)); 2353 } 2354 } 2355 } 2356 2357 void WinX86_32TargetCodeGenInfo::setTargetAttributes( 2358 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM, 2359 ForDefinition_t IsForDefinition) const { 2360 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM, IsForDefinition); 2361 if (!IsForDefinition) 2362 return; 2363 addStackProbeSizeTargetAttribute(D, GV, CGM); 2364 } 2365 2366 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo { 2367 public: 2368 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, 2369 X86AVXABILevel AVXLevel) 2370 : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {} 2371 2372 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2373 CodeGen::CodeGenModule &CGM, 2374 ForDefinition_t IsForDefinition) const override; 2375 2376 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { 2377 return 7; 2378 } 2379 2380 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2381 llvm::Value *Address) const override { 2382 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8); 2383 2384 // 0-15 are the 16 integer registers. 2385 // 16 is %rip. 
2386 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16); 2387 return false; 2388 } 2389 2390 void getDependentLibraryOption(llvm::StringRef Lib, 2391 llvm::SmallString<24> &Opt) const override { 2392 Opt = "/DEFAULTLIB:"; 2393 Opt += qualifyWindowsLibrary(Lib); 2394 } 2395 2396 void getDetectMismatchOption(llvm::StringRef Name, 2397 llvm::StringRef Value, 2398 llvm::SmallString<32> &Opt) const override { 2399 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 2400 } 2401 }; 2402 2403 void WinX86_64TargetCodeGenInfo::setTargetAttributes( 2404 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM, 2405 ForDefinition_t IsForDefinition) const { 2406 TargetCodeGenInfo::setTargetAttributes(D, GV, CGM, IsForDefinition); 2407 if (!IsForDefinition) 2408 return; 2409 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 2410 if (FD->hasAttr<AnyX86InterruptAttr>()) { 2411 llvm::Function *Fn = cast<llvm::Function>(GV); 2412 Fn->setCallingConv(llvm::CallingConv::X86_INTR); 2413 } 2414 } 2415 2416 addStackProbeSizeTargetAttribute(D, GV, CGM); 2417 } 2418 } 2419 2420 void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo, 2421 Class &Hi) const { 2422 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done: 2423 // 2424 // (a) If one of the classes is Memory, the whole argument is passed in 2425 // memory. 2426 // 2427 // (b) If X87UP is not preceded by X87, the whole argument is passed in 2428 // memory. 2429 // 2430 // (c) If the size of the aggregate exceeds two eightbytes and the first 2431 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole 2432 // argument is passed in memory. NOTE: This is necessary to keep the 2433 // ABI working for processors that don't support the __m256 type. 2434 // 2435 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE. 2436 // 2437 // Some of these are enforced by the merging logic. Others can arise 2438 // only with unions; for example: 2439 // union { _Complex double; unsigned; } 2440 // 2441 // Note that clauses (b) and (c) were added in 0.98. 2442 // 2443 if (Hi == Memory) 2444 Lo = Memory; 2445 if (Hi == X87Up && Lo != X87 && honorsRevision0_98()) 2446 Lo = Memory; 2447 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp)) 2448 Lo = Memory; 2449 if (Hi == SSEUp && Lo != SSE) 2450 Hi = SSE; 2451 } 2452 2453 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) { 2454 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is 2455 // classified recursively so that always two fields are 2456 // considered. The resulting class is calculated according to 2457 // the classes of the fields in the eightbyte: 2458 // 2459 // (a) If both classes are equal, this is the resulting class. 2460 // 2461 // (b) If one of the classes is NO_CLASS, the resulting class is 2462 // the other class. 2463 // 2464 // (c) If one of the classes is MEMORY, the result is the MEMORY 2465 // class. 2466 // 2467 // (d) If one of the classes is INTEGER, the result is the 2468 // INTEGER. 2469 // 2470 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class, 2471 // MEMORY is used as class. 2472 // 2473 // (f) Otherwise class SSE is used. 2474 2475 // Accum should never be memory (we should have returned) or 2476 // ComplexX87 (because this cannot be passed in a structure). 
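// For example, merging Integer with SSE yields Integer (rule (d)), and
// merging NoClass with SSE yields SSE (rule (b)).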
2477 assert((Accum != Memory && Accum != ComplexX87) && 2478 "Invalid accumulated classification during merge."); 2479 if (Accum == Field || Field == NoClass) 2480 return Accum; 2481 if (Field == Memory) 2482 return Memory; 2483 if (Accum == NoClass) 2484 return Field; 2485 if (Accum == Integer || Field == Integer) 2486 return Integer; 2487 if (Field == X87 || Field == X87Up || Field == ComplexX87 || 2488 Accum == X87 || Accum == X87Up) 2489 return Memory; 2490 return SSE; 2491 } 2492 2493 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, 2494 Class &Lo, Class &Hi, bool isNamedArg) const { 2495 // FIXME: This code can be simplified by introducing a simple value class for 2496 // Class pairs with appropriate constructor methods for the various 2497 // situations. 2498 2499 // FIXME: Some of the split computations are wrong; unaligned vectors 2500 // shouldn't be passed in registers for example, so there is no chance they 2501 // can straddle an eightbyte. Verify & simplify. 2502 2503 Lo = Hi = NoClass; 2504 2505 Class &Current = OffsetBase < 64 ? Lo : Hi; 2506 Current = Memory; 2507 2508 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 2509 BuiltinType::Kind k = BT->getKind(); 2510 2511 if (k == BuiltinType::Void) { 2512 Current = NoClass; 2513 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) { 2514 Lo = Integer; 2515 Hi = Integer; 2516 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) { 2517 Current = Integer; 2518 } else if (k == BuiltinType::Float || k == BuiltinType::Double) { 2519 Current = SSE; 2520 } else if (k == BuiltinType::LongDouble) { 2521 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); 2522 if (LDF == &llvm::APFloat::IEEEquad()) { 2523 Lo = SSE; 2524 Hi = SSEUp; 2525 } else if (LDF == &llvm::APFloat::x87DoubleExtended()) { 2526 Lo = X87; 2527 Hi = X87Up; 2528 } else if (LDF == &llvm::APFloat::IEEEdouble()) { 2529 Current = SSE; 2530 } else 2531 llvm_unreachable("unexpected long double representation!"); 2532 } 2533 // FIXME: _Decimal32 and _Decimal64 are SSE. 2534 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). 2535 return; 2536 } 2537 2538 if (const EnumType *ET = Ty->getAs<EnumType>()) { 2539 // Classify the underlying integer type. 2540 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg); 2541 return; 2542 } 2543 2544 if (Ty->hasPointerRepresentation()) { 2545 Current = Integer; 2546 return; 2547 } 2548 2549 if (Ty->isMemberPointerType()) { 2550 if (Ty->isMemberFunctionPointerType()) { 2551 if (Has64BitPointers) { 2552 // If Has64BitPointers, this is an {i64, i64}, so classify both 2553 // Lo and Hi now. 2554 Lo = Hi = Integer; 2555 } else { 2556 // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that 2557 // straddles an eightbyte boundary, Hi should be classified as well. 
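// For example, with 32-bit pointers a member function pointer at bit
// offset 32 occupies bits [32, 96) and so touches both the first and the
// second eightbyte; both words are then classified as Integer.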
2558 uint64_t EB_FuncPtr = (OffsetBase) / 64; 2559 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64; 2560 if (EB_FuncPtr != EB_ThisAdj) { 2561 Lo = Hi = Integer; 2562 } else { 2563 Current = Integer; 2564 } 2565 } 2566 } else { 2567 Current = Integer; 2568 } 2569 return; 2570 } 2571 2572 if (const VectorType *VT = Ty->getAs<VectorType>()) { 2573 uint64_t Size = getContext().getTypeSize(VT); 2574 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) { 2575 // gcc passes the following as integer: 2576 // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float> 2577 // 2 bytes - <2 x char>, <1 x short> 2578 // 1 byte - <1 x char> 2579 Current = Integer; 2580 2581 // If this type crosses an eightbyte boundary, it should be 2582 // split. 2583 uint64_t EB_Lo = (OffsetBase) / 64; 2584 uint64_t EB_Hi = (OffsetBase + Size - 1) / 64; 2585 if (EB_Lo != EB_Hi) 2586 Hi = Lo; 2587 } else if (Size == 64) { 2588 QualType ElementType = VT->getElementType(); 2589 2590 // gcc passes <1 x double> in memory. :( 2591 if (ElementType->isSpecificBuiltinType(BuiltinType::Double)) 2592 return; 2593 2594 // gcc passes <1 x long long> as SSE but clang used to unconditionally 2595 // pass them as integer. For platforms where clang is the de facto 2596 // platform compiler, we must continue to use integer. 2597 if (!classifyIntegerMMXAsSSE() && 2598 (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) || 2599 ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) || 2600 ElementType->isSpecificBuiltinType(BuiltinType::Long) || 2601 ElementType->isSpecificBuiltinType(BuiltinType::ULong))) 2602 Current = Integer; 2603 else 2604 Current = SSE; 2605 2606 // If this type crosses an eightbyte boundary, it should be 2607 // split. 2608 if (OffsetBase && OffsetBase != 64) 2609 Hi = Lo; 2610 } else if (Size == 128 || 2611 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) { 2612 // Arguments of 256-bits are split into four eightbyte chunks. The 2613 // least significant one belongs to class SSE and all the others to class 2614 // SSEUP. The original Lo and Hi design considers that types can't be 2615 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense. 2616 // This design isn't correct for 256-bits, but since there're no cases 2617 // where the upper parts would need to be inspected, avoid adding 2618 // complexity and just consider Hi to match the 64-256 part. 2619 // 2620 // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in 2621 // registers if they are "named", i.e. not part of the "..." of a 2622 // variadic function. 2623 // 2624 // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are 2625 // split into eight eightbyte chunks, one SSE and seven SSEUP. 
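// For example, a named __m256 argument is classified as Lo = SSE with
// Hi = SSEUp and ends up being passed in a single YMM register.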
2626 Lo = SSE; 2627 Hi = SSEUp; 2628 } 2629 return; 2630 } 2631 2632 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 2633 QualType ET = getContext().getCanonicalType(CT->getElementType()); 2634 2635 uint64_t Size = getContext().getTypeSize(Ty); 2636 if (ET->isIntegralOrEnumerationType()) { 2637 if (Size <= 64) 2638 Current = Integer; 2639 else if (Size <= 128) 2640 Lo = Hi = Integer; 2641 } else if (ET == getContext().FloatTy) { 2642 Current = SSE; 2643 } else if (ET == getContext().DoubleTy) { 2644 Lo = Hi = SSE; 2645 } else if (ET == getContext().LongDoubleTy) { 2646 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); 2647 if (LDF == &llvm::APFloat::IEEEquad()) 2648 Current = Memory; 2649 else if (LDF == &llvm::APFloat::x87DoubleExtended()) 2650 Current = ComplexX87; 2651 else if (LDF == &llvm::APFloat::IEEEdouble()) 2652 Lo = Hi = SSE; 2653 else 2654 llvm_unreachable("unexpected long double representation!"); 2655 } 2656 2657 // If this complex type crosses an eightbyte boundary then it 2658 // should be split. 2659 uint64_t EB_Real = (OffsetBase) / 64; 2660 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64; 2661 if (Hi == NoClass && EB_Real != EB_Imag) 2662 Hi = Lo; 2663 2664 return; 2665 } 2666 2667 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 2668 // Arrays are treated like structures. 2669 2670 uint64_t Size = getContext().getTypeSize(Ty); 2671 2672 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 2673 // than eight eightbytes, ..., it has class MEMORY. 2674 if (Size > 512) 2675 return; 2676 2677 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned 2678 // fields, it has class MEMORY. 2679 // 2680 // Only need to check alignment of array base. 2681 if (OffsetBase % getContext().getTypeAlign(AT->getElementType())) 2682 return; 2683 2684 // Otherwise implement simplified merge. We could be smarter about 2685 // this, but it isn't worth it and would be harder to verify. 2686 Current = NoClass; 2687 uint64_t EltSize = getContext().getTypeSize(AT->getElementType()); 2688 uint64_t ArraySize = AT->getSize().getZExtValue(); 2689 2690 // The only case a 256-bit wide vector could be used is when the array 2691 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 2692 // to work for sizes wider than 128, early check and fallback to memory. 2693 // 2694 if (Size > 128 && 2695 (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel))) 2696 return; 2697 2698 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) { 2699 Class FieldLo, FieldHi; 2700 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg); 2701 Lo = merge(Lo, FieldLo); 2702 Hi = merge(Hi, FieldHi); 2703 if (Lo == Memory || Hi == Memory) 2704 break; 2705 } 2706 2707 postMerge(Size, Lo, Hi); 2708 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification."); 2709 return; 2710 } 2711 2712 if (const RecordType *RT = Ty->getAs<RecordType>()) { 2713 uint64_t Size = getContext().getTypeSize(Ty); 2714 2715 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 2716 // than eight eightbytes, ..., it has class MEMORY. 2717 if (Size > 512) 2718 return; 2719 2720 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial 2721 // copy constructor or a non-trivial destructor, it is passed by invisible 2722 // reference. 
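// For example, a struct with a user-provided copy constructor or destructor
// keeps the initial Memory classification here and is passed indirectly,
// by the address of a caller-created temporary.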
2723 if (getRecordArgABI(RT, getCXXABI())) 2724 return; 2725 2726 const RecordDecl *RD = RT->getDecl(); 2727 2728 // Assume variable sized types are passed in memory. 2729 if (RD->hasFlexibleArrayMember()) 2730 return; 2731 2732 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 2733 2734 // Reset Lo class, this will be recomputed. 2735 Current = NoClass; 2736 2737 // If this is a C++ record, classify the bases first. 2738 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 2739 for (const auto &I : CXXRD->bases()) { 2740 assert(!I.isVirtual() && !I.getType()->isDependentType() && 2741 "Unexpected base class!"); 2742 const CXXRecordDecl *Base = 2743 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl()); 2744 2745 // Classify this field. 2746 // 2747 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a 2748 // single eightbyte, each is classified separately. Each eightbyte gets 2749 // initialized to class NO_CLASS. 2750 Class FieldLo, FieldHi; 2751 uint64_t Offset = 2752 OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base)); 2753 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg); 2754 Lo = merge(Lo, FieldLo); 2755 Hi = merge(Hi, FieldHi); 2756 if (Lo == Memory || Hi == Memory) { 2757 postMerge(Size, Lo, Hi); 2758 return; 2759 } 2760 } 2761 } 2762 2763 // Classify the fields one at a time, merging the results. 2764 unsigned idx = 0; 2765 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 2766 i != e; ++i, ++idx) { 2767 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 2768 bool BitField = i->isBitField(); 2769 2770 // Ignore padding bit-fields. 2771 if (BitField && i->isUnnamedBitfield()) 2772 continue; 2773 2774 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than 2775 // four eightbytes, or it contains unaligned fields, it has class MEMORY. 2776 // 2777 // The only case a 256-bit wide vector could be used is when the struct 2778 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 2779 // to work for sizes wider than 128, early check and fallback to memory. 2780 // 2781 if (Size > 128 && (Size != getContext().getTypeSize(i->getType()) || 2782 Size > getNativeVectorSizeForAVXABI(AVXLevel))) { 2783 Lo = Memory; 2784 postMerge(Size, Lo, Hi); 2785 return; 2786 } 2787 // Note, skip this test for bit-fields, see below. 2788 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) { 2789 Lo = Memory; 2790 postMerge(Size, Lo, Hi); 2791 return; 2792 } 2793 2794 // Classify this field. 2795 // 2796 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate 2797 // exceeds a single eightbyte, each is classified 2798 // separately. Each eightbyte gets initialized to class 2799 // NO_CLASS. 2800 Class FieldLo, FieldHi; 2801 2802 // Bit-fields require special handling, they do not force the 2803 // structure to be passed in memory even if unaligned, and 2804 // therefore they can straddle an eightbyte. 2805 if (BitField) { 2806 assert(!i->isUnnamedBitfield()); 2807 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 2808 uint64_t Size = i->getBitWidthValue(getContext()); 2809 2810 uint64_t EB_Lo = Offset / 64; 2811 uint64_t EB_Hi = (Offset + Size - 1) / 64; 2812 2813 if (EB_Lo) { 2814 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes."); 2815 FieldLo = NoClass; 2816 FieldHi = Integer; 2817 } else { 2818 FieldLo = Integer; 2819 FieldHi = EB_Hi ? 
Integer : NoClass; 2820 } 2821 } else 2822 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg); 2823 Lo = merge(Lo, FieldLo); 2824 Hi = merge(Hi, FieldHi); 2825 if (Lo == Memory || Hi == Memory) 2826 break; 2827 } 2828 2829 postMerge(Size, Lo, Hi); 2830 } 2831 } 2832 2833 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const { 2834 // If this is a scalar LLVM value then assume LLVM will pass it in the right 2835 // place naturally. 2836 if (!isAggregateTypeForABI(Ty)) { 2837 // Treat an enum type as its underlying type. 2838 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2839 Ty = EnumTy->getDecl()->getIntegerType(); 2840 2841 return (Ty->isPromotableIntegerType() ? 2842 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2843 } 2844 2845 return getNaturalAlignIndirect(Ty); 2846 } 2847 2848 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const { 2849 if (const VectorType *VecTy = Ty->getAs<VectorType>()) { 2850 uint64_t Size = getContext().getTypeSize(VecTy); 2851 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel); 2852 if (Size <= 64 || Size > LargestVector) 2853 return true; 2854 } 2855 2856 return false; 2857 } 2858 2859 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty, 2860 unsigned freeIntRegs) const { 2861 // If this is a scalar LLVM value then assume LLVM will pass it in the right 2862 // place naturally. 2863 // 2864 // This assumption is optimistic, as there could be free registers available 2865 // when we need to pass this argument in memory, and LLVM could try to pass 2866 // the argument in the free register. This does not seem to happen currently, 2867 // but this code would be much safer if we could mark the argument with 2868 // 'onstack'. See PR12193. 2869 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) { 2870 // Treat an enum type as its underlying type. 2871 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2872 Ty = EnumTy->getDecl()->getIntegerType(); 2873 2874 return (Ty->isPromotableIntegerType() ? 2875 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2876 } 2877 2878 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 2879 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 2880 2881 // Compute the byval alignment. We specify the alignment of the byval in all 2882 // cases so that the mid-level optimizer knows the alignment of the byval. 2883 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U); 2884 2885 // Attempt to avoid passing indirect results using byval when possible. This 2886 // is important for good codegen. 2887 // 2888 // We do this by coercing the value into a scalar type which the backend can 2889 // handle naturally (i.e., without using byval). 2890 // 2891 // For simplicity, we currently only do this when we have exhausted all of the 2892 // free integer registers. Doing this when there are free integer registers 2893 // would require more care, as we would have to ensure that the coerced value 2894 // did not claim the unused register. That would require either reording the 2895 // arguments to the function (so that any subsequent inreg values came first), 2896 // or only doing this optimization when there were no following arguments that 2897 // might be inreg. 
2898 // 2899 // We currently expect it to be rare (particularly in well written code) for 2900 // arguments to be passed on the stack when there are still free integer 2901 // registers available (this would typically imply large structs being passed 2902 // by value), so this seems like a fair tradeoff for now. 2903 // 2904 // We can revisit this if the backend grows support for 'onstack' parameter 2905 // attributes. See PR12193. 2906 if (freeIntRegs == 0) { 2907 uint64_t Size = getContext().getTypeSize(Ty); 2908 2909 // If this type fits in an eightbyte, coerce it into the matching integral 2910 // type, which will end up on the stack (with alignment 8). 2911 if (Align == 8 && Size <= 64) 2912 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 2913 Size)); 2914 } 2915 2916 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align)); 2917 } 2918 2919 /// The ABI specifies that a value should be passed in a full vector XMM/YMM 2920 /// register. Pick an LLVM IR type that will be passed as a vector register. 2921 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const { 2922 // Wrapper structs/arrays that only contain vectors are passed just like 2923 // vectors; strip them off if present. 2924 if (const Type *InnerTy = isSingleElementStruct(Ty, getContext())) 2925 Ty = QualType(InnerTy, 0); 2926 2927 llvm::Type *IRType = CGT.ConvertType(Ty); 2928 if (isa<llvm::VectorType>(IRType) || 2929 IRType->getTypeID() == llvm::Type::FP128TyID) 2930 return IRType; 2931 2932 // We couldn't find the preferred IR vector type for 'Ty'. 2933 uint64_t Size = getContext().getTypeSize(Ty); 2934 assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!"); 2935 2936 // Return a LLVM IR vector type based on the size of 'Ty'. 2937 return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2938 Size / 64); 2939 } 2940 2941 /// BitsContainNoUserData - Return true if the specified [start,end) bit range 2942 /// is known to either be off the end of the specified type or being in 2943 /// alignment padding. The user type specified is known to be at most 128 bits 2944 /// in size, and have passed through X86_64ABIInfo::classify with a successful 2945 /// classification that put one of the two halves in the INTEGER class. 2946 /// 2947 /// It is conservatively correct to return false. 2948 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, 2949 unsigned EndBit, ASTContext &Context) { 2950 // If the bytes being queried are off the end of the type, there is no user 2951 // data hiding here. This handles analysis of builtins, vectors and other 2952 // types that don't contain interesting padding. 2953 unsigned TySize = (unsigned)Context.getTypeSize(Ty); 2954 if (TySize <= StartBit) 2955 return true; 2956 2957 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 2958 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType()); 2959 unsigned NumElts = (unsigned)AT->getSize().getZExtValue(); 2960 2961 // Check each element to see if the element overlaps with the queried range. 2962 for (unsigned i = 0; i != NumElts; ++i) { 2963 // If the element is after the span we care about, then we're done.. 2964 unsigned EltOffset = i*EltSize; 2965 if (EltOffset >= EndBit) break; 2966 2967 unsigned EltStart = EltOffset < StartBit ? 
StartBit-EltOffset :0; 2968 if (!BitsContainNoUserData(AT->getElementType(), EltStart, 2969 EndBit-EltOffset, Context)) 2970 return false; 2971 } 2972 // If it overlaps no elements, then it is safe to process as padding. 2973 return true; 2974 } 2975 2976 if (const RecordType *RT = Ty->getAs<RecordType>()) { 2977 const RecordDecl *RD = RT->getDecl(); 2978 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 2979 2980 // If this is a C++ record, check the bases first. 2981 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 2982 for (const auto &I : CXXRD->bases()) { 2983 assert(!I.isVirtual() && !I.getType()->isDependentType() && 2984 "Unexpected base class!"); 2985 const CXXRecordDecl *Base = 2986 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl()); 2987 2988 // If the base is after the span we care about, ignore it. 2989 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base)); 2990 if (BaseOffset >= EndBit) continue; 2991 2992 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0; 2993 if (!BitsContainNoUserData(I.getType(), BaseStart, 2994 EndBit-BaseOffset, Context)) 2995 return false; 2996 } 2997 } 2998 2999 // Verify that no field has data that overlaps the region of interest. Yes 3000 // this could be sped up a lot by being smarter about queried fields, 3001 // however we're only looking at structs up to 16 bytes, so we don't care 3002 // much. 3003 unsigned idx = 0; 3004 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 3005 i != e; ++i, ++idx) { 3006 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx); 3007 3008 // If we found a field after the region we care about, then we're done. 3009 if (FieldOffset >= EndBit) break; 3010 3011 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0; 3012 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset, 3013 Context)) 3014 return false; 3015 } 3016 3017 // If nothing in this record overlapped the area of interest, then we're 3018 // clean. 3019 return true; 3020 } 3021 3022 return false; 3023 } 3024 3025 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a 3026 /// float member at the specified offset. For example, {int,{float}} has a 3027 /// float at offset 4. It is conservatively correct for this routine to return 3028 /// false. 3029 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, 3030 const llvm::DataLayout &TD) { 3031 // Base case if we find a float. 3032 if (IROffset == 0 && IRType->isFloatTy()) 3033 return true; 3034 3035 // If this is a struct, recurse into the field at the specified offset. 3036 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 3037 const llvm::StructLayout *SL = TD.getStructLayout(STy); 3038 unsigned Elt = SL->getElementContainingOffset(IROffset); 3039 IROffset -= SL->getElementOffset(Elt); 3040 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD); 3041 } 3042 3043 // If this is an array, recurse into the field at the specified offset. 3044 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 3045 llvm::Type *EltTy = ATy->getElementType(); 3046 unsigned EltSize = TD.getTypeAllocSize(EltTy); 3047 IROffset -= IROffset/EltSize*EltSize; 3048 return ContainsFloatAtOffset(EltTy, IROffset, TD); 3049 } 3050 3051 return false; 3052 } 3053 3054 3055 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the 3056 /// low 8 bytes of an XMM register, corresponding to the SSE class. 
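/// For example, for 'struct { float x, y, z; }' the first eightbyte is
/// lowered to <2 x float> and the second to a lone float, since the last
/// four bytes of the second eightbyte are only padding.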
3057 llvm::Type *X86_64ABIInfo:: 3058 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset, 3059 QualType SourceTy, unsigned SourceOffset) const { 3060 // The only three choices we have are either double, <2 x float>, or float. We 3061 // pass as float if the last 4 bytes is just padding. This happens for 3062 // structs that contain 3 floats. 3063 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32, 3064 SourceOffset*8+64, getContext())) 3065 return llvm::Type::getFloatTy(getVMContext()); 3066 3067 // We want to pass as <2 x float> if the LLVM IR type contains a float at 3068 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the 3069 // case. 3070 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) && 3071 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout())) 3072 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2); 3073 3074 return llvm::Type::getDoubleTy(getVMContext()); 3075 } 3076 3077 3078 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in 3079 /// an 8-byte GPR. This means that we either have a scalar or we are talking 3080 /// about the high or low part of an up-to-16-byte struct. This routine picks 3081 /// the best LLVM IR type to represent this, which may be i64 or may be anything 3082 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*, 3083 /// etc). 3084 /// 3085 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for 3086 /// the source type. IROffset is an offset in bytes into the LLVM IR type that 3087 /// the 8-byte value references. PrefType may be null. 3088 /// 3089 /// SourceTy is the source-level type for the entire argument. SourceOffset is 3090 /// an offset into this that we're processing (which is always either 0 or 8). 3091 /// 3092 llvm::Type *X86_64ABIInfo:: 3093 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset, 3094 QualType SourceTy, unsigned SourceOffset) const { 3095 // If we're dealing with an un-offset LLVM IR type, then it means that we're 3096 // returning an 8-byte unit starting with it. See if we can safely use it. 3097 if (IROffset == 0) { 3098 // Pointers and int64's always fill the 8-byte unit. 3099 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) || 3100 IRType->isIntegerTy(64)) 3101 return IRType; 3102 3103 // If we have a 1/2/4-byte integer, we can use it only if the rest of the 3104 // goodness in the source type is just tail padding. This is allowed to 3105 // kick in for struct {double,int} on the int, but not on 3106 // struct{double,int,int} because we wouldn't return the second int. We 3107 // have to do this analysis on the source type because we can't depend on 3108 // unions being lowered a specific way etc. 3109 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) || 3110 IRType->isIntegerTy(32) || 3111 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) { 3112 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 : 3113 cast<llvm::IntegerType>(IRType)->getBitWidth(); 3114 3115 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth, 3116 SourceOffset*8+64, getContext())) 3117 return IRType; 3118 } 3119 } 3120 3121 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 3122 // If this is a struct, recurse into the field at the specified offset. 
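// For example, given the IR type { i64, i32 } and IROffset == 8, we recurse
// into the i32 element with IROffset reduced to 0.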
3123 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy); 3124 if (IROffset < SL->getSizeInBytes()) { 3125 unsigned FieldIdx = SL->getElementContainingOffset(IROffset); 3126 IROffset -= SL->getElementOffset(FieldIdx); 3127 3128 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset, 3129 SourceTy, SourceOffset); 3130 } 3131 } 3132 3133 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 3134 llvm::Type *EltTy = ATy->getElementType(); 3135 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy); 3136 unsigned EltOffset = IROffset/EltSize*EltSize; 3137 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy, 3138 SourceOffset); 3139 } 3140 3141 // Okay, we don't have any better idea of what to pass, so we pass this in an 3142 // integer register that isn't too big to fit the rest of the struct. 3143 unsigned TySizeInBytes = 3144 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity(); 3145 3146 assert(TySizeInBytes != SourceOffset && "Empty field?"); 3147 3148 // It is always safe to classify this as an integer type up to i64 that 3149 // isn't larger than the structure. 3150 return llvm::IntegerType::get(getVMContext(), 3151 std::min(TySizeInBytes-SourceOffset, 8U)*8); 3152 } 3153 3154 3155 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally 3156 /// be used as elements of a two register pair to pass or return, return a 3157 /// first class aggregate to represent them. For example, if the low part of 3158 /// a by-value argument should be passed as i32* and the high part as float, 3159 /// return {i32*, float}. 3160 static llvm::Type * 3161 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, 3162 const llvm::DataLayout &TD) { 3163 // In order to correctly satisfy the ABI, we need to the high part to start 3164 // at offset 8. If the high and low parts we inferred are both 4-byte types 3165 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have 3166 // the second element at offset 8. Check for this: 3167 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo); 3168 unsigned HiAlign = TD.getABITypeAlignment(Hi); 3169 unsigned HiStart = llvm::alignTo(LoSize, HiAlign); 3170 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!"); 3171 3172 // To handle this, we have to increase the size of the low part so that the 3173 // second element will start at an 8 byte offset. We can't increase the size 3174 // of the second element because it might make us access off the end of the 3175 // struct. 3176 if (HiStart != 8) { 3177 // There are usually two sorts of types the ABI generation code can produce 3178 // for the low part of a pair that aren't 8 bytes in size: float or 3179 // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and 3180 // NaCl). 3181 // Promote these to a larger type. 3182 if (Lo->isFloatTy()) 3183 Lo = llvm::Type::getDoubleTy(Lo->getContext()); 3184 else { 3185 assert((Lo->isIntegerTy() || Lo->isPointerTy()) 3186 && "Invalid/unknown lo type"); 3187 Lo = llvm::Type::getInt64Ty(Lo->getContext()); 3188 } 3189 } 3190 3191 llvm::StructType *Result = llvm::StructType::get(Lo, Hi); 3192 3193 // Verify that the second element is at an 8-byte offset. 3194 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 && 3195 "Invalid x86-64 argument pair!"); 3196 return Result; 3197 } 3198 3199 ABIArgInfo X86_64ABIInfo:: 3200 classifyReturnType(QualType RetTy) const { 3201 // AMD64-ABI 3.2.3p4: Rule 1. 
Classify the return type with the 3202 // classification algorithm. 3203 X86_64ABIInfo::Class Lo, Hi; 3204 classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true); 3205 3206 // Check some invariants. 3207 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 3208 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 3209 3210 llvm::Type *ResType = nullptr; 3211 switch (Lo) { 3212 case NoClass: 3213 if (Hi == NoClass) 3214 return ABIArgInfo::getIgnore(); 3215 // If the low part is just padding, it takes no register, leave ResType 3216 // null. 3217 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 3218 "Unknown missing lo part"); 3219 break; 3220 3221 case SSEUp: 3222 case X87Up: 3223 llvm_unreachable("Invalid classification for lo word."); 3224 3225 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via 3226 // hidden argument. 3227 case Memory: 3228 return getIndirectReturnResult(RetTy); 3229 3230 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next 3231 // available register of the sequence %rax, %rdx is used. 3232 case Integer: 3233 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 3234 3235 // If we have a sign or zero extended integer, make sure to return Extend 3236 // so that the parameter gets the right LLVM IR attributes. 3237 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 3238 // Treat an enum type as its underlying type. 3239 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 3240 RetTy = EnumTy->getDecl()->getIntegerType(); 3241 3242 if (RetTy->isIntegralOrEnumerationType() && 3243 RetTy->isPromotableIntegerType()) 3244 return ABIArgInfo::getExtend(); 3245 } 3246 break; 3247 3248 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next 3249 // available SSE register of the sequence %xmm0, %xmm1 is used. 3250 case SSE: 3251 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0); 3252 break; 3253 3254 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is 3255 // returned on the X87 stack in %st0 as 80-bit x87 number. 3256 case X87: 3257 ResType = llvm::Type::getX86_FP80Ty(getVMContext()); 3258 break; 3259 3260 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real 3261 // part of the value is returned in %st0 and the imaginary part in 3262 // %st1. 3263 case ComplexX87: 3264 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification."); 3265 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()), 3266 llvm::Type::getX86_FP80Ty(getVMContext())); 3267 break; 3268 } 3269 3270 llvm::Type *HighPart = nullptr; 3271 switch (Hi) { 3272 // Memory was handled previously and X87 should 3273 // never occur as a hi class. 3274 case Memory: 3275 case X87: 3276 llvm_unreachable("Invalid classification for hi word."); 3277 3278 case ComplexX87: // Previously handled. 3279 case NoClass: 3280 break; 3281 3282 case Integer: 3283 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 3284 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 3285 return ABIArgInfo::getDirect(HighPart, 8); 3286 break; 3287 case SSE: 3288 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 3289 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 3290 return ABIArgInfo::getDirect(HighPart, 8); 3291 break; 3292 3293 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte 3294 // is passed in the next available eightbyte chunk if the last used 3295 // vector register. 
3296 // 3297 // SSEUP should always be preceded by SSE, just widen. 3298 case SSEUp: 3299 assert(Lo == SSE && "Unexpected SSEUp classification."); 3300 ResType = GetByteVectorType(RetTy); 3301 break; 3302 3303 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is 3304 // returned together with the previous X87 value in %st0. 3305 case X87Up: 3306 // If X87Up is preceded by X87, we don't need to do 3307 // anything. However, in some cases with unions it may not be 3308 // preceded by X87. In such situations we follow gcc and pass the 3309 // extra bits in an SSE reg. 3310 if (Lo != X87) { 3311 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8); 3312 if (Lo == NoClass) // Return HighPart at offset 8 in memory. 3313 return ABIArgInfo::getDirect(HighPart, 8); 3314 } 3315 break; 3316 } 3317 3318 // If a high part was specified, merge it together with the low part. It is 3319 // known to pass in the high eightbyte of the result. We do this by forming a 3320 // first class struct aggregate with the high and low part: {low, high} 3321 if (HighPart) 3322 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 3323 3324 return ABIArgInfo::getDirect(ResType); 3325 } 3326 3327 ABIArgInfo X86_64ABIInfo::classifyArgumentType( 3328 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE, 3329 bool isNamedArg) 3330 const 3331 { 3332 Ty = useFirstFieldIfTransparentUnion(Ty); 3333 3334 X86_64ABIInfo::Class Lo, Hi; 3335 classify(Ty, 0, Lo, Hi, isNamedArg); 3336 3337 // Check some invariants. 3338 // FIXME: Enforce these by construction. 3339 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); 3340 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); 3341 3342 neededInt = 0; 3343 neededSSE = 0; 3344 llvm::Type *ResType = nullptr; 3345 switch (Lo) { 3346 case NoClass: 3347 if (Hi == NoClass) 3348 return ABIArgInfo::getIgnore(); 3349 // If the low part is just padding, it takes no register, leave ResType 3350 // null. 3351 assert((Hi == SSE || Hi == Integer || Hi == X87Up) && 3352 "Unknown missing lo part"); 3353 break; 3354 3355 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument 3356 // on the stack. 3357 case Memory: 3358 3359 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or 3360 // COMPLEX_X87, it is passed in memory. 3361 case X87: 3362 case ComplexX87: 3363 if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect) 3364 ++neededInt; 3365 return getIndirectResult(Ty, freeIntRegs); 3366 3367 case SSEUp: 3368 case X87Up: 3369 llvm_unreachable("Invalid classification for lo word."); 3370 3371 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next 3372 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 3373 // and %r9 is used. 3374 case Integer: 3375 ++neededInt; 3376 3377 // Pick an 8-byte type based on the preferred type. 3378 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); 3379 3380 // If we have a sign or zero extended integer, make sure to return Extend 3381 // so that the parameter gets the right LLVM IR attributes. 3382 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 3383 // Treat an enum type as its underlying type. 3384 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3385 Ty = EnumTy->getDecl()->getIntegerType(); 3386 3387 if (Ty->isIntegralOrEnumerationType() && 3388 Ty->isPromotableIntegerType()) 3389 return ABIArgInfo::getExtend(); 3390 } 3391 3392 break; 3393 3394 // AMD64-ABI 3.2.3p3: Rule 3. 
If the class is SSE, the next 3395 // available SSE register is used, the registers are taken in the 3396 // order from %xmm0 to %xmm7. 3397 case SSE: { 3398 llvm::Type *IRType = CGT.ConvertType(Ty); 3399 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0); 3400 ++neededSSE; 3401 break; 3402 } 3403 } 3404 3405 llvm::Type *HighPart = nullptr; 3406 switch (Hi) { 3407 // Memory was handled previously, ComplexX87 and X87 should 3408 // never occur as hi classes, and X87Up must be preceded by X87, 3409 // which is passed in memory. 3410 case Memory: 3411 case X87: 3412 case ComplexX87: 3413 llvm_unreachable("Invalid classification for hi word."); 3414 3415 case NoClass: break; 3416 3417 case Integer: 3418 ++neededInt; 3419 // Pick an 8-byte type based on the preferred type. 3420 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 3421 3422 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 3423 return ABIArgInfo::getDirect(HighPart, 8); 3424 break; 3425 3426 // X87Up generally doesn't occur here (long double is passed in 3427 // memory), except in situations involving unions. 3428 case X87Up: 3429 case SSE: 3430 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 3431 3432 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 3433 return ABIArgInfo::getDirect(HighPart, 8); 3434 3435 ++neededSSE; 3436 break; 3437 3438 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the 3439 // eightbyte is passed in the upper half of the last used SSE 3440 // register. This only happens when 128-bit vectors are passed. 3441 case SSEUp: 3442 assert(Lo == SSE && "Unexpected SSEUp classification"); 3443 ResType = GetByteVectorType(Ty); 3444 break; 3445 } 3446 3447 // If a high part was specified, merge it together with the low part. It is 3448 // known to pass in the high eightbyte of the result. 
We do this by forming a 3449 // first class struct aggregate with the high and low part: {low, high} 3450 if (HighPart) 3451 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 3452 3453 return ABIArgInfo::getDirect(ResType); 3454 } 3455 3456 ABIArgInfo 3457 X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt, 3458 unsigned &NeededSSE) const { 3459 auto RT = Ty->getAs<RecordType>(); 3460 assert(RT && "classifyRegCallStructType only valid with struct types"); 3461 3462 if (RT->getDecl()->hasFlexibleArrayMember()) 3463 return getIndirectReturnResult(Ty); 3464 3465 // Sum up bases 3466 if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) { 3467 if (CXXRD->isDynamicClass()) { 3468 NeededInt = NeededSSE = 0; 3469 return getIndirectReturnResult(Ty); 3470 } 3471 3472 for (const auto &I : CXXRD->bases()) 3473 if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE) 3474 .isIndirect()) { 3475 NeededInt = NeededSSE = 0; 3476 return getIndirectReturnResult(Ty); 3477 } 3478 } 3479 3480 // Sum up members 3481 for (const auto *FD : RT->getDecl()->fields()) { 3482 if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) { 3483 if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE) 3484 .isIndirect()) { 3485 NeededInt = NeededSSE = 0; 3486 return getIndirectReturnResult(Ty); 3487 } 3488 } else { 3489 unsigned LocalNeededInt, LocalNeededSSE; 3490 if (classifyArgumentType(FD->getType(), UINT_MAX, LocalNeededInt, 3491 LocalNeededSSE, true) 3492 .isIndirect()) { 3493 NeededInt = NeededSSE = 0; 3494 return getIndirectReturnResult(Ty); 3495 } 3496 NeededInt += LocalNeededInt; 3497 NeededSSE += LocalNeededSSE; 3498 } 3499 } 3500 3501 return ABIArgInfo::getDirect(); 3502 } 3503 3504 ABIArgInfo X86_64ABIInfo::classifyRegCallStructType(QualType Ty, 3505 unsigned &NeededInt, 3506 unsigned &NeededSSE) const { 3507 3508 NeededInt = 0; 3509 NeededSSE = 0; 3510 3511 return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE); 3512 } 3513 3514 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 3515 3516 bool IsRegCall = FI.getCallingConvention() == llvm::CallingConv::X86_RegCall; 3517 3518 // Keep track of the number of assigned registers. 3519 unsigned FreeIntRegs = IsRegCall ? 11 : 6; 3520 unsigned FreeSSERegs = IsRegCall ? 16 : 8; 3521 unsigned NeededInt, NeededSSE; 3522 3523 if (!getCXXABI().classifyReturnType(FI)) { 3524 if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() && 3525 !FI.getReturnType()->getTypePtr()->isUnionType()) { 3526 FI.getReturnInfo() = 3527 classifyRegCallStructType(FI.getReturnType(), NeededInt, NeededSSE); 3528 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) { 3529 FreeIntRegs -= NeededInt; 3530 FreeSSERegs -= NeededSSE; 3531 } else { 3532 FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType()); 3533 } 3534 } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>()) { 3535 // Complex Long Double Type is passed in Memory when Regcall 3536 // calling convention is used. 3537 const ComplexType *CT = FI.getReturnType()->getAs<ComplexType>(); 3538 if (getContext().getCanonicalType(CT->getElementType()) == 3539 getContext().LongDoubleTy) 3540 FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType()); 3541 } else 3542 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3543 } 3544 3545 // If the return value is indirect, then the hidden argument is consuming one 3546 // integer register. 
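  // Rough sketch of the register budget this implements for the SysV calling
  // convention: six GPRs (%rdi, %rsi, %rdx, %rcx, %r8, %r9) and eight XMM
  // registers. For a function returning a by-memory aggregate, %rdi carries the
  // sret pointer, which is why an indirect return costs one integer register.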
3547 if (FI.getReturnInfo().isIndirect()) 3548 --FreeIntRegs; 3549 3550 // The chain argument effectively gives us another free register. 3551 if (FI.isChainCall()) 3552 ++FreeIntRegs; 3553 3554 unsigned NumRequiredArgs = FI.getNumRequiredArgs(); 3555 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers 3556 // get assigned (in left-to-right order) for passing as follows... 3557 unsigned ArgNo = 0; 3558 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3559 it != ie; ++it, ++ArgNo) { 3560 bool IsNamedArg = ArgNo < NumRequiredArgs; 3561 3562 if (IsRegCall && it->type->isStructureOrClassType()) 3563 it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE); 3564 else 3565 it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt, 3566 NeededSSE, IsNamedArg); 3567 3568 // AMD64-ABI 3.2.3p3: If there are no registers available for any 3569 // eightbyte of an argument, the whole argument is passed on the 3570 // stack. If registers have already been assigned for some 3571 // eightbytes of such an argument, the assignments get reverted. 3572 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) { 3573 FreeIntRegs -= NeededInt; 3574 FreeSSERegs -= NeededSSE; 3575 } else { 3576 it->info = getIndirectResult(it->type, FreeIntRegs); 3577 } 3578 } 3579 } 3580 3581 static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF, 3582 Address VAListAddr, QualType Ty) { 3583 Address overflow_arg_area_p = CGF.Builder.CreateStructGEP( 3584 VAListAddr, 2, CharUnits::fromQuantity(8), "overflow_arg_area_p"); 3585 llvm::Value *overflow_arg_area = 3586 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); 3587 3588 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 3589 // byte boundary if alignment needed by type exceeds 8 byte boundary. 3590 // It isn't stated explicitly in the standard, but in practice we use 3591 // alignment greater than 16 where necessary. 3592 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty); 3593 if (Align > CharUnits::fromQuantity(8)) { 3594 overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area, 3595 Align); 3596 } 3597 3598 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. 3599 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 3600 llvm::Value *Res = 3601 CGF.Builder.CreateBitCast(overflow_arg_area, 3602 llvm::PointerType::getUnqual(LTy)); 3603 3604 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: 3605 // l->overflow_arg_area + sizeof(type). 3606 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to 3607 // an 8 byte boundary. 3608 3609 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8; 3610 llvm::Value *Offset = 3611 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7); 3612 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset, 3613 "overflow_arg_area.next"); 3614 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p); 3615 3616 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type. 
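  // Worked example (illustrative): for a 12-byte struct, SizeInBytes is 12 and
  // the advance is rounded to (12 + 7) & ~7 == 16, so overflow_arg_area stays
  // 8-byte aligned for the next va_arg.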
3617 return Address(Res, Align); 3618 } 3619 3620 Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 3621 QualType Ty) const { 3622 // Assume that va_list type is correct; should be pointer to LLVM type: 3623 // struct { 3624 // i32 gp_offset; 3625 // i32 fp_offset; 3626 // i8* overflow_arg_area; 3627 // i8* reg_save_area; 3628 // }; 3629 unsigned neededInt, neededSSE; 3630 3631 Ty = getContext().getCanonicalType(Ty); 3632 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE, 3633 /*isNamedArg*/false); 3634 3635 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed 3636 // in the registers. If not go to step 7. 3637 if (!neededInt && !neededSSE) 3638 return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty); 3639 3640 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of 3641 // general purpose registers needed to pass type and num_fp to hold 3642 // the number of floating point registers needed. 3643 3644 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into 3645 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or 3646 // l->fp_offset > 304 - num_fp * 16 go to step 7. 3647 // 3648 // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of 3649 // register save space). 3650 3651 llvm::Value *InRegs = nullptr; 3652 Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid(); 3653 llvm::Value *gp_offset = nullptr, *fp_offset = nullptr; 3654 if (neededInt) { 3655 gp_offset_p = 3656 CGF.Builder.CreateStructGEP(VAListAddr, 0, CharUnits::Zero(), 3657 "gp_offset_p"); 3658 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset"); 3659 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8); 3660 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp"); 3661 } 3662 3663 if (neededSSE) { 3664 fp_offset_p = 3665 CGF.Builder.CreateStructGEP(VAListAddr, 1, CharUnits::fromQuantity(4), 3666 "fp_offset_p"); 3667 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset"); 3668 llvm::Value *FitsInFP = 3669 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16); 3670 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp"); 3671 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP; 3672 } 3673 3674 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 3675 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); 3676 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 3677 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); 3678 3679 // Emit code to load the value if it was passed in registers. 3680 3681 CGF.EmitBlock(InRegBlock); 3682 3683 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with 3684 // an offset of l->gp_offset and/or l->fp_offset. This may require 3685 // copying to a temporary location in case the parameter is passed 3686 // in different register classes or requires an alignment greater 3687 // than 8 for general purpose registers and 16 for XMM registers. 3688 // 3689 // FIXME: This really results in shameful code when we end up needing to 3690 // collect arguments from different places; often what should result in a 3691 // simple assembling of a structure from scattered addresses has many more 3692 // loads than necessary. Can we clean this up? 
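  // For orientation, a sketch of the register save area this code indexes into
  // (per the SysV AMD64 ABI): bytes 0..47 hold the six GPR arguments (%rdi,
  // %rsi, %rdx, %rcx, %r8, %r9) addressed via gp_offset, and bytes 48..175 hold
  // %xmm0..%xmm7 at a 16-byte stride addressed via fp_offset.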
3693 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 3694 llvm::Value *RegSaveArea = CGF.Builder.CreateLoad( 3695 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(16)), 3696 "reg_save_area"); 3697 3698 Address RegAddr = Address::invalid(); 3699 if (neededInt && neededSSE) { 3700 // FIXME: Cleanup. 3701 assert(AI.isDirect() && "Unexpected ABI info for mixed regs"); 3702 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType()); 3703 Address Tmp = CGF.CreateMemTemp(Ty); 3704 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST); 3705 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs"); 3706 llvm::Type *TyLo = ST->getElementType(0); 3707 llvm::Type *TyHi = ST->getElementType(1); 3708 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) && 3709 "Unexpected ABI info for mixed regs"); 3710 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo); 3711 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi); 3712 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegSaveArea, gp_offset); 3713 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegSaveArea, fp_offset); 3714 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr; 3715 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr; 3716 3717 // Copy the first element. 3718 // FIXME: Our choice of alignment here and below is probably pessimistic. 3719 llvm::Value *V = CGF.Builder.CreateAlignedLoad( 3720 TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo), 3721 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyLo))); 3722 CGF.Builder.CreateStore(V, 3723 CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero())); 3724 3725 // Copy the second element. 3726 V = CGF.Builder.CreateAlignedLoad( 3727 TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi), 3728 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyHi))); 3729 CharUnits Offset = CharUnits::fromQuantity( 3730 getDataLayout().getStructLayout(ST)->getElementOffset(1)); 3731 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1, Offset)); 3732 3733 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy); 3734 } else if (neededInt) { 3735 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, gp_offset), 3736 CharUnits::fromQuantity(8)); 3737 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy); 3738 3739 // Copy to a temporary if necessary to ensure the appropriate alignment. 3740 std::pair<CharUnits, CharUnits> SizeAlign = 3741 getContext().getTypeInfoInChars(Ty); 3742 uint64_t TySize = SizeAlign.first.getQuantity(); 3743 CharUnits TyAlign = SizeAlign.second; 3744 3745 // Copy into a temporary if the type is more aligned than the 3746 // register save area. 3747 if (TyAlign.getQuantity() > 8) { 3748 Address Tmp = CGF.CreateMemTemp(Ty); 3749 CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false); 3750 RegAddr = Tmp; 3751 } 3752 3753 } else if (neededSSE == 1) { 3754 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset), 3755 CharUnits::fromQuantity(16)); 3756 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy); 3757 } else { 3758 assert(neededSSE == 2 && "Invalid number of needed registers!"); 3759 // SSE registers are spaced 16 bytes apart in the register save 3760 // area, we need to collect the two eightbytes together. 3761 // The ABI isn't explicit about this, but it seems reasonable 3762 // to assume that the slots are 16-byte aligned, since the stack is 3763 // naturally 16-byte aligned and the prologue is expected to store 3764 // all the SSE registers to the RSA. 
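    // Illustrative case: a _Complex double or struct { double x, y; } argument
    // is classified SSE + SSE, so it arrived in two XMM registers and was
    // spilled 16 bytes apart in the save area; the two loads below repack it
    // into a contiguous { double, double } temporary.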
3765 Address RegAddrLo = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset), 3766 CharUnits::fromQuantity(16)); 3767 Address RegAddrHi = 3768 CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo, 3769 CharUnits::fromQuantity(16)); 3770 llvm::Type *DoubleTy = CGF.DoubleTy; 3771 llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy); 3772 llvm::Value *V; 3773 Address Tmp = CGF.CreateMemTemp(Ty); 3774 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST); 3775 V = CGF.Builder.CreateLoad( 3776 CGF.Builder.CreateElementBitCast(RegAddrLo, DoubleTy)); 3777 CGF.Builder.CreateStore(V, 3778 CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero())); 3779 V = CGF.Builder.CreateLoad( 3780 CGF.Builder.CreateElementBitCast(RegAddrHi, DoubleTy)); 3781 CGF.Builder.CreateStore(V, 3782 CGF.Builder.CreateStructGEP(Tmp, 1, CharUnits::fromQuantity(8))); 3783 3784 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy); 3785 } 3786 3787 // AMD64-ABI 3.5.7p5: Step 5. Set: 3788 // l->gp_offset = l->gp_offset + num_gp * 8 3789 // l->fp_offset = l->fp_offset + num_fp * 16. 3790 if (neededInt) { 3791 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8); 3792 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), 3793 gp_offset_p); 3794 } 3795 if (neededSSE) { 3796 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16); 3797 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), 3798 fp_offset_p); 3799 } 3800 CGF.EmitBranch(ContBlock); 3801 3802 // Emit code to load the value if it was passed in memory. 3803 3804 CGF.EmitBlock(InMemBlock); 3805 Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty); 3806 3807 // Return the appropriate result. 3808 3809 CGF.EmitBlock(ContBlock); 3810 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock, 3811 "vaarg.addr"); 3812 return ResAddr; 3813 } 3814 3815 Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, 3816 QualType Ty) const { 3817 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false, 3818 CGF.getContext().getTypeInfoInChars(Ty), 3819 CharUnits::fromQuantity(8), 3820 /*allowHigherAlign*/ false); 3821 } 3822 3823 ABIArgInfo 3824 WinX86_64ABIInfo::reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs, 3825 const ABIArgInfo &current) const { 3826 // Assumes vectorCall calling convention.
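  // Hedged example: under vectorcall a hypothetical struct { __m128 a, b; } is
  // a homogeneous vector aggregate with two XMM-sized elements; if at least two
  // SSE registers are still free after the first classification pass, it is
  // reclassified here to be passed directly in registers instead of indirectly.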
3827 const Type *Base = nullptr; 3828 uint64_t NumElts = 0; 3829 3830 if (!Ty->isBuiltinType() && !Ty->isVectorType() && 3831 isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) { 3832 FreeSSERegs -= NumElts; 3833 return getDirectX86Hva(); 3834 } 3835 return current; 3836 } 3837 3838 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs, 3839 bool IsReturnType, bool IsVectorCall, 3840 bool IsRegCall) const { 3841 3842 if (Ty->isVoidType()) 3843 return ABIArgInfo::getIgnore(); 3844 3845 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3846 Ty = EnumTy->getDecl()->getIntegerType(); 3847 3848 TypeInfo Info = getContext().getTypeInfo(Ty); 3849 uint64_t Width = Info.Width; 3850 CharUnits Align = getContext().toCharUnitsFromBits(Info.Align); 3851 3852 const RecordType *RT = Ty->getAs<RecordType>(); 3853 if (RT) { 3854 if (!IsReturnType) { 3855 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI())) 3856 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 3857 } 3858 3859 if (RT->getDecl()->hasFlexibleArrayMember()) 3860 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 3861 3862 } 3863 3864 const Type *Base = nullptr; 3865 uint64_t NumElts = 0; 3866 // vectorcall adds the concept of a homogenous vector aggregate, similar to 3867 // other targets. 3868 if ((IsVectorCall || IsRegCall) && 3869 isHomogeneousAggregate(Ty, Base, NumElts)) { 3870 if (IsRegCall) { 3871 if (FreeSSERegs >= NumElts) { 3872 FreeSSERegs -= NumElts; 3873 if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType()) 3874 return ABIArgInfo::getDirect(); 3875 return ABIArgInfo::getExpand(); 3876 } 3877 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false); 3878 } else if (IsVectorCall) { 3879 if (FreeSSERegs >= NumElts && 3880 (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) { 3881 FreeSSERegs -= NumElts; 3882 return ABIArgInfo::getDirect(); 3883 } else if (IsReturnType) { 3884 return ABIArgInfo::getExpand(); 3885 } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) { 3886 // HVAs are delayed and reclassified in the 2nd step. 3887 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false); 3888 } 3889 } 3890 } 3891 3892 if (Ty->isMemberPointerType()) { 3893 // If the member pointer is represented by an LLVM int or ptr, pass it 3894 // directly. 3895 llvm::Type *LLTy = CGT.ConvertType(Ty); 3896 if (LLTy->isPointerTy() || LLTy->isIntegerTy()) 3897 return ABIArgInfo::getDirect(); 3898 } 3899 3900 if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) { 3901 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is 3902 // not 1, 2, 4, or 8 bytes, must be passed by reference." 3903 if (Width > 64 || !llvm::isPowerOf2_64(Width)) 3904 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 3905 3906 // Otherwise, coerce it to a small integer. 3907 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width)); 3908 } 3909 3910 // Bool type is always extended to the ABI, other builtin types are not 3911 // extended. 3912 const BuiltinType *BT = Ty->getAs<BuiltinType>(); 3913 if (BT && BT->getKind() == BuiltinType::Bool) 3914 return ABIArgInfo::getExtend(); 3915 3916 // Mingw64 GCC uses the old 80 bit extended precision floating point unit. It 3917 // passes them indirectly through memory. 
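  // Illustrative case: when targeting x86_64-w64-mingw32, long double uses the
  // 80-bit x87 format, so a long double argument takes the path below and is
  // passed by reference rather than coerced into an integer or SSE register.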
3918 if (IsMingw64 && BT && BT->getKind() == BuiltinType::LongDouble) { 3919 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); 3920 if (LDF == &llvm::APFloat::x87DoubleExtended()) 3921 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false); 3922 } 3923 3924 return ABIArgInfo::getDirect(); 3925 } 3926 3927 void WinX86_64ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI, 3928 unsigned FreeSSERegs, 3929 bool IsVectorCall, 3930 bool IsRegCall) const { 3931 unsigned Count = 0; 3932 for (auto &I : FI.arguments()) { 3933 // Vectorcall in x64 only permits the first 6 arguments to be passed 3934 // as XMM/YMM registers. 3935 if (Count < VectorcallMaxParamNumAsReg) 3936 I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall); 3937 else { 3938 // Since these cannot be passed in registers, pretend no registers 3939 // are left. 3940 unsigned ZeroSSERegsAvail = 0; 3941 I.info = classify(I.type, /*FreeSSERegs=*/ZeroSSERegsAvail, false, 3942 IsVectorCall, IsRegCall); 3943 } 3944 ++Count; 3945 } 3946 3947 for (auto &I : FI.arguments()) { 3948 I.info = reclassifyHvaArgType(I.type, FreeSSERegs, I.info); 3949 } 3950 } 3951 3952 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 3953 bool IsVectorCall = 3954 FI.getCallingConvention() == llvm::CallingConv::X86_VectorCall; 3955 bool IsRegCall = FI.getCallingConvention() == llvm::CallingConv::X86_RegCall; 3956 3957 unsigned FreeSSERegs = 0; 3958 if (IsVectorCall) { 3959 // We can use up to 4 SSE return registers with vectorcall. 3960 FreeSSERegs = 4; 3961 } else if (IsRegCall) { 3962 // RegCall gives us 16 SSE registers. 3963 FreeSSERegs = 16; 3964 } 3965 3966 if (!getCXXABI().classifyReturnType(FI)) 3967 FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true, 3968 IsVectorCall, IsRegCall); 3969 3970 if (IsVectorCall) { 3971 // We can use up to 6 SSE register parameters with vectorcall. 3972 FreeSSERegs = 6; 3973 } else if (IsRegCall) { 3974 // RegCall gives us 16 SSE registers, we can reuse the return registers. 3975 FreeSSERegs = 16; 3976 } 3977 3978 if (IsVectorCall) { 3979 computeVectorCallArgs(FI, FreeSSERegs, IsVectorCall, IsRegCall); 3980 } else { 3981 for (auto &I : FI.arguments()) 3982 I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall); 3983 } 3984 3985 } 3986 3987 Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 3988 QualType Ty) const { 3989 3990 bool IsIndirect = false; 3991 3992 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is 3993 // not 1, 2, 4, or 8 bytes, must be passed by reference." 3994 if (isAggregateTypeForABI(Ty) || Ty->isMemberPointerType()) { 3995 uint64_t Width = getContext().getTypeSize(Ty); 3996 IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width); 3997 } 3998 3999 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, 4000 CGF.getContext().getTypeInfoInChars(Ty), 4001 CharUnits::fromQuantity(8), 4002 /*allowHigherAlign*/ false); 4003 } 4004 4005 // PowerPC-32 4006 namespace { 4007 /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information. 
4008 class PPC32_SVR4_ABIInfo : public DefaultABIInfo { 4009 bool IsSoftFloatABI; 4010 public: 4011 PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI) 4012 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI) {} 4013 4014 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 4015 QualType Ty) const override; 4016 }; 4017 4018 class PPC32TargetCodeGenInfo : public TargetCodeGenInfo { 4019 public: 4020 PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI) 4021 : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT, SoftFloatABI)) {} 4022 4023 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 4024 // This is recovered from gcc output. 4025 return 1; // r1 is the dedicated stack pointer 4026 } 4027 4028 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4029 llvm::Value *Address) const override; 4030 }; 4031 4032 } 4033 4034 // TODO: this implementation is now likely redundant with 4035 // DefaultABIInfo::EmitVAArg. 4036 Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList, 4037 QualType Ty) const { 4038 const unsigned OverflowLimit = 8; 4039 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { 4040 // TODO: Implement this. For now ignore. 4041 (void)CTy; 4042 return Address::invalid(); // FIXME? 4043 } 4044 4045 // struct __va_list_tag { 4046 // unsigned char gpr; 4047 // unsigned char fpr; 4048 // unsigned short reserved; 4049 // void *overflow_arg_area; 4050 // void *reg_save_area; 4051 // }; 4052 4053 bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64; 4054 bool isInt = 4055 Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType(); 4056 bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64; 4057 4058 // All aggregates are passed indirectly? That doesn't seem consistent 4059 // with the argument-lowering code. 4060 bool isIndirect = Ty->isAggregateType(); 4061 4062 CGBuilderTy &Builder = CGF.Builder; 4063 4064 // The calling convention either uses 1-2 GPRs or 1 FPR. 4065 Address NumRegsAddr = Address::invalid(); 4066 if (isInt || IsSoftFloatABI) { 4067 NumRegsAddr = Builder.CreateStructGEP(VAList, 0, CharUnits::Zero(), "gpr"); 4068 } else { 4069 NumRegsAddr = Builder.CreateStructGEP(VAList, 1, CharUnits::One(), "fpr"); 4070 } 4071 4072 llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs"); 4073 4074 // "Align" the register count when TY is i64. 4075 if (isI64 || (isF64 && IsSoftFloatABI)) { 4076 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1)); 4077 NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U)); 4078 } 4079 4080 llvm::Value *CC = 4081 Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond"); 4082 4083 llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs"); 4084 llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow"); 4085 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont"); 4086 4087 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow); 4088 4089 llvm::Type *DirectTy = CGF.ConvertType(Ty); 4090 if (isIndirect) DirectTy = DirectTy->getPointerTo(0); 4091 4092 // Case 1: consume registers. 
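  // Sketch of the layout being walked here (32-bit SVR4): the register save
  // area starts with the eight 4-byte GPR argument slots (r3..r10), followed at
  // offset 32 by the eight 8-byte FPR slots (f1..f8); OverflowLimit == 8
  // matches that count of available parameter registers per class.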
4093 Address RegAddr = Address::invalid(); 4094 { 4095 CGF.EmitBlock(UsingRegs); 4096 4097 Address RegSaveAreaPtr = 4098 Builder.CreateStructGEP(VAList, 4, CharUnits::fromQuantity(8)); 4099 RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr), 4100 CharUnits::fromQuantity(8)); 4101 assert(RegAddr.getElementType() == CGF.Int8Ty); 4102 4103 // Floating-point registers start after the general-purpose registers. 4104 if (!(isInt || IsSoftFloatABI)) { 4105 RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr, 4106 CharUnits::fromQuantity(32)); 4107 } 4108 4109 // Get the address of the saved value by scaling the number of 4110 // registers we've used by the number of 4111 CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8); 4112 llvm::Value *RegOffset = 4113 Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity())); 4114 RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty, 4115 RegAddr.getPointer(), RegOffset), 4116 RegAddr.getAlignment().alignmentOfArrayElement(RegSize)); 4117 RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy); 4118 4119 // Increase the used-register count. 4120 NumRegs = 4121 Builder.CreateAdd(NumRegs, 4122 Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1)); 4123 Builder.CreateStore(NumRegs, NumRegsAddr); 4124 4125 CGF.EmitBranch(Cont); 4126 } 4127 4128 // Case 2: consume space in the overflow area. 4129 Address MemAddr = Address::invalid(); 4130 { 4131 CGF.EmitBlock(UsingOverflow); 4132 4133 Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr); 4134 4135 // Everything in the overflow area is rounded up to a size of at least 4. 4136 CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4); 4137 4138 CharUnits Size; 4139 if (!isIndirect) { 4140 auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty); 4141 Size = TypeInfo.first.alignTo(OverflowAreaAlign); 4142 } else { 4143 Size = CGF.getPointerSize(); 4144 } 4145 4146 Address OverflowAreaAddr = 4147 Builder.CreateStructGEP(VAList, 3, CharUnits::fromQuantity(4)); 4148 Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"), 4149 OverflowAreaAlign); 4150 // Round up address of argument to alignment 4151 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty); 4152 if (Align > OverflowAreaAlign) { 4153 llvm::Value *Ptr = OverflowArea.getPointer(); 4154 OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align), 4155 Align); 4156 } 4157 4158 MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy); 4159 4160 // Increase the overflow area. 4161 OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size); 4162 Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr); 4163 CGF.EmitBranch(Cont); 4164 } 4165 4166 CGF.EmitBlock(Cont); 4167 4168 // Merge the cases with a phi. 4169 Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow, 4170 "vaarg.addr"); 4171 4172 // Load the pointer if the argument was passed indirectly. 4173 if (isIndirect) { 4174 Result = Address(Builder.CreateLoad(Result, "aggr"), 4175 getContext().getTypeAlignInChars(Ty)); 4176 } 4177 4178 return Result; 4179 } 4180 4181 bool 4182 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4183 llvm::Value *Address) const { 4184 // This is calculated from the LLVM and GCC tables and verified 4185 // against gcc output. AFAIK all ABIs use the same encoding. 
4186 4187 CodeGen::CGBuilderTy &Builder = CGF.Builder; 4188 4189 llvm::IntegerType *i8 = CGF.Int8Ty; 4190 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 4191 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 4192 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 4193 4194 // 0-31: r0-31, the 4-byte general-purpose registers 4195 AssignToArrayRange(Builder, Address, Four8, 0, 31); 4196 4197 // 32-63: fp0-31, the 8-byte floating-point registers 4198 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 4199 4200 // 64-76 are various 4-byte special-purpose registers: 4201 // 64: mq 4202 // 65: lr 4203 // 66: ctr 4204 // 67: ap 4205 // 68-75 cr0-7 4206 // 76: xer 4207 AssignToArrayRange(Builder, Address, Four8, 64, 76); 4208 4209 // 77-108: v0-31, the 16-byte vector registers 4210 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 4211 4212 // 109: vrsave 4213 // 110: vscr 4214 // 111: spe_acc 4215 // 112: spefscr 4216 // 113: sfp 4217 AssignToArrayRange(Builder, Address, Four8, 109, 113); 4218 4219 return false; 4220 } 4221 4222 // PowerPC-64 4223 4224 namespace { 4225 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information. 4226 class PPC64_SVR4_ABIInfo : public ABIInfo { 4227 public: 4228 enum ABIKind { 4229 ELFv1 = 0, 4230 ELFv2 4231 }; 4232 4233 private: 4234 static const unsigned GPRBits = 64; 4235 ABIKind Kind; 4236 bool HasQPX; 4237 bool IsSoftFloatABI; 4238 4239 // A vector of float or double will be promoted to <4 x f32> or <4 x f64> and 4240 // will be passed in a QPX register. 4241 bool IsQPXVectorTy(const Type *Ty) const { 4242 if (!HasQPX) 4243 return false; 4244 4245 if (const VectorType *VT = Ty->getAs<VectorType>()) { 4246 unsigned NumElements = VT->getNumElements(); 4247 if (NumElements == 1) 4248 return false; 4249 4250 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) { 4251 if (getContext().getTypeSize(Ty) <= 256) 4252 return true; 4253 } else if (VT->getElementType()-> 4254 isSpecificBuiltinType(BuiltinType::Float)) { 4255 if (getContext().getTypeSize(Ty) <= 128) 4256 return true; 4257 } 4258 } 4259 4260 return false; 4261 } 4262 4263 bool IsQPXVectorTy(QualType Ty) const { 4264 return IsQPXVectorTy(Ty.getTypePtr()); 4265 } 4266 4267 public: 4268 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX, 4269 bool SoftFloatABI) 4270 : ABIInfo(CGT), Kind(Kind), HasQPX(HasQPX), 4271 IsSoftFloatABI(SoftFloatABI) {} 4272 4273 bool isPromotableTypeForABI(QualType Ty) const; 4274 CharUnits getParamTypeAlignment(QualType Ty) const; 4275 4276 ABIArgInfo classifyReturnType(QualType RetTy) const; 4277 ABIArgInfo classifyArgumentType(QualType Ty) const; 4278 4279 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 4280 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 4281 uint64_t Members) const override; 4282 4283 // TODO: We can add more logic to computeInfo to improve performance. 4284 // Example: For aggregate arguments that fit in a register, we could 4285 // use getDirectInReg (as is done below for structs containing a single 4286 // floating-point value) to avoid pushing them to memory on function 4287 // entry. This would require changing the logic in PPCISelLowering 4288 // when lowering the parameters in the caller and args in the callee. 
4289 void computeInfo(CGFunctionInfo &FI) const override { 4290 if (!getCXXABI().classifyReturnType(FI)) 4291 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 4292 for (auto &I : FI.arguments()) { 4293 // We rely on the default argument classification for the most part. 4294 // One exception: An aggregate containing a single floating-point 4295 // or vector item must be passed in a register if one is available. 4296 const Type *T = isSingleElementStruct(I.type, getContext()); 4297 if (T) { 4298 const BuiltinType *BT = T->getAs<BuiltinType>(); 4299 if (IsQPXVectorTy(T) || 4300 (T->isVectorType() && getContext().getTypeSize(T) == 128) || 4301 (BT && BT->isFloatingPoint())) { 4302 QualType QT(T, 0); 4303 I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT)); 4304 continue; 4305 } 4306 } 4307 I.info = classifyArgumentType(I.type); 4308 } 4309 } 4310 4311 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 4312 QualType Ty) const override; 4313 }; 4314 4315 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo { 4316 4317 public: 4318 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT, 4319 PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX, 4320 bool SoftFloatABI) 4321 : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind, HasQPX, 4322 SoftFloatABI)) {} 4323 4324 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 4325 // This is recovered from gcc output. 4326 return 1; // r1 is the dedicated stack pointer 4327 } 4328 4329 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4330 llvm::Value *Address) const override; 4331 }; 4332 4333 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo { 4334 public: 4335 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {} 4336 4337 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 4338 // This is recovered from gcc output. 4339 return 1; // r1 is the dedicated stack pointer 4340 } 4341 4342 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4343 llvm::Value *Address) const override; 4344 }; 4345 4346 } 4347 4348 // Return true if the ABI requires Ty to be passed sign- or zero- 4349 // extended to 64 bits. 4350 bool 4351 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const { 4352 // Treat an enum type as its underlying type. 4353 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 4354 Ty = EnumTy->getDecl()->getIntegerType(); 4355 4356 // Promotable integer types are required to be promoted by the ABI. 4357 if (Ty->isPromotableIntegerType()) 4358 return true; 4359 4360 // In addition to the usual promotable integer types, we also need to 4361 // extend all 32-bit types, since the ABI requires promotion to 64 bits. 4362 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 4363 switch (BT->getKind()) { 4364 case BuiltinType::Int: 4365 case BuiltinType::UInt: 4366 return true; 4367 default: 4368 break; 4369 } 4370 4371 return false; 4372 } 4373 4374 /// isAlignedParamType - Determine whether a type requires 16-byte or 4375 /// higher alignment in the parameter area. Always returns at least 8. 4376 CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const { 4377 // Complex types are passed just like their elements. 4378 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) 4379 Ty = CTy->getElementType(); 4380 4381 // Only vector types of size 16 bytes need alignment (larger types are 4382 // passed via reference, smaller types are not aligned). 
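  // Illustrative outcomes of the checks below (not exhaustive): a 16-byte
  // vector parameter gets a 16-byte slot, an aggregate whose natural alignment
  // is at least 16 bytes also gets 16 (or 32 under QPX when its alignment is 32
  // or more), and everything else keeps the default 8-byte doubleword.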
4383 if (IsQPXVectorTy(Ty)) { 4384 if (getContext().getTypeSize(Ty) > 128) 4385 return CharUnits::fromQuantity(32); 4386 4387 return CharUnits::fromQuantity(16); 4388 } else if (Ty->isVectorType()) { 4389 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8); 4390 } 4391 4392 // For single-element float/vector structs, we consider the whole type 4393 // to have the same alignment requirements as its single element. 4394 const Type *AlignAsType = nullptr; 4395 const Type *EltType = isSingleElementStruct(Ty, getContext()); 4396 if (EltType) { 4397 const BuiltinType *BT = EltType->getAs<BuiltinType>(); 4398 if (IsQPXVectorTy(EltType) || (EltType->isVectorType() && 4399 getContext().getTypeSize(EltType) == 128) || 4400 (BT && BT->isFloatingPoint())) 4401 AlignAsType = EltType; 4402 } 4403 4404 // Likewise for ELFv2 homogeneous aggregates. 4405 const Type *Base = nullptr; 4406 uint64_t Members = 0; 4407 if (!AlignAsType && Kind == ELFv2 && 4408 isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members)) 4409 AlignAsType = Base; 4410 4411 // With special case aggregates, only vector base types need alignment. 4412 if (AlignAsType && IsQPXVectorTy(AlignAsType)) { 4413 if (getContext().getTypeSize(AlignAsType) > 128) 4414 return CharUnits::fromQuantity(32); 4415 4416 return CharUnits::fromQuantity(16); 4417 } else if (AlignAsType) { 4418 return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8); 4419 } 4420 4421 // Otherwise, we only need alignment for any aggregate type that 4422 // has an alignment requirement of >= 16 bytes. 4423 if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) { 4424 if (HasQPX && getContext().getTypeAlign(Ty) >= 256) 4425 return CharUnits::fromQuantity(32); 4426 return CharUnits::fromQuantity(16); 4427 } 4428 4429 return CharUnits::fromQuantity(8); 4430 } 4431 4432 /// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous 4433 /// aggregate. Base is set to the base element type, and Members is set 4434 /// to the number of base elements. 4435 bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base, 4436 uint64_t &Members) const { 4437 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 4438 uint64_t NElements = AT->getSize().getZExtValue(); 4439 if (NElements == 0) 4440 return false; 4441 if (!isHomogeneousAggregate(AT->getElementType(), Base, Members)) 4442 return false; 4443 Members *= NElements; 4444 } else if (const RecordType *RT = Ty->getAs<RecordType>()) { 4445 const RecordDecl *RD = RT->getDecl(); 4446 if (RD->hasFlexibleArrayMember()) 4447 return false; 4448 4449 Members = 0; 4450 4451 // If this is a C++ record, check the bases first. 4452 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 4453 for (const auto &I : CXXRD->bases()) { 4454 // Ignore empty records. 4455 if (isEmptyRecord(getContext(), I.getType(), true)) 4456 continue; 4457 4458 uint64_t FldMembers; 4459 if (!isHomogeneousAggregate(I.getType(), Base, FldMembers)) 4460 return false; 4461 4462 Members += FldMembers; 4463 } 4464 } 4465 4466 for (const auto *FD : RD->fields()) { 4467 // Ignore (non-zero arrays of) empty records. 
4468 QualType FT = FD->getType(); 4469 while (const ConstantArrayType *AT = 4470 getContext().getAsConstantArrayType(FT)) { 4471 if (AT->getSize().getZExtValue() == 0) 4472 return false; 4473 FT = AT->getElementType(); 4474 } 4475 if (isEmptyRecord(getContext(), FT, true)) 4476 continue; 4477 4478 // For compatibility with GCC, ignore empty bitfields in C++ mode. 4479 if (getContext().getLangOpts().CPlusPlus && 4480 FD->isBitField() && FD->getBitWidthValue(getContext()) == 0) 4481 continue; 4482 4483 uint64_t FldMembers; 4484 if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers)) 4485 return false; 4486 4487 Members = (RD->isUnion() ? 4488 std::max(Members, FldMembers) : Members + FldMembers); 4489 } 4490 4491 if (!Base) 4492 return false; 4493 4494 // Ensure there is no padding. 4495 if (getContext().getTypeSize(Base) * Members != 4496 getContext().getTypeSize(Ty)) 4497 return false; 4498 } else { 4499 Members = 1; 4500 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 4501 Members = 2; 4502 Ty = CT->getElementType(); 4503 } 4504 4505 // Most ABIs only support float, double, and some vector type widths. 4506 if (!isHomogeneousAggregateBaseType(Ty)) 4507 return false; 4508 4509 // The base type must be the same for all members. Types that 4510 // agree in both total size and mode (float vs. vector) are 4511 // treated as being equivalent here. 4512 const Type *TyPtr = Ty.getTypePtr(); 4513 if (!Base) { 4514 Base = TyPtr; 4515 // If it's a non-power-of-2 vector, its size is already a power-of-2, 4516 // so make sure to widen it explicitly. 4517 if (const VectorType *VT = Base->getAs<VectorType>()) { 4518 QualType EltTy = VT->getElementType(); 4519 unsigned NumElements = 4520 getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy); 4521 Base = getContext() 4522 .getVectorType(EltTy, NumElements, VT->getVectorKind()) 4523 .getTypePtr(); 4524 } 4525 } 4526 4527 if (Base->isVectorType() != TyPtr->isVectorType() || 4528 getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr)) 4529 return false; 4530 } 4531 return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members); 4532 } 4533 4534 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 4535 // Homogeneous aggregates for ELFv2 must have base types of float, 4536 // double, long double, or 128-bit vectors. 4537 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 4538 if (BT->getKind() == BuiltinType::Float || 4539 BT->getKind() == BuiltinType::Double || 4540 BT->getKind() == BuiltinType::LongDouble) { 4541 if (IsSoftFloatABI) 4542 return false; 4543 return true; 4544 } 4545 } 4546 if (const VectorType *VT = Ty->getAs<VectorType>()) { 4547 if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty)) 4548 return true; 4549 } 4550 return false; 4551 } 4552 4553 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough( 4554 const Type *Base, uint64_t Members) const { 4555 // Vector types require one register, floating point types require one 4556 // or two registers depending on their size. 4557 uint32_t NumRegs = 4558 Base->isVectorType() ? 1 : (getContext().getTypeSize(Base) + 63) / 64; 4559 4560 // Homogeneous Aggregates may occupy at most 8 registers. 
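  // Illustrative boundary cases: a hypothetical float v[8] layout (eight
  // members, one register each) still qualifies, while float v[9] does not.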
4561 return Members * NumRegs <= 8; 4562 } 4563 4564 ABIArgInfo 4565 PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const { 4566 Ty = useFirstFieldIfTransparentUnion(Ty); 4567 4568 if (Ty->isAnyComplexType()) 4569 return ABIArgInfo::getDirect(); 4570 4571 // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes) 4572 // or via reference (larger than 16 bytes). 4573 if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) { 4574 uint64_t Size = getContext().getTypeSize(Ty); 4575 if (Size > 128) 4576 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 4577 else if (Size < 128) { 4578 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size); 4579 return ABIArgInfo::getDirect(CoerceTy); 4580 } 4581 } 4582 4583 if (isAggregateTypeForABI(Ty)) { 4584 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 4585 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 4586 4587 uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity(); 4588 uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity(); 4589 4590 // ELFv2 homogeneous aggregates are passed as array types. 4591 const Type *Base = nullptr; 4592 uint64_t Members = 0; 4593 if (Kind == ELFv2 && 4594 isHomogeneousAggregate(Ty, Base, Members)) { 4595 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0)); 4596 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members); 4597 return ABIArgInfo::getDirect(CoerceTy); 4598 } 4599 4600 // If an aggregate may end up fully in registers, we do not 4601 // use the ByVal method, but pass the aggregate as array. 4602 // This is usually beneficial since we avoid forcing the 4603 // back-end to store the argument to memory. 4604 uint64_t Bits = getContext().getTypeSize(Ty); 4605 if (Bits > 0 && Bits <= 8 * GPRBits) { 4606 llvm::Type *CoerceTy; 4607 4608 // Types up to 8 bytes are passed as integer type (which will be 4609 // properly aligned in the argument save area doubleword). 4610 if (Bits <= GPRBits) 4611 CoerceTy = 4612 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8)); 4613 // Larger types are passed as arrays, with the base type selected 4614 // according to the required alignment in the save area. 4615 else { 4616 uint64_t RegBits = ABIAlign * 8; 4617 uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits; 4618 llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits); 4619 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs); 4620 } 4621 4622 return ABIArgInfo::getDirect(CoerceTy); 4623 } 4624 4625 // All other aggregates are passed ByVal. 4626 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign), 4627 /*ByVal=*/true, 4628 /*Realign=*/TyAlign > ABIAlign); 4629 } 4630 4631 return (isPromotableTypeForABI(Ty) ? 4632 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 4633 } 4634 4635 ABIArgInfo 4636 PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const { 4637 if (RetTy->isVoidType()) 4638 return ABIArgInfo::getIgnore(); 4639 4640 if (RetTy->isAnyComplexType()) 4641 return ABIArgInfo::getDirect(); 4642 4643 // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes) 4644 // or via reference (larger than 16 bytes). 
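  // Illustrative cases for the checks below: an 8-byte generic vector return is
  // coerced to i64 and comes back in a GPR, while a 32-byte generic vector is
  // returned through a hidden pointer via getNaturalAlignIndirect.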
4645 if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) { 4646 uint64_t Size = getContext().getTypeSize(RetTy); 4647 if (Size > 128) 4648 return getNaturalAlignIndirect(RetTy); 4649 else if (Size < 128) { 4650 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size); 4651 return ABIArgInfo::getDirect(CoerceTy); 4652 } 4653 } 4654 4655 if (isAggregateTypeForABI(RetTy)) { 4656 // ELFv2 homogeneous aggregates are returned as array types. 4657 const Type *Base = nullptr; 4658 uint64_t Members = 0; 4659 if (Kind == ELFv2 && 4660 isHomogeneousAggregate(RetTy, Base, Members)) { 4661 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0)); 4662 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members); 4663 return ABIArgInfo::getDirect(CoerceTy); 4664 } 4665 4666 // ELFv2 small aggregates are returned in up to two registers. 4667 uint64_t Bits = getContext().getTypeSize(RetTy); 4668 if (Kind == ELFv2 && Bits <= 2 * GPRBits) { 4669 if (Bits == 0) 4670 return ABIArgInfo::getIgnore(); 4671 4672 llvm::Type *CoerceTy; 4673 if (Bits > GPRBits) { 4674 CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits); 4675 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy); 4676 } else 4677 CoerceTy = 4678 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8)); 4679 return ABIArgInfo::getDirect(CoerceTy); 4680 } 4681 4682 // All other aggregates are returned indirectly. 4683 return getNaturalAlignIndirect(RetTy); 4684 } 4685 4686 return (isPromotableTypeForABI(RetTy) ? 4687 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 4688 } 4689 4690 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine. 4691 Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 4692 QualType Ty) const { 4693 auto TypeInfo = getContext().getTypeInfoInChars(Ty); 4694 TypeInfo.second = getParamTypeAlignment(Ty); 4695 4696 CharUnits SlotSize = CharUnits::fromQuantity(8); 4697 4698 // If we have a complex type and the base type is smaller than 8 bytes, 4699 // the ABI calls for the real and imaginary parts to be right-adjusted 4700 // in separate doublewords. However, Clang expects us to produce a 4701 // pointer to a structure with the two parts packed tightly. So generate 4702 // loads of the real and imaginary parts relative to the va_list pointer, 4703 // and store them to a temporary structure. 
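  // For example, with _Complex float each 4-byte part is right-adjusted in
  // its own doubleword on big-endian targets (byte offsets 4 and 12 within
  // its two-doubleword area), and the parts are repacked contiguously below.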
4704 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { 4705 CharUnits EltSize = TypeInfo.first / 2; 4706 if (EltSize < SlotSize) { 4707 Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty, 4708 SlotSize * 2, SlotSize, 4709 SlotSize, /*AllowHigher*/ true); 4710 4711 Address RealAddr = Addr; 4712 Address ImagAddr = RealAddr; 4713 if (CGF.CGM.getDataLayout().isBigEndian()) { 4714 RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, 4715 SlotSize - EltSize); 4716 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr, 4717 2 * SlotSize - EltSize); 4718 } else { 4719 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize); 4720 } 4721 4722 llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType()); 4723 RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy); 4724 ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy); 4725 llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal"); 4726 llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag"); 4727 4728 Address Temp = CGF.CreateMemTemp(Ty, "vacplx"); 4729 CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty), 4730 /*init*/ true); 4731 return Temp; 4732 } 4733 } 4734 4735 // Otherwise, just use the general rule. 4736 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, 4737 TypeInfo, SlotSize, /*AllowHigher*/ true); 4738 } 4739 4740 static bool 4741 PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4742 llvm::Value *Address) { 4743 // This is calculated from the LLVM and GCC tables and verified 4744 // against gcc output. AFAIK all ABIs use the same encoding. 4745 4746 CodeGen::CGBuilderTy &Builder = CGF.Builder; 4747 4748 llvm::IntegerType *i8 = CGF.Int8Ty; 4749 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 4750 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 4751 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 4752 4753 // 0-31: r0-31, the 8-byte general-purpose registers 4754 AssignToArrayRange(Builder, Address, Eight8, 0, 31); 4755 4756 // 32-63: fp0-31, the 8-byte floating-point registers 4757 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 4758 4759 // 64-67 are various 8-byte special-purpose registers: 4760 // 64: mq 4761 // 65: lr 4762 // 66: ctr 4763 // 67: ap 4764 AssignToArrayRange(Builder, Address, Eight8, 64, 67); 4765 4766 // 68-76 are various 4-byte special-purpose registers: 4767 // 68-75 cr0-7 4768 // 76: xer 4769 AssignToArrayRange(Builder, Address, Four8, 68, 76); 4770 4771 // 77-108: v0-31, the 16-byte vector registers 4772 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 4773 4774 // 109: vrsave 4775 // 110: vscr 4776 // 111: spe_acc 4777 // 112: spefscr 4778 // 113: sfp 4779 // 114: tfhar 4780 // 115: tfiar 4781 // 116: texasr 4782 AssignToArrayRange(Builder, Address, Eight8, 109, 116); 4783 4784 return false; 4785 } 4786 4787 bool 4788 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable( 4789 CodeGen::CodeGenFunction &CGF, 4790 llvm::Value *Address) const { 4791 4792 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 4793 } 4794 4795 bool 4796 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4797 llvm::Value *Address) const { 4798 4799 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 4800 } 4801 4802 //===----------------------------------------------------------------------===// 4803 // AArch64 ABI Implementation 4804 //===----------------------------------------------------------------------===// 4805 4806 namespace { 4807 4808 class 
AArch64ABIInfo : public SwiftABIInfo { 4809 public: 4810 enum ABIKind { 4811 AAPCS = 0, 4812 DarwinPCS, 4813 Win64 4814 }; 4815 4816 private: 4817 ABIKind Kind; 4818 4819 public: 4820 AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) 4821 : SwiftABIInfo(CGT), Kind(Kind) {} 4822 4823 private: 4824 ABIKind getABIKind() const { return Kind; } 4825 bool isDarwinPCS() const { return Kind == DarwinPCS; } 4826 4827 ABIArgInfo classifyReturnType(QualType RetTy) const; 4828 ABIArgInfo classifyArgumentType(QualType RetTy) const; 4829 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 4830 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 4831 uint64_t Members) const override; 4832 4833 bool isIllegalVectorType(QualType Ty) const; 4834 4835 void computeInfo(CGFunctionInfo &FI) const override { 4836 if (!getCXXABI().classifyReturnType(FI)) 4837 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 4838 4839 for (auto &it : FI.arguments()) 4840 it.info = classifyArgumentType(it.type); 4841 } 4842 4843 Address EmitDarwinVAArg(Address VAListAddr, QualType Ty, 4844 CodeGenFunction &CGF) const; 4845 4846 Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty, 4847 CodeGenFunction &CGF) const; 4848 4849 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 4850 QualType Ty) const override { 4851 return Kind == Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty) 4852 : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF) 4853 : EmitAAPCSVAArg(VAListAddr, Ty, CGF); 4854 } 4855 4856 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, 4857 QualType Ty) const override; 4858 4859 bool shouldPassIndirectlyForSwift(CharUnits totalSize, 4860 ArrayRef<llvm::Type*> scalars, 4861 bool asReturnValue) const override { 4862 return occupiesMoreThan(CGT, scalars, /*total*/ 4); 4863 } 4864 bool isSwiftErrorInRegister() const override { 4865 return true; 4866 } 4867 4868 bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy, 4869 unsigned elts) const override; 4870 }; 4871 4872 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo { 4873 public: 4874 AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind) 4875 : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {} 4876 4877 StringRef getARCRetainAutoreleasedReturnValueMarker() const override { 4878 return "mov\tfp, fp\t\t# marker for objc_retainAutoreleaseReturnValue"; 4879 } 4880 4881 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 4882 return 31; 4883 } 4884 4885 bool doesReturnSlotInterfereWithArgs() const override { return false; } 4886 }; 4887 4888 class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo { 4889 public: 4890 WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind K) 4891 : AArch64TargetCodeGenInfo(CGT, K) {} 4892 4893 void getDependentLibraryOption(llvm::StringRef Lib, 4894 llvm::SmallString<24> &Opt) const override { 4895 Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib); 4896 } 4897 4898 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value, 4899 llvm::SmallString<32> &Opt) const override { 4900 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 4901 } 4902 }; 4903 } 4904 4905 ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const { 4906 Ty = useFirstFieldIfTransparentUnion(Ty); 4907 4908 // Handle illegal vector types here. 
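  // For example, a <2 x i8> argument (16 bits) is passed as an i16 on Android
  // and as an i32 elsewhere.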
4909 if (isIllegalVectorType(Ty)) { 4910 uint64_t Size = getContext().getTypeSize(Ty); 4911 // Android promotes <2 x i8> to i16, not i32 4912 if (isAndroid() && (Size <= 16)) { 4913 llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext()); 4914 return ABIArgInfo::getDirect(ResType); 4915 } 4916 if (Size <= 32) { 4917 llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext()); 4918 return ABIArgInfo::getDirect(ResType); 4919 } 4920 if (Size == 64) { 4921 llvm::Type *ResType = 4922 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2); 4923 return ABIArgInfo::getDirect(ResType); 4924 } 4925 if (Size == 128) { 4926 llvm::Type *ResType = 4927 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4); 4928 return ABIArgInfo::getDirect(ResType); 4929 } 4930 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 4931 } 4932 4933 if (!isAggregateTypeForABI(Ty)) { 4934 // Treat an enum type as its underlying type. 4935 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 4936 Ty = EnumTy->getDecl()->getIntegerType(); 4937 4938 return (Ty->isPromotableIntegerType() && isDarwinPCS() 4939 ? ABIArgInfo::getExtend() 4940 : ABIArgInfo::getDirect()); 4941 } 4942 4943 // Structures with either a non-trivial destructor or a non-trivial 4944 // copy constructor are always indirect. 4945 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 4946 return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA == 4947 CGCXXABI::RAA_DirectInMemory); 4948 } 4949 4950 // Empty records are always ignored on Darwin, but actually passed in C++ mode 4951 // elsewhere for GNU compatibility. 4952 uint64_t Size = getContext().getTypeSize(Ty); 4953 bool IsEmpty = isEmptyRecord(getContext(), Ty, true); 4954 if (IsEmpty || Size == 0) { 4955 if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS()) 4956 return ABIArgInfo::getIgnore(); 4957 4958 // GNU C mode. The only argument that gets ignored is an empty one with size 4959 // 0. 4960 if (IsEmpty && Size == 0) 4961 return ABIArgInfo::getIgnore(); 4962 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 4963 } 4964 4965 // Homogeneous Floating-point Aggregates (HFAs) need to be expanded. 4966 const Type *Base = nullptr; 4967 uint64_t Members = 0; 4968 if (isHomogeneousAggregate(Ty, Base, Members)) { 4969 return ABIArgInfo::getDirect( 4970 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members)); 4971 } 4972 4973 // Aggregates <= 16 bytes are passed directly in registers or on the stack. 4974 if (Size <= 128) { 4975 // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of 4976 // same size and alignment. 4977 if (getTarget().isRenderScriptTarget()) { 4978 return coerceToIntArray(Ty, getContext(), getVMContext()); 4979 } 4980 unsigned Alignment = getContext().getTypeAlign(Ty); 4981 Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes 4982 4983 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment. 4984 // For aggregates with 16-byte alignment, we use i128. 
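    // For example, a struct of three ints (96 bits, 4-byte aligned) rounds up
    // to 128 bits and is passed as [2 x i64]; a 16-byte-aligned 16-byte
    // struct is instead passed as a single i128.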
4985 if (Alignment < 128 && Size == 128) { 4986 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext()); 4987 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64)); 4988 } 4989 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size)); 4990 } 4991 4992 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 4993 } 4994 4995 ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const { 4996 if (RetTy->isVoidType()) 4997 return ABIArgInfo::getIgnore(); 4998 4999 // Large vector types should be returned via memory. 5000 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) 5001 return getNaturalAlignIndirect(RetTy); 5002 5003 if (!isAggregateTypeForABI(RetTy)) { 5004 // Treat an enum type as its underlying type. 5005 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 5006 RetTy = EnumTy->getDecl()->getIntegerType(); 5007 5008 return (RetTy->isPromotableIntegerType() && isDarwinPCS() 5009 ? ABIArgInfo::getExtend() 5010 : ABIArgInfo::getDirect()); 5011 } 5012 5013 uint64_t Size = getContext().getTypeSize(RetTy); 5014 if (isEmptyRecord(getContext(), RetTy, true) || Size == 0) 5015 return ABIArgInfo::getIgnore(); 5016 5017 const Type *Base = nullptr; 5018 uint64_t Members = 0; 5019 if (isHomogeneousAggregate(RetTy, Base, Members)) 5020 // Homogeneous Floating-point Aggregates (HFAs) are returned directly. 5021 return ABIArgInfo::getDirect(); 5022 5023 // Aggregates <= 16 bytes are returned directly in registers or on the stack. 5024 if (Size <= 128) { 5025 // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of 5026 // same size and alignment. 5027 if (getTarget().isRenderScriptTarget()) { 5028 return coerceToIntArray(RetTy, getContext(), getVMContext()); 5029 } 5030 unsigned Alignment = getContext().getTypeAlign(RetTy); 5031 Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes 5032 5033 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment. 5034 // For aggregates with 16-byte alignment, we use i128. 5035 if (Alignment < 128 && Size == 128) { 5036 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext()); 5037 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64)); 5038 } 5039 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size)); 5040 } 5041 5042 return getNaturalAlignIndirect(RetTy); 5043 } 5044 5045 /// isIllegalVectorType - check whether the vector type is legal for AArch64. 5046 bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const { 5047 if (const VectorType *VT = Ty->getAs<VectorType>()) { 5048 // Check whether VT is legal. 5049 unsigned NumElements = VT->getNumElements(); 5050 uint64_t Size = getContext().getTypeSize(VT); 5051 // NumElements should be power of 2. 5052 if (!llvm::isPowerOf2_32(NumElements)) 5053 return true; 5054 return Size != 64 && (Size != 128 || NumElements == 1); 5055 } 5056 return false; 5057 } 5058 5059 bool AArch64ABIInfo::isLegalVectorTypeForSwift(CharUnits totalSize, 5060 llvm::Type *eltTy, 5061 unsigned elts) const { 5062 if (!llvm::isPowerOf2_32(elts)) 5063 return false; 5064 if (totalSize.getQuantity() != 8 && 5065 (totalSize.getQuantity() != 16 || elts == 1)) 5066 return false; 5067 return true; 5068 } 5069 5070 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 5071 // Homogeneous aggregates for AAPCS64 must have base types of a floating 5072 // point type or a short-vector type. 
This is the same as the 32-bit ABI, 5073 // but with the difference that any floating-point type is allowed, 5074 // including __fp16. 5075 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 5076 if (BT->isFloatingPoint()) 5077 return true; 5078 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 5079 unsigned VecSize = getContext().getTypeSize(VT); 5080 if (VecSize == 64 || VecSize == 128) 5081 return true; 5082 } 5083 return false; 5084 } 5085 5086 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, 5087 uint64_t Members) const { 5088 return Members <= 4; 5089 } 5090 5091 Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, 5092 QualType Ty, 5093 CodeGenFunction &CGF) const { 5094 ABIArgInfo AI = classifyArgumentType(Ty); 5095 bool IsIndirect = AI.isIndirect(); 5096 5097 llvm::Type *BaseTy = CGF.ConvertType(Ty); 5098 if (IsIndirect) 5099 BaseTy = llvm::PointerType::getUnqual(BaseTy); 5100 else if (AI.getCoerceToType()) 5101 BaseTy = AI.getCoerceToType(); 5102 5103 unsigned NumRegs = 1; 5104 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) { 5105 BaseTy = ArrTy->getElementType(); 5106 NumRegs = ArrTy->getNumElements(); 5107 } 5108 bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy(); 5109 5110 // The AArch64 va_list type and handling is specified in the Procedure Call 5111 // Standard, section B.4: 5112 // 5113 // struct { 5114 // void *__stack; 5115 // void *__gr_top; 5116 // void *__vr_top; 5117 // int __gr_offs; 5118 // int __vr_offs; 5119 // }; 5120 5121 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg"); 5122 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 5123 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack"); 5124 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 5125 5126 auto TyInfo = getContext().getTypeInfoInChars(Ty); 5127 CharUnits TyAlign = TyInfo.second; 5128 5129 Address reg_offs_p = Address::invalid(); 5130 llvm::Value *reg_offs = nullptr; 5131 int reg_top_index; 5132 CharUnits reg_top_offset; 5133 int RegSize = IsIndirect ? 8 : TyInfo.first.getQuantity(); 5134 if (!IsFPR) { 5135 // 3 is the field number of __gr_offs 5136 reg_offs_p = 5137 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24), 5138 "gr_offs_p"); 5139 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs"); 5140 reg_top_index = 1; // field number for __gr_top 5141 reg_top_offset = CharUnits::fromQuantity(8); 5142 RegSize = llvm::alignTo(RegSize, 8); 5143 } else { 5144 // 4 is the field number of __vr_offs. 5145 reg_offs_p = 5146 CGF.Builder.CreateStructGEP(VAListAddr, 4, CharUnits::fromQuantity(28), 5147 "vr_offs_p"); 5148 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs"); 5149 reg_top_index = 2; // field number for __vr_top 5150 reg_top_offset = CharUnits::fromQuantity(16); 5151 RegSize = 16 * NumRegs; 5152 } 5153 5154 //======================================= 5155 // Find out where argument was passed 5156 //======================================= 5157 5158 // If reg_offs >= 0 we're already using the stack for this type of 5159 // argument. We don't want to keep updating reg_offs (in case it overflows, 5160 // though anyone passing 2GB of arguments, each at most 16 bytes, deserves 5161 // whatever they get). 
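  // (__gr_offs and __vr_offs start at or below zero and count upwards as
  //  register save area slots are consumed; once they reach zero or above,
  //  the corresponding argument registers are exhausted.)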
5162 llvm::Value *UsingStack = nullptr; 5163 UsingStack = CGF.Builder.CreateICmpSGE( 5164 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0)); 5165 5166 CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock); 5167 5168 // Otherwise, at least some kind of argument could go in these registers, the 5169 // question is whether this particular type is too big. 5170 CGF.EmitBlock(MaybeRegBlock); 5171 5172 // Integer arguments may need to correct register alignment (for example a 5173 // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we 5174 // align __gr_offs to calculate the potential address. 5175 if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) { 5176 int Align = TyAlign.getQuantity(); 5177 5178 reg_offs = CGF.Builder.CreateAdd( 5179 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1), 5180 "align_regoffs"); 5181 reg_offs = CGF.Builder.CreateAnd( 5182 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align), 5183 "aligned_regoffs"); 5184 } 5185 5186 // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list. 5187 // The fact that this is done unconditionally reflects the fact that 5188 // allocating an argument to the stack also uses up all the remaining 5189 // registers of the appropriate kind. 5190 llvm::Value *NewOffset = nullptr; 5191 NewOffset = CGF.Builder.CreateAdd( 5192 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs"); 5193 CGF.Builder.CreateStore(NewOffset, reg_offs_p); 5194 5195 // Now we're in a position to decide whether this argument really was in 5196 // registers or not. 5197 llvm::Value *InRegs = nullptr; 5198 InRegs = CGF.Builder.CreateICmpSLE( 5199 NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg"); 5200 5201 CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock); 5202 5203 //======================================= 5204 // Argument was in registers 5205 //======================================= 5206 5207 // Now we emit the code for if the argument was originally passed in 5208 // registers. First start the appropriate block: 5209 CGF.EmitBlock(InRegBlock); 5210 5211 llvm::Value *reg_top = nullptr; 5212 Address reg_top_p = CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, 5213 reg_top_offset, "reg_top_p"); 5214 reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top"); 5215 Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs), 5216 CharUnits::fromQuantity(IsFPR ? 16 : 8)); 5217 Address RegAddr = Address::invalid(); 5218 llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty); 5219 5220 if (IsIndirect) { 5221 // If it's been passed indirectly (actually a struct), whatever we find from 5222 // stored registers or on the stack will actually be a struct **. 5223 MemTy = llvm::PointerType::getUnqual(MemTy); 5224 } 5225 5226 const Type *Base = nullptr; 5227 uint64_t NumMembers = 0; 5228 bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers); 5229 if (IsHFA && NumMembers > 1) { 5230 // Homogeneous aggregates passed in registers will have their elements split 5231 // and stored 16-bytes apart regardless of size (they're notionally in qN, 5232 // qN+1, ...). We reload and store into a temporary local variable 5233 // contiguously. 
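    // For example, an HFA of four floats is loaded from offsets 0, 16, 32 and
    // 48 relative to reg_top + reg_offs (plus a right-adjustment on
    // big-endian) and stored into a contiguous 16-byte temporary.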
5234 assert(!IsIndirect && "Homogeneous aggregates should be passed directly"); 5235 auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0)); 5236 llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0)); 5237 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers); 5238 Address Tmp = CGF.CreateTempAlloca(HFATy, 5239 std::max(TyAlign, BaseTyInfo.second)); 5240 5241 // On big-endian platforms, the value will be right-aligned in its slot. 5242 int Offset = 0; 5243 if (CGF.CGM.getDataLayout().isBigEndian() && 5244 BaseTyInfo.first.getQuantity() < 16) 5245 Offset = 16 - BaseTyInfo.first.getQuantity(); 5246 5247 for (unsigned i = 0; i < NumMembers; ++i) { 5248 CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset); 5249 Address LoadAddr = 5250 CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset); 5251 LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy); 5252 5253 Address StoreAddr = 5254 CGF.Builder.CreateConstArrayGEP(Tmp, i, BaseTyInfo.first); 5255 5256 llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr); 5257 CGF.Builder.CreateStore(Elem, StoreAddr); 5258 } 5259 5260 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy); 5261 } else { 5262 // Otherwise the object is contiguous in memory. 5263 5264 // It might be right-aligned in its slot. 5265 CharUnits SlotSize = BaseAddr.getAlignment(); 5266 if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect && 5267 (IsHFA || !isAggregateTypeForABI(Ty)) && 5268 TyInfo.first < SlotSize) { 5269 CharUnits Offset = SlotSize - TyInfo.first; 5270 BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset); 5271 } 5272 5273 RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy); 5274 } 5275 5276 CGF.EmitBranch(ContBlock); 5277 5278 //======================================= 5279 // Argument was on the stack 5280 //======================================= 5281 CGF.EmitBlock(OnStackBlock); 5282 5283 Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, 5284 CharUnits::Zero(), "stack_p"); 5285 llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack"); 5286 5287 // Again, stack arguments may need realignment. In this case both integer and 5288 // floating-point ones might be affected. 5289 if (!IsIndirect && TyAlign.getQuantity() > 8) { 5290 int Align = TyAlign.getQuantity(); 5291 5292 OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty); 5293 5294 OnStackPtr = CGF.Builder.CreateAdd( 5295 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1), 5296 "align_stack"); 5297 OnStackPtr = CGF.Builder.CreateAnd( 5298 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align), 5299 "align_stack"); 5300 5301 OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy); 5302 } 5303 Address OnStackAddr(OnStackPtr, 5304 std::max(CharUnits::fromQuantity(8), TyAlign)); 5305 5306 // All stack slots are multiples of 8 bytes. 
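  // For example, a 12-byte struct consumes 16 bytes of stack here, while an
  // indirectly passed argument only consumes the 8 bytes of its pointer.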
5307 CharUnits StackSlotSize = CharUnits::fromQuantity(8); 5308 CharUnits StackSize; 5309 if (IsIndirect) 5310 StackSize = StackSlotSize; 5311 else 5312 StackSize = TyInfo.first.alignTo(StackSlotSize); 5313 5314 llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize); 5315 llvm::Value *NewStack = 5316 CGF.Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC, "new_stack"); 5317 5318 // Write the new value of __stack for the next call to va_arg 5319 CGF.Builder.CreateStore(NewStack, stack_p); 5320 5321 if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) && 5322 TyInfo.first < StackSlotSize) { 5323 CharUnits Offset = StackSlotSize - TyInfo.first; 5324 OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset); 5325 } 5326 5327 OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy); 5328 5329 CGF.EmitBranch(ContBlock); 5330 5331 //======================================= 5332 // Tidy up 5333 //======================================= 5334 CGF.EmitBlock(ContBlock); 5335 5336 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, 5337 OnStackAddr, OnStackBlock, "vaargs.addr"); 5338 5339 if (IsIndirect) 5340 return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"), 5341 TyInfo.second); 5342 5343 return ResAddr; 5344 } 5345 5346 Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty, 5347 CodeGenFunction &CGF) const { 5348 // The backend's lowering doesn't support va_arg for aggregates or 5349 // illegal vector types. Lower VAArg here for these cases and use 5350 // the LLVM va_arg instruction for everything else. 5351 if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty)) 5352 return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect()); 5353 5354 CharUnits SlotSize = CharUnits::fromQuantity(8); 5355 5356 // Empty records are ignored for parameter passing purposes. 5357 if (isEmptyRecord(getContext(), Ty, true)) { 5358 Address Addr(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize); 5359 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty)); 5360 return Addr; 5361 } 5362 5363 // The size of the actual thing passed, which might end up just 5364 // being a pointer for indirect types. 5365 auto TyInfo = getContext().getTypeInfoInChars(Ty); 5366 5367 // Arguments bigger than 16 bytes which aren't homogeneous 5368 // aggregates should be passed indirectly. 
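  // For example, a 32-byte struct of four doubles is still a homogeneous
  // aggregate and stays direct, whereas a 24-byte struct of three pointers
  // is fetched through a pointer here.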
5369 bool IsIndirect = false; 5370 if (TyInfo.first.getQuantity() > 16) { 5371 const Type *Base = nullptr; 5372 uint64_t Members = 0; 5373 IsIndirect = !isHomogeneousAggregate(Ty, Base, Members); 5374 } 5375 5376 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, 5377 TyInfo, SlotSize, /*AllowHigherAlign*/ true); 5378 } 5379 5380 Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr, 5381 QualType Ty) const { 5382 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false, 5383 CGF.getContext().getTypeInfoInChars(Ty), 5384 CharUnits::fromQuantity(8), 5385 /*allowHigherAlign*/ false); 5386 } 5387 5388 //===----------------------------------------------------------------------===// 5389 // ARM ABI Implementation 5390 //===----------------------------------------------------------------------===// 5391 5392 namespace { 5393 5394 class ARMABIInfo : public SwiftABIInfo { 5395 public: 5396 enum ABIKind { 5397 APCS = 0, 5398 AAPCS = 1, 5399 AAPCS_VFP = 2, 5400 AAPCS16_VFP = 3, 5401 }; 5402 5403 private: 5404 ABIKind Kind; 5405 5406 public: 5407 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) 5408 : SwiftABIInfo(CGT), Kind(_Kind) { 5409 setCCs(); 5410 } 5411 5412 bool isEABI() const { 5413 switch (getTarget().getTriple().getEnvironment()) { 5414 case llvm::Triple::Android: 5415 case llvm::Triple::EABI: 5416 case llvm::Triple::EABIHF: 5417 case llvm::Triple::GNUEABI: 5418 case llvm::Triple::GNUEABIHF: 5419 case llvm::Triple::MuslEABI: 5420 case llvm::Triple::MuslEABIHF: 5421 return true; 5422 default: 5423 return false; 5424 } 5425 } 5426 5427 bool isEABIHF() const { 5428 switch (getTarget().getTriple().getEnvironment()) { 5429 case llvm::Triple::EABIHF: 5430 case llvm::Triple::GNUEABIHF: 5431 case llvm::Triple::MuslEABIHF: 5432 return true; 5433 default: 5434 return false; 5435 } 5436 } 5437 5438 ABIKind getABIKind() const { return Kind; } 5439 5440 private: 5441 ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const; 5442 ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic) const; 5443 bool isIllegalVectorType(QualType Ty) const; 5444 5445 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 5446 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 5447 uint64_t Members) const override; 5448 5449 void computeInfo(CGFunctionInfo &FI) const override; 5450 5451 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 5452 QualType Ty) const override; 5453 5454 llvm::CallingConv::ID getLLVMDefaultCC() const; 5455 llvm::CallingConv::ID getABIDefaultCC() const; 5456 void setCCs(); 5457 5458 bool shouldPassIndirectlyForSwift(CharUnits totalSize, 5459 ArrayRef<llvm::Type*> scalars, 5460 bool asReturnValue) const override { 5461 return occupiesMoreThan(CGT, scalars, /*total*/ 4); 5462 } 5463 bool isSwiftErrorInRegister() const override { 5464 return true; 5465 } 5466 bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy, 5467 unsigned elts) const override; 5468 }; 5469 5470 class ARMTargetCodeGenInfo : public TargetCodeGenInfo { 5471 public: 5472 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 5473 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {} 5474 5475 const ARMABIInfo &getABIInfo() const { 5476 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo()); 5477 } 5478 5479 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 5480 return 13; 5481 } 5482 5483 StringRef getARCRetainAutoreleasedReturnValueMarker() const override { 5484 return "mov\tr7, r7\t\t@ marker for 
objc_retainAutoreleaseReturnValue"; 5485 } 5486 5487 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 5488 llvm::Value *Address) const override { 5489 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 5490 5491 // 0-15 are the 16 integer registers. 5492 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15); 5493 return false; 5494 } 5495 5496 unsigned getSizeOfUnwindException() const override { 5497 if (getABIInfo().isEABI()) return 88; 5498 return TargetCodeGenInfo::getSizeOfUnwindException(); 5499 } 5500 5501 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 5502 CodeGen::CodeGenModule &CGM, 5503 ForDefinition_t IsForDefinition) const override { 5504 if (!IsForDefinition) 5505 return; 5506 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 5507 if (!FD) 5508 return; 5509 5510 const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>(); 5511 if (!Attr) 5512 return; 5513 5514 const char *Kind; 5515 switch (Attr->getInterrupt()) { 5516 case ARMInterruptAttr::Generic: Kind = ""; break; 5517 case ARMInterruptAttr::IRQ: Kind = "IRQ"; break; 5518 case ARMInterruptAttr::FIQ: Kind = "FIQ"; break; 5519 case ARMInterruptAttr::SWI: Kind = "SWI"; break; 5520 case ARMInterruptAttr::ABORT: Kind = "ABORT"; break; 5521 case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break; 5522 } 5523 5524 llvm::Function *Fn = cast<llvm::Function>(GV); 5525 5526 Fn->addFnAttr("interrupt", Kind); 5527 5528 ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind(); 5529 if (ABI == ARMABIInfo::APCS) 5530 return; 5531 5532 // AAPCS guarantees that sp will be 8-byte aligned on any public interface, 5533 // however this is not necessarily true on taking any interrupt. Instruct 5534 // the backend to perform a realignment as part of the function prologue. 5535 llvm::AttrBuilder B; 5536 B.addStackAlignmentAttr(8); 5537 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B); 5538 } 5539 }; 5540 5541 class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo { 5542 public: 5543 WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 5544 : ARMTargetCodeGenInfo(CGT, K) {} 5545 5546 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 5547 CodeGen::CodeGenModule &CGM, 5548 ForDefinition_t IsForDefinition) const override; 5549 5550 void getDependentLibraryOption(llvm::StringRef Lib, 5551 llvm::SmallString<24> &Opt) const override { 5552 Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib); 5553 } 5554 5555 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value, 5556 llvm::SmallString<32> &Opt) const override { 5557 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 5558 } 5559 }; 5560 5561 void WindowsARMTargetCodeGenInfo::setTargetAttributes( 5562 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM, 5563 ForDefinition_t IsForDefinition) const { 5564 ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM, IsForDefinition); 5565 if (!IsForDefinition) 5566 return; 5567 addStackProbeSizeTargetAttribute(D, GV, CGM); 5568 } 5569 } 5570 5571 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const { 5572 if (!getCXXABI().classifyReturnType(FI)) 5573 FI.getReturnInfo() = 5574 classifyReturnType(FI.getReturnType(), FI.isVariadic()); 5575 5576 for (auto &I : FI.arguments()) 5577 I.info = classifyArgumentType(I.type, FI.isVariadic()); 5578 5579 // Always honor user-specified calling convention. 
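  // For example, a function explicitly annotated with __attribute__((pcs))
  // already carries a non-C convention and is left untouched below.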
5580 if (FI.getCallingConvention() != llvm::CallingConv::C) 5581 return; 5582 5583 llvm::CallingConv::ID cc = getRuntimeCC(); 5584 if (cc != llvm::CallingConv::C) 5585 FI.setEffectiveCallingConvention(cc); 5586 } 5587 5588 /// Return the default calling convention that LLVM will use. 5589 llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const { 5590 // The default calling convention that LLVM will infer. 5591 if (isEABIHF() || getTarget().getTriple().isWatchABI()) 5592 return llvm::CallingConv::ARM_AAPCS_VFP; 5593 else if (isEABI()) 5594 return llvm::CallingConv::ARM_AAPCS; 5595 else 5596 return llvm::CallingConv::ARM_APCS; 5597 } 5598 5599 /// Return the calling convention that our ABI would like us to use 5600 /// as the C calling convention. 5601 llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const { 5602 switch (getABIKind()) { 5603 case APCS: return llvm::CallingConv::ARM_APCS; 5604 case AAPCS: return llvm::CallingConv::ARM_AAPCS; 5605 case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; 5606 case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; 5607 } 5608 llvm_unreachable("bad ABI kind"); 5609 } 5610 5611 void ARMABIInfo::setCCs() { 5612 assert(getRuntimeCC() == llvm::CallingConv::C); 5613 5614 // Don't muddy up the IR with a ton of explicit annotations if 5615 // they'd just match what LLVM will infer from the triple. 5616 llvm::CallingConv::ID abiCC = getABIDefaultCC(); 5617 if (abiCC != getLLVMDefaultCC()) 5618 RuntimeCC = abiCC; 5619 5620 // AAPCS apparently requires runtime support functions to be soft-float, but 5621 // that's almost certainly for historic reasons (Thumb1 not supporting VFP 5622 // most likely). It's more convenient for AAPCS16_VFP to be hard-float. 5623 5624 // The Run-time ABI for the ARM Architecture section 4.1.2 requires 5625 // AEABI-complying FP helper functions to use the base AAPCS. 5626 // These AEABI functions are expanded in the ARM llvm backend, all the builtin 5627 // support functions emitted by clang such as the _Complex helpers follow the 5628 // abiCC. 5629 if (abiCC != getLLVMDefaultCC()) 5630 BuiltinCC = abiCC; 5631 } 5632 5633 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, 5634 bool isVariadic) const { 5635 // 6.1.2.1 The following argument types are VFP CPRCs: 5636 // A single-precision floating-point type (including promoted 5637 // half-precision types); A double-precision floating-point type; 5638 // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate 5639 // with a Base Type of a single- or double-precision floating-point type, 5640 // 64-bit containerized vectors or 128-bit containerized vectors with one 5641 // to four Elements. 5642 bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic; 5643 5644 Ty = useFirstFieldIfTransparentUnion(Ty); 5645 5646 // Handle illegal vector types here. 
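  // For example, outside Android a <2 x i8> (only 16 bits wide) counts as
  // illegal and is passed widened to an i32.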
5647 if (isIllegalVectorType(Ty)) { 5648 uint64_t Size = getContext().getTypeSize(Ty); 5649 if (Size <= 32) { 5650 llvm::Type *ResType = 5651 llvm::Type::getInt32Ty(getVMContext()); 5652 return ABIArgInfo::getDirect(ResType); 5653 } 5654 if (Size == 64) { 5655 llvm::Type *ResType = llvm::VectorType::get( 5656 llvm::Type::getInt32Ty(getVMContext()), 2); 5657 return ABIArgInfo::getDirect(ResType); 5658 } 5659 if (Size == 128) { 5660 llvm::Type *ResType = llvm::VectorType::get( 5661 llvm::Type::getInt32Ty(getVMContext()), 4); 5662 return ABIArgInfo::getDirect(ResType); 5663 } 5664 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 5665 } 5666 5667 // __fp16 gets passed as if it were an int or float, but with the top 16 bits 5668 // unspecified. This is not done for OpenCL as it handles the half type 5669 // natively, and does not need to interwork with AAPCS code. 5670 if (Ty->isHalfType() && !getContext().getLangOpts().NativeHalfArgsAndReturns) { 5671 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ? 5672 llvm::Type::getFloatTy(getVMContext()) : 5673 llvm::Type::getInt32Ty(getVMContext()); 5674 return ABIArgInfo::getDirect(ResType); 5675 } 5676 5677 if (!isAggregateTypeForABI(Ty)) { 5678 // Treat an enum type as its underlying type. 5679 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) { 5680 Ty = EnumTy->getDecl()->getIntegerType(); 5681 } 5682 5683 return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend() 5684 : ABIArgInfo::getDirect()); 5685 } 5686 5687 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 5688 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 5689 } 5690 5691 // Ignore empty records. 5692 if (isEmptyRecord(getContext(), Ty, true)) 5693 return ABIArgInfo::getIgnore(); 5694 5695 if (IsEffectivelyAAPCS_VFP) { 5696 // Homogeneous Aggregates need to be expanded when we can fit the aggregate 5697 // into VFP registers. 5698 const Type *Base = nullptr; 5699 uint64_t Members = 0; 5700 if (isHomogeneousAggregate(Ty, Base, Members)) { 5701 assert(Base && "Base class should be set for homogeneous aggregate"); 5702 // Base can be a floating-point or a vector. 5703 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false); 5704 } 5705 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) { 5706 // WatchOS does have homogeneous aggregates. Note that we intentionally use 5707 // this convention even for a variadic function: the backend will use GPRs 5708 // if needed. 5709 const Type *Base = nullptr; 5710 uint64_t Members = 0; 5711 if (isHomogeneousAggregate(Ty, Base, Members)) { 5712 assert(Base && Members <= 4 && "unexpected homogeneous aggregate"); 5713 llvm::Type *Ty = 5714 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members); 5715 return ABIArgInfo::getDirect(Ty, 0, nullptr, false); 5716 } 5717 } 5718 5719 if (getABIKind() == ARMABIInfo::AAPCS16_VFP && 5720 getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) { 5721 // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're 5722 // bigger than 128-bits, they get placed in space allocated by the caller, 5723 // and a pointer is passed. 5724 return ABIArgInfo::getIndirect( 5725 CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false); 5726 } 5727 5728 // Support byval for ARM. 5729 // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at 5730 // most 8-byte. We realign the indirect argument if type alignment is bigger 5731 // than ABI alignment. 
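  // For example, a 128-byte struct with 16-byte alignment under AAPCS is
  // passed byval with an 8-byte ABI alignment and Realign set, since its
  // natural alignment exceeds the ABI's 8-byte cap.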
5732 uint64_t ABIAlign = 4; 5733 uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8; 5734 if (getABIKind() == ARMABIInfo::AAPCS_VFP || 5735 getABIKind() == ARMABIInfo::AAPCS) 5736 ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8); 5737 5738 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) { 5739 assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval"); 5740 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign), 5741 /*ByVal=*/true, 5742 /*Realign=*/TyAlign > ABIAlign); 5743 } 5744 5745 // On RenderScript, coerce Aggregates <= 64 bytes to an integer array of 5746 // same size and alignment. 5747 if (getTarget().isRenderScriptTarget()) { 5748 return coerceToIntArray(Ty, getContext(), getVMContext()); 5749 } 5750 5751 // Otherwise, pass by coercing to a structure of the appropriate size. 5752 llvm::Type* ElemTy; 5753 unsigned SizeRegs; 5754 // FIXME: Try to match the types of the arguments more accurately where 5755 // we can. 5756 if (getContext().getTypeAlign(Ty) <= 32) { 5757 ElemTy = llvm::Type::getInt32Ty(getVMContext()); 5758 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32; 5759 } else { 5760 ElemTy = llvm::Type::getInt64Ty(getVMContext()); 5761 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64; 5762 } 5763 5764 return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs)); 5765 } 5766 5767 static bool isIntegerLikeType(QualType Ty, ASTContext &Context, 5768 llvm::LLVMContext &VMContext) { 5769 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure 5770 // is called integer-like if its size is less than or equal to one word, and 5771 // the offset of each of its addressable sub-fields is zero. 5772 5773 uint64_t Size = Context.getTypeSize(Ty); 5774 5775 // Check that the type fits in a word. 5776 if (Size > 32) 5777 return false; 5778 5779 // FIXME: Handle vector types! 5780 if (Ty->isVectorType()) 5781 return false; 5782 5783 // Float types are never treated as "integer like". 5784 if (Ty->isRealFloatingType()) 5785 return false; 5786 5787 // If this is a builtin or pointer type then it is ok. 5788 if (Ty->getAs<BuiltinType>() || Ty->isPointerType()) 5789 return true; 5790 5791 // Small complex integer types are "integer like". 5792 if (const ComplexType *CT = Ty->getAs<ComplexType>()) 5793 return isIntegerLikeType(CT->getElementType(), Context, VMContext); 5794 5795 // Single element and zero sized arrays should be allowed, by the definition 5796 // above, but they are not. 5797 5798 // Otherwise, it must be a record type. 5799 const RecordType *RT = Ty->getAs<RecordType>(); 5800 if (!RT) return false; 5801 5802 // Ignore records with flexible arrays. 5803 const RecordDecl *RD = RT->getDecl(); 5804 if (RD->hasFlexibleArrayMember()) 5805 return false; 5806 5807 // Check that all sub-fields are at offset 0, and are themselves "integer 5808 // like". 5809 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 5810 5811 bool HadField = false; 5812 unsigned idx = 0; 5813 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 5814 i != e; ++i, ++idx) { 5815 const FieldDecl *FD = *i; 5816 5817 // Bit-fields are not addressable, we only need to verify they are "integer 5818 // like". We still have to disallow a subsequent non-bitfield, for example: 5819 // struct { int : 0; int x } 5820 // is non-integer like according to gcc. 
5821 if (FD->isBitField()) { 5822 if (!RD->isUnion()) 5823 HadField = true; 5824 5825 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 5826 return false; 5827 5828 continue; 5829 } 5830 5831 // Check if this field is at offset 0. 5832 if (Layout.getFieldOffset(idx) != 0) 5833 return false; 5834 5835 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 5836 return false; 5837 5838 // Only allow at most one field in a structure. This doesn't match the 5839 // wording above, but follows gcc in situations with a field following an 5840 // empty structure. 5841 if (!RD->isUnion()) { 5842 if (HadField) 5843 return false; 5844 5845 HadField = true; 5846 } 5847 } 5848 5849 return true; 5850 } 5851 5852 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, 5853 bool isVariadic) const { 5854 bool IsEffectivelyAAPCS_VFP = 5855 (getABIKind() == AAPCS_VFP || getABIKind() == AAPCS16_VFP) && !isVariadic; 5856 5857 if (RetTy->isVoidType()) 5858 return ABIArgInfo::getIgnore(); 5859 5860 // Large vector types should be returned via memory. 5861 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) { 5862 return getNaturalAlignIndirect(RetTy); 5863 } 5864 5865 // __fp16 gets returned as if it were an int or float, but with the top 16 5866 // bits unspecified. This is not done for OpenCL as it handles the half type 5867 // natively, and does not need to interwork with AAPCS code. 5868 if (RetTy->isHalfType() && !getContext().getLangOpts().NativeHalfArgsAndReturns) { 5869 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ? 5870 llvm::Type::getFloatTy(getVMContext()) : 5871 llvm::Type::getInt32Ty(getVMContext()); 5872 return ABIArgInfo::getDirect(ResType); 5873 } 5874 5875 if (!isAggregateTypeForABI(RetTy)) { 5876 // Treat an enum type as its underlying type. 5877 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 5878 RetTy = EnumTy->getDecl()->getIntegerType(); 5879 5880 return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend() 5881 : ABIArgInfo::getDirect(); 5882 } 5883 5884 // Are we following APCS? 5885 if (getABIKind() == APCS) { 5886 if (isEmptyRecord(getContext(), RetTy, false)) 5887 return ABIArgInfo::getIgnore(); 5888 5889 // Complex types are all returned as packed integers. 5890 // 5891 // FIXME: Consider using 2 x vector types if the back end handles them 5892 // correctly. 5893 if (RetTy->isAnyComplexType()) 5894 return ABIArgInfo::getDirect(llvm::IntegerType::get( 5895 getVMContext(), getContext().getTypeSize(RetTy))); 5896 5897 // Integer like structures are returned in r0. 5898 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) { 5899 // Return in the smallest viable integer type. 5900 uint64_t Size = getContext().getTypeSize(RetTy); 5901 if (Size <= 8) 5902 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 5903 if (Size <= 16) 5904 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 5905 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 5906 } 5907 5908 // Otherwise return in memory. 5909 return getNaturalAlignIndirect(RetTy); 5910 } 5911 5912 // Otherwise this is an AAPCS variant. 5913 5914 if (isEmptyRecord(getContext(), RetTy, true)) 5915 return ABIArgInfo::getIgnore(); 5916 5917 // Check for homogeneous aggregates with AAPCS-VFP. 
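  // For example, for a non-variadic AAPCS-VFP function a struct of three
  // floats is a homogeneous aggregate and is returned directly, letting the
  // backend place it in VFP registers rather than returning it via sret.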
5918 if (IsEffectivelyAAPCS_VFP) { 5919 const Type *Base = nullptr; 5920 uint64_t Members = 0; 5921 if (isHomogeneousAggregate(RetTy, Base, Members)) { 5922 assert(Base && "Base class should be set for homogeneous aggregate"); 5923 // Homogeneous Aggregates are returned directly. 5924 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false); 5925 } 5926 } 5927 5928 // Aggregates <= 4 bytes are returned in r0; other aggregates 5929 // are returned indirectly. 5930 uint64_t Size = getContext().getTypeSize(RetTy); 5931 if (Size <= 32) { 5932 // On RenderScript, coerce Aggregates <= 4 bytes to an integer array of 5933 // same size and alignment. 5934 if (getTarget().isRenderScriptTarget()) { 5935 return coerceToIntArray(RetTy, getContext(), getVMContext()); 5936 } 5937 if (getDataLayout().isBigEndian()) 5938 // Return in 32 bit integer integer type (as if loaded by LDR, AAPCS 5.4) 5939 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 5940 5941 // Return in the smallest viable integer type. 5942 if (Size <= 8) 5943 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 5944 if (Size <= 16) 5945 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 5946 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 5947 } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) { 5948 llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext()); 5949 llvm::Type *CoerceTy = 5950 llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32); 5951 return ABIArgInfo::getDirect(CoerceTy); 5952 } 5953 5954 return getNaturalAlignIndirect(RetTy); 5955 } 5956 5957 /// isIllegalVector - check whether Ty is an illegal vector type. 5958 bool ARMABIInfo::isIllegalVectorType(QualType Ty) const { 5959 if (const VectorType *VT = Ty->getAs<VectorType> ()) { 5960 if (isAndroid()) { 5961 // Android shipped using Clang 3.1, which supported a slightly different 5962 // vector ABI. The primary differences were that 3-element vector types 5963 // were legal, and so were sub 32-bit vectors (i.e. <2 x i8>). This path 5964 // accepts that legacy behavior for Android only. 5965 // Check whether VT is legal. 5966 unsigned NumElements = VT->getNumElements(); 5967 // NumElements should be power of 2 or equal to 3. 5968 if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3) 5969 return true; 5970 } else { 5971 // Check whether VT is legal. 5972 unsigned NumElements = VT->getNumElements(); 5973 uint64_t Size = getContext().getTypeSize(VT); 5974 // NumElements should be power of 2. 5975 if (!llvm::isPowerOf2_32(NumElements)) 5976 return true; 5977 // Size should be greater than 32 bits. 5978 return Size <= 32; 5979 } 5980 } 5981 return false; 5982 } 5983 5984 bool ARMABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize, 5985 llvm::Type *eltTy, 5986 unsigned numElts) const { 5987 if (!llvm::isPowerOf2_32(numElts)) 5988 return false; 5989 unsigned size = getDataLayout().getTypeStoreSizeInBits(eltTy); 5990 if (size > 64) 5991 return false; 5992 if (vectorSize.getQuantity() != 8 && 5993 (vectorSize.getQuantity() != 16 || numElts == 1)) 5994 return false; 5995 return true; 5996 } 5997 5998 bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 5999 // Homogeneous aggregates for AAPCS-VFP must have base types of float, 6000 // double, or 64-bit or 128-bit vectors. 
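  // (Note that, unlike AArch64, half/__fp16 is not an allowed base type here;
  //  long double is accepted because AAPCS defines it as double.)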
6001 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 6002 if (BT->getKind() == BuiltinType::Float || 6003 BT->getKind() == BuiltinType::Double || 6004 BT->getKind() == BuiltinType::LongDouble) 6005 return true; 6006 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 6007 unsigned VecSize = getContext().getTypeSize(VT); 6008 if (VecSize == 64 || VecSize == 128) 6009 return true; 6010 } 6011 return false; 6012 } 6013 6014 bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, 6015 uint64_t Members) const { 6016 return Members <= 4; 6017 } 6018 6019 Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6020 QualType Ty) const { 6021 CharUnits SlotSize = CharUnits::fromQuantity(4); 6022 6023 // Empty records are ignored for parameter passing purposes. 6024 if (isEmptyRecord(getContext(), Ty, true)) { 6025 Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize); 6026 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty)); 6027 return Addr; 6028 } 6029 6030 auto TyInfo = getContext().getTypeInfoInChars(Ty); 6031 CharUnits TyAlignForABI = TyInfo.second; 6032 6033 // Use indirect if size of the illegal vector is bigger than 16 bytes. 6034 bool IsIndirect = false; 6035 const Type *Base = nullptr; 6036 uint64_t Members = 0; 6037 if (TyInfo.first > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) { 6038 IsIndirect = true; 6039 6040 // ARMv7k passes structs bigger than 16 bytes indirectly, in space 6041 // allocated by the caller. 6042 } else if (TyInfo.first > CharUnits::fromQuantity(16) && 6043 getABIKind() == ARMABIInfo::AAPCS16_VFP && 6044 !isHomogeneousAggregate(Ty, Base, Members)) { 6045 IsIndirect = true; 6046 6047 // Otherwise, bound the type's ABI alignment. 6048 // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for 6049 // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte. 6050 // Our callers should be prepared to handle an under-aligned address. 6051 } else if (getABIKind() == ARMABIInfo::AAPCS_VFP || 6052 getABIKind() == ARMABIInfo::AAPCS) { 6053 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4)); 6054 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8)); 6055 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) { 6056 // ARMv7k allows type alignment up to 16 bytes. 
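    // For example, a 16-byte-aligned type keeps its full alignment here,
    // whereas the plain AAPCS path above clamps it to 8 bytes.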
6057 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4)); 6058 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16)); 6059 } else { 6060 TyAlignForABI = CharUnits::fromQuantity(4); 6061 } 6062 TyInfo.second = TyAlignForABI; 6063 6064 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo, 6065 SlotSize, /*AllowHigherAlign*/ true); 6066 } 6067 6068 //===----------------------------------------------------------------------===// 6069 // NVPTX ABI Implementation 6070 //===----------------------------------------------------------------------===// 6071 6072 namespace { 6073 6074 class NVPTXABIInfo : public ABIInfo { 6075 public: 6076 NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 6077 6078 ABIArgInfo classifyReturnType(QualType RetTy) const; 6079 ABIArgInfo classifyArgumentType(QualType Ty) const; 6080 6081 void computeInfo(CGFunctionInfo &FI) const override; 6082 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6083 QualType Ty) const override; 6084 }; 6085 6086 class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo { 6087 public: 6088 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT) 6089 : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {} 6090 6091 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 6092 CodeGen::CodeGenModule &M, 6093 ForDefinition_t IsForDefinition) const override; 6094 6095 private: 6096 // Adds a NamedMDNode with F, Name, and Operand as operands, and adds the 6097 // resulting MDNode to the nvvm.annotations MDNode. 6098 static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand); 6099 }; 6100 6101 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const { 6102 if (RetTy->isVoidType()) 6103 return ABIArgInfo::getIgnore(); 6104 6105 // note: this is different from default ABI 6106 if (!RetTy->isScalarType()) 6107 return ABIArgInfo::getDirect(); 6108 6109 // Treat an enum type as its underlying type. 6110 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 6111 RetTy = EnumTy->getDecl()->getIntegerType(); 6112 6113 return (RetTy->isPromotableIntegerType() ? 6114 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 6115 } 6116 6117 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const { 6118 // Treat an enum type as its underlying type. 6119 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 6120 Ty = EnumTy->getDecl()->getIntegerType(); 6121 6122 // Return aggregates type as indirect by value 6123 if (isAggregateTypeForABI(Ty)) 6124 return getNaturalAlignIndirect(Ty, /* byval */ true); 6125 6126 return (Ty->isPromotableIntegerType() ? 6127 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 6128 } 6129 6130 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const { 6131 if (!getCXXABI().classifyReturnType(FI)) 6132 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 6133 for (auto &I : FI.arguments()) 6134 I.info = classifyArgumentType(I.type); 6135 6136 // Always honor user-specified calling convention. 
6137 if (FI.getCallingConvention() != llvm::CallingConv::C) 6138 return; 6139 6140 FI.setEffectiveCallingConvention(getRuntimeCC()); 6141 } 6142 6143 Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6144 QualType Ty) const { 6145 llvm_unreachable("NVPTX does not support varargs"); 6146 } 6147 6148 void NVPTXTargetCodeGenInfo::setTargetAttributes( 6149 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M, 6150 ForDefinition_t IsForDefinition) const { 6151 if (!IsForDefinition) 6152 return; 6153 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 6154 if (!FD) return; 6155 6156 llvm::Function *F = cast<llvm::Function>(GV); 6157 6158 // Perform special handling in OpenCL mode 6159 if (M.getLangOpts().OpenCL) { 6160 // Use OpenCL function attributes to check for kernel functions 6161 // By default, all functions are device functions 6162 if (FD->hasAttr<OpenCLKernelAttr>()) { 6163 // OpenCL __kernel functions get kernel metadata 6164 // Create !{<func-ref>, metadata !"kernel", i32 1} node 6165 addNVVMMetadata(F, "kernel", 1); 6166 // And kernel functions are not subject to inlining 6167 F->addFnAttr(llvm::Attribute::NoInline); 6168 } 6169 } 6170 6171 // Perform special handling in CUDA mode. 6172 if (M.getLangOpts().CUDA) { 6173 // CUDA __global__ functions get a kernel metadata entry. Since 6174 // __global__ functions cannot be called from the device, we do not 6175 // need to set the noinline attribute. 6176 if (FD->hasAttr<CUDAGlobalAttr>()) { 6177 // Create !{<func-ref>, metadata !"kernel", i32 1} node 6178 addNVVMMetadata(F, "kernel", 1); 6179 } 6180 if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) { 6181 // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node 6182 llvm::APSInt MaxThreads(32); 6183 MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext()); 6184 if (MaxThreads > 0) 6185 addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue()); 6186 6187 // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was 6188 // not specified in __launch_bounds__ or if the user specified a 0 value, 6189 // we don't have to add a PTX directive. 
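      // For example, __launch_bounds__(256, 4) produces both a "maxntidx" =
      // 256 and a "minctasm" = 4 annotation, while __launch_bounds__(256)
      // produces only the former.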
6190 if (Attr->getMinBlocks()) { 6191 llvm::APSInt MinBlocks(32); 6192 MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext()); 6193 if (MinBlocks > 0) 6194 // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node 6195 addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue()); 6196 } 6197 } 6198 } 6199 } 6200 6201 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name, 6202 int Operand) { 6203 llvm::Module *M = F->getParent(); 6204 llvm::LLVMContext &Ctx = M->getContext(); 6205 6206 // Get "nvvm.annotations" metadata node 6207 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations"); 6208 6209 llvm::Metadata *MDVals[] = { 6210 llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name), 6211 llvm::ConstantAsMetadata::get( 6212 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))}; 6213 // Append metadata to nvvm.annotations 6214 MD->addOperand(llvm::MDNode::get(Ctx, MDVals)); 6215 } 6216 } 6217 6218 //===----------------------------------------------------------------------===// 6219 // SystemZ ABI Implementation 6220 //===----------------------------------------------------------------------===// 6221 6222 namespace { 6223 6224 class SystemZABIInfo : public SwiftABIInfo { 6225 bool HasVector; 6226 6227 public: 6228 SystemZABIInfo(CodeGenTypes &CGT, bool HV) 6229 : SwiftABIInfo(CGT), HasVector(HV) {} 6230 6231 bool isPromotableIntegerType(QualType Ty) const; 6232 bool isCompoundType(QualType Ty) const; 6233 bool isVectorArgumentType(QualType Ty) const; 6234 bool isFPArgumentType(QualType Ty) const; 6235 QualType GetSingleElementType(QualType Ty) const; 6236 6237 ABIArgInfo classifyReturnType(QualType RetTy) const; 6238 ABIArgInfo classifyArgumentType(QualType ArgTy) const; 6239 6240 void computeInfo(CGFunctionInfo &FI) const override { 6241 if (!getCXXABI().classifyReturnType(FI)) 6242 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 6243 for (auto &I : FI.arguments()) 6244 I.info = classifyArgumentType(I.type); 6245 } 6246 6247 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6248 QualType Ty) const override; 6249 6250 bool shouldPassIndirectlyForSwift(CharUnits totalSize, 6251 ArrayRef<llvm::Type*> scalars, 6252 bool asReturnValue) const override { 6253 return occupiesMoreThan(CGT, scalars, /*total*/ 4); 6254 } 6255 bool isSwiftErrorInRegister() const override { 6256 return true; 6257 } 6258 }; 6259 6260 class SystemZTargetCodeGenInfo : public TargetCodeGenInfo { 6261 public: 6262 SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector) 6263 : TargetCodeGenInfo(new SystemZABIInfo(CGT, HasVector)) {} 6264 }; 6265 6266 } 6267 6268 bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const { 6269 // Treat an enum type as its underlying type. 6270 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 6271 Ty = EnumTy->getDecl()->getIntegerType(); 6272 6273 // Promotable integer types are required to be promoted by the ABI. 6274 if (Ty->isPromotableIntegerType()) 6275 return true; 6276 6277 // 32-bit values must also be promoted. 
6278 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 6279 switch (BT->getKind()) { 6280 case BuiltinType::Int: 6281 case BuiltinType::UInt: 6282 return true; 6283 default: 6284 return false; 6285 } 6286 return false; 6287 } 6288 6289 bool SystemZABIInfo::isCompoundType(QualType Ty) const { 6290 return (Ty->isAnyComplexType() || 6291 Ty->isVectorType() || 6292 isAggregateTypeForABI(Ty)); 6293 } 6294 6295 bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const { 6296 return (HasVector && 6297 Ty->isVectorType() && 6298 getContext().getTypeSize(Ty) <= 128); 6299 } 6300 6301 bool SystemZABIInfo::isFPArgumentType(QualType Ty) const { 6302 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 6303 switch (BT->getKind()) { 6304 case BuiltinType::Float: 6305 case BuiltinType::Double: 6306 return true; 6307 default: 6308 return false; 6309 } 6310 6311 return false; 6312 } 6313 6314 QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const { 6315 if (const RecordType *RT = Ty->getAsStructureType()) { 6316 const RecordDecl *RD = RT->getDecl(); 6317 QualType Found; 6318 6319 // If this is a C++ record, check the bases first. 6320 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 6321 for (const auto &I : CXXRD->bases()) { 6322 QualType Base = I.getType(); 6323 6324 // Empty bases don't affect things either way. 6325 if (isEmptyRecord(getContext(), Base, true)) 6326 continue; 6327 6328 if (!Found.isNull()) 6329 return Ty; 6330 Found = GetSingleElementType(Base); 6331 } 6332 6333 // Check the fields. 6334 for (const auto *FD : RD->fields()) { 6335 // For compatibility with GCC, ignore empty bitfields in C++ mode. 6336 // Unlike isSingleElementStruct(), empty structure and array fields 6337 // do count. So do anonymous bitfields that aren't zero-sized. 6338 if (getContext().getLangOpts().CPlusPlus && 6339 FD->isBitField() && FD->getBitWidthValue(getContext()) == 0) 6340 continue; 6341 6342 // Unlike isSingleElementStruct(), arrays do not count. 6343 // Nested structures still do though. 6344 if (!Found.isNull()) 6345 return Ty; 6346 Found = GetSingleElementType(FD->getType()); 6347 } 6348 6349 // Unlike isSingleElementStruct(), trailing padding is allowed. 6350 // An 8-byte aligned struct s { float f; } is passed as a double. 6351 if (!Found.isNull()) 6352 return Found; 6353 } 6354 6355 return Ty; 6356 } 6357 6358 Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6359 QualType Ty) const { 6360 // Assume that va_list type is correct; should be pointer to LLVM type: 6361 // struct { 6362 // i64 __gpr; 6363 // i64 __fpr; 6364 // i8 *__overflow_arg_area; 6365 // i8 *__reg_save_area; 6366 // }; 6367 6368 // Every non-vector argument occupies 8 bytes and is passed by preference 6369 // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are 6370 // always passed on the stack. 
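// Illustrative sketch (numbers follow the constants used below): va_arg on a
// double checks __fpr and, while fewer than 4 FP register arguments have been
// consumed, loads from __reg_save_area at offset 16*8 + __fpr*8; va_arg on an
// int checks __gpr against a limit of 5 and reads the value right-justified
// within its 8-byte slot. Either way, once the registers are exhausted the
// value comes from __overflow_arg_area instead.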
6371 Ty = getContext().getCanonicalType(Ty); 6372 auto TyInfo = getContext().getTypeInfoInChars(Ty); 6373 llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty); 6374 llvm::Type *DirectTy = ArgTy; 6375 ABIArgInfo AI = classifyArgumentType(Ty); 6376 bool IsIndirect = AI.isIndirect(); 6377 bool InFPRs = false; 6378 bool IsVector = false; 6379 CharUnits UnpaddedSize; 6380 CharUnits DirectAlign; 6381 if (IsIndirect) { 6382 DirectTy = llvm::PointerType::getUnqual(DirectTy); 6383 UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8); 6384 } else { 6385 if (AI.getCoerceToType()) 6386 ArgTy = AI.getCoerceToType(); 6387 InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy(); 6388 IsVector = ArgTy->isVectorTy(); 6389 UnpaddedSize = TyInfo.first; 6390 DirectAlign = TyInfo.second; 6391 } 6392 CharUnits PaddedSize = CharUnits::fromQuantity(8); 6393 if (IsVector && UnpaddedSize > PaddedSize) 6394 PaddedSize = CharUnits::fromQuantity(16); 6395 assert((UnpaddedSize <= PaddedSize) && "Invalid argument size."); 6396 6397 CharUnits Padding = (PaddedSize - UnpaddedSize); 6398 6399 llvm::Type *IndexTy = CGF.Int64Ty; 6400 llvm::Value *PaddedSizeV = 6401 llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity()); 6402 6403 if (IsVector) { 6404 // Work out the address of a vector argument on the stack. 6405 // Vector arguments are always passed in the high bits of a 6406 // single (8 byte) or double (16 byte) stack slot. 6407 Address OverflowArgAreaPtr = 6408 CGF.Builder.CreateStructGEP(VAListAddr, 2, CharUnits::fromQuantity(16), 6409 "overflow_arg_area_ptr"); 6410 Address OverflowArgArea = 6411 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"), 6412 TyInfo.second); 6413 Address MemAddr = 6414 CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr"); 6415 6416 // Update overflow_arg_area_ptr pointer 6417 llvm::Value *NewOverflowArgArea = 6418 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV, 6419 "overflow_arg_area"); 6420 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr); 6421 6422 return MemAddr; 6423 } 6424 6425 assert(PaddedSize.getQuantity() == 8); 6426 6427 unsigned MaxRegs, RegCountField, RegSaveIndex; 6428 CharUnits RegPadding; 6429 if (InFPRs) { 6430 MaxRegs = 4; // Maximum of 4 FPR arguments 6431 RegCountField = 1; // __fpr 6432 RegSaveIndex = 16; // save offset for f0 6433 RegPadding = CharUnits(); // floats are passed in the high bits of an FPR 6434 } else { 6435 MaxRegs = 5; // Maximum of 5 GPR arguments 6436 RegCountField = 0; // __gpr 6437 RegSaveIndex = 2; // save offset for r2 6438 RegPadding = Padding; // values are passed in the low bits of a GPR 6439 } 6440 6441 Address RegCountPtr = CGF.Builder.CreateStructGEP( 6442 VAListAddr, RegCountField, RegCountField * CharUnits::fromQuantity(8), 6443 "reg_count_ptr"); 6444 llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count"); 6445 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs); 6446 llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV, 6447 "fits_in_regs"); 6448 6449 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 6450 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); 6451 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 6452 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); 6453 6454 // Emit code to load the value if it was passed in registers. 6455 CGF.EmitBlock(InRegBlock); 6456 6457 // Work out the address of an argument register. 
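// Sketch of the computation that follows: the byte offset into the register
// save area is RegSaveIndex * 8 + RegPadding + reg_count * 8; e.g. with
// reg_count == 2 and a 4-byte int (RegPadding == 4) this is
// 16 + 4 + 16 = 36 bytes from reg_save_area.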
6458 llvm::Value *ScaledRegCount = 6459 CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count"); 6460 llvm::Value *RegBase = 6461 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity() 6462 + RegPadding.getQuantity()); 6463 llvm::Value *RegOffset = 6464 CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset"); 6465 Address RegSaveAreaPtr = 6466 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24), 6467 "reg_save_area_ptr"); 6468 llvm::Value *RegSaveArea = 6469 CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area"); 6470 Address RawRegAddr(CGF.Builder.CreateGEP(RegSaveArea, RegOffset, 6471 "raw_reg_addr"), 6472 PaddedSize); 6473 Address RegAddr = 6474 CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr"); 6475 6476 // Update the register count 6477 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1); 6478 llvm::Value *NewRegCount = 6479 CGF.Builder.CreateAdd(RegCount, One, "reg_count"); 6480 CGF.Builder.CreateStore(NewRegCount, RegCountPtr); 6481 CGF.EmitBranch(ContBlock); 6482 6483 // Emit code to load the value if it was passed in memory. 6484 CGF.EmitBlock(InMemBlock); 6485 6486 // Work out the address of a stack argument. 6487 Address OverflowArgAreaPtr = CGF.Builder.CreateStructGEP( 6488 VAListAddr, 2, CharUnits::fromQuantity(16), "overflow_arg_area_ptr"); 6489 Address OverflowArgArea = 6490 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"), 6491 PaddedSize); 6492 Address RawMemAddr = 6493 CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr"); 6494 Address MemAddr = 6495 CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr"); 6496 6497 // Update overflow_arg_area_ptr pointer 6498 llvm::Value *NewOverflowArgArea = 6499 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV, 6500 "overflow_arg_area"); 6501 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr); 6502 CGF.EmitBranch(ContBlock); 6503 6504 // Return the appropriate result. 6505 CGF.EmitBlock(ContBlock); 6506 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, 6507 MemAddr, InMemBlock, "va_arg.addr"); 6508 6509 if (IsIndirect) 6510 ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"), 6511 TyInfo.second); 6512 6513 return ResAddr; 6514 } 6515 6516 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const { 6517 if (RetTy->isVoidType()) 6518 return ABIArgInfo::getIgnore(); 6519 if (isVectorArgumentType(RetTy)) 6520 return ABIArgInfo::getDirect(); 6521 if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64) 6522 return getNaturalAlignIndirect(RetTy); 6523 return (isPromotableIntegerType(RetTy) ? 6524 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 6525 } 6526 6527 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const { 6528 // Handle the generic C++ ABI. 6529 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 6530 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 6531 6532 // Integers and enums are extended to full register width. 6533 if (isPromotableIntegerType(Ty)) 6534 return ABIArgInfo::getExtend(); 6535 6536 // Handle vector types and vector-like structure types. Note that 6537 // as opposed to float-like structure types, we do not allow any 6538 // padding for vector-like structures, so verify the sizes match. 
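// For instance, a hypothetical
//   struct V { __attribute__((vector_size(16))) int v; };
// is a vector-like structure: with vector support enabled its single element
// is a 16-byte vector of the same size as the struct, so it is passed
// directly as that vector type rather than indirectly.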
6539 uint64_t Size = getContext().getTypeSize(Ty); 6540 QualType SingleElementTy = GetSingleElementType(Ty); 6541 if (isVectorArgumentType(SingleElementTy) && 6542 getContext().getTypeSize(SingleElementTy) == Size) 6543 return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy)); 6544 6545 // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly. 6546 if (Size != 8 && Size != 16 && Size != 32 && Size != 64) 6547 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 6548 6549 // Handle small structures. 6550 if (const RecordType *RT = Ty->getAs<RecordType>()) { 6551 // Structures with flexible arrays have variable length, so really 6552 // fail the size test above. 6553 const RecordDecl *RD = RT->getDecl(); 6554 if (RD->hasFlexibleArrayMember()) 6555 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 6556 6557 // The structure is passed as an unextended integer, a float, or a double. 6558 llvm::Type *PassTy; 6559 if (isFPArgumentType(SingleElementTy)) { 6560 assert(Size == 32 || Size == 64); 6561 if (Size == 32) 6562 PassTy = llvm::Type::getFloatTy(getVMContext()); 6563 else 6564 PassTy = llvm::Type::getDoubleTy(getVMContext()); 6565 } else 6566 PassTy = llvm::IntegerType::get(getVMContext(), Size); 6567 return ABIArgInfo::getDirect(PassTy); 6568 } 6569 6570 // Non-structure compounds are passed indirectly. 6571 if (isCompoundType(Ty)) 6572 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 6573 6574 return ABIArgInfo::getDirect(nullptr); 6575 } 6576 6577 //===----------------------------------------------------------------------===// 6578 // MSP430 ABI Implementation 6579 //===----------------------------------------------------------------------===// 6580 6581 namespace { 6582 6583 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo { 6584 public: 6585 MSP430TargetCodeGenInfo(CodeGenTypes &CGT) 6586 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 6587 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 6588 CodeGen::CodeGenModule &M, 6589 ForDefinition_t IsForDefinition) const override; 6590 }; 6591 6592 } 6593 6594 void MSP430TargetCodeGenInfo::setTargetAttributes( 6595 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M, 6596 ForDefinition_t IsForDefinition) const { 6597 if (!IsForDefinition) 6598 return; 6599 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 6600 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) { 6601 // Handle 'interrupt' attribute: 6602 llvm::Function *F = cast<llvm::Function>(GV); 6603 6604 // Step 1: Set ISR calling convention. 6605 F->setCallingConv(llvm::CallingConv::MSP430_INTR); 6606 6607 // Step 2: Add attributes goodness. 6608 F->addFnAttr(llvm::Attribute::NoInline); 6609 6610 // Step 3: Emit ISR vector alias. 6611 unsigned Num = attr->getNumber() / 2; 6612 llvm::GlobalAlias::create(llvm::Function::ExternalLinkage, 6613 "__isr_" + Twine(Num), F); 6614 } 6615 } 6616 } 6617 6618 //===----------------------------------------------------------------------===// 6619 // MIPS ABI Implementation. This works for both little-endian and 6620 // big-endian variants. 
6621 //===----------------------------------------------------------------------===// 6622 6623 namespace { 6624 class MipsABIInfo : public ABIInfo { 6625 bool IsO32; 6626 unsigned MinABIStackAlignInBytes, StackAlignInBytes; 6627 void CoerceToIntArgs(uint64_t TySize, 6628 SmallVectorImpl<llvm::Type *> &ArgList) const; 6629 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const; 6630 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const; 6631 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const; 6632 public: 6633 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) : 6634 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8), 6635 StackAlignInBytes(IsO32 ? 8 : 16) {} 6636 6637 ABIArgInfo classifyReturnType(QualType RetTy) const; 6638 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const; 6639 void computeInfo(CGFunctionInfo &FI) const override; 6640 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6641 QualType Ty) const override; 6642 bool shouldSignExtUnsignedType(QualType Ty) const override; 6643 }; 6644 6645 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo { 6646 unsigned SizeOfUnwindException; 6647 public: 6648 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32) 6649 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)), 6650 SizeOfUnwindException(IsO32 ? 24 : 32) {} 6651 6652 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { 6653 return 29; 6654 } 6655 6656 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 6657 CodeGen::CodeGenModule &CGM, 6658 ForDefinition_t IsForDefinition) const override { 6659 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 6660 if (!FD) return; 6661 llvm::Function *Fn = cast<llvm::Function>(GV); 6662 6663 if (FD->hasAttr<MipsLongCallAttr>()) 6664 Fn->addFnAttr("long-call"); 6665 else if (FD->hasAttr<MipsShortCallAttr>()) 6666 Fn->addFnAttr("short-call"); 6667 6668 // Other attributes do not have a meaning for declarations. 
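// For example (illustrative): a declaration carrying the MIPS long_call
// attribute, such as
//   void f(void) __attribute__((long_call));
// still gets the "long-call" string attribute above even when no body is
// emitted, whereas the mips16/micromips/interrupt handling below applies to
// definitions only.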
6669 if (!IsForDefinition) 6670 return; 6671 6672 if (FD->hasAttr<Mips16Attr>()) { 6673 Fn->addFnAttr("mips16"); 6674 } 6675 else if (FD->hasAttr<NoMips16Attr>()) { 6676 Fn->addFnAttr("nomips16"); 6677 } 6678 6679 if (FD->hasAttr<MicroMipsAttr>()) 6680 Fn->addFnAttr("micromips"); 6681 else if (FD->hasAttr<NoMicroMipsAttr>()) 6682 Fn->addFnAttr("nomicromips"); 6683 6684 const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>(); 6685 if (!Attr) 6686 return; 6687 6688 const char *Kind; 6689 switch (Attr->getInterrupt()) { 6690 case MipsInterruptAttr::eic: Kind = "eic"; break; 6691 case MipsInterruptAttr::sw0: Kind = "sw0"; break; 6692 case MipsInterruptAttr::sw1: Kind = "sw1"; break; 6693 case MipsInterruptAttr::hw0: Kind = "hw0"; break; 6694 case MipsInterruptAttr::hw1: Kind = "hw1"; break; 6695 case MipsInterruptAttr::hw2: Kind = "hw2"; break; 6696 case MipsInterruptAttr::hw3: Kind = "hw3"; break; 6697 case MipsInterruptAttr::hw4: Kind = "hw4"; break; 6698 case MipsInterruptAttr::hw5: Kind = "hw5"; break; 6699 } 6700 6701 Fn->addFnAttr("interrupt", Kind); 6702 6703 } 6704 6705 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 6706 llvm::Value *Address) const override; 6707 6708 unsigned getSizeOfUnwindException() const override { 6709 return SizeOfUnwindException; 6710 } 6711 }; 6712 } 6713 6714 void MipsABIInfo::CoerceToIntArgs( 6715 uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const { 6716 llvm::IntegerType *IntTy = 6717 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8); 6718 6719 // Add (TySize / MinABIStackAlignInBytes) args of IntTy. 6720 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N) 6721 ArgList.push_back(IntTy); 6722 6723 // If necessary, add one more integer type to ArgList. 6724 unsigned R = TySize % (MinABIStackAlignInBytes * 8); 6725 6726 if (R) 6727 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R)); 6728 } 6729 6730 // In N32/64, an aligned double precision floating point field is passed in 6731 // a register. 6732 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const { 6733 SmallVector<llvm::Type*, 8> ArgList, IntArgList; 6734 6735 if (IsO32) { 6736 CoerceToIntArgs(TySize, ArgList); 6737 return llvm::StructType::get(getVMContext(), ArgList); 6738 } 6739 6740 if (Ty->isComplexType()) 6741 return CGT.ConvertType(Ty); 6742 6743 const RecordType *RT = Ty->getAs<RecordType>(); 6744 6745 // Unions/vectors are passed in integer registers. 6746 if (!RT || !RT->isStructureOrClassType()) { 6747 CoerceToIntArgs(TySize, ArgList); 6748 return llvm::StructType::get(getVMContext(), ArgList); 6749 } 6750 6751 const RecordDecl *RD = RT->getDecl(); 6752 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 6753 assert(!(TySize % 8) && "Size of structure must be multiple of 8."); 6754 6755 uint64_t LastOffset = 0; 6756 unsigned idx = 0; 6757 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64); 6758 6759 // Iterate over fields in the struct/class and check if there are any aligned 6760 // double fields. 6761 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 6762 i != e; ++i, ++idx) { 6763 const QualType Ty = i->getType(); 6764 const BuiltinType *BT = Ty->getAs<BuiltinType>(); 6765 6766 if (!BT || BT->getKind() != BuiltinType::Double) 6767 continue; 6768 6769 uint64_t Offset = Layout.getFieldOffset(idx); 6770 if (Offset % 64) // Ignore doubles that are not aligned. 6771 continue; 6772 6773 // Add ((Offset - LastOffset) / 64) args of type i64. 
6774 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j) 6775 ArgList.push_back(I64); 6776 6777 // Add double type. 6778 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext())); 6779 LastOffset = Offset + 64; 6780 } 6781 6782 CoerceToIntArgs(TySize - LastOffset, IntArgList); 6783 ArgList.append(IntArgList.begin(), IntArgList.end()); 6784 6785 return llvm::StructType::get(getVMContext(), ArgList); 6786 } 6787 6788 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset, 6789 uint64_t Offset) const { 6790 if (OrigOffset + MinABIStackAlignInBytes > Offset) 6791 return nullptr; 6792 6793 return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8); 6794 } 6795 6796 ABIArgInfo 6797 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const { 6798 Ty = useFirstFieldIfTransparentUnion(Ty); 6799 6800 uint64_t OrigOffset = Offset; 6801 uint64_t TySize = getContext().getTypeSize(Ty); 6802 uint64_t Align = getContext().getTypeAlign(Ty) / 8; 6803 6804 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes), 6805 (uint64_t)StackAlignInBytes); 6806 unsigned CurrOffset = llvm::alignTo(Offset, Align); 6807 Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8; 6808 6809 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) { 6810 // Ignore empty aggregates. 6811 if (TySize == 0) 6812 return ABIArgInfo::getIgnore(); 6813 6814 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 6815 Offset = OrigOffset + MinABIStackAlignInBytes; 6816 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 6817 } 6818 6819 // Use indirect if the aggregate cannot fit into registers for 6820 // passing arguments according to the ABI 6821 unsigned Threshold = IsO32 ? 16 : 64; 6822 6823 if(getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(Threshold)) 6824 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align), true, 6825 getContext().getTypeAlign(Ty) / 8 > Align); 6826 6827 // If we have reached here, aggregates are passed directly by coercing to 6828 // another structure type. Padding is inserted if the offset of the 6829 // aggregate is unaligned. 6830 ABIArgInfo ArgInfo = 6831 ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0, 6832 getPaddingType(OrigOffset, CurrOffset)); 6833 ArgInfo.setInReg(true); 6834 return ArgInfo; 6835 } 6836 6837 // Treat an enum type as its underlying type. 6838 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 6839 Ty = EnumTy->getDecl()->getIntegerType(); 6840 6841 // All integral types are promoted to the GPR width. 6842 if (Ty->isIntegralOrEnumerationType()) 6843 return ABIArgInfo::getExtend(); 6844 6845 return ABIArgInfo::getDirect( 6846 nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset)); 6847 } 6848 6849 llvm::Type* 6850 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const { 6851 const RecordType *RT = RetTy->getAs<RecordType>(); 6852 SmallVector<llvm::Type*, 8> RTList; 6853 6854 if (RT && RT->isStructureOrClassType()) { 6855 const RecordDecl *RD = RT->getDecl(); 6856 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 6857 unsigned FieldCnt = Layout.getFieldCount(); 6858 6859 // N32/64 returns struct/classes in floating point registers if the 6860 // following conditions are met: 6861 // 1. The size of the struct/class is no larger than 128-bit. 6862 // 2. The struct/class has one or two fields all of which are floating 6863 // point types. 6864 // 3. The offset of the first field is zero (this follows what gcc does). 
6865 // 6866 // Any other composite results are returned in integer registers. 6867 // 6868 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) { 6869 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end(); 6870 for (; b != e; ++b) { 6871 const BuiltinType *BT = b->getType()->getAs<BuiltinType>(); 6872 6873 if (!BT || !BT->isFloatingPoint()) 6874 break; 6875 6876 RTList.push_back(CGT.ConvertType(b->getType())); 6877 } 6878 6879 if (b == e) 6880 return llvm::StructType::get(getVMContext(), RTList, 6881 RD->hasAttr<PackedAttr>()); 6882 6883 RTList.clear(); 6884 } 6885 } 6886 6887 CoerceToIntArgs(Size, RTList); 6888 return llvm::StructType::get(getVMContext(), RTList); 6889 } 6890 6891 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const { 6892 uint64_t Size = getContext().getTypeSize(RetTy); 6893 6894 if (RetTy->isVoidType()) 6895 return ABIArgInfo::getIgnore(); 6896 6897 // O32 doesn't treat zero-sized structs differently from other structs. 6898 // However, N32/N64 ignores zero sized return values. 6899 if (!IsO32 && Size == 0) 6900 return ABIArgInfo::getIgnore(); 6901 6902 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) { 6903 if (Size <= 128) { 6904 if (RetTy->isAnyComplexType()) 6905 return ABIArgInfo::getDirect(); 6906 6907 // O32 returns integer vectors in registers and N32/N64 returns all small 6908 // aggregates in registers. 6909 if (!IsO32 || 6910 (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) { 6911 ABIArgInfo ArgInfo = 6912 ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); 6913 ArgInfo.setInReg(true); 6914 return ArgInfo; 6915 } 6916 } 6917 6918 return getNaturalAlignIndirect(RetTy); 6919 } 6920 6921 // Treat an enum type as its underlying type. 6922 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 6923 RetTy = EnumTy->getDecl()->getIntegerType(); 6924 6925 return (RetTy->isPromotableIntegerType() ? 6926 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 6927 } 6928 6929 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const { 6930 ABIArgInfo &RetInfo = FI.getReturnInfo(); 6931 if (!getCXXABI().classifyReturnType(FI)) 6932 RetInfo = classifyReturnType(FI.getReturnType()); 6933 6934 // Check if a pointer to an aggregate is passed as a hidden argument. 6935 uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0; 6936 6937 for (auto &I : FI.arguments()) 6938 I.info = classifyArgumentType(I.type, Offset); 6939 } 6940 6941 Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6942 QualType OrigTy) const { 6943 QualType Ty = OrigTy; 6944 6945 // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64. 6946 // Pointers are also promoted in the same way but this only matters for N32. 6947 unsigned SlotSizeInBits = IsO32 ? 32 : 64; 6948 unsigned PtrWidth = getTarget().getPointerWidth(0); 6949 bool DidPromote = false; 6950 if ((Ty->isIntegerType() && 6951 getContext().getIntWidth(Ty) < SlotSizeInBits) || 6952 (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) { 6953 DidPromote = true; 6954 Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits, 6955 Ty->isSignedIntegerType()); 6956 } 6957 6958 auto TyInfo = getContext().getTypeInfoInChars(Ty); 6959 6960 // The alignment of things in the argument area is never larger than 6961 // StackAlignInBytes. 6962 TyInfo.second = 6963 std::min(TyInfo.second, CharUnits::fromQuantity(StackAlignInBytes)); 6964 6965 // MinABIStackAlignInBytes is the size of argument slots on the stack. 
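// Illustrative sketch: on N64 (64-bit slots) va_arg(ap, int) reads a full
// 64-bit slot using the promoted type chosen above, and the truncation below
// then narrows the value back to 32 bits in a temporary; on O32 the 32-bit
// slot already matches and no promotion is needed.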
6966 CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);
6967
6968 Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
6969 TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true);
6970
6971
6972 // If there was a promotion, "unpromote" into a temporary.
6973 // TODO: can we just use a pointer into a subset of the original slot?
6974 if (DidPromote) {
6975 Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp");
6976 llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr);
6977
6978 // Truncate down to the right width.
6979 llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType()
6980 : CGF.IntPtrTy);
6981 llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy);
6982 if (OrigTy->isPointerType())
6983 V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType());
6984
6985 CGF.Builder.CreateStore(V, Temp);
6986 Addr = Temp;
6987 }
6988
6989 return Addr;
6990 }
6991
6992 bool MipsABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
6993 int TySize = getContext().getTypeSize(Ty);
6994
6995 // The MIPS64 ABI requires unsigned 32-bit integers to be sign extended.
6996 if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
6997 return true;
6998
6999 return false;
7000 }
7001
7002 bool
7003 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
7004 llvm::Value *Address) const {
7005 // This information comes from gcc's implementation, which seems to be
7006 // about as canonical as it gets.
7007
7008 // Everything on MIPS is 4 bytes. Double-precision FP registers
7009 // are aliased to pairs of single-precision FP registers.
7010 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
7011
7012 // 0-31 are the general purpose registers, $0 - $31.
7013 // 32-63 are the floating-point registers, $f0 - $f31.
7014 // 64 and 65 are the multiply/divide registers, $hi and $lo.
7015 // 66 is the (notional, I think) register for signal-handler return.
7016 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
7017
7018 // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
7019 // They are one bit wide and ignored here.
7020
7021 // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
7022 // (coprocessor 1 is the FP unit)
7023 // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
7024 // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
7025 // 176-181 are the DSP accumulator registers.
7026 AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
7027 return false;
7028 }
7029
7030 //===----------------------------------------------------------------------===//
7031 // AVR ABI Implementation.
7032 //===----------------------------------------------------------------------===// 7033 7034 namespace { 7035 class AVRTargetCodeGenInfo : public TargetCodeGenInfo { 7036 public: 7037 AVRTargetCodeGenInfo(CodeGenTypes &CGT) 7038 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) { } 7039 7040 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 7041 CodeGen::CodeGenModule &CGM, 7042 ForDefinition_t IsForDefinition) const override { 7043 if (!IsForDefinition) 7044 return; 7045 const auto *FD = dyn_cast_or_null<FunctionDecl>(D); 7046 if (!FD) return; 7047 auto *Fn = cast<llvm::Function>(GV); 7048 7049 if (FD->getAttr<AVRInterruptAttr>()) 7050 Fn->addFnAttr("interrupt"); 7051 7052 if (FD->getAttr<AVRSignalAttr>()) 7053 Fn->addFnAttr("signal"); 7054 } 7055 }; 7056 } 7057 7058 //===----------------------------------------------------------------------===// 7059 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults. 7060 // Currently subclassed only to implement custom OpenCL C function attribute 7061 // handling. 7062 //===----------------------------------------------------------------------===// 7063 7064 namespace { 7065 7066 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo { 7067 public: 7068 TCETargetCodeGenInfo(CodeGenTypes &CGT) 7069 : DefaultTargetCodeGenInfo(CGT) {} 7070 7071 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 7072 CodeGen::CodeGenModule &M, 7073 ForDefinition_t IsForDefinition) const override; 7074 }; 7075 7076 void TCETargetCodeGenInfo::setTargetAttributes( 7077 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M, 7078 ForDefinition_t IsForDefinition) const { 7079 if (!IsForDefinition) 7080 return; 7081 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 7082 if (!FD) return; 7083 7084 llvm::Function *F = cast<llvm::Function>(GV); 7085 7086 if (M.getLangOpts().OpenCL) { 7087 if (FD->hasAttr<OpenCLKernelAttr>()) { 7088 // OpenCL C Kernel functions are not subject to inlining 7089 F->addFnAttr(llvm::Attribute::NoInline); 7090 const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>(); 7091 if (Attr) { 7092 // Convert the reqd_work_group_size() attributes to metadata. 7093 llvm::LLVMContext &Context = F->getContext(); 7094 llvm::NamedMDNode *OpenCLMetadata = 7095 M.getModule().getOrInsertNamedMetadata( 7096 "opencl.kernel_wg_size_info"); 7097 7098 SmallVector<llvm::Metadata *, 5> Operands; 7099 Operands.push_back(llvm::ConstantAsMetadata::get(F)); 7100 7101 Operands.push_back( 7102 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 7103 M.Int32Ty, llvm::APInt(32, Attr->getXDim())))); 7104 Operands.push_back( 7105 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 7106 M.Int32Ty, llvm::APInt(32, Attr->getYDim())))); 7107 Operands.push_back( 7108 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 7109 M.Int32Ty, llvm::APInt(32, Attr->getZDim())))); 7110 7111 // Add a boolean constant operand for "required" (true) or "hint" 7112 // (false) for implementing the work_group_size_hint attr later. 7113 // Currently always true as the hint is not yet implemented. 
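// Illustrative example (sketch, hypothetical kernel): a kernel declared as
//   __kernel __attribute__((reqd_work_group_size(8, 8, 1))) void k(...);
// ends up with an opencl.kernel_wg_size_info operand of the form
//   !{<func-ref>, i32 8, i32 8, i32 1, i1 true}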
7114 Operands.push_back( 7115 llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context))); 7116 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands)); 7117 } 7118 } 7119 } 7120 } 7121 7122 } 7123 7124 //===----------------------------------------------------------------------===// 7125 // Hexagon ABI Implementation 7126 //===----------------------------------------------------------------------===// 7127 7128 namespace { 7129 7130 class HexagonABIInfo : public ABIInfo { 7131 7132 7133 public: 7134 HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 7135 7136 private: 7137 7138 ABIArgInfo classifyReturnType(QualType RetTy) const; 7139 ABIArgInfo classifyArgumentType(QualType RetTy) const; 7140 7141 void computeInfo(CGFunctionInfo &FI) const override; 7142 7143 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 7144 QualType Ty) const override; 7145 }; 7146 7147 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo { 7148 public: 7149 HexagonTargetCodeGenInfo(CodeGenTypes &CGT) 7150 :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {} 7151 7152 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 7153 return 29; 7154 } 7155 }; 7156 7157 } 7158 7159 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const { 7160 if (!getCXXABI().classifyReturnType(FI)) 7161 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 7162 for (auto &I : FI.arguments()) 7163 I.info = classifyArgumentType(I.type); 7164 } 7165 7166 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const { 7167 if (!isAggregateTypeForABI(Ty)) { 7168 // Treat an enum type as its underlying type. 7169 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 7170 Ty = EnumTy->getDecl()->getIntegerType(); 7171 7172 return (Ty->isPromotableIntegerType() ? 7173 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 7174 } 7175 7176 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 7177 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 7178 7179 // Ignore empty records. 7180 if (isEmptyRecord(getContext(), Ty, true)) 7181 return ABIArgInfo::getIgnore(); 7182 7183 uint64_t Size = getContext().getTypeSize(Ty); 7184 if (Size > 64) 7185 return getNaturalAlignIndirect(Ty, /*ByVal=*/true); 7186 // Pass in the smallest viable integer type. 7187 else if (Size > 32) 7188 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext())); 7189 else if (Size > 16) 7190 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 7191 else if (Size > 8) 7192 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 7193 else 7194 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 7195 } 7196 7197 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const { 7198 if (RetTy->isVoidType()) 7199 return ABIArgInfo::getIgnore(); 7200 7201 // Large vector types should be returned via memory. 7202 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64) 7203 return getNaturalAlignIndirect(RetTy); 7204 7205 if (!isAggregateTypeForABI(RetTy)) { 7206 // Treat an enum type as its underlying type. 7207 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 7208 RetTy = EnumTy->getDecl()->getIntegerType(); 7209 7210 return (RetTy->isPromotableIntegerType() ? 7211 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 7212 } 7213 7214 if (isEmptyRecord(getContext(), RetTy, true)) 7215 return ABIArgInfo::getIgnore(); 7216 7217 // Aggregates <= 8 bytes are returned in r0; other aggregates 7218 // are returned indirectly. 
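// For example (hypothetical types): struct { short a, b, c; } (6 bytes) is
// widened and returned directly as an i64, while struct { int a, b, c; }
// (12 bytes) exceeds 64 bits and is returned through a pointer to a
// caller-allocated temporary.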
7219 uint64_t Size = getContext().getTypeSize(RetTy);
7220 if (Size <= 64) {
7221 // Return in the smallest viable integer type.
7222 if (Size <= 8)
7223 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
7224 if (Size <= 16)
7225 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
7226 if (Size <= 32)
7227 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
7228 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
7229 }
7230
7231 return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
7232 }
7233
7234 Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7235 QualType Ty) const {
7236 // FIXME: Someone needs to audit that this handles alignment correctly.
7237 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
7238 getContext().getTypeInfoInChars(Ty),
7239 CharUnits::fromQuantity(4),
7240 /*AllowHigherAlign*/ true);
7241 }
7242
7243 //===----------------------------------------------------------------------===//
7244 // Lanai ABI Implementation
7245 //===----------------------------------------------------------------------===//
7246
7247 namespace {
7248 class LanaiABIInfo : public DefaultABIInfo {
7249 public:
7250 LanaiABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
7251
7252 bool shouldUseInReg(QualType Ty, CCState &State) const;
7253
7254 void computeInfo(CGFunctionInfo &FI) const override {
7255 CCState State(FI.getCallingConvention());
7256 // Lanai uses 4 registers to pass arguments unless the function has the
7257 // regparm attribute set.
7258 if (FI.getHasRegParm()) {
7259 State.FreeRegs = FI.getRegParm();
7260 } else {
7261 State.FreeRegs = 4;
7262 }
7263
7264 if (!getCXXABI().classifyReturnType(FI))
7265 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7266 for (auto &I : FI.arguments())
7267 I.info = classifyArgumentType(I.type, State);
7268 }
7269
7270 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
7271 ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
7272 };
7273 } // end anonymous namespace
7274
7275 bool LanaiABIInfo::shouldUseInReg(QualType Ty, CCState &State) const {
7276 unsigned Size = getContext().getTypeSize(Ty);
7277 unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U;
7278
7279 if (SizeInRegs == 0)
7280 return false;
7281
7282 if (SizeInRegs > State.FreeRegs) {
7283 State.FreeRegs = 0;
7284 return false;
7285 }
7286
7287 State.FreeRegs -= SizeInRegs;
7288
7289 return true;
7290 }
7291
7292 ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal,
7293 CCState &State) const {
7294 if (!ByVal) {
7295 if (State.FreeRegs) {
7296 --State.FreeRegs; // Non-byval indirects just use one pointer.
7297 return getNaturalAlignIndirectInReg(Ty);
7298 }
7299 return getNaturalAlignIndirect(Ty, false);
7300 }
7301
7302 // Compute the byval alignment.
7303 const unsigned MinABIStackAlignInBytes = 4;
7304 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
7305 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
7306 /*Realign=*/TypeAlign >
7307 MinABIStackAlignInBytes);
7308 }
7309
7310 ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
7311 CCState &State) const {
7312 // Check with the C++ ABI first.
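// Rough illustration of the policy implemented below (hypothetical type): a
// plain struct { int a, b, c; } consumes three of the (by default four) free
// 32-bit argument registers and is passed direct-in-reg as {i32, i32, i32};
// once the registers are exhausted, aggregates fall back to the indirect
// byval path.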
7313 const RecordType *RT = Ty->getAs<RecordType>(); 7314 if (RT) { 7315 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()); 7316 if (RAA == CGCXXABI::RAA_Indirect) { 7317 return getIndirectResult(Ty, /*ByVal=*/false, State); 7318 } else if (RAA == CGCXXABI::RAA_DirectInMemory) { 7319 return getNaturalAlignIndirect(Ty, /*ByRef=*/true); 7320 } 7321 } 7322 7323 if (isAggregateTypeForABI(Ty)) { 7324 // Structures with flexible arrays are always indirect. 7325 if (RT && RT->getDecl()->hasFlexibleArrayMember()) 7326 return getIndirectResult(Ty, /*ByVal=*/true, State); 7327 7328 // Ignore empty structs/unions. 7329 if (isEmptyRecord(getContext(), Ty, true)) 7330 return ABIArgInfo::getIgnore(); 7331 7332 llvm::LLVMContext &LLVMContext = getVMContext(); 7333 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32; 7334 if (SizeInRegs <= State.FreeRegs) { 7335 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext); 7336 SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32); 7337 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements); 7338 State.FreeRegs -= SizeInRegs; 7339 return ABIArgInfo::getDirectInReg(Result); 7340 } else { 7341 State.FreeRegs = 0; 7342 } 7343 return getIndirectResult(Ty, true, State); 7344 } 7345 7346 // Treat an enum type as its underlying type. 7347 if (const auto *EnumTy = Ty->getAs<EnumType>()) 7348 Ty = EnumTy->getDecl()->getIntegerType(); 7349 7350 bool InReg = shouldUseInReg(Ty, State); 7351 if (Ty->isPromotableIntegerType()) { 7352 if (InReg) 7353 return ABIArgInfo::getDirectInReg(); 7354 return ABIArgInfo::getExtend(); 7355 } 7356 if (InReg) 7357 return ABIArgInfo::getDirectInReg(); 7358 return ABIArgInfo::getDirect(); 7359 } 7360 7361 namespace { 7362 class LanaiTargetCodeGenInfo : public TargetCodeGenInfo { 7363 public: 7364 LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 7365 : TargetCodeGenInfo(new LanaiABIInfo(CGT)) {} 7366 }; 7367 } 7368 7369 //===----------------------------------------------------------------------===// 7370 // AMDGPU ABI Implementation 7371 //===----------------------------------------------------------------------===// 7372 7373 namespace { 7374 7375 class AMDGPUABIInfo final : public DefaultABIInfo { 7376 public: 7377 explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} 7378 7379 private: 7380 ABIArgInfo classifyArgumentType(QualType Ty) const; 7381 7382 void computeInfo(CGFunctionInfo &FI) const override; 7383 }; 7384 7385 void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const { 7386 if (!getCXXABI().classifyReturnType(FI)) 7387 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 7388 7389 unsigned CC = FI.getCallingConvention(); 7390 for (auto &Arg : FI.arguments()) 7391 if (CC == llvm::CallingConv::AMDGPU_KERNEL) 7392 Arg.info = classifyArgumentType(Arg.type); 7393 else 7394 Arg.info = DefaultABIInfo::classifyArgumentType(Arg.type); 7395 } 7396 7397 /// \brief Classify argument of given type \p Ty. 7398 ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty) const { 7399 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty)); 7400 if (!StrTy) { 7401 return DefaultABIInfo::classifyArgumentType(Ty); 7402 } 7403 7404 // Coerce single element structs to its element. 
7405 if (StrTy->getNumElements() == 1) { 7406 return ABIArgInfo::getDirect(); 7407 } 7408 7409 // If we set CanBeFlattened to true, CodeGen will expand the struct to its 7410 // individual elements, which confuses the Clover OpenCL backend; therefore we 7411 // have to set it to false here. Other args of getDirect() are just defaults. 7412 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false); 7413 } 7414 7415 class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo { 7416 public: 7417 AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT) 7418 : TargetCodeGenInfo(new AMDGPUABIInfo(CGT)) {} 7419 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 7420 CodeGen::CodeGenModule &M, 7421 ForDefinition_t IsForDefinition) const override; 7422 unsigned getOpenCLKernelCallingConv() const override; 7423 7424 llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM, 7425 llvm::PointerType *T, QualType QT) const override; 7426 7427 unsigned getASTAllocaAddressSpace() const override { 7428 return LangAS::FirstTargetAddressSpace + 7429 getABIInfo().getDataLayout().getAllocaAddrSpace(); 7430 } 7431 unsigned getGlobalVarAddressSpace(CodeGenModule &CGM, 7432 const VarDecl *D) const override; 7433 }; 7434 } 7435 7436 void AMDGPUTargetCodeGenInfo::setTargetAttributes( 7437 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M, 7438 ForDefinition_t IsForDefinition) const { 7439 if (!IsForDefinition) 7440 return; 7441 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 7442 if (!FD) 7443 return; 7444 7445 llvm::Function *F = cast<llvm::Function>(GV); 7446 7447 const auto *ReqdWGS = M.getLangOpts().OpenCL ? 7448 FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr; 7449 const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>(); 7450 if (ReqdWGS || FlatWGS) { 7451 unsigned Min = FlatWGS ? FlatWGS->getMin() : 0; 7452 unsigned Max = FlatWGS ? FlatWGS->getMax() : 0; 7453 if (ReqdWGS && Min == 0 && Max == 0) 7454 Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim(); 7455 7456 if (Min != 0) { 7457 assert(Min <= Max && "Min must be less than or equal Max"); 7458 7459 std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max); 7460 F->addFnAttr("amdgpu-flat-work-group-size", AttrVal); 7461 } else 7462 assert(Max == 0 && "Max must be zero"); 7463 } 7464 7465 if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>()) { 7466 unsigned Min = Attr->getMin(); 7467 unsigned Max = Attr->getMax(); 7468 7469 if (Min != 0) { 7470 assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max"); 7471 7472 std::string AttrVal = llvm::utostr(Min); 7473 if (Max != 0) 7474 AttrVal = AttrVal + "," + llvm::utostr(Max); 7475 F->addFnAttr("amdgpu-waves-per-eu", AttrVal); 7476 } else 7477 assert(Max == 0 && "Max must be zero"); 7478 } 7479 7480 if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) { 7481 unsigned NumSGPR = Attr->getNumSGPR(); 7482 7483 if (NumSGPR != 0) 7484 F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR)); 7485 } 7486 7487 if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) { 7488 uint32_t NumVGPR = Attr->getNumVGPR(); 7489 7490 if (NumVGPR != 0) 7491 F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR)); 7492 } 7493 } 7494 7495 unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const { 7496 return llvm::CallingConv::AMDGPU_KERNEL; 7497 } 7498 7499 // Currently LLVM assumes null pointers always have value 0, 7500 // which results in incorrectly transformed IR. 
Therefore, instead of
7501 // emitting null pointers in private and local address spaces, a null
7502 // pointer in the generic address space is emitted, which is then cast to a
7503 // pointer in the local or private address space.
7504 llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
7505 const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
7506 QualType QT) const {
7507 if (CGM.getContext().getTargetNullPointerValue(QT) == 0)
7508 return llvm::ConstantPointerNull::get(PT);
7509
7510 auto &Ctx = CGM.getContext();
7511 auto NPT = llvm::PointerType::get(PT->getElementType(),
7512 Ctx.getTargetAddressSpace(LangAS::opencl_generic));
7513 return llvm::ConstantExpr::getAddrSpaceCast(
7514 llvm::ConstantPointerNull::get(NPT), PT);
7515 }
7516
7517 unsigned
7518 AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
7519 const VarDecl *D) const {
7520 assert(!CGM.getLangOpts().OpenCL &&
7521 !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
7522 "Address space agnostic languages only");
7523 unsigned DefaultGlobalAS =
7524 LangAS::FirstTargetAddressSpace +
7525 CGM.getContext().getTargetAddressSpace(LangAS::opencl_global);
7526 if (!D)
7527 return DefaultGlobalAS;
7528
7529 unsigned AddrSpace = D->getType().getAddressSpace();
7530 assert(AddrSpace == LangAS::Default ||
7531 AddrSpace >= LangAS::FirstTargetAddressSpace);
7532 if (AddrSpace != LangAS::Default)
7533 return AddrSpace;
7534
7535 if (CGM.isTypeConstant(D->getType(), false)) {
7536 if (auto ConstAS = CGM.getTarget().getConstantAddressSpace())
7537 return ConstAS.getValue();
7538 }
7539 return DefaultGlobalAS;
7540 }
7541
7542 //===----------------------------------------------------------------------===//
7543 // SPARC v8 ABI Implementation.
7544 // Based on the SPARC Compliance Definition version 2.4.1.
7545 //
7546 // Ensures that complex values are passed in registers.
7547 //
7548 namespace {
7549 class SparcV8ABIInfo : public DefaultABIInfo {
7550 public:
7551 SparcV8ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
7552
7553 private:
7554 ABIArgInfo classifyReturnType(QualType RetTy) const;
7555 void computeInfo(CGFunctionInfo &FI) const override;
7556 };
7557 } // end anonymous namespace
7558
7559
7560 ABIArgInfo
7561 SparcV8ABIInfo::classifyReturnType(QualType Ty) const {
7562 if (Ty->isAnyComplexType()) {
7563 return ABIArgInfo::getDirect();
7564 }
7565 else {
7566 return DefaultABIInfo::classifyReturnType(Ty);
7567 }
7568 }
7569
7570 void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const {
7571
7572 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7573 for (auto &Arg : FI.arguments())
7574 Arg.info = classifyArgumentType(Arg.type);
7575 }
7576
7577 namespace {
7578 class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo {
7579 public:
7580 SparcV8TargetCodeGenInfo(CodeGenTypes &CGT)
7581 : TargetCodeGenInfo(new SparcV8ABIInfo(CGT)) {}
7582 };
7583 } // end anonymous namespace
7584
7585 //===----------------------------------------------------------------------===//
7586 // SPARC v9 ABI Implementation.
7587 // Based on the SPARC Compliance Definition version 2.4.1.
7588 //
7589 // Function arguments are mapped to a nominal "parameter array" and promoted to
7590 // registers depending on their type. Each argument occupies 8 or 16 bytes in
7591 // the array; structs larger than 16 bytes are passed indirectly.
7592 // 7593 // One case requires special care: 7594 // 7595 // struct mixed { 7596 // int i; 7597 // float f; 7598 // }; 7599 // 7600 // When a struct mixed is passed by value, it only occupies 8 bytes in the 7601 // parameter array, but the int is passed in an integer register, and the float 7602 // is passed in a floating point register. This is represented as two arguments 7603 // with the LLVM IR inreg attribute: 7604 // 7605 // declare void f(i32 inreg %i, float inreg %f) 7606 // 7607 // The code generator will only allocate 4 bytes from the parameter array for 7608 // the inreg arguments. All other arguments are allocated a multiple of 8 7609 // bytes. 7610 // 7611 namespace { 7612 class SparcV9ABIInfo : public ABIInfo { 7613 public: 7614 SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 7615 7616 private: 7617 ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const; 7618 void computeInfo(CGFunctionInfo &FI) const override; 7619 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 7620 QualType Ty) const override; 7621 7622 // Coercion type builder for structs passed in registers. The coercion type 7623 // serves two purposes: 7624 // 7625 // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned' 7626 // in registers. 7627 // 2. Expose aligned floating point elements as first-level elements, so the 7628 // code generator knows to pass them in floating point registers. 7629 // 7630 // We also compute the InReg flag which indicates that the struct contains 7631 // aligned 32-bit floats. 7632 // 7633 struct CoerceBuilder { 7634 llvm::LLVMContext &Context; 7635 const llvm::DataLayout &DL; 7636 SmallVector<llvm::Type*, 8> Elems; 7637 uint64_t Size; 7638 bool InReg; 7639 7640 CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl) 7641 : Context(c), DL(dl), Size(0), InReg(false) {} 7642 7643 // Pad Elems with integers until Size is ToSize. 7644 void pad(uint64_t ToSize) { 7645 assert(ToSize >= Size && "Cannot remove elements"); 7646 if (ToSize == Size) 7647 return; 7648 7649 // Finish the current 64-bit word. 7650 uint64_t Aligned = llvm::alignTo(Size, 64); 7651 if (Aligned > Size && Aligned <= ToSize) { 7652 Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size)); 7653 Size = Aligned; 7654 } 7655 7656 // Add whole 64-bit words. 7657 while (Size + 64 <= ToSize) { 7658 Elems.push_back(llvm::Type::getInt64Ty(Context)); 7659 Size += 64; 7660 } 7661 7662 // Final in-word padding. 7663 if (Size < ToSize) { 7664 Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size)); 7665 Size = ToSize; 7666 } 7667 } 7668 7669 // Add a floating point element at Offset. 7670 void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) { 7671 // Unaligned floats are treated as integers. 7672 if (Offset % Bits) 7673 return; 7674 // The InReg flag is only required if there are any floats < 64 bits. 7675 if (Bits < 64) 7676 InReg = true; 7677 pad(Offset); 7678 Elems.push_back(Ty); 7679 Size = Offset + Bits; 7680 } 7681 7682 // Add a struct type to the coercion type, starting at Offset (in bits). 
7683 void addStruct(uint64_t Offset, llvm::StructType *StrTy) { 7684 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy); 7685 for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) { 7686 llvm::Type *ElemTy = StrTy->getElementType(i); 7687 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i); 7688 switch (ElemTy->getTypeID()) { 7689 case llvm::Type::StructTyID: 7690 addStruct(ElemOffset, cast<llvm::StructType>(ElemTy)); 7691 break; 7692 case llvm::Type::FloatTyID: 7693 addFloat(ElemOffset, ElemTy, 32); 7694 break; 7695 case llvm::Type::DoubleTyID: 7696 addFloat(ElemOffset, ElemTy, 64); 7697 break; 7698 case llvm::Type::FP128TyID: 7699 addFloat(ElemOffset, ElemTy, 128); 7700 break; 7701 case llvm::Type::PointerTyID: 7702 if (ElemOffset % 64 == 0) { 7703 pad(ElemOffset); 7704 Elems.push_back(ElemTy); 7705 Size += 64; 7706 } 7707 break; 7708 default: 7709 break; 7710 } 7711 } 7712 } 7713 7714 // Check if Ty is a usable substitute for the coercion type. 7715 bool isUsableType(llvm::StructType *Ty) const { 7716 return llvm::makeArrayRef(Elems) == Ty->elements(); 7717 } 7718 7719 // Get the coercion type as a literal struct type. 7720 llvm::Type *getType() const { 7721 if (Elems.size() == 1) 7722 return Elems.front(); 7723 else 7724 return llvm::StructType::get(Context, Elems); 7725 } 7726 }; 7727 }; 7728 } // end anonymous namespace 7729 7730 ABIArgInfo 7731 SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const { 7732 if (Ty->isVoidType()) 7733 return ABIArgInfo::getIgnore(); 7734 7735 uint64_t Size = getContext().getTypeSize(Ty); 7736 7737 // Anything too big to fit in registers is passed with an explicit indirect 7738 // pointer / sret pointer. 7739 if (Size > SizeLimit) 7740 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 7741 7742 // Treat an enum type as its underlying type. 7743 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 7744 Ty = EnumTy->getDecl()->getIntegerType(); 7745 7746 // Integer types smaller than a register are extended. 7747 if (Size < 64 && Ty->isIntegerType()) 7748 return ABIArgInfo::getExtend(); 7749 7750 // Other non-aggregates go in registers. 7751 if (!isAggregateTypeForABI(Ty)) 7752 return ABIArgInfo::getDirect(); 7753 7754 // If a C++ object has either a non-trivial copy constructor or a non-trivial 7755 // destructor, it is passed with an explicit indirect pointer / sret pointer. 7756 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 7757 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 7758 7759 // This is a small aggregate type that should be passed in registers. 7760 // Build a coercion type from the LLVM struct type. 7761 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty)); 7762 if (!StrTy) 7763 return ABIArgInfo::getDirect(); 7764 7765 CoerceBuilder CB(getVMContext(), getDataLayout()); 7766 CB.addStruct(0, StrTy); 7767 CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64)); 7768 7769 // Try to use the original type for coercion. 7770 llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? 
StrTy : CB.getType(); 7771 7772 if (CB.InReg) 7773 return ABIArgInfo::getDirectInReg(CoerceTy); 7774 else 7775 return ABIArgInfo::getDirect(CoerceTy); 7776 } 7777 7778 Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 7779 QualType Ty) const { 7780 ABIArgInfo AI = classifyType(Ty, 16 * 8); 7781 llvm::Type *ArgTy = CGT.ConvertType(Ty); 7782 if (AI.canHaveCoerceToType() && !AI.getCoerceToType()) 7783 AI.setCoerceToType(ArgTy); 7784 7785 CharUnits SlotSize = CharUnits::fromQuantity(8); 7786 7787 CGBuilderTy &Builder = CGF.Builder; 7788 Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize); 7789 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy); 7790 7791 auto TypeInfo = getContext().getTypeInfoInChars(Ty); 7792 7793 Address ArgAddr = Address::invalid(); 7794 CharUnits Stride; 7795 switch (AI.getKind()) { 7796 case ABIArgInfo::Expand: 7797 case ABIArgInfo::CoerceAndExpand: 7798 case ABIArgInfo::InAlloca: 7799 llvm_unreachable("Unsupported ABI kind for va_arg"); 7800 7801 case ABIArgInfo::Extend: { 7802 Stride = SlotSize; 7803 CharUnits Offset = SlotSize - TypeInfo.first; 7804 ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend"); 7805 break; 7806 } 7807 7808 case ABIArgInfo::Direct: { 7809 auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType()); 7810 Stride = CharUnits::fromQuantity(AllocSize).alignTo(SlotSize); 7811 ArgAddr = Addr; 7812 break; 7813 } 7814 7815 case ABIArgInfo::Indirect: 7816 Stride = SlotSize; 7817 ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect"); 7818 ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"), 7819 TypeInfo.second); 7820 break; 7821 7822 case ABIArgInfo::Ignore: 7823 return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.second); 7824 } 7825 7826 // Update VAList. 7827 llvm::Value *NextPtr = 7828 Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), Stride, "ap.next"); 7829 Builder.CreateStore(NextPtr, VAListAddr); 7830 7831 return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr"); 7832 } 7833 7834 void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const { 7835 FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8); 7836 for (auto &I : FI.arguments()) 7837 I.info = classifyType(I.type, 16 * 8); 7838 } 7839 7840 namespace { 7841 class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo { 7842 public: 7843 SparcV9TargetCodeGenInfo(CodeGenTypes &CGT) 7844 : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {} 7845 7846 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 7847 return 14; 7848 } 7849 7850 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 7851 llvm::Value *Address) const override; 7852 }; 7853 } // end anonymous namespace 7854 7855 bool 7856 SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 7857 llvm::Value *Address) const { 7858 // This is calculated from the LLVM and GCC tables and verified 7859 // against gcc output. AFAIK all ABIs use the same encoding. 
7860 7861 CodeGen::CGBuilderTy &Builder = CGF.Builder; 7862 7863 llvm::IntegerType *i8 = CGF.Int8Ty; 7864 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 7865 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 7866 7867 // 0-31: the 8-byte general-purpose registers 7868 AssignToArrayRange(Builder, Address, Eight8, 0, 31); 7869 7870 // 32-63: f0-31, the 4-byte floating-point registers 7871 AssignToArrayRange(Builder, Address, Four8, 32, 63); 7872 7873 // Y = 64 7874 // PSR = 65 7875 // WIM = 66 7876 // TBR = 67 7877 // PC = 68 7878 // NPC = 69 7879 // FSR = 70 7880 // CSR = 71 7881 AssignToArrayRange(Builder, Address, Eight8, 64, 71); 7882 7883 // 72-87: d0-15, the 8-byte floating-point registers 7884 AssignToArrayRange(Builder, Address, Eight8, 72, 87); 7885 7886 return false; 7887 } 7888 7889 7890 //===----------------------------------------------------------------------===// 7891 // XCore ABI Implementation 7892 //===----------------------------------------------------------------------===// 7893 7894 namespace { 7895 7896 /// A SmallStringEnc instance is used to build up the TypeString by passing 7897 /// it by reference between functions that append to it. 7898 typedef llvm::SmallString<128> SmallStringEnc; 7899 7900 /// TypeStringCache caches the meta encodings of Types. 7901 /// 7902 /// The reason for caching TypeStrings is two fold: 7903 /// 1. To cache a type's encoding for later uses; 7904 /// 2. As a means to break recursive member type inclusion. 7905 /// 7906 /// A cache Entry can have a Status of: 7907 /// NonRecursive: The type encoding is not recursive; 7908 /// Recursive: The type encoding is recursive; 7909 /// Incomplete: An incomplete TypeString; 7910 /// IncompleteUsed: An incomplete TypeString that has been used in a 7911 /// Recursive type encoding. 7912 /// 7913 /// A NonRecursive entry will have all of its sub-members expanded as fully 7914 /// as possible. Whilst it may contain types which are recursive, the type 7915 /// itself is not recursive and thus its encoding may be safely used whenever 7916 /// the type is encountered. 7917 /// 7918 /// A Recursive entry will have all of its sub-members expanded as fully as 7919 /// possible. The type itself is recursive and it may contain other types which 7920 /// are recursive. The Recursive encoding must not be used during the expansion 7921 /// of a recursive type's recursive branch. For simplicity the code uses 7922 /// IncompleteCount to reject all usage of Recursive encodings for member types. 7923 /// 7924 /// An Incomplete entry is always a RecordType and only encodes its 7925 /// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and 7926 /// are placed into the cache during type expansion as a means to identify and 7927 /// handle recursive inclusion of types as sub-members. If there is recursion 7928 /// the entry becomes IncompleteUsed. 7929 /// 7930 /// During the expansion of a RecordType's members: 7931 /// 7932 /// If the cache contains a NonRecursive encoding for the member type, the 7933 /// cached encoding is used; 7934 /// 7935 /// If the cache contains a Recursive encoding for the member type, the 7936 /// cached encoding is 'Swapped' out, as it may be incorrect, and... 7937 /// 7938 /// If the member is a RecordType, an Incomplete encoding is placed into the 7939 /// cache to break potential recursive inclusion of itself as a sub-member; 7940 /// 7941 /// Once a member RecordType has been expanded, its temporary incomplete 7942 /// entry is removed from the cache. 
If a Recursive encoding was swapped out
/// it is swapped back in;
///
/// If an incomplete entry is used to expand a sub-member, the incomplete
/// entry is marked as IncompleteUsed. The cache keeps count of how many
/// IncompleteUsed entries it currently contains in IncompleteUsedCount;
///
/// If a member's encoding is found to be NonRecursive or Recursive (viz:
/// IncompleteUsedCount==0), the member's encoding is added to the cache.
/// Else the member is part of a recursive type and thus the recursion has
/// been exited too soon for the encoding to be correct for the member.
///
class TypeStringCache {
  enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
  struct Entry {
    std::string Str;     // The encoded TypeString for the type.
    enum Status State;   // Information about the encoding in 'Str'.
    std::string Swapped; // A temporary placeholder for a Recursive encoding
                         // during the expansion of RecordType's members.
  };
  std::map<const IdentifierInfo *, struct Entry> Map;
  unsigned IncompleteCount;     // Number of Incomplete entries in the Map.
  unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
public:
  TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
  void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
  bool removeIncomplete(const IdentifierInfo *ID);
  void addIfComplete(const IdentifierInfo *ID, StringRef Str,
                     bool IsRecursive);
  StringRef lookupStr(const IdentifierInfo *ID);
};

/// TypeString encodings for enum & union fields must be ordered.
/// FieldEncoding is a helper for this ordering process.
class FieldEncoding {
  bool HasName;
  std::string Enc;
public:
  FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
  StringRef str() { return Enc; }
  bool operator<(const FieldEncoding &rhs) const {
    if (HasName != rhs.HasName) return HasName;
    return Enc < rhs.Enc;
  }
};

class XCoreABIInfo : public DefaultABIInfo {
public:
  XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
  mutable TypeStringCache TSC;
public:
  XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
  void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                    CodeGen::CodeGenModule &M) const override;
};

} // End anonymous namespace.

// TODO: this implementation is likely now redundant with the default
// EmitVAArg.
Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  CGBuilderTy &Builder = CGF.Builder;

  // Get the VAList.
  CharUnits SlotSize = CharUnits::fromQuantity(4);
  Address AP(Builder.CreateLoad(VAListAddr), SlotSize);

  // Handle the argument.
  ABIArgInfo AI = classifyArgumentType(Ty);
  CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);

  Address Val = Address::invalid();
  CharUnits ArgSize = CharUnits::Zero();
  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");
  case ABIArgInfo::Ignore:
    Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
    ArgSize = CharUnits::Zero();
    break;
  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    Val = Builder.CreateBitCast(AP, ArgPtrTy);
    ArgSize = CharUnits::fromQuantity(
        getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
    ArgSize = ArgSize.alignTo(SlotSize);
    break;
  case ABIArgInfo::Indirect:
    Val = Builder.CreateElementBitCast(AP, ArgPtrTy);
    Val = Address(Builder.CreateLoad(Val), TypeAlign);
    ArgSize = SlotSize;
    break;
  }

  // Increment the VAList.
  if (!ArgSize.isZero()) {
    llvm::Value *APN =
        Builder.CreateConstInBoundsByteGEP(AP.getPointer(), ArgSize);
    Builder.CreateStore(APN, VAListAddr);
  }

  return Val;
}

/// During the expansion of a RecordType, an incomplete TypeString is placed
/// into the cache as a means to identify and break recursion.
/// If there is a Recursive encoding in the cache, it is swapped out and will
/// be reinserted by removeIncomplete().
/// All other kinds of encoding should have been used rather than arriving here.
void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
                                    std::string StubEnc) {
  if (!ID)
    return;
  Entry &E = Map[ID];
  assert((E.Str.empty() || E.State == Recursive) &&
         "Incorrect use of addIncomplete");
  assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
  E.Swapped.swap(E.Str); // swap out the Recursive
  E.Str.swap(StubEnc);
  E.State = Incomplete;
  ++IncompleteCount;
}

/// Once the RecordType has been expanded, the temporary incomplete TypeString
/// must be removed from the cache.
/// If a Recursive was swapped out by addIncomplete(), it will be replaced.
/// Returns true if the RecordType was defined recursively.
bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
  if (!ID)
    return false;
  auto I = Map.find(ID);
  assert(I != Map.end() && "Entry not present");
  Entry &E = I->second;
  assert((E.State == Incomplete ||
          E.State == IncompleteUsed) &&
         "Entry must be an incomplete type");
  bool IsRecursive = false;
  if (E.State == IncompleteUsed) {
    // We made use of our Incomplete encoding, thus we are recursive.
    IsRecursive = true;
    --IncompleteUsedCount;
  }
  if (E.Swapped.empty())
    Map.erase(I);
  else {
    // Swap the Recursive back.
    E.Swapped.swap(E.Str);
    E.Swapped.clear();
    E.State = Recursive;
  }
  --IncompleteCount;
  return IsRecursive;
}

/// Add the encoded TypeString to the cache only if it is NonRecursive or
/// Recursive (viz: all sub-members were expanded as fully as possible).
void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
                                    bool IsRecursive) {
  if (!ID || IncompleteUsedCount)
    return; // No key, or it is an incomplete sub-type, so don't add.
  Entry &E = Map[ID];
  if (IsRecursive && !E.Str.empty()) {
    assert(E.State == Recursive && E.Str.size() == Str.size() &&
           "This is not the same Recursive entry");
    // The parent container was not recursive after all, so we could have used
    // this Recursive sub-member entry, but we assumed the worst when we
    // started (viz: IncompleteCount != 0).
    return;
  }
  assert(E.Str.empty() && "Entry already present");
  E.Str = Str.str();
  E.State = IsRecursive ? Recursive : NonRecursive;
}

/// Return a cached TypeString encoding for the ID. If there isn't one, or we
/// are recursively expanding a type (IncompleteCount != 0) and the cached
/// encoding is Recursive, return an empty StringRef.
StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
  if (!ID)
    return StringRef(); // We have no key.
  auto I = Map.find(ID);
  if (I == Map.end())
    return StringRef(); // We have no encoding.
  Entry &E = I->second;
  if (E.State == Recursive && IncompleteCount)
    return StringRef(); // We don't use Recursive encodings for member types.

  if (E.State == Incomplete) {
    // The incomplete type is being used to break out of recursion.
    E.State = IncompleteUsed;
    ++IncompleteUsedCount;
  }
  return E.Str;
}

/// The XCore ABI includes a type information section that communicates symbol
/// type information to the linker. The linker uses this information to verify
/// safety/correctness of things such as array bounds and pointers et al.
/// The ABI only requires C (and XC) language modules to emit TypeStrings.
/// This type information (TypeString) is emitted into metadata for all global
/// symbols: definitions, declarations, functions & variables.
///
/// The TypeString carries type, qualifier, name, size & value details.
/// Please see 'Tools Development Guide' section 2.16.2 for format details:
/// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf
/// The output is tested by test/CodeGen/xcore-stringtype.c.
///
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC);

/// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
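/// As a rough illustration (derived from the append* helpers below, not from
/// the XMOS specification text): a C function declared as 'int f(int *p)'
/// would be encoded as "f{si}(p(si))", and the string is attached to the
/// module's "xcore.typestrings" named metadata as a (GlobalValue, MDString)
/// pair.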
8166 void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV, 8167 CodeGen::CodeGenModule &CGM) const { 8168 SmallStringEnc Enc; 8169 if (getTypeString(Enc, D, CGM, TSC)) { 8170 llvm::LLVMContext &Ctx = CGM.getModule().getContext(); 8171 llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV), 8172 llvm::MDString::get(Ctx, Enc.str())}; 8173 llvm::NamedMDNode *MD = 8174 CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings"); 8175 MD->addOperand(llvm::MDNode::get(Ctx, MDVals)); 8176 } 8177 } 8178 8179 //===----------------------------------------------------------------------===// 8180 // SPIR ABI Implementation 8181 //===----------------------------------------------------------------------===// 8182 8183 namespace { 8184 class SPIRTargetCodeGenInfo : public TargetCodeGenInfo { 8185 public: 8186 SPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 8187 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 8188 unsigned getOpenCLKernelCallingConv() const override; 8189 }; 8190 8191 } // End anonymous namespace. 8192 8193 namespace clang { 8194 namespace CodeGen { 8195 void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) { 8196 DefaultABIInfo SPIRABI(CGM.getTypes()); 8197 SPIRABI.computeInfo(FI); 8198 } 8199 } 8200 } 8201 8202 unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const { 8203 return llvm::CallingConv::SPIR_KERNEL; 8204 } 8205 8206 static bool appendType(SmallStringEnc &Enc, QualType QType, 8207 const CodeGen::CodeGenModule &CGM, 8208 TypeStringCache &TSC); 8209 8210 /// Helper function for appendRecordType(). 8211 /// Builds a SmallVector containing the encoded field types in declaration 8212 /// order. 8213 static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE, 8214 const RecordDecl *RD, 8215 const CodeGen::CodeGenModule &CGM, 8216 TypeStringCache &TSC) { 8217 for (const auto *Field : RD->fields()) { 8218 SmallStringEnc Enc; 8219 Enc += "m("; 8220 Enc += Field->getName(); 8221 Enc += "){"; 8222 if (Field->isBitField()) { 8223 Enc += "b("; 8224 llvm::raw_svector_ostream OS(Enc); 8225 OS << Field->getBitWidthValue(CGM.getContext()); 8226 Enc += ':'; 8227 } 8228 if (!appendType(Enc, Field->getType(), CGM, TSC)) 8229 return false; 8230 if (Field->isBitField()) 8231 Enc += ')'; 8232 Enc += '}'; 8233 FE.emplace_back(!Field->getName().empty(), Enc); 8234 } 8235 return true; 8236 } 8237 8238 /// Appends structure and union types to Enc and adds encoding to cache. 8239 /// Recursively calls appendType (via extractFieldType) for each field. 8240 /// Union types have their fields ordered according to the ABI. 8241 static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT, 8242 const CodeGen::CodeGenModule &CGM, 8243 TypeStringCache &TSC, const IdentifierInfo *ID) { 8244 // Append the cached TypeString if we have one. 8245 StringRef TypeString = TSC.lookupStr(ID); 8246 if (!TypeString.empty()) { 8247 Enc += TypeString; 8248 return true; 8249 } 8250 8251 // Start to emit an incomplete TypeString. 8252 size_t Start = Enc.size(); 8253 Enc += (RT->isUnionType()? 'u' : 's'); 8254 Enc += '('; 8255 if (ID) 8256 Enc += ID->getName(); 8257 Enc += "){"; 8258 8259 // We collect all encoded fields and order as necessary. 
8260 bool IsRecursive = false; 8261 const RecordDecl *RD = RT->getDecl()->getDefinition(); 8262 if (RD && !RD->field_empty()) { 8263 // An incomplete TypeString stub is placed in the cache for this RecordType 8264 // so that recursive calls to this RecordType will use it whilst building a 8265 // complete TypeString for this RecordType. 8266 SmallVector<FieldEncoding, 16> FE; 8267 std::string StubEnc(Enc.substr(Start).str()); 8268 StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString. 8269 TSC.addIncomplete(ID, std::move(StubEnc)); 8270 if (!extractFieldType(FE, RD, CGM, TSC)) { 8271 (void) TSC.removeIncomplete(ID); 8272 return false; 8273 } 8274 IsRecursive = TSC.removeIncomplete(ID); 8275 // The ABI requires unions to be sorted but not structures. 8276 // See FieldEncoding::operator< for sort algorithm. 8277 if (RT->isUnionType()) 8278 std::sort(FE.begin(), FE.end()); 8279 // We can now complete the TypeString. 8280 unsigned E = FE.size(); 8281 for (unsigned I = 0; I != E; ++I) { 8282 if (I) 8283 Enc += ','; 8284 Enc += FE[I].str(); 8285 } 8286 } 8287 Enc += '}'; 8288 TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive); 8289 return true; 8290 } 8291 8292 /// Appends enum types to Enc and adds the encoding to the cache. 8293 static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET, 8294 TypeStringCache &TSC, 8295 const IdentifierInfo *ID) { 8296 // Append the cached TypeString if we have one. 8297 StringRef TypeString = TSC.lookupStr(ID); 8298 if (!TypeString.empty()) { 8299 Enc += TypeString; 8300 return true; 8301 } 8302 8303 size_t Start = Enc.size(); 8304 Enc += "e("; 8305 if (ID) 8306 Enc += ID->getName(); 8307 Enc += "){"; 8308 8309 // We collect all encoded enumerations and order them alphanumerically. 8310 if (const EnumDecl *ED = ET->getDecl()->getDefinition()) { 8311 SmallVector<FieldEncoding, 16> FE; 8312 for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E; 8313 ++I) { 8314 SmallStringEnc EnumEnc; 8315 EnumEnc += "m("; 8316 EnumEnc += I->getName(); 8317 EnumEnc += "){"; 8318 I->getInitVal().toString(EnumEnc); 8319 EnumEnc += '}'; 8320 FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc)); 8321 } 8322 std::sort(FE.begin(), FE.end()); 8323 unsigned E = FE.size(); 8324 for (unsigned I = 0; I != E; ++I) { 8325 if (I) 8326 Enc += ','; 8327 Enc += FE[I].str(); 8328 } 8329 } 8330 Enc += '}'; 8331 TSC.addIfComplete(ID, Enc.substr(Start), false); 8332 return true; 8333 } 8334 8335 /// Appends type's qualifier to Enc. 8336 /// This is done prior to appending the type's encoding. 8337 static void appendQualifier(SmallStringEnc &Enc, QualType QT) { 8338 // Qualifiers are emitted in alphabetical order. 8339 static const char *const Table[]={"","c:","r:","cr:","v:","cv:","rv:","crv:"}; 8340 int Lookup = 0; 8341 if (QT.isConstQualified()) 8342 Lookup += 1<<0; 8343 if (QT.isRestrictQualified()) 8344 Lookup += 1<<1; 8345 if (QT.isVolatileQualified()) 8346 Lookup += 1<<2; 8347 Enc += Table[Lookup]; 8348 } 8349 8350 /// Appends built-in types to Enc. 
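/// For instance (illustrative, mirroring the switch below): 'unsigned short'
/// is encoded as "us", 'long long' as "sll", and 'double' as "d".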
8351 static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) { 8352 const char *EncType; 8353 switch (BT->getKind()) { 8354 case BuiltinType::Void: 8355 EncType = "0"; 8356 break; 8357 case BuiltinType::Bool: 8358 EncType = "b"; 8359 break; 8360 case BuiltinType::Char_U: 8361 EncType = "uc"; 8362 break; 8363 case BuiltinType::UChar: 8364 EncType = "uc"; 8365 break; 8366 case BuiltinType::SChar: 8367 EncType = "sc"; 8368 break; 8369 case BuiltinType::UShort: 8370 EncType = "us"; 8371 break; 8372 case BuiltinType::Short: 8373 EncType = "ss"; 8374 break; 8375 case BuiltinType::UInt: 8376 EncType = "ui"; 8377 break; 8378 case BuiltinType::Int: 8379 EncType = "si"; 8380 break; 8381 case BuiltinType::ULong: 8382 EncType = "ul"; 8383 break; 8384 case BuiltinType::Long: 8385 EncType = "sl"; 8386 break; 8387 case BuiltinType::ULongLong: 8388 EncType = "ull"; 8389 break; 8390 case BuiltinType::LongLong: 8391 EncType = "sll"; 8392 break; 8393 case BuiltinType::Float: 8394 EncType = "ft"; 8395 break; 8396 case BuiltinType::Double: 8397 EncType = "d"; 8398 break; 8399 case BuiltinType::LongDouble: 8400 EncType = "ld"; 8401 break; 8402 default: 8403 return false; 8404 } 8405 Enc += EncType; 8406 return true; 8407 } 8408 8409 /// Appends a pointer encoding to Enc before calling appendType for the pointee. 8410 static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT, 8411 const CodeGen::CodeGenModule &CGM, 8412 TypeStringCache &TSC) { 8413 Enc += "p("; 8414 if (!appendType(Enc, PT->getPointeeType(), CGM, TSC)) 8415 return false; 8416 Enc += ')'; 8417 return true; 8418 } 8419 8420 /// Appends array encoding to Enc before calling appendType for the element. 8421 static bool appendArrayType(SmallStringEnc &Enc, QualType QT, 8422 const ArrayType *AT, 8423 const CodeGen::CodeGenModule &CGM, 8424 TypeStringCache &TSC, StringRef NoSizeEnc) { 8425 if (AT->getSizeModifier() != ArrayType::Normal) 8426 return false; 8427 Enc += "a("; 8428 if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) 8429 CAT->getSize().toStringUnsigned(Enc); 8430 else 8431 Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "". 8432 Enc += ':'; 8433 // The Qualifiers should be attached to the type rather than the array. 8434 appendQualifier(Enc, QT); 8435 if (!appendType(Enc, AT->getElementType(), CGM, TSC)) 8436 return false; 8437 Enc += ')'; 8438 return true; 8439 } 8440 8441 /// Appends a function encoding to Enc, calling appendType for the return type 8442 /// and the arguments. 8443 static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT, 8444 const CodeGen::CodeGenModule &CGM, 8445 TypeStringCache &TSC) { 8446 Enc += "f{"; 8447 if (!appendType(Enc, FT->getReturnType(), CGM, TSC)) 8448 return false; 8449 Enc += "}("; 8450 if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) { 8451 // N.B. we are only interested in the adjusted param types. 8452 auto I = FPT->param_type_begin(); 8453 auto E = FPT->param_type_end(); 8454 if (I != E) { 8455 do { 8456 if (!appendType(Enc, *I, CGM, TSC)) 8457 return false; 8458 ++I; 8459 if (I != E) 8460 Enc += ','; 8461 } while (I != E); 8462 if (FPT->isVariadic()) 8463 Enc += ",va"; 8464 } else { 8465 if (FPT->isVariadic()) 8466 Enc += "va"; 8467 else 8468 Enc += '0'; 8469 } 8470 } 8471 Enc += ')'; 8472 return true; 8473 } 8474 8475 /// Handles the type's qualifier before dispatching a call to handle specific 8476 /// type encodings. 
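/// For example (illustrative): 'const int *' is encoded as "p(c:si)"; the
/// pointer itself is unqualified, while the pointee carries the "c:" qualifier
/// emitted by appendQualifier().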
8477 static bool appendType(SmallStringEnc &Enc, QualType QType, 8478 const CodeGen::CodeGenModule &CGM, 8479 TypeStringCache &TSC) { 8480 8481 QualType QT = QType.getCanonicalType(); 8482 8483 if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) 8484 // The Qualifiers should be attached to the type rather than the array. 8485 // Thus we don't call appendQualifier() here. 8486 return appendArrayType(Enc, QT, AT, CGM, TSC, ""); 8487 8488 appendQualifier(Enc, QT); 8489 8490 if (const BuiltinType *BT = QT->getAs<BuiltinType>()) 8491 return appendBuiltinType(Enc, BT); 8492 8493 if (const PointerType *PT = QT->getAs<PointerType>()) 8494 return appendPointerType(Enc, PT, CGM, TSC); 8495 8496 if (const EnumType *ET = QT->getAs<EnumType>()) 8497 return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier()); 8498 8499 if (const RecordType *RT = QT->getAsStructureType()) 8500 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier()); 8501 8502 if (const RecordType *RT = QT->getAsUnionType()) 8503 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier()); 8504 8505 if (const FunctionType *FT = QT->getAs<FunctionType>()) 8506 return appendFunctionType(Enc, FT, CGM, TSC); 8507 8508 return false; 8509 } 8510 8511 static bool getTypeString(SmallStringEnc &Enc, const Decl *D, 8512 CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) { 8513 if (!D) 8514 return false; 8515 8516 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 8517 if (FD->getLanguageLinkage() != CLanguageLinkage) 8518 return false; 8519 return appendType(Enc, FD->getType(), CGM, TSC); 8520 } 8521 8522 if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { 8523 if (VD->getLanguageLinkage() != CLanguageLinkage) 8524 return false; 8525 QualType QT = VD->getType().getCanonicalType(); 8526 if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) { 8527 // Global ArrayTypes are given a size of '*' if the size is unknown. 8528 // The Qualifiers should be attached to the type rather than the array. 8529 // Thus we don't call appendQualifier() here. 8530 return appendArrayType(Enc, QT, AT, CGM, TSC, "*"); 8531 } 8532 return appendType(Enc, QT, CGM, TSC); 8533 } 8534 return false; 8535 } 8536 8537 8538 //===----------------------------------------------------------------------===// 8539 // Driver code 8540 //===----------------------------------------------------------------------===// 8541 8542 bool CodeGenModule::supportsCOMDAT() const { 8543 return getTriple().supportsCOMDAT(); 8544 } 8545 8546 const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() { 8547 if (TheTargetCodeGenInfo) 8548 return *TheTargetCodeGenInfo; 8549 8550 // Helper to set the unique_ptr while still keeping the return value. 
8551 auto SetCGInfo = [&](TargetCodeGenInfo *P) -> const TargetCodeGenInfo & { 8552 this->TheTargetCodeGenInfo.reset(P); 8553 return *P; 8554 }; 8555 8556 const llvm::Triple &Triple = getTarget().getTriple(); 8557 switch (Triple.getArch()) { 8558 default: 8559 return SetCGInfo(new DefaultTargetCodeGenInfo(Types)); 8560 8561 case llvm::Triple::le32: 8562 return SetCGInfo(new PNaClTargetCodeGenInfo(Types)); 8563 case llvm::Triple::mips: 8564 case llvm::Triple::mipsel: 8565 if (Triple.getOS() == llvm::Triple::NaCl) 8566 return SetCGInfo(new PNaClTargetCodeGenInfo(Types)); 8567 return SetCGInfo(new MIPSTargetCodeGenInfo(Types, true)); 8568 8569 case llvm::Triple::mips64: 8570 case llvm::Triple::mips64el: 8571 return SetCGInfo(new MIPSTargetCodeGenInfo(Types, false)); 8572 8573 case llvm::Triple::avr: 8574 return SetCGInfo(new AVRTargetCodeGenInfo(Types)); 8575 8576 case llvm::Triple::aarch64: 8577 case llvm::Triple::aarch64_be: { 8578 AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS; 8579 if (getTarget().getABI() == "darwinpcs") 8580 Kind = AArch64ABIInfo::DarwinPCS; 8581 else if (Triple.isOSWindows()) 8582 return SetCGInfo( 8583 new WindowsAArch64TargetCodeGenInfo(Types, AArch64ABIInfo::Win64)); 8584 8585 return SetCGInfo(new AArch64TargetCodeGenInfo(Types, Kind)); 8586 } 8587 8588 case llvm::Triple::wasm32: 8589 case llvm::Triple::wasm64: 8590 return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types)); 8591 8592 case llvm::Triple::arm: 8593 case llvm::Triple::armeb: 8594 case llvm::Triple::thumb: 8595 case llvm::Triple::thumbeb: { 8596 if (Triple.getOS() == llvm::Triple::Win32) { 8597 return SetCGInfo( 8598 new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP)); 8599 } 8600 8601 ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS; 8602 StringRef ABIStr = getTarget().getABI(); 8603 if (ABIStr == "apcs-gnu") 8604 Kind = ARMABIInfo::APCS; 8605 else if (ABIStr == "aapcs16") 8606 Kind = ARMABIInfo::AAPCS16_VFP; 8607 else if (CodeGenOpts.FloatABI == "hard" || 8608 (CodeGenOpts.FloatABI != "soft" && 8609 (Triple.getEnvironment() == llvm::Triple::GNUEABIHF || 8610 Triple.getEnvironment() == llvm::Triple::MuslEABIHF || 8611 Triple.getEnvironment() == llvm::Triple::EABIHF))) 8612 Kind = ARMABIInfo::AAPCS_VFP; 8613 8614 return SetCGInfo(new ARMTargetCodeGenInfo(Types, Kind)); 8615 } 8616 8617 case llvm::Triple::ppc: 8618 return SetCGInfo( 8619 new PPC32TargetCodeGenInfo(Types, CodeGenOpts.FloatABI == "soft")); 8620 case llvm::Triple::ppc64: 8621 if (Triple.isOSBinFormatELF()) { 8622 PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1; 8623 if (getTarget().getABI() == "elfv2") 8624 Kind = PPC64_SVR4_ABIInfo::ELFv2; 8625 bool HasQPX = getTarget().getABI() == "elfv1-qpx"; 8626 bool IsSoftFloat = CodeGenOpts.FloatABI == "soft"; 8627 8628 return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX, 8629 IsSoftFloat)); 8630 } else 8631 return SetCGInfo(new PPC64TargetCodeGenInfo(Types)); 8632 case llvm::Triple::ppc64le: { 8633 assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!"); 8634 PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2; 8635 if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx") 8636 Kind = PPC64_SVR4_ABIInfo::ELFv1; 8637 bool HasQPX = getTarget().getABI() == "elfv1-qpx"; 8638 bool IsSoftFloat = CodeGenOpts.FloatABI == "soft"; 8639 8640 return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX, 8641 IsSoftFloat)); 8642 } 8643 8644 case llvm::Triple::nvptx: 8645 case llvm::Triple::nvptx64: 8646 return 
SetCGInfo(new NVPTXTargetCodeGenInfo(Types)); 8647 8648 case llvm::Triple::msp430: 8649 return SetCGInfo(new MSP430TargetCodeGenInfo(Types)); 8650 8651 case llvm::Triple::systemz: { 8652 bool HasVector = getTarget().getABI() == "vector"; 8653 return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector)); 8654 } 8655 8656 case llvm::Triple::tce: 8657 case llvm::Triple::tcele: 8658 return SetCGInfo(new TCETargetCodeGenInfo(Types)); 8659 8660 case llvm::Triple::x86: { 8661 bool IsDarwinVectorABI = Triple.isOSDarwin(); 8662 bool RetSmallStructInRegABI = 8663 X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts); 8664 bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing(); 8665 8666 if (Triple.getOS() == llvm::Triple::Win32) { 8667 return SetCGInfo(new WinX86_32TargetCodeGenInfo( 8668 Types, IsDarwinVectorABI, RetSmallStructInRegABI, 8669 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters)); 8670 } else { 8671 return SetCGInfo(new X86_32TargetCodeGenInfo( 8672 Types, IsDarwinVectorABI, RetSmallStructInRegABI, 8673 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters, 8674 CodeGenOpts.FloatABI == "soft")); 8675 } 8676 } 8677 8678 case llvm::Triple::x86_64: { 8679 StringRef ABI = getTarget().getABI(); 8680 X86AVXABILevel AVXLevel = 8681 (ABI == "avx512" 8682 ? X86AVXABILevel::AVX512 8683 : ABI == "avx" ? X86AVXABILevel::AVX : X86AVXABILevel::None); 8684 8685 switch (Triple.getOS()) { 8686 case llvm::Triple::Win32: 8687 return SetCGInfo(new WinX86_64TargetCodeGenInfo(Types, AVXLevel)); 8688 case llvm::Triple::PS4: 8689 return SetCGInfo(new PS4TargetCodeGenInfo(Types, AVXLevel)); 8690 default: 8691 return SetCGInfo(new X86_64TargetCodeGenInfo(Types, AVXLevel)); 8692 } 8693 } 8694 case llvm::Triple::hexagon: 8695 return SetCGInfo(new HexagonTargetCodeGenInfo(Types)); 8696 case llvm::Triple::lanai: 8697 return SetCGInfo(new LanaiTargetCodeGenInfo(Types)); 8698 case llvm::Triple::r600: 8699 return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types)); 8700 case llvm::Triple::amdgcn: 8701 return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types)); 8702 case llvm::Triple::sparc: 8703 return SetCGInfo(new SparcV8TargetCodeGenInfo(Types)); 8704 case llvm::Triple::sparcv9: 8705 return SetCGInfo(new SparcV9TargetCodeGenInfo(Types)); 8706 case llvm::Triple::xcore: 8707 return SetCGInfo(new XCoreTargetCodeGenInfo(Types)); 8708 case llvm::Triple::spir: 8709 case llvm::Triple::spir64: 8710 return SetCGInfo(new SPIRTargetCodeGenInfo(Types)); 8711 } 8712 } 8713