//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm> // std::sort

using namespace clang;
using namespace CodeGen;

// Helper for coercing an aggregate argument or return value into an integer
// array of the same size (including padding) and alignment. This alternate
// coercion happens only for the RenderScript ABI and can be removed after
// runtimes that rely on it are no longer supported.
//
// RenderScript assumes that the size of the argument / return value in the IR
// is the same as the size of the corresponding qualified type. This helper
// coerces the aggregate type into an array of the same size (including
// padding). This coercion is used in lieu of expansion of struct members or
// other canonical coercions that return a coerced-type of larger size.
//
// Ty - The argument / return value type
// Context - The associated ASTContext
// LLVMContext - The associated LLVMContext
static ABIArgInfo coerceToIntArray(QualType Ty,
                                   ASTContext &Context,
                                   llvm::LLVMContext &LLVMContext) {
  // Alignment and Size are measured in bits.
  const uint64_t Size = Context.getTypeSize(Ty);
  const uint64_t Alignment = Context.getTypeAlign(Ty);
  llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
  const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
  return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
}

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
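  // As written, this unrolls fully in the emitted IR: one store per element
  // in [FirstIndex, LastIndex].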
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell =
        Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
    Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
         T->isMemberFunctionPointerType();
}

ABIArgInfo
ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByRef, bool Realign,
                                 llvm::Type *Padding) const {
  return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty),
                                 ByRef, Realign, Padding);
}

ABIArgInfo
ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const {
  return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
                                      /*ByRef*/ false, Realign);
}

Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                             QualType Ty) const {
  return Address::invalid();
}

ABIInfo::~ABIInfo() {}

/// Does the given lowering require more than the given number of
/// registers when expanded?
///
/// This is intended to be the basis of a reasonable basic implementation
/// of should{Pass,Return}IndirectlyForSwift.
///
/// For most targets, a limit of four total registers is reasonable; this
/// limits the amount of code required in order to move around the value
/// in case it wasn't produced immediately prior to the call by the caller
/// (or wasn't produced in exactly the right registers) or isn't used
/// immediately within the callee. But some targets may need to further
/// limit the register count due to an inability to support that many
/// return registers.
static bool occupiesMoreThan(CodeGenTypes &cgt,
                             ArrayRef<llvm::Type*> scalarTypes,
                             unsigned maxAllRegisters) {
  unsigned intCount = 0, fpCount = 0;
  for (llvm::Type *type : scalarTypes) {
    if (type->isPointerTy()) {
      intCount++;
    } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
      auto ptrWidth = cgt.getTarget().getPointerWidth(0);
      intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
    } else {
      assert(type->isVectorTy() || type->isFloatingPointTy());
      fpCount++;
    }
  }

  return (intCount + fpCount > maxAllRegisters);
}

bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
                                             llvm::Type *eltTy,
                                             unsigned numElts) const {
  // The default implementation of this assumes that the target guarantees
  // 128-bit SIMD support but nothing more.
  return (vectorSize.getQuantity() > 8 && vectorSize.getQuantity() <= 16);
}

static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
                                              CGCXXABI &CXXABI) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return CGCXXABI::RAA_Default;
  return CXXABI.getRecordArgABI(RD);
}

static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
                                              CGCXXABI &CXXABI) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return CGCXXABI::RAA_Default;
  return getRecordArgABI(RT, CXXABI);
}

/// Pass transparent unions as if they were the type of the first element. Sema
/// should ensure that all elements of the union have the same "machine type".
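///
/// An illustrative example:
///   typedef union { int *i; long *l; } IntOrLongPtr
///     __attribute__((transparent_union));
/// Here an argument of type IntOrLongPtr is classified as if it were 'int *',
/// the type of the first field.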
static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
  if (const RecordType *UT = Ty->getAsUnionType()) {
    const RecordDecl *UD = UT->getDecl();
    if (UD->hasAttr<TransparentUnionAttr>()) {
      assert(!UD->field_empty() && "sema created an empty transparent union");
      return UD->field_begin()->getType();
    }
  }
  return Ty;
}

CGCXXABI &ABIInfo::getCXXABI() const {
  return CGT.getCXXABI();
}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

const TargetInfo &ABIInfo::getTarget() const {
  return CGT.getTarget();
}

bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); }

bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return false;
}

bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                uint64_t Members) const {
  return false;
}

bool ABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
  return false;
}

LLVM_DUMP_METHOD void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case InAlloca:
    OS << "InAlloca Offset=" << getInAllocaFieldIndex();
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign().getQuantity()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  case CoerceAndExpand:
    OS << "CoerceAndExpand Type=";
    getCoerceAndExpandType()->print(OS);
    break;
  }
  OS << ")\n";
}

// Dynamically round a pointer up to a multiple of the given alignment.
static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
                                                  llvm::Value *Ptr,
                                                  CharUnits Align) {
  llvm::Value *PtrAsInt = Ptr;
  // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
  PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy);
  PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt,
        llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1));
  PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt,
        llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity()));
  PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt,
                                        Ptr->getType(),
                                        Ptr->getName() + ".aligned");
  return PtrAsInt;
}

/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// This version implements the core direct-value passing rules.
///
/// \param SlotSize - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding.
///   If this is false, the returned address might be less-aligned than
///   DirectAlign.
static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
                                      Address VAListAddr,
                                      llvm::Type *DirectTy,
                                      CharUnits DirectSize,
                                      CharUnits DirectAlign,
                                      CharUnits SlotSize,
                                      bool AllowHigherAlign) {
  // Cast the element type to i8* if necessary. Some platforms define
  // va_list as a struct containing an i8* instead of just an i8*.
  if (VAListAddr.getElementType() != CGF.Int8PtrTy)
    VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);

  llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");

  // If the CC aligns values higher than the slot size, do so if needed.
  Address Addr = Address::invalid();
  if (AllowHigherAlign && DirectAlign > SlotSize) {
    Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
                   DirectAlign);
  } else {
    Addr = Address(Ptr, SlotSize);
  }

  // Advance the pointer past the argument, then store that back.
  CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
  llvm::Value *NextPtr =
      CGF.Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), FullDirectSize,
                                             "argp.next");
  CGF.Builder.CreateStore(NextPtr, VAListAddr);

  // If the argument is smaller than a slot, and this is a big-endian
  // target, the argument will be right-adjusted in its slot.
  if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() &&
      !DirectTy->isStructTy()) {
    Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
  }

  Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy);
  return Addr;
}

/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// \param IsIndirect - Values of this type are passed indirectly.
/// \param ValueInfo - The size and alignment of this type, generally
///   computed with getContext().getTypeInfoInChars(ValueTy).
/// \param SlotSizeAndAlign - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding.
static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType ValueTy, bool IsIndirect,
                                std::pair<CharUnits, CharUnits> ValueInfo,
                                CharUnits SlotSizeAndAlign,
                                bool AllowHigherAlign) {
  // The size and alignment of the value that was passed directly.
  CharUnits DirectSize, DirectAlign;
  if (IsIndirect) {
    DirectSize = CGF.getPointerSize();
    DirectAlign = CGF.getPointerAlign();
  } else {
    DirectSize = ValueInfo.first;
    DirectAlign = ValueInfo.second;
  }

  // Cast the address we've calculated to the right type.
  llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy);
  if (IsIndirect)
    DirectTy = DirectTy->getPointerTo(0);

  Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy,
                                        DirectSize, DirectAlign,
                                        SlotSizeAndAlign,
                                        AllowHigherAlign);

  if (IsIndirect) {
    Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.second);
  }

  return Addr;
}

static Address emitMergePHI(CodeGenFunction &CGF,
                            Address Addr1, llvm::BasicBlock *Block1,
                            Address Addr2, llvm::BasicBlock *Block2,
                            const llvm::Twine &Name = "") {
  assert(Addr1.getType() == Addr2.getType());
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
  PHI->addIncoming(Addr1.getPointer(), Block1);
  PHI->addIncoming(Addr2.getPointer(), Block2);
  CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
  return Address(PHI, Align);
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  //   AArch64    Linux
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                     const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}

void
TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
                                             llvm::SmallString<24> &Opt) const {
  // This assumes the user is passing a library name like "rt" instead of a
  // filename like "librt.a/so", and that they don't care whether it's static
  // or dynamic.
  Opt = "-l";
  Opt += Lib;
}

unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::C;
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it is an
/// unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
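///
/// For example, 'struct { int : 0; }' is empty (an unnamed zero-width
/// bit-field is an empty field), while 'struct { char c; }' is not.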
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isEmptyRecord(Context, I.getType(), true))
        return false;

  for (const auto *I : RD->fields())
    if (!isEmptyField(Context, I, AllowArrays))
      return false;
  return true;
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The type of the single non-empty field, if it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return nullptr;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return nullptr;

  const Type *Found = nullptr;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {
      // Ignore empty records.
      if (isEmptyRecord(Context, I.getType(), true))
        continue;

      // If we already found an element then this isn't a single-element
      // struct.
      if (Found)
        return nullptr;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(I.getType(), Context);
      if (!Found)
        return nullptr;
    }
  }

  // Check for single element.
  for (const auto *FD : RD->fields()) {
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return nullptr;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return nullptr;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return nullptr;

  return Found;
}

namespace {
Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                       const ABIArgInfo &AI) {
  // This default implementation defers to the llvm backend's va_arg
  // instruction. It can handle only passing arguments directly
  // (typically only handled in the backend for primitive types), or
  // aggregates passed indirectly by pointer (NOTE: if the "byval"
  // flag has ABI impact in the callee, this implementation cannot
  // work.)

  // Only a few cases are covered here at the moment -- those needed
  // by the default ABI.
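  //
  // In the indirect case the va_arg instruction yields a pointer to the
  // argument rather than the argument itself; in the direct case it yields
  // the value, which we spill to a temporary so that an Address can be
  // returned either way.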

  if (AI.isIndirect()) {
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(
        !AI.getIndirectRealign() &&
        "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");

    auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
    CharUnits TyAlignForABI = TyInfo.second;

    llvm::Type *BaseTy =
        llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
    llvm::Value *Addr =
        CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy);
    return Address(Addr, TyAlignForABI);
  } else {
    assert((AI.isDirect() || AI.isExtend()) &&
           "Unexpected ArgInfo Kind in generic VAArg emitter!");

    assert(!AI.getInReg() &&
           "Unexpected InReg seen in arginfo in generic VAArg emitter!");
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(!AI.getDirectOffset() &&
           "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
    assert(!AI.getCoerceToType() &&
           "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");

    Address Temp = CGF.CreateMemTemp(Ty, "varet");
    llvm::Value *Val =
        CGF.Builder.CreateVAArg(VAListAddr.getPointer(), CGF.ConvertType(Ty));
    CGF.Builder.CreateStore(Val, Temp);
    return Temp;
  }
}

/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override {
    return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
  }
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    return getNaturalAlignIndirect(Ty);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

//===----------------------------------------------------------------------===//
// WebAssembly ABI Implementation
//
// This is a very simple ABI that relies a lot on DefaultABIInfo.
//===----------------------------------------------------------------------===//

class WebAssemblyABIInfo final : public DefaultABIInfo {
public:
  explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT)
      : DefaultABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo and EmitVAArg are virtual, so we
  // override them.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &Arg : FI.arguments())
      Arg.info = classifyArgumentType(Arg.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
public:
  explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new WebAssemblyABIInfo(CGT)) {}
};

/// \brief Classify argument of given type \p Ty.
ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();
    // Lower single-element structs to just pass a regular value. TODO: We
    // could do reasonable-size multiple-element structs too, using
    // getExpand(), though watch out for things like bit-fields.
    if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
      return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
  }

  // Otherwise just do the default thing.
  return DefaultABIInfo::classifyArgumentType(Ty);
}

ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
  if (isAggregateTypeForABI(RetTy)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // returned by value.
    if (!getRecordArgABI(RetTy, getCXXABI())) {
      // Ignore empty structs/unions.
      if (isEmptyRecord(getContext(), RetTy, true))
        return ABIArgInfo::getIgnore();
      // Lower single-element structs to just return a regular value. TODO: We
      // could do reasonable-size multiple-element structs too, using
      // ABIArgInfo::getDirect().
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
    }
  }

  // Otherwise just do the default thing.
  return DefaultABIInfo::classifyReturnType(RetTy);
}

Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                      QualType Ty) const {
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect=*/ false,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4),
                          /*AllowHigherAlign=*/ true);
}

//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//
// This is a simplified version of the x86_32 ABI. Arguments and return values
// are always passed on the stack.
//===----------------------------------------------------------------------===//

class PNaClABIInfo : public ABIInfo {
public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF,
                    Address VAListAddr, QualType Ty) const override;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
};

void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}

Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  // The PNaCl ABI is a bit odd, in that varargs don't use normal
  // function classification. Structs get passed directly for varargs
  // functions, through a rewriting transform in
  // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows
  // this target to actually support a va_arg instruction with an
  // aggregate type, unlike other targets.
  return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
}

/// \brief Classify argument of given type \p Ty.
ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    return getNaturalAlignIndirect(Ty);
  } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
    // Treat an enum type as its underlying type.
    Ty = EnumTy->getDecl()->getIntegerType();
  } else if (Ty->isFloatingType()) {
    // Floating-point types don't go inreg.
    return ABIArgInfo::getDirect();
  }

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // In the PNaCl ABI we always return records/structures on the stack.
  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

/// IsX86_MMXType - Return true if this is an MMX type.
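///
/// For example, <2 x i32>, <4 x i16>, and <8 x i8> are MMX types, while
/// <1 x i64> (64-bit element) and <2 x i64> (128 bits wide) are not.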
bool IsX86_MMXType(llvm::Type *IRType) {
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}

static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
      // Invalid MMX constraint
      return nullptr;
    }

    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  }

  // No operation needed
  return Ty;
}

/// Returns true if this type can be passed in SSE registers with the
/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half)
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
    // registers specially.
    unsigned VecSize = Context.getTypeSize(VT);
    if (VecSize == 128 || VecSize == 256 || VecSize == 512)
      return true;
  }
  return false;
}

/// Returns true if this aggregate is small enough to be passed in SSE
/// registers in the X86_VectorCall calling convention. Shared between x86_32
/// and x86_64.
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
}

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// \brief Similar to llvm::CCState, but for Clang.
struct CCState {
  CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}

  unsigned CC;
  unsigned FreeRegs;
  unsigned FreeSSERegs;
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public SwiftABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsRetSmallStructInRegABI;
  bool IsWin32StructABI;
  bool IsSoftFloatABI;
  bool IsMCUABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;

  ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;

  /// \brief Return the alignment to use for the given type on the stack.
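  /// Returns 0 when the default stack alignment should be used instead.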
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType Ty, CCState &State) const;

  /// \brief Updates the number of available free registers; returns
  /// true if any registers were allocated.
  bool updateFreeRegs(QualType Ty, CCState &State) const;

  bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
                                bool &NeedsPadding) const;
  bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;

  bool canExpandIndirectArgument(QualType Ty) const;

  /// \brief Rewrite the function info so that all memory arguments use
  /// inalloca.
  void rewriteWithInAlloca(CGFunctionInfo &FI) const;

  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                           CharUnits &StackOffset, ABIArgInfo &Info,
                           QualType Type) const;

public:

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                bool RetSmallStructInRegABI, bool Win32StructABI,
                unsigned NumRegisterParameters, bool SoftFloatABI)
    : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
      IsRetSmallStructInRegABI(RetSmallStructInRegABI),
      IsWin32StructABI(Win32StructABI),
      IsSoftFloatABI(SoftFloatABI),
      IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
      DefaultNumRegisterParameters(NumRegisterParameters) {}

  bool shouldPassIndirectlyForSwift(CharUnits totalSize,
                                    ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    // LLVM's x86-32 lowering currently only assigns up to three
    // integer registers and three fp registers. Oddly, it'll use up to
    // four vector registers for vectors, but those can overlap with the
    // scalar registers.
    return occupiesMoreThan(CGT, scalars, /*total*/ 3);
  }
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                          bool RetSmallStructInRegABI, bool Win32StructABI,
                          unsigned NumRegisterParameters, bool SoftFloatABI)
      : TargetCodeGenInfo(new X86_32ABIInfo(
            CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
            NumRegisterParameters, SoftFloatABI)) {}

  static bool isStructReturnInRegABI(
      const llvm::Triple &Triple, const CodeGenOptions &Opts);

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // Darwin uses different dwarf register numbers for EH.
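    // (In the standard DWARF numbering for x86-32 the stack pointer is
    // register 4; Darwin's EH tables use the numbering where it is 5.)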
    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) | // jmp rel8
                   (0x06 << 8) | // .+0x08
                   ('F' << 16) |
                   ('T' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "movl\t%ebp, %ebp"
           "\t\t## marker for objc_retainAutoreleaseReturnValue";
  }
};

}

/// Rewrite input constraint references after adding some output constraints.
/// In the case where there is one output and one input and we add one output,
/// we need to replace all operand references greater than or equal to 1:
///   mov $0, $1
///   mov eax, $1
/// The result will be:
///   mov $0, $2
///   mov eax, $2
static void rewriteInputConstraintReferences(unsigned FirstIn,
                                             unsigned NumNewOuts,
                                             std::string &AsmString) {
  std::string Buf;
  llvm::raw_string_ostream OS(Buf);
  size_t Pos = 0;
  while (Pos < AsmString.size()) {
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = AsmString.size();
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = AsmString.size();
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
    Pos = DollarEnd;
    size_t NumDollars = DollarEnd - DollarStart;
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
      // We have an operand reference.
      size_t DigitStart = Pos;
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
      if (DigitEnd == std::string::npos)
        DigitEnd = AsmString.size();
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
      unsigned OperandIndex;
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;
        OS << OperandIndex;
      } else {
        OS << OperandStr;
      }
      Pos = DigitEnd;
    }
  }
  AsmString = std::move(OS.str());
}

/// Add output constraints for EAX:EDX because they are return registers.
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());

  // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
  // larger.
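  // (The 'A' constraint names the EAX:EDX pair, so a 64-bit value comes back
  // in EDX:EAX as a single i64.)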
  if (!Constraints.empty())
    Constraints += ',';
  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
  } else {
    // Use the 'A' constraint for EAX:EDX.
    Constraints += "=A";
    ResultRegTypes.push_back(CGF.Int64Ty);
  }

  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

  // Coerce the integer by bitcasting the return slot pointer.
  ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(),
                                                  CoerceTy->getPointerTo()));
  ResultRegDests.push_back(ReturnSlot);

  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin and MCU ABIs).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) const {
  uint64_t Size = Context.getTypeSize(Ty);

  // For i386, the type must be register sized.
  // For the MCU ABI, it only needs to be <= 8 bytes.
  if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // Structure types are passed in a register if all fields would be
  // passed in a register.
  for (const auto *FD : RT->getDecl()->fields()) {
    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }
  return true;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as the element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Check for a type which we know has a simple scalar argument-passing
  // convention without any padding. (We're specifically looking for 32-
  // and 64-bit integer and integer-equivalents, float, and double.)
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

/// Test whether an argument type which is to be passed indirectly (on the
/// stack) would have the equivalent layout if it was expanded into separate
/// arguments.
/// If so, we prefer to do the latter to avoid inhibiting optimizations.
bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (!IsWin32StructABI) {
      // On non-Windows, we have to conservatively match our old bitcode
      // prototypes in order to be ABI-compatible at the bitcode level.
      if (!CXXRD->isCLike())
        return false;
    } else {
      // Don't do this for dynamic classes.
      if (CXXRD->isDynamicClass())
        return false;
      // Don't do this if there are any non-empty bases.
      for (const CXXBaseSpecifier &Base : CXXRD->bases()) {
        if (!isEmptyRecord(getContext(), Base.getType(), /*AllowArrays=*/true))
          return false;
      }
    }
  }

  uint64_t Size = 0;

  for (const auto *FD : RD->fields()) {
    // Scalar arguments on the stack get 4 byte alignment on x86. If the
    // argument is smaller than 32 bits, expanding the struct will create
    // alignment padding.
    if (!is32Or64BitBasicType(FD->getType(), getContext()))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems: we don't
    // know how to expand them yet, and the predicate for telling if a
    // bit-field still counts as "basic" is more complicated than what we were
    // doing previously.
    if (FD->isBitField())
      return false;

    Size += getContext().getTypeSize(FD->getType());
  }

  // We can do this if there was no alignment padding.
  return Size == getContext().getTypeSize(Ty);
}

ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy,
                                                  CCState &State) const {
  // If the return value is indirect, then the hidden argument is consuming
  // one integer register.
  if (State.FreeRegs) {
    --State.FreeRegs;
    if (!IsMCUABI)
      return getNaturalAlignIndirectInReg(RetTy);
  }
  return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             CCState &State) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if (State.CC == llvm::CallingConv::X86_VectorCall &&
      isHomogeneousAggregate(RetTy, Base, NumElts)) {
    // The LLVM struct type for such an aggregate should lower properly.
    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
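      // For example, a single-element 64-bit vector such as
      // 'double __attribute__((vector_size(8)))' is returned as an i64.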
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return getIndirectReturnResult(RetTy, State);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectReturnResult(RetTy, State);
    }

    // If specified, structs and unions are always indirect.
    if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
      return getIndirectReturnResult(RetTy, State);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), RetTy, true))
      return ABIArgInfo::getIgnore();

    // Small structures which are register sized are generally returned
    // in a register.
    if (shouldReturnTypeInRegister(RetTy, getContext())) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
    }

    return getIndirectReturnResult(RetTy, State);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (isRecordWithSSEVectorType(Context, I.getType()))
        return true;

  for (const auto *i : RD->fields()) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // If the alignment is less than or equal to the minimum ABI alignment,
  // just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      if (!IsMCUABI)
        return getNaturalAlignIndirectInReg(Ty);
    }
    return getNaturalAlignIndirect(Ty, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  bool Realign = TypeAlign > StackAlign;
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
                                 /*ByVal=*/true, Realign);
}

X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}

bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
  if (!IsSoftFloatABI) {
    Class C = classify(Ty);
    if (C == Float)
      return false;
  }

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)
    return false;

  if (!IsMCUABI) {
    if (SizeInRegs > State.FreeRegs) {
      State.FreeRegs = 0;
      return false;
    }
  } else {
    // The MCU psABI allows passing parameters in-reg even if there are
    // earlier parameters that are passed on the stack. Also,
    // it does not allow passing >8-byte structs in-register,
    // even if there are 3 free registers available.
    if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
      return false;
  }

  State.FreeRegs -= SizeInRegs;
  return true;
}

bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
                                             bool &InReg,
                                             bool &NeedsPadding) const {
  // On Windows, aggregates other than HFAs are never passed in registers, and
  // they do not consume register slots. Homogeneous floating-point aggregates
  // (HFAs) have already been dealt with at this point.
  if (IsWin32StructABI && isAggregateTypeForABI(Ty))
    return false;

  NeedsPadding = false;
  InReg = !IsMCUABI;

  if (!updateFreeRegs(Ty, State))
    return false;

  if (IsMCUABI)
    return true;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall) {
    if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
      NeedsPadding = true;

    return false;
  }

  return true;
}

bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
  if (!updateFreeRegs(Ty, State))
    return false;

  if (IsMCUABI)
    return false;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall) {
    if (getContext().getTypeSize(Ty) > 32)
      return false;

    return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
            Ty->isReferenceType());
  }

  return true;
}

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               CCState &State) const {
  // FIXME: Set alignment on indirect arguments.

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, false, State);
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      // The field index doesn't matter, we'll fix it up later.
      return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
    }
  }

  // vectorcall adds the concept of a homogeneous vector aggregate, similar
  // to other targets.
  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if (State.CC == llvm::CallingConv::X86_VectorCall &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (State.FreeSSERegs >= NumElts) {
      State.FreeSSERegs -= NumElts;
      if (Ty->isBuiltinType() || Ty->isVectorType())
        return ABIArgInfo::getDirect();
      return ABIArgInfo::getExpand();
    }
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  }

  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    // FIXME: This should not be byval!
    if (RT && RT->getDecl()->hasFlexibleArrayMember())
      return getIndirectResult(Ty, true, State);

    // Ignore empty structs/unions on non-Windows.
    if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding = false;
    bool InReg;
    if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
      unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
      SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      if (InReg)
        return ABIArgInfo::getDirectInReg(Result);
      else
        return ABIArgInfo::getDirect(Result);
    }
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;

    // Expand small (<= 128-bit) record types when we know that the stack
    // layout of those arguments will match the struct. This is important
    // because the LLVM backend isn't smart enough to remove byval, which
    // inhibits many optimizations.
    // Don't do this for the MCU if there are still free integer registers
    // (see X86_64 ABI for full explanation).
    if (getContext().getTypeSize(Ty) <= 4 * 32 &&
        (!IsMCUABI || State.FreeRegs == 0) && canExpandIndirectArgument(Ty))
      return ABIArgInfo::getExpandWithPadding(
          State.CC == llvm::CallingConv::X86_FastCall ||
              State.CC == llvm::CallingConv::X86_VectorCall,
          PaddingType);

    return getIndirectResult(Ty, true, State);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory; we handle this by passing
    // them as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    if (IsX86_MMXType(CGT.ConvertType(Ty)))
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));

    return ABIArgInfo::getDirect();
  }

  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool InReg = shouldPrimitiveUseInReg(Ty, State);

  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getExtendInReg();
    return ABIArgInfo::getExtend();
  }

  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}

void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  CCState State(FI.getCallingConvention());
  if (IsMCUABI)
    State.FreeRegs = 3;
  else if (State.CC == llvm::CallingConv::X86_FastCall)
    State.FreeRegs = 2;
  else if (State.CC == llvm::CallingConv::X86_VectorCall) {
    State.FreeRegs = 2;
    State.FreeSSERegs = 6;
  } else if (FI.getHasRegParm())
    State.FreeRegs = FI.getRegParm();
  else
    State.FreeRegs = DefaultNumRegisterParameters;

  if (!getCXXABI().classifyReturnType(FI)) {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
  } else if (FI.getReturnInfo().isIndirect()) {
    // The C++ ABI is not aware of register usage, so we have to check if the
    // return value was sret and put it in a register ourselves if appropriate.
    if (State.FreeRegs) {
      --State.FreeRegs; // The sret parameter consumes a register.
      if (!IsMCUABI)
        FI.getReturnInfo().setInReg(true);
    }
  }

  // The chain argument effectively gives us another free register.
  if (FI.isChainCall())
    ++State.FreeRegs;

  bool UsedInAlloca = false;
  for (auto &I : FI.arguments()) {
    I.info = classifyArgumentType(I.type, State);
    UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
  }

  // If we needed to use inalloca for any argument, do a second pass and
  // rewrite all the memory arguments to use inalloca.
  if (UsedInAlloca)
    rewriteWithInAlloca(FI);
}

void
X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                                   CharUnits &StackOffset, ABIArgInfo &Info,
                                   QualType Type) const {
  // Arguments are always 4-byte-aligned.
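  // For example, appending a 2-byte field at StackOffset 4 advances
  // StackOffset to 6, and the alignment code below then pads it back up to 8
  // with a '[2 x i8]' entry.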
1630 CharUnits FieldAlign = CharUnits::fromQuantity(4); 1631 1632 assert(StackOffset.isMultipleOf(FieldAlign) && "unaligned inalloca struct"); 1633 Info = ABIArgInfo::getInAlloca(FrameFields.size()); 1634 FrameFields.push_back(CGT.ConvertTypeForMem(Type)); 1635 StackOffset += getContext().getTypeSizeInChars(Type); 1636 1637 // Insert padding bytes to respect alignment. 1638 CharUnits FieldEnd = StackOffset; 1639 StackOffset = FieldEnd.alignTo(FieldAlign); 1640 if (StackOffset != FieldEnd) { 1641 CharUnits NumBytes = StackOffset - FieldEnd; 1642 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext()); 1643 Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity()); 1644 FrameFields.push_back(Ty); 1645 } 1646 } 1647 1648 static bool isArgInAlloca(const ABIArgInfo &Info) { 1649 // Leave ignored and inreg arguments alone. 1650 switch (Info.getKind()) { 1651 case ABIArgInfo::InAlloca: 1652 return true; 1653 case ABIArgInfo::Indirect: 1654 assert(Info.getIndirectByVal()); 1655 return true; 1656 case ABIArgInfo::Ignore: 1657 return false; 1658 case ABIArgInfo::Direct: 1659 case ABIArgInfo::Extend: 1660 if (Info.getInReg()) 1661 return false; 1662 return true; 1663 case ABIArgInfo::Expand: 1664 case ABIArgInfo::CoerceAndExpand: 1665 // These are aggregate types which are never passed in registers when 1666 // inalloca is involved. 1667 return true; 1668 } 1669 llvm_unreachable("invalid enum"); 1670 } 1671 1672 void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const { 1673 assert(IsWin32StructABI && "inalloca only supported on win32"); 1674 1675 // Build a packed struct type for all of the arguments in memory. 1676 SmallVector<llvm::Type *, 6> FrameFields; 1677 1678 // The stack alignment is always 4. 1679 CharUnits StackAlign = CharUnits::fromQuantity(4); 1680 1681 CharUnits StackOffset; 1682 CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end(); 1683 1684 // Put 'this' into the struct before 'sret', if necessary. 1685 bool IsThisCall = 1686 FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall; 1687 ABIArgInfo &Ret = FI.getReturnInfo(); 1688 if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall && 1689 isArgInAlloca(I->info)) { 1690 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type); 1691 ++I; 1692 } 1693 1694 // Put the sret parameter into the inalloca struct if it's in memory. 1695 if (Ret.isIndirect() && !Ret.getInReg()) { 1696 CanQualType PtrTy = getContext().getPointerType(FI.getReturnType()); 1697 addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy); 1698 // On Windows, the hidden sret parameter is always returned in eax. 1699 Ret.setInAllocaSRet(IsWin32StructABI); 1700 } 1701 1702 // Skip the 'this' parameter in ecx. 1703 if (IsThisCall) 1704 ++I; 1705 1706 // Put arguments passed in memory into the struct. 1707 for (; I != E; ++I) { 1708 if (isArgInAlloca(I->info)) 1709 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type); 1710 } 1711 1712 FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields, 1713 /*isPacked=*/true), 1714 StackAlign); 1715 } 1716 1717 Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF, 1718 Address VAListAddr, QualType Ty) const { 1719 1720 auto TypeInfo = getContext().getTypeInfoInChars(Ty); 1721 1722 // x86-32 changes the alignment of certain arguments on the stack. 1723 // 1724 // Just messing with TypeInfo like this works because we never pass 1725 // anything indirectly. 
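  // One illustrative consequence: va_arg(ap, double) reads an 8-byte value
  // from a slot that may be only 4-byte aligned, which is why the call below
  // advertises a 4-byte slot size and allows higher alignment.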
1726 TypeInfo.second = CharUnits::fromQuantity( 1727 getTypeStackAlignInBytes(Ty, TypeInfo.second.getQuantity())); 1728 1729 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, 1730 TypeInfo, CharUnits::fromQuantity(4), 1731 /*AllowHigherAlign*/ true); 1732 } 1733 1734 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI( 1735 const llvm::Triple &Triple, const CodeGenOptions &Opts) { 1736 assert(Triple.getArch() == llvm::Triple::x86); 1737 1738 switch (Opts.getStructReturnConvention()) { 1739 case CodeGenOptions::SRCK_Default: 1740 break; 1741 case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return 1742 return false; 1743 case CodeGenOptions::SRCK_InRegs: // -freg-struct-return 1744 return true; 1745 } 1746 1747 if (Triple.isOSDarwin() || Triple.isOSIAMCU()) 1748 return true; 1749 1750 switch (Triple.getOS()) { 1751 case llvm::Triple::DragonFly: 1752 case llvm::Triple::FreeBSD: 1753 case llvm::Triple::OpenBSD: 1754 case llvm::Triple::Bitrig: 1755 case llvm::Triple::Win32: 1756 return true; 1757 default: 1758 return false; 1759 } 1760 } 1761 1762 void X86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D, 1763 llvm::GlobalValue *GV, 1764 CodeGen::CodeGenModule &CGM) const { 1765 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 1766 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { 1767 // Get the LLVM function. 1768 llvm::Function *Fn = cast<llvm::Function>(GV); 1769 1770 // Now add the 'alignstack' attribute with a value of 16. 1771 llvm::AttrBuilder B; 1772 B.addStackAlignmentAttr(16); 1773 Fn->addAttributes(llvm::AttributeSet::FunctionIndex, 1774 llvm::AttributeSet::get(CGM.getLLVMContext(), 1775 llvm::AttributeSet::FunctionIndex, 1776 B)); 1777 } 1778 if (FD->hasAttr<AnyX86InterruptAttr>()) { 1779 llvm::Function *Fn = cast<llvm::Function>(GV); 1780 Fn->setCallingConv(llvm::CallingConv::X86_INTR); 1781 } 1782 } 1783 } 1784 1785 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable( 1786 CodeGen::CodeGenFunction &CGF, 1787 llvm::Value *Address) const { 1788 CodeGen::CGBuilderTy &Builder = CGF.Builder; 1789 1790 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 1791 1792 // 0-7 are the eight integer registers; the order is different 1793 // on Darwin (for EH), but the range is the same. 1794 // 8 is %eip. 1795 AssignToArrayRange(Builder, Address, Four8, 0, 8); 1796 1797 if (CGF.CGM.getTarget().getTriple().isOSDarwin()) { 1798 // 12-16 are st(0..4). Not sure why we stop at 4. 1799 // These have size 16, which is sizeof(long double) on 1800 // platforms with 8-byte alignment for that type. 1801 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16); 1802 AssignToArrayRange(Builder, Address, Sixteen8, 12, 16); 1803 1804 } else { 1805 // 9 is %eflags, which doesn't get a size on Darwin for some 1806 // reason. 1807 Builder.CreateAlignedStore( 1808 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9), 1809 CharUnits::One()); 1810 1811 // 11-16 are st(0..5). Not sure why we stop at 5. 1812 // These have size 12, which is sizeof(long double) on 1813 // platforms with 4-byte alignment for that type. 
1814     llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
1815     AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
1816   }
1817 
1818   return false;
1819 }
1820 
1821 //===----------------------------------------------------------------------===//
1822 // X86-64 ABI Implementation
1823 //===----------------------------------------------------------------------===//
1824 
1825 
1826 namespace {
1827 /// The AVX ABI level for X86 targets.
1828 enum class X86AVXABILevel {
1829   None,
1830   AVX,
1831   AVX512
1832 };
1833 
1834 /// \returns the size in bits of the largest (native) vector for \p AVXLevel.
1835 static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
1836   switch (AVXLevel) {
1837   case X86AVXABILevel::AVX512:
1838     return 512;
1839   case X86AVXABILevel::AVX:
1840     return 256;
1841   case X86AVXABILevel::None:
1842     return 128;
1843   }
1844   llvm_unreachable("Unknown AVXLevel");
1845 }
1846 
1847 /// X86_64ABIInfo - The X86_64 ABI information.
1848 class X86_64ABIInfo : public SwiftABIInfo {
1849   enum Class {
1850     Integer = 0,
1851     SSE,
1852     SSEUp,
1853     X87,
1854     X87Up,
1855     ComplexX87,
1856     NoClass,
1857     Memory
1858   };
1859 
1860   /// merge - Implement the X86_64 ABI merging algorithm.
1861   ///
1862   /// Merge an accumulating classification \arg Accum with a field
1863   /// classification \arg Field.
1864   ///
1865   /// \param Accum - The accumulating classification. This should
1866   /// always be either NoClass or the result of a previous merge
1867   /// call. In addition, this should never be Memory (the caller
1868   /// should just return Memory for the aggregate).
1869   static Class merge(Class Accum, Class Field);
1870 
1871   /// postMerge - Implement the X86_64 ABI post merging algorithm.
1872   ///
1873   /// Post merger cleanup, reduces a malformed Hi and Lo pair to
1874   /// final MEMORY or SSE classes when necessary.
1875   ///
1876   /// \param AggregateSize - The size of the current aggregate in
1877   /// the classification process.
1878   ///
1879   /// \param Lo - The classification for the parts of the type
1880   /// residing in the low word of the containing object.
1881   ///
1882   /// \param Hi - The classification for the parts of the type
1883   /// residing in the higher words of the containing object.
1884   ///
1885   void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
1886 
1887   /// classify - Determine the x86_64 register classes in which the
1888   /// given type T should be passed.
1889   ///
1890   /// \param Lo - The classification for the parts of the type
1891   /// residing in the low word of the containing object.
1892   ///
1893   /// \param Hi - The classification for the parts of the type
1894   /// residing in the high word of the containing object.
1895   ///
1896   /// \param OffsetBase - The bit offset of this type in the
1897   /// containing object. Some parameters are classified differently
1898   /// depending on whether they straddle an eightbyte boundary.
1899   ///
1900   /// \param isNamedArg - Whether the argument in question is a "named"
1901   /// argument, as used in AMD64-ABI 3.5.7.
1902   ///
1903   /// If a word is unused its result will be NoClass; if a type should
1904   /// be passed in Memory then at least the classification of \arg Lo
1905   /// will be Memory.
1906   ///
1907   /// The \arg Lo class will be NoClass iff the argument is ignored.
1908   ///
1909   /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
1910   /// also be ComplexX87.
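  ///
  /// Illustrative examples (our reading of the AMD64 rules, not quoted from
  /// the ABI document): an 'int' or any pointer yields Lo = Integer,
  /// Hi = NoClass; '__int128' yields Lo = Hi = Integer; and a
  /// 'struct { long a; double b; }' yields Lo = Integer, Hi = SSE.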
1911   void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
1912                 bool isNamedArg) const;
1913 
1914   llvm::Type *GetByteVectorType(QualType Ty) const;
1915   llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
1916                                  unsigned IROffset, QualType SourceTy,
1917                                  unsigned SourceOffset) const;
1918   llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
1919                                      unsigned IROffset, QualType SourceTy,
1920                                      unsigned SourceOffset) const;
1921 
1922   /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
1923   /// result such that the value will be returned in memory.
1924   ABIArgInfo getIndirectReturnResult(QualType Ty) const;
1925 
1926   /// getIndirectResult - Given a source type \arg Ty, return a suitable result
1927   /// such that the argument will be passed in memory.
1928   ///
1929   /// \param freeIntRegs - The number of free integer registers remaining
1930   /// available.
1931   ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
1932 
1933   ABIArgInfo classifyReturnType(QualType RetTy) const;
1934 
1935   ABIArgInfo classifyArgumentType(QualType Ty,
1936                                   unsigned freeIntRegs,
1937                                   unsigned &neededInt,
1938                                   unsigned &neededSSE,
1939                                   bool isNamedArg) const;
1940 
1941   bool IsIllegalVectorType(QualType Ty) const;
1942 
1943   /// The 0.98 ABI revision clarified a lot of ambiguities,
1944   /// unfortunately in ways that were not always consistent with
1945   /// certain previous compilers. In particular, platforms which
1946   /// required strict binary compatibility with older versions of GCC
1947   /// may need to exempt themselves.
1948   bool honorsRevision0_98() const {
1949     return !getTarget().getTriple().isOSDarwin();
1950   }
1951 
1952   /// GCC classifies <1 x long long> as SSE but compatibility with older clang
1953   /// compilers requires us to classify it as INTEGER.
1954   bool classifyIntegerMMXAsSSE() const {
1955     const llvm::Triple &Triple = getTarget().getTriple();
1956     if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
1957       return false;
1958     if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
1959       return false;
1960     return true;
1961   }
1962 
1963   X86AVXABILevel AVXLevel;
1964   // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
1965   // 64-bit hardware.
1966   bool Has64BitPointers;
1967 
1968 public:
1969   X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) :
1970       SwiftABIInfo(CGT), AVXLevel(AVXLevel),
1971       Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
1972   }
1973 
1974   bool isPassedUsingAVXType(QualType type) const {
1975     unsigned neededInt, neededSSE;
1976     // The freeIntRegs argument doesn't matter here.
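    // (Passing 0 free integer registers is safe here: classifyArgumentType
    // only consults freeIntRegs on its indirect fallback path, and we only
    // inspect direct classifications below.)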
1977     ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
1978                                            /*isNamedArg*/true);
1979     if (info.isDirect()) {
1980       llvm::Type *ty = info.getCoerceToType();
1981       if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
1982         return (vectorTy->getBitWidth() > 128);
1983     }
1984     return false;
1985   }
1986 
1987   void computeInfo(CGFunctionInfo &FI) const override;
1988 
1989   Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
1990                     QualType Ty) const override;
1991   Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
1992                       QualType Ty) const override;
1993 
1994   bool has64BitPointers() const {
1995     return Has64BitPointers;
1996   }
1997 
1998   bool shouldPassIndirectlyForSwift(CharUnits totalSize,
1999                                     ArrayRef<llvm::Type*> scalars,
2000                                     bool asReturnValue) const override {
2001     return occupiesMoreThan(CGT, scalars, /*total*/ 4);
2002   }
2003 };
2004 
2005 /// WinX86_64ABIInfo - The Windows X86_64 ABI information.
2006 class WinX86_64ABIInfo : public ABIInfo {
2007 public:
2008   WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT)
2009       : ABIInfo(CGT),
2010         IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
2011 
2012   void computeInfo(CGFunctionInfo &FI) const override;
2013 
2014   Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2015                     QualType Ty) const override;
2016 
2017   bool isHomogeneousAggregateBaseType(QualType Ty) const override {
2018     // FIXME: Assumes vectorcall is in use.
2019     return isX86VectorTypeForVectorCall(getContext(), Ty);
2020   }
2021 
2022   bool isHomogeneousAggregateSmallEnough(const Type *Ty,
2023                                          uint64_t NumMembers) const override {
2024     // FIXME: Assumes vectorcall is in use.
2025     return isX86VectorCallAggregateSmallEnough(NumMembers);
2026   }
2027 
2028 private:
2029   ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs,
2030                       bool IsReturnType) const;
2031 
2032   bool IsMingw64;
2033 };
2034 
2035 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2036 public:
2037   X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2038       : TargetCodeGenInfo(new X86_64ABIInfo(CGT, AVXLevel)) {}
2039 
2040   const X86_64ABIInfo &getABIInfo() const {
2041     return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
2042   }
2043 
2044   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2045     return 7;
2046   }
2047 
2048   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2049                                llvm::Value *Address) const override {
2050     llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2051 
2052     // 0-15 are the 16 integer registers.
2053     // 16 is %rip.
2054     AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2055     return false;
2056   }
2057 
2058   llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
2059                                   StringRef Constraint,
2060                                   llvm::Type* Ty) const override {
2061     return X86AdjustInlineAsmType(CGF, Constraint, Ty);
2062   }
2063 
2064   bool isNoProtoCallVariadic(const CallArgList &args,
2065                              const FunctionNoProtoType *fnType) const override {
2066     // The default CC on x86-64 sets %al to the number of SSE
2067     // registers used, and GCC sets this when calling an unprototyped
2068     // function, so we override the default behavior. However, don't do
2069     // that when AVX types are involved: the ABI explicitly states it is
2070     // undefined, and it doesn't work in practice because of how the ABI
2071     // defines varargs anyway.
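    //
    // Sketch of the case in question (hypothetical caller):
    //   void f();      // unprototyped
    //   f(1.0, 2.0);   // GCC sets %al = 2 (two SSE registers used), so we
    //                  // match that behavior unless AVX types are involved.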
2072 if (fnType->getCallConv() == CC_C) { 2073 bool HasAVXType = false; 2074 for (CallArgList::const_iterator 2075 it = args.begin(), ie = args.end(); it != ie; ++it) { 2076 if (getABIInfo().isPassedUsingAVXType(it->Ty)) { 2077 HasAVXType = true; 2078 break; 2079 } 2080 } 2081 2082 if (!HasAVXType) 2083 return true; 2084 } 2085 2086 return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType); 2087 } 2088 2089 llvm::Constant * 2090 getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override { 2091 unsigned Sig; 2092 if (getABIInfo().has64BitPointers()) 2093 Sig = (0xeb << 0) | // jmp rel8 2094 (0x0a << 8) | // .+0x0c 2095 ('F' << 16) | 2096 ('T' << 24); 2097 else 2098 Sig = (0xeb << 0) | // jmp rel8 2099 (0x06 << 8) | // .+0x08 2100 ('F' << 16) | 2101 ('T' << 24); 2102 return llvm::ConstantInt::get(CGM.Int32Ty, Sig); 2103 } 2104 2105 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2106 CodeGen::CodeGenModule &CGM) const override { 2107 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 2108 if (FD->hasAttr<AnyX86InterruptAttr>()) { 2109 llvm::Function *Fn = cast<llvm::Function>(GV); 2110 Fn->setCallingConv(llvm::CallingConv::X86_INTR); 2111 } 2112 } 2113 } 2114 }; 2115 2116 class PS4TargetCodeGenInfo : public X86_64TargetCodeGenInfo { 2117 public: 2118 PS4TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) 2119 : X86_64TargetCodeGenInfo(CGT, AVXLevel) {} 2120 2121 void getDependentLibraryOption(llvm::StringRef Lib, 2122 llvm::SmallString<24> &Opt) const override { 2123 Opt = "\01"; 2124 // If the argument contains a space, enclose it in quotes. 2125 if (Lib.find(" ") != StringRef::npos) 2126 Opt += "\"" + Lib.str() + "\""; 2127 else 2128 Opt += Lib; 2129 } 2130 }; 2131 2132 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) { 2133 // If the argument does not end in .lib, automatically add the suffix. 2134 // If the argument contains a space, enclose it in quotes. 2135 // This matches the behavior of MSVC. 2136 bool Quote = (Lib.find(" ") != StringRef::npos); 2137 std::string ArgStr = Quote ? "\"" : ""; 2138 ArgStr += Lib; 2139 if (!Lib.endswith_lower(".lib")) 2140 ArgStr += ".lib"; 2141 ArgStr += Quote ? 
"\"" : ""; 2142 return ArgStr; 2143 } 2144 2145 class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo { 2146 public: 2147 WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, 2148 bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI, 2149 unsigned NumRegisterParameters) 2150 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI, 2151 Win32StructABI, NumRegisterParameters, false) {} 2152 2153 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2154 CodeGen::CodeGenModule &CGM) const override; 2155 2156 void getDependentLibraryOption(llvm::StringRef Lib, 2157 llvm::SmallString<24> &Opt) const override { 2158 Opt = "/DEFAULTLIB:"; 2159 Opt += qualifyWindowsLibrary(Lib); 2160 } 2161 2162 void getDetectMismatchOption(llvm::StringRef Name, 2163 llvm::StringRef Value, 2164 llvm::SmallString<32> &Opt) const override { 2165 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 2166 } 2167 }; 2168 2169 static void addStackProbeSizeTargetAttribute(const Decl *D, 2170 llvm::GlobalValue *GV, 2171 CodeGen::CodeGenModule &CGM) { 2172 if (D && isa<FunctionDecl>(D)) { 2173 if (CGM.getCodeGenOpts().StackProbeSize != 4096) { 2174 llvm::Function *Fn = cast<llvm::Function>(GV); 2175 2176 Fn->addFnAttr("stack-probe-size", 2177 llvm::utostr(CGM.getCodeGenOpts().StackProbeSize)); 2178 } 2179 } 2180 } 2181 2182 void WinX86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D, 2183 llvm::GlobalValue *GV, 2184 CodeGen::CodeGenModule &CGM) const { 2185 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); 2186 2187 addStackProbeSizeTargetAttribute(D, GV, CGM); 2188 } 2189 2190 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo { 2191 public: 2192 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, 2193 X86AVXABILevel AVXLevel) 2194 : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {} 2195 2196 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 2197 CodeGen::CodeGenModule &CGM) const override; 2198 2199 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { 2200 return 7; 2201 } 2202 2203 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 2204 llvm::Value *Address) const override { 2205 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8); 2206 2207 // 0-15 are the 16 integer registers. 2208 // 16 is %rip. 2209 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16); 2210 return false; 2211 } 2212 2213 void getDependentLibraryOption(llvm::StringRef Lib, 2214 llvm::SmallString<24> &Opt) const override { 2215 Opt = "/DEFAULTLIB:"; 2216 Opt += qualifyWindowsLibrary(Lib); 2217 } 2218 2219 void getDetectMismatchOption(llvm::StringRef Name, 2220 llvm::StringRef Value, 2221 llvm::SmallString<32> &Opt) const override { 2222 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 2223 } 2224 }; 2225 2226 void WinX86_64TargetCodeGenInfo::setTargetAttributes(const Decl *D, 2227 llvm::GlobalValue *GV, 2228 CodeGen::CodeGenModule &CGM) const { 2229 TargetCodeGenInfo::setTargetAttributes(D, GV, CGM); 2230 2231 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 2232 if (FD->hasAttr<AnyX86InterruptAttr>()) { 2233 llvm::Function *Fn = cast<llvm::Function>(GV); 2234 Fn->setCallingConv(llvm::CallingConv::X86_INTR); 2235 } 2236 } 2237 2238 addStackProbeSizeTargetAttribute(D, GV, CGM); 2239 } 2240 } 2241 2242 void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo, 2243 Class &Hi) const { 2244 // AMD64-ABI 3.2.3p2: Rule 5. 
Then a post merger cleanup is done: 2245 // 2246 // (a) If one of the classes is Memory, the whole argument is passed in 2247 // memory. 2248 // 2249 // (b) If X87UP is not preceded by X87, the whole argument is passed in 2250 // memory. 2251 // 2252 // (c) If the size of the aggregate exceeds two eightbytes and the first 2253 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole 2254 // argument is passed in memory. NOTE: This is necessary to keep the 2255 // ABI working for processors that don't support the __m256 type. 2256 // 2257 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE. 2258 // 2259 // Some of these are enforced by the merging logic. Others can arise 2260 // only with unions; for example: 2261 // union { _Complex double; unsigned; } 2262 // 2263 // Note that clauses (b) and (c) were added in 0.98. 2264 // 2265 if (Hi == Memory) 2266 Lo = Memory; 2267 if (Hi == X87Up && Lo != X87 && honorsRevision0_98()) 2268 Lo = Memory; 2269 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp)) 2270 Lo = Memory; 2271 if (Hi == SSEUp && Lo != SSE) 2272 Hi = SSE; 2273 } 2274 2275 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) { 2276 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is 2277 // classified recursively so that always two fields are 2278 // considered. The resulting class is calculated according to 2279 // the classes of the fields in the eightbyte: 2280 // 2281 // (a) If both classes are equal, this is the resulting class. 2282 // 2283 // (b) If one of the classes is NO_CLASS, the resulting class is 2284 // the other class. 2285 // 2286 // (c) If one of the classes is MEMORY, the result is the MEMORY 2287 // class. 2288 // 2289 // (d) If one of the classes is INTEGER, the result is the 2290 // INTEGER. 2291 // 2292 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class, 2293 // MEMORY is used as class. 2294 // 2295 // (f) Otherwise class SSE is used. 2296 2297 // Accum should never be memory (we should have returned) or 2298 // ComplexX87 (because this cannot be passed in a structure). 2299 assert((Accum != Memory && Accum != ComplexX87) && 2300 "Invalid accumulated classification during merge."); 2301 if (Accum == Field || Field == NoClass) 2302 return Accum; 2303 if (Field == Memory) 2304 return Memory; 2305 if (Accum == NoClass) 2306 return Field; 2307 if (Accum == Integer || Field == Integer) 2308 return Integer; 2309 if (Field == X87 || Field == X87Up || Field == ComplexX87 || 2310 Accum == X87 || Accum == X87Up) 2311 return Memory; 2312 return SSE; 2313 } 2314 2315 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, 2316 Class &Lo, Class &Hi, bool isNamedArg) const { 2317 // FIXME: This code can be simplified by introducing a simple value class for 2318 // Class pairs with appropriate constructor methods for the various 2319 // situations. 2320 2321 // FIXME: Some of the split computations are wrong; unaligned vectors 2322 // shouldn't be passed in registers for example, so there is no chance they 2323 // can straddle an eightbyte. Verify & simplify. 2324 2325 Lo = Hi = NoClass; 2326 2327 Class &Current = OffsetBase < 64 ? 
Lo : Hi; 2328 Current = Memory; 2329 2330 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 2331 BuiltinType::Kind k = BT->getKind(); 2332 2333 if (k == BuiltinType::Void) { 2334 Current = NoClass; 2335 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) { 2336 Lo = Integer; 2337 Hi = Integer; 2338 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) { 2339 Current = Integer; 2340 } else if (k == BuiltinType::Float || k == BuiltinType::Double) { 2341 Current = SSE; 2342 } else if (k == BuiltinType::LongDouble) { 2343 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); 2344 if (LDF == &llvm::APFloat::IEEEquad) { 2345 Lo = SSE; 2346 Hi = SSEUp; 2347 } else if (LDF == &llvm::APFloat::x87DoubleExtended) { 2348 Lo = X87; 2349 Hi = X87Up; 2350 } else if (LDF == &llvm::APFloat::IEEEdouble) { 2351 Current = SSE; 2352 } else 2353 llvm_unreachable("unexpected long double representation!"); 2354 } 2355 // FIXME: _Decimal32 and _Decimal64 are SSE. 2356 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). 2357 return; 2358 } 2359 2360 if (const EnumType *ET = Ty->getAs<EnumType>()) { 2361 // Classify the underlying integer type. 2362 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg); 2363 return; 2364 } 2365 2366 if (Ty->hasPointerRepresentation()) { 2367 Current = Integer; 2368 return; 2369 } 2370 2371 if (Ty->isMemberPointerType()) { 2372 if (Ty->isMemberFunctionPointerType()) { 2373 if (Has64BitPointers) { 2374 // If Has64BitPointers, this is an {i64, i64}, so classify both 2375 // Lo and Hi now. 2376 Lo = Hi = Integer; 2377 } else { 2378 // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that 2379 // straddles an eightbyte boundary, Hi should be classified as well. 2380 uint64_t EB_FuncPtr = (OffsetBase) / 64; 2381 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64; 2382 if (EB_FuncPtr != EB_ThisAdj) { 2383 Lo = Hi = Integer; 2384 } else { 2385 Current = Integer; 2386 } 2387 } 2388 } else { 2389 Current = Integer; 2390 } 2391 return; 2392 } 2393 2394 if (const VectorType *VT = Ty->getAs<VectorType>()) { 2395 uint64_t Size = getContext().getTypeSize(VT); 2396 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) { 2397 // gcc passes the following as integer: 2398 // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float> 2399 // 2 bytes - <2 x char>, <1 x short> 2400 // 1 byte - <1 x char> 2401 Current = Integer; 2402 2403 // If this type crosses an eightbyte boundary, it should be 2404 // split. 2405 uint64_t EB_Lo = (OffsetBase) / 64; 2406 uint64_t EB_Hi = (OffsetBase + Size - 1) / 64; 2407 if (EB_Lo != EB_Hi) 2408 Hi = Lo; 2409 } else if (Size == 64) { 2410 QualType ElementType = VT->getElementType(); 2411 2412 // gcc passes <1 x double> in memory. :( 2413 if (ElementType->isSpecificBuiltinType(BuiltinType::Double)) 2414 return; 2415 2416 // gcc passes <1 x long long> as SSE but clang used to unconditionally 2417 // pass them as integer. For platforms where clang is the de facto 2418 // platform compiler, we must continue to use integer. 2419 if (!classifyIntegerMMXAsSSE() && 2420 (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) || 2421 ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) || 2422 ElementType->isSpecificBuiltinType(BuiltinType::Long) || 2423 ElementType->isSpecificBuiltinType(BuiltinType::ULong))) 2424 Current = Integer; 2425 else 2426 Current = SSE; 2427 2428 // If this type crosses an eightbyte boundary, it should be 2429 // split. 
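      // (For example, a 64-bit vector at bit offset 32 within a struct would
      // span bits [32, 96) and therefore occupy both eightbytes.)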
2430 if (OffsetBase && OffsetBase != 64) 2431 Hi = Lo; 2432 } else if (Size == 128 || 2433 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) { 2434 // Arguments of 256-bits are split into four eightbyte chunks. The 2435 // least significant one belongs to class SSE and all the others to class 2436 // SSEUP. The original Lo and Hi design considers that types can't be 2437 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense. 2438 // This design isn't correct for 256-bits, but since there're no cases 2439 // where the upper parts would need to be inspected, avoid adding 2440 // complexity and just consider Hi to match the 64-256 part. 2441 // 2442 // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in 2443 // registers if they are "named", i.e. not part of the "..." of a 2444 // variadic function. 2445 // 2446 // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are 2447 // split into eight eightbyte chunks, one SSE and seven SSEUP. 2448 Lo = SSE; 2449 Hi = SSEUp; 2450 } 2451 return; 2452 } 2453 2454 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 2455 QualType ET = getContext().getCanonicalType(CT->getElementType()); 2456 2457 uint64_t Size = getContext().getTypeSize(Ty); 2458 if (ET->isIntegralOrEnumerationType()) { 2459 if (Size <= 64) 2460 Current = Integer; 2461 else if (Size <= 128) 2462 Lo = Hi = Integer; 2463 } else if (ET == getContext().FloatTy) { 2464 Current = SSE; 2465 } else if (ET == getContext().DoubleTy) { 2466 Lo = Hi = SSE; 2467 } else if (ET == getContext().LongDoubleTy) { 2468 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat(); 2469 if (LDF == &llvm::APFloat::IEEEquad) 2470 Current = Memory; 2471 else if (LDF == &llvm::APFloat::x87DoubleExtended) 2472 Current = ComplexX87; 2473 else if (LDF == &llvm::APFloat::IEEEdouble) 2474 Lo = Hi = SSE; 2475 else 2476 llvm_unreachable("unexpected long double representation!"); 2477 } 2478 2479 // If this complex type crosses an eightbyte boundary then it 2480 // should be split. 2481 uint64_t EB_Real = (OffsetBase) / 64; 2482 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64; 2483 if (Hi == NoClass && EB_Real != EB_Imag) 2484 Hi = Lo; 2485 2486 return; 2487 } 2488 2489 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 2490 // Arrays are treated like structures. 2491 2492 uint64_t Size = getContext().getTypeSize(Ty); 2493 2494 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 2495 // than four eightbytes, ..., it has class MEMORY. 2496 if (Size > 256) 2497 return; 2498 2499 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned 2500 // fields, it has class MEMORY. 2501 // 2502 // Only need to check alignment of array base. 2503 if (OffsetBase % getContext().getTypeAlign(AT->getElementType())) 2504 return; 2505 2506 // Otherwise implement simplified merge. We could be smarter about 2507 // this, but it isn't worth it and would be harder to verify. 2508 Current = NoClass; 2509 uint64_t EltSize = getContext().getTypeSize(AT->getElementType()); 2510 uint64_t ArraySize = AT->getSize().getZExtValue(); 2511 2512 // The only case a 256-bit wide vector could be used is when the array 2513 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 2514 // to work for sizes wider than 128, early check and fallback to memory. 
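    // For instance, a one-element array of a 256-bit vector type
    // (Size == 256, EltSize == 256) proceeds to the element-wise merge
    // below, while any other array wider than 128 bits stops here.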
2515 if (Size > 128 && EltSize != 256) 2516 return; 2517 2518 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) { 2519 Class FieldLo, FieldHi; 2520 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg); 2521 Lo = merge(Lo, FieldLo); 2522 Hi = merge(Hi, FieldHi); 2523 if (Lo == Memory || Hi == Memory) 2524 break; 2525 } 2526 2527 postMerge(Size, Lo, Hi); 2528 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification."); 2529 return; 2530 } 2531 2532 if (const RecordType *RT = Ty->getAs<RecordType>()) { 2533 uint64_t Size = getContext().getTypeSize(Ty); 2534 2535 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 2536 // than four eightbytes, ..., it has class MEMORY. 2537 if (Size > 256) 2538 return; 2539 2540 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial 2541 // copy constructor or a non-trivial destructor, it is passed by invisible 2542 // reference. 2543 if (getRecordArgABI(RT, getCXXABI())) 2544 return; 2545 2546 const RecordDecl *RD = RT->getDecl(); 2547 2548 // Assume variable sized types are passed in memory. 2549 if (RD->hasFlexibleArrayMember()) 2550 return; 2551 2552 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 2553 2554 // Reset Lo class, this will be recomputed. 2555 Current = NoClass; 2556 2557 // If this is a C++ record, classify the bases first. 2558 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 2559 for (const auto &I : CXXRD->bases()) { 2560 assert(!I.isVirtual() && !I.getType()->isDependentType() && 2561 "Unexpected base class!"); 2562 const CXXRecordDecl *Base = 2563 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl()); 2564 2565 // Classify this field. 2566 // 2567 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a 2568 // single eightbyte, each is classified separately. Each eightbyte gets 2569 // initialized to class NO_CLASS. 2570 Class FieldLo, FieldHi; 2571 uint64_t Offset = 2572 OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base)); 2573 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg); 2574 Lo = merge(Lo, FieldLo); 2575 Hi = merge(Hi, FieldHi); 2576 if (Lo == Memory || Hi == Memory) { 2577 postMerge(Size, Lo, Hi); 2578 return; 2579 } 2580 } 2581 } 2582 2583 // Classify the fields one at a time, merging the results. 2584 unsigned idx = 0; 2585 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 2586 i != e; ++i, ++idx) { 2587 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 2588 bool BitField = i->isBitField(); 2589 2590 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than 2591 // four eightbytes, or it contains unaligned fields, it has class MEMORY. 2592 // 2593 // The only case a 256-bit wide vector could be used is when the struct 2594 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 2595 // to work for sizes wider than 128, early check and fallback to memory. 2596 // 2597 if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) { 2598 Lo = Memory; 2599 postMerge(Size, Lo, Hi); 2600 return; 2601 } 2602 // Note, skip this test for bit-fields, see below. 2603 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) { 2604 Lo = Memory; 2605 postMerge(Size, Lo, Hi); 2606 return; 2607 } 2608 2609 // Classify this field. 2610 // 2611 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate 2612 // exceeds a single eightbyte, each is classified 2613 // separately. 
Each eightbyte gets initialized to class 2614 // NO_CLASS. 2615 Class FieldLo, FieldHi; 2616 2617 // Bit-fields require special handling, they do not force the 2618 // structure to be passed in memory even if unaligned, and 2619 // therefore they can straddle an eightbyte. 2620 if (BitField) { 2621 // Ignore padding bit-fields. 2622 if (i->isUnnamedBitfield()) 2623 continue; 2624 2625 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 2626 uint64_t Size = i->getBitWidthValue(getContext()); 2627 2628 uint64_t EB_Lo = Offset / 64; 2629 uint64_t EB_Hi = (Offset + Size - 1) / 64; 2630 2631 if (EB_Lo) { 2632 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes."); 2633 FieldLo = NoClass; 2634 FieldHi = Integer; 2635 } else { 2636 FieldLo = Integer; 2637 FieldHi = EB_Hi ? Integer : NoClass; 2638 } 2639 } else 2640 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg); 2641 Lo = merge(Lo, FieldLo); 2642 Hi = merge(Hi, FieldHi); 2643 if (Lo == Memory || Hi == Memory) 2644 break; 2645 } 2646 2647 postMerge(Size, Lo, Hi); 2648 } 2649 } 2650 2651 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const { 2652 // If this is a scalar LLVM value then assume LLVM will pass it in the right 2653 // place naturally. 2654 if (!isAggregateTypeForABI(Ty)) { 2655 // Treat an enum type as its underlying type. 2656 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2657 Ty = EnumTy->getDecl()->getIntegerType(); 2658 2659 return (Ty->isPromotableIntegerType() ? 2660 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2661 } 2662 2663 return getNaturalAlignIndirect(Ty); 2664 } 2665 2666 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const { 2667 if (const VectorType *VecTy = Ty->getAs<VectorType>()) { 2668 uint64_t Size = getContext().getTypeSize(VecTy); 2669 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel); 2670 if (Size <= 64 || Size > LargestVector) 2671 return true; 2672 } 2673 2674 return false; 2675 } 2676 2677 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty, 2678 unsigned freeIntRegs) const { 2679 // If this is a scalar LLVM value then assume LLVM will pass it in the right 2680 // place naturally. 2681 // 2682 // This assumption is optimistic, as there could be free registers available 2683 // when we need to pass this argument in memory, and LLVM could try to pass 2684 // the argument in the free register. This does not seem to happen currently, 2685 // but this code would be much safer if we could mark the argument with 2686 // 'onstack'. See PR12193. 2687 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) { 2688 // Treat an enum type as its underlying type. 2689 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 2690 Ty = EnumTy->getDecl()->getIntegerType(); 2691 2692 return (Ty->isPromotableIntegerType() ? 2693 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 2694 } 2695 2696 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 2697 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 2698 2699 // Compute the byval alignment. We specify the alignment of the byval in all 2700 // cases so that the mid-level optimizer knows the alignment of the byval. 2701 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U); 2702 2703 // Attempt to avoid passing indirect results using byval when possible. This 2704 // is important for good codegen. 2705 // 2706 // We do this by coercing the value into a scalar type which the backend can 2707 // handle naturally (i.e., without using byval). 
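  // (Illustration, assuming no free integer registers remain: a
  // 'struct { void *p; }' argument gets coerced to a plain i64 by the code
  // below rather than being passed byval.)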
2708   //
2709   // For simplicity, we currently only do this when we have exhausted all of the
2710   // free integer registers. Doing this when there are free integer registers
2711   // would require more care, as we would have to ensure that the coerced value
2712   // did not claim the unused register. That would require either reordering the
2713   // arguments to the function (so that any subsequent inreg values came first),
2714   // or only doing this optimization when there were no following arguments that
2715   // might be inreg.
2716   //
2717   // We currently expect it to be rare (particularly in well written code) for
2718   // arguments to be passed on the stack when there are still free integer
2719   // registers available (this would typically imply large structs being passed
2720   // by value), so this seems like a fair tradeoff for now.
2721   //
2722   // We can revisit this if the backend grows support for 'onstack' parameter
2723   // attributes. See PR12193.
2724   if (freeIntRegs == 0) {
2725     uint64_t Size = getContext().getTypeSize(Ty);
2726 
2727     // If this type fits in an eightbyte, coerce it into the matching integral
2728     // type, which will end up on the stack (with alignment 8).
2729     if (Align == 8 && Size <= 64)
2730       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2731                                                           Size));
2732   }
2733 
2734   return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
2735 }
2736 
2737 /// The ABI specifies that a value should be passed in a full vector XMM/YMM
2738 /// register. Pick an LLVM IR type that will be passed as a vector register.
2739 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
2740   // Wrapper structs/arrays that only contain vectors are passed just like
2741   // vectors; strip them off if present.
2742   if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
2743     Ty = QualType(InnerTy, 0);
2744 
2745   llvm::Type *IRType = CGT.ConvertType(Ty);
2746   if (isa<llvm::VectorType>(IRType) ||
2747       IRType->getTypeID() == llvm::Type::FP128TyID)
2748     return IRType;
2749 
2750   // We couldn't find the preferred IR vector type for 'Ty'.
2751   uint64_t Size = getContext().getTypeSize(Ty);
2752   assert((Size == 128 || Size == 256) && "Invalid type found!");
2753 
2754   // Return a LLVM IR vector type based on the size of 'Ty'.
2755   return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()),
2756                                Size / 64);
2757 }
2758 
2759 /// BitsContainNoUserData - Return true if the specified [start,end) bit range
2760 /// is known to either be off the end of the specified type or be in
2761 /// alignment padding. The user type specified is known to be at most 128 bits
2762 /// in size, and have passed through X86_64ABIInfo::classify with a successful
2763 /// classification that put one of the two halves in the INTEGER class.
2764 ///
2765 /// It is conservatively correct to return false.
2766 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
2767                                   unsigned EndBit, ASTContext &Context) {
2768   // If the bytes being queried are off the end of the type, there is no user
2769   // data hiding here. This handles analysis of builtins, vectors and other
2770   // types that don't contain interesting padding.
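  // For example, given 'struct S { double d; int i; };' (16 bytes), bits
  // [96, 128) are tail padding, so querying that range returns true.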
2771 unsigned TySize = (unsigned)Context.getTypeSize(Ty); 2772 if (TySize <= StartBit) 2773 return true; 2774 2775 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 2776 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType()); 2777 unsigned NumElts = (unsigned)AT->getSize().getZExtValue(); 2778 2779 // Check each element to see if the element overlaps with the queried range. 2780 for (unsigned i = 0; i != NumElts; ++i) { 2781 // If the element is after the span we care about, then we're done.. 2782 unsigned EltOffset = i*EltSize; 2783 if (EltOffset >= EndBit) break; 2784 2785 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0; 2786 if (!BitsContainNoUserData(AT->getElementType(), EltStart, 2787 EndBit-EltOffset, Context)) 2788 return false; 2789 } 2790 // If it overlaps no elements, then it is safe to process as padding. 2791 return true; 2792 } 2793 2794 if (const RecordType *RT = Ty->getAs<RecordType>()) { 2795 const RecordDecl *RD = RT->getDecl(); 2796 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 2797 2798 // If this is a C++ record, check the bases first. 2799 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 2800 for (const auto &I : CXXRD->bases()) { 2801 assert(!I.isVirtual() && !I.getType()->isDependentType() && 2802 "Unexpected base class!"); 2803 const CXXRecordDecl *Base = 2804 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl()); 2805 2806 // If the base is after the span we care about, ignore it. 2807 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base)); 2808 if (BaseOffset >= EndBit) continue; 2809 2810 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0; 2811 if (!BitsContainNoUserData(I.getType(), BaseStart, 2812 EndBit-BaseOffset, Context)) 2813 return false; 2814 } 2815 } 2816 2817 // Verify that no field has data that overlaps the region of interest. Yes 2818 // this could be sped up a lot by being smarter about queried fields, 2819 // however we're only looking at structs up to 16 bytes, so we don't care 2820 // much. 2821 unsigned idx = 0; 2822 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 2823 i != e; ++i, ++idx) { 2824 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx); 2825 2826 // If we found a field after the region we care about, then we're done. 2827 if (FieldOffset >= EndBit) break; 2828 2829 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0; 2830 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset, 2831 Context)) 2832 return false; 2833 } 2834 2835 // If nothing in this record overlapped the area of interest, then we're 2836 // clean. 2837 return true; 2838 } 2839 2840 return false; 2841 } 2842 2843 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a 2844 /// float member at the specified offset. For example, {int,{float}} has a 2845 /// float at offset 4. It is conservatively correct for this routine to return 2846 /// false. 2847 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset, 2848 const llvm::DataLayout &TD) { 2849 // Base case if we find a float. 2850 if (IROffset == 0 && IRType->isFloatTy()) 2851 return true; 2852 2853 // If this is a struct, recurse into the field at the specified offset. 
2854 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 2855 const llvm::StructLayout *SL = TD.getStructLayout(STy); 2856 unsigned Elt = SL->getElementContainingOffset(IROffset); 2857 IROffset -= SL->getElementOffset(Elt); 2858 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD); 2859 } 2860 2861 // If this is an array, recurse into the field at the specified offset. 2862 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 2863 llvm::Type *EltTy = ATy->getElementType(); 2864 unsigned EltSize = TD.getTypeAllocSize(EltTy); 2865 IROffset -= IROffset/EltSize*EltSize; 2866 return ContainsFloatAtOffset(EltTy, IROffset, TD); 2867 } 2868 2869 return false; 2870 } 2871 2872 2873 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the 2874 /// low 8 bytes of an XMM register, corresponding to the SSE class. 2875 llvm::Type *X86_64ABIInfo:: 2876 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset, 2877 QualType SourceTy, unsigned SourceOffset) const { 2878 // The only three choices we have are either double, <2 x float>, or float. We 2879 // pass as float if the last 4 bytes is just padding. This happens for 2880 // structs that contain 3 floats. 2881 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32, 2882 SourceOffset*8+64, getContext())) 2883 return llvm::Type::getFloatTy(getVMContext()); 2884 2885 // We want to pass as <2 x float> if the LLVM IR type contains a float at 2886 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the 2887 // case. 2888 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) && 2889 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout())) 2890 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2); 2891 2892 return llvm::Type::getDoubleTy(getVMContext()); 2893 } 2894 2895 2896 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in 2897 /// an 8-byte GPR. This means that we either have a scalar or we are talking 2898 /// about the high or low part of an up-to-16-byte struct. This routine picks 2899 /// the best LLVM IR type to represent this, which may be i64 or may be anything 2900 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*, 2901 /// etc). 2902 /// 2903 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for 2904 /// the source type. IROffset is an offset in bytes into the LLVM IR type that 2905 /// the 8-byte value references. PrefType may be null. 2906 /// 2907 /// SourceTy is the source-level type for the entire argument. SourceOffset is 2908 /// an offset into this that we're processing (which is always either 0 or 8). 2909 /// 2910 llvm::Type *X86_64ABIInfo:: 2911 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset, 2912 QualType SourceTy, unsigned SourceOffset) const { 2913 // If we're dealing with an un-offset LLVM IR type, then it means that we're 2914 // returning an 8-byte unit starting with it. See if we can safely use it. 2915 if (IROffset == 0) { 2916 // Pointers and int64's always fill the 8-byte unit. 2917 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) || 2918 IRType->isIntegerTy(64)) 2919 return IRType; 2920 2921 // If we have a 1/2/4-byte integer, we can use it only if the rest of the 2922 // goodness in the source type is just tail padding. This is allowed to 2923 // kick in for struct {double,int} on the int, but not on 2924 // struct{double,int,int} because we wouldn't return the second int. 
We
2925   // have to do this analysis on the source type because we can't depend on
2926   // unions being lowered a specific way etc.
2927   if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
2928       IRType->isIntegerTy(32) ||
2929       (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
2930     unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
2931         cast<llvm::IntegerType>(IRType)->getBitWidth();
2932 
2933     if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
2934                               SourceOffset*8+64, getContext()))
2935       return IRType;
2936     }
2937   }
2938 
2939   if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2940     // If this is a struct, recurse into the field at the specified offset.
2941     const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
2942     if (IROffset < SL->getSizeInBytes()) {
2943       unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
2944       IROffset -= SL->getElementOffset(FieldIdx);
2945 
2946       return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
2947                                     SourceTy, SourceOffset);
2948     }
2949   }
2950 
2951   if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2952     llvm::Type *EltTy = ATy->getElementType();
2953     unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
2954     unsigned EltOffset = IROffset/EltSize*EltSize;
2955     return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
2956                                   SourceOffset);
2957   }
2958 
2959   // Okay, we don't have any better idea of what to pass, so we pass this in an
2960   // integer register that isn't too big to fit the rest of the struct.
2961   unsigned TySizeInBytes =
2962       (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
2963 
2964   assert(TySizeInBytes != SourceOffset && "Empty field?");
2965 
2966   // It is always safe to classify this as an integer type up to i64 that
2967   // isn't larger than the structure.
2968   return llvm::IntegerType::get(getVMContext(),
2969                                 std::min(TySizeInBytes-SourceOffset, 8U)*8);
2970 }
2971 
2972 
2973 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
2974 /// be used as elements of a two register pair to pass or return, return a
2975 /// first class aggregate to represent them. For example, if the low part of
2976 /// a by-value argument should be passed as i32* and the high part as float,
2977 /// return {i32*, float}.
2978 static llvm::Type *
2979 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
2980                            const llvm::DataLayout &TD) {
2981   // In order to correctly satisfy the ABI, we need the high part to start
2982   // at offset 8. If the high and low parts we inferred are both 4-byte types
2983   // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
2984   // the second element at offset 8. Check for this:
2985   unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
2986   unsigned HiAlign = TD.getABITypeAlignment(Hi);
2987   unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
2988   assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
2989 
2990   // To handle this, we have to increase the size of the low part so that the
2991   // second element will start at an 8 byte offset. We can't increase the size
2992   // of the second element because it might make us access off the end of the
2993   // struct.
2994   if (HiStart != 8) {
2995     // There are usually two sorts of types the ABI generation code can produce
2996     // for the low part of a pair that aren't 8 bytes in size: float or
2997     // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and
2998     // NaCl).
2999     // Promote these to a larger type.
3000     if (Lo->isFloatTy())
3001       Lo = llvm::Type::getDoubleTy(Lo->getContext());
3002     else {
3003       assert((Lo->isIntegerTy() || Lo->isPointerTy())
3004              && "Invalid/unknown lo type");
3005       Lo = llvm::Type::getInt64Ty(Lo->getContext());
3006     }
3007   }
3008 
3009   llvm::StructType *Result = llvm::StructType::get(Lo, Hi, nullptr);
3010 
3011 
3012   // Verify that the second element is at an 8-byte offset.
3013   assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
3014          "Invalid x86-64 argument pair!");
3015   return Result;
3016 }
3017 
3018 ABIArgInfo X86_64ABIInfo::
3019 classifyReturnType(QualType RetTy) const {
3020   // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
3021   // classification algorithm.
3022   X86_64ABIInfo::Class Lo, Hi;
3023   classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
3024 
3025   // Check some invariants.
3026   assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
3027   assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
3028 
3029   llvm::Type *ResType = nullptr;
3030   switch (Lo) {
3031   case NoClass:
3032     if (Hi == NoClass)
3033       return ABIArgInfo::getIgnore();
3034     // If the low part is just padding, it takes no register, leave ResType
3035     // null.
3036     assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3037            "Unknown missing lo part");
3038     break;
3039 
3040   case SSEUp:
3041   case X87Up:
3042     llvm_unreachable("Invalid classification for lo word.");
3043 
3044     // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
3045     // hidden argument.
3046   case Memory:
3047     return getIndirectReturnResult(RetTy);
3048 
3049     // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
3050     // available register of the sequence %rax, %rdx is used.
3051   case Integer:
3052     ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
3053 
3054     // If we have a sign or zero extended integer, make sure to return Extend
3055     // so that the parameter gets the right LLVM IR attributes.
3056     if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3057       // Treat an enum type as its underlying type.
3058       if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
3059         RetTy = EnumTy->getDecl()->getIntegerType();
3060 
3061       if (RetTy->isIntegralOrEnumerationType() &&
3062           RetTy->isPromotableIntegerType())
3063         return ABIArgInfo::getExtend();
3064     }
3065     break;
3066 
3067     // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
3068     // available SSE register of the sequence %xmm0, %xmm1 is used.
3069   case SSE:
3070     ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
3071     break;
3072 
3073     // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
3074     // returned on the X87 stack in %st0 as an 80-bit x87 number.
3075   case X87:
3076     ResType = llvm::Type::getX86_FP80Ty(getVMContext());
3077     break;
3078 
3079     // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
3080     // part of the value is returned in %st0 and the imaginary part in
3081     // %st1.
3082   case ComplexX87:
3083     assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
3084     ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
3085                                     llvm::Type::getX86_FP80Ty(getVMContext()),
3086                                     nullptr);
3087     break;
3088   }
3089 
3090   llvm::Type *HighPart = nullptr;
3091   switch (Hi) {
3092     // Memory was handled previously and X87 should
3093     // never occur as a hi class.
3094   case Memory:
3095   case X87:
3096     llvm_unreachable("Invalid classification for hi word.");
3097 
3098   case ComplexX87: // Previously handled.
3099   case NoClass:
3100     break;
3101 
3102   case Integer:
3103     HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3104     if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
3105       return ABIArgInfo::getDirect(HighPart, 8);
3106     break;
3107   case SSE:
3108     HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3109     if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
3110       return ABIArgInfo::getDirect(HighPart, 8);
3111     break;
3112 
3113     // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
3114     // is passed in the next available eightbyte chunk of the last used
3115     // vector register.
3116     //
3117     // SSEUP should always be preceded by SSE, just widen.
3118   case SSEUp:
3119     assert(Lo == SSE && "Unexpected SSEUp classification.");
3120     ResType = GetByteVectorType(RetTy);
3121     break;
3122 
3123     // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
3124     // returned together with the previous X87 value in %st0.
3125   case X87Up:
3126     // If X87Up is preceded by X87, we don't need to do
3127     // anything. However, in some cases with unions it may not be
3128     // preceded by X87. In such situations we follow gcc and pass the
3129     // extra bits in an SSE reg.
3130     if (Lo != X87) {
3131       HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3132       if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
3133         return ABIArgInfo::getDirect(HighPart, 8);
3134     }
3135     break;
3136   }
3137 
3138   // If a high part was specified, merge it together with the low part. It is
3139   // known to pass in the high eightbyte of the result. We do this by forming a
3140   // first class struct aggregate with the high and low part: {low, high}
3141   if (HighPart)
3142     ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
3143 
3144   return ABIArgInfo::getDirect(ResType);
3145 }
3146 
3147 ABIArgInfo X86_64ABIInfo::classifyArgumentType(
3148     QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
3149     bool isNamedArg)
3150   const
3151 {
3152   Ty = useFirstFieldIfTransparentUnion(Ty);
3153 
3154   X86_64ABIInfo::Class Lo, Hi;
3155   classify(Ty, 0, Lo, Hi, isNamedArg);
3156 
3157   // Check some invariants.
3158   // FIXME: Enforce these by construction.
3159   assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
3160   assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
3161 
3162   neededInt = 0;
3163   neededSSE = 0;
3164   llvm::Type *ResType = nullptr;
3165   switch (Lo) {
3166   case NoClass:
3167     if (Hi == NoClass)
3168       return ABIArgInfo::getIgnore();
3169     // If the low part is just padding, it takes no register, leave ResType
3170     // null.
3171     assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3172            "Unknown missing lo part");
3173     break;
3174 
3175     // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
3176     // on the stack.
3177   case Memory:
3178 
3179     // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
3180     // COMPLEX_X87, it is passed in memory.
3181   case X87:
3182   case ComplexX87:
3183     if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
3184       ++neededInt;
3185     return getIndirectResult(Ty, freeIntRegs);
3186 
3187   case SSEUp:
3188   case X87Up:
3189     llvm_unreachable("Invalid classification for lo word.");
3190 
3191     // AMD64-ABI 3.2.3p3: Rule 2.
If the class is INTEGER, the next 3192 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 3193 // and %r9 is used. 3194 case Integer: 3195 ++neededInt; 3196 3197 // Pick an 8-byte type based on the preferred type. 3198 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0); 3199 3200 // If we have a sign or zero extended integer, make sure to return Extend 3201 // so that the parameter gets the right LLVM IR attributes. 3202 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) { 3203 // Treat an enum type as its underlying type. 3204 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 3205 Ty = EnumTy->getDecl()->getIntegerType(); 3206 3207 if (Ty->isIntegralOrEnumerationType() && 3208 Ty->isPromotableIntegerType()) 3209 return ABIArgInfo::getExtend(); 3210 } 3211 3212 break; 3213 3214 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next 3215 // available SSE register is used, the registers are taken in the 3216 // order from %xmm0 to %xmm7. 3217 case SSE: { 3218 llvm::Type *IRType = CGT.ConvertType(Ty); 3219 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0); 3220 ++neededSSE; 3221 break; 3222 } 3223 } 3224 3225 llvm::Type *HighPart = nullptr; 3226 switch (Hi) { 3227 // Memory was handled previously, ComplexX87 and X87 should 3228 // never occur as hi classes, and X87Up must be preceded by X87, 3229 // which is passed in memory. 3230 case Memory: 3231 case X87: 3232 case ComplexX87: 3233 llvm_unreachable("Invalid classification for hi word."); 3234 3235 case NoClass: break; 3236 3237 case Integer: 3238 ++neededInt; 3239 // Pick an 8-byte type based on the preferred type. 3240 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 3241 3242 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 3243 return ABIArgInfo::getDirect(HighPart, 8); 3244 break; 3245 3246 // X87Up generally doesn't occur here (long double is passed in 3247 // memory), except in situations involving unions. 3248 case X87Up: 3249 case SSE: 3250 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8); 3251 3252 if (Lo == NoClass) // Pass HighPart at offset 8 in memory. 3253 return ABIArgInfo::getDirect(HighPart, 8); 3254 3255 ++neededSSE; 3256 break; 3257 3258 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the 3259 // eightbyte is passed in the upper half of the last used SSE 3260 // register. This only happens when 128-bit vectors are passed. 3261 case SSEUp: 3262 assert(Lo == SSE && "Unexpected SSEUp classification"); 3263 ResType = GetByteVectorType(Ty); 3264 break; 3265 } 3266 3267 // If a high part was specified, merge it together with the low part. It is 3268 // known to pass in the high eightbyte of the result. We do this by forming a 3269 // first class struct aggregate with the high and low part: {low, high} 3270 if (HighPart) 3271 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout()); 3272 3273 return ABIArgInfo::getDirect(ResType); 3274 } 3275 3276 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { 3277 3278 if (!getCXXABI().classifyReturnType(FI)) 3279 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3280 3281 // Keep track of the number of assigned registers. 3282 unsigned freeIntRegs = 6, freeSSERegs = 8; 3283 3284 // If the return value is indirect, then the hidden argument is consuming one 3285 // integer register. 3286 if (FI.getReturnInfo().isIndirect()) 3287 --freeIntRegs; 3288 3289 // The chain argument effectively gives us another free register. 
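  // (A note on why, under the usual x86-64 lowering: the static chain
  // pointer -- e.g. the extra argument created by
  // __builtin_call_with_static_chain -- travels in %r10, outside the normal
  // %rdi..%r9 sequence, so it does not consume one of the six integer
  // argument registers counted here.)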
3290 if (FI.isChainCall()) 3291 ++freeIntRegs; 3292 3293 unsigned NumRequiredArgs = FI.getNumRequiredArgs(); 3294 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers 3295 // get assigned (in left-to-right order) for passing as follows... 3296 unsigned ArgNo = 0; 3297 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 3298 it != ie; ++it, ++ArgNo) { 3299 bool IsNamedArg = ArgNo < NumRequiredArgs; 3300 3301 unsigned neededInt, neededSSE; 3302 it->info = classifyArgumentType(it->type, freeIntRegs, neededInt, 3303 neededSSE, IsNamedArg); 3304 3305 // AMD64-ABI 3.2.3p3: If there are no registers available for any 3306 // eightbyte of an argument, the whole argument is passed on the 3307 // stack. If registers have already been assigned for some 3308 // eightbytes of such an argument, the assignments get reverted. 3309 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) { 3310 freeIntRegs -= neededInt; 3311 freeSSERegs -= neededSSE; 3312 } else { 3313 it->info = getIndirectResult(it->type, freeIntRegs); 3314 } 3315 } 3316 } 3317 3318 static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF, 3319 Address VAListAddr, QualType Ty) { 3320 Address overflow_arg_area_p = CGF.Builder.CreateStructGEP( 3321 VAListAddr, 2, CharUnits::fromQuantity(8), "overflow_arg_area_p"); 3322 llvm::Value *overflow_arg_area = 3323 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); 3324 3325 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 3326 // byte boundary if alignment needed by type exceeds 8 byte boundary. 3327 // It isn't stated explicitly in the standard, but in practice we use 3328 // alignment greater than 16 where necessary. 3329 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty); 3330 if (Align > CharUnits::fromQuantity(8)) { 3331 overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area, 3332 Align); 3333 } 3334 3335 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. 3336 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); 3337 llvm::Value *Res = 3338 CGF.Builder.CreateBitCast(overflow_arg_area, 3339 llvm::PointerType::getUnqual(LTy)); 3340 3341 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: 3342 // l->overflow_arg_area + sizeof(type). 3343 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to 3344 // an 8 byte boundary. 3345 3346 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8; 3347 llvm::Value *Offset = 3348 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7); 3349 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset, 3350 "overflow_arg_area.next"); 3351 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p); 3352 3353 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type. 3354 return Address(Res, Align); 3355 } 3356 3357 Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 3358 QualType Ty) const { 3359 // Assume that va_list type is correct; should be pointer to LLVM type: 3360 // struct { 3361 // i32 gp_offset; 3362 // i32 fp_offset; 3363 // i8* overflow_arg_area; 3364 // i8* reg_save_area; 3365 // }; 3366 unsigned neededInt, neededSSE; 3367 3368 Ty = getContext().getCanonicalType(Ty); 3369 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE, 3370 /*isNamedArg*/false); 3371 3372 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed 3373 // in the registers. If not go to step 7. 
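  // For illustration: a plain 'double' classifies with neededSSE == 1 and
  // neededInt == 0, while a MEMORY-classified type (say, a 32-byte struct)
  // leaves both counts at zero and is fetched directly from the overflow
  // area by the memory path above.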
3374   if (!neededInt && !neededSSE)
3375     return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3376
3377   // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
3378   // general purpose registers needed to pass type and num_fp to hold
3379   // the number of floating point registers needed.
3380
3381   // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
3382   // registers: if l->gp_offset > 48 - num_gp * 8 or
3383   // l->fp_offset > 304 - num_fp * 16, go to step 7.
3384   //
3385   // NOTE: 304 is a typo in the ABI document; there are only
3386   // (6 * 8 + 8 * 16) = 176 bytes of register save space.
3387
3388   llvm::Value *InRegs = nullptr;
3389   Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
3390   llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
3391   if (neededInt) {
3392     gp_offset_p =
3393         CGF.Builder.CreateStructGEP(VAListAddr, 0, CharUnits::Zero(),
3394                                     "gp_offset_p");
3395     gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
3396     InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
3397     InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
3398   }
3399
3400   if (neededSSE) {
3401     fp_offset_p =
3402         CGF.Builder.CreateStructGEP(VAListAddr, 1, CharUnits::fromQuantity(4),
3403                                     "fp_offset_p");
3404     fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
3405     llvm::Value *FitsInFP =
3406         llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
3407     FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
3408     InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
3409   }
3410
3411   llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
3412   llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
3413   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
3414   CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
3415
3416   // Emit code to load the value if it was passed in registers.
3417
3418   CGF.EmitBlock(InRegBlock);
3419
3420   // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
3421   // an offset of l->gp_offset and/or l->fp_offset. This may require
3422   // copying to a temporary location in case the parameter is passed
3423   // in different register classes or requires an alignment greater
3424   // than 8 for general purpose registers and 16 for XMM registers.
3425   //
3426   // FIXME: This really results in shameful code when we end up needing to
3427   // collect arguments from different places; often what should result in a
3428   // simple assembling of a structure from scattered addresses has many more
3429   // loads than necessary. Can we clean this up?
3430   llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
3431   llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
3432       CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(16)),
3433       "reg_save_area");
3434
3435   Address RegAddr = Address::invalid();
3436   if (neededInt && neededSSE) {
3437     // FIXME: Cleanup.
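    // Illustrative case: for struct { long a; double b; } the eightbytes
    // classify as INTEGER and SSE, so 'a' was spilled into the GP portion of
    // the register save area and 'b' into the FP portion; the code below
    // reassembles the two halves into one contiguous temporary.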
3438 assert(AI.isDirect() && "Unexpected ABI info for mixed regs"); 3439 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType()); 3440 Address Tmp = CGF.CreateMemTemp(Ty); 3441 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST); 3442 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs"); 3443 llvm::Type *TyLo = ST->getElementType(0); 3444 llvm::Type *TyHi = ST->getElementType(1); 3445 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) && 3446 "Unexpected ABI info for mixed regs"); 3447 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo); 3448 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi); 3449 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegSaveArea, gp_offset); 3450 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegSaveArea, fp_offset); 3451 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr; 3452 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr; 3453 3454 // Copy the first element. 3455 llvm::Value *V = 3456 CGF.Builder.CreateDefaultAlignedLoad( 3457 CGF.Builder.CreateBitCast(RegLoAddr, PTyLo)); 3458 CGF.Builder.CreateStore(V, 3459 CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero())); 3460 3461 // Copy the second element. 3462 V = CGF.Builder.CreateDefaultAlignedLoad( 3463 CGF.Builder.CreateBitCast(RegHiAddr, PTyHi)); 3464 CharUnits Offset = CharUnits::fromQuantity( 3465 getDataLayout().getStructLayout(ST)->getElementOffset(1)); 3466 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1, Offset)); 3467 3468 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy); 3469 } else if (neededInt) { 3470 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, gp_offset), 3471 CharUnits::fromQuantity(8)); 3472 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy); 3473 3474 // Copy to a temporary if necessary to ensure the appropriate alignment. 3475 std::pair<CharUnits, CharUnits> SizeAlign = 3476 getContext().getTypeInfoInChars(Ty); 3477 uint64_t TySize = SizeAlign.first.getQuantity(); 3478 CharUnits TyAlign = SizeAlign.second; 3479 3480 // Copy into a temporary if the type is more aligned than the 3481 // register save area. 3482 if (TyAlign.getQuantity() > 8) { 3483 Address Tmp = CGF.CreateMemTemp(Ty); 3484 CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false); 3485 RegAddr = Tmp; 3486 } 3487 3488 } else if (neededSSE == 1) { 3489 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset), 3490 CharUnits::fromQuantity(16)); 3491 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy); 3492 } else { 3493 assert(neededSSE == 2 && "Invalid number of needed registers!"); 3494 // SSE registers are spaced 16 bytes apart in the register save 3495 // area, we need to collect the two eightbytes together. 3496 // The ABI isn't explicit about this, but it seems reasonable 3497 // to assume that the slots are 16-byte aligned, since the stack is 3498 // naturally 16-byte aligned and the prologue is expected to store 3499 // all the SSE registers to the RSA. 
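    // Example of the layout this relies on: struct { double x, y; } needs
    // neededSSE == 2, and 'x' was saved at fp_offset while 'y' sits at
    // fp_offset + 16 (not + 8), because every XMM register gets a full
    // 16-byte slot in the save area.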
3500     Address RegAddrLo = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
3501                                 CharUnits::fromQuantity(16));
3502     Address RegAddrHi =
3503       CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
3504                                              CharUnits::fromQuantity(16));
3505     llvm::Type *DoubleTy = CGF.DoubleTy;
3506     llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, nullptr);
3507     llvm::Value *V;
3508     Address Tmp = CGF.CreateMemTemp(Ty);
3509     Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
3510     V = CGF.Builder.CreateLoad(
3511         CGF.Builder.CreateElementBitCast(RegAddrLo, DoubleTy));
3512     CGF.Builder.CreateStore(V,
3513         CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero()));
3514     V = CGF.Builder.CreateLoad(
3515         CGF.Builder.CreateElementBitCast(RegAddrHi, DoubleTy));
3516     CGF.Builder.CreateStore(V,
3517         CGF.Builder.CreateStructGEP(Tmp, 1, CharUnits::fromQuantity(8)));
3518
3519     RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
3520   }
3521
3522   // AMD64-ABI 3.5.7p5: Step 5. Set:
3523   // l->gp_offset = l->gp_offset + num_gp * 8
3524   // l->fp_offset = l->fp_offset + num_fp * 16.
3525   if (neededInt) {
3526     llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
3527     CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
3528                             gp_offset_p);
3529   }
3530   if (neededSSE) {
3531     llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
3532     CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
3533                             fp_offset_p);
3534   }
3535   CGF.EmitBranch(ContBlock);
3536
3537   // Emit code to load the value if it was passed in memory.
3538
3539   CGF.EmitBlock(InMemBlock);
3540   Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3541
3542   // Return the appropriate result.
3543
3544   CGF.EmitBlock(ContBlock);
3545   Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
3546                                  "vaarg.addr");
3547   return ResAddr;
3548 }
3549
3550 Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
3551                                    QualType Ty) const {
3552   return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
3553                           CGF.getContext().getTypeInfoInChars(Ty),
3554                           CharUnits::fromQuantity(8),
3555                           /*allowHigherAlign*/ false);
3556 }
3557
3558 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
3559                                       bool IsReturnType) const {
3560
3561   if (Ty->isVoidType())
3562     return ABIArgInfo::getIgnore();
3563
3564   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3565     Ty = EnumTy->getDecl()->getIntegerType();
3566
3567   TypeInfo Info = getContext().getTypeInfo(Ty);
3568   uint64_t Width = Info.Width;
3569   CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);
3570
3571   const RecordType *RT = Ty->getAs<RecordType>();
3572   if (RT) {
3573     if (!IsReturnType) {
3574       if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
3575         return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
3576     }
3577
3578     if (RT->getDecl()->hasFlexibleArrayMember())
3579       return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
3580
3581   }
3582
3583   // vectorcall adds the concept of a homogeneous vector aggregate, similar to
3584   // other targets.
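  // For instance, struct { __m128 a, b; } forms a two-element homogeneous
  // vector aggregate: with enough free XMM registers it is expanded below
  // into two direct vector arguments, and is otherwise passed indirectly.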
3585   const Type *Base = nullptr;
3586   uint64_t NumElts = 0;
3587   if (FreeSSERegs && isHomogeneousAggregate(Ty, Base, NumElts)) {
3588     if (FreeSSERegs >= NumElts) {
3589       FreeSSERegs -= NumElts;
3590       if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
3591         return ABIArgInfo::getDirect();
3592       return ABIArgInfo::getExpand();
3593     }
3594     return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3595   }
3596
3598   if (Ty->isMemberPointerType()) {
3599     // If the member pointer is represented by an LLVM int or ptr, pass it
3600     // directly.
3601     llvm::Type *LLTy = CGT.ConvertType(Ty);
3602     if (LLTy->isPointerTy() || LLTy->isIntegerTy())
3603       return ABIArgInfo::getDirect();
3604   }
3605
3606   if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
3607     // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
3608     // not 1, 2, 4, or 8 bytes, must be passed by reference."
3609     if (Width > 64 || !llvm::isPowerOf2_64(Width))
3610       return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
3611
3612     // Otherwise, coerce it to a small integer.
3613     return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
3614   }
3615
3616   // Bool is always extended by the ABI; other builtin types are not
3617   // extended.
3618   const BuiltinType *BT = Ty->getAs<BuiltinType>();
3619   if (BT && BT->getKind() == BuiltinType::Bool)
3620     return ABIArgInfo::getExtend();
3621
3622   // Mingw64 GCC uses the old 80-bit extended precision floating point unit;
3623   // it passes long doubles indirectly, through memory.
3624   if (IsMingw64 && BT && BT->getKind() == BuiltinType::LongDouble) {
3625     const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
3626     if (LDF == &llvm::APFloat::x87DoubleExtended)
3627       return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3628   }
3629
3630   return ABIArgInfo::getDirect();
3631 }
3632
3633 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3634   bool IsVectorCall =
3635       FI.getCallingConvention() == llvm::CallingConv::X86_VectorCall;
3636
3637   // We can use up to 4 SSE return registers with vectorcall.
3638   unsigned FreeSSERegs = IsVectorCall ? 4 : 0;
3639   if (!getCXXABI().classifyReturnType(FI))
3640     FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true);
3641
3642   // We can use up to 6 SSE register parameters with vectorcall.
3643   FreeSSERegs = IsVectorCall ? 6 : 0;
3644   for (auto &I : FI.arguments())
3645     I.info = classify(I.type, FreeSSERegs, false);
3646 }
3647
3648 Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3649                                     QualType Ty) const {
3650   return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
3651                           CGF.getContext().getTypeInfoInChars(Ty),
3652                           CharUnits::fromQuantity(8),
3653                           /*allowHigherAlign*/ false);
3654 }
3655
3656 // PowerPC-32
3657 namespace {
3658 /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
3659 class PPC32_SVR4_ABIInfo : public DefaultABIInfo { 3660 bool IsSoftFloatABI; 3661 public: 3662 PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI) 3663 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI) {} 3664 3665 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 3666 QualType Ty) const override; 3667 }; 3668 3669 class PPC32TargetCodeGenInfo : public TargetCodeGenInfo { 3670 public: 3671 PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI) 3672 : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT, SoftFloatABI)) {} 3673 3674 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 3675 // This is recovered from gcc output. 3676 return 1; // r1 is the dedicated stack pointer 3677 } 3678 3679 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 3680 llvm::Value *Address) const override; 3681 }; 3682 3683 } 3684 3685 // TODO: this implementation is now likely redundant with 3686 // DefaultABIInfo::EmitVAArg. 3687 Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList, 3688 QualType Ty) const { 3689 const unsigned OverflowLimit = 8; 3690 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { 3691 // TODO: Implement this. For now ignore. 3692 (void)CTy; 3693 return Address::invalid(); // FIXME? 3694 } 3695 3696 // struct __va_list_tag { 3697 // unsigned char gpr; 3698 // unsigned char fpr; 3699 // unsigned short reserved; 3700 // void *overflow_arg_area; 3701 // void *reg_save_area; 3702 // }; 3703 3704 bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64; 3705 bool isInt = 3706 Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType(); 3707 bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64; 3708 3709 // All aggregates are passed indirectly? That doesn't seem consistent 3710 // with the argument-lowering code. 3711 bool isIndirect = Ty->isAggregateType(); 3712 3713 CGBuilderTy &Builder = CGF.Builder; 3714 3715 // The calling convention either uses 1-2 GPRs or 1 FPR. 3716 Address NumRegsAddr = Address::invalid(); 3717 if (isInt || IsSoftFloatABI) { 3718 NumRegsAddr = Builder.CreateStructGEP(VAList, 0, CharUnits::Zero(), "gpr"); 3719 } else { 3720 NumRegsAddr = Builder.CreateStructGEP(VAList, 1, CharUnits::One(), "fpr"); 3721 } 3722 3723 llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs"); 3724 3725 // "Align" the register count when TY is i64. 3726 if (isI64 || (isF64 && IsSoftFloatABI)) { 3727 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1)); 3728 NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U)); 3729 } 3730 3731 llvm::Value *CC = 3732 Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond"); 3733 3734 llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs"); 3735 llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow"); 3736 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont"); 3737 3738 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow); 3739 3740 llvm::Type *DirectTy = CGF.ConvertType(Ty); 3741 if (isIndirect) DirectTy = DirectTy->getPointerTo(0); 3742 3743 // Case 1: consume registers. 
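  // The register save area, per the 32-bit SVR4 ABI, holds r3..r10
  // (8 x 4 bytes) followed by f1..f8 (8 x 8 bytes), which is why the
  // floating-point registers are found at a fixed 32-byte offset below.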
3744   Address RegAddr = Address::invalid();
3745   {
3746     CGF.EmitBlock(UsingRegs);
3747
3748     Address RegSaveAreaPtr =
3749       Builder.CreateStructGEP(VAList, 4, CharUnits::fromQuantity(8));
3750     RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr),
3751                       CharUnits::fromQuantity(8));
3752     assert(RegAddr.getElementType() == CGF.Int8Ty);
3753
3754     // Floating-point registers start after the general-purpose registers.
3755     if (!(isInt || IsSoftFloatABI)) {
3756       RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
3757                                                    CharUnits::fromQuantity(32));
3758     }
3759
3760     // Get the address of the saved value by scaling the number of
3761     // registers we've used by the size of each register.
3762     CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
3763     llvm::Value *RegOffset =
3764       Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
3765     RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty,
3766                                             RegAddr.getPointer(), RegOffset),
3767                       RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
3768     RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);
3769
3770     // Increase the used-register count.
3771     NumRegs =
3772       Builder.CreateAdd(NumRegs,
3773                         Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
3774     Builder.CreateStore(NumRegs, NumRegsAddr);
3775
3776     CGF.EmitBranch(Cont);
3777   }
3778
3779   // Case 2: consume space in the overflow area.
3780   Address MemAddr = Address::invalid();
3781   {
3782     CGF.EmitBlock(UsingOverflow);
3783
3784     Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
3785
3786     // Everything in the overflow area is rounded up to a size of at least 4.
3787     CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);
3788
3789     CharUnits Size;
3790     if (!isIndirect) {
3791       auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
3792       Size = TypeInfo.first.alignTo(OverflowAreaAlign);
3793     } else {
3794       Size = CGF.getPointerSize();
3795     }
3796
3797     Address OverflowAreaAddr =
3798       Builder.CreateStructGEP(VAList, 3, CharUnits::fromQuantity(4));
3799     Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"),
3800                          OverflowAreaAlign);
3801     // Round up the address of the argument to the required alignment.
3802     CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
3803     if (Align > OverflowAreaAlign) {
3804       llvm::Value *Ptr = OverflowArea.getPointer();
3805       OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
3806                              Align);
3807     }
3808
3809     MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy);
3810
3811     // Increase the overflow area.
3812     OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
3813     Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
3814     CGF.EmitBranch(Cont);
3815   }
3816
3817   CGF.EmitBlock(Cont);
3818
3819   // Merge the cases with a phi.
3820   Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
3821                                 "vaarg.addr");
3822
3823   // Load the pointer if the argument was passed indirectly.
3824   if (isIndirect) {
3825     Result = Address(Builder.CreateLoad(Result, "aggr"),
3826                      getContext().getTypeAlignInChars(Ty));
3827   }
3828
3829   return Result;
3830 }
3831
3832 bool
3833 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3834                                                 llvm::Value *Address) const {
3835   // This is calculated from the LLVM and GCC tables and verified
3836   // against gcc output.  AFAIK all ABIs use the same encoding.
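  // (The table being filled in maps each DWARF register number to that
  // register's size in bytes; the DWARF unwinder consults it when restoring
  // registers during exception handling.)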
3837 3838 CodeGen::CGBuilderTy &Builder = CGF.Builder; 3839 3840 llvm::IntegerType *i8 = CGF.Int8Ty; 3841 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 3842 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 3843 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 3844 3845 // 0-31: r0-31, the 4-byte general-purpose registers 3846 AssignToArrayRange(Builder, Address, Four8, 0, 31); 3847 3848 // 32-63: fp0-31, the 8-byte floating-point registers 3849 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 3850 3851 // 64-76 are various 4-byte special-purpose registers: 3852 // 64: mq 3853 // 65: lr 3854 // 66: ctr 3855 // 67: ap 3856 // 68-75 cr0-7 3857 // 76: xer 3858 AssignToArrayRange(Builder, Address, Four8, 64, 76); 3859 3860 // 77-108: v0-31, the 16-byte vector registers 3861 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 3862 3863 // 109: vrsave 3864 // 110: vscr 3865 // 111: spe_acc 3866 // 112: spefscr 3867 // 113: sfp 3868 AssignToArrayRange(Builder, Address, Four8, 109, 113); 3869 3870 return false; 3871 } 3872 3873 // PowerPC-64 3874 3875 namespace { 3876 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information. 3877 class PPC64_SVR4_ABIInfo : public ABIInfo { 3878 public: 3879 enum ABIKind { 3880 ELFv1 = 0, 3881 ELFv2 3882 }; 3883 3884 private: 3885 static const unsigned GPRBits = 64; 3886 ABIKind Kind; 3887 bool HasQPX; 3888 3889 // A vector of float or double will be promoted to <4 x f32> or <4 x f64> and 3890 // will be passed in a QPX register. 3891 bool IsQPXVectorTy(const Type *Ty) const { 3892 if (!HasQPX) 3893 return false; 3894 3895 if (const VectorType *VT = Ty->getAs<VectorType>()) { 3896 unsigned NumElements = VT->getNumElements(); 3897 if (NumElements == 1) 3898 return false; 3899 3900 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) { 3901 if (getContext().getTypeSize(Ty) <= 256) 3902 return true; 3903 } else if (VT->getElementType()-> 3904 isSpecificBuiltinType(BuiltinType::Float)) { 3905 if (getContext().getTypeSize(Ty) <= 128) 3906 return true; 3907 } 3908 } 3909 3910 return false; 3911 } 3912 3913 bool IsQPXVectorTy(QualType Ty) const { 3914 return IsQPXVectorTy(Ty.getTypePtr()); 3915 } 3916 3917 public: 3918 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX) 3919 : ABIInfo(CGT), Kind(Kind), HasQPX(HasQPX) {} 3920 3921 bool isPromotableTypeForABI(QualType Ty) const; 3922 CharUnits getParamTypeAlignment(QualType Ty) const; 3923 3924 ABIArgInfo classifyReturnType(QualType RetTy) const; 3925 ABIArgInfo classifyArgumentType(QualType Ty) const; 3926 3927 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 3928 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 3929 uint64_t Members) const override; 3930 3931 // TODO: We can add more logic to computeInfo to improve performance. 3932 // Example: For aggregate arguments that fit in a register, we could 3933 // use getDirectInReg (as is done below for structs containing a single 3934 // floating-point value) to avoid pushing them to memory on function 3935 // entry. This would require changing the logic in PPCISelLowering 3936 // when lowering the parameters in the caller and args in the callee. 3937 void computeInfo(CGFunctionInfo &FI) const override { 3938 if (!getCXXABI().classifyReturnType(FI)) 3939 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 3940 for (auto &I : FI.arguments()) { 3941 // We rely on the default argument classification for the most part. 
3942       // One exception: An aggregate containing a single floating-point
3943       // or vector item must be passed in a register if one is available.
3944       const Type *T = isSingleElementStruct(I.type, getContext());
3945       if (T) {
3946         const BuiltinType *BT = T->getAs<BuiltinType>();
3947         if (IsQPXVectorTy(T) ||
3948             (T->isVectorType() && getContext().getTypeSize(T) == 128) ||
3949             (BT && BT->isFloatingPoint())) {
3950           QualType QT(T, 0);
3951           I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
3952           continue;
3953         }
3954       }
3955       I.info = classifyArgumentType(I.type);
3956     }
3957   }
3958
3959   Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3960                     QualType Ty) const override;
3961 };
3962
3963 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
3964
3965 public:
3966   PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
3967                                PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX)
3968       : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind, HasQPX)) {}
3969
3970   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
3971     // This is recovered from gcc output.
3972     return 1; // r1 is the dedicated stack pointer
3973   }
3974
3975   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3976                                llvm::Value *Address) const override;
3977 };
3978
3979 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
3980 public:
3981   PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
3982
3983   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
3984     // This is recovered from gcc output.
3985     return 1; // r1 is the dedicated stack pointer
3986   }
3987
3988   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3989                                llvm::Value *Address) const override;
3990 };
3991
3992 }
3993
3994 // Return true if the ABI requires Ty to be passed sign- or zero-
3995 // extended to 64 bits.
3996 bool
3997 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
3998   // Treat an enum type as its underlying type.
3999   if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4000     Ty = EnumTy->getDecl()->getIntegerType();
4001
4002   // Promotable integer types are required to be promoted by the ABI.
4003   if (Ty->isPromotableIntegerType())
4004     return true;
4005
4006   // In addition to the usual promotable integer types, we also need to
4007   // extend all 32-bit types, since the ABI requires promotion to 64 bits.
4008   if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4009     switch (BT->getKind()) {
4010     case BuiltinType::Int:
4011     case BuiltinType::UInt:
4012       return true;
4013     default:
4014       break;
4015     }
4016
4017   return false;
4018 }
4019
4020 /// getParamTypeAlignment - Determine the alignment a type requires in the
4021 /// parameter area, which may be 16 bytes or higher. Always returns at least 8.
4022 CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
4023   // Complex types are passed just like their elements.
4024   if (const ComplexType *CTy = Ty->getAs<ComplexType>())
4025     Ty = CTy->getElementType();
4026
4027   // Only vector types of size 16 bytes need alignment (larger types are
4028   // passed via reference, smaller types are not aligned).
4029   if (IsQPXVectorTy(Ty)) {
4030     if (getContext().getTypeSize(Ty) > 128)
4031       return CharUnits::fromQuantity(32);
4032
4033     return CharUnits::fromQuantity(16);
4034   } else if (Ty->isVectorType()) {
4035     return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ?
16 : 8); 4036 } 4037 4038 // For single-element float/vector structs, we consider the whole type 4039 // to have the same alignment requirements as its single element. 4040 const Type *AlignAsType = nullptr; 4041 const Type *EltType = isSingleElementStruct(Ty, getContext()); 4042 if (EltType) { 4043 const BuiltinType *BT = EltType->getAs<BuiltinType>(); 4044 if (IsQPXVectorTy(EltType) || (EltType->isVectorType() && 4045 getContext().getTypeSize(EltType) == 128) || 4046 (BT && BT->isFloatingPoint())) 4047 AlignAsType = EltType; 4048 } 4049 4050 // Likewise for ELFv2 homogeneous aggregates. 4051 const Type *Base = nullptr; 4052 uint64_t Members = 0; 4053 if (!AlignAsType && Kind == ELFv2 && 4054 isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members)) 4055 AlignAsType = Base; 4056 4057 // With special case aggregates, only vector base types need alignment. 4058 if (AlignAsType && IsQPXVectorTy(AlignAsType)) { 4059 if (getContext().getTypeSize(AlignAsType) > 128) 4060 return CharUnits::fromQuantity(32); 4061 4062 return CharUnits::fromQuantity(16); 4063 } else if (AlignAsType) { 4064 return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8); 4065 } 4066 4067 // Otherwise, we only need alignment for any aggregate type that 4068 // has an alignment requirement of >= 16 bytes. 4069 if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) { 4070 if (HasQPX && getContext().getTypeAlign(Ty) >= 256) 4071 return CharUnits::fromQuantity(32); 4072 return CharUnits::fromQuantity(16); 4073 } 4074 4075 return CharUnits::fromQuantity(8); 4076 } 4077 4078 /// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous 4079 /// aggregate. Base is set to the base element type, and Members is set 4080 /// to the number of base elements. 4081 bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base, 4082 uint64_t &Members) const { 4083 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 4084 uint64_t NElements = AT->getSize().getZExtValue(); 4085 if (NElements == 0) 4086 return false; 4087 if (!isHomogeneousAggregate(AT->getElementType(), Base, Members)) 4088 return false; 4089 Members *= NElements; 4090 } else if (const RecordType *RT = Ty->getAs<RecordType>()) { 4091 const RecordDecl *RD = RT->getDecl(); 4092 if (RD->hasFlexibleArrayMember()) 4093 return false; 4094 4095 Members = 0; 4096 4097 // If this is a C++ record, check the bases first. 4098 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 4099 for (const auto &I : CXXRD->bases()) { 4100 // Ignore empty records. 4101 if (isEmptyRecord(getContext(), I.getType(), true)) 4102 continue; 4103 4104 uint64_t FldMembers; 4105 if (!isHomogeneousAggregate(I.getType(), Base, FldMembers)) 4106 return false; 4107 4108 Members += FldMembers; 4109 } 4110 } 4111 4112 for (const auto *FD : RD->fields()) { 4113 // Ignore (non-zero arrays of) empty records. 4114 QualType FT = FD->getType(); 4115 while (const ConstantArrayType *AT = 4116 getContext().getAsConstantArrayType(FT)) { 4117 if (AT->getSize().getZExtValue() == 0) 4118 return false; 4119 FT = AT->getElementType(); 4120 } 4121 if (isEmptyRecord(getContext(), FT, true)) 4122 continue; 4123 4124 // For compatibility with GCC, ignore empty bitfields in C++ mode. 
4125       if (getContext().getLangOpts().CPlusPlus &&
4126           FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
4127         continue;
4128
4129       uint64_t FldMembers;
4130       if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
4131         return false;
4132
4133       Members = (RD->isUnion() ?
4134                  std::max(Members, FldMembers) : Members + FldMembers);
4135     }
4136
4137     if (!Base)
4138       return false;
4139
4140     // Ensure there is no padding.
4141     if (getContext().getTypeSize(Base) * Members !=
4142         getContext().getTypeSize(Ty))
4143       return false;
4144   } else {
4145     Members = 1;
4146     if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
4147       Members = 2;
4148       Ty = CT->getElementType();
4149     }
4150
4151     // Most ABIs only support float, double, and some vector type widths.
4152     if (!isHomogeneousAggregateBaseType(Ty))
4153       return false;
4154
4155     // The base type must be the same for all members.  Types that
4156     // agree in both total size and mode (float vs. vector) are
4157     // treated as being equivalent here.
4158     const Type *TyPtr = Ty.getTypePtr();
4159     if (!Base) {
4160       Base = TyPtr;
4161       // For a non-power-of-2 vector, the allocated size is already rounded
4162       // up to a power of 2, so widen the vector type to match that size.
4163       if (const VectorType *VT = Base->getAs<VectorType>()) {
4164         QualType EltTy = VT->getElementType();
4165         unsigned NumElements =
4166             getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy);
4167         Base = getContext()
4168                    .getVectorType(EltTy, NumElements, VT->getVectorKind())
4169                    .getTypePtr();
4170       }
4171     }
4172
4173     if (Base->isVectorType() != TyPtr->isVectorType() ||
4174         getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
4175       return false;
4176   }
4177   return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
4178 }
4179
4180 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
4181   // Homogeneous aggregates for ELFv2 must have base types of float,
4182   // double, long double, or 128-bit vectors.
4183   if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
4184     if (BT->getKind() == BuiltinType::Float ||
4185         BT->getKind() == BuiltinType::Double ||
4186         BT->getKind() == BuiltinType::LongDouble)
4187       return true;
4188   }
4189   if (const VectorType *VT = Ty->getAs<VectorType>()) {
4190     if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty))
4191       return true;
4192   }
4193   return false;
4194 }
4195
4196 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
4197     const Type *Base, uint64_t Members) const {
4198   // Vector types require one register, floating point types require one
4199   // or two registers depending on their size.
4200   uint32_t NumRegs =
4201       Base->isVectorType() ? 1 : (getContext().getTypeSize(Base) + 63) / 64;
4202
4203   // Homogeneous Aggregates may occupy at most 8 registers.
4204   return Members * NumRegs <= 8;
4205 }
4206
4207 ABIArgInfo
4208 PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
4209   Ty = useFirstFieldIfTransparentUnion(Ty);
4210
4211   if (Ty->isAnyComplexType())
4212     return ABIArgInfo::getDirect();
4213
4214   // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
4215   // or via reference (larger than 16 bytes).
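  // e.g. an 8-byte vector such as <2 x float> is coerced to i64 and passed
  // in a single GPR, while a 32-byte vector goes by reference.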
4216 if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) { 4217 uint64_t Size = getContext().getTypeSize(Ty); 4218 if (Size > 128) 4219 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 4220 else if (Size < 128) { 4221 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size); 4222 return ABIArgInfo::getDirect(CoerceTy); 4223 } 4224 } 4225 4226 if (isAggregateTypeForABI(Ty)) { 4227 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 4228 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 4229 4230 uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity(); 4231 uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity(); 4232 4233 // ELFv2 homogeneous aggregates are passed as array types. 4234 const Type *Base = nullptr; 4235 uint64_t Members = 0; 4236 if (Kind == ELFv2 && 4237 isHomogeneousAggregate(Ty, Base, Members)) { 4238 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0)); 4239 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members); 4240 return ABIArgInfo::getDirect(CoerceTy); 4241 } 4242 4243 // If an aggregate may end up fully in registers, we do not 4244 // use the ByVal method, but pass the aggregate as array. 4245 // This is usually beneficial since we avoid forcing the 4246 // back-end to store the argument to memory. 4247 uint64_t Bits = getContext().getTypeSize(Ty); 4248 if (Bits > 0 && Bits <= 8 * GPRBits) { 4249 llvm::Type *CoerceTy; 4250 4251 // Types up to 8 bytes are passed as integer type (which will be 4252 // properly aligned in the argument save area doubleword). 4253 if (Bits <= GPRBits) 4254 CoerceTy = 4255 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8)); 4256 // Larger types are passed as arrays, with the base type selected 4257 // according to the required alignment in the save area. 4258 else { 4259 uint64_t RegBits = ABIAlign * 8; 4260 uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits; 4261 llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits); 4262 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs); 4263 } 4264 4265 return ABIArgInfo::getDirect(CoerceTy); 4266 } 4267 4268 // All other aggregates are passed ByVal. 4269 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign), 4270 /*ByVal=*/true, 4271 /*Realign=*/TyAlign > ABIAlign); 4272 } 4273 4274 return (isPromotableTypeForABI(Ty) ? 4275 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 4276 } 4277 4278 ABIArgInfo 4279 PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const { 4280 if (RetTy->isVoidType()) 4281 return ABIArgInfo::getIgnore(); 4282 4283 if (RetTy->isAnyComplexType()) 4284 return ABIArgInfo::getDirect(); 4285 4286 // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes) 4287 // or via reference (larger than 16 bytes). 4288 if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) { 4289 uint64_t Size = getContext().getTypeSize(RetTy); 4290 if (Size > 128) 4291 return getNaturalAlignIndirect(RetTy); 4292 else if (Size < 128) { 4293 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size); 4294 return ABIArgInfo::getDirect(CoerceTy); 4295 } 4296 } 4297 4298 if (isAggregateTypeForABI(RetTy)) { 4299 // ELFv2 homogeneous aggregates are returned as array types. 
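    // e.g. struct { double x, y; } comes back directly as [2 x double] in
    // floating-point registers rather than indirectly through an sret
    // pointer.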
4300 const Type *Base = nullptr; 4301 uint64_t Members = 0; 4302 if (Kind == ELFv2 && 4303 isHomogeneousAggregate(RetTy, Base, Members)) { 4304 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0)); 4305 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members); 4306 return ABIArgInfo::getDirect(CoerceTy); 4307 } 4308 4309 // ELFv2 small aggregates are returned in up to two registers. 4310 uint64_t Bits = getContext().getTypeSize(RetTy); 4311 if (Kind == ELFv2 && Bits <= 2 * GPRBits) { 4312 if (Bits == 0) 4313 return ABIArgInfo::getIgnore(); 4314 4315 llvm::Type *CoerceTy; 4316 if (Bits > GPRBits) { 4317 CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits); 4318 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy, nullptr); 4319 } else 4320 CoerceTy = 4321 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8)); 4322 return ABIArgInfo::getDirect(CoerceTy); 4323 } 4324 4325 // All other aggregates are returned indirectly. 4326 return getNaturalAlignIndirect(RetTy); 4327 } 4328 4329 return (isPromotableTypeForABI(RetTy) ? 4330 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 4331 } 4332 4333 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine. 4334 Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 4335 QualType Ty) const { 4336 auto TypeInfo = getContext().getTypeInfoInChars(Ty); 4337 TypeInfo.second = getParamTypeAlignment(Ty); 4338 4339 CharUnits SlotSize = CharUnits::fromQuantity(8); 4340 4341 // If we have a complex type and the base type is smaller than 8 bytes, 4342 // the ABI calls for the real and imaginary parts to be right-adjusted 4343 // in separate doublewords. However, Clang expects us to produce a 4344 // pointer to a structure with the two parts packed tightly. So generate 4345 // loads of the real and imaginary parts relative to the va_list pointer, 4346 // and store them to a temporary structure. 4347 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { 4348 CharUnits EltSize = TypeInfo.first / 2; 4349 if (EltSize < SlotSize) { 4350 Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty, 4351 SlotSize * 2, SlotSize, 4352 SlotSize, /*AllowHigher*/ true); 4353 4354 Address RealAddr = Addr; 4355 Address ImagAddr = RealAddr; 4356 if (CGF.CGM.getDataLayout().isBigEndian()) { 4357 RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, 4358 SlotSize - EltSize); 4359 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr, 4360 2 * SlotSize - EltSize); 4361 } else { 4362 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize); 4363 } 4364 4365 llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType()); 4366 RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy); 4367 ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy); 4368 llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal"); 4369 llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag"); 4370 4371 Address Temp = CGF.CreateMemTemp(Ty, "vacplx"); 4372 CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty), 4373 /*init*/ true); 4374 return Temp; 4375 } 4376 } 4377 4378 // Otherwise, just use the general rule. 4379 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, 4380 TypeInfo, SlotSize, /*AllowHigher*/ true); 4381 } 4382 4383 static bool 4384 PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4385 llvm::Value *Address) { 4386 // This is calculated from the LLVM and GCC tables and verified 4387 // against gcc output. AFAIK all ABIs use the same encoding. 
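  // (The DWARF numbering matches the 32-bit table above; the difference is
  // that the general-purpose registers are 8 bytes wide here.)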
4388 4389 CodeGen::CGBuilderTy &Builder = CGF.Builder; 4390 4391 llvm::IntegerType *i8 = CGF.Int8Ty; 4392 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 4393 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 4394 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 4395 4396 // 0-31: r0-31, the 8-byte general-purpose registers 4397 AssignToArrayRange(Builder, Address, Eight8, 0, 31); 4398 4399 // 32-63: fp0-31, the 8-byte floating-point registers 4400 AssignToArrayRange(Builder, Address, Eight8, 32, 63); 4401 4402 // 64-76 are various 4-byte special-purpose registers: 4403 // 64: mq 4404 // 65: lr 4405 // 66: ctr 4406 // 67: ap 4407 // 68-75 cr0-7 4408 // 76: xer 4409 AssignToArrayRange(Builder, Address, Four8, 64, 76); 4410 4411 // 77-108: v0-31, the 16-byte vector registers 4412 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108); 4413 4414 // 109: vrsave 4415 // 110: vscr 4416 // 111: spe_acc 4417 // 112: spefscr 4418 // 113: sfp 4419 AssignToArrayRange(Builder, Address, Four8, 109, 113); 4420 4421 return false; 4422 } 4423 4424 bool 4425 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable( 4426 CodeGen::CodeGenFunction &CGF, 4427 llvm::Value *Address) const { 4428 4429 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 4430 } 4431 4432 bool 4433 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 4434 llvm::Value *Address) const { 4435 4436 return PPC64_initDwarfEHRegSizeTable(CGF, Address); 4437 } 4438 4439 //===----------------------------------------------------------------------===// 4440 // AArch64 ABI Implementation 4441 //===----------------------------------------------------------------------===// 4442 4443 namespace { 4444 4445 class AArch64ABIInfo : public SwiftABIInfo { 4446 public: 4447 enum ABIKind { 4448 AAPCS = 0, 4449 DarwinPCS 4450 }; 4451 4452 private: 4453 ABIKind Kind; 4454 4455 public: 4456 AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) 4457 : SwiftABIInfo(CGT), Kind(Kind) {} 4458 4459 private: 4460 ABIKind getABIKind() const { return Kind; } 4461 bool isDarwinPCS() const { return Kind == DarwinPCS; } 4462 4463 ABIArgInfo classifyReturnType(QualType RetTy) const; 4464 ABIArgInfo classifyArgumentType(QualType RetTy) const; 4465 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 4466 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 4467 uint64_t Members) const override; 4468 4469 bool isIllegalVectorType(QualType Ty) const; 4470 4471 void computeInfo(CGFunctionInfo &FI) const override { 4472 if (!getCXXABI().classifyReturnType(FI)) 4473 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 4474 4475 for (auto &it : FI.arguments()) 4476 it.info = classifyArgumentType(it.type); 4477 } 4478 4479 Address EmitDarwinVAArg(Address VAListAddr, QualType Ty, 4480 CodeGenFunction &CGF) const; 4481 4482 Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty, 4483 CodeGenFunction &CGF) const; 4484 4485 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 4486 QualType Ty) const override { 4487 return isDarwinPCS() ? 
EmitDarwinVAArg(VAListAddr, Ty, CGF) 4488 : EmitAAPCSVAArg(VAListAddr, Ty, CGF); 4489 } 4490 4491 bool shouldPassIndirectlyForSwift(CharUnits totalSize, 4492 ArrayRef<llvm::Type*> scalars, 4493 bool asReturnValue) const override { 4494 return occupiesMoreThan(CGT, scalars, /*total*/ 4); 4495 } 4496 }; 4497 4498 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo { 4499 public: 4500 AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind) 4501 : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {} 4502 4503 StringRef getARCRetainAutoreleasedReturnValueMarker() const override { 4504 return "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue"; 4505 } 4506 4507 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 4508 return 31; 4509 } 4510 4511 bool doesReturnSlotInterfereWithArgs() const override { return false; } 4512 }; 4513 } 4514 4515 ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const { 4516 Ty = useFirstFieldIfTransparentUnion(Ty); 4517 4518 // Handle illegal vector types here. 4519 if (isIllegalVectorType(Ty)) { 4520 uint64_t Size = getContext().getTypeSize(Ty); 4521 // Android promotes <2 x i8> to i16, not i32 4522 if (isAndroid() && (Size <= 16)) { 4523 llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext()); 4524 return ABIArgInfo::getDirect(ResType); 4525 } 4526 if (Size <= 32) { 4527 llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext()); 4528 return ABIArgInfo::getDirect(ResType); 4529 } 4530 if (Size == 64) { 4531 llvm::Type *ResType = 4532 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2); 4533 return ABIArgInfo::getDirect(ResType); 4534 } 4535 if (Size == 128) { 4536 llvm::Type *ResType = 4537 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4); 4538 return ABIArgInfo::getDirect(ResType); 4539 } 4540 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 4541 } 4542 4543 if (!isAggregateTypeForABI(Ty)) { 4544 // Treat an enum type as its underlying type. 4545 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 4546 Ty = EnumTy->getDecl()->getIntegerType(); 4547 4548 return (Ty->isPromotableIntegerType() && isDarwinPCS() 4549 ? ABIArgInfo::getExtend() 4550 : ABIArgInfo::getDirect()); 4551 } 4552 4553 // Structures with either a non-trivial destructor or a non-trivial 4554 // copy constructor are always indirect. 4555 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 4556 return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA == 4557 CGCXXABI::RAA_DirectInMemory); 4558 } 4559 4560 // Empty records are always ignored on Darwin, but actually passed in C++ mode 4561 // elsewhere for GNU compatibility. 4562 if (isEmptyRecord(getContext(), Ty, true)) { 4563 if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS()) 4564 return ABIArgInfo::getIgnore(); 4565 4566 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 4567 } 4568 4569 // Homogeneous Floating-point Aggregates (HFAs) need to be expanded. 4570 const Type *Base = nullptr; 4571 uint64_t Members = 0; 4572 if (isHomogeneousAggregate(Ty, Base, Members)) { 4573 return ABIArgInfo::getDirect( 4574 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members)); 4575 } 4576 4577 // Aggregates <= 16 bytes are passed directly in registers or on the stack. 4578 uint64_t Size = getContext().getTypeSize(Ty); 4579 if (Size <= 128) { 4580 // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of 4581 // same size and alignment. 
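    // (e.g. coerceToIntArray turns a 12-byte struct with 4-byte alignment
    // into [3 x i32].)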
4582 if (getTarget().isRenderScriptTarget()) { 4583 return coerceToIntArray(Ty, getContext(), getVMContext()); 4584 } 4585 unsigned Alignment = getContext().getTypeAlign(Ty); 4586 Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes 4587 4588 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment. 4589 // For aggregates with 16-byte alignment, we use i128. 4590 if (Alignment < 128 && Size == 128) { 4591 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext()); 4592 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64)); 4593 } 4594 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size)); 4595 } 4596 4597 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 4598 } 4599 4600 ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const { 4601 if (RetTy->isVoidType()) 4602 return ABIArgInfo::getIgnore(); 4603 4604 // Large vector types should be returned via memory. 4605 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) 4606 return getNaturalAlignIndirect(RetTy); 4607 4608 if (!isAggregateTypeForABI(RetTy)) { 4609 // Treat an enum type as its underlying type. 4610 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 4611 RetTy = EnumTy->getDecl()->getIntegerType(); 4612 4613 return (RetTy->isPromotableIntegerType() && isDarwinPCS() 4614 ? ABIArgInfo::getExtend() 4615 : ABIArgInfo::getDirect()); 4616 } 4617 4618 if (isEmptyRecord(getContext(), RetTy, true)) 4619 return ABIArgInfo::getIgnore(); 4620 4621 const Type *Base = nullptr; 4622 uint64_t Members = 0; 4623 if (isHomogeneousAggregate(RetTy, Base, Members)) 4624 // Homogeneous Floating-point Aggregates (HFAs) are returned directly. 4625 return ABIArgInfo::getDirect(); 4626 4627 // Aggregates <= 16 bytes are returned directly in registers or on the stack. 4628 uint64_t Size = getContext().getTypeSize(RetTy); 4629 if (Size <= 128) { 4630 // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of 4631 // same size and alignment. 4632 if (getTarget().isRenderScriptTarget()) { 4633 return coerceToIntArray(RetTy, getContext(), getVMContext()); 4634 } 4635 unsigned Alignment = getContext().getTypeAlign(RetTy); 4636 Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes 4637 4638 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment. 4639 // For aggregates with 16-byte alignment, we use i128. 4640 if (Alignment < 128 && Size == 128) { 4641 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext()); 4642 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64)); 4643 } 4644 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size)); 4645 } 4646 4647 return getNaturalAlignIndirect(RetTy); 4648 } 4649 4650 /// isIllegalVectorType - check whether the vector type is legal for AArch64. 4651 bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const { 4652 if (const VectorType *VT = Ty->getAs<VectorType>()) { 4653 // Check whether VT is legal. 4654 unsigned NumElements = VT->getNumElements(); 4655 uint64_t Size = getContext().getTypeSize(VT); 4656 // NumElements should be power of 2. 4657 if (!llvm::isPowerOf2_32(NumElements)) 4658 return true; 4659 return Size != 64 && (Size != 128 || NumElements == 1); 4660 } 4661 return false; 4662 } 4663 4664 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 4665 // Homogeneous aggregates for AAPCS64 must have base types of a floating 4666 // point type or a short-vector type. 
This is the same as the 32-bit ABI, 4667 // but with the difference that any floating-point type is allowed, 4668 // including __fp16. 4669 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 4670 if (BT->isFloatingPoint()) 4671 return true; 4672 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 4673 unsigned VecSize = getContext().getTypeSize(VT); 4674 if (VecSize == 64 || VecSize == 128) 4675 return true; 4676 } 4677 return false; 4678 } 4679 4680 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, 4681 uint64_t Members) const { 4682 return Members <= 4; 4683 } 4684 4685 Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, 4686 QualType Ty, 4687 CodeGenFunction &CGF) const { 4688 ABIArgInfo AI = classifyArgumentType(Ty); 4689 bool IsIndirect = AI.isIndirect(); 4690 4691 llvm::Type *BaseTy = CGF.ConvertType(Ty); 4692 if (IsIndirect) 4693 BaseTy = llvm::PointerType::getUnqual(BaseTy); 4694 else if (AI.getCoerceToType()) 4695 BaseTy = AI.getCoerceToType(); 4696 4697 unsigned NumRegs = 1; 4698 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) { 4699 BaseTy = ArrTy->getElementType(); 4700 NumRegs = ArrTy->getNumElements(); 4701 } 4702 bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy(); 4703 4704 // The AArch64 va_list type and handling is specified in the Procedure Call 4705 // Standard, section B.4: 4706 // 4707 // struct { 4708 // void *__stack; 4709 // void *__gr_top; 4710 // void *__vr_top; 4711 // int __gr_offs; 4712 // int __vr_offs; 4713 // }; 4714 4715 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg"); 4716 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 4717 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack"); 4718 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 4719 4720 auto TyInfo = getContext().getTypeInfoInChars(Ty); 4721 CharUnits TyAlign = TyInfo.second; 4722 4723 Address reg_offs_p = Address::invalid(); 4724 llvm::Value *reg_offs = nullptr; 4725 int reg_top_index; 4726 CharUnits reg_top_offset; 4727 int RegSize = IsIndirect ? 8 : TyInfo.first.getQuantity(); 4728 if (!IsFPR) { 4729 // 3 is the field number of __gr_offs 4730 reg_offs_p = 4731 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24), 4732 "gr_offs_p"); 4733 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs"); 4734 reg_top_index = 1; // field number for __gr_top 4735 reg_top_offset = CharUnits::fromQuantity(8); 4736 RegSize = llvm::alignTo(RegSize, 8); 4737 } else { 4738 // 4 is the field number of __vr_offs. 4739 reg_offs_p = 4740 CGF.Builder.CreateStructGEP(VAListAddr, 4, CharUnits::fromQuantity(28), 4741 "vr_offs_p"); 4742 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs"); 4743 reg_top_index = 2; // field number for __vr_top 4744 reg_top_offset = CharUnits::fromQuantity(16); 4745 RegSize = 16 * NumRegs; 4746 } 4747 4748 //======================================= 4749 // Find out where argument was passed 4750 //======================================= 4751 4752 // If reg_offs >= 0 we're already using the stack for this type of 4753 // argument. We don't want to keep updating reg_offs (in case it overflows, 4754 // though anyone passing 2GB of arguments, each at most 16 bytes, deserves 4755 // whatever they get). 
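  // While register slots remain, __gr_offs/__vr_offs hold a negative offset
  // from the corresponding *_top pointer; a value of zero or above means the
  // save area for that register class is exhausted.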
4756 llvm::Value *UsingStack = nullptr; 4757 UsingStack = CGF.Builder.CreateICmpSGE( 4758 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0)); 4759 4760 CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock); 4761 4762 // Otherwise, at least some kind of argument could go in these registers, the 4763 // question is whether this particular type is too big. 4764 CGF.EmitBlock(MaybeRegBlock); 4765 4766 // Integer arguments may need to correct register alignment (for example a 4767 // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we 4768 // align __gr_offs to calculate the potential address. 4769 if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) { 4770 int Align = TyAlign.getQuantity(); 4771 4772 reg_offs = CGF.Builder.CreateAdd( 4773 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1), 4774 "align_regoffs"); 4775 reg_offs = CGF.Builder.CreateAnd( 4776 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align), 4777 "aligned_regoffs"); 4778 } 4779 4780 // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list. 4781 // The fact that this is done unconditionally reflects the fact that 4782 // allocating an argument to the stack also uses up all the remaining 4783 // registers of the appropriate kind. 4784 llvm::Value *NewOffset = nullptr; 4785 NewOffset = CGF.Builder.CreateAdd( 4786 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs"); 4787 CGF.Builder.CreateStore(NewOffset, reg_offs_p); 4788 4789 // Now we're in a position to decide whether this argument really was in 4790 // registers or not. 4791 llvm::Value *InRegs = nullptr; 4792 InRegs = CGF.Builder.CreateICmpSLE( 4793 NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg"); 4794 4795 CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock); 4796 4797 //======================================= 4798 // Argument was in registers 4799 //======================================= 4800 4801 // Now we emit the code for if the argument was originally passed in 4802 // registers. First start the appropriate block: 4803 CGF.EmitBlock(InRegBlock); 4804 4805 llvm::Value *reg_top = nullptr; 4806 Address reg_top_p = CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, 4807 reg_top_offset, "reg_top_p"); 4808 reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top"); 4809 Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs), 4810 CharUnits::fromQuantity(IsFPR ? 16 : 8)); 4811 Address RegAddr = Address::invalid(); 4812 llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty); 4813 4814 if (IsIndirect) { 4815 // If it's been passed indirectly (actually a struct), whatever we find from 4816 // stored registers or on the stack will actually be a struct **. 4817 MemTy = llvm::PointerType::getUnqual(MemTy); 4818 } 4819 4820 const Type *Base = nullptr; 4821 uint64_t NumMembers = 0; 4822 bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers); 4823 if (IsHFA && NumMembers > 1) { 4824 // Homogeneous aggregates passed in registers will have their elements split 4825 // and stored 16-bytes apart regardless of size (they're notionally in qN, 4826 // qN+1, ...). We reload and store into a temporary local variable 4827 // contiguously. 
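  // (Illustrative, assuming the AAPCS64 HFA rules: for
  //    struct S { float x, y, z; };
  //  passed in s0-s2, each float is saved at a 16-byte stride in the register
  //  save area, and the loop below repacks them into a contiguous temporary.)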
4828 assert(!IsIndirect && "Homogeneous aggregates should be passed directly"); 4829 auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0)); 4830 llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0)); 4831 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers); 4832 Address Tmp = CGF.CreateTempAlloca(HFATy, 4833 std::max(TyAlign, BaseTyInfo.second)); 4834 4835 // On big-endian platforms, the value will be right-aligned in its slot. 4836 int Offset = 0; 4837 if (CGF.CGM.getDataLayout().isBigEndian() && 4838 BaseTyInfo.first.getQuantity() < 16) 4839 Offset = 16 - BaseTyInfo.first.getQuantity(); 4840 4841 for (unsigned i = 0; i < NumMembers; ++i) { 4842 CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset); 4843 Address LoadAddr = 4844 CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset); 4845 LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy); 4846 4847 Address StoreAddr = 4848 CGF.Builder.CreateConstArrayGEP(Tmp, i, BaseTyInfo.first); 4849 4850 llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr); 4851 CGF.Builder.CreateStore(Elem, StoreAddr); 4852 } 4853 4854 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy); 4855 } else { 4856 // Otherwise the object is contiguous in memory. 4857 4858 // It might be right-aligned in its slot. 4859 CharUnits SlotSize = BaseAddr.getAlignment(); 4860 if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect && 4861 (IsHFA || !isAggregateTypeForABI(Ty)) && 4862 TyInfo.first < SlotSize) { 4863 CharUnits Offset = SlotSize - TyInfo.first; 4864 BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset); 4865 } 4866 4867 RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy); 4868 } 4869 4870 CGF.EmitBranch(ContBlock); 4871 4872 //======================================= 4873 // Argument was on the stack 4874 //======================================= 4875 CGF.EmitBlock(OnStackBlock); 4876 4877 Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, 4878 CharUnits::Zero(), "stack_p"); 4879 llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack"); 4880 4881 // Again, stack arguments may need realignment. In this case both integer and 4882 // floating-point ones might be affected. 4883 if (!IsIndirect && TyAlign.getQuantity() > 8) { 4884 int Align = TyAlign.getQuantity(); 4885 4886 OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty); 4887 4888 OnStackPtr = CGF.Builder.CreateAdd( 4889 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1), 4890 "align_stack"); 4891 OnStackPtr = CGF.Builder.CreateAnd( 4892 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align), 4893 "align_stack"); 4894 4895 OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy); 4896 } 4897 Address OnStackAddr(OnStackPtr, 4898 std::max(CharUnits::fromQuantity(8), TyAlign)); 4899 4900 // All stack slots are multiples of 8 bytes. 
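  // (Illustrative: a 12-byte struct passed directly consumes 16 bytes of the
  //  stack area once rounded up to the 8-byte slot size below, whereas an
  //  indirectly passed value always consumes a single 8-byte pointer slot.)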
4901 CharUnits StackSlotSize = CharUnits::fromQuantity(8); 4902 CharUnits StackSize; 4903 if (IsIndirect) 4904 StackSize = StackSlotSize; 4905 else 4906 StackSize = TyInfo.first.alignTo(StackSlotSize); 4907 4908 llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize); 4909 llvm::Value *NewStack = 4910 CGF.Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC, "new_stack"); 4911 4912 // Write the new value of __stack for the next call to va_arg 4913 CGF.Builder.CreateStore(NewStack, stack_p); 4914 4915 if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) && 4916 TyInfo.first < StackSlotSize) { 4917 CharUnits Offset = StackSlotSize - TyInfo.first; 4918 OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset); 4919 } 4920 4921 OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy); 4922 4923 CGF.EmitBranch(ContBlock); 4924 4925 //======================================= 4926 // Tidy up 4927 //======================================= 4928 CGF.EmitBlock(ContBlock); 4929 4930 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, 4931 OnStackAddr, OnStackBlock, "vaargs.addr"); 4932 4933 if (IsIndirect) 4934 return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"), 4935 TyInfo.second); 4936 4937 return ResAddr; 4938 } 4939 4940 Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty, 4941 CodeGenFunction &CGF) const { 4942 // The backend's lowering doesn't support va_arg for aggregates or 4943 // illegal vector types. Lower VAArg here for these cases and use 4944 // the LLVM va_arg instruction for everything else. 4945 if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty)) 4946 return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect()); 4947 4948 CharUnits SlotSize = CharUnits::fromQuantity(8); 4949 4950 // Empty records are ignored for parameter passing purposes. 4951 if (isEmptyRecord(getContext(), Ty, true)) { 4952 Address Addr(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize); 4953 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty)); 4954 return Addr; 4955 } 4956 4957 // The size of the actual thing passed, which might end up just 4958 // being a pointer for indirect types. 4959 auto TyInfo = getContext().getTypeInfoInChars(Ty); 4960 4961 // Arguments bigger than 16 bytes which aren't homogeneous 4962 // aggregates should be passed indirectly. 
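  // (Illustrative, assuming the Darwin AAPCS64 variant: a 24-byte
  //    struct HFA3 { double a, b, c; };
  //  is a homogeneous aggregate and remains direct, while a 24-byte struct of
  //  plain chars is passed indirectly via a caller-allocated copy.)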
4963 bool IsIndirect = false; 4964 if (TyInfo.first.getQuantity() > 16) { 4965 const Type *Base = nullptr; 4966 uint64_t Members = 0; 4967 IsIndirect = !isHomogeneousAggregate(Ty, Base, Members); 4968 } 4969 4970 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, 4971 TyInfo, SlotSize, /*AllowHigherAlign*/ true); 4972 } 4973 4974 //===----------------------------------------------------------------------===// 4975 // ARM ABI Implementation 4976 //===----------------------------------------------------------------------===// 4977 4978 namespace { 4979 4980 class ARMABIInfo : public SwiftABIInfo { 4981 public: 4982 enum ABIKind { 4983 APCS = 0, 4984 AAPCS = 1, 4985 AAPCS_VFP = 2, 4986 AAPCS16_VFP = 3, 4987 }; 4988 4989 private: 4990 ABIKind Kind; 4991 4992 public: 4993 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) 4994 : SwiftABIInfo(CGT), Kind(_Kind) { 4995 setCCs(); 4996 } 4997 4998 bool isEABI() const { 4999 switch (getTarget().getTriple().getEnvironment()) { 5000 case llvm::Triple::Android: 5001 case llvm::Triple::EABI: 5002 case llvm::Triple::EABIHF: 5003 case llvm::Triple::GNUEABI: 5004 case llvm::Triple::GNUEABIHF: 5005 case llvm::Triple::MuslEABI: 5006 case llvm::Triple::MuslEABIHF: 5007 return true; 5008 default: 5009 return false; 5010 } 5011 } 5012 5013 bool isEABIHF() const { 5014 switch (getTarget().getTriple().getEnvironment()) { 5015 case llvm::Triple::EABIHF: 5016 case llvm::Triple::GNUEABIHF: 5017 case llvm::Triple::MuslEABIHF: 5018 return true; 5019 default: 5020 return false; 5021 } 5022 } 5023 5024 ABIKind getABIKind() const { return Kind; } 5025 5026 private: 5027 ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const; 5028 ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic) const; 5029 bool isIllegalVectorType(QualType Ty) const; 5030 5031 bool isHomogeneousAggregateBaseType(QualType Ty) const override; 5032 bool isHomogeneousAggregateSmallEnough(const Type *Ty, 5033 uint64_t Members) const override; 5034 5035 void computeInfo(CGFunctionInfo &FI) const override; 5036 5037 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 5038 QualType Ty) const override; 5039 5040 llvm::CallingConv::ID getLLVMDefaultCC() const; 5041 llvm::CallingConv::ID getABIDefaultCC() const; 5042 void setCCs(); 5043 5044 bool shouldPassIndirectlyForSwift(CharUnits totalSize, 5045 ArrayRef<llvm::Type*> scalars, 5046 bool asReturnValue) const override { 5047 return occupiesMoreThan(CGT, scalars, /*total*/ 4); 5048 } 5049 }; 5050 5051 class ARMTargetCodeGenInfo : public TargetCodeGenInfo { 5052 public: 5053 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 5054 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {} 5055 5056 const ARMABIInfo &getABIInfo() const { 5057 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo()); 5058 } 5059 5060 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 5061 return 13; 5062 } 5063 5064 StringRef getARCRetainAutoreleasedReturnValueMarker() const override { 5065 return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue"; 5066 } 5067 5068 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 5069 llvm::Value *Address) const override { 5070 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4); 5071 5072 // 0-15 are the 16 integer registers. 
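    // (Illustrative: this records a 4-byte size for DWARF registers r0-r15
    //  only; no entries are written here for the VFP/NEON registers.)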
5073 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15); 5074 return false; 5075 } 5076 5077 unsigned getSizeOfUnwindException() const override { 5078 if (getABIInfo().isEABI()) return 88; 5079 return TargetCodeGenInfo::getSizeOfUnwindException(); 5080 } 5081 5082 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 5083 CodeGen::CodeGenModule &CGM) const override { 5084 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 5085 if (!FD) 5086 return; 5087 5088 const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>(); 5089 if (!Attr) 5090 return; 5091 5092 const char *Kind; 5093 switch (Attr->getInterrupt()) { 5094 case ARMInterruptAttr::Generic: Kind = ""; break; 5095 case ARMInterruptAttr::IRQ: Kind = "IRQ"; break; 5096 case ARMInterruptAttr::FIQ: Kind = "FIQ"; break; 5097 case ARMInterruptAttr::SWI: Kind = "SWI"; break; 5098 case ARMInterruptAttr::ABORT: Kind = "ABORT"; break; 5099 case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break; 5100 } 5101 5102 llvm::Function *Fn = cast<llvm::Function>(GV); 5103 5104 Fn->addFnAttr("interrupt", Kind); 5105 5106 ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind(); 5107 if (ABI == ARMABIInfo::APCS) 5108 return; 5109 5110 // AAPCS guarantees that sp will be 8-byte aligned on any public interface, 5111 // however this is not necessarily true on taking any interrupt. Instruct 5112 // the backend to perform a realignment as part of the function prologue. 5113 llvm::AttrBuilder B; 5114 B.addStackAlignmentAttr(8); 5115 Fn->addAttributes(llvm::AttributeSet::FunctionIndex, 5116 llvm::AttributeSet::get(CGM.getLLVMContext(), 5117 llvm::AttributeSet::FunctionIndex, 5118 B)); 5119 } 5120 }; 5121 5122 class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo { 5123 public: 5124 WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) 5125 : ARMTargetCodeGenInfo(CGT, K) {} 5126 5127 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 5128 CodeGen::CodeGenModule &CGM) const override; 5129 5130 void getDependentLibraryOption(llvm::StringRef Lib, 5131 llvm::SmallString<24> &Opt) const override { 5132 Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib); 5133 } 5134 5135 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value, 5136 llvm::SmallString<32> &Opt) const override { 5137 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\""; 5138 } 5139 }; 5140 5141 void WindowsARMTargetCodeGenInfo::setTargetAttributes( 5142 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { 5143 ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM); 5144 addStackProbeSizeTargetAttribute(D, GV, CGM); 5145 } 5146 } 5147 5148 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const { 5149 if (!getCXXABI().classifyReturnType(FI)) 5150 FI.getReturnInfo() = 5151 classifyReturnType(FI.getReturnType(), FI.isVariadic()); 5152 5153 for (auto &I : FI.arguments()) 5154 I.info = classifyArgumentType(I.type, FI.isVariadic()); 5155 5156 // Always honor user-specified calling convention. 5157 if (FI.getCallingConvention() != llvm::CallingConv::C) 5158 return; 5159 5160 llvm::CallingConv::ID cc = getRuntimeCC(); 5161 if (cc != llvm::CallingConv::C) 5162 FI.setEffectiveCallingConvention(cc); 5163 } 5164 5165 /// Return the default calling convention that LLVM will use. 5166 llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const { 5167 // The default calling convention that LLVM will infer. 
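  // (Illustrative mapping, assuming typical triples: hard-float EABI and
  //  WatchOS triples infer ARM_AAPCS_VFP, other EABI/Android triples infer
  //  ARM_AAPCS, and everything else falls back to ARM_APCS.)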
5168 if (isEABIHF() || getTarget().getTriple().isWatchABI()) 5169 return llvm::CallingConv::ARM_AAPCS_VFP; 5170 else if (isEABI()) 5171 return llvm::CallingConv::ARM_AAPCS; 5172 else 5173 return llvm::CallingConv::ARM_APCS; 5174 } 5175 5176 /// Return the calling convention that our ABI would like us to use 5177 /// as the C calling convention. 5178 llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const { 5179 switch (getABIKind()) { 5180 case APCS: return llvm::CallingConv::ARM_APCS; 5181 case AAPCS: return llvm::CallingConv::ARM_AAPCS; 5182 case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; 5183 case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; 5184 } 5185 llvm_unreachable("bad ABI kind"); 5186 } 5187 5188 void ARMABIInfo::setCCs() { 5189 assert(getRuntimeCC() == llvm::CallingConv::C); 5190 5191 // Don't muddy up the IR with a ton of explicit annotations if 5192 // they'd just match what LLVM will infer from the triple. 5193 llvm::CallingConv::ID abiCC = getABIDefaultCC(); 5194 if (abiCC != getLLVMDefaultCC()) 5195 RuntimeCC = abiCC; 5196 5197 // AAPCS apparently requires runtime support functions to be soft-float, but 5198 // that's almost certainly for historic reasons (Thumb1 not supporting VFP 5199 // most likely). It's more convenient for AAPCS16_VFP to be hard-float. 5200 switch (getABIKind()) { 5201 case APCS: 5202 case AAPCS16_VFP: 5203 if (abiCC != getLLVMDefaultCC()) 5204 BuiltinCC = abiCC; 5205 break; 5206 case AAPCS: 5207 case AAPCS_VFP: 5208 BuiltinCC = llvm::CallingConv::ARM_AAPCS; 5209 break; 5210 } 5211 } 5212 5213 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, 5214 bool isVariadic) const { 5215 // 6.1.2.1 The following argument types are VFP CPRCs: 5216 // A single-precision floating-point type (including promoted 5217 // half-precision types); A double-precision floating-point type; 5218 // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate 5219 // with a Base Type of a single- or double-precision floating-point type, 5220 // 64-bit containerized vectors or 128-bit containerized vectors with one 5221 // to four Elements. 5222 bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic; 5223 5224 Ty = useFirstFieldIfTransparentUnion(Ty); 5225 5226 // Handle illegal vector types here. 5227 if (isIllegalVectorType(Ty)) { 5228 uint64_t Size = getContext().getTypeSize(Ty); 5229 if (Size <= 32) { 5230 llvm::Type *ResType = 5231 llvm::Type::getInt32Ty(getVMContext()); 5232 return ABIArgInfo::getDirect(ResType); 5233 } 5234 if (Size == 64) { 5235 llvm::Type *ResType = llvm::VectorType::get( 5236 llvm::Type::getInt32Ty(getVMContext()), 2); 5237 return ABIArgInfo::getDirect(ResType); 5238 } 5239 if (Size == 128) { 5240 llvm::Type *ResType = llvm::VectorType::get( 5241 llvm::Type::getInt32Ty(getVMContext()), 4); 5242 return ABIArgInfo::getDirect(ResType); 5243 } 5244 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 5245 } 5246 5247 // __fp16 gets passed as if it were an int or float, but with the top 16 bits 5248 // unspecified. This is not done for OpenCL as it handles the half type 5249 // natively, and does not need to interwork with AAPCS code. 5250 if (Ty->isHalfType() && !getContext().getLangOpts().NativeHalfArgsAndReturns) { 5251 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ? 5252 llvm::Type::getFloatTy(getVMContext()) : 5253 llvm::Type::getInt32Ty(getVMContext()); 5254 return ABIArgInfo::getDirect(ResType); 5255 } 5256 5257 if (!isAggregateTypeForABI(Ty)) { 5258 // Treat an enum type as its underlying type. 
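    // (Illustrative: an enum whose underlying type is 'short' is classified
    //  as 'short' and therefore extended below; a plain 'int' is already
    //  word-sized and is passed direct.)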
5259 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) { 5260 Ty = EnumTy->getDecl()->getIntegerType(); 5261 } 5262 5263 return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend() 5264 : ABIArgInfo::getDirect()); 5265 } 5266 5267 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 5268 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 5269 } 5270 5271 // Ignore empty records. 5272 if (isEmptyRecord(getContext(), Ty, true)) 5273 return ABIArgInfo::getIgnore(); 5274 5275 if (IsEffectivelyAAPCS_VFP) { 5276 // Homogeneous Aggregates need to be expanded when we can fit the aggregate 5277 // into VFP registers. 5278 const Type *Base = nullptr; 5279 uint64_t Members = 0; 5280 if (isHomogeneousAggregate(Ty, Base, Members)) { 5281 assert(Base && "Base class should be set for homogeneous aggregate"); 5282 // Base can be a floating-point or a vector. 5283 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false); 5284 } 5285 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) { 5286 // WatchOS does have homogeneous aggregates. Note that we intentionally use 5287 // this convention even for a variadic function: the backend will use GPRs 5288 // if needed. 5289 const Type *Base = nullptr; 5290 uint64_t Members = 0; 5291 if (isHomogeneousAggregate(Ty, Base, Members)) { 5292 assert(Base && Members <= 4 && "unexpected homogeneous aggregate"); 5293 llvm::Type *Ty = 5294 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members); 5295 return ABIArgInfo::getDirect(Ty, 0, nullptr, false); 5296 } 5297 } 5298 5299 if (getABIKind() == ARMABIInfo::AAPCS16_VFP && 5300 getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) { 5301 // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're 5302 // bigger than 128-bits, they get placed in space allocated by the caller, 5303 // and a pointer is passed. 5304 return ABIArgInfo::getIndirect( 5305 CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false); 5306 } 5307 5308 // Support byval for ARM. 5309 // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at 5310 // most 8-byte. We realign the indirect argument if type alignment is bigger 5311 // than ABI alignment. 5312 uint64_t ABIAlign = 4; 5313 uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8; 5314 if (getABIKind() == ARMABIInfo::AAPCS_VFP || 5315 getABIKind() == ARMABIInfo::AAPCS) 5316 ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8); 5317 5318 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) { 5319 assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval"); 5320 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign), 5321 /*ByVal=*/true, 5322 /*Realign=*/TyAlign > ABIAlign); 5323 } 5324 5325 // On RenderScript, coerce Aggregates <= 64 bytes to an integer array of 5326 // same size and alignment. 5327 if (getTarget().isRenderScriptTarget()) { 5328 return coerceToIntArray(Ty, getContext(), getVMContext()); 5329 } 5330 5331 // Otherwise, pass by coercing to a structure of the appropriate size. 5332 llvm::Type* ElemTy; 5333 unsigned SizeRegs; 5334 // FIXME: Try to match the types of the arguments more accurately where 5335 // we can. 
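  // (Illustrative, for the coercion below: a 12-byte struct with 4-byte
  //  alignment becomes [3 x i32], while an 8-byte-aligned
  //    struct S { double d; int i; };   // 16 bytes including tail padding
  //  becomes [2 x i64].)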
5336 if (getContext().getTypeAlign(Ty) <= 32) { 5337 ElemTy = llvm::Type::getInt32Ty(getVMContext()); 5338 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32; 5339 } else { 5340 ElemTy = llvm::Type::getInt64Ty(getVMContext()); 5341 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64; 5342 } 5343 5344 return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs)); 5345 } 5346 5347 static bool isIntegerLikeType(QualType Ty, ASTContext &Context, 5348 llvm::LLVMContext &VMContext) { 5349 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure 5350 // is called integer-like if its size is less than or equal to one word, and 5351 // the offset of each of its addressable sub-fields is zero. 5352 5353 uint64_t Size = Context.getTypeSize(Ty); 5354 5355 // Check that the type fits in a word. 5356 if (Size > 32) 5357 return false; 5358 5359 // FIXME: Handle vector types! 5360 if (Ty->isVectorType()) 5361 return false; 5362 5363 // Float types are never treated as "integer like". 5364 if (Ty->isRealFloatingType()) 5365 return false; 5366 5367 // If this is a builtin or pointer type then it is ok. 5368 if (Ty->getAs<BuiltinType>() || Ty->isPointerType()) 5369 return true; 5370 5371 // Small complex integer types are "integer like". 5372 if (const ComplexType *CT = Ty->getAs<ComplexType>()) 5373 return isIntegerLikeType(CT->getElementType(), Context, VMContext); 5374 5375 // Single element and zero sized arrays should be allowed, by the definition 5376 // above, but they are not. 5377 5378 // Otherwise, it must be a record type. 5379 const RecordType *RT = Ty->getAs<RecordType>(); 5380 if (!RT) return false; 5381 5382 // Ignore records with flexible arrays. 5383 const RecordDecl *RD = RT->getDecl(); 5384 if (RD->hasFlexibleArrayMember()) 5385 return false; 5386 5387 // Check that all sub-fields are at offset 0, and are themselves "integer 5388 // like". 5389 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 5390 5391 bool HadField = false; 5392 unsigned idx = 0; 5393 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 5394 i != e; ++i, ++idx) { 5395 const FieldDecl *FD = *i; 5396 5397 // Bit-fields are not addressable, we only need to verify they are "integer 5398 // like". We still have to disallow a subsequent non-bitfield, for example: 5399 // struct { int : 0; int x } 5400 // is non-integer like according to gcc. 5401 if (FD->isBitField()) { 5402 if (!RD->isUnion()) 5403 HadField = true; 5404 5405 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 5406 return false; 5407 5408 continue; 5409 } 5410 5411 // Check if this field is at offset 0. 5412 if (Layout.getFieldOffset(idx) != 0) 5413 return false; 5414 5415 if (!isIntegerLikeType(FD->getType(), Context, VMContext)) 5416 return false; 5417 5418 // Only allow at most one field in a structure. This doesn't match the 5419 // wording above, but follows gcc in situations with a field following an 5420 // empty structure. 5421 if (!RD->isUnion()) { 5422 if (HadField) 5423 return false; 5424 5425 HadField = true; 5426 } 5427 } 5428 5429 return true; 5430 } 5431 5432 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, 5433 bool isVariadic) const { 5434 bool IsEffectivelyAAPCS_VFP = 5435 (getABIKind() == AAPCS_VFP || getABIKind() == AAPCS16_VFP) && !isVariadic; 5436 5437 if (RetTy->isVoidType()) 5438 return ABIArgInfo::getIgnore(); 5439 5440 // Large vector types should be returned via memory. 
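  // (Illustrative: a 256-bit vector such as
  //    typedef int v8i32 __attribute__((vector_size(32)));
  //  is returned indirectly through an sret pointer; 64- and 128-bit vectors
  //  are returned directly.)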
5441 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) {
5442 return getNaturalAlignIndirect(RetTy);
5443 }
5444
5445 // __fp16 gets returned as if it were an int or float, but with the top 16
5446 // bits unspecified. This is not done for OpenCL as it handles the half type
5447 // natively, and does not need to interwork with AAPCS code.
5448 if (RetTy->isHalfType() && !getContext().getLangOpts().NativeHalfArgsAndReturns) {
5449 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
5450 llvm::Type::getFloatTy(getVMContext()) :
5451 llvm::Type::getInt32Ty(getVMContext());
5452 return ABIArgInfo::getDirect(ResType);
5453 }
5454
5455 if (!isAggregateTypeForABI(RetTy)) {
5456 // Treat an enum type as its underlying type.
5457 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5458 RetTy = EnumTy->getDecl()->getIntegerType();
5459
5460 return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend()
5461 : ABIArgInfo::getDirect();
5462 }
5463
5464 // Are we following APCS?
5465 if (getABIKind() == APCS) {
5466 if (isEmptyRecord(getContext(), RetTy, false))
5467 return ABIArgInfo::getIgnore();
5468
5469 // Complex types are all returned as packed integers.
5470 //
5471 // FIXME: Consider using 2 x vector types if the back end handles them
5472 // correctly.
5473 if (RetTy->isAnyComplexType())
5474 return ABIArgInfo::getDirect(llvm::IntegerType::get(
5475 getVMContext(), getContext().getTypeSize(RetTy)));
5476
5477 // Integer like structures are returned in r0.
5478 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
5479 // Return in the smallest viable integer type.
5480 uint64_t Size = getContext().getTypeSize(RetTy);
5481 if (Size <= 8)
5482 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5483 if (Size <= 16)
5484 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
5485 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
5486 }
5487
5488 // Otherwise return in memory.
5489 return getNaturalAlignIndirect(RetTy);
5490 }
5491
5492 // Otherwise this is an AAPCS variant.
5493
5494 if (isEmptyRecord(getContext(), RetTy, true))
5495 return ABIArgInfo::getIgnore();
5496
5497 // Check for homogeneous aggregates with AAPCS-VFP.
5498 if (IsEffectivelyAAPCS_VFP) {
5499 const Type *Base = nullptr;
5500 uint64_t Members = 0;
5501 if (isHomogeneousAggregate(RetTy, Base, Members)) {
5502 assert(Base && "Base class should be set for homogeneous aggregate");
5503 // Homogeneous Aggregates are returned directly.
5504 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
5505 }
5506 }
5507
5508 // Aggregates <= 4 bytes are returned in r0; other aggregates
5509 // are returned indirectly.
5510 uint64_t Size = getContext().getTypeSize(RetTy);
5511 if (Size <= 32) {
5512 // On RenderScript, coerce aggregates <= 4 bytes to an integer array of
5513 // the same size and alignment.
5514 if (getTarget().isRenderScriptTarget()) {
5515 return coerceToIntArray(RetTy, getContext(), getVMContext());
5516 }
5517 if (getDataLayout().isBigEndian())
5518 // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4).
5519 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
5520
5521 // Return in the smallest viable integer type.
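    // (Illustrative: struct { char c; } is returned as i8 and
    //  struct { short s; } as i16 on little-endian targets; the big-endian
    //  case above always widens to i32.)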
5522 if (Size <= 8) 5523 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 5524 if (Size <= 16) 5525 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 5526 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 5527 } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) { 5528 llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext()); 5529 llvm::Type *CoerceTy = 5530 llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32); 5531 return ABIArgInfo::getDirect(CoerceTy); 5532 } 5533 5534 return getNaturalAlignIndirect(RetTy); 5535 } 5536 5537 /// isIllegalVector - check whether Ty is an illegal vector type. 5538 bool ARMABIInfo::isIllegalVectorType(QualType Ty) const { 5539 if (const VectorType *VT = Ty->getAs<VectorType> ()) { 5540 if (isAndroid()) { 5541 // Android shipped using Clang 3.1, which supported a slightly different 5542 // vector ABI. The primary differences were that 3-element vector types 5543 // were legal, and so were sub 32-bit vectors (i.e. <2 x i8>). This path 5544 // accepts that legacy behavior for Android only. 5545 // Check whether VT is legal. 5546 unsigned NumElements = VT->getNumElements(); 5547 // NumElements should be power of 2 or equal to 3. 5548 if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3) 5549 return true; 5550 } else { 5551 // Check whether VT is legal. 5552 unsigned NumElements = VT->getNumElements(); 5553 uint64_t Size = getContext().getTypeSize(VT); 5554 // NumElements should be power of 2. 5555 if (!llvm::isPowerOf2_32(NumElements)) 5556 return true; 5557 // Size should be greater than 32 bits. 5558 return Size <= 32; 5559 } 5560 } 5561 return false; 5562 } 5563 5564 bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { 5565 // Homogeneous aggregates for AAPCS-VFP must have base types of float, 5566 // double, or 64-bit or 128-bit vectors. 5567 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 5568 if (BT->getKind() == BuiltinType::Float || 5569 BT->getKind() == BuiltinType::Double || 5570 BT->getKind() == BuiltinType::LongDouble) 5571 return true; 5572 } else if (const VectorType *VT = Ty->getAs<VectorType>()) { 5573 unsigned VecSize = getContext().getTypeSize(VT); 5574 if (VecSize == 64 || VecSize == 128) 5575 return true; 5576 } 5577 return false; 5578 } 5579 5580 bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, 5581 uint64_t Members) const { 5582 return Members <= 4; 5583 } 5584 5585 Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 5586 QualType Ty) const { 5587 CharUnits SlotSize = CharUnits::fromQuantity(4); 5588 5589 // Empty records are ignored for parameter passing purposes. 5590 if (isEmptyRecord(getContext(), Ty, true)) { 5591 Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize); 5592 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty)); 5593 return Addr; 5594 } 5595 5596 auto TyInfo = getContext().getTypeInfoInChars(Ty); 5597 CharUnits TyAlignForABI = TyInfo.second; 5598 5599 // Use indirect if size of the illegal vector is bigger than 16 bytes. 5600 bool IsIndirect = false; 5601 const Type *Base = nullptr; 5602 uint64_t Members = 0; 5603 if (TyInfo.first > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) { 5604 IsIndirect = true; 5605 5606 // ARMv7k passes structs bigger than 16 bytes indirectly, in space 5607 // allocated by the caller. 
5608 } else if (TyInfo.first > CharUnits::fromQuantity(16) && 5609 getABIKind() == ARMABIInfo::AAPCS16_VFP && 5610 !isHomogeneousAggregate(Ty, Base, Members)) { 5611 IsIndirect = true; 5612 5613 // Otherwise, bound the type's ABI alignment. 5614 // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for 5615 // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte. 5616 // Our callers should be prepared to handle an under-aligned address. 5617 } else if (getABIKind() == ARMABIInfo::AAPCS_VFP || 5618 getABIKind() == ARMABIInfo::AAPCS) { 5619 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4)); 5620 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8)); 5621 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) { 5622 // ARMv7k allows type alignment up to 16 bytes. 5623 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4)); 5624 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16)); 5625 } else { 5626 TyAlignForABI = CharUnits::fromQuantity(4); 5627 } 5628 TyInfo.second = TyAlignForABI; 5629 5630 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo, 5631 SlotSize, /*AllowHigherAlign*/ true); 5632 } 5633 5634 //===----------------------------------------------------------------------===// 5635 // NVPTX ABI Implementation 5636 //===----------------------------------------------------------------------===// 5637 5638 namespace { 5639 5640 class NVPTXABIInfo : public ABIInfo { 5641 public: 5642 NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 5643 5644 ABIArgInfo classifyReturnType(QualType RetTy) const; 5645 ABIArgInfo classifyArgumentType(QualType Ty) const; 5646 5647 void computeInfo(CGFunctionInfo &FI) const override; 5648 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 5649 QualType Ty) const override; 5650 }; 5651 5652 class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo { 5653 public: 5654 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT) 5655 : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {} 5656 5657 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 5658 CodeGen::CodeGenModule &M) const override; 5659 private: 5660 // Adds a NamedMDNode with F, Name, and Operand as operands, and adds the 5661 // resulting MDNode to the nvvm.annotations MDNode. 5662 static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand); 5663 }; 5664 5665 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const { 5666 if (RetTy->isVoidType()) 5667 return ABIArgInfo::getIgnore(); 5668 5669 // note: this is different from default ABI 5670 if (!RetTy->isScalarType()) 5671 return ABIArgInfo::getDirect(); 5672 5673 // Treat an enum type as its underlying type. 5674 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 5675 RetTy = EnumTy->getDecl()->getIntegerType(); 5676 5677 return (RetTy->isPromotableIntegerType() ? 5678 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 5679 } 5680 5681 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const { 5682 // Treat an enum type as its underlying type. 5683 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 5684 Ty = EnumTy->getDecl()->getIntegerType(); 5685 5686 // Return aggregates type as indirect by value 5687 if (isAggregateTypeForABI(Ty)) 5688 return getNaturalAlignIndirect(Ty, /* byval */ true); 5689 5690 return (Ty->isPromotableIntegerType() ? 
5691 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 5692 } 5693 5694 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const { 5695 if (!getCXXABI().classifyReturnType(FI)) 5696 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 5697 for (auto &I : FI.arguments()) 5698 I.info = classifyArgumentType(I.type); 5699 5700 // Always honor user-specified calling convention. 5701 if (FI.getCallingConvention() != llvm::CallingConv::C) 5702 return; 5703 5704 FI.setEffectiveCallingConvention(getRuntimeCC()); 5705 } 5706 5707 Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 5708 QualType Ty) const { 5709 llvm_unreachable("NVPTX does not support varargs"); 5710 } 5711 5712 void NVPTXTargetCodeGenInfo:: 5713 setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 5714 CodeGen::CodeGenModule &M) const{ 5715 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 5716 if (!FD) return; 5717 5718 llvm::Function *F = cast<llvm::Function>(GV); 5719 5720 // Perform special handling in OpenCL mode 5721 if (M.getLangOpts().OpenCL) { 5722 // Use OpenCL function attributes to check for kernel functions 5723 // By default, all functions are device functions 5724 if (FD->hasAttr<OpenCLKernelAttr>()) { 5725 // OpenCL __kernel functions get kernel metadata 5726 // Create !{<func-ref>, metadata !"kernel", i32 1} node 5727 addNVVMMetadata(F, "kernel", 1); 5728 // And kernel functions are not subject to inlining 5729 F->addFnAttr(llvm::Attribute::NoInline); 5730 } 5731 } 5732 5733 // Perform special handling in CUDA mode. 5734 if (M.getLangOpts().CUDA) { 5735 // CUDA __global__ functions get a kernel metadata entry. Since 5736 // __global__ functions cannot be called from the device, we do not 5737 // need to set the noinline attribute. 5738 if (FD->hasAttr<CUDAGlobalAttr>()) { 5739 // Create !{<func-ref>, metadata !"kernel", i32 1} node 5740 addNVVMMetadata(F, "kernel", 1); 5741 } 5742 if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) { 5743 // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node 5744 llvm::APSInt MaxThreads(32); 5745 MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext()); 5746 if (MaxThreads > 0) 5747 addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue()); 5748 5749 // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was 5750 // not specified in __launch_bounds__ or if the user specified a 0 value, 5751 // we don't have to add a PTX directive. 
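      // (Illustrative, assuming standard CUDA launch-bounds usage:
      //    __global__ void __launch_bounds__(256, 4) k();
      //  produces "maxntidx" = 256 and "minctasm" = 4 annotations.)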
5752 if (Attr->getMinBlocks()) { 5753 llvm::APSInt MinBlocks(32); 5754 MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext()); 5755 if (MinBlocks > 0) 5756 // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node 5757 addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue()); 5758 } 5759 } 5760 } 5761 } 5762 5763 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name, 5764 int Operand) { 5765 llvm::Module *M = F->getParent(); 5766 llvm::LLVMContext &Ctx = M->getContext(); 5767 5768 // Get "nvvm.annotations" metadata node 5769 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations"); 5770 5771 llvm::Metadata *MDVals[] = { 5772 llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name), 5773 llvm::ConstantAsMetadata::get( 5774 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))}; 5775 // Append metadata to nvvm.annotations 5776 MD->addOperand(llvm::MDNode::get(Ctx, MDVals)); 5777 } 5778 } 5779 5780 //===----------------------------------------------------------------------===// 5781 // SystemZ ABI Implementation 5782 //===----------------------------------------------------------------------===// 5783 5784 namespace { 5785 5786 class SystemZABIInfo : public SwiftABIInfo { 5787 bool HasVector; 5788 5789 public: 5790 SystemZABIInfo(CodeGenTypes &CGT, bool HV) 5791 : SwiftABIInfo(CGT), HasVector(HV) {} 5792 5793 bool isPromotableIntegerType(QualType Ty) const; 5794 bool isCompoundType(QualType Ty) const; 5795 bool isVectorArgumentType(QualType Ty) const; 5796 bool isFPArgumentType(QualType Ty) const; 5797 QualType GetSingleElementType(QualType Ty) const; 5798 5799 ABIArgInfo classifyReturnType(QualType RetTy) const; 5800 ABIArgInfo classifyArgumentType(QualType ArgTy) const; 5801 5802 void computeInfo(CGFunctionInfo &FI) const override { 5803 if (!getCXXABI().classifyReturnType(FI)) 5804 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 5805 for (auto &I : FI.arguments()) 5806 I.info = classifyArgumentType(I.type); 5807 } 5808 5809 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 5810 QualType Ty) const override; 5811 5812 bool shouldPassIndirectlyForSwift(CharUnits totalSize, 5813 ArrayRef<llvm::Type*> scalars, 5814 bool asReturnValue) const override { 5815 return occupiesMoreThan(CGT, scalars, /*total*/ 4); 5816 } 5817 }; 5818 5819 class SystemZTargetCodeGenInfo : public TargetCodeGenInfo { 5820 public: 5821 SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector) 5822 : TargetCodeGenInfo(new SystemZABIInfo(CGT, HasVector)) {} 5823 }; 5824 5825 } 5826 5827 bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const { 5828 // Treat an enum type as its underlying type. 5829 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 5830 Ty = EnumTy->getDecl()->getIntegerType(); 5831 5832 // Promotable integer types are required to be promoted by the ABI. 5833 if (Ty->isPromotableIntegerType()) 5834 return true; 5835 5836 // 32-bit values must also be promoted. 
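  // (Illustrative: a 32-bit 'int' or 'unsigned' argument is widened to a full
  //  64-bit GPR below; 'long' and 'long long' are already register-sized and
  //  are passed direct.)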
5837 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 5838 switch (BT->getKind()) { 5839 case BuiltinType::Int: 5840 case BuiltinType::UInt: 5841 return true; 5842 default: 5843 return false; 5844 } 5845 return false; 5846 } 5847 5848 bool SystemZABIInfo::isCompoundType(QualType Ty) const { 5849 return (Ty->isAnyComplexType() || 5850 Ty->isVectorType() || 5851 isAggregateTypeForABI(Ty)); 5852 } 5853 5854 bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const { 5855 return (HasVector && 5856 Ty->isVectorType() && 5857 getContext().getTypeSize(Ty) <= 128); 5858 } 5859 5860 bool SystemZABIInfo::isFPArgumentType(QualType Ty) const { 5861 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) 5862 switch (BT->getKind()) { 5863 case BuiltinType::Float: 5864 case BuiltinType::Double: 5865 return true; 5866 default: 5867 return false; 5868 } 5869 5870 return false; 5871 } 5872 5873 QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const { 5874 if (const RecordType *RT = Ty->getAsStructureType()) { 5875 const RecordDecl *RD = RT->getDecl(); 5876 QualType Found; 5877 5878 // If this is a C++ record, check the bases first. 5879 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 5880 for (const auto &I : CXXRD->bases()) { 5881 QualType Base = I.getType(); 5882 5883 // Empty bases don't affect things either way. 5884 if (isEmptyRecord(getContext(), Base, true)) 5885 continue; 5886 5887 if (!Found.isNull()) 5888 return Ty; 5889 Found = GetSingleElementType(Base); 5890 } 5891 5892 // Check the fields. 5893 for (const auto *FD : RD->fields()) { 5894 // For compatibility with GCC, ignore empty bitfields in C++ mode. 5895 // Unlike isSingleElementStruct(), empty structure and array fields 5896 // do count. So do anonymous bitfields that aren't zero-sized. 5897 if (getContext().getLangOpts().CPlusPlus && 5898 FD->isBitField() && FD->getBitWidthValue(getContext()) == 0) 5899 continue; 5900 5901 // Unlike isSingleElementStruct(), arrays do not count. 5902 // Nested structures still do though. 5903 if (!Found.isNull()) 5904 return Ty; 5905 Found = GetSingleElementType(FD->getType()); 5906 } 5907 5908 // Unlike isSingleElementStruct(), trailing padding is allowed. 5909 // An 8-byte aligned struct s { float f; } is passed as a double. 5910 if (!Found.isNull()) 5911 return Found; 5912 } 5913 5914 return Ty; 5915 } 5916 5917 Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 5918 QualType Ty) const { 5919 // Assume that va_list type is correct; should be pointer to LLVM type: 5920 // struct { 5921 // i64 __gpr; 5922 // i64 __fpr; 5923 // i8 *__overflow_arg_area; 5924 // i8 *__reg_save_area; 5925 // }; 5926 5927 // Every non-vector argument occupies 8 bytes and is passed by preference 5928 // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are 5929 // always passed on the stack. 
5930 Ty = getContext().getCanonicalType(Ty); 5931 auto TyInfo = getContext().getTypeInfoInChars(Ty); 5932 llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty); 5933 llvm::Type *DirectTy = ArgTy; 5934 ABIArgInfo AI = classifyArgumentType(Ty); 5935 bool IsIndirect = AI.isIndirect(); 5936 bool InFPRs = false; 5937 bool IsVector = false; 5938 CharUnits UnpaddedSize; 5939 CharUnits DirectAlign; 5940 if (IsIndirect) { 5941 DirectTy = llvm::PointerType::getUnqual(DirectTy); 5942 UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8); 5943 } else { 5944 if (AI.getCoerceToType()) 5945 ArgTy = AI.getCoerceToType(); 5946 InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy(); 5947 IsVector = ArgTy->isVectorTy(); 5948 UnpaddedSize = TyInfo.first; 5949 DirectAlign = TyInfo.second; 5950 } 5951 CharUnits PaddedSize = CharUnits::fromQuantity(8); 5952 if (IsVector && UnpaddedSize > PaddedSize) 5953 PaddedSize = CharUnits::fromQuantity(16); 5954 assert((UnpaddedSize <= PaddedSize) && "Invalid argument size."); 5955 5956 CharUnits Padding = (PaddedSize - UnpaddedSize); 5957 5958 llvm::Type *IndexTy = CGF.Int64Ty; 5959 llvm::Value *PaddedSizeV = 5960 llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity()); 5961 5962 if (IsVector) { 5963 // Work out the address of a vector argument on the stack. 5964 // Vector arguments are always passed in the high bits of a 5965 // single (8 byte) or double (16 byte) stack slot. 5966 Address OverflowArgAreaPtr = 5967 CGF.Builder.CreateStructGEP(VAListAddr, 2, CharUnits::fromQuantity(16), 5968 "overflow_arg_area_ptr"); 5969 Address OverflowArgArea = 5970 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"), 5971 TyInfo.second); 5972 Address MemAddr = 5973 CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr"); 5974 5975 // Update overflow_arg_area_ptr pointer 5976 llvm::Value *NewOverflowArgArea = 5977 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV, 5978 "overflow_arg_area"); 5979 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr); 5980 5981 return MemAddr; 5982 } 5983 5984 assert(PaddedSize.getQuantity() == 8); 5985 5986 unsigned MaxRegs, RegCountField, RegSaveIndex; 5987 CharUnits RegPadding; 5988 if (InFPRs) { 5989 MaxRegs = 4; // Maximum of 4 FPR arguments 5990 RegCountField = 1; // __fpr 5991 RegSaveIndex = 16; // save offset for f0 5992 RegPadding = CharUnits(); // floats are passed in the high bits of an FPR 5993 } else { 5994 MaxRegs = 5; // Maximum of 5 GPR arguments 5995 RegCountField = 0; // __gpr 5996 RegSaveIndex = 2; // save offset for r2 5997 RegPadding = Padding; // values are passed in the low bits of a GPR 5998 } 5999 6000 Address RegCountPtr = CGF.Builder.CreateStructGEP( 6001 VAListAddr, RegCountField, RegCountField * CharUnits::fromQuantity(8), 6002 "reg_count_ptr"); 6003 llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count"); 6004 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs); 6005 llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV, 6006 "fits_in_regs"); 6007 6008 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); 6009 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); 6010 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); 6011 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); 6012 6013 // Emit code to load the value if it was passed in registers. 6014 CGF.EmitBlock(InRegBlock); 6015 6016 // Work out the address of an argument register. 
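  // (Illustrative, using the constants above: the Nth floating-point vararg
  //  lives at __reg_save_area + 16*8 + N*8, and the Nth GPR vararg at
  //  __reg_save_area + 2*8 + N*8, plus any left-padding for sub-8-byte
  //  values.)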
6017 llvm::Value *ScaledRegCount = 6018 CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count"); 6019 llvm::Value *RegBase = 6020 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity() 6021 + RegPadding.getQuantity()); 6022 llvm::Value *RegOffset = 6023 CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset"); 6024 Address RegSaveAreaPtr = 6025 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24), 6026 "reg_save_area_ptr"); 6027 llvm::Value *RegSaveArea = 6028 CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area"); 6029 Address RawRegAddr(CGF.Builder.CreateGEP(RegSaveArea, RegOffset, 6030 "raw_reg_addr"), 6031 PaddedSize); 6032 Address RegAddr = 6033 CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr"); 6034 6035 // Update the register count 6036 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1); 6037 llvm::Value *NewRegCount = 6038 CGF.Builder.CreateAdd(RegCount, One, "reg_count"); 6039 CGF.Builder.CreateStore(NewRegCount, RegCountPtr); 6040 CGF.EmitBranch(ContBlock); 6041 6042 // Emit code to load the value if it was passed in memory. 6043 CGF.EmitBlock(InMemBlock); 6044 6045 // Work out the address of a stack argument. 6046 Address OverflowArgAreaPtr = CGF.Builder.CreateStructGEP( 6047 VAListAddr, 2, CharUnits::fromQuantity(16), "overflow_arg_area_ptr"); 6048 Address OverflowArgArea = 6049 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"), 6050 PaddedSize); 6051 Address RawMemAddr = 6052 CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr"); 6053 Address MemAddr = 6054 CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr"); 6055 6056 // Update overflow_arg_area_ptr pointer 6057 llvm::Value *NewOverflowArgArea = 6058 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV, 6059 "overflow_arg_area"); 6060 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr); 6061 CGF.EmitBranch(ContBlock); 6062 6063 // Return the appropriate result. 6064 CGF.EmitBlock(ContBlock); 6065 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, 6066 MemAddr, InMemBlock, "va_arg.addr"); 6067 6068 if (IsIndirect) 6069 ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"), 6070 TyInfo.second); 6071 6072 return ResAddr; 6073 } 6074 6075 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const { 6076 if (RetTy->isVoidType()) 6077 return ABIArgInfo::getIgnore(); 6078 if (isVectorArgumentType(RetTy)) 6079 return ABIArgInfo::getDirect(); 6080 if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64) 6081 return getNaturalAlignIndirect(RetTy); 6082 return (isPromotableIntegerType(RetTy) ? 6083 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 6084 } 6085 6086 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const { 6087 // Handle the generic C++ ABI. 6088 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 6089 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 6090 6091 // Integers and enums are extended to full register width. 6092 if (isPromotableIntegerType(Ty)) 6093 return ABIArgInfo::getExtend(); 6094 6095 // Handle vector types and vector-like structure types. Note that 6096 // as opposed to float-like structure types, we do not allow any 6097 // padding for vector-like structures, so verify the sizes match. 
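  // (Illustrative: a struct whose only member is a single 16-byte vector is
  //  passed as that vector type; if trailing padding makes the struct larger
  //  than the vector, the size check below rejects it and the struct falls
  //  through to the generic handling.)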
6098 uint64_t Size = getContext().getTypeSize(Ty); 6099 QualType SingleElementTy = GetSingleElementType(Ty); 6100 if (isVectorArgumentType(SingleElementTy) && 6101 getContext().getTypeSize(SingleElementTy) == Size) 6102 return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy)); 6103 6104 // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly. 6105 if (Size != 8 && Size != 16 && Size != 32 && Size != 64) 6106 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 6107 6108 // Handle small structures. 6109 if (const RecordType *RT = Ty->getAs<RecordType>()) { 6110 // Structures with flexible arrays have variable length, so really 6111 // fail the size test above. 6112 const RecordDecl *RD = RT->getDecl(); 6113 if (RD->hasFlexibleArrayMember()) 6114 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 6115 6116 // The structure is passed as an unextended integer, a float, or a double. 6117 llvm::Type *PassTy; 6118 if (isFPArgumentType(SingleElementTy)) { 6119 assert(Size == 32 || Size == 64); 6120 if (Size == 32) 6121 PassTy = llvm::Type::getFloatTy(getVMContext()); 6122 else 6123 PassTy = llvm::Type::getDoubleTy(getVMContext()); 6124 } else 6125 PassTy = llvm::IntegerType::get(getVMContext(), Size); 6126 return ABIArgInfo::getDirect(PassTy); 6127 } 6128 6129 // Non-structure compounds are passed indirectly. 6130 if (isCompoundType(Ty)) 6131 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 6132 6133 return ABIArgInfo::getDirect(nullptr); 6134 } 6135 6136 //===----------------------------------------------------------------------===// 6137 // MSP430 ABI Implementation 6138 //===----------------------------------------------------------------------===// 6139 6140 namespace { 6141 6142 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo { 6143 public: 6144 MSP430TargetCodeGenInfo(CodeGenTypes &CGT) 6145 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 6146 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 6147 CodeGen::CodeGenModule &M) const override; 6148 }; 6149 6150 } 6151 6152 void MSP430TargetCodeGenInfo::setTargetAttributes(const Decl *D, 6153 llvm::GlobalValue *GV, 6154 CodeGen::CodeGenModule &M) const { 6155 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 6156 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) { 6157 // Handle 'interrupt' attribute: 6158 llvm::Function *F = cast<llvm::Function>(GV); 6159 6160 // Step 1: Set ISR calling convention. 6161 F->setCallingConv(llvm::CallingConv::MSP430_INTR); 6162 6163 // Step 2: Add attributes goodness. 6164 F->addFnAttr(llvm::Attribute::NoInline); 6165 6166 // Step 3: Emit ISR vector alias. 6167 unsigned Num = attr->getNumber() / 2; 6168 llvm::GlobalAlias::create(llvm::Function::ExternalLinkage, 6169 "__isr_" + Twine(Num), F); 6170 } 6171 } 6172 } 6173 6174 //===----------------------------------------------------------------------===// 6175 // MIPS ABI Implementation. This works for both little-endian and 6176 // big-endian variants. 
6177 //===----------------------------------------------------------------------===// 6178 6179 namespace { 6180 class MipsABIInfo : public ABIInfo { 6181 bool IsO32; 6182 unsigned MinABIStackAlignInBytes, StackAlignInBytes; 6183 void CoerceToIntArgs(uint64_t TySize, 6184 SmallVectorImpl<llvm::Type *> &ArgList) const; 6185 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const; 6186 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const; 6187 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const; 6188 public: 6189 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) : 6190 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8), 6191 StackAlignInBytes(IsO32 ? 8 : 16) {} 6192 6193 ABIArgInfo classifyReturnType(QualType RetTy) const; 6194 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const; 6195 void computeInfo(CGFunctionInfo &FI) const override; 6196 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6197 QualType Ty) const override; 6198 bool shouldSignExtUnsignedType(QualType Ty) const override; 6199 }; 6200 6201 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo { 6202 unsigned SizeOfUnwindException; 6203 public: 6204 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32) 6205 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)), 6206 SizeOfUnwindException(IsO32 ? 24 : 32) {} 6207 6208 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { 6209 return 29; 6210 } 6211 6212 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 6213 CodeGen::CodeGenModule &CGM) const override { 6214 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 6215 if (!FD) return; 6216 llvm::Function *Fn = cast<llvm::Function>(GV); 6217 if (FD->hasAttr<Mips16Attr>()) { 6218 Fn->addFnAttr("mips16"); 6219 } 6220 else if (FD->hasAttr<NoMips16Attr>()) { 6221 Fn->addFnAttr("nomips16"); 6222 } 6223 6224 const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>(); 6225 if (!Attr) 6226 return; 6227 6228 const char *Kind; 6229 switch (Attr->getInterrupt()) { 6230 case MipsInterruptAttr::eic: Kind = "eic"; break; 6231 case MipsInterruptAttr::sw0: Kind = "sw0"; break; 6232 case MipsInterruptAttr::sw1: Kind = "sw1"; break; 6233 case MipsInterruptAttr::hw0: Kind = "hw0"; break; 6234 case MipsInterruptAttr::hw1: Kind = "hw1"; break; 6235 case MipsInterruptAttr::hw2: Kind = "hw2"; break; 6236 case MipsInterruptAttr::hw3: Kind = "hw3"; break; 6237 case MipsInterruptAttr::hw4: Kind = "hw4"; break; 6238 case MipsInterruptAttr::hw5: Kind = "hw5"; break; 6239 } 6240 6241 Fn->addFnAttr("interrupt", Kind); 6242 6243 } 6244 6245 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 6246 llvm::Value *Address) const override; 6247 6248 unsigned getSizeOfUnwindException() const override { 6249 return SizeOfUnwindException; 6250 } 6251 }; 6252 } 6253 6254 void MipsABIInfo::CoerceToIntArgs( 6255 uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const { 6256 llvm::IntegerType *IntTy = 6257 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8); 6258 6259 // Add (TySize / MinABIStackAlignInBytes) args of IntTy. 6260 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N) 6261 ArgList.push_back(IntTy); 6262 6263 // If necessary, add one more integer type to ArgList. 
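  // (Illustrative, for O32 where MinABIStackAlignInBytes == 4: a 9-byte
  //  aggregate (72 bits) yields two i32 arguments plus a trailing i8.)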
6264 unsigned R = TySize % (MinABIStackAlignInBytes * 8); 6265 6266 if (R) 6267 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R)); 6268 } 6269 6270 // In N32/64, an aligned double precision floating point field is passed in 6271 // a register. 6272 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const { 6273 SmallVector<llvm::Type*, 8> ArgList, IntArgList; 6274 6275 if (IsO32) { 6276 CoerceToIntArgs(TySize, ArgList); 6277 return llvm::StructType::get(getVMContext(), ArgList); 6278 } 6279 6280 if (Ty->isComplexType()) 6281 return CGT.ConvertType(Ty); 6282 6283 const RecordType *RT = Ty->getAs<RecordType>(); 6284 6285 // Unions/vectors are passed in integer registers. 6286 if (!RT || !RT->isStructureOrClassType()) { 6287 CoerceToIntArgs(TySize, ArgList); 6288 return llvm::StructType::get(getVMContext(), ArgList); 6289 } 6290 6291 const RecordDecl *RD = RT->getDecl(); 6292 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 6293 assert(!(TySize % 8) && "Size of structure must be multiple of 8."); 6294 6295 uint64_t LastOffset = 0; 6296 unsigned idx = 0; 6297 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64); 6298 6299 // Iterate over fields in the struct/class and check if there are any aligned 6300 // double fields. 6301 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 6302 i != e; ++i, ++idx) { 6303 const QualType Ty = i->getType(); 6304 const BuiltinType *BT = Ty->getAs<BuiltinType>(); 6305 6306 if (!BT || BT->getKind() != BuiltinType::Double) 6307 continue; 6308 6309 uint64_t Offset = Layout.getFieldOffset(idx); 6310 if (Offset % 64) // Ignore doubles that are not aligned. 6311 continue; 6312 6313 // Add ((Offset - LastOffset) / 64) args of type i64. 6314 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j) 6315 ArgList.push_back(I64); 6316 6317 // Add double type. 6318 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext())); 6319 LastOffset = Offset + 64; 6320 } 6321 6322 CoerceToIntArgs(TySize - LastOffset, IntArgList); 6323 ArgList.append(IntArgList.begin(), IntArgList.end()); 6324 6325 return llvm::StructType::get(getVMContext(), ArgList); 6326 } 6327 6328 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset, 6329 uint64_t Offset) const { 6330 if (OrigOffset + MinABIStackAlignInBytes > Offset) 6331 return nullptr; 6332 6333 return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8); 6334 } 6335 6336 ABIArgInfo 6337 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const { 6338 Ty = useFirstFieldIfTransparentUnion(Ty); 6339 6340 uint64_t OrigOffset = Offset; 6341 uint64_t TySize = getContext().getTypeSize(Ty); 6342 uint64_t Align = getContext().getTypeAlign(Ty) / 8; 6343 6344 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes), 6345 (uint64_t)StackAlignInBytes); 6346 unsigned CurrOffset = llvm::alignTo(Offset, Align); 6347 Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8; 6348 6349 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) { 6350 // Ignore empty aggregates. 6351 if (TySize == 0) 6352 return ABIArgInfo::getIgnore(); 6353 6354 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { 6355 Offset = OrigOffset + MinABIStackAlignInBytes; 6356 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 6357 } 6358 6359 // If we have reached here, aggregates are passed directly by coercing to 6360 // another structure type. 
Padding is inserted if the offset of the 6361 // aggregate is unaligned. 6362 ABIArgInfo ArgInfo = 6363 ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0, 6364 getPaddingType(OrigOffset, CurrOffset)); 6365 ArgInfo.setInReg(true); 6366 return ArgInfo; 6367 } 6368 6369 // Treat an enum type as its underlying type. 6370 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 6371 Ty = EnumTy->getDecl()->getIntegerType(); 6372 6373 // All integral types are promoted to the GPR width. 6374 if (Ty->isIntegralOrEnumerationType()) 6375 return ABIArgInfo::getExtend(); 6376 6377 return ABIArgInfo::getDirect( 6378 nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset)); 6379 } 6380 6381 llvm::Type* 6382 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const { 6383 const RecordType *RT = RetTy->getAs<RecordType>(); 6384 SmallVector<llvm::Type*, 8> RTList; 6385 6386 if (RT && RT->isStructureOrClassType()) { 6387 const RecordDecl *RD = RT->getDecl(); 6388 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 6389 unsigned FieldCnt = Layout.getFieldCount(); 6390 6391 // N32/64 returns struct/classes in floating point registers if the 6392 // following conditions are met: 6393 // 1. The size of the struct/class is no larger than 128-bit. 6394 // 2. The struct/class has one or two fields all of which are floating 6395 // point types. 6396 // 3. The offset of the first field is zero (this follows what gcc does). 6397 // 6398 // Any other composite results are returned in integer registers. 6399 // 6400 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) { 6401 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end(); 6402 for (; b != e; ++b) { 6403 const BuiltinType *BT = b->getType()->getAs<BuiltinType>(); 6404 6405 if (!BT || !BT->isFloatingPoint()) 6406 break; 6407 6408 RTList.push_back(CGT.ConvertType(b->getType())); 6409 } 6410 6411 if (b == e) 6412 return llvm::StructType::get(getVMContext(), RTList, 6413 RD->hasAttr<PackedAttr>()); 6414 6415 RTList.clear(); 6416 } 6417 } 6418 6419 CoerceToIntArgs(Size, RTList); 6420 return llvm::StructType::get(getVMContext(), RTList); 6421 } 6422 6423 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const { 6424 uint64_t Size = getContext().getTypeSize(RetTy); 6425 6426 if (RetTy->isVoidType()) 6427 return ABIArgInfo::getIgnore(); 6428 6429 // O32 doesn't treat zero-sized structs differently from other structs. 6430 // However, N32/N64 ignores zero sized return values. 6431 if (!IsO32 && Size == 0) 6432 return ABIArgInfo::getIgnore(); 6433 6434 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) { 6435 if (Size <= 128) { 6436 if (RetTy->isAnyComplexType()) 6437 return ABIArgInfo::getDirect(); 6438 6439 // O32 returns integer vectors in registers and N32/N64 returns all small 6440 // aggregates in registers. 6441 if (!IsO32 || 6442 (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) { 6443 ABIArgInfo ArgInfo = 6444 ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size)); 6445 ArgInfo.setInReg(true); 6446 return ArgInfo; 6447 } 6448 } 6449 6450 return getNaturalAlignIndirect(RetTy); 6451 } 6452 6453 // Treat an enum type as its underlying type. 6454 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 6455 RetTy = EnumTy->getDecl()->getIntegerType(); 6456 6457 return (RetTy->isPromotableIntegerType() ? 
6458           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
6459 }
6460
6461 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
6462   ABIArgInfo &RetInfo = FI.getReturnInfo();
6463   if (!getCXXABI().classifyReturnType(FI))
6464     RetInfo = classifyReturnType(FI.getReturnType());
6465
6466   // Check if a pointer to an aggregate is passed as a hidden argument.
6467   uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
6468
6469   for (auto &I : FI.arguments())
6470     I.info = classifyArgumentType(I.type, Offset);
6471 }
6472
6473 Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6474                                QualType OrigTy) const {
6475   QualType Ty = OrigTy;
6476
6477   // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
6478   // Pointers are also promoted in the same way but this only matters for N32.
6479   unsigned SlotSizeInBits = IsO32 ? 32 : 64;
6480   unsigned PtrWidth = getTarget().getPointerWidth(0);
6481   bool DidPromote = false;
6482   if ((Ty->isIntegerType() &&
6483        getContext().getIntWidth(Ty) < SlotSizeInBits) ||
6484       (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
6485     DidPromote = true;
6486     Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits,
6487                                             Ty->isSignedIntegerType());
6488   }
6489
6490   auto TyInfo = getContext().getTypeInfoInChars(Ty);
6491
6492   // The alignment of things in the argument area is never larger than
6493   // StackAlignInBytes.
6494   TyInfo.second =
6495     std::min(TyInfo.second, CharUnits::fromQuantity(StackAlignInBytes));
6496
6497   // MinABIStackAlignInBytes is the size of argument slots on the stack.
6498   CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);
6499
6500   Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
6501                                   TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true);
6502
6503
6504   // If there was a promotion, "unpromote" into a temporary.
6505   // TODO: can we just use a pointer into a subset of the original slot?
6506   if (DidPromote) {
6507     Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp");
6508     llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr);
6509
6510     // Truncate down to the right width.
6511     llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType()
6512                                                  : CGF.IntPtrTy);
6513     llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy);
6514     if (OrigTy->isPointerType())
6515       V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType());
6516
6517     CGF.Builder.CreateStore(V, Temp);
6518     Addr = Temp;
6519   }
6520
6521   return Addr;
6522 }
6523
6524 bool MipsABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
6525   int TySize = getContext().getTypeSize(Ty);
6526
6527   // The MIPS64 ABI requires unsigned 32-bit integers to be sign extended.
6528   if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
6529     return true;
6530
6531   return false;
6532 }
6533
6534 bool
6535 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
6536                                                llvm::Value *Address) const {
6537   // This information comes from gcc's implementation, which seems to be
6538   // as canonical as it gets.
6539
6540   // Everything on MIPS is 4 bytes. Double-precision FP registers
6541   // are aliased to pairs of single-precision FP registers.
6542   llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
6543
6544   // 0-31 are the general purpose registers, $0 - $31.
6545   // 32-63 are the floating-point registers, $f0 - $f31.
6546   // 64 and 65 are the multiply/divide registers, $hi and $lo.
6547   // 66 is the (notional, I think) register for signal-handler return.
6548 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65); 6549 6550 // 67-74 are the floating-point status registers, $fcc0 - $fcc7. 6551 // They are one bit wide and ignored here. 6552 6553 // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31. 6554 // (coprocessor 1 is the FP unit) 6555 // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31. 6556 // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31. 6557 // 176-181 are the DSP accumulator registers. 6558 AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181); 6559 return false; 6560 } 6561 6562 //===----------------------------------------------------------------------===// 6563 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults. 6564 // Currently subclassed only to implement custom OpenCL C function attribute 6565 // handling. 6566 //===----------------------------------------------------------------------===// 6567 6568 namespace { 6569 6570 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo { 6571 public: 6572 TCETargetCodeGenInfo(CodeGenTypes &CGT) 6573 : DefaultTargetCodeGenInfo(CGT) {} 6574 6575 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 6576 CodeGen::CodeGenModule &M) const override; 6577 }; 6578 6579 void TCETargetCodeGenInfo::setTargetAttributes( 6580 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const { 6581 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 6582 if (!FD) return; 6583 6584 llvm::Function *F = cast<llvm::Function>(GV); 6585 6586 if (M.getLangOpts().OpenCL) { 6587 if (FD->hasAttr<OpenCLKernelAttr>()) { 6588 // OpenCL C Kernel functions are not subject to inlining 6589 F->addFnAttr(llvm::Attribute::NoInline); 6590 const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>(); 6591 if (Attr) { 6592 // Convert the reqd_work_group_size() attributes to metadata. 6593 llvm::LLVMContext &Context = F->getContext(); 6594 llvm::NamedMDNode *OpenCLMetadata = 6595 M.getModule().getOrInsertNamedMetadata( 6596 "opencl.kernel_wg_size_info"); 6597 6598 SmallVector<llvm::Metadata *, 5> Operands; 6599 Operands.push_back(llvm::ConstantAsMetadata::get(F)); 6600 6601 Operands.push_back( 6602 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 6603 M.Int32Ty, llvm::APInt(32, Attr->getXDim())))); 6604 Operands.push_back( 6605 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 6606 M.Int32Ty, llvm::APInt(32, Attr->getYDim())))); 6607 Operands.push_back( 6608 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue( 6609 M.Int32Ty, llvm::APInt(32, Attr->getZDim())))); 6610 6611 // Add a boolean constant operand for "required" (true) or "hint" 6612 // (false) for implementing the work_group_size_hint attr later. 6613 // Currently always true as the hint is not yet implemented. 
6614 Operands.push_back( 6615 llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context))); 6616 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands)); 6617 } 6618 } 6619 } 6620 } 6621 6622 } 6623 6624 //===----------------------------------------------------------------------===// 6625 // Hexagon ABI Implementation 6626 //===----------------------------------------------------------------------===// 6627 6628 namespace { 6629 6630 class HexagonABIInfo : public ABIInfo { 6631 6632 6633 public: 6634 HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} 6635 6636 private: 6637 6638 ABIArgInfo classifyReturnType(QualType RetTy) const; 6639 ABIArgInfo classifyArgumentType(QualType RetTy) const; 6640 6641 void computeInfo(CGFunctionInfo &FI) const override; 6642 6643 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 6644 QualType Ty) const override; 6645 }; 6646 6647 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo { 6648 public: 6649 HexagonTargetCodeGenInfo(CodeGenTypes &CGT) 6650 :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {} 6651 6652 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { 6653 return 29; 6654 } 6655 }; 6656 6657 } 6658 6659 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const { 6660 if (!getCXXABI().classifyReturnType(FI)) 6661 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 6662 for (auto &I : FI.arguments()) 6663 I.info = classifyArgumentType(I.type); 6664 } 6665 6666 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const { 6667 if (!isAggregateTypeForABI(Ty)) { 6668 // Treat an enum type as its underlying type. 6669 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 6670 Ty = EnumTy->getDecl()->getIntegerType(); 6671 6672 return (Ty->isPromotableIntegerType() ? 6673 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 6674 } 6675 6676 // Ignore empty records. 6677 if (isEmptyRecord(getContext(), Ty, true)) 6678 return ABIArgInfo::getIgnore(); 6679 6680 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 6681 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 6682 6683 uint64_t Size = getContext().getTypeSize(Ty); 6684 if (Size > 64) 6685 return getNaturalAlignIndirect(Ty, /*ByVal=*/true); 6686 // Pass in the smallest viable integer type. 6687 else if (Size > 32) 6688 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext())); 6689 else if (Size > 16) 6690 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); 6691 else if (Size > 8) 6692 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext())); 6693 else 6694 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext())); 6695 } 6696 6697 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const { 6698 if (RetTy->isVoidType()) 6699 return ABIArgInfo::getIgnore(); 6700 6701 // Large vector types should be returned via memory. 6702 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64) 6703 return getNaturalAlignIndirect(RetTy); 6704 6705 if (!isAggregateTypeForABI(RetTy)) { 6706 // Treat an enum type as its underlying type. 6707 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 6708 RetTy = EnumTy->getDecl()->getIntegerType(); 6709 6710 return (RetTy->isPromotableIntegerType() ? 6711 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 6712 } 6713 6714 if (isEmptyRecord(getContext(), RetTy, true)) 6715 return ABIArgInfo::getIgnore(); 6716 6717 // Aggregates <= 8 bytes are returned in r0; other aggregates 6718 // are returned indirectly. 
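  // (An illustrative sketch, not from the original source: a 4-byte
  // struct { char c; short s; } comes back directly as an i32, while a
  // 12-byte struct of three ints exceeds 64 bits and is returned indirectly.)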
6719   uint64_t Size = getContext().getTypeSize(RetTy);
6720   if (Size <= 64) {
6721     // Return in the smallest viable integer type.
6722     if (Size <= 8)
6723       return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
6724     if (Size <= 16)
6725       return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
6726     if (Size <= 32)
6727       return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
6728     return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
6729   }
6730
6731   return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
6732 }
6733
6734 Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6735                                   QualType Ty) const {
6736   // FIXME: Someone needs to audit that this handles alignment correctly.
6737   return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
6738                           getContext().getTypeInfoInChars(Ty),
6739                           CharUnits::fromQuantity(4),
6740                           /*AllowHigherAlign*/ true);
6741 }
6742
6743 //===----------------------------------------------------------------------===//
6744 // Lanai ABI Implementation
6745 //===----------------------------------------------------------------------===//
6746
6747 namespace {
6748 class LanaiABIInfo : public DefaultABIInfo {
6749 public:
6750   LanaiABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
6751
6752   bool shouldUseInReg(QualType Ty, CCState &State) const;
6753
6754   void computeInfo(CGFunctionInfo &FI) const override {
6755     CCState State(FI.getCallingConvention());
6756     // Lanai uses 4 registers to pass arguments unless the function has the
6757     // regparm attribute set.
6758     if (FI.getHasRegParm()) {
6759       State.FreeRegs = FI.getRegParm();
6760     } else {
6761       State.FreeRegs = 4;
6762     }
6763
6764     if (!getCXXABI().classifyReturnType(FI))
6765       FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
6766     for (auto &I : FI.arguments())
6767       I.info = classifyArgumentType(I.type, State);
6768   }
6769
6770   ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
6771   ABIArgInfo classifyArgumentType(QualType Ty, CCState &State) const;
6772 };
6773 } // end anonymous namespace
6774
6775 bool LanaiABIInfo::shouldUseInReg(QualType Ty, CCState &State) const {
6776   unsigned Size = getContext().getTypeSize(Ty);
6777   unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U;
6778
6779   if (SizeInRegs == 0)
6780     return false;
6781
6782   if (SizeInRegs > State.FreeRegs) {
6783     State.FreeRegs = 0;
6784     return false;
6785   }
6786
6787   State.FreeRegs -= SizeInRegs;
6788
6789   return true;
6790 }
6791
6792 ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal,
6793                                            CCState &State) const {
6794   if (!ByVal) {
6795     if (State.FreeRegs) {
6796       --State.FreeRegs; // Non-byval indirects just use one pointer.
6797       return getNaturalAlignIndirectInReg(Ty);
6798     }
6799     return getNaturalAlignIndirect(Ty, false);
6800   }
6801
6802   // Compute the byval alignment.
6803   const unsigned MinABIStackAlignInBytes = 4;
6804   unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
6805   return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
6806                                  /*Realign=*/TypeAlign >
6807                                      MinABIStackAlignInBytes);
6808 }
6809
6810 ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
6811                                               CCState &State) const {
6812   // Check with the C++ ABI first.
6813 const RecordType *RT = Ty->getAs<RecordType>(); 6814 if (RT) { 6815 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()); 6816 if (RAA == CGCXXABI::RAA_Indirect) { 6817 return getIndirectResult(Ty, /*ByVal=*/false, State); 6818 } else if (RAA == CGCXXABI::RAA_DirectInMemory) { 6819 return getNaturalAlignIndirect(Ty, /*ByRef=*/true); 6820 } 6821 } 6822 6823 if (isAggregateTypeForABI(Ty)) { 6824 // Structures with flexible arrays are always indirect. 6825 if (RT && RT->getDecl()->hasFlexibleArrayMember()) 6826 return getIndirectResult(Ty, /*ByVal=*/true, State); 6827 6828 // Ignore empty structs/unions. 6829 if (isEmptyRecord(getContext(), Ty, true)) 6830 return ABIArgInfo::getIgnore(); 6831 6832 llvm::LLVMContext &LLVMContext = getVMContext(); 6833 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32; 6834 if (SizeInRegs <= State.FreeRegs) { 6835 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext); 6836 SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32); 6837 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements); 6838 State.FreeRegs -= SizeInRegs; 6839 return ABIArgInfo::getDirectInReg(Result); 6840 } else { 6841 State.FreeRegs = 0; 6842 } 6843 return getIndirectResult(Ty, true, State); 6844 } 6845 6846 // Treat an enum type as its underlying type. 6847 if (const auto *EnumTy = Ty->getAs<EnumType>()) 6848 Ty = EnumTy->getDecl()->getIntegerType(); 6849 6850 bool InReg = shouldUseInReg(Ty, State); 6851 if (Ty->isPromotableIntegerType()) { 6852 if (InReg) 6853 return ABIArgInfo::getDirectInReg(); 6854 return ABIArgInfo::getExtend(); 6855 } 6856 if (InReg) 6857 return ABIArgInfo::getDirectInReg(); 6858 return ABIArgInfo::getDirect(); 6859 } 6860 6861 namespace { 6862 class LanaiTargetCodeGenInfo : public TargetCodeGenInfo { 6863 public: 6864 LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 6865 : TargetCodeGenInfo(new LanaiABIInfo(CGT)) {} 6866 }; 6867 } 6868 6869 //===----------------------------------------------------------------------===// 6870 // AMDGPU ABI Implementation 6871 //===----------------------------------------------------------------------===// 6872 6873 namespace { 6874 6875 class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo { 6876 public: 6877 AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT) 6878 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 6879 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 6880 CodeGen::CodeGenModule &M) const override; 6881 unsigned getOpenCLKernelCallingConv() const override; 6882 }; 6883 6884 } 6885 6886 static void appendOpenCLVersionMD (CodeGen::CodeGenModule &CGM); 6887 6888 void AMDGPUTargetCodeGenInfo::setTargetAttributes( 6889 const Decl *D, 6890 llvm::GlobalValue *GV, 6891 CodeGen::CodeGenModule &M) const { 6892 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D); 6893 if (!FD) 6894 return; 6895 6896 if (const auto Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) { 6897 llvm::Function *F = cast<llvm::Function>(GV); 6898 uint32_t NumVGPR = Attr->getNumVGPR(); 6899 if (NumVGPR != 0) 6900 F->addFnAttr("amdgpu_num_vgpr", llvm::utostr(NumVGPR)); 6901 } 6902 6903 if (const auto Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) { 6904 llvm::Function *F = cast<llvm::Function>(GV); 6905 unsigned NumSGPR = Attr->getNumSGPR(); 6906 if (NumSGPR != 0) 6907 F->addFnAttr("amdgpu_num_sgpr", llvm::utostr(NumSGPR)); 6908 } 6909 6910 appendOpenCLVersionMD(M); 6911 } 6912 6913 6914 unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const { 6915 return llvm::CallingConv::AMDGPU_KERNEL; 6916 } 
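// As an illustrative sketch (not from the original source) of the effect of
// the two attribute hooks above: a function declared as
//
//   __attribute__((amdgpu_num_vgpr(32))) void foo(void);
//
// is annotated in the IR with the function attribute "amdgpu_num_vgpr"="32",
// and OpenCL kernels are given the amdgpu_kernel calling convention.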
6917
6918 //===----------------------------------------------------------------------===//
6919 // SPARC v8 ABI Implementation.
6920 // Based on the SPARC Compliance Definition version 2.4.1.
6921 //
6922 // Ensures that complex values are passed in registers.
6923 //
6924 namespace {
6925 class SparcV8ABIInfo : public DefaultABIInfo {
6926 public:
6927   SparcV8ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
6928
6929 private:
6930   ABIArgInfo classifyReturnType(QualType RetTy) const;
6931   void computeInfo(CGFunctionInfo &FI) const override;
6932 };
6933 } // end anonymous namespace
6934
6935
6936 ABIArgInfo
6937 SparcV8ABIInfo::classifyReturnType(QualType Ty) const {
6938   if (Ty->isAnyComplexType()) {
6939     return ABIArgInfo::getDirect();
6940   }
6941   else {
6942     return DefaultABIInfo::classifyReturnType(Ty);
6943   }
6944 }
6945
6946 void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const {
6947
6948   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
6949   for (auto &Arg : FI.arguments())
6950     Arg.info = classifyArgumentType(Arg.type);
6951 }
6952
6953 namespace {
6954 class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo {
6955 public:
6956   SparcV8TargetCodeGenInfo(CodeGenTypes &CGT)
6957     : TargetCodeGenInfo(new SparcV8ABIInfo(CGT)) {}
6958 };
6959 } // end anonymous namespace
6960
6961 //===----------------------------------------------------------------------===//
6962 // SPARC v9 ABI Implementation.
6963 // Based on the SPARC Compliance Definition version 2.4.1.
6964 //
6965 // Function arguments are mapped to a nominal "parameter array" and promoted to
6966 // registers depending on their type. Each argument occupies 8 or 16 bytes in
6967 // the array; structs larger than 16 bytes are passed indirectly.
6968 //
6969 // One case requires special care:
6970 //
6971 //   struct mixed {
6972 //     int i;
6973 //     float f;
6974 //   };
6975 //
6976 // When a struct mixed is passed by value, it only occupies 8 bytes in the
6977 // parameter array, but the int is passed in an integer register, and the float
6978 // is passed in a floating point register. This is represented as two arguments
6979 // with the LLVM IR inreg attribute:
6980 //
6981 //   declare void f(i32 inreg %i, float inreg %f)
6982 //
6983 // The code generator will only allocate 4 bytes from the parameter array for
6984 // the inreg arguments. All other arguments are allocated a multiple of 8
6985 // bytes.
6986 //
6987 namespace {
6988 class SparcV9ABIInfo : public ABIInfo {
6989 public:
6990   SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
6991
6992 private:
6993   ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
6994   void computeInfo(CGFunctionInfo &FI) const override;
6995   Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6996                     QualType Ty) const override;
6997
6998   // Coercion type builder for structs passed in registers. The coercion type
6999   // serves two purposes:
7000   //
7001   // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
7002   //    in registers.
7003   // 2. Expose aligned floating point elements as first-level elements, so the
7004   //    code generator knows to pass them in floating point registers.
7005   //
7006   // We also compute the InReg flag which indicates that the struct contains
7007   // aligned 32-bit floats.
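  // As an illustrative sketch (not from the original source): a C struct
  //
  //   struct S {
  //     int i;
  //     double d;
  //   };
  //
  // is lowered by the CoerceBuilder below to the coercion type
  // { i64, double }: the int travels in the first 64-bit word (padded out to
  // i64), and the aligned double is exposed so it is passed in a floating
  // point register. Since the struct contains no floats smaller than 64 bits,
  // InReg stays false.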
7008 // 7009 struct CoerceBuilder { 7010 llvm::LLVMContext &Context; 7011 const llvm::DataLayout &DL; 7012 SmallVector<llvm::Type*, 8> Elems; 7013 uint64_t Size; 7014 bool InReg; 7015 7016 CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl) 7017 : Context(c), DL(dl), Size(0), InReg(false) {} 7018 7019 // Pad Elems with integers until Size is ToSize. 7020 void pad(uint64_t ToSize) { 7021 assert(ToSize >= Size && "Cannot remove elements"); 7022 if (ToSize == Size) 7023 return; 7024 7025 // Finish the current 64-bit word. 7026 uint64_t Aligned = llvm::alignTo(Size, 64); 7027 if (Aligned > Size && Aligned <= ToSize) { 7028 Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size)); 7029 Size = Aligned; 7030 } 7031 7032 // Add whole 64-bit words. 7033 while (Size + 64 <= ToSize) { 7034 Elems.push_back(llvm::Type::getInt64Ty(Context)); 7035 Size += 64; 7036 } 7037 7038 // Final in-word padding. 7039 if (Size < ToSize) { 7040 Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size)); 7041 Size = ToSize; 7042 } 7043 } 7044 7045 // Add a floating point element at Offset. 7046 void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) { 7047 // Unaligned floats are treated as integers. 7048 if (Offset % Bits) 7049 return; 7050 // The InReg flag is only required if there are any floats < 64 bits. 7051 if (Bits < 64) 7052 InReg = true; 7053 pad(Offset); 7054 Elems.push_back(Ty); 7055 Size = Offset + Bits; 7056 } 7057 7058 // Add a struct type to the coercion type, starting at Offset (in bits). 7059 void addStruct(uint64_t Offset, llvm::StructType *StrTy) { 7060 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy); 7061 for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) { 7062 llvm::Type *ElemTy = StrTy->getElementType(i); 7063 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i); 7064 switch (ElemTy->getTypeID()) { 7065 case llvm::Type::StructTyID: 7066 addStruct(ElemOffset, cast<llvm::StructType>(ElemTy)); 7067 break; 7068 case llvm::Type::FloatTyID: 7069 addFloat(ElemOffset, ElemTy, 32); 7070 break; 7071 case llvm::Type::DoubleTyID: 7072 addFloat(ElemOffset, ElemTy, 64); 7073 break; 7074 case llvm::Type::FP128TyID: 7075 addFloat(ElemOffset, ElemTy, 128); 7076 break; 7077 case llvm::Type::PointerTyID: 7078 if (ElemOffset % 64 == 0) { 7079 pad(ElemOffset); 7080 Elems.push_back(ElemTy); 7081 Size += 64; 7082 } 7083 break; 7084 default: 7085 break; 7086 } 7087 } 7088 } 7089 7090 // Check if Ty is a usable substitute for the coercion type. 7091 bool isUsableType(llvm::StructType *Ty) const { 7092 return llvm::makeArrayRef(Elems) == Ty->elements(); 7093 } 7094 7095 // Get the coercion type as a literal struct type. 7096 llvm::Type *getType() const { 7097 if (Elems.size() == 1) 7098 return Elems.front(); 7099 else 7100 return llvm::StructType::get(Context, Elems); 7101 } 7102 }; 7103 }; 7104 } // end anonymous namespace 7105 7106 ABIArgInfo 7107 SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const { 7108 if (Ty->isVoidType()) 7109 return ABIArgInfo::getIgnore(); 7110 7111 uint64_t Size = getContext().getTypeSize(Ty); 7112 7113 // Anything too big to fit in registers is passed with an explicit indirect 7114 // pointer / sret pointer. 7115 if (Size > SizeLimit) 7116 return getNaturalAlignIndirect(Ty, /*ByVal=*/false); 7117 7118 // Treat an enum type as its underlying type. 
7119 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 7120 Ty = EnumTy->getDecl()->getIntegerType(); 7121 7122 // Integer types smaller than a register are extended. 7123 if (Size < 64 && Ty->isIntegerType()) 7124 return ABIArgInfo::getExtend(); 7125 7126 // Other non-aggregates go in registers. 7127 if (!isAggregateTypeForABI(Ty)) 7128 return ABIArgInfo::getDirect(); 7129 7130 // If a C++ object has either a non-trivial copy constructor or a non-trivial 7131 // destructor, it is passed with an explicit indirect pointer / sret pointer. 7132 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) 7133 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory); 7134 7135 // This is a small aggregate type that should be passed in registers. 7136 // Build a coercion type from the LLVM struct type. 7137 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty)); 7138 if (!StrTy) 7139 return ABIArgInfo::getDirect(); 7140 7141 CoerceBuilder CB(getVMContext(), getDataLayout()); 7142 CB.addStruct(0, StrTy); 7143 CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64)); 7144 7145 // Try to use the original type for coercion. 7146 llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType(); 7147 7148 if (CB.InReg) 7149 return ABIArgInfo::getDirectInReg(CoerceTy); 7150 else 7151 return ABIArgInfo::getDirect(CoerceTy); 7152 } 7153 7154 Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 7155 QualType Ty) const { 7156 ABIArgInfo AI = classifyType(Ty, 16 * 8); 7157 llvm::Type *ArgTy = CGT.ConvertType(Ty); 7158 if (AI.canHaveCoerceToType() && !AI.getCoerceToType()) 7159 AI.setCoerceToType(ArgTy); 7160 7161 CharUnits SlotSize = CharUnits::fromQuantity(8); 7162 7163 CGBuilderTy &Builder = CGF.Builder; 7164 Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize); 7165 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy); 7166 7167 auto TypeInfo = getContext().getTypeInfoInChars(Ty); 7168 7169 Address ArgAddr = Address::invalid(); 7170 CharUnits Stride; 7171 switch (AI.getKind()) { 7172 case ABIArgInfo::Expand: 7173 case ABIArgInfo::CoerceAndExpand: 7174 case ABIArgInfo::InAlloca: 7175 llvm_unreachable("Unsupported ABI kind for va_arg"); 7176 7177 case ABIArgInfo::Extend: { 7178 Stride = SlotSize; 7179 CharUnits Offset = SlotSize - TypeInfo.first; 7180 ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend"); 7181 break; 7182 } 7183 7184 case ABIArgInfo::Direct: { 7185 auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType()); 7186 Stride = CharUnits::fromQuantity(AllocSize).alignTo(SlotSize); 7187 ArgAddr = Addr; 7188 break; 7189 } 7190 7191 case ABIArgInfo::Indirect: 7192 Stride = SlotSize; 7193 ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect"); 7194 ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"), 7195 TypeInfo.second); 7196 break; 7197 7198 case ABIArgInfo::Ignore: 7199 return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.second); 7200 } 7201 7202 // Update VAList. 
7203   llvm::Value *NextPtr =
7204     Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), Stride, "ap.next");
7205   Builder.CreateStore(NextPtr, VAListAddr);
7206
7207   return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr");
7208 }
7209
7210 void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
7211   FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
7212   for (auto &I : FI.arguments())
7213     I.info = classifyType(I.type, 16 * 8);
7214 }
7215
7216 namespace {
7217 class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
7218 public:
7219   SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
7220     : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}
7221
7222   int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
7223     return 14;
7224   }
7225
7226   bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
7227                                llvm::Value *Address) const override;
7228 };
7229 } // end anonymous namespace
7230
7231 bool
7232 SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
7233                                                   llvm::Value *Address) const {
7234   // This is calculated from the LLVM and GCC tables and verified
7235   // against gcc output. AFAIK all ABIs use the same encoding.
7236
7237   CodeGen::CGBuilderTy &Builder = CGF.Builder;
7238
7239   llvm::IntegerType *i8 = CGF.Int8Ty;
7240   llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
7241   llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
7242
7243   // 0-31: the 8-byte general-purpose registers
7244   AssignToArrayRange(Builder, Address, Eight8, 0, 31);
7245
7246   // 32-63: f0-31, the 4-byte floating-point registers
7247   AssignToArrayRange(Builder, Address, Four8, 32, 63);
7248
7249   // Y   = 64
7250   // PSR = 65
7251   // WIM = 66
7252   // TBR = 67
7253   // PC  = 68
7254   // NPC = 69
7255   // FSR = 70
7256   // CSR = 71
7257   AssignToArrayRange(Builder, Address, Eight8, 64, 71);
7258
7259   // 72-87: d0-15, the 8-byte floating-point registers
7260   AssignToArrayRange(Builder, Address, Eight8, 72, 87);
7261
7262   return false;
7263 }
7264
7265
7266 //===----------------------------------------------------------------------===//
7267 // XCore ABI Implementation
7268 //===----------------------------------------------------------------------===//
7269
7270 namespace {
7271
7272 /// A SmallStringEnc instance is used to build up the TypeString by passing
7273 /// it by reference between functions that append to it.
7274 typedef llvm::SmallString<128> SmallStringEnc;
7275
7276 /// TypeStringCache caches the meta encodings of Types.
7277 ///
7278 /// The reason for caching TypeStrings is twofold:
7279 /// 1. To cache a type's encoding for later uses;
7280 /// 2. As a means to break recursive member type inclusion.
7281 ///
7282 /// A cache Entry can have a Status of:
7283 /// NonRecursive: The type encoding is not recursive;
7284 /// Recursive: The type encoding is recursive;
7285 /// Incomplete: An incomplete TypeString;
7286 /// IncompleteUsed: An incomplete TypeString that has been used in a
7287 /// Recursive type encoding.
7288 ///
7289 /// A NonRecursive entry will have all of its sub-members expanded as fully
7290 /// as possible. Whilst it may contain types which are recursive, the type
7291 /// itself is not recursive and thus its encoding may be safely used whenever
7292 /// the type is encountered.
7293 ///
7294 /// A Recursive entry will have all of its sub-members expanded as fully as
7295 /// possible. The type itself is recursive and it may contain other types which
7296 /// are recursive. The Recursive encoding must not be used during the expansion
/// of a recursive type's recursive branch. For simplicity the code uses
7298 /// IncompleteCount to reject all usage of Recursive encodings for member types.
7299 ///
7300 /// An Incomplete entry is always a RecordType and only encodes its
7301 /// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
7302 /// are placed into the cache during type expansion as a means to identify and
7303 /// handle recursive inclusion of types as sub-members. If there is recursion
7304 /// the entry becomes IncompleteUsed.
7305 ///
7306 /// During the expansion of a RecordType's members:
7307 ///
7308 /// If the cache contains a NonRecursive encoding for the member type, the
7309 /// cached encoding is used;
7310 ///
7311 /// If the cache contains a Recursive encoding for the member type, the
7312 /// cached encoding is 'Swapped' out, as it may be incorrect, and...
7313 ///
7314 /// If the member is a RecordType, an Incomplete encoding is placed into the
7315 /// cache to break potential recursive inclusion of itself as a sub-member;
7316 ///
7317 /// Once a member RecordType has been expanded, its temporary incomplete
7318 /// entry is removed from the cache. If a Recursive encoding was swapped out
7319 /// it is swapped back in;
7320 ///
7321 /// If an incomplete entry is used to expand a sub-member, the incomplete
7322 /// entry is marked as IncompleteUsed. The cache keeps count of how many
7323 /// IncompleteUsed entries it currently contains in IncompleteUsedCount;
7324 ///
7325 /// If a member's encoding is found to be NonRecursive or Recursive (viz:
7326 /// IncompleteUsedCount==0), the member's encoding is added to the cache.
7327 /// Otherwise the member is part of a recursive type and thus the recursion has
7328 /// been exited too soon for the encoding to be correct for the member.
7329 ///
7330 class TypeStringCache {
7331   enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
7332   struct Entry {
7333     std::string Str;     // The encoded TypeString for the type.
7334     enum Status State;   // Information about the encoding in 'Str'.
7335     std::string Swapped; // A temporary place holder for a Recursive encoding
7336                          // during the expansion of RecordType's members.
7337   };
7338   std::map<const IdentifierInfo *, struct Entry> Map;
7339   unsigned IncompleteCount;     // Number of Incomplete entries in the Map.
7340   unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
7341 public:
7342   TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
7343   void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
7344   bool removeIncomplete(const IdentifierInfo *ID);
7345   void addIfComplete(const IdentifierInfo *ID, StringRef Str,
7346                      bool IsRecursive);
7347   StringRef lookupStr(const IdentifierInfo *ID);
7348 };
7349
7350 /// TypeString encodings for enum & union fields must be ordered.
7351 /// FieldEncoding is a helper for this ordering process.
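/// (A note on the comparison below, not from the original source: a named
/// field compares less than an unnamed one, so named fields sort first;
/// between fields that both have or both lack names, the encoded strings
/// themselves are compared.)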
7352 class FieldEncoding { 7353 bool HasName; 7354 std::string Enc; 7355 public: 7356 FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {} 7357 StringRef str() {return Enc.c_str();} 7358 bool operator<(const FieldEncoding &rhs) const { 7359 if (HasName != rhs.HasName) return HasName; 7360 return Enc < rhs.Enc; 7361 } 7362 }; 7363 7364 class XCoreABIInfo : public DefaultABIInfo { 7365 public: 7366 XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {} 7367 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 7368 QualType Ty) const override; 7369 }; 7370 7371 class XCoreTargetCodeGenInfo : public TargetCodeGenInfo { 7372 mutable TypeStringCache TSC; 7373 public: 7374 XCoreTargetCodeGenInfo(CodeGenTypes &CGT) 7375 :TargetCodeGenInfo(new XCoreABIInfo(CGT)) {} 7376 void emitTargetMD(const Decl *D, llvm::GlobalValue *GV, 7377 CodeGen::CodeGenModule &M) const override; 7378 }; 7379 7380 } // End anonymous namespace. 7381 7382 // TODO: this implementation is likely now redundant with the default 7383 // EmitVAArg. 7384 Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr, 7385 QualType Ty) const { 7386 CGBuilderTy &Builder = CGF.Builder; 7387 7388 // Get the VAList. 7389 CharUnits SlotSize = CharUnits::fromQuantity(4); 7390 Address AP(Builder.CreateLoad(VAListAddr), SlotSize); 7391 7392 // Handle the argument. 7393 ABIArgInfo AI = classifyArgumentType(Ty); 7394 CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty); 7395 llvm::Type *ArgTy = CGT.ConvertType(Ty); 7396 if (AI.canHaveCoerceToType() && !AI.getCoerceToType()) 7397 AI.setCoerceToType(ArgTy); 7398 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy); 7399 7400 Address Val = Address::invalid(); 7401 CharUnits ArgSize = CharUnits::Zero(); 7402 switch (AI.getKind()) { 7403 case ABIArgInfo::Expand: 7404 case ABIArgInfo::CoerceAndExpand: 7405 case ABIArgInfo::InAlloca: 7406 llvm_unreachable("Unsupported ABI kind for va_arg"); 7407 case ABIArgInfo::Ignore: 7408 Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign); 7409 ArgSize = CharUnits::Zero(); 7410 break; 7411 case ABIArgInfo::Extend: 7412 case ABIArgInfo::Direct: 7413 Val = Builder.CreateBitCast(AP, ArgPtrTy); 7414 ArgSize = CharUnits::fromQuantity( 7415 getDataLayout().getTypeAllocSize(AI.getCoerceToType())); 7416 ArgSize = ArgSize.alignTo(SlotSize); 7417 break; 7418 case ABIArgInfo::Indirect: 7419 Val = Builder.CreateElementBitCast(AP, ArgPtrTy); 7420 Val = Address(Builder.CreateLoad(Val), TypeAlign); 7421 ArgSize = SlotSize; 7422 break; 7423 } 7424 7425 // Increment the VAList. 7426 if (!ArgSize.isZero()) { 7427 llvm::Value *APN = 7428 Builder.CreateConstInBoundsByteGEP(AP.getPointer(), ArgSize); 7429 Builder.CreateStore(APN, VAListAddr); 7430 } 7431 7432 return Val; 7433 } 7434 7435 /// During the expansion of a RecordType, an incomplete TypeString is placed 7436 /// into the cache as a means to identify and break recursion. 7437 /// If there is a Recursive encoding in the cache, it is swapped out and will 7438 /// be reinserted by removeIncomplete(). 7439 /// All other types of encoding should have been used rather than arriving here. 
7440 void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
7441                                     std::string StubEnc) {
7442   if (!ID)
7443     return;
7444   Entry &E = Map[ID];
7445   assert( (E.Str.empty() || E.State == Recursive) &&
7446          "Incorrect use of addIncomplete");
7447   assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
7448   E.Swapped.swap(E.Str); // swap out the Recursive
7449   E.Str.swap(StubEnc);
7450   E.State = Incomplete;
7451   ++IncompleteCount;
7452 }
7453
7454 /// Once the RecordType has been expanded, the temporary incomplete TypeString
7455 /// must be removed from the cache.
7456 /// If a Recursive was swapped out by addIncomplete(), it will be replaced.
7457 /// Returns true if the RecordType was defined recursively.
7458 bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
7459   if (!ID)
7460     return false;
7461   auto I = Map.find(ID);
7462   assert(I != Map.end() && "Entry not present");
7463   Entry &E = I->second;
7464   assert( (E.State == Incomplete ||
7465            E.State == IncompleteUsed) &&
7466          "Entry must be an incomplete type");
7467   bool IsRecursive = false;
7468   if (E.State == IncompleteUsed) {
7469     // We made use of our Incomplete encoding, thus we are recursive.
7470     IsRecursive = true;
7471     --IncompleteUsedCount;
7472   }
7473   if (E.Swapped.empty())
7474     Map.erase(I);
7475   else {
7476     // Swap the Recursive back.
7477     E.Swapped.swap(E.Str);
7478     E.Swapped.clear();
7479     E.State = Recursive;
7480   }
7481   --IncompleteCount;
7482   return IsRecursive;
7483 }
7484
7485 /// Add the encoded TypeString to the cache only if it is NonRecursive or
7486 /// Recursive (viz: all sub-members were expanded as fully as possible).
7487 void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
7488                                     bool IsRecursive) {
7489   if (!ID || IncompleteUsedCount)
7490     return; // No key or it is an incomplete sub-type so don't add.
7491   Entry &E = Map[ID];
7492   if (IsRecursive && !E.Str.empty()) {
7493     assert(E.State==Recursive && E.Str.size() == Str.size() &&
7494            "This is not the same Recursive entry");
7495     // The parent container was not recursive after all, so we could have used
7496     // this Recursive sub-member entry, but we assumed the worst when we
7497     // started (viz: IncompleteCount!=0).
7498     return;
7499   }
7500   assert(E.Str.empty() && "Entry already present");
7501   E.Str = Str.str();
7502   E.State = IsRecursive? Recursive : NonRecursive;
7503 }
7504
7505 /// Return a cached TypeString encoding for the ID. If there isn't one, or we
7506 /// are recursively expanding a type (IncompleteCount != 0) and the cached
7507 /// encoding is Recursive, return an empty StringRef.
7508 StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
7509   if (!ID)
7510     return StringRef(); // We have no key.
7511   auto I = Map.find(ID);
7512   if (I == Map.end())
7513     return StringRef(); // We have no encoding.
7514   Entry &E = I->second;
7515   if (E.State == Recursive && IncompleteCount)
7516     return StringRef(); // We don't use Recursive encodings for member types.
7517
7518   if (E.State == Incomplete) {
7519     // The incomplete type is being used to break out of recursion.
7520     E.State = IncompleteUsed;
7521     ++IncompleteUsedCount;
7522   }
7523   return E.Str.c_str();
7524 }
7525
7526 /// The XCore ABI includes a type information section that communicates symbol
7527 /// type information to the linker. The linker uses this information to verify
7528 /// safety/correctness of things such as array bounds and pointers et al.
7529 /// The ABI only requires C (and XC) language modules to emit TypeStrings. 7530 /// This type information (TypeString) is emitted into meta data for all global 7531 /// symbols: definitions, declarations, functions & variables. 7532 /// 7533 /// The TypeString carries type, qualifier, name, size & value details. 7534 /// Please see 'Tools Development Guide' section 2.16.2 for format details: 7535 /// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf 7536 /// The output is tested by test/CodeGen/xcore-stringtype.c. 7537 /// 7538 static bool getTypeString(SmallStringEnc &Enc, const Decl *D, 7539 CodeGen::CodeGenModule &CGM, TypeStringCache &TSC); 7540 7541 /// XCore uses emitTargetMD to emit TypeString metadata for global symbols. 7542 void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV, 7543 CodeGen::CodeGenModule &CGM) const { 7544 SmallStringEnc Enc; 7545 if (getTypeString(Enc, D, CGM, TSC)) { 7546 llvm::LLVMContext &Ctx = CGM.getModule().getContext(); 7547 llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV), 7548 llvm::MDString::get(Ctx, Enc.str())}; 7549 llvm::NamedMDNode *MD = 7550 CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings"); 7551 MD->addOperand(llvm::MDNode::get(Ctx, MDVals)); 7552 } 7553 } 7554 7555 //===----------------------------------------------------------------------===// 7556 // SPIR ABI Implementation 7557 //===----------------------------------------------------------------------===// 7558 7559 namespace { 7560 class SPIRTargetCodeGenInfo : public TargetCodeGenInfo { 7561 public: 7562 SPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 7563 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 7564 void emitTargetMD(const Decl *D, llvm::GlobalValue *GV, 7565 CodeGen::CodeGenModule &M) const override; 7566 unsigned getOpenCLKernelCallingConv() const override; 7567 }; 7568 } // End anonymous namespace. 7569 7570 /// Emit SPIR specific metadata: OpenCL and SPIR version. 7571 void SPIRTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV, 7572 CodeGen::CodeGenModule &CGM) const { 7573 llvm::LLVMContext &Ctx = CGM.getModule().getContext(); 7574 llvm::Type *Int32Ty = llvm::Type::getInt32Ty(Ctx); 7575 llvm::Module &M = CGM.getModule(); 7576 // SPIR v2.0 s2.12 - The SPIR version used by the module is stored in the 7577 // opencl.spir.version named metadata. 7578 llvm::Metadata *SPIRVerElts[] = { 7579 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, 2)), 7580 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, 0))}; 7581 llvm::NamedMDNode *SPIRVerMD = 7582 M.getOrInsertNamedMetadata("opencl.spir.version"); 7583 SPIRVerMD->addOperand(llvm::MDNode::get(Ctx, SPIRVerElts)); 7584 appendOpenCLVersionMD(CGM); 7585 } 7586 7587 static void appendOpenCLVersionMD (CodeGen::CodeGenModule &CGM) { 7588 llvm::LLVMContext &Ctx = CGM.getModule().getContext(); 7589 llvm::Type *Int32Ty = llvm::Type::getInt32Ty(Ctx); 7590 llvm::Module &M = CGM.getModule(); 7591 // SPIR v2.0 s2.13 - The OpenCL version used by the module is stored in the 7592 // opencl.ocl.version named metadata node. 
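  // For example (a sketch, not from the original source), compiling with
  // -cl-std=CL2.0 (OpenCLVersion == 200) yields:
  //
  //   !opencl.ocl.version = !{!0}
  //   !0 = !{i32 2, i32 0}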
7593 llvm::Metadata *OCLVerElts[] = { 7594 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get( 7595 Int32Ty, CGM.getLangOpts().OpenCLVersion / 100)), 7596 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get( 7597 Int32Ty, (CGM.getLangOpts().OpenCLVersion % 100) / 10))}; 7598 llvm::NamedMDNode *OCLVerMD = 7599 M.getOrInsertNamedMetadata("opencl.ocl.version"); 7600 OCLVerMD->addOperand(llvm::MDNode::get(Ctx, OCLVerElts)); 7601 } 7602 7603 unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const { 7604 return llvm::CallingConv::SPIR_KERNEL; 7605 } 7606 7607 static bool appendType(SmallStringEnc &Enc, QualType QType, 7608 const CodeGen::CodeGenModule &CGM, 7609 TypeStringCache &TSC); 7610 7611 /// Helper function for appendRecordType(). 7612 /// Builds a SmallVector containing the encoded field types in declaration 7613 /// order. 7614 static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE, 7615 const RecordDecl *RD, 7616 const CodeGen::CodeGenModule &CGM, 7617 TypeStringCache &TSC) { 7618 for (const auto *Field : RD->fields()) { 7619 SmallStringEnc Enc; 7620 Enc += "m("; 7621 Enc += Field->getName(); 7622 Enc += "){"; 7623 if (Field->isBitField()) { 7624 Enc += "b("; 7625 llvm::raw_svector_ostream OS(Enc); 7626 OS << Field->getBitWidthValue(CGM.getContext()); 7627 Enc += ':'; 7628 } 7629 if (!appendType(Enc, Field->getType(), CGM, TSC)) 7630 return false; 7631 if (Field->isBitField()) 7632 Enc += ')'; 7633 Enc += '}'; 7634 FE.emplace_back(!Field->getName().empty(), Enc); 7635 } 7636 return true; 7637 } 7638 7639 /// Appends structure and union types to Enc and adds encoding to cache. 7640 /// Recursively calls appendType (via extractFieldType) for each field. 7641 /// Union types have their fields ordered according to the ABI. 7642 static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT, 7643 const CodeGen::CodeGenModule &CGM, 7644 TypeStringCache &TSC, const IdentifierInfo *ID) { 7645 // Append the cached TypeString if we have one. 7646 StringRef TypeString = TSC.lookupStr(ID); 7647 if (!TypeString.empty()) { 7648 Enc += TypeString; 7649 return true; 7650 } 7651 7652 // Start to emit an incomplete TypeString. 7653 size_t Start = Enc.size(); 7654 Enc += (RT->isUnionType()? 'u' : 's'); 7655 Enc += '('; 7656 if (ID) 7657 Enc += ID->getName(); 7658 Enc += "){"; 7659 7660 // We collect all encoded fields and order as necessary. 7661 bool IsRecursive = false; 7662 const RecordDecl *RD = RT->getDecl()->getDefinition(); 7663 if (RD && !RD->field_empty()) { 7664 // An incomplete TypeString stub is placed in the cache for this RecordType 7665 // so that recursive calls to this RecordType will use it whilst building a 7666 // complete TypeString for this RecordType. 7667 SmallVector<FieldEncoding, 16> FE; 7668 std::string StubEnc(Enc.substr(Start).str()); 7669 StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString. 7670 TSC.addIncomplete(ID, std::move(StubEnc)); 7671 if (!extractFieldType(FE, RD, CGM, TSC)) { 7672 (void) TSC.removeIncomplete(ID); 7673 return false; 7674 } 7675 IsRecursive = TSC.removeIncomplete(ID); 7676 // The ABI requires unions to be sorted but not structures. 7677 // See FieldEncoding::operator< for sort algorithm. 7678 if (RT->isUnionType()) 7679 std::sort(FE.begin(), FE.end()); 7680 // We can now complete the TypeString. 
7681 unsigned E = FE.size(); 7682 for (unsigned I = 0; I != E; ++I) { 7683 if (I) 7684 Enc += ','; 7685 Enc += FE[I].str(); 7686 } 7687 } 7688 Enc += '}'; 7689 TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive); 7690 return true; 7691 } 7692 7693 /// Appends enum types to Enc and adds the encoding to the cache. 7694 static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET, 7695 TypeStringCache &TSC, 7696 const IdentifierInfo *ID) { 7697 // Append the cached TypeString if we have one. 7698 StringRef TypeString = TSC.lookupStr(ID); 7699 if (!TypeString.empty()) { 7700 Enc += TypeString; 7701 return true; 7702 } 7703 7704 size_t Start = Enc.size(); 7705 Enc += "e("; 7706 if (ID) 7707 Enc += ID->getName(); 7708 Enc += "){"; 7709 7710 // We collect all encoded enumerations and order them alphanumerically. 7711 if (const EnumDecl *ED = ET->getDecl()->getDefinition()) { 7712 SmallVector<FieldEncoding, 16> FE; 7713 for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E; 7714 ++I) { 7715 SmallStringEnc EnumEnc; 7716 EnumEnc += "m("; 7717 EnumEnc += I->getName(); 7718 EnumEnc += "){"; 7719 I->getInitVal().toString(EnumEnc); 7720 EnumEnc += '}'; 7721 FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc)); 7722 } 7723 std::sort(FE.begin(), FE.end()); 7724 unsigned E = FE.size(); 7725 for (unsigned I = 0; I != E; ++I) { 7726 if (I) 7727 Enc += ','; 7728 Enc += FE[I].str(); 7729 } 7730 } 7731 Enc += '}'; 7732 TSC.addIfComplete(ID, Enc.substr(Start), false); 7733 return true; 7734 } 7735 7736 /// Appends type's qualifier to Enc. 7737 /// This is done prior to appending the type's encoding. 7738 static void appendQualifier(SmallStringEnc &Enc, QualType QT) { 7739 // Qualifiers are emitted in alphabetical order. 7740 static const char *const Table[]={"","c:","r:","cr:","v:","cv:","rv:","crv:"}; 7741 int Lookup = 0; 7742 if (QT.isConstQualified()) 7743 Lookup += 1<<0; 7744 if (QT.isRestrictQualified()) 7745 Lookup += 1<<1; 7746 if (QT.isVolatileQualified()) 7747 Lookup += 1<<2; 7748 Enc += Table[Lookup]; 7749 } 7750 7751 /// Appends built-in types to Enc. 7752 static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) { 7753 const char *EncType; 7754 switch (BT->getKind()) { 7755 case BuiltinType::Void: 7756 EncType = "0"; 7757 break; 7758 case BuiltinType::Bool: 7759 EncType = "b"; 7760 break; 7761 case BuiltinType::Char_U: 7762 EncType = "uc"; 7763 break; 7764 case BuiltinType::UChar: 7765 EncType = "uc"; 7766 break; 7767 case BuiltinType::SChar: 7768 EncType = "sc"; 7769 break; 7770 case BuiltinType::UShort: 7771 EncType = "us"; 7772 break; 7773 case BuiltinType::Short: 7774 EncType = "ss"; 7775 break; 7776 case BuiltinType::UInt: 7777 EncType = "ui"; 7778 break; 7779 case BuiltinType::Int: 7780 EncType = "si"; 7781 break; 7782 case BuiltinType::ULong: 7783 EncType = "ul"; 7784 break; 7785 case BuiltinType::Long: 7786 EncType = "sl"; 7787 break; 7788 case BuiltinType::ULongLong: 7789 EncType = "ull"; 7790 break; 7791 case BuiltinType::LongLong: 7792 EncType = "sll"; 7793 break; 7794 case BuiltinType::Float: 7795 EncType = "ft"; 7796 break; 7797 case BuiltinType::Double: 7798 EncType = "d"; 7799 break; 7800 case BuiltinType::LongDouble: 7801 EncType = "ld"; 7802 break; 7803 default: 7804 return false; 7805 } 7806 Enc += EncType; 7807 return true; 7808 } 7809 7810 /// Appends a pointer encoding to Enc before calling appendType for the pointee. 
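/// For illustration (a sketch, not from the original source): 'int *'
/// encodes as "p(si)" and 'const int *' as "p(c:si)", since the pointee's
/// qualifiers are emitted by appendType() before its builtin encoding.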
7811 static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT, 7812 const CodeGen::CodeGenModule &CGM, 7813 TypeStringCache &TSC) { 7814 Enc += "p("; 7815 if (!appendType(Enc, PT->getPointeeType(), CGM, TSC)) 7816 return false; 7817 Enc += ')'; 7818 return true; 7819 } 7820 7821 /// Appends array encoding to Enc before calling appendType for the element. 7822 static bool appendArrayType(SmallStringEnc &Enc, QualType QT, 7823 const ArrayType *AT, 7824 const CodeGen::CodeGenModule &CGM, 7825 TypeStringCache &TSC, StringRef NoSizeEnc) { 7826 if (AT->getSizeModifier() != ArrayType::Normal) 7827 return false; 7828 Enc += "a("; 7829 if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) 7830 CAT->getSize().toStringUnsigned(Enc); 7831 else 7832 Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "". 7833 Enc += ':'; 7834 // The Qualifiers should be attached to the type rather than the array. 7835 appendQualifier(Enc, QT); 7836 if (!appendType(Enc, AT->getElementType(), CGM, TSC)) 7837 return false; 7838 Enc += ')'; 7839 return true; 7840 } 7841 7842 /// Appends a function encoding to Enc, calling appendType for the return type 7843 /// and the arguments. 7844 static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT, 7845 const CodeGen::CodeGenModule &CGM, 7846 TypeStringCache &TSC) { 7847 Enc += "f{"; 7848 if (!appendType(Enc, FT->getReturnType(), CGM, TSC)) 7849 return false; 7850 Enc += "}("; 7851 if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) { 7852 // N.B. we are only interested in the adjusted param types. 7853 auto I = FPT->param_type_begin(); 7854 auto E = FPT->param_type_end(); 7855 if (I != E) { 7856 do { 7857 if (!appendType(Enc, *I, CGM, TSC)) 7858 return false; 7859 ++I; 7860 if (I != E) 7861 Enc += ','; 7862 } while (I != E); 7863 if (FPT->isVariadic()) 7864 Enc += ",va"; 7865 } else { 7866 if (FPT->isVariadic()) 7867 Enc += "va"; 7868 else 7869 Enc += '0'; 7870 } 7871 } 7872 Enc += ')'; 7873 return true; 7874 } 7875 7876 /// Handles the type's qualifier before dispatching a call to handle specific 7877 /// type encodings. 7878 static bool appendType(SmallStringEnc &Enc, QualType QType, 7879 const CodeGen::CodeGenModule &CGM, 7880 TypeStringCache &TSC) { 7881 7882 QualType QT = QType.getCanonicalType(); 7883 7884 if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) 7885 // The Qualifiers should be attached to the type rather than the array. 7886 // Thus we don't call appendQualifier() here. 
/// Handles the type's qualifier before dispatching a call to handle specific
/// type encodings.
static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC) {

  QualType QT = QType.getCanonicalType();

  if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
    // The Qualifiers should be attached to the type rather than the array.
    // Thus we don't call appendQualifier() here.
    return appendArrayType(Enc, QT, AT, CGM, TSC, "");

  appendQualifier(Enc, QT);

  if (const BuiltinType *BT = QT->getAs<BuiltinType>())
    return appendBuiltinType(Enc, BT);

  if (const PointerType *PT = QT->getAs<PointerType>())
    return appendPointerType(Enc, PT, CGM, TSC);

  if (const EnumType *ET = QT->getAs<EnumType>())
    return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsStructureType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsUnionType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const FunctionType *FT = QT->getAs<FunctionType>())
    return appendFunctionType(Enc, FT, CGM, TSC);

  return false;
}

static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) {
  if (!D)
    return false;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    return appendType(Enc, FD->getType(), CGM, TSC);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    if (VD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    QualType QT = VD->getType().getCanonicalType();
    if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
      // Global ArrayTypes are given a size of '*' if the size is unknown.
      // The Qualifiers should be attached to the type rather than the array.
      // Thus we don't call appendQualifier() here.
      return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
    }
    return appendType(Enc, QT, CGM, TSC);
  }
  return false;
}
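// Illustrative example (hypothetical declaration): given C language linkage,
//   extern int F(int);
// getTypeString() yields "f{si}(si)"; declarations with C++ language linkage
// are rejected and no type string is emitted.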
//===----------------------------------------------------------------------===//
// Driver code
//===----------------------------------------------------------------------===//

const llvm::Triple &CodeGenModule::getTriple() const {
  return getTarget().getTriple();
}

bool CodeGenModule::supportsCOMDAT() const {
  return getTriple().supportsCOMDAT();
}

const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  // Helper to set the unique_ptr while still keeping the return value.
  auto SetCGInfo = [&](TargetCodeGenInfo *P) -> const TargetCodeGenInfo & {
    this->TheTargetCodeGenInfo.reset(P);
    return *P;
  };

  const llvm::Triple &Triple = getTarget().getTriple();
  switch (Triple.getArch()) {
  default:
    return SetCGInfo(new DefaultTargetCodeGenInfo(Types));

  case llvm::Triple::le32:
    return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    if (Triple.getOS() == llvm::Triple::NaCl)
      return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
    return SetCGInfo(new MIPSTargetCodeGenInfo(Types, true));

  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return SetCGInfo(new MIPSTargetCodeGenInfo(Types, false));

  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_be: {
    AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
    if (getTarget().getABI() == "darwinpcs")
      Kind = AArch64ABIInfo::DarwinPCS;

    return SetCGInfo(new AArch64TargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::wasm32:
  case llvm::Triple::wasm64:
    return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types));

  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb: {
    if (Triple.getOS() == llvm::Triple::Win32) {
      return SetCGInfo(
          new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP));
    }

    ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
    StringRef ABIStr = getTarget().getABI();
    if (ABIStr == "apcs-gnu")
      Kind = ARMABIInfo::APCS;
    else if (ABIStr == "aapcs16")
      Kind = ARMABIInfo::AAPCS16_VFP;
    else if (CodeGenOpts.FloatABI == "hard" ||
             (CodeGenOpts.FloatABI != "soft" &&
              (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
               Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
               Triple.getEnvironment() == llvm::Triple::EABIHF)))
      Kind = ARMABIInfo::AAPCS_VFP;

    return SetCGInfo(new ARMTargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::ppc:
    return SetCGInfo(
        new PPC32TargetCodeGenInfo(Types, CodeGenOpts.FloatABI == "soft"));
  case llvm::Triple::ppc64:
    if (Triple.isOSBinFormatELF()) {
      PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
      if (getTarget().getABI() == "elfv2")
        Kind = PPC64_SVR4_ABIInfo::ELFv2;
      bool HasQPX = getTarget().getABI() == "elfv1-qpx";

      return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX));
    } else
      return SetCGInfo(new PPC64TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64le: {
    assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
    PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
    if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx")
      Kind = PPC64_SVR4_ABIInfo::ELFv1;
    bool HasQPX = getTarget().getABI() == "elfv1-qpx";

    return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX));
  }

  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return SetCGInfo(new NVPTXTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return SetCGInfo(new MSP430TargetCodeGenInfo(Types));

  case llvm::Triple::systemz: {
    bool HasVector = getTarget().getABI() == "vector";
    return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector));
  }

  case llvm::Triple::tce:
    return SetCGInfo(new TCETargetCodeGenInfo(Types));

  case llvm::Triple::x86: {
    bool IsDarwinVectorABI = Triple.isOSDarwin();
    bool RetSmallStructInRegABI =
        X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
    bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();

    if (Triple.getOS() == llvm::Triple::Win32) {
      return SetCGInfo(new WinX86_32TargetCodeGenInfo(
          Types, IsDarwinVectorABI, RetSmallStructInRegABI,
          IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
    } else {
      return SetCGInfo(new X86_32TargetCodeGenInfo(
          Types, IsDarwinVectorABI, RetSmallStructInRegABI,
          IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
          CodeGenOpts.FloatABI == "soft"));
    }
  }

  case llvm::Triple::x86_64: {
    StringRef ABI = getTarget().getABI();
    X86AVXABILevel AVXLevel =
        (ABI == "avx512"
             ? X86AVXABILevel::AVX512
             : ABI == "avx" ? X86AVXABILevel::AVX : X86AVXABILevel::None);

    switch (Triple.getOS()) {
    case llvm::Triple::Win32:
      return SetCGInfo(new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
    case llvm::Triple::PS4:
      return SetCGInfo(new PS4TargetCodeGenInfo(Types, AVXLevel));
    default:
      return SetCGInfo(new X86_64TargetCodeGenInfo(Types, AVXLevel));
    }
  }
  case llvm::Triple::hexagon:
    return SetCGInfo(new HexagonTargetCodeGenInfo(Types));
  case llvm::Triple::lanai:
    return SetCGInfo(new LanaiTargetCodeGenInfo(Types));
  case llvm::Triple::r600:
    return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::amdgcn:
    return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::sparc:
    return SetCGInfo(new SparcV8TargetCodeGenInfo(Types));
  case llvm::Triple::sparcv9:
    return SetCGInfo(new SparcV9TargetCodeGenInfo(Types));
  case llvm::Triple::xcore:
    return SetCGInfo(new XCoreTargetCodeGenInfo(Types));
  case llvm::Triple::spir:
  case llvm::Triple::spir64:
    return SetCGInfo(new SPIRTargetCodeGenInfo(Types));
  }
}
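// Illustrative dispatch examples (hypothetical triples, derived from the
// switch above):
//  - armv7--linux-gnueabihf with no explicit float ABI option selects
//    ARMABIInfo::AAPCS_VFP via the GNUEABIHF environment check.
//  - mipsel--nacl is routed to PNaClTargetCodeGenInfo rather than the MIPS
//    lowering.
// In all cases the TargetCodeGenInfo is constructed once and cached in
// TheTargetCodeGenInfo; subsequent calls return the cached instance.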