1 //===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // These classes wrap the information about a call or function 11 // definition used to handle ABI compliancy. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "TargetInfo.h" 16 #include "ABIInfo.h" 17 #include "CodeGenFunction.h" 18 #include "clang/AST/RecordLayout.h" 19 #include "clang/Frontend/CodeGenOptions.h" 20 #include "llvm/Type.h" 21 #include "llvm/Target/TargetData.h" 22 #include "llvm/ADT/Triple.h" 23 #include "llvm/Support/raw_ostream.h" 24 using namespace clang; 25 using namespace CodeGen; 26 27 static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, 28 llvm::Value *Array, 29 llvm::Value *Value, 30 unsigned FirstIndex, 31 unsigned LastIndex) { 32 // Alternatively, we could emit this as a loop in the source. 33 for (unsigned I = FirstIndex; I <= LastIndex; ++I) { 34 llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I); 35 Builder.CreateStore(Value, Cell); 36 } 37 } 38 39 static bool isAggregateTypeForABI(QualType T) { 40 return CodeGenFunction::hasAggregateLLVMType(T) || 41 T->isMemberFunctionPointerType(); 42 } 43 44 ABIInfo::~ABIInfo() {} 45 46 ASTContext &ABIInfo::getContext() const { 47 return CGT.getContext(); 48 } 49 50 llvm::LLVMContext &ABIInfo::getVMContext() const { 51 return CGT.getLLVMContext(); 52 } 53 54 const llvm::TargetData &ABIInfo::getTargetData() const { 55 return CGT.getTargetData(); 56 } 57 58 59 void ABIArgInfo::dump() const { 60 llvm::raw_ostream &OS = llvm::errs(); 61 OS << "(ABIArgInfo Kind="; 62 switch (TheKind) { 63 case Direct: 64 OS << "Direct Type="; 65 if (const llvm::Type *Ty = getCoerceToType()) 66 Ty->print(OS); 67 else 68 OS << "null"; 69 break; 70 case Extend: 71 OS << "Extend"; 72 break; 73 case Ignore: 74 OS << "Ignore"; 75 break; 76 case Indirect: 77 OS << "Indirect Align=" << getIndirectAlign() 78 << " Byal=" << getIndirectByVal() 79 << " Realign=" << getIndirectRealign(); 80 break; 81 case Expand: 82 OS << "Expand"; 83 break; 84 } 85 OS << ")\n"; 86 } 87 88 TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; } 89 90 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays); 91 92 /// isEmptyField - Return true iff a the field is "empty", that is it 93 /// is an unnamed bit-field or an (array of) empty record(s). 94 static bool isEmptyField(ASTContext &Context, const FieldDecl *FD, 95 bool AllowArrays) { 96 if (FD->isUnnamedBitfield()) 97 return true; 98 99 QualType FT = FD->getType(); 100 101 // Constant arrays of empty records count as empty, strip them off. 102 if (AllowArrays) 103 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) 104 FT = AT->getElementType(); 105 106 const RecordType *RT = FT->getAs<RecordType>(); 107 if (!RT) 108 return false; 109 110 // C++ record fields are never empty, at least in the Itanium ABI. 111 // 112 // FIXME: We should use a predicate for whether this behavior is true in the 113 // current ABI. 114 if (isa<CXXRecordDecl>(RT->getDecl())) 115 return false; 116 117 return isEmptyRecord(Context, FT, AllowArrays); 118 } 119 120 /// isEmptyRecord - Return true iff a structure contains only empty 121 /// fields. 
Note that a structure with a flexible array member is not 122 /// considered empty. 123 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) { 124 const RecordType *RT = T->getAs<RecordType>(); 125 if (!RT) 126 return 0; 127 const RecordDecl *RD = RT->getDecl(); 128 if (RD->hasFlexibleArrayMember()) 129 return false; 130 131 // If this is a C++ record, check the bases first. 132 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 133 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 134 e = CXXRD->bases_end(); i != e; ++i) 135 if (!isEmptyRecord(Context, i->getType(), true)) 136 return false; 137 138 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 139 i != e; ++i) 140 if (!isEmptyField(Context, *i, AllowArrays)) 141 return false; 142 return true; 143 } 144 145 /// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either 146 /// a non-trivial destructor or a non-trivial copy constructor. 147 static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) { 148 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); 149 if (!RD) 150 return false; 151 152 return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor(); 153 } 154 155 /// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is 156 /// a record type with either a non-trivial destructor or a non-trivial copy 157 /// constructor. 158 static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) { 159 const RecordType *RT = T->getAs<RecordType>(); 160 if (!RT) 161 return false; 162 163 return hasNonTrivialDestructorOrCopyConstructor(RT); 164 } 165 166 /// isSingleElementStruct - Determine if a structure is a "single 167 /// element struct", i.e. it has exactly one non-empty field or 168 /// exactly one field which is itself a single element 169 /// struct. Structures with flexible array members are never 170 /// considered single element structs. 171 /// 172 /// \return The field declaration for the single non-empty field, if 173 /// it exists. 174 static const Type *isSingleElementStruct(QualType T, ASTContext &Context) { 175 const RecordType *RT = T->getAsStructureType(); 176 if (!RT) 177 return 0; 178 179 const RecordDecl *RD = RT->getDecl(); 180 if (RD->hasFlexibleArrayMember()) 181 return 0; 182 183 const Type *Found = 0; 184 185 // If this is a C++ record, check the bases first. 186 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 187 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 188 e = CXXRD->bases_end(); i != e; ++i) { 189 // Ignore empty records. 190 if (isEmptyRecord(Context, i->getType(), true)) 191 continue; 192 193 // If we already found an element then this isn't a single-element struct. 194 if (Found) 195 return 0; 196 197 // If this is non-empty and not a single element struct, the composite 198 // cannot be a single element struct. 199 Found = isSingleElementStruct(i->getType(), Context); 200 if (!Found) 201 return 0; 202 } 203 } 204 205 // Check for single element. 206 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 207 i != e; ++i) { 208 const FieldDecl *FD = *i; 209 QualType FT = FD->getType(); 210 211 // Ignore empty fields. 212 if (isEmptyField(Context, FD, true)) 213 continue; 214 215 // If we already found an element then this isn't a single-element 216 // struct. 217 if (Found) 218 return 0; 219 220 // Treat single element arrays as the element. 
221 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) { 222 if (AT->getSize().getZExtValue() != 1) 223 break; 224 FT = AT->getElementType(); 225 } 226 227 if (!isAggregateTypeForABI(FT)) { 228 Found = FT.getTypePtr(); 229 } else { 230 Found = isSingleElementStruct(FT, Context); 231 if (!Found) 232 return 0; 233 } 234 } 235 236 return Found; 237 } 238 239 static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) { 240 if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() && 241 !Ty->isAnyComplexType() && !Ty->isEnumeralType() && 242 !Ty->isBlockPointerType()) 243 return false; 244 245 uint64_t Size = Context.getTypeSize(Ty); 246 return Size == 32 || Size == 64; 247 } 248 249 /// canExpandIndirectArgument - Test whether an argument type which is to be 250 /// passed indirectly (on the stack) would have the equivalent layout if it was 251 /// expanded into separate arguments. If so, we prefer to do the latter to avoid 252 /// inhibiting optimizations. 253 /// 254 // FIXME: This predicate is missing many cases, currently it just follows 255 // llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We 256 // should probably make this smarter, or better yet make the LLVM backend 257 // capable of handling it. 258 static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) { 259 // We can only expand structure types. 260 const RecordType *RT = Ty->getAs<RecordType>(); 261 if (!RT) 262 return false; 263 264 // We can only expand (C) structures. 265 // 266 // FIXME: This needs to be generalized to handle classes as well. 267 const RecordDecl *RD = RT->getDecl(); 268 if (!RD->isStruct() || isa<CXXRecordDecl>(RD)) 269 return false; 270 271 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 272 i != e; ++i) { 273 const FieldDecl *FD = *i; 274 275 if (!is32Or64BitBasicType(FD->getType(), Context)) 276 return false; 277 278 // FIXME: Reject bit-fields wholesale; there are two problems, we don't know 279 // how to expand them yet, and the predicate for telling if a bitfield still 280 // counts as "basic" is more complicated than what we were doing previously. 281 if (FD->isBitField()) 282 return false; 283 } 284 285 return true; 286 } 287 288 namespace { 289 /// DefaultABIInfo - The default implementation for ABI specific 290 /// details. This implementation provides information which results in 291 /// self-consistent and sensible LLVM IR generation, but does not 292 /// conform to any particular ABI. 
293 class DefaultABIInfo : public ABIInfo { 294 public: 295 DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} 296 297 ABIArgInfo classifyReturnType(QualType RetTy) const; 298 ABIArgInfo classifyArgumentType(QualType RetTy) const; 299 300 virtual void computeInfo(CGFunctionInfo &FI) const { 301 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 302 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 303 it != ie; ++it) 304 it->info = classifyArgumentType(it->type); 305 } 306 307 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 308 CodeGenFunction &CGF) const; 309 }; 310 311 class DefaultTargetCodeGenInfo : public TargetCodeGenInfo { 312 public: 313 DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 314 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {} 315 }; 316 317 llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 318 CodeGenFunction &CGF) const { 319 return 0; 320 } 321 322 ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const { 323 if (isAggregateTypeForABI(Ty)) 324 return ABIArgInfo::getIndirect(0); 325 326 // Treat an enum type as its underlying type. 327 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 328 Ty = EnumTy->getDecl()->getIntegerType(); 329 330 return (Ty->isPromotableIntegerType() ? 331 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 332 } 333 334 ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const { 335 if (RetTy->isVoidType()) 336 return ABIArgInfo::getIgnore(); 337 338 if (isAggregateTypeForABI(RetTy)) 339 return ABIArgInfo::getIndirect(0); 340 341 // Treat an enum type as its underlying type. 342 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 343 RetTy = EnumTy->getDecl()->getIntegerType(); 344 345 return (RetTy->isPromotableIntegerType() ? 346 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 347 } 348 349 /// UseX86_MMXType - Return true if this is an MMX type that should use the special 350 /// x86_mmx type. 351 bool UseX86_MMXType(const llvm::Type *IRType) { 352 // If the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>, use the 353 // special x86_mmx type. 354 return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 && 355 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() && 356 IRType->getScalarSizeInBits() != 64; 357 } 358 359 static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF, 360 llvm::StringRef Constraint, 361 llvm::Type* Ty) { 362 if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) 363 return llvm::Type::getX86_MMXTy(CGF.getLLVMContext()); 364 return Ty; 365 } 366 367 //===----------------------------------------------------------------------===// 368 // X86-32 ABI Implementation 369 //===----------------------------------------------------------------------===// 370 371 /// X86_32ABIInfo - The X86-32 ABI information. 372 class X86_32ABIInfo : public ABIInfo { 373 static const unsigned MinABIStackAlignInBytes = 4; 374 375 bool IsDarwinVectorABI; 376 bool IsSmallStructInRegABI; 377 bool IsMMXDisabled; 378 379 static bool isRegisterSize(unsigned Size) { 380 return (Size == 8 || Size == 16 || Size == 32 || Size == 64); 381 } 382 383 static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context); 384 385 /// getIndirectResult - Give a source type \arg Ty, return a suitable result 386 /// such that the argument will be passed in memory. 
387 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal = true) const; 388 389 /// \brief Return the alignment to use for the given type on the stack. 390 unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const; 391 392 public: 393 394 ABIArgInfo classifyReturnType(QualType RetTy) const; 395 ABIArgInfo classifyArgumentType(QualType RetTy) const; 396 397 virtual void computeInfo(CGFunctionInfo &FI) const { 398 FI.getReturnInfo() = classifyReturnType(FI.getReturnType()); 399 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); 400 it != ie; ++it) 401 it->info = classifyArgumentType(it->type); 402 } 403 404 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 405 CodeGenFunction &CGF) const; 406 407 X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m) 408 : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p), 409 IsMMXDisabled(m) {} 410 }; 411 412 class X86_32TargetCodeGenInfo : public TargetCodeGenInfo { 413 public: 414 X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m) 415 :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m)) {} 416 417 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, 418 CodeGen::CodeGenModule &CGM) const; 419 420 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const { 421 // Darwin uses different dwarf register numbers for EH. 422 if (CGM.isTargetDarwin()) return 5; 423 424 return 4; 425 } 426 427 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 428 llvm::Value *Address) const; 429 430 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF, 431 llvm::StringRef Constraint, 432 llvm::Type* Ty) const { 433 return X86AdjustInlineAsmType(CGF, Constraint, Ty); 434 } 435 436 }; 437 438 } 439 440 /// shouldReturnTypeInRegister - Determine if the given type should be 441 /// passed in a register (for the Darwin ABI). 442 bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty, 443 ASTContext &Context) { 444 uint64_t Size = Context.getTypeSize(Ty); 445 446 // Type must be register sized. 447 if (!isRegisterSize(Size)) 448 return false; 449 450 if (Ty->isVectorType()) { 451 // 64- and 128- bit vectors inside structures are not returned in 452 // registers. 453 if (Size == 64 || Size == 128) 454 return false; 455 456 return true; 457 } 458 459 // If this is a builtin, pointer, enum, complex type, member pointer, or 460 // member function pointer it is ok. 461 if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() || 462 Ty->isAnyComplexType() || Ty->isEnumeralType() || 463 Ty->isBlockPointerType() || Ty->isMemberPointerType()) 464 return true; 465 466 // Arrays are treated like records. 467 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) 468 return shouldReturnTypeInRegister(AT->getElementType(), Context); 469 470 // Otherwise, it must be a record type. 471 const RecordType *RT = Ty->getAs<RecordType>(); 472 if (!RT) return false; 473 474 // FIXME: Traverse bases here too. 475 476 // Structure types are passed in register if all fields would be 477 // passed in a register. 478 for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(), 479 e = RT->getDecl()->field_end(); i != e; ++i) { 480 const FieldDecl *FD = *i; 481 482 // Empty fields are ignored. 483 if (isEmptyField(Context, FD, true)) 484 continue; 485 486 // Check fields recursively. 
487 if (!shouldReturnTypeInRegister(FD->getType(), Context)) 488 return false; 489 } 490 491 return true; 492 } 493 494 ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy) const { 495 if (RetTy->isVoidType()) 496 return ABIArgInfo::getIgnore(); 497 498 if (const VectorType *VT = RetTy->getAs<VectorType>()) { 499 // On Darwin, some vectors are returned in registers. 500 if (IsDarwinVectorABI) { 501 uint64_t Size = getContext().getTypeSize(RetTy); 502 503 // 128-bit vectors are a special case; they are returned in 504 // registers and we need to make sure to pick a type the LLVM 505 // backend will like. 506 if (Size == 128) 507 return ABIArgInfo::getDirect(llvm::VectorType::get( 508 llvm::Type::getInt64Ty(getVMContext()), 2)); 509 510 // Always return in register if it fits in a general purpose 511 // register, or if it is 64 bits and has a single element. 512 if ((Size == 8 || Size == 16 || Size == 32) || 513 (Size == 64 && VT->getNumElements() == 1)) 514 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 515 Size)); 516 517 return ABIArgInfo::getIndirect(0); 518 } 519 520 return ABIArgInfo::getDirect(); 521 } 522 523 if (isAggregateTypeForABI(RetTy)) { 524 if (const RecordType *RT = RetTy->getAs<RecordType>()) { 525 // Structures with either a non-trivial destructor or a non-trivial 526 // copy constructor are always indirect. 527 if (hasNonTrivialDestructorOrCopyConstructor(RT)) 528 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 529 530 // Structures with flexible arrays are always indirect. 531 if (RT->getDecl()->hasFlexibleArrayMember()) 532 return ABIArgInfo::getIndirect(0); 533 } 534 535 // If specified, structs and unions are always indirect. 536 if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType()) 537 return ABIArgInfo::getIndirect(0); 538 539 // Classify "single element" structs as their element type. 540 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext())) { 541 if (const BuiltinType *BT = SeltTy->getAs<BuiltinType>()) { 542 if (BT->isIntegerType()) { 543 // We need to use the size of the structure, padding 544 // bit-fields can adjust that to be larger than the single 545 // element type. 546 uint64_t Size = getContext().getTypeSize(RetTy); 547 return ABIArgInfo::getDirect( 548 llvm::IntegerType::get(getVMContext(), (unsigned)Size)); 549 } 550 551 if (BT->getKind() == BuiltinType::Float) { 552 assert(getContext().getTypeSize(RetTy) == 553 getContext().getTypeSize(SeltTy) && 554 "Unexpect single element structure size!"); 555 return ABIArgInfo::getDirect(llvm::Type::getFloatTy(getVMContext())); 556 } 557 558 if (BT->getKind() == BuiltinType::Double) { 559 assert(getContext().getTypeSize(RetTy) == 560 getContext().getTypeSize(SeltTy) && 561 "Unexpect single element structure size!"); 562 return ABIArgInfo::getDirect(llvm::Type::getDoubleTy(getVMContext())); 563 } 564 } else if (SeltTy->isPointerType()) { 565 // FIXME: It would be really nice if this could come out as the proper 566 // pointer type. 567 llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(getVMContext()); 568 return ABIArgInfo::getDirect(PtrTy); 569 } else if (SeltTy->isVectorType()) { 570 // 64- and 128-bit vectors are never returned in a 571 // register when inside a structure. 572 uint64_t Size = getContext().getTypeSize(RetTy); 573 if (Size == 64 || Size == 128) 574 return ABIArgInfo::getIndirect(0); 575 576 return classifyReturnType(QualType(SeltTy, 0)); 577 } 578 } 579 580 // Small structures which are register sized are generally returned 581 // in a register. 
582 if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext())) { 583 uint64_t Size = getContext().getTypeSize(RetTy); 584 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size)); 585 } 586 587 return ABIArgInfo::getIndirect(0); 588 } 589 590 // Treat an enum type as its underlying type. 591 if (const EnumType *EnumTy = RetTy->getAs<EnumType>()) 592 RetTy = EnumTy->getDecl()->getIntegerType(); 593 594 return (RetTy->isPromotableIntegerType() ? 595 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 596 } 597 598 static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) { 599 const RecordType *RT = Ty->getAs<RecordType>(); 600 if (!RT) 601 return 0; 602 const RecordDecl *RD = RT->getDecl(); 603 604 // If this is a C++ record, check the bases first. 605 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) 606 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 607 e = CXXRD->bases_end(); i != e; ++i) 608 if (!isRecordWithSSEVectorType(Context, i->getType())) 609 return false; 610 611 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 612 i != e; ++i) { 613 QualType FT = i->getType(); 614 615 if (FT->getAs<VectorType>() && Context.getTypeSize(Ty) == 128) 616 return true; 617 618 if (isRecordWithSSEVectorType(Context, FT)) 619 return true; 620 } 621 622 return false; 623 } 624 625 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty, 626 unsigned Align) const { 627 // Otherwise, if the alignment is less than or equal to the minimum ABI 628 // alignment, just use the default; the backend will handle this. 629 if (Align <= MinABIStackAlignInBytes) 630 return 0; // Use default alignment. 631 632 // On non-Darwin, the stack type alignment is always 4. 633 if (!IsDarwinVectorABI) { 634 // Set explicit alignment, since we may need to realign the top. 635 return MinABIStackAlignInBytes; 636 } 637 638 // Otherwise, if the type contains an SSE vector type, the alignment is 16. 639 if (isRecordWithSSEVectorType(getContext(), Ty)) 640 return 16; 641 642 return MinABIStackAlignInBytes; 643 } 644 645 ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal) const { 646 if (!ByVal) 647 return ABIArgInfo::getIndirect(0, false); 648 649 // Compute the byval alignment. 650 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8; 651 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign); 652 if (StackAlign == 0) 653 return ABIArgInfo::getIndirect(4); 654 655 // If the stack alignment is less than the type alignment, realign the 656 // argument. 657 if (StackAlign < TypeAlign) 658 return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true, 659 /*Realign=*/true); 660 661 return ABIArgInfo::getIndirect(StackAlign); 662 } 663 664 ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const { 665 // FIXME: Set alignment on indirect arguments. 666 if (isAggregateTypeForABI(Ty)) { 667 // Structures with flexible arrays are always indirect. 668 if (const RecordType *RT = Ty->getAs<RecordType>()) { 669 // Structures with either a non-trivial destructor or a non-trivial 670 // copy constructor are always indirect. 671 if (hasNonTrivialDestructorOrCopyConstructor(RT)) 672 return getIndirectResult(Ty, /*ByVal=*/false); 673 674 if (RT->getDecl()->hasFlexibleArrayMember()) 675 return getIndirectResult(Ty); 676 } 677 678 // Ignore empty structs. 
679 if (Ty->isStructureType() && getContext().getTypeSize(Ty) == 0) 680 return ABIArgInfo::getIgnore(); 681 682 // Expand small (<= 128-bit) record types when we know that the stack layout 683 // of those arguments will match the struct. This is important because the 684 // LLVM backend isn't smart enough to remove byval, which inhibits many 685 // optimizations. 686 if (getContext().getTypeSize(Ty) <= 4*32 && 687 canExpandIndirectArgument(Ty, getContext())) 688 return ABIArgInfo::getExpand(); 689 690 return getIndirectResult(Ty); 691 } 692 693 if (const VectorType *VT = Ty->getAs<VectorType>()) { 694 // On Darwin, some vectors are passed in memory, we handle this by passing 695 // it as an i8/i16/i32/i64. 696 if (IsDarwinVectorABI) { 697 uint64_t Size = getContext().getTypeSize(Ty); 698 if ((Size == 8 || Size == 16 || Size == 32) || 699 (Size == 64 && VT->getNumElements() == 1)) 700 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 701 Size)); 702 } 703 704 llvm::Type *IRType = CGT.ConvertType(Ty); 705 if (UseX86_MMXType(IRType)) { 706 if (IsMMXDisabled) 707 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 708 64)); 709 ABIArgInfo AAI = ABIArgInfo::getDirect(IRType); 710 AAI.setCoerceToType(llvm::Type::getX86_MMXTy(getVMContext())); 711 return AAI; 712 } 713 714 return ABIArgInfo::getDirect(); 715 } 716 717 718 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 719 Ty = EnumTy->getDecl()->getIntegerType(); 720 721 return (Ty->isPromotableIntegerType() ? 722 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 723 } 724 725 llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 726 CodeGenFunction &CGF) const { 727 const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); 728 const llvm::Type *BPP = llvm::PointerType::getUnqual(BP); 729 730 CGBuilderTy &Builder = CGF.Builder; 731 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, 732 "ap"); 733 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); 734 llvm::Type *PTy = 735 llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); 736 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); 737 738 uint64_t Offset = 739 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4); 740 llvm::Value *NextAddr = 741 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), 742 "ap.next"); 743 Builder.CreateStore(NextAddr, VAListAddrAsBPP); 744 745 return AddrTyped; 746 } 747 748 void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D, 749 llvm::GlobalValue *GV, 750 CodeGen::CodeGenModule &CGM) const { 751 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 752 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) { 753 // Get the LLVM function. 754 llvm::Function *Fn = cast<llvm::Function>(GV); 755 756 // Now add the 'alignstack' attribute with a value of 16. 757 Fn->addFnAttr(llvm::Attribute::constructStackAlignmentFromInt(16)); 758 } 759 } 760 } 761 762 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable( 763 CodeGen::CodeGenFunction &CGF, 764 llvm::Value *Address) const { 765 CodeGen::CGBuilderTy &Builder = CGF.Builder; 766 llvm::LLVMContext &Context = CGF.getLLVMContext(); 767 768 const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context); 769 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4); 770 771 // 0-7 are the eight integer registers; the order is different 772 // on Darwin (for EH), but the range is the same. 773 // 8 is %eip. 
774 AssignToArrayRange(Builder, Address, Four8, 0, 8); 775 776 if (CGF.CGM.isTargetDarwin()) { 777 // 12-16 are st(0..4). Not sure why we stop at 4. 778 // These have size 16, which is sizeof(long double) on 779 // platforms with 8-byte alignment for that type. 780 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16); 781 AssignToArrayRange(Builder, Address, Sixteen8, 12, 16); 782 783 } else { 784 // 9 is %eflags, which doesn't get a size on Darwin for some 785 // reason. 786 Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9)); 787 788 // 11-16 are st(0..5). Not sure why we stop at 5. 789 // These have size 12, which is sizeof(long double) on 790 // platforms with 4-byte alignment for that type. 791 llvm::Value *Twelve8 = llvm::ConstantInt::get(i8, 12); 792 AssignToArrayRange(Builder, Address, Twelve8, 11, 16); 793 } 794 795 return false; 796 } 797 798 //===----------------------------------------------------------------------===// 799 // X86-64 ABI Implementation 800 //===----------------------------------------------------------------------===// 801 802 803 namespace { 804 /// X86_64ABIInfo - The X86_64 ABI information. 805 class X86_64ABIInfo : public ABIInfo { 806 enum Class { 807 Integer = 0, 808 SSE, 809 SSEUp, 810 X87, 811 X87Up, 812 ComplexX87, 813 NoClass, 814 Memory 815 }; 816 817 /// merge - Implement the X86_64 ABI merging algorithm. 818 /// 819 /// Merge an accumulating classification \arg Accum with a field 820 /// classification \arg Field. 821 /// 822 /// \param Accum - The accumulating classification. This should 823 /// always be either NoClass or the result of a previous merge 824 /// call. In addition, this should never be Memory (the caller 825 /// should just return Memory for the aggregate). 826 static Class merge(Class Accum, Class Field); 827 828 /// postMerge - Implement the X86_64 ABI post merging algorithm. 829 /// 830 /// Post merger cleanup, reduces a malformed Hi and Lo pair to 831 /// final MEMORY or SSE classes when necessary. 832 /// 833 /// \param AggregateSize - The size of the current aggregate in 834 /// the classification process. 835 /// 836 /// \param Lo - The classification for the parts of the type 837 /// residing in the low word of the containing object. 838 /// 839 /// \param Hi - The classification for the parts of the type 840 /// residing in the higher words of the containing object. 841 /// 842 void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const; 843 844 /// classify - Determine the x86_64 register classes in which the 845 /// given type T should be passed. 846 /// 847 /// \param Lo - The classification for the parts of the type 848 /// residing in the low word of the containing object. 849 /// 850 /// \param Hi - The classification for the parts of the type 851 /// residing in the high word of the containing object. 852 /// 853 /// \param OffsetBase - The bit offset of this type in the 854 /// containing object. Some parameters are classified different 855 /// depending on whether they straddle an eightbyte boundary. 856 /// 857 /// If a word is unused its result will be NoClass; if a type should 858 /// be passed in Memory then at least the classification of \arg Lo 859 /// will be Memory. 860 /// 861 /// The \arg Lo class will be NoClass iff the argument is ignored. 862 /// 863 /// If the \arg Lo class is ComplexX87, then the \arg Hi class will 864 /// also be ComplexX87. 
865 void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const; 866 867 llvm::Type *GetByteVectorType(QualType Ty) const; 868 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType, 869 unsigned IROffset, QualType SourceTy, 870 unsigned SourceOffset) const; 871 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType, 872 unsigned IROffset, QualType SourceTy, 873 unsigned SourceOffset) const; 874 875 /// getIndirectResult - Give a source type \arg Ty, return a suitable result 876 /// such that the argument will be returned in memory. 877 ABIArgInfo getIndirectReturnResult(QualType Ty) const; 878 879 /// getIndirectResult - Give a source type \arg Ty, return a suitable result 880 /// such that the argument will be passed in memory. 881 ABIArgInfo getIndirectResult(QualType Ty) const; 882 883 ABIArgInfo classifyReturnType(QualType RetTy) const; 884 885 ABIArgInfo classifyArgumentType(QualType Ty, 886 unsigned &neededInt, 887 unsigned &neededSSE) const; 888 889 /// The 0.98 ABI revision clarified a lot of ambiguities, 890 /// unfortunately in ways that were not always consistent with 891 /// certain previous compilers. In particular, platforms which 892 /// required strict binary compatibility with older versions of GCC 893 /// may need to exempt themselves. 894 bool honorsRevision0_98() const { 895 return !getContext().Target.getTriple().isOSDarwin(); 896 } 897 898 public: 899 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} 900 901 virtual void computeInfo(CGFunctionInfo &FI) const; 902 903 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 904 CodeGenFunction &CGF) const; 905 }; 906 907 /// WinX86_64ABIInfo - The Windows X86_64 ABI information. 908 class WinX86_64ABIInfo : public ABIInfo { 909 910 ABIArgInfo classify(QualType Ty) const; 911 912 public: 913 WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {} 914 915 virtual void computeInfo(CGFunctionInfo &FI) const; 916 917 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, 918 CodeGenFunction &CGF) const; 919 }; 920 921 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo { 922 public: 923 X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 924 : TargetCodeGenInfo(new X86_64ABIInfo(CGT)) {} 925 926 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const { 927 return 7; 928 } 929 930 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 931 llvm::Value *Address) const { 932 CodeGen::CGBuilderTy &Builder = CGF.Builder; 933 llvm::LLVMContext &Context = CGF.getLLVMContext(); 934 935 const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context); 936 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 937 938 // 0-15 are the 16 integer registers. 939 // 16 is %rip. 
940 AssignToArrayRange(Builder, Address, Eight8, 0, 16); 941 942 return false; 943 } 944 945 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF, 946 llvm::StringRef Constraint, 947 llvm::Type* Ty) const { 948 return X86AdjustInlineAsmType(CGF, Constraint, Ty); 949 } 950 951 }; 952 953 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo { 954 public: 955 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) 956 : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {} 957 958 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const { 959 return 7; 960 } 961 962 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, 963 llvm::Value *Address) const { 964 CodeGen::CGBuilderTy &Builder = CGF.Builder; 965 llvm::LLVMContext &Context = CGF.getLLVMContext(); 966 967 const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context); 968 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8); 969 970 // 0-15 are the 16 integer registers. 971 // 16 is %rip. 972 AssignToArrayRange(Builder, Address, Eight8, 0, 16); 973 974 return false; 975 } 976 }; 977 978 } 979 980 void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo, 981 Class &Hi) const { 982 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done: 983 // 984 // (a) If one of the classes is Memory, the whole argument is passed in 985 // memory. 986 // 987 // (b) If X87UP is not preceded by X87, the whole argument is passed in 988 // memory. 989 // 990 // (c) If the size of the aggregate exceeds two eightbytes and the first 991 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole 992 // argument is passed in memory. NOTE: This is necessary to keep the 993 // ABI working for processors that don't support the __m256 type. 994 // 995 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE. 996 // 997 // Some of these are enforced by the merging logic. Others can arise 998 // only with unions; for example: 999 // union { _Complex double; unsigned; } 1000 // 1001 // Note that clauses (b) and (c) were added in 0.98. 1002 // 1003 if (Hi == Memory) 1004 Lo = Memory; 1005 if (Hi == X87Up && Lo != X87 && honorsRevision0_98()) 1006 Lo = Memory; 1007 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp)) 1008 Lo = Memory; 1009 if (Hi == SSEUp && Lo != SSE) 1010 Hi = SSE; 1011 } 1012 1013 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) { 1014 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is 1015 // classified recursively so that always two fields are 1016 // considered. The resulting class is calculated according to 1017 // the classes of the fields in the eightbyte: 1018 // 1019 // (a) If both classes are equal, this is the resulting class. 1020 // 1021 // (b) If one of the classes is NO_CLASS, the resulting class is 1022 // the other class. 1023 // 1024 // (c) If one of the classes is MEMORY, the result is the MEMORY 1025 // class. 1026 // 1027 // (d) If one of the classes is INTEGER, the result is the 1028 // INTEGER. 1029 // 1030 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class, 1031 // MEMORY is used as class. 1032 // 1033 // (f) Otherwise class SSE is used. 1034 1035 // Accum should never be memory (we should have returned) or 1036 // ComplexX87 (because this cannot be passed in a structure). 
1037 assert((Accum != Memory && Accum != ComplexX87) && 1038 "Invalid accumulated classification during merge."); 1039 if (Accum == Field || Field == NoClass) 1040 return Accum; 1041 if (Field == Memory) 1042 return Memory; 1043 if (Accum == NoClass) 1044 return Field; 1045 if (Accum == Integer || Field == Integer) 1046 return Integer; 1047 if (Field == X87 || Field == X87Up || Field == ComplexX87 || 1048 Accum == X87 || Accum == X87Up) 1049 return Memory; 1050 return SSE; 1051 } 1052 1053 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, 1054 Class &Lo, Class &Hi) const { 1055 // FIXME: This code can be simplified by introducing a simple value class for 1056 // Class pairs with appropriate constructor methods for the various 1057 // situations. 1058 1059 // FIXME: Some of the split computations are wrong; unaligned vectors 1060 // shouldn't be passed in registers for example, so there is no chance they 1061 // can straddle an eightbyte. Verify & simplify. 1062 1063 Lo = Hi = NoClass; 1064 1065 Class &Current = OffsetBase < 64 ? Lo : Hi; 1066 Current = Memory; 1067 1068 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { 1069 BuiltinType::Kind k = BT->getKind(); 1070 1071 if (k == BuiltinType::Void) { 1072 Current = NoClass; 1073 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) { 1074 Lo = Integer; 1075 Hi = Integer; 1076 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) { 1077 Current = Integer; 1078 } else if (k == BuiltinType::Float || k == BuiltinType::Double) { 1079 Current = SSE; 1080 } else if (k == BuiltinType::LongDouble) { 1081 Lo = X87; 1082 Hi = X87Up; 1083 } 1084 // FIXME: _Decimal32 and _Decimal64 are SSE. 1085 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). 1086 return; 1087 } 1088 1089 if (const EnumType *ET = Ty->getAs<EnumType>()) { 1090 // Classify the underlying integer type. 1091 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi); 1092 return; 1093 } 1094 1095 if (Ty->hasPointerRepresentation()) { 1096 Current = Integer; 1097 return; 1098 } 1099 1100 if (Ty->isMemberPointerType()) { 1101 if (Ty->isMemberFunctionPointerType()) 1102 Lo = Hi = Integer; 1103 else 1104 Current = Integer; 1105 return; 1106 } 1107 1108 if (const VectorType *VT = Ty->getAs<VectorType>()) { 1109 uint64_t Size = getContext().getTypeSize(VT); 1110 if (Size == 32) { 1111 // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x 1112 // float> as integer. 1113 Current = Integer; 1114 1115 // If this type crosses an eightbyte boundary, it should be 1116 // split. 1117 uint64_t EB_Real = (OffsetBase) / 64; 1118 uint64_t EB_Imag = (OffsetBase + Size - 1) / 64; 1119 if (EB_Real != EB_Imag) 1120 Hi = Lo; 1121 } else if (Size == 64) { 1122 // gcc passes <1 x double> in memory. :( 1123 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) 1124 return; 1125 1126 // gcc passes <1 x long long> as INTEGER. 1127 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) || 1128 VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) || 1129 VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) || 1130 VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong)) 1131 Current = Integer; 1132 else 1133 Current = SSE; 1134 1135 // If this type crosses an eightbyte boundary, it should be 1136 // split. 1137 if (OffsetBase && OffsetBase != 64) 1138 Hi = Lo; 1139 } else if (Size == 128 || Size == 256) { 1140 // Arguments of 256-bits are split into four eightbyte chunks. 
The 1141 // least significant one belongs to class SSE and all the others to class 1142 // SSEUP. The original Lo and Hi design considers that types can't be 1143 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense. 1144 // This design isn't correct for 256-bits, but since there're no cases 1145 // where the upper parts would need to be inspected, avoid adding 1146 // complexity and just consider Hi to match the 64-256 part. 1147 Lo = SSE; 1148 Hi = SSEUp; 1149 } 1150 return; 1151 } 1152 1153 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 1154 QualType ET = getContext().getCanonicalType(CT->getElementType()); 1155 1156 uint64_t Size = getContext().getTypeSize(Ty); 1157 if (ET->isIntegralOrEnumerationType()) { 1158 if (Size <= 64) 1159 Current = Integer; 1160 else if (Size <= 128) 1161 Lo = Hi = Integer; 1162 } else if (ET == getContext().FloatTy) 1163 Current = SSE; 1164 else if (ET == getContext().DoubleTy) 1165 Lo = Hi = SSE; 1166 else if (ET == getContext().LongDoubleTy) 1167 Current = ComplexX87; 1168 1169 // If this complex type crosses an eightbyte boundary then it 1170 // should be split. 1171 uint64_t EB_Real = (OffsetBase) / 64; 1172 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64; 1173 if (Hi == NoClass && EB_Real != EB_Imag) 1174 Hi = Lo; 1175 1176 return; 1177 } 1178 1179 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 1180 // Arrays are treated like structures. 1181 1182 uint64_t Size = getContext().getTypeSize(Ty); 1183 1184 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 1185 // than four eightbytes, ..., it has class MEMORY. 1186 if (Size > 256) 1187 return; 1188 1189 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned 1190 // fields, it has class MEMORY. 1191 // 1192 // Only need to check alignment of array base. 1193 if (OffsetBase % getContext().getTypeAlign(AT->getElementType())) 1194 return; 1195 1196 // Otherwise implement simplified merge. We could be smarter about 1197 // this, but it isn't worth it and would be harder to verify. 1198 Current = NoClass; 1199 uint64_t EltSize = getContext().getTypeSize(AT->getElementType()); 1200 uint64_t ArraySize = AT->getSize().getZExtValue(); 1201 1202 // The only case a 256-bit wide vector could be used is when the array 1203 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 1204 // to work for sizes wider than 128, early check and fallback to memory. 1205 if (Size > 128 && EltSize != 256) 1206 return; 1207 1208 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) { 1209 Class FieldLo, FieldHi; 1210 classify(AT->getElementType(), Offset, FieldLo, FieldHi); 1211 Lo = merge(Lo, FieldLo); 1212 Hi = merge(Hi, FieldHi); 1213 if (Lo == Memory || Hi == Memory) 1214 break; 1215 } 1216 1217 postMerge(Size, Lo, Hi); 1218 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification."); 1219 return; 1220 } 1221 1222 if (const RecordType *RT = Ty->getAs<RecordType>()) { 1223 uint64_t Size = getContext().getTypeSize(Ty); 1224 1225 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger 1226 // than four eightbytes, ..., it has class MEMORY. 1227 if (Size > 256) 1228 return; 1229 1230 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial 1231 // copy constructor or a non-trivial destructor, it is passed by invisible 1232 // reference. 
1233 if (hasNonTrivialDestructorOrCopyConstructor(RT)) 1234 return; 1235 1236 const RecordDecl *RD = RT->getDecl(); 1237 1238 // The only case a 256-bit wide vector could be used is when the struct 1239 // contains a single 256-bit element. Since Lo and Hi logic isn't extended 1240 // to work for sizes wider than 128, early check and fallback to memory. 1241 RecordDecl::field_iterator FirstElt = RD->field_begin(); 1242 if (Size > 128 && getContext().getTypeSize(FirstElt->getType()) != 256) 1243 return; 1244 1245 // Assume variable sized types are passed in memory. 1246 if (RD->hasFlexibleArrayMember()) 1247 return; 1248 1249 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 1250 1251 // Reset Lo class, this will be recomputed. 1252 Current = NoClass; 1253 1254 // If this is a C++ record, classify the bases first. 1255 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 1256 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 1257 e = CXXRD->bases_end(); i != e; ++i) { 1258 assert(!i->isVirtual() && !i->getType()->isDependentType() && 1259 "Unexpected base class!"); 1260 const CXXRecordDecl *Base = 1261 cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); 1262 1263 // Classify this field. 1264 // 1265 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a 1266 // single eightbyte, each is classified separately. Each eightbyte gets 1267 // initialized to class NO_CLASS. 1268 Class FieldLo, FieldHi; 1269 uint64_t Offset = OffsetBase + Layout.getBaseClassOffsetInBits(Base); 1270 classify(i->getType(), Offset, FieldLo, FieldHi); 1271 Lo = merge(Lo, FieldLo); 1272 Hi = merge(Hi, FieldHi); 1273 if (Lo == Memory || Hi == Memory) 1274 break; 1275 } 1276 } 1277 1278 // Classify the fields one at a time, merging the results. 1279 unsigned idx = 0; 1280 for (RecordDecl::field_iterator i = FirstElt, e = RD->field_end(); 1281 i != e; ++i, ++idx) { 1282 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 1283 bool BitField = i->isBitField(); 1284 1285 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned 1286 // fields, it has class MEMORY. 1287 // 1288 // Note, skip this test for bit-fields, see below. 1289 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) { 1290 Lo = Memory; 1291 return; 1292 } 1293 1294 // Classify this field. 1295 // 1296 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate 1297 // exceeds a single eightbyte, each is classified 1298 // separately. Each eightbyte gets initialized to class 1299 // NO_CLASS. 1300 Class FieldLo, FieldHi; 1301 1302 // Bit-fields require special handling, they do not force the 1303 // structure to be passed in memory even if unaligned, and 1304 // therefore they can straddle an eightbyte. 1305 if (BitField) { 1306 // Ignore padding bit-fields. 1307 if (i->isUnnamedBitfield()) 1308 continue; 1309 1310 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); 1311 uint64_t Size = 1312 i->getBitWidth()->EvaluateAsInt(getContext()).getZExtValue(); 1313 1314 uint64_t EB_Lo = Offset / 64; 1315 uint64_t EB_Hi = (Offset + Size - 1) / 64; 1316 FieldLo = FieldHi = NoClass; 1317 if (EB_Lo) { 1318 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes."); 1319 FieldLo = NoClass; 1320 FieldHi = Integer; 1321 } else { 1322 FieldLo = Integer; 1323 FieldHi = EB_Hi ? 
Integer : NoClass; 1324 } 1325 } else 1326 classify(i->getType(), Offset, FieldLo, FieldHi); 1327 Lo = merge(Lo, FieldLo); 1328 Hi = merge(Hi, FieldHi); 1329 if (Lo == Memory || Hi == Memory) 1330 break; 1331 } 1332 1333 postMerge(Size, Lo, Hi); 1334 } 1335 } 1336 1337 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const { 1338 // If this is a scalar LLVM value then assume LLVM will pass it in the right 1339 // place naturally. 1340 if (!isAggregateTypeForABI(Ty)) { 1341 // Treat an enum type as its underlying type. 1342 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 1343 Ty = EnumTy->getDecl()->getIntegerType(); 1344 1345 return (Ty->isPromotableIntegerType() ? 1346 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 1347 } 1348 1349 return ABIArgInfo::getIndirect(0); 1350 } 1351 1352 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty) const { 1353 // If this is a scalar LLVM value then assume LLVM will pass it in the right 1354 // place naturally. 1355 if (!isAggregateTypeForABI(Ty)) { 1356 // Treat an enum type as its underlying type. 1357 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) 1358 Ty = EnumTy->getDecl()->getIntegerType(); 1359 1360 return (Ty->isPromotableIntegerType() ? 1361 ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); 1362 } 1363 1364 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) 1365 return ABIArgInfo::getIndirect(0, /*ByVal=*/false); 1366 1367 // Compute the byval alignment. We specify the alignment of the byval in all 1368 // cases so that the mid-level optimizer knows the alignment of the byval. 1369 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U); 1370 return ABIArgInfo::getIndirect(Align); 1371 } 1372 1373 /// GetByteVectorType - The ABI specifies that a value should be passed in an 1374 /// full vector XMM/YMM register. Pick an LLVM IR type that will be passed as a 1375 /// vector register. 1376 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const { 1377 llvm::Type *IRType = CGT.ConvertType(Ty); 1378 1379 // Wrapper structs that just contain vectors are passed just like vectors, 1380 // strip them off if present. 1381 llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType); 1382 while (STy && STy->getNumElements() == 1) { 1383 IRType = STy->getElementType(0); 1384 STy = dyn_cast<llvm::StructType>(IRType); 1385 } 1386 1387 // If the preferred type is a 16-byte vector, prefer to pass it. 1388 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){ 1389 llvm::Type *EltTy = VT->getElementType(); 1390 unsigned BitWidth = VT->getBitWidth(); 1391 if ((BitWidth == 128 || BitWidth == 256) && 1392 (EltTy->isFloatTy() || EltTy->isDoubleTy() || 1393 EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) || 1394 EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) || 1395 EltTy->isIntegerTy(128))) 1396 return VT; 1397 } 1398 1399 return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2); 1400 } 1401 1402 /// BitsContainNoUserData - Return true if the specified [start,end) bit range 1403 /// is known to either be off the end of the specified type or being in 1404 /// alignment padding. The user type specified is known to be at most 128 bits 1405 /// in size, and have passed through X86_64ABIInfo::classify with a successful 1406 /// classification that put one of the two halves in the INTEGER class. 1407 /// 1408 /// It is conservatively correct to return false. 
1409 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit, 1410 unsigned EndBit, ASTContext &Context) { 1411 // If the bytes being queried are off the end of the type, there is no user 1412 // data hiding here. This handles analysis of builtins, vectors and other 1413 // types that don't contain interesting padding. 1414 unsigned TySize = (unsigned)Context.getTypeSize(Ty); 1415 if (TySize <= StartBit) 1416 return true; 1417 1418 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 1419 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType()); 1420 unsigned NumElts = (unsigned)AT->getSize().getZExtValue(); 1421 1422 // Check each element to see if the element overlaps with the queried range. 1423 for (unsigned i = 0; i != NumElts; ++i) { 1424 // If the element is after the span we care about, then we're done.. 1425 unsigned EltOffset = i*EltSize; 1426 if (EltOffset >= EndBit) break; 1427 1428 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0; 1429 if (!BitsContainNoUserData(AT->getElementType(), EltStart, 1430 EndBit-EltOffset, Context)) 1431 return false; 1432 } 1433 // If it overlaps no elements, then it is safe to process as padding. 1434 return true; 1435 } 1436 1437 if (const RecordType *RT = Ty->getAs<RecordType>()) { 1438 const RecordDecl *RD = RT->getDecl(); 1439 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 1440 1441 // If this is a C++ record, check the bases first. 1442 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 1443 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(), 1444 e = CXXRD->bases_end(); i != e; ++i) { 1445 assert(!i->isVirtual() && !i->getType()->isDependentType() && 1446 "Unexpected base class!"); 1447 const CXXRecordDecl *Base = 1448 cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); 1449 1450 // If the base is after the span we care about, ignore it. 1451 unsigned BaseOffset = (unsigned)Layout.getBaseClassOffsetInBits(Base); 1452 if (BaseOffset >= EndBit) continue; 1453 1454 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0; 1455 if (!BitsContainNoUserData(i->getType(), BaseStart, 1456 EndBit-BaseOffset, Context)) 1457 return false; 1458 } 1459 } 1460 1461 // Verify that no field has data that overlaps the region of interest. Yes 1462 // this could be sped up a lot by being smarter about queried fields, 1463 // however we're only looking at structs up to 16 bytes, so we don't care 1464 // much. 1465 unsigned idx = 0; 1466 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); 1467 i != e; ++i, ++idx) { 1468 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx); 1469 1470 // If we found a field after the region we care about, then we're done. 1471 if (FieldOffset >= EndBit) break; 1472 1473 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0; 1474 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset, 1475 Context)) 1476 return false; 1477 } 1478 1479 // If nothing in this record overlapped the area of interest, then we're 1480 // clean. 1481 return true; 1482 } 1483 1484 return false; 1485 } 1486 1487 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a 1488 /// float member at the specified offset. For example, {int,{float}} has a 1489 /// float at offset 4. It is conservatively correct for this routine to return 1490 /// false. 
1491 static bool ContainsFloatAtOffset(const llvm::Type *IRType, unsigned IROffset, 1492 const llvm::TargetData &TD) { 1493 // Base case if we find a float. 1494 if (IROffset == 0 && IRType->isFloatTy()) 1495 return true; 1496 1497 // If this is a struct, recurse into the field at the specified offset. 1498 if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 1499 const llvm::StructLayout *SL = TD.getStructLayout(STy); 1500 unsigned Elt = SL->getElementContainingOffset(IROffset); 1501 IROffset -= SL->getElementOffset(Elt); 1502 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD); 1503 } 1504 1505 // If this is an array, recurse into the field at the specified offset. 1506 if (const llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 1507 const llvm::Type *EltTy = ATy->getElementType(); 1508 unsigned EltSize = TD.getTypeAllocSize(EltTy); 1509 IROffset -= IROffset/EltSize*EltSize; 1510 return ContainsFloatAtOffset(EltTy, IROffset, TD); 1511 } 1512 1513 return false; 1514 } 1515 1516 1517 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the 1518 /// low 8 bytes of an XMM register, corresponding to the SSE class. 1519 llvm::Type *X86_64ABIInfo:: 1520 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset, 1521 QualType SourceTy, unsigned SourceOffset) const { 1522 // The only three choices we have are either double, <2 x float>, or float. We 1523 // pass as float if the last 4 bytes is just padding. This happens for 1524 // structs that contain 3 floats. 1525 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32, 1526 SourceOffset*8+64, getContext())) 1527 return llvm::Type::getFloatTy(getVMContext()); 1528 1529 // We want to pass as <2 x float> if the LLVM IR type contains a float at 1530 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the 1531 // case. 1532 if (ContainsFloatAtOffset(IRType, IROffset, getTargetData()) && 1533 ContainsFloatAtOffset(IRType, IROffset+4, getTargetData())) 1534 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2); 1535 1536 return llvm::Type::getDoubleTy(getVMContext()); 1537 } 1538 1539 1540 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in 1541 /// an 8-byte GPR. This means that we either have a scalar or we are talking 1542 /// about the high or low part of an up-to-16-byte struct. This routine picks 1543 /// the best LLVM IR type to represent this, which may be i64 or may be anything 1544 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*, 1545 /// etc). 1546 /// 1547 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for 1548 /// the source type. IROffset is an offset in bytes into the LLVM IR type that 1549 /// the 8-byte value references. PrefType may be null. 1550 /// 1551 /// SourceTy is the source level type for the entire argument. SourceOffset is 1552 /// an offset into this that we're processing (which is always either 0 or 8). 1553 /// 1554 llvm::Type *X86_64ABIInfo:: 1555 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset, 1556 QualType SourceTy, unsigned SourceOffset) const { 1557 // If we're dealing with an un-offset LLVM IR type, then it means that we're 1558 // returning an 8-byte unit starting with it. See if we can safely use it. 1559 if (IROffset == 0) { 1560 // Pointers and int64's always fill the 8-byte unit. 
1561 if (isa<llvm::PointerType>(IRType) || IRType->isIntegerTy(64)) 1562 return IRType; 1563 1564 // If we have a 1/2/4-byte integer, we can use it only if the rest of the 1565 // goodness in the source type is just tail padding. This is allowed to 1566 // kick in for struct {double,int} on the int, but not on 1567 // struct{double,int,int} because we wouldn't return the second int. We 1568 // have to do this analysis on the source type because we can't depend on 1569 // unions being lowered a specific way etc. 1570 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) || 1571 IRType->isIntegerTy(32)) { 1572 unsigned BitWidth = cast<llvm::IntegerType>(IRType)->getBitWidth(); 1573 1574 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth, 1575 SourceOffset*8+64, getContext())) 1576 return IRType; 1577 } 1578 } 1579 1580 if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) { 1581 // If this is a struct, recurse into the field at the specified offset. 1582 const llvm::StructLayout *SL = getTargetData().getStructLayout(STy); 1583 if (IROffset < SL->getSizeInBytes()) { 1584 unsigned FieldIdx = SL->getElementContainingOffset(IROffset); 1585 IROffset -= SL->getElementOffset(FieldIdx); 1586 1587 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset, 1588 SourceTy, SourceOffset); 1589 } 1590 } 1591 1592 if (const llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) { 1593 llvm::Type *EltTy = ATy->getElementType(); 1594 unsigned EltSize = getTargetData().getTypeAllocSize(EltTy); 1595 unsigned EltOffset = IROffset/EltSize*EltSize; 1596 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy, 1597 SourceOffset); 1598 } 1599 1600 // Okay, we don't have any better idea of what to pass, so we pass this in an 1601 // integer register that isn't too big to fit the rest of the struct. 1602 unsigned TySizeInBytes = 1603 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity(); 1604 1605 assert(TySizeInBytes != SourceOffset && "Empty field?"); 1606 1607 // It is always safe to classify this as an integer type up to i64 that 1608 // isn't larger than the structure. 1609 return llvm::IntegerType::get(getVMContext(), 1610 std::min(TySizeInBytes-SourceOffset, 8U)*8); 1611 } 1612 1613 1614 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally 1615 /// be used as elements of a two register pair to pass or return, return a 1616 /// first class aggregate to represent them. For example, if the low part of 1617 /// a by-value argument should be passed as i32* and the high part as float, 1618 /// return {i32*, float}. 1619 static llvm::Type * 1620 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi, 1621 const llvm::TargetData &TD) { 1622 // In order to correctly satisfy the ABI, we need to the high part to start 1623 // at offset 8. If the high and low parts we inferred are both 4-byte types 1624 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have 1625 // the second element at offset 8. Check for this: 1626 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo); 1627 unsigned HiAlign = TD.getABITypeAlignment(Hi); 1628 unsigned HiStart = llvm::TargetData::RoundUpAlignment(LoSize, HiAlign); 1629 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!"); 1630 1631 // To handle this, we have to increase the size of the low part so that the 1632 // second element will start at an 8 byte offset. 

/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
/// be used as elements of a two register pair to pass or return, return a
/// first class aggregate to represent them.  For example, if the low part of
/// a by-value argument should be passed as i32* and the high part as float,
/// return {i32*, float}.
static llvm::Type *
GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
                           const llvm::TargetData &TD) {
  // In order to correctly satisfy the ABI, we need the high part to start
  // at offset 8.  If the high and low parts we inferred are both 4-byte types
  // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
  // the second element at offset 8.  Check for this:
  unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
  unsigned HiAlign = TD.getABITypeAlignment(Hi);
  unsigned HiStart = llvm::TargetData::RoundUpAlignment(LoSize, HiAlign);
  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");

  // To handle this, we have to increase the size of the low part so that the
  // second element will start at an 8-byte offset.  We can't increase the size
  // of the second element because it might make us access off the end of the
  // struct.
  if (HiStart != 8) {
    // There are only two sorts of types the ABI generation code can produce
    // for the low part of a pair that aren't 8 bytes in size: float or
    // i8/i16/i32.  Promote these to a larger type.
    if (Lo->isFloatTy())
      Lo = llvm::Type::getDoubleTy(Lo->getContext());
    else {
      assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
      Lo = llvm::Type::getInt64Ty(Lo->getContext());
    }
  }

  llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL);


  // Verify that the second element is at an 8-byte offset.
  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
         "Invalid x86-64 argument pair!");
  return Result;
}

ABIArgInfo X86_64ABIInfo::
classifyReturnType(QualType RetTy) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, 0, Lo, Hi);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  case SSEUp:
  case X87Up:
    assert(0 && "Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
    // hidden argument.
  case Memory:
    return getIndirectReturnResult(RetTy);

    // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
    // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
        RetTy = EnumTy->getDecl()->getIntegerType();

      if (RetTy->isIntegralOrEnumerationType() &&
          RetTy->isPromotableIntegerType())
        return ABIArgInfo::getExtend();
    }
    break;

    // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
    // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
    break;

    // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
    // returned on the X87 stack in %st0 as 80-bit x87 number.
  case X87:
    ResType = llvm::Type::getX86_FP80Ty(getVMContext());
    break;

    // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
    // part of the value is returned in %st0 and the imaginary part in
    // %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
                                    llvm::Type::getX86_FP80Ty(getVMContext()),
                                    NULL);
    break;
  }

  llvm::Type *HighPart = 0;
  switch (Hi) {
    // Memory was handled previously and X87 should
    // never occur as a hi class.
  case Memory:
  case X87:
    assert(0 && "Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
  case NoClass:
    break;

  case Integer:
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

    // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
    // is passed in the next available eightbyte chunk of the last used
    // vector register.
    //
    // SSEUP should always be preceded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = GetByteVectorType(RetTy);
    break;

    // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
    // returned together with the previous X87 value in %st0.
  case X87Up:
    // If X87Up is preceded by X87, we don't need to do
    // anything. However, in some cases with unions it may not be
    // preceded by X87. In such situations we follow gcc and pass the
    // extra bits in an SSE reg.
    if (Lo != X87) {
      HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
      if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
        return ABIArgInfo::getDirect(HighPart, 8);
    }
    break;
  }

  // If a high part was specified, merge it together with the low part.  It is
  // known to pass in the high eightbyte of the result.  We do this by forming
  // a first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData());

  return ABIArgInfo::getDirect(ResType);
}
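
// For illustration (example only): a function returning
//   struct Pair { long a, b; };
// classifies as INTEGER/INTEGER and is returned directly as {i64, i64} in
// %rax:%rdx, while a '_Complex long double' return classifies as COMPLEX_X87
// and comes back in %st0/%st1.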

ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
                                               unsigned &neededSSE) const {
  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, 0, Lo, Hi);

  // Check some invariants.
  // FIXME: Enforce these by construction.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  neededInt = 0;
  neededSSE = 0;
  llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

    // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
    // on the stack.
  case Memory:

    // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
    // COMPLEX_X87, it is passed in memory.
  case X87:
  case ComplexX87:
    if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
      ++neededInt;
    return getIndirectResult(Ty);

  case SSEUp:
  case X87Up:
    assert(0 && "Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
    // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
    // and %r9 is used.
  case Integer:
    ++neededInt;

    // Pick an 8-byte type based on the preferred type.
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = Ty->getAs<EnumType>())
        Ty = EnumTy->getDecl()->getIntegerType();

      if (Ty->isIntegralOrEnumerationType() &&
          Ty->isPromotableIntegerType())
        return ABIArgInfo::getExtend();
    }

    break;

    // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
    // available SSE register is used, the registers are taken in the
    // order from %xmm0 to %xmm7.
  case SSE: {
    llvm::Type *IRType = CGT.ConvertType(Ty);
    ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
    ++neededSSE;
    break;
  }
  }

  llvm::Type *HighPart = 0;
  switch (Hi) {
    // Memory was handled previously, ComplexX87 and X87 should
    // never occur as hi classes, and X87Up must be preceded by X87,
    // which is passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    assert(0 && "Invalid classification for hi word.");
    break;

  case NoClass: break;

  case Integer:
    ++neededInt;
    // Pick an 8-byte type based on the preferred type.
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass)  // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

    // X87Up generally doesn't occur here (long double is passed in
    // memory), except in situations involving unions.
  case X87Up:
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass)  // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);

    ++neededSSE;
    break;

    // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
    // eightbyte is passed in the upper half of the last used SSE
    // register.  This only happens when 128-bit vectors are passed.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification");
    ResType = GetByteVectorType(Ty);
    break;
  }

  // If a high part was specified, merge it together with the low part.  It is
  // known to pass in the high eightbyte of the result.  We do this by forming
  // a first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData());

  return ABIArgInfo::getDirect(ResType);
}
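
// For illustration (example only): an argument of type
//   struct S { double d; int i; };
// classifies as SSE/INTEGER, so classifyArgumentType reports neededSSE = 1 and
// neededInt = 1 and coerces the argument to the pair {double, i32}.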

void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {

  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  // Keep track of the number of assigned registers.
  unsigned freeIntRegs = 6, freeSSERegs = 8;

  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (FI.getReturnInfo().isIndirect())
    --freeIntRegs;

  // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
  // get assigned (in left-to-right order) for passing as follows...
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it) {
    unsigned neededInt, neededSSE;
    it->info = classifyArgumentType(it->type, neededInt, neededSSE);

    // AMD64-ABI 3.2.3p3: If there are no registers available for any
    // eightbyte of an argument, the whole argument is passed on the
    // stack. If registers have already been assigned for some
    // eightbytes of such an argument, the assignments get reverted.
    if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
      freeIntRegs -= neededInt;
      freeSSERegs -= neededSSE;
    } else {
      it->info = getIndirectResult(it->type);
    }
  }
}
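
// For illustration (example only): the six integer registers counted above are
// %rdi, %rsi, %rdx, %rcx, %r8 and %r9, and the eight SSE registers are
// %xmm0-%xmm7.  A seventh INTEGER-class argument therefore no longer fits and
// is demoted to getIndirectResult, i.e. passed on the stack.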

static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
                                        QualType Ty,
                                        CodeGenFunction &CGF) {
  llvm::Value *overflow_arg_area_p =
    CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
  llvm::Value *overflow_arg_area =
    CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");

  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
  // byte boundary if the alignment needed by the type exceeds 8 bytes.
  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
  if (Align > 8) {
    // Note that we follow the ABI & gcc here, even though the type
    // could in theory have an alignment greater than 16. This case
    // shouldn't ever matter in practice.

    // overflow_arg_area = (overflow_arg_area + 15) & ~15;
    llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, 15);
    overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
                                                    CGF.Int64Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~15LL);
    overflow_arg_area =
      CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                 overflow_arg_area->getType(),
                                 "overflow_arg_area.align");
  }

  // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
  const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Res =
    CGF.Builder.CreateBitCast(overflow_arg_area,
                              llvm::PointerType::getUnqual(LTy));

  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
  // l->overflow_arg_area + sizeof(type).
  // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
  // an 8 byte boundary.

  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
  llvm::Value *Offset =
    llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
                                            "overflow_arg_area.next");
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);

  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
  return Res;
}
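
// For reference (derived from the AMD64 ABI, not from this file): the register
// save area written by the function prologue places the six GPRs at byte
// offsets 0-47 and the eight XMM argument registers at offsets 48-175, which
// is where the 48 and 176 limits checked in EmitVAArg below come from.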

llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  llvm::LLVMContext &VMContext = CGF.getLLVMContext();

  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i32 gp_offset;
  //   i32 fp_offset;
  //   i8* overflow_arg_area;
  //   i8* reg_save_area;
  // };
  unsigned neededInt, neededSSE;

  Ty = CGF.getContext().getCanonicalType(Ty);
  ABIArgInfo AI = classifyArgumentType(Ty, neededInt, neededSSE);

  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
  // in the registers. If not go to step 7.
  if (!neededInt && !neededSSE)
    return EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
  // general purpose registers needed to pass type and num_fp to hold
  // the number of floating point registers needed.

  // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
  // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
  // l->fp_offset > 304 - num_fp * 16 go to step 7.
  //
  // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
  // register save space.

  llvm::Value *InRegs = 0;
  llvm::Value *gp_offset_p = 0, *gp_offset = 0;
  llvm::Value *fp_offset_p = 0, *fp_offset = 0;
  if (neededInt) {
    gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
    InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
    InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
  }

  if (neededSSE) {
    fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
    llvm::Value *FitsInFP =
      llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
    FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
  }

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.

  CGF.EmitBlock(InRegBlock);

  // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
  // an offset of l->gp_offset and/or l->fp_offset. This may require
  // copying to a temporary location in case the parameter is passed
  // in different register classes or requires an alignment greater
  // than 8 for general purpose registers and 16 for XMM registers.
  //
  // FIXME: This really results in shameful code when we end up needing to
  // collect arguments from different places; often what should result in a
  // simple assembling of a structure from scattered addresses has many more
  // loads than necessary. Can we clean this up?
  const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *RegAddr =
    CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
                           "reg_save_area");
  if (neededInt && neededSSE) {
    // FIXME: Cleanup.
    assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
    const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
    llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
    const llvm::Type *TyLo = ST->getElementType(0);
    const llvm::Type *TyHi = ST->getElementType(1);
    assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
           "Unexpected ABI info for mixed regs");
    const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
    const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
    llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr;
    llvm::Value *V =
      CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));

    RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                        llvm::PointerType::getUnqual(LTy));
  } else if (neededInt) {
    RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                        llvm::PointerType::getUnqual(LTy));
  } else if (neededSSE == 1) {
    RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                        llvm::PointerType::getUnqual(LTy));
  } else {
    assert(neededSSE == 2 && "Invalid number of needed registers!");
    // SSE registers are spaced 16 bytes apart in the register save
    // area, we need to collect the two eightbytes together.
    llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
    llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);
    const llvm::Type *DblPtrTy =
      llvm::PointerType::getUnqual(DoubleTy);
    const llvm::StructType *ST = llvm::StructType::get(DoubleTy,
                                                       DoubleTy, NULL);
    llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
                                                         DblPtrTy));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
                                                         DblPtrTy));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
    RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                        llvm::PointerType::getUnqual(LTy));
  }

  // AMD64-ABI 3.5.7p5: Step 5. Set:
  // l->gp_offset = l->gp_offset + num_gp * 8
  // l->fp_offset = l->fp_offset + num_fp * 16.
  if (neededInt) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
                            gp_offset_p);
  }
  if (neededSSE) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
                            fp_offset_p);
  }
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.

  CGF.EmitBlock(InMemBlock);
  llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // Return the appropriate result.

  CGF.EmitBlock(ContBlock);
  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2,
                                                 "vaarg.addr");
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(MemAddr, InMemBlock);
  return ResAddr;
}
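
// For illustration (example only): va_arg of a type such as
//   struct S { double d; int i; };
// needs one XMM and one GPR (neededSSE = 1, neededInt = 1), so the in-register
// path above reassembles the value into a temporary {double, i32} slot before
// handing back a pointer to it.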

ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty) const {

  if (Ty->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  uint64_t Size = getContext().getTypeSize(Ty);

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    if (hasNonTrivialDestructorOrCopyConstructor(RT) ||
        RT->getDecl()->hasFlexibleArrayMember())
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

    // FIXME: mingw-w64-gcc emits 128-bit struct as i128
    if (Size == 128 &&
        getContext().Target.getTriple().getOS() == llvm::Triple::MinGW32)
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));

    // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
    // not 1, 2, 4, or 8 bytes, must be passed by reference."
    if (Size <= 64 &&
        (Size & (Size - 1)) == 0)
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));

    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
  }

  if (Ty->isPromotableIntegerType())
    return ABIArgInfo::getExtend();

  return ABIArgInfo::getDirect();
}
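
// For illustration (example only): under the rule above a record like
//   struct S { int a, b; };   // 8 bytes, a power of two
// is passed directly as an i64, while
//   struct T { char c[3]; };  // 3 bytes
// does not have a 1/2/4/8-byte size and is passed by reference instead.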

void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {

  QualType RetTy = FI.getReturnType();
  FI.getReturnInfo() = classify(RetTy);

  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classify(it->type);
}

llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                         CodeGenFunction &CGF) const {
  const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

// PowerPC-32

namespace {
class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;
};

}

bool
PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                llvm::Value *Address) const {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output.  AFAIK all ABIs use the same encoding.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;
  llvm::LLVMContext &Context = CGF.getLLVMContext();

  const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
  llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);

  // 0-31: r0-31, the 4-byte general-purpose registers
  AssignToArrayRange(Builder, Address, Four8, 0, 31);

  // 32-63: fp0-31, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 32, 63);

  // 64-76 are various 4-byte special-purpose registers:
  // 64: mq
  // 65: lr
  // 66: ctr
  // 67: ap
  // 68-75: cr0-7
  // 76: xer
  AssignToArrayRange(Builder, Address, Four8, 64, 76);

  // 77-108: v0-31, the 16-byte vector registers
  AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);

  // 109: vrsave
  // 110: vscr
  // 111: spe_acc
  // 112: spefscr
  // 113: sfp
  AssignToArrayRange(Builder, Address, Four8, 109, 113);

  return false;
}


//===----------------------------------------------------------------------===//
// ARM ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class ARMABIInfo : public ABIInfo {
public:
  enum ABIKind {
    APCS = 0,
    AAPCS = 1,
    AAPCS_VFP
  };

private:
  ABIKind Kind;

public:
  ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {}

private:
  ABIKind getABIKind() const { return Kind; }

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  virtual void computeInfo(CGFunctionInfo &FI) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
    :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
    return 13;
  }

  llvm::StringRef getARCRetainAutoreleasedReturnValueMarker() const {
    return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    CodeGen::CGBuilderTy &Builder = CGF.Builder;
    llvm::LLVMContext &Context = CGF.getLLVMContext();

    const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
    llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);

    // 0-15 are the 16 integer registers.
    AssignToArrayRange(Builder, Address, Four8, 0, 15);

    return false;
  }
};

}

void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type);

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  // Determine the default calling convention for the target ABI.
  llvm::CallingConv::ID DefaultCC;
  llvm::StringRef Env = getContext().Target.getTriple().getEnvironmentName();
  if (Env == "gnueabi" || Env == "eabi")
    DefaultCC = llvm::CallingConv::ARM_AAPCS;
  else
    DefaultCC = llvm::CallingConv::ARM_APCS;

  // If the user did not explicitly ask for a specific calling convention
  // (e.g. via the pcs attribute), set the effective calling convention if it
  // is different from the ABI default.
  switch (getABIKind()) {
  case APCS:
    if (DefaultCC != llvm::CallingConv::ARM_APCS)
      FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS);
    break;
  case AAPCS:
    if (DefaultCC != llvm::CallingConv::ARM_AAPCS)
      FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS);
    break;
  case AAPCS_VFP:
    if (DefaultCC != llvm::CallingConv::ARM_AAPCS_VFP)
      FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP);
    break;
  }
}
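
// For illustration (example only): on an arm-linux-gnueabi triple the default
// convention computed above is ARM_AAPCS, so an ARMABIInfo constructed with
// the APCS kind has to override the effective calling convention, while the
// AAPCS kind can leave it untouched.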

ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const {
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always indirect.
  if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  // Otherwise, pass by coercing to a structure of the appropriate size.
  //
  // FIXME: This doesn't handle alignment > 64 bits.
  const llvm::Type* ElemTy;
  unsigned SizeRegs;
  if (getContext().getTypeSizeInChars(Ty) <= CharUnits::fromQuantity(64)) {
    ElemTy = llvm::Type::getInt32Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
  } else if (getABIKind() == ARMABIInfo::APCS) {
    // Initial ARM ByVal support is APCS-only.
    return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
  } else {
    // FIXME: This is kind of nasty... but there isn't much choice
    // because most of the ARM calling conventions don't yet support
    // byval.
    ElemTy = llvm::Type::getInt64Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
  }

  llvm::Type *STy =
    llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL);
  return ABIArgInfo::getDirect(STy);
}
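
// For illustration (example only): an 8-byte aggregate such as
//   struct S { int a, b; };
// falls into the first branch above and is coerced to { [2 x i32] }, i.e. it
// travels in two core registers (or on the stack) rather than byval.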

static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
                              llvm::LLVMContext &VMContext) {
  // APCS, C Language Calling Conventions, Non-Simple Return Values: A
  // structure is called integer-like if its size is less than or equal to one
  // word, and the offset of each of its addressable sub-fields is zero.

  uint64_t Size = Context.getTypeSize(Ty);

  // Check that the type fits in a word.
  if (Size > 32)
    return false;

  // FIXME: Handle vector types!
  if (Ty->isVectorType())
    return false;

  // Float types are never treated as "integer like".
  if (Ty->isRealFloatingType())
    return false;

  // If this is a builtin or pointer type then it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
    return true;

  // Small complex integer types are "integer like".
  if (const ComplexType *CT = Ty->getAs<ComplexType>())
    return isIntegerLikeType(CT->getElementType(), Context, VMContext);

  // Single element and zero sized arrays should be allowed, by the definition
  // above, but they are not.

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // Ignore records with flexible arrays.
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // Check that all sub-fields are at offset 0, and are themselves "integer
  // like".
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  bool HadField = false;
  unsigned idx = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const FieldDecl *FD = *i;

    // Bit-fields are not addressable, so we only need to verify they are
    // "integer like".  We still have to disallow a subsequent non-bitfield,
    // for example:
    //   struct { int : 0; int x; }
    // is non-integer like according to gcc.
    if (FD->isBitField()) {
      if (!RD->isUnion())
        HadField = true;

      if (!isIntegerLikeType(FD->getType(), Context, VMContext))
        return false;

      continue;
    }

    // Check if this field is at offset 0.
    if (Layout.getFieldOffset(idx) != 0)
      return false;

    if (!isIntegerLikeType(FD->getType(), Context, VMContext))
      return false;

    // Only allow at most one field in a structure. This doesn't match the
    // wording above, but follows gcc in situations with a field following an
    // empty structure.
    if (!RD->isUnion()) {
      if (HadField)
        return false;

      HadField = true;
    }
  }

  return true;
}
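
// For illustration (example only, following the APCS wording above):
//   struct A { char c; };     // one sub-field at offset 0: integer-like
//   struct B { char c, d; };  // 'd' sits at a nonzero offset: not
//                             // integer-like, so under APCS it is returned
//                             // in memory rather than in r0.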

ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
    return ABIArgInfo::getIndirect(0);

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return (RetTy->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always indirect.
  if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  // Are we following APCS?
  if (getABIKind() == APCS) {
    if (isEmptyRecord(getContext(), RetTy, false))
      return ABIArgInfo::getIgnore();

    // Complex types are all returned as packed integers.
    //
    // FIXME: Consider using 2 x vector types if the back end handles them
    // correctly.
    if (RetTy->isAnyComplexType())
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                            getContext().getTypeSize(RetTy)));

    // Integer like structures are returned in r0.
    if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
      // Return in the smallest viable integer type.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 8)
        return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    }

    // Otherwise return in memory.
    return ABIArgInfo::getIndirect(0);
  }

  // Otherwise this is an AAPCS variant.

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Aggregates <= 4 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 32) {
    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  }

  return ABIArgInfo::getIndirect(0);
}
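
// For illustration (example only): under AAPCS a 3-byte aggregate like
//   struct RGB { unsigned char r, g, b; };
// is 24 bits wide and is therefore returned directly as an i32 in r0, whereas
// a 12-byte aggregate exceeds the 32-bit limit and is returned indirectly via
// a hidden sret pointer.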

llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                   CodeGenFunction &CGF) const {
  // FIXME: Need to handle alignment
  const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

//===----------------------------------------------------------------------===//
// PTX ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class PTXABIInfo : public ABIInfo {
public:
  PTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  virtual void computeInfo(CGFunctionInfo &FI) const;
  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CFG) const;
};

class PTXTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PTXTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new PTXABIInfo(CGT)) {}
};

ABIArgInfo PTXABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();
  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);
  return ABIArgInfo::getDirect();
}

ABIArgInfo PTXABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty))
    return ABIArgInfo::getIndirect(0);

  return ABIArgInfo::getDirect();
}

void PTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type);

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  // Determine the default calling convention for the target ABI.
  llvm::CallingConv::ID DefaultCC;
  llvm::StringRef Env = getContext().Target.getTriple().getEnvironmentName();
  if (Env == "device")
    DefaultCC = llvm::CallingConv::PTX_Device;
  else
    DefaultCC = llvm::CallingConv::PTX_Kernel;

  FI.setEffectiveCallingConvention(DefaultCC);
}

llvm::Value *PTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                   CodeGenFunction &CFG) const {
  llvm_unreachable("PTX does not support varargs");
  return 0;
}

}

//===----------------------------------------------------------------------===//
// SystemZ ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class SystemZABIInfo : public ABIInfo {
public:
  SystemZABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

  bool isPromotableIntegerType(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  virtual void computeInfo(CGFunctionInfo &FI) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SystemZTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new SystemZABIInfo(CGT)) {}
};

}

bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
  // SystemZ ABI requires all 8, 16 and 32 bit quantities to be extended.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Bool:
    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
      return true;
    default:
      return false;
    }
  return false;
}

llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  // FIXME: Implement
  return 0;
}


ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();
  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  return (isPromotableIntegerType(RetTy) ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty))
    return ABIArgInfo::getIndirect(0);

  return (isPromotableIntegerType(Ty) ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

//===----------------------------------------------------------------------===//
// MBlaze ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class MBlazeABIInfo : public ABIInfo {
public:
  MBlazeABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

  bool isPromotableIntegerType(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  virtual void computeInfo(CGFunctionInfo &FI) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class MBlazeTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  MBlazeTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new MBlazeABIInfo(CGT)) {}
  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const;
};

}

bool MBlazeABIInfo::isPromotableIntegerType(QualType Ty) const {
  // MBlaze ABI requires all 8 and 16 bit quantities to be extended.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Bool:
    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
      return true;
    default:
      return false;
    }
  return false;
}

llvm::Value *MBlazeABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  // FIXME: Implement
  return 0;
}


ABIArgInfo MBlazeABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();
  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  return (isPromotableIntegerType(RetTy) ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo MBlazeABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty))
    return ABIArgInfo::getIndirect(0);

  return (isPromotableIntegerType(Ty) ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

void MBlazeTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                                  CodeGen::CodeGenModule &M)
                                                  const {
  const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
  if (!FD) return;

  llvm::CallingConv::ID CC = llvm::CallingConv::C;
  if (FD->hasAttr<MBlazeInterruptHandlerAttr>())
    CC = llvm::CallingConv::MBLAZE_INTR;
  else if (FD->hasAttr<MBlazeSaveVolatilesAttr>())
    CC = llvm::CallingConv::MBLAZE_SVOL;

  if (CC != llvm::CallingConv::C) {
    // Handle 'interrupt_handler' attribute:
    llvm::Function *F = cast<llvm::Function>(GV);

    // Step 1: Set ISR calling convention.
    F->setCallingConv(CC);

    // Step 2: Add attributes goodness.
    F->addFnAttr(llvm::Attribute::NoInline);
  }

  // Step 3: Emit _interrupt_handler alias.
  if (CC == llvm::CallingConv::MBLAZE_INTR)
    new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
                          "_interrupt_handler", GV, &M.getModule());
}
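
// For illustration (example only): a handler declared with the MicroBlaze
// 'interrupt_handler' attribute gets the MBLAZE_INTR calling convention, is
// marked noinline, and an '_interrupt_handler' alias pointing at it is
// emitted.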

//===----------------------------------------------------------------------===//
// MSP430 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const;
};

}

void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                             CodeGen::CodeGenModule &M) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
      // Handle 'interrupt' attribute:
      llvm::Function *F = cast<llvm::Function>(GV);

      // Step 1: Set ISR calling convention.
      F->setCallingConv(llvm::CallingConv::MSP430_INTR);

      // Step 2: Add attributes goodness.
      F->addFnAttr(llvm::Attribute::NoInline);

      // Step 3: Emit ISR vector alias.
      unsigned Num = attr->getNumber() + 0xffe0;
      new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
                            "vector_" + llvm::Twine::utohexstr(Num),
                            GV, &M.getModule());
    }
  }
}
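
// For illustration (example only): an ISR declared as
//   void isr(void) __attribute__((interrupt(2)));
// receives the MSP430_INTR calling convention and an alias named after the
// vector address 0xffe0 + 2, i.e. something like 'vector_ffe2'.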

//===----------------------------------------------------------------------===//
// MIPS ABI Implementation.  This works for both little-endian and
// big-endian variants.
//===----------------------------------------------------------------------===//

namespace {
class MipsABIInfo : public ABIInfo {
public:
  MipsABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;
  virtual void computeInfo(CGFunctionInfo &FI) const;
  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  MIPSTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new MipsABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    return 29;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;
};
}

ABIArgInfo MipsABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    // Ignore empty aggregates.
    if (getContext().getTypeSize(Ty) == 0)
      return ABIArgInfo::getIgnore();

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy)) {
    if (RetTy->isAnyComplexType())
      return ABIArgInfo::getDirect();

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type);
}

llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                    CodeGenFunction &CGF) const {
  return 0;
}

bool
MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  // This information comes from gcc's implementation, which seems to be as
  // canonical as it gets.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;
  llvm::LLVMContext &Context = CGF.getLLVMContext();

  // Everything on MIPS is 4 bytes.  Double-precision FP registers
  // are aliased to pairs of single-precision FP registers.
  const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);

  // 0-31 are the general purpose registers, $0 - $31.
  // 32-63 are the floating-point registers, $f0 - $f31.
  // 64 and 65 are the multiply/divide registers, $hi and $lo.
  // 66 is the (notional, I think) register for signal-handler return.
  AssignToArrayRange(Builder, Address, Four8, 0, 65);

  // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
  // They are one bit wide and ignored here.

  // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
  // (coprocessor 1 is the FP unit)
  // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
  // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
  // 176-181 are the DSP accumulator registers.
  AssignToArrayRange(Builder, Address, Four8, 80, 181);

  return false;
}


const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  // For now we just cache the TargetCodeGenInfo in CodeGenModule and don't
  // free it.

  const llvm::Triple &Triple = getContext().Target.getTriple();
  switch (Triple.getArch()) {
  default:
    return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));

  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types));

  case llvm::Triple::arm:
  case llvm::Triple::thumb:
    {
      ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;

      if (strcmp(getContext().Target.getABI(), "apcs-gnu") == 0)
        Kind = ARMABIInfo::APCS;
      else if (CodeGenOpts.FloatABI == "hard")
        Kind = ARMABIInfo::AAPCS_VFP;

      return *(TheTargetCodeGenInfo = new ARMTargetCodeGenInfo(Types, Kind));
    }

  case llvm::Triple::ppc:
    return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));

  case llvm::Triple::ptx32:
  case llvm::Triple::ptx64:
    return *(TheTargetCodeGenInfo = new PTXTargetCodeGenInfo(Types));

  case llvm::Triple::systemz:
    return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types));

  case llvm::Triple::mblaze:
    return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));

  case llvm::Triple::x86: {
    bool DisableMMX = strcmp(getContext().Target.getABI(), "no-mmx") == 0;

    if (Triple.isOSDarwin())
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Types, true, true, DisableMMX));

    switch (Triple.getOS()) {
    case llvm::Triple::Cygwin:
    case llvm::Triple::MinGW32:
    case llvm::Triple::AuroraUX:
    case llvm::Triple::DragonFly:
    case llvm::Triple::FreeBSD:
    case llvm::Triple::OpenBSD:
    case llvm::Triple::NetBSD:
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Types, false, true, DisableMMX));

    default:
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Types, false, false, DisableMMX));
    }
  }

  case llvm::Triple::x86_64:
    switch (Triple.getOS()) {
    case llvm::Triple::Win32:
    case llvm::Triple::MinGW32:
    case llvm::Triple::Cygwin:
      return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
    default:
      return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types));
    }
  }
}