//===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ASTContext interface.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/ASTContext.h"
#include "CXXABI.h"
#include "Interp/Context.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Comment.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclContextInternals.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/RawCommentList.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/AST/VTableBuilder.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CommentOptions.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Linkage.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/NoSanitizeList.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/XRayLists.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/Capacity.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;

enum FloatingRank {
  BFloat16Rank,
  Float16Rank,
  HalfRank,
  FloatRank,
  DoubleRank,
  LongDoubleRank,
  Float128Rank,
  Ibm128Rank
};

/// \returns location that is relevant when searching for Doc comments related
/// to \p D.
static SourceLocation getDeclLocForCommentSearch(const Decl *D,
                                                 SourceManager &SourceMgr) {
  assert(D);

  // User can not attach documentation to implicit declarations.
  if (D->isImplicit())
    return {};

  // User can not attach documentation to implicit instantiations.
  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *VD = dyn_cast<VarDecl>(D)) {
    if (VD->isStaticDataMember() &&
        VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
    if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
    TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
    if (TSK == TSK_ImplicitInstantiation ||
        TSK == TSK_Undeclared)
      return {};
  }

  if (const auto *ED = dyn_cast<EnumDecl>(D)) {
    if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }
  if (const auto *TD = dyn_cast<TagDecl>(D)) {
    // When a tag declaration (but not definition!) is part of the
    // decl-specifier-seq of some other declaration, it doesn't get a comment.
    if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
      return {};
  }
  // TODO: handle comments for function parameters properly.
  if (isa<ParmVarDecl>(D))
    return {};

  // TODO: we could look up template parameter documentation in the template
  // documentation.
  if (isa<TemplateTypeParmDecl>(D) ||
      isa<NonTypeTemplateParmDecl>(D) ||
      isa<TemplateTemplateParmDecl>(D))
    return {};

  // Find declaration location.
  // For Objective-C declarations we generally don't expect to have multiple
  // declarators, thus use declaration starting location as the "declaration
  // location".
  // For all other declarations multiple declarators are used quite frequently,
  // so we use the location of the identifier as the "declaration location".
  if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
      isa<ObjCPropertyDecl>(D) ||
      isa<RedeclarableTemplateDecl>(D) ||
      isa<ClassTemplateSpecializationDecl>(D) ||
      // Allow association with Y across {} in `typedef struct X {} Y`.
      isa<TypedefDecl>(D))
    return D->getBeginLoc();

  const SourceLocation DeclLoc = D->getLocation();
  if (DeclLoc.isMacroID()) {
    if (isa<TypedefDecl>(D)) {
      // If the location of the typedef name is in a macro, it is because it is
      // declared via a macro. Try using the declaration's starting location as
      // the "declaration location".
      return D->getBeginLoc();
    }

    if (const auto *TD = dyn_cast<TagDecl>(D)) {
      // If location of the tag decl is inside a macro, but the spelling of
      // the tag name comes from a macro argument, it looks like a special
      // macro like NS_ENUM is being used to define the tag decl.  In that
      // case, adjust the source location to the expansion loc so that we can
      // attach the comment to the tag decl.
      if (SourceMgr.isMacroArgExpansion(DeclLoc) && TD->isCompleteDefinition())
        return SourceMgr.getExpansionLoc(DeclLoc);
    }
  }

  return DeclLoc;
}

RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(
    const Decl *D, const SourceLocation RepresentativeLocForDecl,
    const std::map<unsigned, RawComment *> &CommentsInTheFile) const {
  // If the declaration doesn't map directly to a location in a file, we
  // can't find the comment.
  if (RepresentativeLocForDecl.isInvalid() ||
      !RepresentativeLocForDecl.isFileID())
    return nullptr;

  // If there are no comments anywhere, we won't find anything.
  if (CommentsInTheFile.empty())
    return nullptr;

  // Decompose the location for the declaration and find the beginning of the
  // file buffer.
  const std::pair<FileID, unsigned> DeclLocDecomp =
      SourceMgr.getDecomposedLoc(RepresentativeLocForDecl);

  // Slow path.
  auto OffsetCommentBehindDecl =
      CommentsInTheFile.lower_bound(DeclLocDecomp.second);

  // First check whether we have a trailing comment.
  if (OffsetCommentBehindDecl != CommentsInTheFile.end()) {
    RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second;
    if ((CommentBehindDecl->isDocumentation() ||
         LangOpts.CommentOpts.ParseAllComments) &&
        CommentBehindDecl->isTrailingComment() &&
        (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
         isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {

      // Check that Doxygen trailing comment comes after the declaration, starts
      // on the same line and in the same file as the declaration.
      if (SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) ==
          Comments.getCommentBeginLine(CommentBehindDecl, DeclLocDecomp.first,
                                       OffsetCommentBehindDecl->first)) {
        return CommentBehindDecl;
      }
    }
  }

  // The comment just after the declaration was not a trailing comment.
  // Let's look at the previous comment.
  if (OffsetCommentBehindDecl == CommentsInTheFile.begin())
    return nullptr;

  auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl;
  RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second;

  // Check that we actually have a non-member Doxygen comment.
  if (!(CommentBeforeDecl->isDocumentation() ||
        LangOpts.CommentOpts.ParseAllComments) ||
      CommentBeforeDecl->isTrailingComment())
    return nullptr;

  // Decompose the end of the comment.
  const unsigned CommentEndOffset =
      Comments.getCommentEndOffset(CommentBeforeDecl);

  // Get the corresponding buffer.
  bool Invalid = false;
  const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
                                               &Invalid).data();
  if (Invalid)
    return nullptr;

  // Extract text between the comment and declaration.
  StringRef Text(Buffer + CommentEndOffset,
                 DeclLocDecomp.second - CommentEndOffset);

  // There should be no other declarations or preprocessor directives between
  // comment and declaration.
  if (Text.find_first_of(";{}#@") != StringRef::npos)
    return nullptr;

  return CommentBeforeDecl;
}

RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
  const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr);

  // If the declaration doesn't map directly to a location in a file, we
  // can't find the comment.
  if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
    return nullptr;

  if (ExternalSource && !CommentsLoaded) {
    ExternalSource->ReadComments();
    CommentsLoaded = true;
  }

  if (Comments.empty())
    return nullptr;

  const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first;
  const auto CommentsInThisFile = Comments.getCommentsInFile(File);
  if (!CommentsInThisFile || CommentsInThisFile->empty())
    return nullptr;

  return getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile);
}

void ASTContext::addComment(const RawComment &RC) {
  assert(LangOpts.RetainCommentsFromSystemHeaders ||
         !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
  Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc);
}

/// If we have a 'templated' declaration for a template, adjust 'D' to
/// refer to the actual template.
/// If we have an implicit instantiation, adjust 'D' to refer to template.
static const Decl &adjustDeclToTemplate(const Decl &D) {
  if (const auto *FD = dyn_cast<FunctionDecl>(&D)) {
    // Is this function declaration part of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
      return *FTD;

    // Nothing to do if function is not an implicit instantiation.
    if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
      return D;

    // Function is an implicit instantiation of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
      return *FTD;

    // Function is instantiated from a member definition of a class template?
    if (const FunctionDecl *MemberDecl =
            FD->getInstantiatedFromMemberFunction())
      return *MemberDecl;

    return D;
  }
  if (const auto *VD = dyn_cast<VarDecl>(&D)) {
    // Static data member is instantiated from a member definition of a class
    // template?
    if (VD->isStaticDataMember())
      if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
        return *MemberDecl;

    return D;
  }
  if (const auto *CRD = dyn_cast<CXXRecordDecl>(&D)) {
    // Is this class declaration part of a class template?
    if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
      return *CTD;

    // Class is an implicit instantiation of a class template or partial
    // specialization?
    if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
      if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
        return D;
      llvm::PointerUnion<ClassTemplateDecl *,
                         ClassTemplatePartialSpecializationDecl *>
          PU = CTSD->getSpecializedTemplateOrPartial();
      return PU.is<ClassTemplateDecl *>()
                 ? *static_cast<const Decl *>(PU.get<ClassTemplateDecl *>())
                 : *static_cast<const Decl *>(
                       PU.get<ClassTemplatePartialSpecializationDecl *>());
    }

    // Class is instantiated from a member definition of a class template?
    if (const MemberSpecializationInfo *Info =
            CRD->getMemberSpecializationInfo())
      return *Info->getInstantiatedFrom();

    return D;
  }
  if (const auto *ED = dyn_cast<EnumDecl>(&D)) {
    // Enum is instantiated from a member definition of a class template?
    if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
      return *MemberDecl;

    return D;
  }
  // FIXME: Adjust alias templates?
  return D;
}

const RawComment *ASTContext::getRawCommentForAnyRedecl(
    const Decl *D,
    const Decl **OriginalDecl) const {
  if (!D) {
    if (OriginalDecl)
      OriginalDecl = nullptr;
    return nullptr;
  }

  D = &adjustDeclToTemplate(*D);

  // Any comment directly attached to D?
  {
    auto DeclComment = DeclRawComments.find(D);
    if (DeclComment != DeclRawComments.end()) {
      if (OriginalDecl)
        *OriginalDecl = D;
      return DeclComment->second;
    }
  }

  // Any comment attached to any redeclaration of D?
  const Decl *CanonicalD = D->getCanonicalDecl();
  if (!CanonicalD)
    return nullptr;

  {
    auto RedeclComment = RedeclChainComments.find(CanonicalD);
    if (RedeclComment != RedeclChainComments.end()) {
      if (OriginalDecl)
        *OriginalDecl = RedeclComment->second;
      auto CommentAtRedecl = DeclRawComments.find(RedeclComment->second);
      assert(CommentAtRedecl != DeclRawComments.end() &&
             "This decl is supposed to have comment attached.");
      return CommentAtRedecl->second;
    }
  }

  // Any redeclarations of D that we haven't checked for comments yet?
  // We can't use DenseMap::iterator directly since it'd get invalidated.
  auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * {
    auto LookupRes = CommentlessRedeclChains.find(CanonicalD);
    if (LookupRes != CommentlessRedeclChains.end())
      return LookupRes->second;
    return nullptr;
  }();

  for (const auto Redecl : D->redecls()) {
    assert(Redecl);
    // Skip all redeclarations that have been checked previously.
    if (LastCheckedRedecl) {
      if (LastCheckedRedecl == Redecl) {
        LastCheckedRedecl = nullptr;
      }
      continue;
    }
    const RawComment *RedeclComment = getRawCommentForDeclNoCache(Redecl);
    if (RedeclComment) {
      cacheRawCommentForDecl(*Redecl, *RedeclComment);
      if (OriginalDecl)
        *OriginalDecl = Redecl;
      return RedeclComment;
    }
    CommentlessRedeclChains[CanonicalD] = Redecl;
  }

  if (OriginalDecl)
    *OriginalDecl = nullptr;
  return nullptr;
}

void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD,
                                        const RawComment &Comment) const {
  assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
  DeclRawComments.try_emplace(&OriginalD, &Comment);
  const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl();
  RedeclChainComments.try_emplace(CanonicalDecl, &OriginalD);
  CommentlessRedeclChains.erase(CanonicalDecl);
}

static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
                                 SmallVectorImpl<const NamedDecl *> &Redeclared) {
  const DeclContext *DC = ObjCMethod->getDeclContext();
  if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) {
    const ObjCInterfaceDecl *ID = IMD->getClassInterface();
    if (!ID)
      return;
    // Add redeclared method here.
    for (const auto *Ext : ID->known_extensions()) {
      if (ObjCMethodDecl *RedeclaredMethod =
              Ext->getMethod(ObjCMethod->getSelector(),
                             ObjCMethod->isInstanceMethod()))
        Redeclared.push_back(RedeclaredMethod);
    }
  }
}

void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,
                                                 const Preprocessor *PP) {
  if (Comments.empty() || Decls.empty())
    return;

  FileID File;
  for (Decl *D : Decls) {
    SourceLocation Loc = D->getLocation();
    if (Loc.isValid()) {
      // See if there are any new comments that are not attached to a decl.
      // The location doesn't have to be precise - we care only about the file.
      File = SourceMgr.getDecomposedLoc(Loc).first;
      break;
    }
  }

  if (File.isInvalid())
    return;

  auto CommentsInThisFile = Comments.getCommentsInFile(File);
  if (!CommentsInThisFile || CommentsInThisFile->empty() ||
      CommentsInThisFile->rbegin()->second->isAttached())
    return;

  // There is at least one comment not attached to a decl.
  // Maybe it should be attached to one of Decls?
  //
  // Note that this way we pick up not only comments that precede the
  // declaration, but also comments that *follow* the declaration -- thanks to
  // the lookahead in the lexer: we've consumed the semicolon and looked
  // ahead through comments.

  for (const Decl *D : Decls) {
    assert(D);
    if (D->isInvalidDecl())
      continue;

    D = &adjustDeclToTemplate(*D);

    const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr);

    if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
      continue;

    if (DeclRawComments.count(D) > 0)
      continue;

    if (RawComment *const DocComment =
            getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile)) {
      cacheRawCommentForDecl(*D, *DocComment);
      comments::FullComment *FC = DocComment->parse(*this, PP, D);
      ParsedComments[D->getCanonicalDecl()] = FC;
    }
  }
}

comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
                                                    const Decl *D) const {
  auto *ThisDeclInfo = new (*this) comments::DeclInfo;
  ThisDeclInfo->CommentDecl = D;
  ThisDeclInfo->IsFilled = false;
  ThisDeclInfo->fill();
  ThisDeclInfo->CommentDecl = FC->getDecl();
  if (!ThisDeclInfo->TemplateParameters)
    ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
  comments::FullComment *CFC =
      new (*this) comments::FullComment(FC->getBlocks(),
                                        ThisDeclInfo);
  return CFC;
}

comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
  const RawComment *RC = getRawCommentForDeclNoCache(D);
  return RC ? RC->parse(*this, nullptr, D) : nullptr;
}

comments::FullComment *ASTContext::getCommentForDecl(
    const Decl *D,
    const Preprocessor *PP) const {
  if (!D || D->isInvalidDecl())
    return nullptr;
  D = &adjustDeclToTemplate(*D);

  const Decl *Canonical = D->getCanonicalDecl();
  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
      ParsedComments.find(Canonical);

  if (Pos != ParsedComments.end()) {
    if (Canonical != D) {
      comments::FullComment *FC = Pos->second;
      comments::FullComment *CFC = cloneFullComment(FC, D);
      return CFC;
    }
    return Pos->second;
  }

  const Decl *OriginalDecl = nullptr;

  const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
  if (!RC) {
    if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
      SmallVector<const NamedDecl*, 8> Overridden;
      const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
      if (OMD && OMD->isPropertyAccessor())
        if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
          if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
            return cloneFullComment(FC, D);
      if (OMD)
        addRedeclaredMethods(OMD, Overridden);
      getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
      for (unsigned i = 0, e = Overridden.size(); i < e; i++)
        if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
      // Attach any tag type's documentation to its typedef if the latter
      // does not have one of its own.
      QualType QT = TD->getUnderlyingType();
      if (const auto *TT = QT->getAs<TagType>())
        if (const Decl *TD = TT->getDecl())
          if (comments::FullComment *FC = getCommentForDecl(TD, PP))
            return cloneFullComment(FC, D);
    }
    else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
      while (IC->getSuperClass()) {
        IC = IC->getSuperClass();
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
      }
    }
    else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) {
      if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
      if (!(RD = RD->getDefinition()))
        return nullptr;
      // Check non-virtual bases.
      for (const auto &I : RD->bases()) {
        if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(NonVirtualBase = NonVirtualBase->getDefinition()))
            continue;

          if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
      // Check virtual bases.
      for (const auto &I : RD->vbases()) {
        if (I.getAccessSpecifier() != AS_public)
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(VirtualBase = VirtualBase->getDefinition()))
            continue;
          if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
    }
    return nullptr;
  }

  // If the RawComment was attached to another redeclaration of this Decl, we
  // should parse the comment in the context of that other Decl. This is important
  // because comments can contain references to parameter names which can be
  // different across redeclarations.
  if (D != OriginalDecl && OriginalDecl)
    return getCommentForDecl(OriginalDecl, PP);

  comments::FullComment *FC = RC->parse(*this, PP, D);
  ParsedComments[Canonical] = FC;
  return FC;
}

void
ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
                                                   const ASTContext &C,
                                                   TemplateTemplateParmDecl *Parm) {
  ID.AddInteger(Parm->getDepth());
  ID.AddInteger(Parm->getPosition());
  ID.AddBoolean(Parm->isParameterPack());

  TemplateParameterList *Params = Parm->getTemplateParameters();
  ID.AddInteger(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                             PEnd = Params->end();
       P != PEnd; ++P) {
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      ID.AddInteger(0);
      ID.AddBoolean(TTP->isParameterPack());
      const TypeConstraint *TC = TTP->getTypeConstraint();
      ID.AddBoolean(TC != nullptr);
      if (TC)
        TC->getImmediatelyDeclaredConstraint()->Profile(ID, C,
                                                        /*Canonical=*/true);
      if (TTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(TTP->getNumExpansionParameters());
      } else
        ID.AddBoolean(false);
      continue;
    }

    if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      ID.AddInteger(1);
      ID.AddBoolean(NTTP->isParameterPack());
      ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr());
      if (NTTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(NTTP->getNumExpansionTypes());
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          QualType T = NTTP->getExpansionType(I);
          ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
        }
      } else
        ID.AddBoolean(false);
      continue;
    }

    auto *TTP = cast<TemplateTemplateParmDecl>(*P);
    ID.AddInteger(2);
    Profile(ID, C, TTP);
  }
  Expr *RequiresClause = Parm->getTemplateParameters()->getRequiresClause();
  ID.AddBoolean(RequiresClause != nullptr);
  if (RequiresClause)
    RequiresClause->Profile(ID, C, /*Canonical=*/true);
}

static Expr *
canonicalizeImmediatelyDeclaredConstraint(const ASTContext &C, Expr *IDC,
                                          QualType ConstrainedType) {
  // This is a bit ugly - we need to form a new immediately-declared
  // constraint that references the new parameter; this would ideally
  // require semantic analysis (e.g. template<C T> struct S {}; - the
  // converted arguments of C<T> could be an argument pack if C is
  // declared as template<typename... T> concept C = ...).
  // We don't have semantic analysis here so we dig deep into the
  // ready-made constraint expr and change the thing manually.
  ConceptSpecializationExpr *CSE;
  if (const auto *Fold = dyn_cast<CXXFoldExpr>(IDC))
    CSE = cast<ConceptSpecializationExpr>(Fold->getLHS());
  else
    CSE = cast<ConceptSpecializationExpr>(IDC);
  ArrayRef<TemplateArgument> OldConverted = CSE->getTemplateArguments();
  SmallVector<TemplateArgument, 3> NewConverted;
  NewConverted.reserve(OldConverted.size());
  if (OldConverted.front().getKind() == TemplateArgument::Pack) {
    // The case:
    //   template<typename... T> concept C = true;
    //   template<C<int> T> struct S; -> constraint is C<{T, int}>
    NewConverted.push_back(ConstrainedType);
    llvm::append_range(NewConverted,
                       OldConverted.front().pack_elements().drop_front(1));
    TemplateArgument NewPack(NewConverted);

    NewConverted.clear();
    NewConverted.push_back(NewPack);
    assert(OldConverted.size() == 1 &&
           "Template parameter pack should be the last parameter");
  } else {
    assert(OldConverted.front().getKind() == TemplateArgument::Type &&
           "Unexpected first argument kind for immediately-declared "
           "constraint");
    NewConverted.push_back(ConstrainedType);
    llvm::append_range(NewConverted, OldConverted.drop_front(1));
  }
  Expr *NewIDC = ConceptSpecializationExpr::Create(
      C, CSE->getNamedConcept(), NewConverted, nullptr,
      CSE->isInstantiationDependent(), CSE->containsUnexpandedParameterPack());

  if (auto *OrigFold = dyn_cast<CXXFoldExpr>(IDC))
    NewIDC = new (C) CXXFoldExpr(
        OrigFold->getType(), /*Callee*/nullptr, SourceLocation(), NewIDC,
        BinaryOperatorKind::BO_LAnd, SourceLocation(), /*RHS=*/nullptr,
        SourceLocation(), /*NumExpansions=*/None);
  return NewIDC;
}

TemplateTemplateParmDecl *
ASTContext::getCanonicalTemplateTemplateParmDecl(
    TemplateTemplateParmDecl *TTP) const {
  // Check if we already have a canonical template template parameter.
  llvm::FoldingSetNodeID ID;
  CanonicalTemplateTemplateParm::Profile(ID, *this, TTP);
  void *InsertPos = nullptr;
  CanonicalTemplateTemplateParm *Canonical
    = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  if (Canonical)
    return Canonical->getParam();

  // Build a canonical template parameter list.
  TemplateParameterList *Params = TTP->getTemplateParameters();
  SmallVector<NamedDecl *, 4> CanonParams;
  CanonParams.reserve(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                             PEnd = Params->end();
       P != PEnd; ++P) {
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(*this,
          getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
          TTP->getDepth(), TTP->getIndex(), nullptr, false,
          TTP->isParameterPack(), TTP->hasTypeConstraint(),
          TTP->isExpandedParameterPack() ?
              llvm::Optional<unsigned>(TTP->getNumExpansionParameters()) : None);
      if (const auto *TC = TTP->getTypeConstraint()) {
        QualType ParamAsArgument(NewTTP->getTypeForDecl(), 0);
        Expr *NewIDC = canonicalizeImmediatelyDeclaredConstraint(
            *this, TC->getImmediatelyDeclaredConstraint(),
            ParamAsArgument);
        TemplateArgumentListInfo CanonArgsAsWritten;
        if (auto *Args = TC->getTemplateArgsAsWritten())
          for (const auto &ArgLoc : Args->arguments())
            CanonArgsAsWritten.addArgument(
                TemplateArgumentLoc(ArgLoc.getArgument(),
                                    TemplateArgumentLocInfo()));
        NewTTP->setTypeConstraint(
            NestedNameSpecifierLoc(),
            DeclarationNameInfo(TC->getNamedConcept()->getDeclName(),
                                SourceLocation()), /*FoundDecl=*/nullptr,
            // Actually canonicalizing a TemplateArgumentLoc is difficult so we
            // simply omit the ArgsAsWritten
            TC->getNamedConcept(), /*ArgsAsWritten=*/nullptr, NewIDC);
      }
      CanonParams.push_back(NewTTP);
    } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      QualType T = getCanonicalType(NTTP->getType());
      TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
      NonTypeTemplateParmDecl *Param;
      if (NTTP->isExpandedParameterPack()) {
        SmallVector<QualType, 2> ExpandedTypes;
        SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
          ExpandedTInfos.push_back(
              getTrivialTypeSourceInfo(ExpandedTypes.back()));
        }

        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                TInfo,
                                                ExpandedTypes,
                                                ExpandedTInfos);
      } else {
        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                NTTP->isParameterPack(),
                                                TInfo);
      }
      if (AutoType *AT = T->getContainedAutoType()) {
        if (AT->isConstrained()) {
          Param->setPlaceholderTypeConstraint(
              canonicalizeImmediatelyDeclaredConstraint(
                  *this, NTTP->getPlaceholderTypeConstraint(), T));
        }
      }
      CanonParams.push_back(Param);

    } else
      CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
          cast<TemplateTemplateParmDecl>(*P)));
  }

  Expr *CanonRequiresClause = nullptr;
  if (Expr *RequiresClause = TTP->getTemplateParameters()->getRequiresClause())
    CanonRequiresClause = RequiresClause;

  TemplateTemplateParmDecl *CanonTTP
    = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                       SourceLocation(), TTP->getDepth(),
                                       TTP->getPosition(),
                                       TTP->isParameterPack(),
                                       nullptr,
                                       TemplateParameterList::Create(*this, SourceLocation(),
                                                                     SourceLocation(),
                                                                     CanonParams,
                                                                     SourceLocation(),
                                                                     CanonRequiresClause));

  // Get the new insert position for the node we care about.
  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  assert(!Canonical && "Shouldn't be in the map!");
  (void)Canonical;

  // Create the canonical template template parameter entry.
  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
  CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
  return CanonTTP;
}

TargetCXXABI::Kind ASTContext::getCXXABIKind() const {
  auto Kind = getTargetInfo().getCXXABI().getKind();
  return getLangOpts().CXXABI.getValueOr(Kind);
}

CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
  if (!LangOpts.CPlusPlus) return nullptr;

  switch (getCXXABIKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericARM: // Same as Itanium at this level
  case TargetCXXABI::iOS:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::XL:
    return CreateItaniumCXXABI(*this);
  case TargetCXXABI::Microsoft:
    return CreateMicrosoftCXXABI(*this);
  }
  llvm_unreachable("Invalid CXXABI type!");
}

interp::Context &ASTContext::getInterpContext() {
  if (!InterpContext) {
    InterpContext.reset(new interp::Context(*this));
  }
  return *InterpContext.get();
}

ParentMapContext &ASTContext::getParentMapContext() {
  if (!ParentMapCtx)
    ParentMapCtx.reset(new ParentMapContext(*this));
  return *ParentMapCtx.get();
}

static const LangASMap *getAddressSpaceMap(const TargetInfo &T,
                                           const LangOptions &LOpts) {
  if (LOpts.FakeAddressSpaceMap) {
    // The fake address space map must have a distinct entry for each
    // language-specific address space.
    static const unsigned FakeAddrSpaceMap[] = {
        0,  // Default
        1,  // opencl_global
        3,  // opencl_local
        2,  // opencl_constant
        0,  // opencl_private
        4,  // opencl_generic
        5,  // opencl_global_device
        6,  // opencl_global_host
        7,  // cuda_device
        8,  // cuda_constant
        9,  // cuda_shared
        1,  // sycl_global
        5,  // sycl_global_device
        6,  // sycl_global_host
        3,  // sycl_local
        0,  // sycl_private
        10, // ptr32_sptr
        11, // ptr32_uptr
        12  // ptr64
    };
    return &FakeAddrSpaceMap;
  } else {
    return &T.getAddressSpaceMap();
  }
}

static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
                                          const LangOptions &LangOpts) {
  switch (LangOpts.getAddressSpaceMapMangling()) {
  case LangOptions::ASMM_Target:
    return TI.useAddressSpaceMapMangling();
  case LangOptions::ASMM_On:
    return true;
  case LangOptions::ASMM_Off:
    return false;
  }
  llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
}

ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
                       IdentifierTable &idents, SelectorTable &sels,
                       Builtin::Context &builtins, TranslationUnitKind TUKind)
    : ConstantArrayTypes(this_(), ConstantArrayTypesLog2InitSize),
      FunctionProtoTypes(this_(), FunctionProtoTypesLog2InitSize),
      TemplateSpecializationTypes(this_()),
      DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()),
      SubstTemplateTemplateParmPacks(this_()),
      CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
      NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
      XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
                                        LangOpts.XRayNeverInstrumentFiles,
                                        LangOpts.XRayAttrListFiles, SM)),
      ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)),
      PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
      BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this),
      Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
      CompCategories(this_()), LastSDM(nullptr, 0) {
  addTranslationUnitDecl();
}

void ASTContext::cleanup() {
  // Release the DenseMaps associated with DeclContext objects.
  // FIXME: Is this the ideal solution?
  ReleaseDeclContextMaps();

  // Call all of the deallocation functions on all of their targets.
  for (auto &Pair : Deallocations)
    (Pair.first)(Pair.second);
  Deallocations.clear();

  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
  // because they can contain DenseMaps.
  for (llvm::DenseMap<const ObjCContainerDecl*,
                      const ASTRecordLayout*>::iterator
       I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);
  ObjCLayouts.clear();

  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
       I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);
  }
  ASTRecordLayouts.clear();

  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
       AEnd = DeclAttrs.end();
       A != AEnd; ++A)
    A->second->~AttrVec();
  DeclAttrs.clear();

  for (const auto &Value : ModuleInitializers)
    Value.second->~PerModuleInitializers();
  ModuleInitializers.clear();
}

ASTContext::~ASTContext() { cleanup(); }

void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
  TraversalScope = TopLevelDecls;
  getParentMapContext().clear();
}

void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
  Deallocations.push_back({Callback, Data});
}

void
ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
  ExternalSource = std::move(Source);
}

void ASTContext::PrintStats() const {
  llvm::errs() << "\n*** AST Context Stats:\n";
  llvm::errs() << "  " << Types.size() << " types total.\n";

  unsigned counts[] = {
#define TYPE(Name, Parent) 0,
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"
      0 // Extra
  };

  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
    Type *T = Types[i];
    counts[(unsigned)T->getTypeClass()]++;
  }

  unsigned Idx = 0;
  unsigned TotalBytes = 0;
#define TYPE(Name, Parent)                                               \
  if (counts[Idx])                                                       \
    llvm::errs() << "    " << counts[Idx] << " " << #Name                \
                 << " types, " << sizeof(Name##Type) << " each "         \
                 << "(" << counts[Idx] * sizeof(Name##Type)              \
                 << " bytes)\n";                                         \
  TotalBytes += counts[Idx] * sizeof(Name##Type);                        \
  ++Idx;
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"

  llvm::errs() << "Total bytes = " << TotalBytes << "\n";

  // Implicit special member functions.
  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
               << NumImplicitDefaultConstructors
               << " implicit default constructors created\n";
  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
               << NumImplicitCopyConstructors
               << " implicit copy constructors created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
                 << NumImplicitMoveConstructors
                 << " implicit move constructors created\n";
  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
               << NumImplicitCopyAssignmentOperators
               << " implicit copy assignment operators created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
                 << NumImplicitMoveAssignmentOperators
                 << " implicit move assignment operators created\n";
  llvm::errs() << NumImplicitDestructorsDeclared << "/"
               << NumImplicitDestructors
               << " implicit destructors created\n";

  if (ExternalSource) {
    llvm::errs() << "\n";
    ExternalSource->PrintStats();
  }

  BumpAlloc.PrintStats();
}

void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
                                           bool NotifyListeners) {
  if (NotifyListeners)
    if (auto *Listener = getASTMutationListener())
      Listener->RedefinedHiddenDefinition(ND, M);

  MergedDefModules[cast<NamedDecl>(ND->getCanonicalDecl())].push_back(M);
}

void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) {
  auto It = MergedDefModules.find(cast<NamedDecl>(ND->getCanonicalDecl()));
  if (It == MergedDefModules.end())
    return;

  auto &Merged = It->second;
  llvm::DenseSet<Module*> Found;
  for (Module *&M : Merged)
    if (!Found.insert(M).second)
      M = nullptr;
  llvm::erase_value(Merged, nullptr);
}

ArrayRef<Module *>
ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) {
  auto MergedIt =
      MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl()));
  if (MergedIt == MergedDefModules.end())
    return None;
  return MergedIt->second;
}

void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
  if (LazyInitializers.empty())
    return;

  auto *Source = Ctx.getExternalSource();
  assert(Source && "lazy initializers but no external source");

  auto LazyInits = std::move(LazyInitializers);
  LazyInitializers.clear();

  for (auto ID : LazyInits)
    Initializers.push_back(Source->GetExternalDecl(ID));

  assert(LazyInitializers.empty() &&
         "GetExternalDecl for lazy module initializer added more inits");
}

void ASTContext::addModuleInitializer(Module *M, Decl *D) {
  // One special case: if we add a module initializer that imports another
  // module, and that module's only initializer is an ImportDecl, simplify.
  if (const auto *ID = dyn_cast<ImportDecl>(D)) {
    auto It = ModuleInitializers.find(ID->getImportedModule());

    // Maybe the ImportDecl does nothing at all. (Common case.)
    if (It == ModuleInitializers.end())
      return;

    // Maybe the ImportDecl only imports another ImportDecl.
    auto &Imported = *It->second;
    if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
      Imported.resolve(*this);
      auto *OnlyDecl = Imported.Initializers.front();
      if (isa<ImportDecl>(OnlyDecl))
        D = OnlyDecl;
    }
  }

  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->Initializers.push_back(D);
}

void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs) {
  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->LazyInitializers.insert(Inits->LazyInitializers.end(),
                                 IDs.begin(), IDs.end());
}

ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) {
  auto It = ModuleInitializers.find(M);
  if (It == ModuleInitializers.end())
    return None;

  auto *Inits = It->second;
  Inits->resolve(*this);
  return Inits->Initializers;
}

ExternCContextDecl *ASTContext::getExternCContextDecl() const {
  if (!ExternCContext)
    ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());

  return ExternCContext;
}

BuiltinTemplateDecl *
ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK,
                                     const IdentifierInfo *II) const {
  auto *BuiltinTemplate =
      BuiltinTemplateDecl::Create(*this, getTranslationUnitDecl(), II, BTK);
  BuiltinTemplate->setImplicit();
  getTranslationUnitDecl()->addDecl(BuiltinTemplate);

  return BuiltinTemplate;
}

BuiltinTemplateDecl *
ASTContext::getMakeIntegerSeqDecl() const {
  if (!MakeIntegerSeqDecl)
    MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq,
                                                  getMakeIntegerSeqName());
  return MakeIntegerSeqDecl;
}

BuiltinTemplateDecl *
ASTContext::getTypePackElementDecl() const {
  if (!TypePackElementDecl)
    TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element,
                                                   getTypePackElementName());
  return TypePackElementDecl;
}

RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
                                            RecordDecl::TagKind TK) const {
  SourceLocation Loc;
  RecordDecl *NewDecl;
  if (getLangOpts().CPlusPlus)
    NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc,
                                    Loc, &Idents.get(Name));
  else
    NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc,
                                 &Idents.get(Name));
  NewDecl->setImplicit();
  NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit(
      const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default));
  return NewDecl;
}

TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
                                              StringRef Name) const {
  TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
  TypedefDecl *NewDecl = TypedefDecl::Create(
      const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
      SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
  NewDecl->setImplicit();
  return NewDecl;
}

TypedefDecl *ASTContext::getInt128Decl() const {
  if (!Int128Decl)
    Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
  return Int128Decl;
}

TypedefDecl *ASTContext::getUInt128Decl() const {
  if (!UInt128Decl)
    UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
  return UInt128Decl;
}

void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
  auto *Ty = new (*this, TypeAlignment) BuiltinType(K);
  R = CanQualType::CreateUnsafe(QualType(Ty, 0));
  Types.push_back(Ty);
}

void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
                                  const TargetInfo *AuxTarget) {
  assert((!this->Target || this->Target == &Target) &&
         "Incorrect target reinitialization");
  assert(VoidTy.isNull() && "Context reinitialized?");

  this->Target = &Target;
  this->AuxTarget = AuxTarget;

  ABI.reset(createCXXABI(Target));
  AddrSpaceMap = getAddressSpaceMap(Target, LangOpts);
  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);

  // C99 6.2.5p19.
  InitBuiltinType(VoidTy, BuiltinType::Void);

  // C99 6.2.5p2.
  InitBuiltinType(BoolTy, BuiltinType::Bool);
  // C99 6.2.5p3.
  if (LangOpts.CharIsSigned)
    InitBuiltinType(CharTy, BuiltinType::Char_S);
  else
    InitBuiltinType(CharTy, BuiltinType::Char_U);
  // C99 6.2.5p4.
  InitBuiltinType(SignedCharTy, BuiltinType::SChar);
  InitBuiltinType(ShortTy, BuiltinType::Short);
  InitBuiltinType(IntTy, BuiltinType::Int);
  InitBuiltinType(LongTy, BuiltinType::Long);
  InitBuiltinType(LongLongTy, BuiltinType::LongLong);

  // C99 6.2.5p6.
  InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
  InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
  InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
  InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
  InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);

  // C99 6.2.5p10.
  InitBuiltinType(FloatTy, BuiltinType::Float);
  InitBuiltinType(DoubleTy, BuiltinType::Double);
  InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);

  // GNU extension, __float128 for IEEE quadruple precision
  InitBuiltinType(Float128Ty, BuiltinType::Float128);

  // __ibm128 for IBM extended precision
  InitBuiltinType(Ibm128Ty, BuiltinType::Ibm128);

  // C11 extension ISO/IEC TS 18661-3
  InitBuiltinType(Float16Ty, BuiltinType::Float16);

  // ISO/IEC JTC1 SC22 WG14 N1169 Extension
  InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum);
  InitBuiltinType(AccumTy, BuiltinType::Accum);
  InitBuiltinType(LongAccumTy, BuiltinType::LongAccum);
  InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum);
  InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum);
  InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum);
  InitBuiltinType(ShortFractTy, BuiltinType::ShortFract);
  InitBuiltinType(FractTy, BuiltinType::Fract);
  InitBuiltinType(LongFractTy, BuiltinType::LongFract);
  InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract);
  InitBuiltinType(UnsignedFractTy, BuiltinType::UFract);
  InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract);
  InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum);
  InitBuiltinType(SatAccumTy, BuiltinType::SatAccum);
  InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum);
  InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum);
  InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum);
  InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum);
  InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract);
  InitBuiltinType(SatFractTy, BuiltinType::SatFract);
  InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract);
  InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract);
  InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract);
  InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract);

  // GNU extension, 128-bit integers.
  InitBuiltinType(Int128Ty, BuiltinType::Int128);
  InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);

  // C++ 3.9.1p5
  if (TargetInfo::isTypeSigned(Target.getWCharType()))
    InitBuiltinType(WCharTy, BuiltinType::WChar_S);
  else // -fshort-wchar makes wchar_t be unsigned.
    InitBuiltinType(WCharTy, BuiltinType::WChar_U);
  if (LangOpts.CPlusPlus && LangOpts.WChar)
    WideCharTy = WCharTy;
  else {
    // C99 (or C++ using -fno-wchar).
    WideCharTy = getFromTargetType(Target.getWCharType());
  }

  WIntTy = getFromTargetType(Target.getWIntType());

  // C++20 (proposed)
  InitBuiltinType(Char8Ty, BuiltinType::Char8);

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char16Ty, BuiltinType::Char16);
  else // C99
    Char16Ty = getFromTargetType(Target.getChar16Type());

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char32Ty, BuiltinType::Char32);
  else // C99
    Char32Ty = getFromTargetType(Target.getChar32Type());

  // Placeholder type for type-dependent expressions whose type is
  // completely unknown. No code should ever check a type against
  // DependentTy and users should never see it; however, it is here to
  // help diagnose failures to properly check for type-dependent
  // expressions.
  InitBuiltinType(DependentTy, BuiltinType::Dependent);

  // Placeholder type for functions.
  InitBuiltinType(OverloadTy, BuiltinType::Overload);

  // Placeholder type for bound members.
  InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);

  // Placeholder type for pseudo-objects.
  InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);

  // "any" type; useful for debugger-like clients.
  InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);

  // Placeholder type for unbridged ARC casts.
  InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);

  // Placeholder type for builtin functions.
  InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);

  // Placeholder type for OMP array sections.
  if (LangOpts.OpenMP) {
    InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
    InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping);
    InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator);
  }
  if (LangOpts.MatrixTypes)
    InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx);

  // Builtin types for 'id', 'Class', and 'SEL'.
  InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
  InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
  InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);

  if (LangOpts.OpenCL) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/OpenCLImageTypes.def"

    InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
    InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
    InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
    InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
    InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);

#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  if (Target.hasAArch64SVETypes()) {
#define SVE_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/AArch64SVEACLETypes.def"
  }

  if (Target.getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
  }

  if (Target.hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/RISCVVTypes.def"
  }

  // Builtin type for __objc_yes and __objc_no
  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
                       SignedCharTy : BoolTy);

  ObjCConstantStringType = QualType();

  ObjCSuperType = QualType();

  // void * type
  if (LangOpts.OpenCLGenericAddressSpace) {
    auto Q = VoidTy.getQualifiers();
    Q.setAddressSpace(LangAS::opencl_generic);
    VoidPtrTy = getPointerType(getCanonicalType(
        getQualifiedType(VoidTy.getUnqualifiedType(), Q)));
  } else {
    VoidPtrTy = getPointerType(VoidTy);
  }

  // nullptr type (C++0x 2.14.7)
  InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);

  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
  InitBuiltinType(HalfTy, BuiltinType::Half);

  InitBuiltinType(BFloat16Ty, BuiltinType::BFloat16);

  // Builtin type used to help define __builtin_va_list.
  VaListTagDecl = nullptr;

  // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
  if (LangOpts.MicrosoftExt || LangOpts.Borland) {
    MSGuidTagDecl = buildImplicitRecord("_GUID");
    getTranslationUnitDecl()->addDecl(MSGuidTagDecl);
  }
}

DiagnosticsEngine &ASTContext::getDiagnostics() const {
  return SourceMgr.getDiagnostics();
}

AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
  AttrVec *&Result = DeclAttrs[D];
  if (!Result) {
    void *Mem = Allocate(sizeof(AttrVec));
    Result = new (Mem) AttrVec;
  }

  return *Result;
}

/// Erase the attributes corresponding to the given declaration.
void ASTContext::eraseDeclAttrs(const Decl *D) {
  llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
  if (Pos != DeclAttrs.end()) {
    Pos->second->~AttrVec();
    DeclAttrs.erase(Pos);
  }
}

// FIXME: Remove ?
MemberSpecializationInfo *
ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
  assert(Var->isStaticDataMember() && "Not a static data member");
  return getTemplateOrSpecializationInfo(Var)
      .dyn_cast<MemberSpecializationInfo *>();
}

ASTContext::TemplateOrSpecializationInfo
ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) {
  llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
      TemplateOrInstantiation.find(Var);
  if (Pos == TemplateOrInstantiation.end())
    return {};

  return Pos->second;
}

void
ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
                                                TemplateSpecializationKind TSK,
                                                SourceLocation PointOfInstantiation) {
  assert(Inst->isStaticDataMember() && "Not a static data member");
  assert(Tmpl->isStaticDataMember() && "Not a static data member");
  setTemplateOrSpecializationInfo(Inst, new (*this) MemberSpecializationInfo(
                                            Tmpl, TSK, PointOfInstantiation));
}

void
ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
                                            TemplateOrSpecializationInfo TSI) {
  assert(!TemplateOrInstantiation[Inst] &&
         "Already noted what the variable was instantiated from");
  TemplateOrInstantiation[Inst] = TSI;
}

NamedDecl *
ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) {
  auto Pos = InstantiatedFromUsingDecl.find(UUD);
  if (Pos == InstantiatedFromUsingDecl.end())
    return nullptr;

  return Pos->second;
}

void
ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) {
  assert((isa<UsingDecl>(Pattern) ||
          isa<UnresolvedUsingValueDecl>(Pattern) ||
          isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
         "pattern decl is not a using decl");
  assert((isa<UsingDecl>(Inst) ||
          isa<UnresolvedUsingValueDecl>(Inst) ||
          isa<UnresolvedUsingTypenameDecl>(Inst)) &&
         "instantiation did not produce a using decl");
  assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingDecl[Inst] = Pattern;
}

UsingEnumDecl *
ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) {
  auto Pos = InstantiatedFromUsingEnumDecl.find(UUD);
  if (Pos == InstantiatedFromUsingEnumDecl.end())
    return nullptr;

  return Pos->second;
}

void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst,
                                                  UsingEnumDecl *Pattern) {
  assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingEnumDecl[Inst] = Pattern;
}

UsingShadowDecl *
ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
  llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos
    = InstantiatedFromUsingShadowDecl.find(Inst);
  if (Pos == InstantiatedFromUsingShadowDecl.end())
    return nullptr;

  return Pos->second;
}

void
ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
                                               UsingShadowDecl *Pattern) {
  assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingShadowDecl[Inst] = Pattern;
}

FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) {
  llvm::DenseMap<FieldDecl *, FieldDecl *>::iterator Pos
    = InstantiatedFromUnnamedFieldDecl.find(Field);
InstantiatedFromUnnamedFieldDecl.end()) 1617 return nullptr; 1618 1619 return Pos->second; 1620 } 1621 1622 void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst, 1623 FieldDecl *Tmpl) { 1624 assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed"); 1625 assert(!Tmpl->getDeclName() && "Template field decl is not unnamed"); 1626 assert(!InstantiatedFromUnnamedFieldDecl[Inst] && 1627 "Already noted what unnamed field was instantiated from"); 1628 1629 InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl; 1630 } 1631 1632 ASTContext::overridden_cxx_method_iterator 1633 ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const { 1634 return overridden_methods(Method).begin(); 1635 } 1636 1637 ASTContext::overridden_cxx_method_iterator 1638 ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const { 1639 return overridden_methods(Method).end(); 1640 } 1641 1642 unsigned 1643 ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const { 1644 auto Range = overridden_methods(Method); 1645 return Range.end() - Range.begin(); 1646 } 1647 1648 ASTContext::overridden_method_range 1649 ASTContext::overridden_methods(const CXXMethodDecl *Method) const { 1650 llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos = 1651 OverriddenMethods.find(Method->getCanonicalDecl()); 1652 if (Pos == OverriddenMethods.end()) 1653 return overridden_method_range(nullptr, nullptr); 1654 return overridden_method_range(Pos->second.begin(), Pos->second.end()); 1655 } 1656 1657 void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method, 1658 const CXXMethodDecl *Overridden) { 1659 assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl()); 1660 OverriddenMethods[Method].push_back(Overridden); 1661 } 1662 1663 void ASTContext::getOverriddenMethods( 1664 const NamedDecl *D, 1665 SmallVectorImpl<const NamedDecl *> &Overridden) const { 1666 assert(D); 1667 1668 if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(D)) { 1669 Overridden.append(overridden_methods_begin(CXXMethod), 1670 overridden_methods_end(CXXMethod)); 1671 return; 1672 } 1673 1674 const auto *Method = dyn_cast<ObjCMethodDecl>(D); 1675 if (!Method) 1676 return; 1677 1678 SmallVector<const ObjCMethodDecl *, 8> OverDecls; 1679 Method->getOverriddenMethods(OverDecls); 1680 Overridden.append(OverDecls.begin(), OverDecls.end()); 1681 } 1682 1683 void ASTContext::addedLocalImportDecl(ImportDecl *Import) { 1684 assert(!Import->getNextLocalImport() && 1685 "Import declaration already in the chain"); 1686 assert(!Import->isFromASTFile() && "Non-local import declaration"); 1687 if (!FirstLocalImport) { 1688 FirstLocalImport = Import; 1689 LastLocalImport = Import; 1690 return; 1691 } 1692 1693 LastLocalImport->setNextLocalImport(Import); 1694 LastLocalImport = Import; 1695 } 1696 1697 //===----------------------------------------------------------------------===// 1698 // Type Sizing and Analysis 1699 //===----------------------------------------------------------------------===// 1700 1701 /// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified 1702 /// scalar floating point type. 
1703 const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const { 1704 switch (T->castAs<BuiltinType>()->getKind()) { 1705 default: 1706 llvm_unreachable("Not a floating point type!"); 1707 case BuiltinType::BFloat16: 1708 return Target->getBFloat16Format(); 1709 case BuiltinType::Float16: 1710 case BuiltinType::Half: 1711 return Target->getHalfFormat(); 1712 case BuiltinType::Float: return Target->getFloatFormat(); 1713 case BuiltinType::Double: return Target->getDoubleFormat(); 1714 case BuiltinType::Ibm128: 1715 return Target->getIbm128Format(); 1716 case BuiltinType::LongDouble: 1717 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice) 1718 return AuxTarget->getLongDoubleFormat(); 1719 return Target->getLongDoubleFormat(); 1720 case BuiltinType::Float128: 1721 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice) 1722 return AuxTarget->getFloat128Format(); 1723 return Target->getFloat128Format(); 1724 } 1725 } 1726 1727 CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const { 1728 unsigned Align = Target->getCharWidth(); 1729 1730 bool UseAlignAttrOnly = false; 1731 if (unsigned AlignFromAttr = D->getMaxAlignment()) { 1732 Align = AlignFromAttr; 1733 1734 // __attribute__((aligned)) can increase or decrease alignment 1735 // *except* on a struct or struct member, where it only increases 1736 // alignment unless 'packed' is also specified. 1737 // 1738 // It is an error for alignas to decrease alignment, so we can 1739 // ignore that possibility; Sema should diagnose it. 1740 if (isa<FieldDecl>(D)) { 1741 UseAlignAttrOnly = D->hasAttr<PackedAttr>() || 1742 cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>(); 1743 } else { 1744 UseAlignAttrOnly = true; 1745 } 1746 } 1747 else if (isa<FieldDecl>(D)) 1748 UseAlignAttrOnly = 1749 D->hasAttr<PackedAttr>() || 1750 cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>(); 1751 1752 // If we're using the align attribute only, just ignore everything 1753 // else about the declaration and its type. 1754 if (UseAlignAttrOnly) { 1755 // do nothing 1756 } else if (const auto *VD = dyn_cast<ValueDecl>(D)) { 1757 QualType T = VD->getType(); 1758 if (const auto *RT = T->getAs<ReferenceType>()) { 1759 if (ForAlignof) 1760 T = RT->getPointeeType(); 1761 else 1762 T = getPointerType(RT->getPointeeType()); 1763 } 1764 QualType BaseT = getBaseElementType(T); 1765 if (T->isFunctionType()) 1766 Align = getTypeInfoImpl(T.getTypePtr()).Align; 1767 else if (!BaseT->isIncompleteType()) { 1768 // Adjust alignments of declarations with array type by the 1769 // large-array alignment on the target. 
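      // (Illustrative only: on a target where getLargeArrayMinWidth() is 128
      // bits and getLargeArrayAlign() is 128 bits, a declaration such as
      // 'char Buf[64];' would get at least 16-byte alignment from this
      // adjustment; the actual thresholds are target-defined.)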
1770 if (const ArrayType *arrayType = getAsArrayType(T)) { 1771 unsigned MinWidth = Target->getLargeArrayMinWidth(); 1772 if (!ForAlignof && MinWidth) { 1773 if (isa<VariableArrayType>(arrayType)) 1774 Align = std::max(Align, Target->getLargeArrayAlign()); 1775 else if (isa<ConstantArrayType>(arrayType) && 1776 MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType))) 1777 Align = std::max(Align, Target->getLargeArrayAlign()); 1778 } 1779 } 1780 Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr())); 1781 if (BaseT.getQualifiers().hasUnaligned()) 1782 Align = Target->getCharWidth(); 1783 if (const auto *VD = dyn_cast<VarDecl>(D)) { 1784 if (VD->hasGlobalStorage() && !ForAlignof) { 1785 uint64_t TypeSize = getTypeSize(T.getTypePtr()); 1786 Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize)); 1787 } 1788 } 1789 } 1790 1791 // Fields can be subject to extra alignment constraints, like if 1792 // the field is packed, the struct is packed, or the struct has a 1793 // a max-field-alignment constraint (#pragma pack). So calculate 1794 // the actual alignment of the field within the struct, and then 1795 // (as we're expected to) constrain that by the alignment of the type. 1796 if (const auto *Field = dyn_cast<FieldDecl>(VD)) { 1797 const RecordDecl *Parent = Field->getParent(); 1798 // We can only produce a sensible answer if the record is valid. 1799 if (!Parent->isInvalidDecl()) { 1800 const ASTRecordLayout &Layout = getASTRecordLayout(Parent); 1801 1802 // Start with the record's overall alignment. 1803 unsigned FieldAlign = toBits(Layout.getAlignment()); 1804 1805 // Use the GCD of that and the offset within the record. 1806 uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex()); 1807 if (Offset > 0) { 1808 // Alignment is always a power of 2, so the GCD will be a power of 2, 1809 // which means we get to do this crazy thing instead of Euclid's. 1810 uint64_t LowBitOfOffset = Offset & (~Offset + 1); 1811 if (LowBitOfOffset < FieldAlign) 1812 FieldAlign = static_cast<unsigned>(LowBitOfOffset); 1813 } 1814 1815 Align = std::min(Align, FieldAlign); 1816 } 1817 } 1818 } 1819 1820 // Some targets have hard limitation on the maximum requestable alignment in 1821 // aligned attribute for static variables. 1822 const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute(); 1823 const auto *VD = dyn_cast<VarDecl>(D); 1824 if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static) 1825 Align = std::min(Align, MaxAlignedAttr); 1826 1827 return toCharUnitsFromBits(Align); 1828 } 1829 1830 CharUnits ASTContext::getExnObjectAlignment() const { 1831 return toCharUnitsFromBits(Target->getExnObjectAlignment()); 1832 } 1833 1834 // getTypeInfoDataSizeInChars - Return the size of a type, in 1835 // chars. If the type is a record, its data size is returned. This is 1836 // the size of the memcpy that's performed when assigning this type 1837 // using a trivial copy/move assignment operator. 1838 TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const { 1839 TypeInfoChars Info = getTypeInfoInChars(T); 1840 1841 // In C++, objects can sometimes be allocated into the tail padding 1842 // of a base-class subobject. We decide whether that's possible 1843 // during class layout, so here we can just trust the layout results. 
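  // For example (hypothetical types, not from this file): given
  //   struct B { B(); int i; char c; };  // size 8, data size 5
  //   struct D : B { char d; };          // 'd' may land in B's tail padding
  // a trivial copy of the B subobject must copy only the data size so that it
  // cannot clobber D::d.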
1844 if (getLangOpts().CPlusPlus) { 1845 if (const auto *RT = T->getAs<RecordType>()) { 1846 const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl()); 1847 Info.Width = layout.getDataSize(); 1848 } 1849 } 1850 1851 return Info; 1852 } 1853 1854 /// getConstantArrayInfoInChars - Performing the computation in CharUnits 1855 /// instead of in bits prevents overflowing the uint64_t for some large arrays. 1856 TypeInfoChars 1857 static getConstantArrayInfoInChars(const ASTContext &Context, 1858 const ConstantArrayType *CAT) { 1859 TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType()); 1860 uint64_t Size = CAT->getSize().getZExtValue(); 1861 assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <= 1862 (uint64_t)(-1)/Size) && 1863 "Overflow in array type char size evaluation"); 1864 uint64_t Width = EltInfo.Width.getQuantity() * Size; 1865 unsigned Align = EltInfo.Align.getQuantity(); 1866 if (!Context.getTargetInfo().getCXXABI().isMicrosoft() || 1867 Context.getTargetInfo().getPointerWidth(0) == 64) 1868 Width = llvm::alignTo(Width, Align); 1869 return TypeInfoChars(CharUnits::fromQuantity(Width), 1870 CharUnits::fromQuantity(Align), 1871 EltInfo.AlignRequirement); 1872 } 1873 1874 TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const { 1875 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) 1876 return getConstantArrayInfoInChars(*this, CAT); 1877 TypeInfo Info = getTypeInfo(T); 1878 return TypeInfoChars(toCharUnitsFromBits(Info.Width), 1879 toCharUnitsFromBits(Info.Align), Info.AlignRequirement); 1880 } 1881 1882 TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const { 1883 return getTypeInfoInChars(T.getTypePtr()); 1884 } 1885 1886 bool ASTContext::isAlignmentRequired(const Type *T) const { 1887 return getTypeInfo(T).AlignRequirement != AlignRequirementKind::None; 1888 } 1889 1890 bool ASTContext::isAlignmentRequired(QualType T) const { 1891 return isAlignmentRequired(T.getTypePtr()); 1892 } 1893 1894 unsigned ASTContext::getTypeAlignIfKnown(QualType T, 1895 bool NeedsPreferredAlignment) const { 1896 // An alignment on a typedef overrides anything else. 1897 if (const auto *TT = T->getAs<TypedefType>()) 1898 if (unsigned Align = TT->getDecl()->getMaxAlignment()) 1899 return Align; 1900 1901 // If we have an (array of) complete type, we're done. 1902 T = getBaseElementType(T); 1903 if (!T->isIncompleteType()) 1904 return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T); 1905 1906 // If we had an array type, its element type might be a typedef 1907 // type with an alignment attribute. 1908 if (const auto *TT = T->getAs<TypedefType>()) 1909 if (unsigned Align = TT->getDecl()->getMaxAlignment()) 1910 return Align; 1911 1912 // Otherwise, see if the declaration of the type had an attribute. 1913 if (const auto *TT = T->getAs<TagType>()) 1914 return TT->getDecl()->getMaxAlignment(); 1915 1916 return 0; 1917 } 1918 1919 TypeInfo ASTContext::getTypeInfo(const Type *T) const { 1920 TypeInfoMap::iterator I = MemoizedTypeInfo.find(T); 1921 if (I != MemoizedTypeInfo.end()) 1922 return I->second; 1923 1924 // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup. 1925 TypeInfo TI = getTypeInfoImpl(T); 1926 MemoizedTypeInfo[T] = TI; 1927 return TI; 1928 } 1929 1930 /// getTypeInfoImpl - Return the size of the specified type, in bits. This 1931 /// method does not work on incomplete types. 
1932 /// 1933 /// FIXME: Pointers into different addr spaces could have different sizes and 1934 /// alignment requirements: getPointerInfo should take an AddrSpace, this 1935 /// should take a QualType, &c. 1936 TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { 1937 uint64_t Width = 0; 1938 unsigned Align = 8; 1939 AlignRequirementKind AlignRequirement = AlignRequirementKind::None; 1940 unsigned AS = 0; 1941 switch (T->getTypeClass()) { 1942 #define TYPE(Class, Base) 1943 #define ABSTRACT_TYPE(Class, Base) 1944 #define NON_CANONICAL_TYPE(Class, Base) 1945 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 1946 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \ 1947 case Type::Class: \ 1948 assert(!T->isDependentType() && "should not see dependent types here"); \ 1949 return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr()); 1950 #include "clang/AST/TypeNodes.inc" 1951 llvm_unreachable("Should not see dependent types"); 1952 1953 case Type::FunctionNoProto: 1954 case Type::FunctionProto: 1955 // GCC extension: alignof(function) = 32 bits 1956 Width = 0; 1957 Align = 32; 1958 break; 1959 1960 case Type::IncompleteArray: 1961 case Type::VariableArray: 1962 case Type::ConstantArray: { 1963 // Model non-constant sized arrays as size zero, but track the alignment. 1964 uint64_t Size = 0; 1965 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) 1966 Size = CAT->getSize().getZExtValue(); 1967 1968 TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType()); 1969 assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) && 1970 "Overflow in array type bit size evaluation"); 1971 Width = EltInfo.Width * Size; 1972 Align = EltInfo.Align; 1973 AlignRequirement = EltInfo.AlignRequirement; 1974 if (!getTargetInfo().getCXXABI().isMicrosoft() || 1975 getTargetInfo().getPointerWidth(0) == 64) 1976 Width = llvm::alignTo(Width, Align); 1977 break; 1978 } 1979 1980 case Type::ExtVector: 1981 case Type::Vector: { 1982 const auto *VT = cast<VectorType>(T); 1983 TypeInfo EltInfo = getTypeInfo(VT->getElementType()); 1984 Width = VT->isExtVectorBoolType() ? VT->getNumElements() 1985 : EltInfo.Width * VT->getNumElements(); 1986 // Enforce at least byte alignment. 1987 Align = std::max<unsigned>(8, Width); 1988 1989 // If the alignment is not a power of 2, round up to the next power of 2. 1990 // This happens for non-power-of-2 length vectors. 1991 if (Align & (Align-1)) { 1992 Align = llvm::NextPowerOf2(Align); 1993 Width = llvm::alignTo(Width, Align); 1994 } 1995 // Adjust the alignment based on the target max. 1996 uint64_t TargetVectorAlign = Target->getMaxVectorAlign(); 1997 if (TargetVectorAlign && TargetVectorAlign < Align) 1998 Align = TargetVectorAlign; 1999 if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) 2000 // Adjust the alignment for fixed-length SVE vectors. This is important 2001 // for non-power-of-2 vector lengths. 2002 Align = 128; 2003 else if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) 2004 // Adjust the alignment for fixed-length SVE predicates. 2005 Align = 16; 2006 break; 2007 } 2008 2009 case Type::ConstantMatrix: { 2010 const auto *MT = cast<ConstantMatrixType>(T); 2011 TypeInfo ElementInfo = getTypeInfo(MT->getElementType()); 2012 // The internal layout of a matrix value is implementation defined. 2013 // Initially be ABI compatible with arrays with respect to alignment and 2014 // size. 
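    // (Illustrative: a hypothetical 'typedef float m4x4_t
    // __attribute__((matrix_type(4, 4)));' is currently sized and aligned
    // like 'float[16]'.)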
2015 Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns(); 2016 Align = ElementInfo.Align; 2017 break; 2018 } 2019 2020 case Type::Builtin: 2021 switch (cast<BuiltinType>(T)->getKind()) { 2022 default: llvm_unreachable("Unknown builtin type!"); 2023 case BuiltinType::Void: 2024 // GCC extension: alignof(void) = 8 bits. 2025 Width = 0; 2026 Align = 8; 2027 break; 2028 case BuiltinType::Bool: 2029 Width = Target->getBoolWidth(); 2030 Align = Target->getBoolAlign(); 2031 break; 2032 case BuiltinType::Char_S: 2033 case BuiltinType::Char_U: 2034 case BuiltinType::UChar: 2035 case BuiltinType::SChar: 2036 case BuiltinType::Char8: 2037 Width = Target->getCharWidth(); 2038 Align = Target->getCharAlign(); 2039 break; 2040 case BuiltinType::WChar_S: 2041 case BuiltinType::WChar_U: 2042 Width = Target->getWCharWidth(); 2043 Align = Target->getWCharAlign(); 2044 break; 2045 case BuiltinType::Char16: 2046 Width = Target->getChar16Width(); 2047 Align = Target->getChar16Align(); 2048 break; 2049 case BuiltinType::Char32: 2050 Width = Target->getChar32Width(); 2051 Align = Target->getChar32Align(); 2052 break; 2053 case BuiltinType::UShort: 2054 case BuiltinType::Short: 2055 Width = Target->getShortWidth(); 2056 Align = Target->getShortAlign(); 2057 break; 2058 case BuiltinType::UInt: 2059 case BuiltinType::Int: 2060 Width = Target->getIntWidth(); 2061 Align = Target->getIntAlign(); 2062 break; 2063 case BuiltinType::ULong: 2064 case BuiltinType::Long: 2065 Width = Target->getLongWidth(); 2066 Align = Target->getLongAlign(); 2067 break; 2068 case BuiltinType::ULongLong: 2069 case BuiltinType::LongLong: 2070 Width = Target->getLongLongWidth(); 2071 Align = Target->getLongLongAlign(); 2072 break; 2073 case BuiltinType::Int128: 2074 case BuiltinType::UInt128: 2075 Width = 128; 2076 Align = 128; // int128_t is 128-bit aligned on all targets. 
2077 break; 2078 case BuiltinType::ShortAccum: 2079 case BuiltinType::UShortAccum: 2080 case BuiltinType::SatShortAccum: 2081 case BuiltinType::SatUShortAccum: 2082 Width = Target->getShortAccumWidth(); 2083 Align = Target->getShortAccumAlign(); 2084 break; 2085 case BuiltinType::Accum: 2086 case BuiltinType::UAccum: 2087 case BuiltinType::SatAccum: 2088 case BuiltinType::SatUAccum: 2089 Width = Target->getAccumWidth(); 2090 Align = Target->getAccumAlign(); 2091 break; 2092 case BuiltinType::LongAccum: 2093 case BuiltinType::ULongAccum: 2094 case BuiltinType::SatLongAccum: 2095 case BuiltinType::SatULongAccum: 2096 Width = Target->getLongAccumWidth(); 2097 Align = Target->getLongAccumAlign(); 2098 break; 2099 case BuiltinType::ShortFract: 2100 case BuiltinType::UShortFract: 2101 case BuiltinType::SatShortFract: 2102 case BuiltinType::SatUShortFract: 2103 Width = Target->getShortFractWidth(); 2104 Align = Target->getShortFractAlign(); 2105 break; 2106 case BuiltinType::Fract: 2107 case BuiltinType::UFract: 2108 case BuiltinType::SatFract: 2109 case BuiltinType::SatUFract: 2110 Width = Target->getFractWidth(); 2111 Align = Target->getFractAlign(); 2112 break; 2113 case BuiltinType::LongFract: 2114 case BuiltinType::ULongFract: 2115 case BuiltinType::SatLongFract: 2116 case BuiltinType::SatULongFract: 2117 Width = Target->getLongFractWidth(); 2118 Align = Target->getLongFractAlign(); 2119 break; 2120 case BuiltinType::BFloat16: 2121 if (Target->hasBFloat16Type()) { 2122 Width = Target->getBFloat16Width(); 2123 Align = Target->getBFloat16Align(); 2124 } 2125 break; 2126 case BuiltinType::Float16: 2127 case BuiltinType::Half: 2128 if (Target->hasFloat16Type() || !getLangOpts().OpenMP || 2129 !getLangOpts().OpenMPIsDevice) { 2130 Width = Target->getHalfWidth(); 2131 Align = Target->getHalfAlign(); 2132 } else { 2133 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && 2134 "Expected OpenMP device compilation."); 2135 Width = AuxTarget->getHalfWidth(); 2136 Align = AuxTarget->getHalfAlign(); 2137 } 2138 break; 2139 case BuiltinType::Float: 2140 Width = Target->getFloatWidth(); 2141 Align = Target->getFloatAlign(); 2142 break; 2143 case BuiltinType::Double: 2144 Width = Target->getDoubleWidth(); 2145 Align = Target->getDoubleAlign(); 2146 break; 2147 case BuiltinType::Ibm128: 2148 Width = Target->getIbm128Width(); 2149 Align = Target->getIbm128Align(); 2150 break; 2151 case BuiltinType::LongDouble: 2152 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && 2153 (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() || 2154 Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) { 2155 Width = AuxTarget->getLongDoubleWidth(); 2156 Align = AuxTarget->getLongDoubleAlign(); 2157 } else { 2158 Width = Target->getLongDoubleWidth(); 2159 Align = Target->getLongDoubleAlign(); 2160 } 2161 break; 2162 case BuiltinType::Float128: 2163 if (Target->hasFloat128Type() || !getLangOpts().OpenMP || 2164 !getLangOpts().OpenMPIsDevice) { 2165 Width = Target->getFloat128Width(); 2166 Align = Target->getFloat128Align(); 2167 } else { 2168 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && 2169 "Expected OpenMP device compilation."); 2170 Width = AuxTarget->getFloat128Width(); 2171 Align = AuxTarget->getFloat128Align(); 2172 } 2173 break; 2174 case BuiltinType::NullPtr: 2175 Width = Target->getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t) 2176 Align = Target->getPointerAlign(0); // == sizeof(void*) 2177 break; 2178 case BuiltinType::ObjCId: 2179 case 
BuiltinType::ObjCClass: 2180 case BuiltinType::ObjCSel: 2181 Width = Target->getPointerWidth(0); 2182 Align = Target->getPointerAlign(0); 2183 break; 2184 case BuiltinType::OCLSampler: 2185 case BuiltinType::OCLEvent: 2186 case BuiltinType::OCLClkEvent: 2187 case BuiltinType::OCLQueue: 2188 case BuiltinType::OCLReserveID: 2189 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 2190 case BuiltinType::Id: 2191 #include "clang/Basic/OpenCLImageTypes.def" 2192 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 2193 case BuiltinType::Id: 2194 #include "clang/Basic/OpenCLExtensionTypes.def" 2195 AS = getTargetAddressSpace( 2196 Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T))); 2197 Width = Target->getPointerWidth(AS); 2198 Align = Target->getPointerAlign(AS); 2199 break; 2200 // The SVE types are effectively target-specific. The length of an 2201 // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple 2202 // of 128 bits. There is one predicate bit for each vector byte, so the 2203 // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits. 2204 // 2205 // Because the length is only known at runtime, we use a dummy value 2206 // of 0 for the static length. The alignment values are those defined 2207 // by the Procedure Call Standard for the Arm Architecture. 2208 #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \ 2209 IsSigned, IsFP, IsBF) \ 2210 case BuiltinType::Id: \ 2211 Width = 0; \ 2212 Align = 128; \ 2213 break; 2214 #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \ 2215 case BuiltinType::Id: \ 2216 Width = 0; \ 2217 Align = 16; \ 2218 break; 2219 #include "clang/Basic/AArch64SVEACLETypes.def" 2220 #define PPC_VECTOR_TYPE(Name, Id, Size) \ 2221 case BuiltinType::Id: \ 2222 Width = Size; \ 2223 Align = Size; \ 2224 break; 2225 #include "clang/Basic/PPCTypes.def" 2226 #define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned, \ 2227 IsFP) \ 2228 case BuiltinType::Id: \ 2229 Width = 0; \ 2230 Align = ElBits; \ 2231 break; 2232 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \ 2233 case BuiltinType::Id: \ 2234 Width = 0; \ 2235 Align = 8; \ 2236 break; 2237 #include "clang/Basic/RISCVVTypes.def" 2238 } 2239 break; 2240 case Type::ObjCObjectPointer: 2241 Width = Target->getPointerWidth(0); 2242 Align = Target->getPointerAlign(0); 2243 break; 2244 case Type::BlockPointer: 2245 AS = getTargetAddressSpace(cast<BlockPointerType>(T)->getPointeeType()); 2246 Width = Target->getPointerWidth(AS); 2247 Align = Target->getPointerAlign(AS); 2248 break; 2249 case Type::LValueReference: 2250 case Type::RValueReference: 2251 // alignof and sizeof should never enter this code path here, so we go 2252 // the pointer route. 2253 AS = getTargetAddressSpace(cast<ReferenceType>(T)->getPointeeType()); 2254 Width = Target->getPointerWidth(AS); 2255 Align = Target->getPointerAlign(AS); 2256 break; 2257 case Type::Pointer: 2258 AS = getTargetAddressSpace(cast<PointerType>(T)->getPointeeType()); 2259 Width = Target->getPointerWidth(AS); 2260 Align = Target->getPointerAlign(AS); 2261 break; 2262 case Type::MemberPointer: { 2263 const auto *MPT = cast<MemberPointerType>(T); 2264 CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT); 2265 Width = MPI.Width; 2266 Align = MPI.Align; 2267 break; 2268 } 2269 case Type::Complex: { 2270 // Complex types have the same alignment as their elements, but twice the 2271 // size. 
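    // (For instance, '_Complex double' is typically 128 bits wide with the
    // alignment of 'double' on common targets.)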
2272 TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType()); 2273 Width = EltInfo.Width * 2; 2274 Align = EltInfo.Align; 2275 break; 2276 } 2277 case Type::ObjCObject: 2278 return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr()); 2279 case Type::Adjusted: 2280 case Type::Decayed: 2281 return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr()); 2282 case Type::ObjCInterface: { 2283 const auto *ObjCI = cast<ObjCInterfaceType>(T); 2284 if (ObjCI->getDecl()->isInvalidDecl()) { 2285 Width = 8; 2286 Align = 8; 2287 break; 2288 } 2289 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl()); 2290 Width = toBits(Layout.getSize()); 2291 Align = toBits(Layout.getAlignment()); 2292 break; 2293 } 2294 case Type::BitInt: { 2295 const auto *EIT = cast<BitIntType>(T); 2296 Align = 2297 std::min(static_cast<unsigned>(std::max( 2298 getCharWidth(), llvm::PowerOf2Ceil(EIT->getNumBits()))), 2299 Target->getLongLongAlign()); 2300 Width = llvm::alignTo(EIT->getNumBits(), Align); 2301 break; 2302 } 2303 case Type::Record: 2304 case Type::Enum: { 2305 const auto *TT = cast<TagType>(T); 2306 2307 if (TT->getDecl()->isInvalidDecl()) { 2308 Width = 8; 2309 Align = 8; 2310 break; 2311 } 2312 2313 if (const auto *ET = dyn_cast<EnumType>(TT)) { 2314 const EnumDecl *ED = ET->getDecl(); 2315 TypeInfo Info = 2316 getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType()); 2317 if (unsigned AttrAlign = ED->getMaxAlignment()) { 2318 Info.Align = AttrAlign; 2319 Info.AlignRequirement = AlignRequirementKind::RequiredByEnum; 2320 } 2321 return Info; 2322 } 2323 2324 const auto *RT = cast<RecordType>(TT); 2325 const RecordDecl *RD = RT->getDecl(); 2326 const ASTRecordLayout &Layout = getASTRecordLayout(RD); 2327 Width = toBits(Layout.getSize()); 2328 Align = toBits(Layout.getAlignment()); 2329 AlignRequirement = RD->hasAttr<AlignedAttr>() 2330 ? AlignRequirementKind::RequiredByRecord 2331 : AlignRequirementKind::None; 2332 break; 2333 } 2334 2335 case Type::SubstTemplateTypeParm: 2336 return getTypeInfo(cast<SubstTemplateTypeParmType>(T)-> 2337 getReplacementType().getTypePtr()); 2338 2339 case Type::Auto: 2340 case Type::DeducedTemplateSpecialization: { 2341 const auto *A = cast<DeducedType>(T); 2342 assert(!A->getDeducedType().isNull() && 2343 "cannot request the size of an undeduced or dependent auto type"); 2344 return getTypeInfo(A->getDeducedType().getTypePtr()); 2345 } 2346 2347 case Type::Paren: 2348 return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr()); 2349 2350 case Type::MacroQualified: 2351 return getTypeInfo( 2352 cast<MacroQualifiedType>(T)->getUnderlyingType().getTypePtr()); 2353 2354 case Type::ObjCTypeParam: 2355 return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr()); 2356 2357 case Type::Using: 2358 return getTypeInfo(cast<UsingType>(T)->desugar().getTypePtr()); 2359 2360 case Type::Typedef: { 2361 const TypedefNameDecl *Typedef = cast<TypedefType>(T)->getDecl(); 2362 TypeInfo Info = getTypeInfo(Typedef->getUnderlyingType().getTypePtr()); 2363 // If the typedef has an aligned attribute on it, it overrides any computed 2364 // alignment we have. This violates the GCC documentation (which says that 2365 // attribute(aligned) can only round up) but matches its implementation. 
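    // (Illustrative: given 'typedef double D2 __attribute__((aligned(2)));',
    // the computed alignment for D2 becomes 2 bytes (16 bits), below double's
    // natural alignment.)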
    if (unsigned AttrAlign = Typedef->getMaxAlignment()) {
      Align = AttrAlign;
      AlignRequirement = AlignRequirementKind::RequiredByTypedef;
    } else {
      Align = Info.Align;
      AlignRequirement = Info.AlignRequirement;
    }
    Width = Info.Width;
    break;
  }

  case Type::Elaborated:
    return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());

  case Type::Attributed:
    return getTypeInfo(
        cast<AttributedType>(T)->getEquivalentType().getTypePtr());

  case Type::BTFTagAttributed:
    return getTypeInfo(
        cast<BTFTagAttributedType>(T)->getWrappedType().getTypePtr());

  case Type::Atomic: {
    // Start with the base type information.
    TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
    Width = Info.Width;
    Align = Info.Align;

    if (!Width) {
      // An otherwise zero-sized type should still generate an
      // atomic operation.
      Width = Target->getCharWidth();
      assert(Align);
    } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
      // If the size of the type doesn't exceed the platform's max
      // atomic promotion width, make the size and alignment more
      // favorable to atomic operations:

      // Round the size up to a power of 2.
      if (!llvm::isPowerOf2_64(Width))
        Width = llvm::NextPowerOf2(Width);

      // Set the alignment equal to the size.
      Align = static_cast<unsigned>(Width);
    }
  }
  break;

  case Type::Pipe:
    Width = Target->getPointerWidth(getTargetAddressSpace(LangAS::opencl_global));
    Align = Target->getPointerAlign(getTargetAddressSpace(LangAS::opencl_global));
    break;
  }

  assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
  return TypeInfo(Width, Align, AlignRequirement);
}

unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
  UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T);
  if (I != MemoizedUnadjustedAlign.end())
    return I->second;

  unsigned UnadjustedAlign;
  if (const auto *RT = T->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = getASTRecordLayout(RD);
    UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
  } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
    const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
    UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
  } else {
    UnadjustedAlign = getTypeAlign(T->getUnqualifiedDesugaredType());
  }

  MemoizedUnadjustedAlign[T] = UnadjustedAlign;
  return UnadjustedAlign;
}

unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
  unsigned SimdAlign = getTargetInfo().getSimdDefaultAlign();
  return SimdAlign;
}

/// toCharUnitsFromBits - Convert a size in bits to a size in characters.
CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
  return CharUnits::fromQuantity(BitSize / getCharWidth());
}

/// toBits - Convert a size in characters to a size in bits.
int64_t ASTContext::toBits(CharUnits CharSize) const {
  return CharSize.getQuantity() * getCharWidth();
}
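// For instance, with the usual 8-bit 'char', toCharUnitsFromBits(64) is
// CharUnits::fromQuantity(8), and toBits of that value is 64 again.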
/// getTypeSizeInChars - Return the size of the specified type, in characters.
/// This method does not work on incomplete types.
CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
  return getTypeInfoInChars(T).Width;
}
CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
  return getTypeInfoInChars(T).Width;
}

/// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
/// characters. This method does not work on incomplete types.
CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
  return toCharUnitsFromBits(getTypeAlign(T));
}
CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
  return toCharUnitsFromBits(getTypeAlign(T));
}

/// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a
/// type, in characters, before alignment adjustments. This method does
/// not work on incomplete types.
CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const {
  return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
}
CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const {
  return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
}

/// getPreferredTypeAlign - Return the "preferred" alignment of the specified
/// type for the current target in bits. This can differ from the ABI alignment
/// in cases where overaligning a data type benefits performance or preserves
/// backwards compatibility. (Note: despite the name, the preferred alignment
/// is ABI-impacting, and not an optimization.)
unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
  TypeInfo TI = getTypeInfo(T);
  unsigned ABIAlign = TI.Align;

  T = T->getBaseElementTypeUnsafe();

  // The preferred alignment of member pointers is that of a pointer.
  if (T->isMemberPointerType())
    return getPreferredTypeAlign(getPointerDiffType().getTypePtr());

  if (!Target->allowsLargerPreferedTypeAlignment())
    return ABIAlign;

  if (const auto *RT = T->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();

    // When used as part of a typedef, or together with a 'packed' attribute,
    // the 'aligned' attribute can be used to decrease alignment. Note that the
    // 'packed' case is already taken into consideration when computing the
    // alignment, we only need to handle the typedef case here.
    if (TI.AlignRequirement == AlignRequirementKind::RequiredByTypedef ||
        RD->isInvalidDecl())
      return ABIAlign;

    unsigned PreferredAlign = static_cast<unsigned>(
        toBits(getASTRecordLayout(RD).PreferredAlignment));
    assert(PreferredAlign >= ABIAlign &&
           "PreferredAlign should be at least as large as ABIAlign.");
    return PreferredAlign;
  }

  // Double (and, for targets supporting AIX `power` alignment, long double) and
  // long long should be naturally aligned (despite requiring less alignment) if
  // possible.
  if (const auto *CT = T->getAs<ComplexType>())
    T = CT->getElementType().getTypePtr();
  if (const auto *ET = T->getAs<EnumType>())
    T = ET->getDecl()->getIntegerType().getTypePtr();
  if (T->isSpecificBuiltinType(BuiltinType::Double) ||
      T->isSpecificBuiltinType(BuiltinType::LongLong) ||
      T->isSpecificBuiltinType(BuiltinType::ULongLong) ||
      (T->isSpecificBuiltinType(BuiltinType::LongDouble) &&
       Target->defaultsToAIXPowerAlignment()))
    // Don't increase the alignment if an alignment attribute was specified on a
    // typedef declaration.
2538 if (!TI.isAlignRequired()) 2539 return std::max(ABIAlign, (unsigned)getTypeSize(T)); 2540 2541 return ABIAlign; 2542 } 2543 2544 /// getTargetDefaultAlignForAttributeAligned - Return the default alignment 2545 /// for __attribute__((aligned)) on this target, to be used if no alignment 2546 /// value is specified. 2547 unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const { 2548 return getTargetInfo().getDefaultAlignForAttributeAligned(); 2549 } 2550 2551 /// getAlignOfGlobalVar - Return the alignment in bits that should be given 2552 /// to a global variable of the specified type. 2553 unsigned ASTContext::getAlignOfGlobalVar(QualType T) const { 2554 uint64_t TypeSize = getTypeSize(T.getTypePtr()); 2555 return std::max(getPreferredTypeAlign(T), 2556 getTargetInfo().getMinGlobalAlign(TypeSize)); 2557 } 2558 2559 /// getAlignOfGlobalVarInChars - Return the alignment in characters that 2560 /// should be given to a global variable of the specified type. 2561 CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const { 2562 return toCharUnitsFromBits(getAlignOfGlobalVar(T)); 2563 } 2564 2565 CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const { 2566 CharUnits Offset = CharUnits::Zero(); 2567 const ASTRecordLayout *Layout = &getASTRecordLayout(RD); 2568 while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) { 2569 Offset += Layout->getBaseClassOffset(Base); 2570 Layout = &getASTRecordLayout(Base); 2571 } 2572 return Offset; 2573 } 2574 2575 CharUnits ASTContext::getMemberPointerPathAdjustment(const APValue &MP) const { 2576 const ValueDecl *MPD = MP.getMemberPointerDecl(); 2577 CharUnits ThisAdjustment = CharUnits::Zero(); 2578 ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath(); 2579 bool DerivedMember = MP.isMemberPointerToDerivedMember(); 2580 const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext()); 2581 for (unsigned I = 0, N = Path.size(); I != N; ++I) { 2582 const CXXRecordDecl *Base = RD; 2583 const CXXRecordDecl *Derived = Path[I]; 2584 if (DerivedMember) 2585 std::swap(Base, Derived); 2586 ThisAdjustment += getASTRecordLayout(Derived).getBaseClassOffset(Base); 2587 RD = Path[I]; 2588 } 2589 if (DerivedMember) 2590 ThisAdjustment = -ThisAdjustment; 2591 return ThisAdjustment; 2592 } 2593 2594 /// DeepCollectObjCIvars - 2595 /// This routine first collects all declared, but not synthesized, ivars in 2596 /// super class and then collects all ivars, including those synthesized for 2597 /// current class. This routine is used for implementation of current class 2598 /// when all ivars, declared and synthesized are known. 2599 void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI, 2600 bool leafClass, 2601 SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const { 2602 if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass()) 2603 DeepCollectObjCIvars(SuperClass, false, Ivars); 2604 if (!leafClass) { 2605 llvm::append_range(Ivars, OI->ivars()); 2606 } else { 2607 auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI); 2608 for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv; 2609 Iv= Iv->getNextIvar()) 2610 Ivars.push_back(Iv); 2611 } 2612 } 2613 2614 /// CollectInheritedProtocols - Collect all protocols in current class and 2615 /// those inherited by it. 
2616 void ASTContext::CollectInheritedProtocols(const Decl *CDecl, 2617 llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) { 2618 if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) { 2619 // We can use protocol_iterator here instead of 2620 // all_referenced_protocol_iterator since we are walking all categories. 2621 for (auto *Proto : OI->all_referenced_protocols()) { 2622 CollectInheritedProtocols(Proto, Protocols); 2623 } 2624 2625 // Categories of this Interface. 2626 for (const auto *Cat : OI->visible_categories()) 2627 CollectInheritedProtocols(Cat, Protocols); 2628 2629 if (ObjCInterfaceDecl *SD = OI->getSuperClass()) 2630 while (SD) { 2631 CollectInheritedProtocols(SD, Protocols); 2632 SD = SD->getSuperClass(); 2633 } 2634 } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) { 2635 for (auto *Proto : OC->protocols()) { 2636 CollectInheritedProtocols(Proto, Protocols); 2637 } 2638 } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) { 2639 // Insert the protocol. 2640 if (!Protocols.insert( 2641 const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second) 2642 return; 2643 2644 for (auto *Proto : OP->protocols()) 2645 CollectInheritedProtocols(Proto, Protocols); 2646 } 2647 } 2648 2649 static bool unionHasUniqueObjectRepresentations(const ASTContext &Context, 2650 const RecordDecl *RD) { 2651 assert(RD->isUnion() && "Must be union type"); 2652 CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl()); 2653 2654 for (const auto *Field : RD->fields()) { 2655 if (!Context.hasUniqueObjectRepresentations(Field->getType())) 2656 return false; 2657 CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType()); 2658 if (FieldSize != UnionSize) 2659 return false; 2660 } 2661 return !RD->field_empty(); 2662 } 2663 2664 static int64_t getSubobjectOffset(const FieldDecl *Field, 2665 const ASTContext &Context, 2666 const clang::ASTRecordLayout & /*Layout*/) { 2667 return Context.getFieldOffset(Field); 2668 } 2669 2670 static int64_t getSubobjectOffset(const CXXRecordDecl *RD, 2671 const ASTContext &Context, 2672 const clang::ASTRecordLayout &Layout) { 2673 return Context.toBits(Layout.getBaseClassOffset(RD)); 2674 } 2675 2676 static llvm::Optional<int64_t> 2677 structHasUniqueObjectRepresentations(const ASTContext &Context, 2678 const RecordDecl *RD); 2679 2680 static llvm::Optional<int64_t> 2681 getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context) { 2682 if (Field->getType()->isRecordType()) { 2683 const RecordDecl *RD = Field->getType()->getAsRecordDecl(); 2684 if (!RD->isUnion()) 2685 return structHasUniqueObjectRepresentations(Context, RD); 2686 } 2687 2688 // A _BitInt type may not be unique if it has padding bits 2689 // but if it is a bitfield the padding bits are not used. 
2690 bool IsBitIntType = Field->getType()->isBitIntType(); 2691 if (!Field->getType()->isReferenceType() && !IsBitIntType && 2692 !Context.hasUniqueObjectRepresentations(Field->getType())) 2693 return llvm::None; 2694 2695 int64_t FieldSizeInBits = 2696 Context.toBits(Context.getTypeSizeInChars(Field->getType())); 2697 if (Field->isBitField()) { 2698 int64_t BitfieldSize = Field->getBitWidthValue(Context); 2699 if (IsBitIntType) { 2700 if ((unsigned)BitfieldSize > 2701 cast<BitIntType>(Field->getType())->getNumBits()) 2702 return llvm::None; 2703 } else if (BitfieldSize > FieldSizeInBits) { 2704 return llvm::None; 2705 } 2706 FieldSizeInBits = BitfieldSize; 2707 } else if (IsBitIntType && 2708 !Context.hasUniqueObjectRepresentations(Field->getType())) { 2709 return llvm::None; 2710 } 2711 return FieldSizeInBits; 2712 } 2713 2714 static llvm::Optional<int64_t> 2715 getSubobjectSizeInBits(const CXXRecordDecl *RD, const ASTContext &Context) { 2716 return structHasUniqueObjectRepresentations(Context, RD); 2717 } 2718 2719 template <typename RangeT> 2720 static llvm::Optional<int64_t> structSubobjectsHaveUniqueObjectRepresentations( 2721 const RangeT &Subobjects, int64_t CurOffsetInBits, 2722 const ASTContext &Context, const clang::ASTRecordLayout &Layout) { 2723 for (const auto *Subobject : Subobjects) { 2724 llvm::Optional<int64_t> SizeInBits = 2725 getSubobjectSizeInBits(Subobject, Context); 2726 if (!SizeInBits) 2727 return llvm::None; 2728 if (*SizeInBits != 0) { 2729 int64_t Offset = getSubobjectOffset(Subobject, Context, Layout); 2730 if (Offset != CurOffsetInBits) 2731 return llvm::None; 2732 CurOffsetInBits += *SizeInBits; 2733 } 2734 } 2735 return CurOffsetInBits; 2736 } 2737 2738 static llvm::Optional<int64_t> 2739 structHasUniqueObjectRepresentations(const ASTContext &Context, 2740 const RecordDecl *RD) { 2741 assert(!RD->isUnion() && "Must be struct/class type"); 2742 const auto &Layout = Context.getASTRecordLayout(RD); 2743 2744 int64_t CurOffsetInBits = 0; 2745 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) { 2746 if (ClassDecl->isDynamicClass()) 2747 return llvm::None; 2748 2749 SmallVector<CXXRecordDecl *, 4> Bases; 2750 for (const auto &Base : ClassDecl->bases()) { 2751 // Empty types can be inherited from, and non-empty types can potentially 2752 // have tail padding, so just make sure there isn't an error. 
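  // (For instance, on a target that stores _BitInt(17) in 32 bits, a plain
  // _BitInt(17) member has padding bits, but a 17-bit bit-field of that type
  // does not use them.)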
2753 Bases.emplace_back(Base.getType()->getAsCXXRecordDecl()); 2754 } 2755 2756 llvm::sort(Bases, [&](const CXXRecordDecl *L, const CXXRecordDecl *R) { 2757 return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R); 2758 }); 2759 2760 llvm::Optional<int64_t> OffsetAfterBases = 2761 structSubobjectsHaveUniqueObjectRepresentations(Bases, CurOffsetInBits, 2762 Context, Layout); 2763 if (!OffsetAfterBases) 2764 return llvm::None; 2765 CurOffsetInBits = *OffsetAfterBases; 2766 } 2767 2768 llvm::Optional<int64_t> OffsetAfterFields = 2769 structSubobjectsHaveUniqueObjectRepresentations( 2770 RD->fields(), CurOffsetInBits, Context, Layout); 2771 if (!OffsetAfterFields) 2772 return llvm::None; 2773 CurOffsetInBits = *OffsetAfterFields; 2774 2775 return CurOffsetInBits; 2776 } 2777 2778 bool ASTContext::hasUniqueObjectRepresentations(QualType Ty) const { 2779 // C++17 [meta.unary.prop]: 2780 // The predicate condition for a template specialization 2781 // has_unique_object_representations<T> shall be 2782 // satisfied if and only if: 2783 // (9.1) - T is trivially copyable, and 2784 // (9.2) - any two objects of type T with the same value have the same 2785 // object representation, where two objects 2786 // of array or non-union class type are considered to have the same value 2787 // if their respective sequences of 2788 // direct subobjects have the same values, and two objects of union type 2789 // are considered to have the same 2790 // value if they have the same active member and the corresponding members 2791 // have the same value. 2792 // The set of scalar types for which this condition holds is 2793 // implementation-defined. [ Note: If a type has padding 2794 // bits, the condition does not hold; otherwise, the condition holds true 2795 // for unsigned integral types. -- end note ] 2796 assert(!Ty.isNull() && "Null QualType sent to unique object rep check"); 2797 2798 // Arrays are unique only if their element type is unique. 2799 if (Ty->isArrayType()) 2800 return hasUniqueObjectRepresentations(getBaseElementType(Ty)); 2801 2802 // (9.1) - T is trivially copyable... 2803 if (!Ty.isTriviallyCopyableType(*this)) 2804 return false; 2805 2806 // All integrals and enums are unique. 2807 if (Ty->isIntegralOrEnumerationType()) { 2808 // Except _BitInt types that have padding bits. 2809 if (const auto *BIT = dyn_cast<BitIntType>(Ty)) 2810 return getTypeSize(BIT) == BIT->getNumBits(); 2811 2812 return true; 2813 } 2814 2815 // All other pointers are unique. 
2816 if (Ty->isPointerType()) 2817 return true; 2818 2819 if (Ty->isMemberPointerType()) { 2820 const auto *MPT = Ty->getAs<MemberPointerType>(); 2821 return !ABI->getMemberPointerInfo(MPT).HasPadding; 2822 } 2823 2824 if (Ty->isRecordType()) { 2825 const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl(); 2826 2827 if (Record->isInvalidDecl()) 2828 return false; 2829 2830 if (Record->isUnion()) 2831 return unionHasUniqueObjectRepresentations(*this, Record); 2832 2833 Optional<int64_t> StructSize = 2834 structHasUniqueObjectRepresentations(*this, Record); 2835 2836 return StructSize && 2837 StructSize.getValue() == static_cast<int64_t>(getTypeSize(Ty)); 2838 } 2839 2840 // FIXME: More cases to handle here (list by rsmith): 2841 // vectors (careful about, eg, vector of 3 foo) 2842 // _Complex int and friends 2843 // _Atomic T 2844 // Obj-C block pointers 2845 // Obj-C object pointers 2846 // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t, 2847 // clk_event_t, queue_t, reserve_id_t) 2848 // There're also Obj-C class types and the Obj-C selector type, but I think it 2849 // makes sense for those to return false here. 2850 2851 return false; 2852 } 2853 2854 unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const { 2855 unsigned count = 0; 2856 // Count ivars declared in class extension. 2857 for (const auto *Ext : OI->known_extensions()) 2858 count += Ext->ivar_size(); 2859 2860 // Count ivar defined in this class's implementation. This 2861 // includes synthesized ivars. 2862 if (ObjCImplementationDecl *ImplDecl = OI->getImplementation()) 2863 count += ImplDecl->ivar_size(); 2864 2865 return count; 2866 } 2867 2868 bool ASTContext::isSentinelNullExpr(const Expr *E) { 2869 if (!E) 2870 return false; 2871 2872 // nullptr_t is always treated as null. 2873 if (E->getType()->isNullPtrType()) return true; 2874 2875 if (E->getType()->isAnyPointerType() && 2876 E->IgnoreParenCasts()->isNullPointerConstant(*this, 2877 Expr::NPC_ValueDependentIsNull)) 2878 return true; 2879 2880 // Unfortunately, __null has type 'int'. 2881 if (isa<GNUNullExpr>(E)) return true; 2882 2883 return false; 2884 } 2885 2886 /// Get the implementation of ObjCInterfaceDecl, or nullptr if none 2887 /// exists. 2888 ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) { 2889 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 2890 I = ObjCImpls.find(D); 2891 if (I != ObjCImpls.end()) 2892 return cast<ObjCImplementationDecl>(I->second); 2893 return nullptr; 2894 } 2895 2896 /// Get the implementation of ObjCCategoryDecl, or nullptr if none 2897 /// exists. 2898 ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) { 2899 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 2900 I = ObjCImpls.find(D); 2901 if (I != ObjCImpls.end()) 2902 return cast<ObjCCategoryImplDecl>(I->second); 2903 return nullptr; 2904 } 2905 2906 /// Set the implementation of ObjCInterfaceDecl. 2907 void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD, 2908 ObjCImplementationDecl *ImplD) { 2909 assert(IFaceD && ImplD && "Passed null params"); 2910 ObjCImpls[IFaceD] = ImplD; 2911 } 2912 2913 /// Set the implementation of ObjCCategoryDecl. 
2914 void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD, 2915 ObjCCategoryImplDecl *ImplD) { 2916 assert(CatD && ImplD && "Passed null params"); 2917 ObjCImpls[CatD] = ImplD; 2918 } 2919 2920 const ObjCMethodDecl * 2921 ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const { 2922 return ObjCMethodRedecls.lookup(MD); 2923 } 2924 2925 void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD, 2926 const ObjCMethodDecl *Redecl) { 2927 assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration"); 2928 ObjCMethodRedecls[MD] = Redecl; 2929 } 2930 2931 const ObjCInterfaceDecl *ASTContext::getObjContainingInterface( 2932 const NamedDecl *ND) const { 2933 if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext())) 2934 return ID; 2935 if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext())) 2936 return CD->getClassInterface(); 2937 if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext())) 2938 return IMD->getClassInterface(); 2939 2940 return nullptr; 2941 } 2942 2943 /// Get the copy initialization expression of VarDecl, or nullptr if 2944 /// none exists. 2945 BlockVarCopyInit ASTContext::getBlockVarCopyInit(const VarDecl *VD) const { 2946 assert(VD && "Passed null params"); 2947 assert(VD->hasAttr<BlocksAttr>() && 2948 "getBlockVarCopyInits - not __block var"); 2949 auto I = BlockVarCopyInits.find(VD); 2950 if (I != BlockVarCopyInits.end()) 2951 return I->second; 2952 return {nullptr, false}; 2953 } 2954 2955 /// Set the copy initialization expression of a block var decl. 2956 void ASTContext::setBlockVarCopyInit(const VarDecl*VD, Expr *CopyExpr, 2957 bool CanThrow) { 2958 assert(VD && CopyExpr && "Passed null params"); 2959 assert(VD->hasAttr<BlocksAttr>() && 2960 "setBlockVarCopyInits - not __block var"); 2961 BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow); 2962 } 2963 2964 TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T, 2965 unsigned DataSize) const { 2966 if (!DataSize) 2967 DataSize = TypeLoc::getFullDataSizeForType(T); 2968 else 2969 assert(DataSize == TypeLoc::getFullDataSizeForType(T) && 2970 "incorrect data size provided to CreateTypeSourceInfo!"); 2971 2972 auto *TInfo = 2973 (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8); 2974 new (TInfo) TypeSourceInfo(T); 2975 return TInfo; 2976 } 2977 2978 TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T, 2979 SourceLocation L) const { 2980 TypeSourceInfo *DI = CreateTypeSourceInfo(T); 2981 DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L); 2982 return DI; 2983 } 2984 2985 const ASTRecordLayout & 2986 ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const { 2987 return getObjCLayout(D, nullptr); 2988 } 2989 2990 const ASTRecordLayout & 2991 ASTContext::getASTObjCImplementationLayout( 2992 const ObjCImplementationDecl *D) const { 2993 return getObjCLayout(D->getClassInterface(), D); 2994 } 2995 2996 //===----------------------------------------------------------------------===// 2997 // Type creation/memoization methods 2998 //===----------------------------------------------------------------------===// 2999 3000 QualType 3001 ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const { 3002 unsigned fastQuals = quals.getFastQualifiers(); 3003 quals.removeFastQualifiers(); 3004 3005 // Check if we've already instantiated this type. 
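  // (This is the usual FoldingSet pattern used throughout this file: profile
  // the key, probe the set, and only allocate a new node when the lookup
  // misses.)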
3006 llvm::FoldingSetNodeID ID; 3007 ExtQuals::Profile(ID, baseType, quals); 3008 void *insertPos = nullptr; 3009 if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) { 3010 assert(eq->getQualifiers() == quals); 3011 return QualType(eq, fastQuals); 3012 } 3013 3014 // If the base type is not canonical, make the appropriate canonical type. 3015 QualType canon; 3016 if (!baseType->isCanonicalUnqualified()) { 3017 SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split(); 3018 canonSplit.Quals.addConsistentQualifiers(quals); 3019 canon = getExtQualType(canonSplit.Ty, canonSplit.Quals); 3020 3021 // Re-find the insert position. 3022 (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos); 3023 } 3024 3025 auto *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals); 3026 ExtQualNodes.InsertNode(eq, insertPos); 3027 return QualType(eq, fastQuals); 3028 } 3029 3030 QualType ASTContext::getAddrSpaceQualType(QualType T, 3031 LangAS AddressSpace) const { 3032 QualType CanT = getCanonicalType(T); 3033 if (CanT.getAddressSpace() == AddressSpace) 3034 return T; 3035 3036 // If we are composing extended qualifiers together, merge together 3037 // into one ExtQuals node. 3038 QualifierCollector Quals; 3039 const Type *TypeNode = Quals.strip(T); 3040 3041 // If this type already has an address space specified, it cannot get 3042 // another one. 3043 assert(!Quals.hasAddressSpace() && 3044 "Type cannot be in multiple addr spaces!"); 3045 Quals.addAddressSpace(AddressSpace); 3046 3047 return getExtQualType(TypeNode, Quals); 3048 } 3049 3050 QualType ASTContext::removeAddrSpaceQualType(QualType T) const { 3051 // If the type is not qualified with an address space, just return it 3052 // immediately. 3053 if (!T.hasAddressSpace()) 3054 return T; 3055 3056 // If we are composing extended qualifiers together, merge together 3057 // into one ExtQuals node. 3058 QualifierCollector Quals; 3059 const Type *TypeNode; 3060 3061 while (T.hasAddressSpace()) { 3062 TypeNode = Quals.strip(T); 3063 3064 // If the type no longer has an address space after stripping qualifiers, 3065 // jump out. 3066 if (!QualType(TypeNode, 0).hasAddressSpace()) 3067 break; 3068 3069 // There might be sugar in the way. Strip it and try again. 3070 T = T.getSingleStepDesugaredType(*this); 3071 } 3072 3073 Quals.removeAddressSpace(); 3074 3075 // Removal of the address space can mean there are no longer any 3076 // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts) 3077 // or required. 3078 if (Quals.hasNonFastQualifiers()) 3079 return getExtQualType(TypeNode, Quals); 3080 else 3081 return QualType(TypeNode, Quals.getFastQualifiers()); 3082 } 3083 3084 QualType ASTContext::getObjCGCQualType(QualType T, 3085 Qualifiers::GC GCAttr) const { 3086 QualType CanT = getCanonicalType(T); 3087 if (CanT.getObjCGCAttr() == GCAttr) 3088 return T; 3089 3090 if (const auto *ptr = T->getAs<PointerType>()) { 3091 QualType Pointee = ptr->getPointeeType(); 3092 if (Pointee->isAnyPointerType()) { 3093 QualType ResultType = getObjCGCQualType(Pointee, GCAttr); 3094 return getPointerType(ResultType); 3095 } 3096 } 3097 3098 // If we are composing extended qualifiers together, merge together 3099 // into one ExtQuals node. 3100 QualifierCollector Quals; 3101 const Type *TypeNode = Quals.strip(T); 3102 3103 // If this type already has an ObjCGC specified, it cannot get 3104 // another one. 
3105 assert(!Quals.hasObjCGCAttr() && 3106 "Type cannot have multiple ObjCGCs!"); 3107 Quals.addObjCGCAttr(GCAttr); 3108 3109 return getExtQualType(TypeNode, Quals); 3110 } 3111 3112 QualType ASTContext::removePtrSizeAddrSpace(QualType T) const { 3113 if (const PointerType *Ptr = T->getAs<PointerType>()) { 3114 QualType Pointee = Ptr->getPointeeType(); 3115 if (isPtrSizeAddressSpace(Pointee.getAddressSpace())) { 3116 return getPointerType(removeAddrSpaceQualType(Pointee)); 3117 } 3118 } 3119 return T; 3120 } 3121 3122 const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T, 3123 FunctionType::ExtInfo Info) { 3124 if (T->getExtInfo() == Info) 3125 return T; 3126 3127 QualType Result; 3128 if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) { 3129 Result = getFunctionNoProtoType(FNPT->getReturnType(), Info); 3130 } else { 3131 const auto *FPT = cast<FunctionProtoType>(T); 3132 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 3133 EPI.ExtInfo = Info; 3134 Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI); 3135 } 3136 3137 return cast<FunctionType>(Result.getTypePtr()); 3138 } 3139 3140 void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD, 3141 QualType ResultType) { 3142 FD = FD->getMostRecentDecl(); 3143 while (true) { 3144 const auto *FPT = FD->getType()->castAs<FunctionProtoType>(); 3145 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 3146 FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI)); 3147 if (FunctionDecl *Next = FD->getPreviousDecl()) 3148 FD = Next; 3149 else 3150 break; 3151 } 3152 if (ASTMutationListener *L = getASTMutationListener()) 3153 L->DeducedReturnType(FD, ResultType); 3154 } 3155 3156 /// Get a function type and produce the equivalent function type with the 3157 /// specified exception specification. Type sugar that can be present on a 3158 /// declaration of a function with an exception specification is permitted 3159 /// and preserved. Other type sugar (for instance, typedefs) is not. 3160 QualType ASTContext::getFunctionTypeWithExceptionSpec( 3161 QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) { 3162 // Might have some parens. 3163 if (const auto *PT = dyn_cast<ParenType>(Orig)) 3164 return getParenType( 3165 getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI)); 3166 3167 // Might be wrapped in a macro qualified type. 3168 if (const auto *MQT = dyn_cast<MacroQualifiedType>(Orig)) 3169 return getMacroQualifiedType( 3170 getFunctionTypeWithExceptionSpec(MQT->getUnderlyingType(), ESI), 3171 MQT->getMacroIdentifier()); 3172 3173 // Might have a calling-convention attribute. 3174 if (const auto *AT = dyn_cast<AttributedType>(Orig)) 3175 return getAttributedType( 3176 AT->getAttrKind(), 3177 getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI), 3178 getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI)); 3179 3180 // Anything else must be a function type. Rebuild it with the new exception 3181 // specification. 
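  // (For example, rebuilding 'void () noexcept(false)' with an ESI of
  // EST_BasicNoexcept yields 'void () noexcept'; everything but the exception
  // specification is preserved.)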
3182 const auto *Proto = Orig->castAs<FunctionProtoType>(); 3183 return getFunctionType( 3184 Proto->getReturnType(), Proto->getParamTypes(), 3185 Proto->getExtProtoInfo().withExceptionSpec(ESI)); 3186 } 3187 3188 bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T, 3189 QualType U) { 3190 return hasSameType(T, U) || 3191 (getLangOpts().CPlusPlus17 && 3192 hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None), 3193 getFunctionTypeWithExceptionSpec(U, EST_None))); 3194 } 3195 3196 QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) { 3197 if (const auto *Proto = T->getAs<FunctionProtoType>()) { 3198 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); 3199 SmallVector<QualType, 16> Args(Proto->param_types()); 3200 for (unsigned i = 0, n = Args.size(); i != n; ++i) 3201 Args[i] = removePtrSizeAddrSpace(Args[i]); 3202 return getFunctionType(RetTy, Args, Proto->getExtProtoInfo()); 3203 } 3204 3205 if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) { 3206 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); 3207 return getFunctionNoProtoType(RetTy, Proto->getExtInfo()); 3208 } 3209 3210 return T; 3211 } 3212 3213 bool ASTContext::hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U) { 3214 return hasSameType(T, U) || 3215 hasSameType(getFunctionTypeWithoutPtrSizes(T), 3216 getFunctionTypeWithoutPtrSizes(U)); 3217 } 3218 3219 void ASTContext::adjustExceptionSpec( 3220 FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI, 3221 bool AsWritten) { 3222 // Update the type. 3223 QualType Updated = 3224 getFunctionTypeWithExceptionSpec(FD->getType(), ESI); 3225 FD->setType(Updated); 3226 3227 if (!AsWritten) 3228 return; 3229 3230 // Update the type in the type source information too. 3231 if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) { 3232 // If the type and the type-as-written differ, we may need to update 3233 // the type-as-written too. 3234 if (TSInfo->getType() != FD->getType()) 3235 Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI); 3236 3237 // FIXME: When we get proper type location information for exceptions, 3238 // we'll also have to rebuild the TypeSourceInfo. For now, we just patch 3239 // up the TypeSourceInfo; 3240 assert(TypeLoc::getFullDataSizeForType(Updated) == 3241 TypeLoc::getFullDataSizeForType(TSInfo->getType()) && 3242 "TypeLoc size mismatch from updating exception specification"); 3243 TSInfo->overrideType(Updated); 3244 } 3245 } 3246 3247 /// getComplexType - Return the uniqued reference to the type for a complex 3248 /// number with the specified element type. 3249 QualType ASTContext::getComplexType(QualType T) const { 3250 // Unique pointers, to guarantee there is only one pointer of a particular 3251 // structure. 3252 llvm::FoldingSetNodeID ID; 3253 ComplexType::Profile(ID, T); 3254 3255 void *InsertPos = nullptr; 3256 if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos)) 3257 return QualType(CT, 0); 3258 3259 // If the pointee type isn't canonical, this won't be a canonical type either, 3260 // so fill in the canonical type field. 3261 QualType Canonical; 3262 if (!T.isCanonical()) { 3263 Canonical = getComplexType(getCanonicalType(T)); 3264 3265 // Get the new insert position for the node we care about. 
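// (Explanatory note, assumed rationale rather than original text: the
// recursive getComplexType call above may have inserted nodes into
// ComplexTypes, invalidating the InsertPos computed earlier, so it is
// refreshed here before InsertNode runs below.)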
3266 ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos); 3267 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3268 } 3269 auto *New = new (*this, TypeAlignment) ComplexType(T, Canonical); 3270 Types.push_back(New); 3271 ComplexTypes.InsertNode(New, InsertPos); 3272 return QualType(New, 0); 3273 } 3274 3275 /// getPointerType - Return the uniqued reference to the type for a pointer to 3276 /// the specified type. 3277 QualType ASTContext::getPointerType(QualType T) const { 3278 // Unique pointers, to guarantee there is only one pointer of a particular 3279 // structure. 3280 llvm::FoldingSetNodeID ID; 3281 PointerType::Profile(ID, T); 3282 3283 void *InsertPos = nullptr; 3284 if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3285 return QualType(PT, 0); 3286 3287 // If the pointee type isn't canonical, this won't be a canonical type either, 3288 // so fill in the canonical type field. 3289 QualType Canonical; 3290 if (!T.isCanonical()) { 3291 Canonical = getPointerType(getCanonicalType(T)); 3292 3293 // Get the new insert position for the node we care about. 3294 PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3295 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3296 } 3297 auto *New = new (*this, TypeAlignment) PointerType(T, Canonical); 3298 Types.push_back(New); 3299 PointerTypes.InsertNode(New, InsertPos); 3300 return QualType(New, 0); 3301 } 3302 3303 QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const { 3304 llvm::FoldingSetNodeID ID; 3305 AdjustedType::Profile(ID, Orig, New); 3306 void *InsertPos = nullptr; 3307 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3308 if (AT) 3309 return QualType(AT, 0); 3310 3311 QualType Canonical = getCanonicalType(New); 3312 3313 // Get the new insert position for the node we care about. 3314 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3315 assert(!AT && "Shouldn't be in the map!"); 3316 3317 AT = new (*this, TypeAlignment) 3318 AdjustedType(Type::Adjusted, Orig, New, Canonical); 3319 Types.push_back(AT); 3320 AdjustedTypes.InsertNode(AT, InsertPos); 3321 return QualType(AT, 0); 3322 } 3323 3324 QualType ASTContext::getDecayedType(QualType T) const { 3325 assert((T->isArrayType() || T->isFunctionType()) && "T does not decay"); 3326 3327 QualType Decayed; 3328 3329 // C99 6.7.5.3p7: 3330 // A declaration of a parameter as "array of type" shall be 3331 // adjusted to "qualified pointer to type", where the type 3332 // qualifiers (if any) are those specified within the [ and ] of 3333 // the array type derivation. 3334 if (T->isArrayType()) 3335 Decayed = getArrayDecayedType(T); 3336 3337 // C99 6.7.5.3p8: 3338 // A declaration of a parameter as "function returning type" 3339 // shall be adjusted to "pointer to function returning type", as 3340 // in 6.3.2.1. 3341 if (T->isFunctionType()) 3342 Decayed = getPointerType(T); 3343 3344 llvm::FoldingSetNodeID ID; 3345 AdjustedType::Profile(ID, T, Decayed); 3346 void *InsertPos = nullptr; 3347 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3348 if (AT) 3349 return QualType(AT, 0); 3350 3351 QualType Canonical = getCanonicalType(Decayed); 3352 3353 // Get the new insert position for the node we care about. 
3354 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3355 assert(!AT && "Shouldn't be in the map!"); 3356 3357 AT = new (*this, TypeAlignment) DecayedType(T, Decayed, Canonical); 3358 Types.push_back(AT); 3359 AdjustedTypes.InsertNode(AT, InsertPos); 3360 return QualType(AT, 0); 3361 } 3362 3363 /// getBlockPointerType - Return the uniqued reference to the type for 3364 /// a pointer to the specified block. 3365 QualType ASTContext::getBlockPointerType(QualType T) const { 3366 assert(T->isFunctionType() && "block of function types only"); 3367 // Unique pointers, to guarantee there is only one block of a particular 3368 // structure. 3369 llvm::FoldingSetNodeID ID; 3370 BlockPointerType::Profile(ID, T); 3371 3372 void *InsertPos = nullptr; 3373 if (BlockPointerType *PT = 3374 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3375 return QualType(PT, 0); 3376 3377 // If the block pointee type isn't canonical, this won't be a canonical 3378 // type either so fill in the canonical type field. 3379 QualType Canonical; 3380 if (!T.isCanonical()) { 3381 Canonical = getBlockPointerType(getCanonicalType(T)); 3382 3383 // Get the new insert position for the node we care about. 3384 BlockPointerType *NewIP = 3385 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3386 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3387 } 3388 auto *New = new (*this, TypeAlignment) BlockPointerType(T, Canonical); 3389 Types.push_back(New); 3390 BlockPointerTypes.InsertNode(New, InsertPos); 3391 return QualType(New, 0); 3392 } 3393 3394 /// getLValueReferenceType - Return the uniqued reference to the type for an 3395 /// lvalue reference to the specified type. 3396 QualType 3397 ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const { 3398 assert((!T->isPlaceholderType() || 3399 T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) && 3400 "Unresolved placeholder type"); 3401 3402 // Unique pointers, to guarantee there is only one pointer of a particular 3403 // structure. 3404 llvm::FoldingSetNodeID ID; 3405 ReferenceType::Profile(ID, T, SpelledAsLValue); 3406 3407 void *InsertPos = nullptr; 3408 if (LValueReferenceType *RT = 3409 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos)) 3410 return QualType(RT, 0); 3411 3412 const auto *InnerRef = T->getAs<ReferenceType>(); 3413 3414 // If the referencee type isn't canonical, this won't be a canonical type 3415 // either, so fill in the canonical type field. 3416 QualType Canonical; 3417 if (!SpelledAsLValue || InnerRef || !T.isCanonical()) { 3418 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T); 3419 Canonical = getLValueReferenceType(getCanonicalType(PointeeType)); 3420 3421 // Get the new insert position for the node we care about. 3422 LValueReferenceType *NewIP = 3423 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos); 3424 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3425 } 3426 3427 auto *New = new (*this, TypeAlignment) LValueReferenceType(T, Canonical, 3428 SpelledAsLValue); 3429 Types.push_back(New); 3430 LValueReferenceTypes.InsertNode(New, InsertPos); 3431 3432 return QualType(New, 0); 3433 } 3434 3435 /// getRValueReferenceType - Return the uniqued reference to the type for an 3436 /// rvalue reference to the specified type. 
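/// For example (illustrative, not original text; 'Ctx' is an assumed
/// ASTContext reference):
/// \code
///   QualType IntRRef = Ctx.getRValueReferenceType(Ctx.IntTy); // int &&
/// \endcode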
3437 QualType ASTContext::getRValueReferenceType(QualType T) const { 3438 assert((!T->isPlaceholderType() || 3439 T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) && 3440 "Unresolved placeholder type"); 3441 3442 // Unique pointers, to guarantee there is only one pointer of a particular 3443 // structure. 3444 llvm::FoldingSetNodeID ID; 3445 ReferenceType::Profile(ID, T, false); 3446 3447 void *InsertPos = nullptr; 3448 if (RValueReferenceType *RT = 3449 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos)) 3450 return QualType(RT, 0); 3451 3452 const auto *InnerRef = T->getAs<ReferenceType>(); 3453 3454 // If the referencee type isn't canonical, this won't be a canonical type 3455 // either, so fill in the canonical type field. 3456 QualType Canonical; 3457 if (InnerRef || !T.isCanonical()) { 3458 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T); 3459 Canonical = getRValueReferenceType(getCanonicalType(PointeeType)); 3460 3461 // Get the new insert position for the node we care about. 3462 RValueReferenceType *NewIP = 3463 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos); 3464 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3465 } 3466 3467 auto *New = new (*this, TypeAlignment) RValueReferenceType(T, Canonical); 3468 Types.push_back(New); 3469 RValueReferenceTypes.InsertNode(New, InsertPos); 3470 return QualType(New, 0); 3471 } 3472 3473 /// getMemberPointerType - Return the uniqued reference to the type for a 3474 /// member pointer to the specified type, in the specified class. 3475 QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const { 3476 // Unique pointers, to guarantee there is only one pointer of a particular 3477 // structure. 3478 llvm::FoldingSetNodeID ID; 3479 MemberPointerType::Profile(ID, T, Cls); 3480 3481 void *InsertPos = nullptr; 3482 if (MemberPointerType *PT = 3483 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3484 return QualType(PT, 0); 3485 3486 // If the pointee or class type isn't canonical, this won't be a canonical 3487 // type either, so fill in the canonical type field. 3488 QualType Canonical; 3489 if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) { 3490 Canonical = getMemberPointerType(getCanonicalType(T),getCanonicalType(Cls)); 3491 3492 // Get the new insert position for the node we care about. 3493 MemberPointerType *NewIP = 3494 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3495 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3496 } 3497 auto *New = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical); 3498 Types.push_back(New); 3499 MemberPointerTypes.InsertNode(New, InsertPos); 3500 return QualType(New, 0); 3501 } 3502 3503 /// getConstantArrayType - Return the unique reference to the type for an 3504 /// array of the specified element type. 3505 QualType ASTContext::getConstantArrayType(QualType EltTy, 3506 const llvm::APInt &ArySizeIn, 3507 const Expr *SizeExpr, 3508 ArrayType::ArraySizeModifier ASM, 3509 unsigned IndexTypeQuals) const { 3510 assert((EltTy->isDependentType() || 3511 EltTy->isIncompleteType() || EltTy->isConstantSizeType()) && 3512 "Constant array of VLAs is illegal!"); 3513 3514 // We only need the size as part of the type if it's instantiation-dependent. 3515 if (SizeExpr && !SizeExpr->isInstantiationDependent()) 3516 SizeExpr = nullptr; 3517 3518 // Convert the array size into a canonical width matching the pointer size for 3519 // the target. 
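// (Illustrative, not original text: because of the zextOrTrunc below, a size
// arriving as a 32-bit APInt and the same value arriving as a 64-bit APInt
// profile identically on a 64-bit target, so 'int[10]' requested from
// different callers folds to a single ConstantArrayType node.)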
3520 llvm::APInt ArySize(ArySizeIn); 3521 ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth()); 3522 3523 llvm::FoldingSetNodeID ID; 3524 ConstantArrayType::Profile(ID, *this, EltTy, ArySize, SizeExpr, ASM, 3525 IndexTypeQuals); 3526 3527 void *InsertPos = nullptr; 3528 if (ConstantArrayType *ATP = 3529 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos)) 3530 return QualType(ATP, 0); 3531 3532 // If the element type isn't canonical or has qualifiers, or the array bound 3533 // is instantiation-dependent, this won't be a canonical type either, so fill 3534 // in the canonical type field. 3535 QualType Canon; 3536 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) { 3537 SplitQualType canonSplit = getCanonicalType(EltTy).split(); 3538 Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize, nullptr, 3539 ASM, IndexTypeQuals); 3540 Canon = getQualifiedType(Canon, canonSplit.Quals); 3541 3542 // Get the new insert position for the node we care about. 3543 ConstantArrayType *NewIP = 3544 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos); 3545 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3546 } 3547 3548 void *Mem = Allocate( 3549 ConstantArrayType::totalSizeToAlloc<const Expr *>(SizeExpr ? 1 : 0), 3550 TypeAlignment); 3551 auto *New = new (Mem) 3552 ConstantArrayType(EltTy, Canon, ArySize, SizeExpr, ASM, IndexTypeQuals); 3553 ConstantArrayTypes.InsertNode(New, InsertPos); 3554 Types.push_back(New); 3555 return QualType(New, 0); 3556 } 3557 3558 /// getVariableArrayDecayedType - Turns the given type, which may be 3559 /// variably-modified, into the corresponding type with all the known 3560 /// sizes replaced with [*]. 3561 QualType ASTContext::getVariableArrayDecayedType(QualType type) const { 3562 // Vastly most common case. 3563 if (!type->isVariablyModifiedType()) return type; 3564 3565 QualType result; 3566 3567 SplitQualType split = type.getSplitDesugaredType(); 3568 const Type *ty = split.Ty; 3569 switch (ty->getTypeClass()) { 3570 #define TYPE(Class, Base) 3571 #define ABSTRACT_TYPE(Class, Base) 3572 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 3573 #include "clang/AST/TypeNodes.inc" 3574 llvm_unreachable("didn't desugar past all non-canonical types?"); 3575 3576 // These types should never be variably-modified. 3577 case Type::Builtin: 3578 case Type::Complex: 3579 case Type::Vector: 3580 case Type::DependentVector: 3581 case Type::ExtVector: 3582 case Type::DependentSizedExtVector: 3583 case Type::ConstantMatrix: 3584 case Type::DependentSizedMatrix: 3585 case Type::DependentAddressSpace: 3586 case Type::ObjCObject: 3587 case Type::ObjCInterface: 3588 case Type::ObjCObjectPointer: 3589 case Type::Record: 3590 case Type::Enum: 3591 case Type::UnresolvedUsing: 3592 case Type::TypeOfExpr: 3593 case Type::TypeOf: 3594 case Type::Decltype: 3595 case Type::UnaryTransform: 3596 case Type::DependentName: 3597 case Type::InjectedClassName: 3598 case Type::TemplateSpecialization: 3599 case Type::DependentTemplateSpecialization: 3600 case Type::TemplateTypeParm: 3601 case Type::SubstTemplateTypeParmPack: 3602 case Type::Auto: 3603 case Type::DeducedTemplateSpecialization: 3604 case Type::PackExpansion: 3605 case Type::BitInt: 3606 case Type::DependentBitInt: 3607 llvm_unreachable("type should never be variably-modified"); 3608 3609 // These types can be variably-modified but should never need to 3610 // further decay. 
3611 case Type::FunctionNoProto: 3612 case Type::FunctionProto: 3613 case Type::BlockPointer: 3614 case Type::MemberPointer: 3615 case Type::Pipe: 3616 return type; 3617 3618 // These types can be variably-modified. All these modifications 3619 // preserve structure except as noted by comments. 3620 // TODO: if we ever care about optimizing VLAs, there are no-op 3621 // optimizations available here. 3622 case Type::Pointer: 3623 result = getPointerType(getVariableArrayDecayedType( 3624 cast<PointerType>(ty)->getPointeeType())); 3625 break; 3626 3627 case Type::LValueReference: { 3628 const auto *lv = cast<LValueReferenceType>(ty); 3629 result = getLValueReferenceType( 3630 getVariableArrayDecayedType(lv->getPointeeType()), 3631 lv->isSpelledAsLValue()); 3632 break; 3633 } 3634 3635 case Type::RValueReference: { 3636 const auto *lv = cast<RValueReferenceType>(ty); 3637 result = getRValueReferenceType( 3638 getVariableArrayDecayedType(lv->getPointeeType())); 3639 break; 3640 } 3641 3642 case Type::Atomic: { 3643 const auto *at = cast<AtomicType>(ty); 3644 result = getAtomicType(getVariableArrayDecayedType(at->getValueType())); 3645 break; 3646 } 3647 3648 case Type::ConstantArray: { 3649 const auto *cat = cast<ConstantArrayType>(ty); 3650 result = getConstantArrayType( 3651 getVariableArrayDecayedType(cat->getElementType()), 3652 cat->getSize(), 3653 cat->getSizeExpr(), 3654 cat->getSizeModifier(), 3655 cat->getIndexTypeCVRQualifiers()); 3656 break; 3657 } 3658 3659 case Type::DependentSizedArray: { 3660 const auto *dat = cast<DependentSizedArrayType>(ty); 3661 result = getDependentSizedArrayType( 3662 getVariableArrayDecayedType(dat->getElementType()), 3663 dat->getSizeExpr(), 3664 dat->getSizeModifier(), 3665 dat->getIndexTypeCVRQualifiers(), 3666 dat->getBracketsRange()); 3667 break; 3668 } 3669 3670 // Turn incomplete types into [*] types. 3671 case Type::IncompleteArray: { 3672 const auto *iat = cast<IncompleteArrayType>(ty); 3673 result = getVariableArrayType( 3674 getVariableArrayDecayedType(iat->getElementType()), 3675 /*size*/ nullptr, 3676 ArrayType::Normal, 3677 iat->getIndexTypeCVRQualifiers(), 3678 SourceRange()); 3679 break; 3680 } 3681 3682 // Turn VLA types into [*] types. 3683 case Type::VariableArray: { 3684 const auto *vat = cast<VariableArrayType>(ty); 3685 result = getVariableArrayType( 3686 getVariableArrayDecayedType(vat->getElementType()), 3687 /*size*/ nullptr, 3688 ArrayType::Star, 3689 vat->getIndexTypeCVRQualifiers(), 3690 vat->getBracketsRange()); 3691 break; 3692 } 3693 } 3694 3695 // Apply the top-level qualifiers from the original. 3696 return getQualifiedType(result, split.Quals); 3697 } 3698 3699 /// getVariableArrayType - Returns a non-unique reference to the type for a 3700 /// variable array of the specified element type. 3701 QualType ASTContext::getVariableArrayType(QualType EltTy, 3702 Expr *NumElts, 3703 ArrayType::ArraySizeModifier ASM, 3704 unsigned IndexTypeQuals, 3705 SourceRange Brackets) const { 3706 // Since we don't unique expressions, it isn't possible to unique VLA's 3707 // that have an expression provided for their size. 3708 QualType Canon; 3709 3710 // Be sure to pull qualifiers off the element type. 
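// (Illustrative, not original text: for an element type such as 'const int',
// the canonical node below is built over plain 'int' and the 'const' is
// re-applied on top via getQualifiedType, matching how the other array
// getters in this file canonicalize element qualifiers.)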
3711 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) { 3712 SplitQualType canonSplit = getCanonicalType(EltTy).split(); 3713 Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM, 3714 IndexTypeQuals, Brackets); 3715 Canon = getQualifiedType(Canon, canonSplit.Quals); 3716 } 3717 3718 auto *New = new (*this, TypeAlignment) 3719 VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets); 3720 3721 VariableArrayTypes.push_back(New); 3722 Types.push_back(New); 3723 return QualType(New, 0); 3724 } 3725 3726 /// getDependentSizedArrayType - Returns a non-unique reference to 3727 /// the type for a dependently-sized array of the specified element 3728 /// type. 3729 QualType ASTContext::getDependentSizedArrayType(QualType elementType, 3730 Expr *numElements, 3731 ArrayType::ArraySizeModifier ASM, 3732 unsigned elementTypeQuals, 3733 SourceRange brackets) const { 3734 assert((!numElements || numElements->isTypeDependent() || 3735 numElements->isValueDependent()) && 3736 "Size must be type- or value-dependent!"); 3737 3738 // Dependently-sized array types that do not have a specified number 3739 // of elements will have their sizes deduced from a dependent 3740 // initializer. We do no canonicalization here at all, which is okay 3741 // because they can't be used in most locations. 3742 if (!numElements) { 3743 auto *newType 3744 = new (*this, TypeAlignment) 3745 DependentSizedArrayType(*this, elementType, QualType(), 3746 numElements, ASM, elementTypeQuals, 3747 brackets); 3748 Types.push_back(newType); 3749 return QualType(newType, 0); 3750 } 3751 3752 // Otherwise, we actually build a new type every time, but we 3753 // also build a canonical type. 3754 3755 SplitQualType canonElementType = getCanonicalType(elementType).split(); 3756 3757 void *insertPos = nullptr; 3758 llvm::FoldingSetNodeID ID; 3759 DependentSizedArrayType::Profile(ID, *this, 3760 QualType(canonElementType.Ty, 0), 3761 ASM, elementTypeQuals, numElements); 3762 3763 // Look for an existing type with these properties. 3764 DependentSizedArrayType *canonTy = 3765 DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos); 3766 3767 // If we don't have one, build one. 3768 if (!canonTy) { 3769 canonTy = new (*this, TypeAlignment) 3770 DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0), 3771 QualType(), numElements, ASM, elementTypeQuals, 3772 brackets); 3773 DependentSizedArrayTypes.InsertNode(canonTy, insertPos); 3774 Types.push_back(canonTy); 3775 } 3776 3777 // Apply qualifiers from the element type to the array. 3778 QualType canon = getQualifiedType(QualType(canonTy,0), 3779 canonElementType.Quals); 3780 3781 // If we didn't need extra canonicalization for the element type or the size 3782 // expression, then just use that as our result. 3783 if (QualType(canonElementType.Ty, 0) == elementType && 3784 canonTy->getSizeExpr() == numElements) 3785 return canon; 3786 3787 // Otherwise, we need to build a type which follows the spelling 3788 // of the element type. 
3789 auto *sugaredType 3790 = new (*this, TypeAlignment) 3791 DependentSizedArrayType(*this, elementType, canon, numElements, 3792 ASM, elementTypeQuals, brackets); 3793 Types.push_back(sugaredType); 3794 return QualType(sugaredType, 0); 3795 } 3796 3797 QualType ASTContext::getIncompleteArrayType(QualType elementType, 3798 ArrayType::ArraySizeModifier ASM, 3799 unsigned elementTypeQuals) const { 3800 llvm::FoldingSetNodeID ID; 3801 IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals); 3802 3803 void *insertPos = nullptr; 3804 if (IncompleteArrayType *iat = 3805 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos)) 3806 return QualType(iat, 0); 3807 3808 // If the element type isn't canonical, this won't be a canonical type 3809 // either, so fill in the canonical type field. We also have to pull 3810 // qualifiers off the element type. 3811 QualType canon; 3812 3813 if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) { 3814 SplitQualType canonSplit = getCanonicalType(elementType).split(); 3815 canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0), 3816 ASM, elementTypeQuals); 3817 canon = getQualifiedType(canon, canonSplit.Quals); 3818 3819 // Get the new insert position for the node we care about. 3820 IncompleteArrayType *existing = 3821 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos); 3822 assert(!existing && "Shouldn't be in the map!"); (void) existing; 3823 } 3824 3825 auto *newType = new (*this, TypeAlignment) 3826 IncompleteArrayType(elementType, canon, ASM, elementTypeQuals); 3827 3828 IncompleteArrayTypes.InsertNode(newType, insertPos); 3829 Types.push_back(newType); 3830 return QualType(newType, 0); 3831 } 3832 3833 ASTContext::BuiltinVectorTypeInfo 3834 ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const { 3835 #define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS) \ 3836 {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \ 3837 NUMVECTORS}; 3838 3839 #define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS) \ 3840 {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS}; 3841 3842 switch (Ty->getKind()) { 3843 default: 3844 llvm_unreachable("Unsupported builtin vector type"); 3845 case BuiltinType::SveInt8: 3846 return SVE_INT_ELTTY(8, 16, true, 1); 3847 case BuiltinType::SveUint8: 3848 return SVE_INT_ELTTY(8, 16, false, 1); 3849 case BuiltinType::SveInt8x2: 3850 return SVE_INT_ELTTY(8, 16, true, 2); 3851 case BuiltinType::SveUint8x2: 3852 return SVE_INT_ELTTY(8, 16, false, 2); 3853 case BuiltinType::SveInt8x3: 3854 return SVE_INT_ELTTY(8, 16, true, 3); 3855 case BuiltinType::SveUint8x3: 3856 return SVE_INT_ELTTY(8, 16, false, 3); 3857 case BuiltinType::SveInt8x4: 3858 return SVE_INT_ELTTY(8, 16, true, 4); 3859 case BuiltinType::SveUint8x4: 3860 return SVE_INT_ELTTY(8, 16, false, 4); 3861 case BuiltinType::SveInt16: 3862 return SVE_INT_ELTTY(16, 8, true, 1); 3863 case BuiltinType::SveUint16: 3864 return SVE_INT_ELTTY(16, 8, false, 1); 3865 case BuiltinType::SveInt16x2: 3866 return SVE_INT_ELTTY(16, 8, true, 2); 3867 case BuiltinType::SveUint16x2: 3868 return SVE_INT_ELTTY(16, 8, false, 2); 3869 case BuiltinType::SveInt16x3: 3870 return SVE_INT_ELTTY(16, 8, true, 3); 3871 case BuiltinType::SveUint16x3: 3872 return SVE_INT_ELTTY(16, 8, false, 3); 3873 case BuiltinType::SveInt16x4: 3874 return SVE_INT_ELTTY(16, 8, true, 4); 3875 case BuiltinType::SveUint16x4: 3876 return SVE_INT_ELTTY(16, 8, false, 4); 3877 case BuiltinType::SveInt32: 3878 return SVE_INT_ELTTY(32, 4, true, 1); 3879 case 
BuiltinType::SveUint32: 3880 return SVE_INT_ELTTY(32, 4, false, 1); 3881 case BuiltinType::SveInt32x2: 3882 return SVE_INT_ELTTY(32, 4, true, 2); 3883 case BuiltinType::SveUint32x2: 3884 return SVE_INT_ELTTY(32, 4, false, 2); 3885 case BuiltinType::SveInt32x3: 3886 return SVE_INT_ELTTY(32, 4, true, 3); 3887 case BuiltinType::SveUint32x3: 3888 return SVE_INT_ELTTY(32, 4, false, 3); 3889 case BuiltinType::SveInt32x4: 3890 return SVE_INT_ELTTY(32, 4, true, 4); 3891 case BuiltinType::SveUint32x4: 3892 return SVE_INT_ELTTY(32, 4, false, 4); 3893 case BuiltinType::SveInt64: 3894 return SVE_INT_ELTTY(64, 2, true, 1); 3895 case BuiltinType::SveUint64: 3896 return SVE_INT_ELTTY(64, 2, false, 1); 3897 case BuiltinType::SveInt64x2: 3898 return SVE_INT_ELTTY(64, 2, true, 2); 3899 case BuiltinType::SveUint64x2: 3900 return SVE_INT_ELTTY(64, 2, false, 2); 3901 case BuiltinType::SveInt64x3: 3902 return SVE_INT_ELTTY(64, 2, true, 3); 3903 case BuiltinType::SveUint64x3: 3904 return SVE_INT_ELTTY(64, 2, false, 3); 3905 case BuiltinType::SveInt64x4: 3906 return SVE_INT_ELTTY(64, 2, true, 4); 3907 case BuiltinType::SveUint64x4: 3908 return SVE_INT_ELTTY(64, 2, false, 4); 3909 case BuiltinType::SveBool: 3910 return SVE_ELTTY(BoolTy, 16, 1); 3911 case BuiltinType::SveFloat16: 3912 return SVE_ELTTY(HalfTy, 8, 1); 3913 case BuiltinType::SveFloat16x2: 3914 return SVE_ELTTY(HalfTy, 8, 2); 3915 case BuiltinType::SveFloat16x3: 3916 return SVE_ELTTY(HalfTy, 8, 3); 3917 case BuiltinType::SveFloat16x4: 3918 return SVE_ELTTY(HalfTy, 8, 4); 3919 case BuiltinType::SveFloat32: 3920 return SVE_ELTTY(FloatTy, 4, 1); 3921 case BuiltinType::SveFloat32x2: 3922 return SVE_ELTTY(FloatTy, 4, 2); 3923 case BuiltinType::SveFloat32x3: 3924 return SVE_ELTTY(FloatTy, 4, 3); 3925 case BuiltinType::SveFloat32x4: 3926 return SVE_ELTTY(FloatTy, 4, 4); 3927 case BuiltinType::SveFloat64: 3928 return SVE_ELTTY(DoubleTy, 2, 1); 3929 case BuiltinType::SveFloat64x2: 3930 return SVE_ELTTY(DoubleTy, 2, 2); 3931 case BuiltinType::SveFloat64x3: 3932 return SVE_ELTTY(DoubleTy, 2, 3); 3933 case BuiltinType::SveFloat64x4: 3934 return SVE_ELTTY(DoubleTy, 2, 4); 3935 case BuiltinType::SveBFloat16: 3936 return SVE_ELTTY(BFloat16Ty, 8, 1); 3937 case BuiltinType::SveBFloat16x2: 3938 return SVE_ELTTY(BFloat16Ty, 8, 2); 3939 case BuiltinType::SveBFloat16x3: 3940 return SVE_ELTTY(BFloat16Ty, 8, 3); 3941 case BuiltinType::SveBFloat16x4: 3942 return SVE_ELTTY(BFloat16Ty, 8, 4); 3943 #define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF, \ 3944 IsSigned) \ 3945 case BuiltinType::Id: \ 3946 return {getIntTypeForBitwidth(ElBits, IsSigned), \ 3947 llvm::ElementCount::getScalable(NumEls), NF}; 3948 #define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \ 3949 case BuiltinType::Id: \ 3950 return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy), \ 3951 llvm::ElementCount::getScalable(NumEls), NF}; 3952 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \ 3953 case BuiltinType::Id: \ 3954 return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1}; 3955 #include "clang/Basic/RISCVVTypes.def" 3956 } 3957 } 3958 3959 /// getScalableVectorType - Return the unique reference to a scalable vector 3960 /// type of the specified element type and size. VectorType must be a built-in 3961 /// type. 
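/// For example (illustrative, not original text; 'Ctx' is an assumed
/// ASTContext reference):
/// \code
///   QualType VecTy = Ctx.getScalableVectorType(Ctx.FloatTy, 4);
/// \endcode
/// On an AArch64 SVE target this is expected to map to SveFloat32Ty via the
/// AArch64SVEACLETypes.def entries used below; on a target with neither SVE
/// nor RISC-V vector types the function returns a null QualType.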
3962 QualType ASTContext::getScalableVectorType(QualType EltTy, 3963 unsigned NumElts) const { 3964 if (Target->hasAArch64SVETypes()) { 3965 uint64_t EltTySize = getTypeSize(EltTy); 3966 #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \ 3967 IsSigned, IsFP, IsBF) \ 3968 if (!EltTy->isBooleanType() && \ 3969 ((EltTy->hasIntegerRepresentation() && \ 3970 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \ 3971 (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \ 3972 IsFP && !IsBF) || \ 3973 (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \ 3974 IsBF && !IsFP)) && \ 3975 EltTySize == ElBits && NumElts == NumEls) { \ 3976 return SingletonId; \ 3977 } 3978 #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \ 3979 if (EltTy->isBooleanType() && NumElts == NumEls) \ 3980 return SingletonId; 3981 #include "clang/Basic/AArch64SVEACLETypes.def" 3982 } else if (Target->hasRISCVVTypes()) { 3983 uint64_t EltTySize = getTypeSize(EltTy); 3984 #define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \ 3985 IsFP) \ 3986 if (!EltTy->isBooleanType() && \ 3987 ((EltTy->hasIntegerRepresentation() && \ 3988 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \ 3989 (EltTy->hasFloatingRepresentation() && IsFP)) && \ 3990 EltTySize == ElBits && NumElts == NumEls) \ 3991 return SingletonId; 3992 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \ 3993 if (EltTy->isBooleanType() && NumElts == NumEls) \ 3994 return SingletonId; 3995 #include "clang/Basic/RISCVVTypes.def" 3996 } 3997 return QualType(); 3998 } 3999 4000 /// getVectorType - Return the unique reference to a vector type of 4001 /// the specified element type and size. VectorType must be a built-in type. 4002 QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts, 4003 VectorType::VectorKind VecKind) const { 4004 assert(vecType->isBuiltinType()); 4005 4006 // Check if we've already instantiated a vector of this type. 4007 llvm::FoldingSetNodeID ID; 4008 VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind); 4009 4010 void *InsertPos = nullptr; 4011 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) 4012 return QualType(VTP, 0); 4013 4014 // If the element type isn't canonical, this won't be a canonical type either, 4015 // so fill in the canonical type field. 4016 QualType Canonical; 4017 if (!vecType.isCanonical()) { 4018 Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind); 4019 4020 // Get the new insert position for the node we care about. 
4021 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4022 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4023 } 4024 auto *New = new (*this, TypeAlignment) 4025 VectorType(vecType, NumElts, Canonical, VecKind); 4026 VectorTypes.InsertNode(New, InsertPos); 4027 Types.push_back(New); 4028 return QualType(New, 0); 4029 } 4030 4031 QualType 4032 ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr, 4033 SourceLocation AttrLoc, 4034 VectorType::VectorKind VecKind) const { 4035 llvm::FoldingSetNodeID ID; 4036 DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr, 4037 VecKind); 4038 void *InsertPos = nullptr; 4039 DependentVectorType *Canon = 4040 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4041 DependentVectorType *New; 4042 4043 if (Canon) { 4044 New = new (*this, TypeAlignment) DependentVectorType( 4045 *this, VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind); 4046 } else { 4047 QualType CanonVecTy = getCanonicalType(VecType); 4048 if (CanonVecTy == VecType) { 4049 New = new (*this, TypeAlignment) DependentVectorType( 4050 *this, VecType, QualType(), SizeExpr, AttrLoc, VecKind); 4051 4052 DependentVectorType *CanonCheck = 4053 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4054 assert(!CanonCheck && 4055 "Dependent-sized vector_size canonical type broken"); 4056 (void)CanonCheck; 4057 DependentVectorTypes.InsertNode(New, InsertPos); 4058 } else { 4059 QualType CanonTy = getDependentVectorType(CanonVecTy, SizeExpr, 4060 SourceLocation(), VecKind); 4061 New = new (*this, TypeAlignment) DependentVectorType( 4062 *this, VecType, CanonTy, SizeExpr, AttrLoc, VecKind); 4063 } 4064 } 4065 4066 Types.push_back(New); 4067 return QualType(New, 0); 4068 } 4069 4070 /// getExtVectorType - Return the unique reference to an extended vector type of 4071 /// the specified element type and size. VectorType must be a built-in type. 4072 QualType 4073 ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const { 4074 assert(vecType->isBuiltinType() || vecType->isDependentType()); 4075 4076 // Check if we've already instantiated a vector of this type. 4077 llvm::FoldingSetNodeID ID; 4078 VectorType::Profile(ID, vecType, NumElts, Type::ExtVector, 4079 VectorType::GenericVector); 4080 void *InsertPos = nullptr; 4081 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) 4082 return QualType(VTP, 0); 4083 4084 // If the element type isn't canonical, this won't be a canonical type either, 4085 // so fill in the canonical type field. 4086 QualType Canonical; 4087 if (!vecType.isCanonical()) { 4088 Canonical = getExtVectorType(getCanonicalType(vecType), NumElts); 4089 4090 // Get the new insert position for the node we care about. 
4091 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4092 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4093 } 4094 auto *New = new (*this, TypeAlignment) 4095 ExtVectorType(vecType, NumElts, Canonical); 4096 VectorTypes.InsertNode(New, InsertPos); 4097 Types.push_back(New); 4098 return QualType(New, 0); 4099 } 4100 4101 QualType 4102 ASTContext::getDependentSizedExtVectorType(QualType vecType, 4103 Expr *SizeExpr, 4104 SourceLocation AttrLoc) const { 4105 llvm::FoldingSetNodeID ID; 4106 DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType), 4107 SizeExpr); 4108 4109 void *InsertPos = nullptr; 4110 DependentSizedExtVectorType *Canon 4111 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4112 DependentSizedExtVectorType *New; 4113 if (Canon) { 4114 // We already have a canonical version of this array type; use it as 4115 // the canonical type for a newly-built type. 4116 New = new (*this, TypeAlignment) 4117 DependentSizedExtVectorType(*this, vecType, QualType(Canon, 0), 4118 SizeExpr, AttrLoc); 4119 } else { 4120 QualType CanonVecTy = getCanonicalType(vecType); 4121 if (CanonVecTy == vecType) { 4122 New = new (*this, TypeAlignment) 4123 DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr, 4124 AttrLoc); 4125 4126 DependentSizedExtVectorType *CanonCheck 4127 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4128 assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken"); 4129 (void)CanonCheck; 4130 DependentSizedExtVectorTypes.InsertNode(New, InsertPos); 4131 } else { 4132 QualType CanonExtTy = getDependentSizedExtVectorType(CanonVecTy, SizeExpr, 4133 SourceLocation()); 4134 New = new (*this, TypeAlignment) DependentSizedExtVectorType( 4135 *this, vecType, CanonExtTy, SizeExpr, AttrLoc); 4136 } 4137 } 4138 4139 Types.push_back(New); 4140 return QualType(New, 0); 4141 } 4142 4143 QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows, 4144 unsigned NumColumns) const { 4145 llvm::FoldingSetNodeID ID; 4146 ConstantMatrixType::Profile(ID, ElementTy, NumRows, NumColumns, 4147 Type::ConstantMatrix); 4148 4149 assert(MatrixType::isValidElementType(ElementTy) && 4150 "need a valid element type"); 4151 assert(ConstantMatrixType::isDimensionValid(NumRows) && 4152 ConstantMatrixType::isDimensionValid(NumColumns) && 4153 "need valid matrix dimensions"); 4154 void *InsertPos = nullptr; 4155 if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos)) 4156 return QualType(MTP, 0); 4157 4158 QualType Canonical; 4159 if (!ElementTy.isCanonical()) { 4160 Canonical = 4161 getConstantMatrixType(getCanonicalType(ElementTy), NumRows, NumColumns); 4162 4163 ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos); 4164 assert(!NewIP && "Matrix type shouldn't already exist in the map"); 4165 (void)NewIP; 4166 } 4167 4168 auto *New = new (*this, TypeAlignment) 4169 ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical); 4170 MatrixTypes.InsertNode(New, InsertPos); 4171 Types.push_back(New); 4172 return QualType(New, 0); 4173 } 4174 4175 QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy, 4176 Expr *RowExpr, 4177 Expr *ColumnExpr, 4178 SourceLocation AttrLoc) const { 4179 QualType CanonElementTy = getCanonicalType(ElementTy); 4180 llvm::FoldingSetNodeID ID; 4181 DependentSizedMatrixType::Profile(ID, *this, CanonElementTy, RowExpr, 4182 ColumnExpr); 4183 4184 void *InsertPos = nullptr; 4185 DependentSizedMatrixType 
*Canon = 4186 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos); 4187 4188 if (!Canon) { 4189 Canon = new (*this, TypeAlignment) DependentSizedMatrixType( 4190 *this, CanonElementTy, QualType(), RowExpr, ColumnExpr, AttrLoc); 4191 #ifndef NDEBUG 4192 DependentSizedMatrixType *CanonCheck = 4193 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos); 4194 assert(!CanonCheck && "Dependent-sized matrix canonical type broken"); 4195 #endif 4196 DependentSizedMatrixTypes.InsertNode(Canon, InsertPos); 4197 Types.push_back(Canon); 4198 } 4199 4200 // We now have a canonical version of the matrix type. 4201 // 4202 // If it exactly matches the requested type, use it directly. 4203 if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr && 4204 Canon->getColumnExpr() == ColumnExpr) 4205 return QualType(Canon, 0); 4206 4207 // Otherwise, use Canon as the canonical type for the newly-built type. 4208 DependentSizedMatrixType *New = new (*this, TypeAlignment) 4209 DependentSizedMatrixType(*this, ElementTy, QualType(Canon, 0), RowExpr, 4210 ColumnExpr, AttrLoc); 4211 Types.push_back(New); 4212 return QualType(New, 0); 4213 } 4214 4215 QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType, 4216 Expr *AddrSpaceExpr, 4217 SourceLocation AttrLoc) const { 4218 assert(AddrSpaceExpr->isInstantiationDependent()); 4219 4220 QualType canonPointeeType = getCanonicalType(PointeeType); 4221 4222 void *insertPos = nullptr; 4223 llvm::FoldingSetNodeID ID; 4224 DependentAddressSpaceType::Profile(ID, *this, canonPointeeType, 4225 AddrSpaceExpr); 4226 4227 DependentAddressSpaceType *canonTy = 4228 DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos); 4229 4230 if (!canonTy) { 4231 canonTy = new (*this, TypeAlignment) 4232 DependentAddressSpaceType(*this, canonPointeeType, 4233 QualType(), AddrSpaceExpr, AttrLoc); 4234 DependentAddressSpaceTypes.InsertNode(canonTy, insertPos); 4235 Types.push_back(canonTy); 4236 } 4237 4238 if (canonPointeeType == PointeeType && 4239 canonTy->getAddrSpaceExpr() == AddrSpaceExpr) 4240 return QualType(canonTy, 0); 4241 4242 auto *sugaredType 4243 = new (*this, TypeAlignment) 4244 DependentAddressSpaceType(*this, PointeeType, QualType(canonTy, 0), 4245 AddrSpaceExpr, AttrLoc); 4246 Types.push_back(sugaredType); 4247 return QualType(sugaredType, 0); 4248 } 4249 4250 /// Determine whether \p T is canonical as the result type of a function. 4251 static bool isCanonicalResultType(QualType T) { 4252 return T.isCanonical() && 4253 (T.getObjCLifetime() == Qualifiers::OCL_None || 4254 T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone); 4255 } 4256 4257 /// getFunctionNoProtoType - Return a K&R style C function type like 'int()'. 4258 QualType 4259 ASTContext::getFunctionNoProtoType(QualType ResultTy, 4260 const FunctionType::ExtInfo &Info) const { 4261 // FIXME: This assertion cannot be enabled (yet) because the ObjC rewriter 4262 // functionality creates a function without a prototype regardless of 4263 // language mode (so it makes them even in C++). Once the rewriter has been 4264 // fixed, this assertion can be enabled again. 4265 //assert(!LangOpts.requiresStrictPrototypes() && 4266 // "strict prototypes are disabled"); 4267 4268 // Unique functions, to guarantee there is only one function of a particular 4269 // structure.
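// (Illustrative note, not from the original source: uniquing means two
// independent requests for 'int()' return the same FunctionNoProtoType
// object, so type equality can later be checked by comparing canonical type
// pointers.)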
4270 llvm::FoldingSetNodeID ID; 4271 FunctionNoProtoType::Profile(ID, ResultTy, Info); 4272 4273 void *InsertPos = nullptr; 4274 if (FunctionNoProtoType *FT = 4275 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) 4276 return QualType(FT, 0); 4277 4278 QualType Canonical; 4279 if (!isCanonicalResultType(ResultTy)) { 4280 Canonical = 4281 getFunctionNoProtoType(getCanonicalFunctionResultType(ResultTy), Info); 4282 4283 // Get the new insert position for the node we care about. 4284 FunctionNoProtoType *NewIP = 4285 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos); 4286 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4287 } 4288 4289 auto *New = new (*this, TypeAlignment) 4290 FunctionNoProtoType(ResultTy, Canonical, Info); 4291 Types.push_back(New); 4292 FunctionNoProtoTypes.InsertNode(New, InsertPos); 4293 return QualType(New, 0); 4294 } 4295 4296 CanQualType 4297 ASTContext::getCanonicalFunctionResultType(QualType ResultType) const { 4298 CanQualType CanResultType = getCanonicalType(ResultType); 4299 4300 // Canonical result types do not have ARC lifetime qualifiers. 4301 if (CanResultType.getQualifiers().hasObjCLifetime()) { 4302 Qualifiers Qs = CanResultType.getQualifiers(); 4303 Qs.removeObjCLifetime(); 4304 return CanQualType::CreateUnsafe( 4305 getQualifiedType(CanResultType.getUnqualifiedType(), Qs)); 4306 } 4307 4308 return CanResultType; 4309 } 4310 4311 static bool isCanonicalExceptionSpecification( 4312 const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) { 4313 if (ESI.Type == EST_None) 4314 return true; 4315 if (!NoexceptInType) 4316 return false; 4317 4318 // C++17 onwards: exception specification is part of the type, as a simple 4319 // boolean "can this function type throw". 4320 if (ESI.Type == EST_BasicNoexcept) 4321 return true; 4322 4323 // A noexcept(expr) specification is (possibly) canonical if expr is 4324 // value-dependent. 4325 if (ESI.Type == EST_DependentNoexcept) 4326 return true; 4327 4328 // A dynamic exception specification is canonical if it only contains pack 4329 // expansions (so we can't tell whether it's non-throwing) and all its 4330 // contained types are canonical. 4331 if (ESI.Type == EST_Dynamic) { 4332 bool AnyPackExpansions = false; 4333 for (QualType ET : ESI.Exceptions) { 4334 if (!ET.isCanonical()) 4335 return false; 4336 if (ET->getAs<PackExpansionType>()) 4337 AnyPackExpansions = true; 4338 } 4339 return AnyPackExpansions; 4340 } 4341 4342 return false; 4343 } 4344 4345 QualType ASTContext::getFunctionTypeInternal( 4346 QualType ResultTy, ArrayRef<QualType> ArgArray, 4347 const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const { 4348 size_t NumArgs = ArgArray.size(); 4349 4350 // Unique functions, to guarantee there is only one function of a particular 4351 // structure. 4352 llvm::FoldingSetNodeID ID; 4353 FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI, 4354 *this, true); 4355 4356 QualType Canonical; 4357 bool Unique = false; 4358 4359 void *InsertPos = nullptr; 4360 if (FunctionProtoType *FPT = 4361 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) { 4362 QualType Existing = QualType(FPT, 0); 4363 4364 // If we find a pre-existing equivalent FunctionProtoType, we can just reuse 4365 // it so long as our exception specification doesn't contain a dependent 4366 // noexcept expression, or we're just looking for a canonical type. 4367 // Otherwise, we're going to need to create a type 4368 // sugar node to hold the concrete expression. 
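// (Illustrative, not original text: two declarations each written as
// 'void f() noexcept(B)' with a dependent 'B' get their own sugar node
// holding that noexcept expression while sharing one canonical
// FunctionProtoType; see the Unique/Canonical handling below.)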
4369 if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) || 4370 EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr()) 4371 return Existing; 4372 4373 // We need a new type sugar node for this one, to hold the new noexcept 4374 // expression. We do no canonicalization here, but that's OK since we don't 4375 // expect to see the same noexcept expression much more than once. 4376 Canonical = getCanonicalType(Existing); 4377 Unique = true; 4378 } 4379 4380 bool NoexceptInType = getLangOpts().CPlusPlus17; 4381 bool IsCanonicalExceptionSpec = 4382 isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType); 4383 4384 // Determine whether the type being created is already canonical or not. 4385 bool isCanonical = !Unique && IsCanonicalExceptionSpec && 4386 isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn; 4387 for (unsigned i = 0; i != NumArgs && isCanonical; ++i) 4388 if (!ArgArray[i].isCanonicalAsParam()) 4389 isCanonical = false; 4390 4391 if (OnlyWantCanonical) 4392 assert(isCanonical && 4393 "given non-canonical parameters constructing canonical type"); 4394 4395 // If this type isn't canonical, get the canonical version of it if we don't 4396 // already have it. The exception spec is only partially part of the 4397 // canonical type, and only in C++17 onwards. 4398 if (!isCanonical && Canonical.isNull()) { 4399 SmallVector<QualType, 16> CanonicalArgs; 4400 CanonicalArgs.reserve(NumArgs); 4401 for (unsigned i = 0; i != NumArgs; ++i) 4402 CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i])); 4403 4404 llvm::SmallVector<QualType, 8> ExceptionTypeStorage; 4405 FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI; 4406 CanonicalEPI.HasTrailingReturn = false; 4407 4408 if (IsCanonicalExceptionSpec) { 4409 // Exception spec is already OK. 4410 } else if (NoexceptInType) { 4411 switch (EPI.ExceptionSpec.Type) { 4412 case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated: 4413 // We don't know yet. It shouldn't matter what we pick here; no-one 4414 // should ever look at this. 4415 LLVM_FALLTHROUGH; 4416 case EST_None: case EST_MSAny: case EST_NoexceptFalse: 4417 CanonicalEPI.ExceptionSpec.Type = EST_None; 4418 break; 4419 4420 // A dynamic exception specification is almost always "not noexcept", 4421 // with the exception that a pack expansion might expand to no types. 4422 case EST_Dynamic: { 4423 bool AnyPacks = false; 4424 for (QualType ET : EPI.ExceptionSpec.Exceptions) { 4425 if (ET->getAs<PackExpansionType>()) 4426 AnyPacks = true; 4427 ExceptionTypeStorage.push_back(getCanonicalType(ET)); 4428 } 4429 if (!AnyPacks) 4430 CanonicalEPI.ExceptionSpec.Type = EST_None; 4431 else { 4432 CanonicalEPI.ExceptionSpec.Type = EST_Dynamic; 4433 CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage; 4434 } 4435 break; 4436 } 4437 4438 case EST_DynamicNone: 4439 case EST_BasicNoexcept: 4440 case EST_NoexceptTrue: 4441 case EST_NoThrow: 4442 CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept; 4443 break; 4444 4445 case EST_DependentNoexcept: 4446 llvm_unreachable("dependent noexcept is already canonical"); 4447 } 4448 } else { 4449 CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo(); 4450 } 4451 4452 // Adjust the canonical function result type. 4453 CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy); 4454 Canonical = 4455 getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true); 4456 4457 // Get the new insert position for the node we care about. 
4458 FunctionProtoType *NewIP = 4459 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos); 4460 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4461 } 4462 4463 // Compute the needed size to hold this FunctionProtoType and the 4464 // various trailing objects. 4465 auto ESH = FunctionProtoType::getExceptionSpecSize( 4466 EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size()); 4467 size_t Size = FunctionProtoType::totalSizeToAlloc< 4468 QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields, 4469 FunctionType::ExceptionType, Expr *, FunctionDecl *, 4470 FunctionProtoType::ExtParameterInfo, Qualifiers>( 4471 NumArgs, EPI.Variadic, EPI.requiresFunctionProtoTypeExtraBitfields(), 4472 ESH.NumExceptionType, ESH.NumExprPtr, ESH.NumFunctionDeclPtr, 4473 EPI.ExtParameterInfos ? NumArgs : 0, 4474 EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0); 4475 4476 auto *FTP = (FunctionProtoType *)Allocate(Size, TypeAlignment); 4477 FunctionProtoType::ExtProtoInfo newEPI = EPI; 4478 new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI); 4479 Types.push_back(FTP); 4480 if (!Unique) 4481 FunctionProtoTypes.InsertNode(FTP, InsertPos); 4482 return QualType(FTP, 0); 4483 } 4484 4485 QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const { 4486 llvm::FoldingSetNodeID ID; 4487 PipeType::Profile(ID, T, ReadOnly); 4488 4489 void *InsertPos = nullptr; 4490 if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos)) 4491 return QualType(PT, 0); 4492 4493 // If the pipe element type isn't canonical, this won't be a canonical type 4494 // either, so fill in the canonical type field. 4495 QualType Canonical; 4496 if (!T.isCanonical()) { 4497 Canonical = getPipeType(getCanonicalType(T), ReadOnly); 4498 4499 // Get the new insert position for the node we care about. 4500 PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos); 4501 assert(!NewIP && "Shouldn't be in the map!"); 4502 (void)NewIP; 4503 } 4504 auto *New = new (*this, TypeAlignment) PipeType(T, Canonical, ReadOnly); 4505 Types.push_back(New); 4506 PipeTypes.InsertNode(New, InsertPos); 4507 return QualType(New, 0); 4508 } 4509 4510 QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const { 4511 // OpenCL v1.1 s6.5.3: a string literal is in the constant address space. 4512 return LangOpts.OpenCL ? 
getAddrSpaceQualType(Ty, LangAS::opencl_constant) 4513 : Ty; 4514 } 4515 4516 QualType ASTContext::getReadPipeType(QualType T) const { 4517 return getPipeType(T, true); 4518 } 4519 4520 QualType ASTContext::getWritePipeType(QualType T) const { 4521 return getPipeType(T, false); 4522 } 4523 4524 QualType ASTContext::getBitIntType(bool IsUnsigned, unsigned NumBits) const { 4525 llvm::FoldingSetNodeID ID; 4526 BitIntType::Profile(ID, IsUnsigned, NumBits); 4527 4528 void *InsertPos = nullptr; 4529 if (BitIntType *EIT = BitIntTypes.FindNodeOrInsertPos(ID, InsertPos)) 4530 return QualType(EIT, 0); 4531 4532 auto *New = new (*this, TypeAlignment) BitIntType(IsUnsigned, NumBits); 4533 BitIntTypes.InsertNode(New, InsertPos); 4534 Types.push_back(New); 4535 return QualType(New, 0); 4536 } 4537 4538 QualType ASTContext::getDependentBitIntType(bool IsUnsigned, 4539 Expr *NumBitsExpr) const { 4540 assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent"); 4541 llvm::FoldingSetNodeID ID; 4542 DependentBitIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr); 4543 4544 void *InsertPos = nullptr; 4545 if (DependentBitIntType *Existing = 4546 DependentBitIntTypes.FindNodeOrInsertPos(ID, InsertPos)) 4547 return QualType(Existing, 0); 4548 4549 auto *New = new (*this, TypeAlignment) 4550 DependentBitIntType(*this, IsUnsigned, NumBitsExpr); 4551 DependentBitIntTypes.InsertNode(New, InsertPos); 4552 4553 Types.push_back(New); 4554 return QualType(New, 0); 4555 } 4556 4557 #ifndef NDEBUG 4558 static bool NeedsInjectedClassNameType(const RecordDecl *D) { 4559 if (!isa<CXXRecordDecl>(D)) return false; 4560 const auto *RD = cast<CXXRecordDecl>(D); 4561 if (isa<ClassTemplatePartialSpecializationDecl>(RD)) 4562 return true; 4563 if (RD->getDescribedClassTemplate() && 4564 !isa<ClassTemplateSpecializationDecl>(RD)) 4565 return true; 4566 return false; 4567 } 4568 #endif 4569 4570 /// getInjectedClassNameType - Return the unique reference to the 4571 /// injected class name type for the specified templated declaration. 4572 QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl, 4573 QualType TST) const { 4574 assert(NeedsInjectedClassNameType(Decl)); 4575 if (Decl->TypeForDecl) { 4576 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 4577 } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) { 4578 assert(PrevDecl->TypeForDecl && "previous declaration has no type"); 4579 Decl->TypeForDecl = PrevDecl->TypeForDecl; 4580 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 4581 } else { 4582 Type *newType = 4583 new (*this, TypeAlignment) InjectedClassNameType(Decl, TST); 4584 Decl->TypeForDecl = newType; 4585 Types.push_back(newType); 4586 } 4587 return QualType(Decl->TypeForDecl, 0); 4588 } 4589 4590 /// getTypeDeclType - Return the unique reference to the type for the 4591 /// specified type declaration. 
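/// For example (illustrative, not original text; 'Ctx' and 'RD' are assumed
/// names for an ASTContext and a RecordDecl):
/// \code
///   QualType T = Ctx.getTypeDeclType(RD); // RecordType for RD
/// \endcode
/// The result is cached on the declaration via TypeForDecl, so repeated calls
/// hand back the same node.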
4592 QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const { 4593 assert(Decl && "Passed null for Decl param"); 4594 assert(!Decl->TypeForDecl && "TypeForDecl present in slow case"); 4595 4596 if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl)) 4597 return getTypedefType(Typedef); 4598 4599 assert(!isa<TemplateTypeParmDecl>(Decl) && 4600 "Template type parameter types are always available."); 4601 4602 if (const auto *Record = dyn_cast<RecordDecl>(Decl)) { 4603 assert(Record->isFirstDecl() && "struct/union has previous declaration"); 4604 assert(!NeedsInjectedClassNameType(Record)); 4605 return getRecordType(Record); 4606 } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) { 4607 assert(Enum->isFirstDecl() && "enum has previous declaration"); 4608 return getEnumType(Enum); 4609 } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) { 4610 return getUnresolvedUsingType(Using); 4611 } else 4612 llvm_unreachable("TypeDecl without a type?"); 4613 4614 return QualType(Decl->TypeForDecl, 0); 4615 } 4616 4617 /// getTypedefType - Return the unique reference to the type for the 4618 /// specified typedef name decl. 4619 QualType ASTContext::getTypedefType(const TypedefNameDecl *Decl, 4620 QualType Underlying) const { 4621 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 4622 4623 if (Underlying.isNull()) 4624 Underlying = Decl->getUnderlyingType(); 4625 QualType Canonical = getCanonicalType(Underlying); 4626 auto *newType = new (*this, TypeAlignment) 4627 TypedefType(Type::Typedef, Decl, Underlying, Canonical); 4628 Decl->TypeForDecl = newType; 4629 Types.push_back(newType); 4630 return QualType(newType, 0); 4631 } 4632 4633 QualType ASTContext::getUsingType(const UsingShadowDecl *Found, 4634 QualType Underlying) const { 4635 llvm::FoldingSetNodeID ID; 4636 UsingType::Profile(ID, Found); 4637 4638 void *InsertPos = nullptr; 4639 UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos); 4640 if (T) 4641 return QualType(T, 0); 4642 4643 assert(!Underlying.hasLocalQualifiers()); 4644 assert(Underlying == getTypeDeclType(cast<TypeDecl>(Found->getTargetDecl()))); 4645 QualType Canon = Underlying.getCanonicalType(); 4646 4647 UsingType *NewType = 4648 new (*this, TypeAlignment) UsingType(Found, Underlying, Canon); 4649 Types.push_back(NewType); 4650 UsingTypes.InsertNode(NewType, InsertPos); 4651 return QualType(NewType, 0); 4652 } 4653 4654 QualType ASTContext::getRecordType(const RecordDecl *Decl) const { 4655 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 4656 4657 if (const RecordDecl *PrevDecl = Decl->getPreviousDecl()) 4658 if (PrevDecl->TypeForDecl) 4659 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); 4660 4661 auto *newType = new (*this, TypeAlignment) RecordType(Decl); 4662 Decl->TypeForDecl = newType; 4663 Types.push_back(newType); 4664 return QualType(newType, 0); 4665 } 4666 4667 QualType ASTContext::getEnumType(const EnumDecl *Decl) const { 4668 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 4669 4670 if (const EnumDecl *PrevDecl = Decl->getPreviousDecl()) 4671 if (PrevDecl->TypeForDecl) 4672 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); 4673 4674 auto *newType = new (*this, TypeAlignment) EnumType(Decl); 4675 Decl->TypeForDecl = newType; 4676 Types.push_back(newType); 4677 return QualType(newType, 0); 4678 } 4679 4680 QualType ASTContext::getUnresolvedUsingType( 4681 const UnresolvedUsingTypenameDecl *Decl) const { 4682 if (Decl->TypeForDecl) 4683 return 
QualType(Decl->TypeForDecl, 0); 4684 4685 if (const UnresolvedUsingTypenameDecl *CanonicalDecl = 4686 Decl->getCanonicalDecl()) 4687 if (CanonicalDecl->TypeForDecl) 4688 return QualType(Decl->TypeForDecl = CanonicalDecl->TypeForDecl, 0); 4689 4690 Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Decl); 4691 Decl->TypeForDecl = newType; 4692 Types.push_back(newType); 4693 return QualType(newType, 0); 4694 } 4695 4696 QualType ASTContext::getAttributedType(attr::Kind attrKind, 4697 QualType modifiedType, 4698 QualType equivalentType) { 4699 llvm::FoldingSetNodeID id; 4700 AttributedType::Profile(id, attrKind, modifiedType, equivalentType); 4701 4702 void *insertPos = nullptr; 4703 AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos); 4704 if (type) return QualType(type, 0); 4705 4706 QualType canon = getCanonicalType(equivalentType); 4707 type = new (*this, TypeAlignment) 4708 AttributedType(canon, attrKind, modifiedType, equivalentType); 4709 4710 Types.push_back(type); 4711 AttributedTypes.InsertNode(type, insertPos); 4712 4713 return QualType(type, 0); 4714 } 4715 4716 QualType ASTContext::getBTFTagAttributedType(const BTFTypeTagAttr *BTFAttr, 4717 QualType Wrapped) { 4718 llvm::FoldingSetNodeID ID; 4719 BTFTagAttributedType::Profile(ID, Wrapped, BTFAttr); 4720 4721 void *InsertPos = nullptr; 4722 BTFTagAttributedType *Ty = 4723 BTFTagAttributedTypes.FindNodeOrInsertPos(ID, InsertPos); 4724 if (Ty) 4725 return QualType(Ty, 0); 4726 4727 QualType Canon = getCanonicalType(Wrapped); 4728 Ty = new (*this, TypeAlignment) BTFTagAttributedType(Canon, Wrapped, BTFAttr); 4729 4730 Types.push_back(Ty); 4731 BTFTagAttributedTypes.InsertNode(Ty, InsertPos); 4732 4733 return QualType(Ty, 0); 4734 } 4735 4736 /// Retrieve a substitution-result type. 
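/// The replacement type must already be canonical (asserted below); the
/// resulting SubstTemplateTypeParmType node is uniqued in a folding set.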
QualType
ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,
                                         QualType Replacement) const {
  assert(Replacement.isCanonical()
         && "replacement types must always be canonical");

  llvm::FoldingSetNodeID ID;
  SubstTemplateTypeParmType::Profile(ID, Parm, Replacement);
  void *InsertPos = nullptr;
  SubstTemplateTypeParmType *SubstParm
    = SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (!SubstParm) {
    SubstParm = new (*this, TypeAlignment)
        SubstTemplateTypeParmType(Parm, Replacement);
    Types.push_back(SubstParm);
    SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos);
  }

  return QualType(SubstParm, 0);
}

/// Retrieve a substitution-result type for a template type parameter pack
/// that has been substituted with the given argument pack.
QualType ASTContext::getSubstTemplateTypeParmPackType(
    const TemplateTypeParmType *Parm, const TemplateArgument &ArgPack) {
#ifndef NDEBUG
  for (const auto &P : ArgPack.pack_elements()) {
    assert(P.getKind() == TemplateArgument::Type && "Pack contains a non-type");
    assert(P.getAsType().isCanonical() && "Pack contains non-canonical type");
  }
#endif

  llvm::FoldingSetNodeID ID;
  SubstTemplateTypeParmPackType::Profile(ID, Parm, ArgPack);
  void *InsertPos = nullptr;
  if (SubstTemplateTypeParmPackType *SubstParm
        = SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(SubstParm, 0);

  QualType Canon;
  if (!Parm->isCanonicalUnqualified()) {
    Canon = getCanonicalType(QualType(Parm, 0));
    Canon = getSubstTemplateTypeParmPackType(cast<TemplateTypeParmType>(Canon),
                                             ArgPack);
    SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
  }

  auto *SubstParm
    = new (*this, TypeAlignment) SubstTemplateTypeParmPackType(Parm, Canon,
                                                               ArgPack);
  Types.push_back(SubstParm);
  SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos);
  return QualType(SubstParm, 0);
}

/// Retrieve the template type parameter type for a template
/// parameter or parameter pack with the given depth, index, and (optionally)
/// name.
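/// When a declaration is provided, the canonical form is the declaration-less
/// parameter type with the same depth, index, and pack-ness.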
QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
                                             bool ParameterPack,
                                             TemplateTypeParmDecl *TTPDecl) const {
  llvm::FoldingSetNodeID ID;
  TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
  void *InsertPos = nullptr;
  TemplateTypeParmType *TypeParm
    = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (TypeParm)
    return QualType(TypeParm, 0);

  if (TTPDecl) {
    QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
    TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(TTPDecl, Canon);

    TemplateTypeParmType *TypeCheck
      = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!TypeCheck && "Template type parameter canonical type broken");
    (void)TypeCheck;
  } else
    TypeParm = new (*this, TypeAlignment)
        TemplateTypeParmType(Depth, Index, ParameterPack);

  Types.push_back(TypeParm);
  TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);

  return QualType(TypeParm, 0);
}

TypeSourceInfo *
ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
                                              SourceLocation NameLoc,
                                              const TemplateArgumentListInfo &Args,
                                              QualType Underlying) const {
  assert(!Name.getAsDependentTemplateName() &&
         "No dependent template names here!");
  QualType TST = getTemplateSpecializationType(Name, Args, Underlying);

  TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
  TemplateSpecializationTypeLoc TL =
      DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
  TL.setTemplateKeywordLoc(SourceLocation());
  TL.setTemplateNameLoc(NameLoc);
  TL.setLAngleLoc(Args.getLAngleLoc());
  TL.setRAngleLoc(Args.getRAngleLoc());
  for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
    TL.setArgLocInfo(i, Args[i].getLocInfo());
  return DI;
}

QualType
ASTContext::getTemplateSpecializationType(TemplateName Template,
                                          const TemplateArgumentListInfo &Args,
                                          QualType Underlying) const {
  assert(!Template.getAsDependentTemplateName() &&
         "No dependent template names here!");

  SmallVector<TemplateArgument, 4> ArgVec;
  ArgVec.reserve(Args.size());
  for (const TemplateArgumentLoc &Arg : Args.arguments())
    ArgVec.push_back(Arg.getArgument());

  return getTemplateSpecializationType(Template, ArgVec, Underlying);
}

#ifndef NDEBUG
static bool hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) {
  for (const TemplateArgument &Arg : Args)
    if (Arg.isPackExpansion())
      return true;

  // No argument in the list is a pack expansion.
  return false;
}
#endif

QualType
ASTContext::getTemplateSpecializationType(TemplateName Template,
                                          ArrayRef<TemplateArgument> Args,
                                          QualType Underlying) const {
  assert(!Template.getAsDependentTemplateName() &&
         "No dependent template names here!");
  // Look through qualified template names.
  if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
    Template = QTN->getUnderlyingTemplate();

  bool IsTypeAlias =
      isa_and_nonnull<TypeAliasTemplateDecl>(Template.getAsTemplateDecl());
  QualType CanonType;
  if (!Underlying.isNull())
    CanonType = getCanonicalType(Underlying);
  else {
    // We can get here with an alias template when the specialization contains
    // a pack expansion that does not match up with a parameter pack.
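    // In that case the alias cannot be expanded here, so the sugar is dropped
    // below and a canonical template specialization type is built instead.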
4890 assert((!IsTypeAlias || hasAnyPackExpansions(Args)) && 4891 "Caller must compute aliased type"); 4892 IsTypeAlias = false; 4893 CanonType = getCanonicalTemplateSpecializationType(Template, Args); 4894 } 4895 4896 // Allocate the (non-canonical) template specialization type, but don't 4897 // try to unique it: these types typically have location information that 4898 // we don't unique and don't want to lose. 4899 void *Mem = Allocate(sizeof(TemplateSpecializationType) + 4900 sizeof(TemplateArgument) * Args.size() + 4901 (IsTypeAlias? sizeof(QualType) : 0), 4902 TypeAlignment); 4903 auto *Spec 4904 = new (Mem) TemplateSpecializationType(Template, Args, CanonType, 4905 IsTypeAlias ? Underlying : QualType()); 4906 4907 Types.push_back(Spec); 4908 return QualType(Spec, 0); 4909 } 4910 4911 static bool 4912 getCanonicalTemplateArguments(const ASTContext &C, 4913 ArrayRef<TemplateArgument> OrigArgs, 4914 SmallVectorImpl<TemplateArgument> &CanonArgs) { 4915 bool AnyNonCanonArgs = false; 4916 unsigned NumArgs = OrigArgs.size(); 4917 CanonArgs.resize(NumArgs); 4918 for (unsigned I = 0; I != NumArgs; ++I) { 4919 const TemplateArgument &OrigArg = OrigArgs[I]; 4920 TemplateArgument &CanonArg = CanonArgs[I]; 4921 CanonArg = C.getCanonicalTemplateArgument(OrigArg); 4922 if (!CanonArg.structurallyEquals(OrigArg)) 4923 AnyNonCanonArgs = true; 4924 } 4925 return AnyNonCanonArgs; 4926 } 4927 4928 QualType ASTContext::getCanonicalTemplateSpecializationType( 4929 TemplateName Template, ArrayRef<TemplateArgument> Args) const { 4930 assert(!Template.getAsDependentTemplateName() && 4931 "No dependent template names here!"); 4932 4933 // Look through qualified template names. 4934 if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName()) 4935 Template = TemplateName(QTN->getUnderlyingTemplate()); 4936 4937 // Build the canonical template specialization type. 4938 TemplateName CanonTemplate = getCanonicalTemplateName(Template); 4939 SmallVector<TemplateArgument, 4> CanonArgs; 4940 ::getCanonicalTemplateArguments(*this, Args, CanonArgs); 4941 4942 // Determine whether this canonical template specialization type already 4943 // exists. 4944 llvm::FoldingSetNodeID ID; 4945 TemplateSpecializationType::Profile(ID, CanonTemplate, 4946 CanonArgs, *this); 4947 4948 void *InsertPos = nullptr; 4949 TemplateSpecializationType *Spec 4950 = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 4951 4952 if (!Spec) { 4953 // Allocate a new canonical template specialization type. 
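    // The canonical arguments are stored in trailing storage after the node,
    // so the allocation must include room for all of them.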
4954 void *Mem = Allocate((sizeof(TemplateSpecializationType) + 4955 sizeof(TemplateArgument) * CanonArgs.size()), 4956 TypeAlignment); 4957 Spec = new (Mem) TemplateSpecializationType(CanonTemplate, 4958 CanonArgs, 4959 QualType(), QualType()); 4960 Types.push_back(Spec); 4961 TemplateSpecializationTypes.InsertNode(Spec, InsertPos); 4962 } 4963 4964 assert(Spec->isDependentType() && 4965 "Non-dependent template-id type must have a canonical type"); 4966 return QualType(Spec, 0); 4967 } 4968 4969 QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword, 4970 NestedNameSpecifier *NNS, 4971 QualType NamedType, 4972 TagDecl *OwnedTagDecl) const { 4973 llvm::FoldingSetNodeID ID; 4974 ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl); 4975 4976 void *InsertPos = nullptr; 4977 ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 4978 if (T) 4979 return QualType(T, 0); 4980 4981 QualType Canon = NamedType; 4982 if (!Canon.isCanonical()) { 4983 Canon = getCanonicalType(NamedType); 4984 ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 4985 assert(!CheckT && "Elaborated canonical type broken"); 4986 (void)CheckT; 4987 } 4988 4989 void *Mem = Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl), 4990 TypeAlignment); 4991 T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl); 4992 4993 Types.push_back(T); 4994 ElaboratedTypes.InsertNode(T, InsertPos); 4995 return QualType(T, 0); 4996 } 4997 4998 QualType 4999 ASTContext::getParenType(QualType InnerType) const { 5000 llvm::FoldingSetNodeID ID; 5001 ParenType::Profile(ID, InnerType); 5002 5003 void *InsertPos = nullptr; 5004 ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 5005 if (T) 5006 return QualType(T, 0); 5007 5008 QualType Canon = InnerType; 5009 if (!Canon.isCanonical()) { 5010 Canon = getCanonicalType(InnerType); 5011 ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 5012 assert(!CheckT && "Paren canonical type broken"); 5013 (void)CheckT; 5014 } 5015 5016 T = new (*this, TypeAlignment) ParenType(InnerType, Canon); 5017 Types.push_back(T); 5018 ParenTypes.InsertNode(T, InsertPos); 5019 return QualType(T, 0); 5020 } 5021 5022 QualType 5023 ASTContext::getMacroQualifiedType(QualType UnderlyingTy, 5024 const IdentifierInfo *MacroII) const { 5025 QualType Canon = UnderlyingTy; 5026 if (!Canon.isCanonical()) 5027 Canon = getCanonicalType(UnderlyingTy); 5028 5029 auto *newType = new (*this, TypeAlignment) 5030 MacroQualifiedType(UnderlyingTy, Canon, MacroII); 5031 Types.push_back(newType); 5032 return QualType(newType, 0); 5033 } 5034 5035 QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword, 5036 NestedNameSpecifier *NNS, 5037 const IdentifierInfo *Name, 5038 QualType Canon) const { 5039 if (Canon.isNull()) { 5040 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 5041 if (CanonNNS != NNS) 5042 Canon = getDependentNameType(Keyword, CanonNNS, Name); 5043 } 5044 5045 llvm::FoldingSetNodeID ID; 5046 DependentNameType::Profile(ID, Keyword, NNS, Name); 5047 5048 void *InsertPos = nullptr; 5049 DependentNameType *T 5050 = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos); 5051 if (T) 5052 return QualType(T, 0); 5053 5054 T = new (*this, TypeAlignment) DependentNameType(Keyword, NNS, Name, Canon); 5055 Types.push_back(T); 5056 DependentNameTypes.InsertNode(T, InsertPos); 5057 return QualType(T, 0); 5058 } 5059 5060 QualType 5061 
ASTContext::getDependentTemplateSpecializationType( 5062 ElaboratedTypeKeyword Keyword, 5063 NestedNameSpecifier *NNS, 5064 const IdentifierInfo *Name, 5065 const TemplateArgumentListInfo &Args) const { 5066 // TODO: avoid this copy 5067 SmallVector<TemplateArgument, 16> ArgCopy; 5068 for (unsigned I = 0, E = Args.size(); I != E; ++I) 5069 ArgCopy.push_back(Args[I].getArgument()); 5070 return getDependentTemplateSpecializationType(Keyword, NNS, Name, ArgCopy); 5071 } 5072 5073 QualType 5074 ASTContext::getDependentTemplateSpecializationType( 5075 ElaboratedTypeKeyword Keyword, 5076 NestedNameSpecifier *NNS, 5077 const IdentifierInfo *Name, 5078 ArrayRef<TemplateArgument> Args) const { 5079 assert((!NNS || NNS->isDependent()) && 5080 "nested-name-specifier must be dependent"); 5081 5082 llvm::FoldingSetNodeID ID; 5083 DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS, 5084 Name, Args); 5085 5086 void *InsertPos = nullptr; 5087 DependentTemplateSpecializationType *T 5088 = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 5089 if (T) 5090 return QualType(T, 0); 5091 5092 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 5093 5094 ElaboratedTypeKeyword CanonKeyword = Keyword; 5095 if (Keyword == ETK_None) CanonKeyword = ETK_Typename; 5096 5097 SmallVector<TemplateArgument, 16> CanonArgs; 5098 bool AnyNonCanonArgs = 5099 ::getCanonicalTemplateArguments(*this, Args, CanonArgs); 5100 5101 QualType Canon; 5102 if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) { 5103 Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS, 5104 Name, 5105 CanonArgs); 5106 5107 // Find the insert position again. 5108 DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 5109 } 5110 5111 void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) + 5112 sizeof(TemplateArgument) * Args.size()), 5113 TypeAlignment); 5114 T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS, 5115 Name, Args, Canon); 5116 Types.push_back(T); 5117 DependentTemplateSpecializationTypes.InsertNode(T, InsertPos); 5118 return QualType(T, 0); 5119 } 5120 5121 TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) { 5122 TemplateArgument Arg; 5123 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) { 5124 QualType ArgType = getTypeDeclType(TTP); 5125 if (TTP->isParameterPack()) 5126 ArgType = getPackExpansionType(ArgType, None); 5127 5128 Arg = TemplateArgument(ArgType); 5129 } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) { 5130 QualType T = 5131 NTTP->getType().getNonPackExpansionType().getNonLValueExprType(*this); 5132 // For class NTTPs, ensure we include the 'const' so the type matches that 5133 // of a real template argument. 5134 // FIXME: It would be more faithful to model this as something like an 5135 // lvalue-to-rvalue conversion applied to a const-qualified lvalue. 
5136 if (T->isRecordType()) 5137 T.addConst(); 5138 Expr *E = new (*this) DeclRefExpr( 5139 *this, NTTP, /*enclosing*/ false, T, 5140 Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation()); 5141 5142 if (NTTP->isParameterPack()) 5143 E = new (*this) PackExpansionExpr(DependentTy, E, NTTP->getLocation(), 5144 None); 5145 Arg = TemplateArgument(E); 5146 } else { 5147 auto *TTP = cast<TemplateTemplateParmDecl>(Param); 5148 if (TTP->isParameterPack()) 5149 Arg = TemplateArgument(TemplateName(TTP), Optional<unsigned>()); 5150 else 5151 Arg = TemplateArgument(TemplateName(TTP)); 5152 } 5153 5154 if (Param->isTemplateParameterPack()) 5155 Arg = TemplateArgument::CreatePackCopy(*this, Arg); 5156 5157 return Arg; 5158 } 5159 5160 void 5161 ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params, 5162 SmallVectorImpl<TemplateArgument> &Args) { 5163 Args.reserve(Args.size() + Params->size()); 5164 5165 for (NamedDecl *Param : *Params) 5166 Args.push_back(getInjectedTemplateArg(Param)); 5167 } 5168 5169 QualType ASTContext::getPackExpansionType(QualType Pattern, 5170 Optional<unsigned> NumExpansions, 5171 bool ExpectPackInType) { 5172 assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) && 5173 "Pack expansions must expand one or more parameter packs"); 5174 5175 llvm::FoldingSetNodeID ID; 5176 PackExpansionType::Profile(ID, Pattern, NumExpansions); 5177 5178 void *InsertPos = nullptr; 5179 PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 5180 if (T) 5181 return QualType(T, 0); 5182 5183 QualType Canon; 5184 if (!Pattern.isCanonical()) { 5185 Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions, 5186 /*ExpectPackInType=*/false); 5187 5188 // Find the insert position again, in case we inserted an element into 5189 // PackExpansionTypes and invalidated our insert position. 5190 PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 5191 } 5192 5193 T = new (*this, TypeAlignment) 5194 PackExpansionType(Pattern, Canon, NumExpansions); 5195 Types.push_back(T); 5196 PackExpansionTypes.InsertNode(T, InsertPos); 5197 return QualType(T, 0); 5198 } 5199 5200 /// CmpProtocolNames - Comparison predicate for sorting protocols 5201 /// alphabetically. 5202 static int CmpProtocolNames(ObjCProtocolDecl *const *LHS, 5203 ObjCProtocolDecl *const *RHS) { 5204 return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName()); 5205 } 5206 5207 static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) { 5208 if (Protocols.empty()) return true; 5209 5210 if (Protocols[0]->getCanonicalDecl() != Protocols[0]) 5211 return false; 5212 5213 for (unsigned i = 1; i != Protocols.size(); ++i) 5214 if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 || 5215 Protocols[i]->getCanonicalDecl() != Protocols[i]) 5216 return false; 5217 return true; 5218 } 5219 5220 static void 5221 SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) { 5222 // Sort protocols, keyed by name. 5223 llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames); 5224 5225 // Canonicalize. 5226 for (ObjCProtocolDecl *&P : Protocols) 5227 P = P->getCanonicalDecl(); 5228 5229 // Remove duplicates. 
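  // std::unique only collapses adjacent duplicates, which suffices here
  // because the list was sorted and canonicalized just above.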
5230 auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end()); 5231 Protocols.erase(ProtocolsEnd, Protocols.end()); 5232 } 5233 5234 QualType ASTContext::getObjCObjectType(QualType BaseType, 5235 ObjCProtocolDecl * const *Protocols, 5236 unsigned NumProtocols) const { 5237 return getObjCObjectType(BaseType, {}, 5238 llvm::makeArrayRef(Protocols, NumProtocols), 5239 /*isKindOf=*/false); 5240 } 5241 5242 QualType ASTContext::getObjCObjectType( 5243 QualType baseType, 5244 ArrayRef<QualType> typeArgs, 5245 ArrayRef<ObjCProtocolDecl *> protocols, 5246 bool isKindOf) const { 5247 // If the base type is an interface and there aren't any protocols or 5248 // type arguments to add, then the interface type will do just fine. 5249 if (typeArgs.empty() && protocols.empty() && !isKindOf && 5250 isa<ObjCInterfaceType>(baseType)) 5251 return baseType; 5252 5253 // Look in the folding set for an existing type. 5254 llvm::FoldingSetNodeID ID; 5255 ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf); 5256 void *InsertPos = nullptr; 5257 if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos)) 5258 return QualType(QT, 0); 5259 5260 // Determine the type arguments to be used for canonicalization, 5261 // which may be explicitly specified here or written on the base 5262 // type. 5263 ArrayRef<QualType> effectiveTypeArgs = typeArgs; 5264 if (effectiveTypeArgs.empty()) { 5265 if (const auto *baseObject = baseType->getAs<ObjCObjectType>()) 5266 effectiveTypeArgs = baseObject->getTypeArgs(); 5267 } 5268 5269 // Build the canonical type, which has the canonical base type and a 5270 // sorted-and-uniqued list of protocols and the type arguments 5271 // canonicalized. 5272 QualType canonical; 5273 bool typeArgsAreCanonical = llvm::all_of( 5274 effectiveTypeArgs, [&](QualType type) { return type.isCanonical(); }); 5275 bool protocolsSorted = areSortedAndUniqued(protocols); 5276 if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) { 5277 // Determine the canonical type arguments. 5278 ArrayRef<QualType> canonTypeArgs; 5279 SmallVector<QualType, 4> canonTypeArgsVec; 5280 if (!typeArgsAreCanonical) { 5281 canonTypeArgsVec.reserve(effectiveTypeArgs.size()); 5282 for (auto typeArg : effectiveTypeArgs) 5283 canonTypeArgsVec.push_back(getCanonicalType(typeArg)); 5284 canonTypeArgs = canonTypeArgsVec; 5285 } else { 5286 canonTypeArgs = effectiveTypeArgs; 5287 } 5288 5289 ArrayRef<ObjCProtocolDecl *> canonProtocols; 5290 SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec; 5291 if (!protocolsSorted) { 5292 canonProtocolsVec.append(protocols.begin(), protocols.end()); 5293 SortAndUniqueProtocols(canonProtocolsVec); 5294 canonProtocols = canonProtocolsVec; 5295 } else { 5296 canonProtocols = protocols; 5297 } 5298 5299 canonical = getObjCObjectType(getCanonicalType(baseType), canonTypeArgs, 5300 canonProtocols, isKindOf); 5301 5302 // Regenerate InsertPos. 5303 ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos); 5304 } 5305 5306 unsigned size = sizeof(ObjCObjectTypeImpl); 5307 size += typeArgs.size() * sizeof(QualType); 5308 size += protocols.size() * sizeof(ObjCProtocolDecl *); 5309 void *mem = Allocate(size, TypeAlignment); 5310 auto *T = 5311 new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols, 5312 isKindOf); 5313 5314 Types.push_back(T); 5315 ObjCObjectTypes.InsertNode(T, InsertPos); 5316 return QualType(T, 0); 5317 } 5318 5319 /// Apply Objective-C protocol qualifiers to the given type. 
5320 /// If this is for the canonical type of a type parameter, we can apply 5321 /// protocol qualifiers on the ObjCObjectPointerType. 5322 QualType 5323 ASTContext::applyObjCProtocolQualifiers(QualType type, 5324 ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError, 5325 bool allowOnPointerType) const { 5326 hasError = false; 5327 5328 if (const auto *objT = dyn_cast<ObjCTypeParamType>(type.getTypePtr())) { 5329 return getObjCTypeParamType(objT->getDecl(), protocols); 5330 } 5331 5332 // Apply protocol qualifiers to ObjCObjectPointerType. 5333 if (allowOnPointerType) { 5334 if (const auto *objPtr = 5335 dyn_cast<ObjCObjectPointerType>(type.getTypePtr())) { 5336 const ObjCObjectType *objT = objPtr->getObjectType(); 5337 // Merge protocol lists and construct ObjCObjectType. 5338 SmallVector<ObjCProtocolDecl*, 8> protocolsVec; 5339 protocolsVec.append(objT->qual_begin(), 5340 objT->qual_end()); 5341 protocolsVec.append(protocols.begin(), protocols.end()); 5342 ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec; 5343 type = getObjCObjectType( 5344 objT->getBaseType(), 5345 objT->getTypeArgsAsWritten(), 5346 protocols, 5347 objT->isKindOfTypeAsWritten()); 5348 return getObjCObjectPointerType(type); 5349 } 5350 } 5351 5352 // Apply protocol qualifiers to ObjCObjectType. 5353 if (const auto *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){ 5354 // FIXME: Check for protocols to which the class type is already 5355 // known to conform. 5356 5357 return getObjCObjectType(objT->getBaseType(), 5358 objT->getTypeArgsAsWritten(), 5359 protocols, 5360 objT->isKindOfTypeAsWritten()); 5361 } 5362 5363 // If the canonical type is ObjCObjectType, ... 5364 if (type->isObjCObjectType()) { 5365 // Silently overwrite any existing protocol qualifiers. 5366 // TODO: determine whether that's the right thing to do. 5367 5368 // FIXME: Check for protocols to which the class type is already 5369 // known to conform. 5370 return getObjCObjectType(type, {}, protocols, false); 5371 } 5372 5373 // id<protocol-list> 5374 if (type->isObjCIdType()) { 5375 const auto *objPtr = type->castAs<ObjCObjectPointerType>(); 5376 type = getObjCObjectType(ObjCBuiltinIdTy, {}, protocols, 5377 objPtr->isKindOfType()); 5378 return getObjCObjectPointerType(type); 5379 } 5380 5381 // Class<protocol-list> 5382 if (type->isObjCClassType()) { 5383 const auto *objPtr = type->castAs<ObjCObjectPointerType>(); 5384 type = getObjCObjectType(ObjCBuiltinClassTy, {}, protocols, 5385 objPtr->isKindOfType()); 5386 return getObjCObjectPointerType(type); 5387 } 5388 5389 hasError = true; 5390 return type; 5391 } 5392 5393 QualType 5394 ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl, 5395 ArrayRef<ObjCProtocolDecl *> protocols) const { 5396 // Look in the folding set for an existing type. 5397 llvm::FoldingSetNodeID ID; 5398 ObjCTypeParamType::Profile(ID, Decl, Decl->getUnderlyingType(), protocols); 5399 void *InsertPos = nullptr; 5400 if (ObjCTypeParamType *TypeParam = 5401 ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos)) 5402 return QualType(TypeParam, 0); 5403 5404 // We canonicalize to the underlying type. 5405 QualType Canonical = getCanonicalType(Decl->getUnderlyingType()); 5406 if (!protocols.empty()) { 5407 // Apply the protocol qualifers. 
5408 bool hasError; 5409 Canonical = getCanonicalType(applyObjCProtocolQualifiers( 5410 Canonical, protocols, hasError, true /*allowOnPointerType*/)); 5411 assert(!hasError && "Error when apply protocol qualifier to bound type"); 5412 } 5413 5414 unsigned size = sizeof(ObjCTypeParamType); 5415 size += protocols.size() * sizeof(ObjCProtocolDecl *); 5416 void *mem = Allocate(size, TypeAlignment); 5417 auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols); 5418 5419 Types.push_back(newType); 5420 ObjCTypeParamTypes.InsertNode(newType, InsertPos); 5421 return QualType(newType, 0); 5422 } 5423 5424 void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig, 5425 ObjCTypeParamDecl *New) const { 5426 New->setTypeSourceInfo(getTrivialTypeSourceInfo(Orig->getUnderlyingType())); 5427 // Update TypeForDecl after updating TypeSourceInfo. 5428 auto NewTypeParamTy = cast<ObjCTypeParamType>(New->getTypeForDecl()); 5429 SmallVector<ObjCProtocolDecl *, 8> protocols; 5430 protocols.append(NewTypeParamTy->qual_begin(), NewTypeParamTy->qual_end()); 5431 QualType UpdatedTy = getObjCTypeParamType(New, protocols); 5432 New->setTypeForDecl(UpdatedTy.getTypePtr()); 5433 } 5434 5435 /// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's 5436 /// protocol list adopt all protocols in QT's qualified-id protocol 5437 /// list. 5438 bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT, 5439 ObjCInterfaceDecl *IC) { 5440 if (!QT->isObjCQualifiedIdType()) 5441 return false; 5442 5443 if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) { 5444 // If both the right and left sides have qualifiers. 5445 for (auto *Proto : OPT->quals()) { 5446 if (!IC->ClassImplementsProtocol(Proto, false)) 5447 return false; 5448 } 5449 return true; 5450 } 5451 return false; 5452 } 5453 5454 /// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in 5455 /// QT's qualified-id protocol list adopt all protocols in IDecl's list 5456 /// of protocols. 5457 bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT, 5458 ObjCInterfaceDecl *IDecl) { 5459 if (!QT->isObjCQualifiedIdType()) 5460 return false; 5461 const auto *OPT = QT->getAs<ObjCObjectPointerType>(); 5462 if (!OPT) 5463 return false; 5464 if (!IDecl->hasDefinition()) 5465 return false; 5466 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols; 5467 CollectInheritedProtocols(IDecl, InheritedProtocols); 5468 if (InheritedProtocols.empty()) 5469 return false; 5470 // Check that if every protocol in list of id<plist> conforms to a protocol 5471 // of IDecl's, then bridge casting is ok. 5472 bool Conforms = false; 5473 for (auto *Proto : OPT->quals()) { 5474 Conforms = false; 5475 for (auto *PI : InheritedProtocols) { 5476 if (ProtocolCompatibleWithProtocol(Proto, PI)) { 5477 Conforms = true; 5478 break; 5479 } 5480 } 5481 if (!Conforms) 5482 break; 5483 } 5484 if (Conforms) 5485 return true; 5486 5487 for (auto *PI : InheritedProtocols) { 5488 // If both the right and left sides have qualifiers. 5489 bool Adopts = false; 5490 for (auto *Proto : OPT->quals()) { 5491 // return 'true' if 'PI' is in the inheritance hierarchy of Proto 5492 if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto))) 5493 break; 5494 } 5495 if (!Adopts) 5496 return false; 5497 } 5498 return true; 5499 } 5500 5501 /// getObjCObjectPointerType - Return a ObjCObjectPointerType type for 5502 /// the given object type. 
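/// Results are uniqued in a folding set; if the object type is not canonical,
/// the pointer type for its canonical form is built first and used as the
/// canonical type of the result.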
5503 QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const { 5504 llvm::FoldingSetNodeID ID; 5505 ObjCObjectPointerType::Profile(ID, ObjectT); 5506 5507 void *InsertPos = nullptr; 5508 if (ObjCObjectPointerType *QT = 5509 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 5510 return QualType(QT, 0); 5511 5512 // Find the canonical object type. 5513 QualType Canonical; 5514 if (!ObjectT.isCanonical()) { 5515 Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT)); 5516 5517 // Regenerate InsertPos. 5518 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 5519 } 5520 5521 // No match. 5522 void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment); 5523 auto *QType = 5524 new (Mem) ObjCObjectPointerType(Canonical, ObjectT); 5525 5526 Types.push_back(QType); 5527 ObjCObjectPointerTypes.InsertNode(QType, InsertPos); 5528 return QualType(QType, 0); 5529 } 5530 5531 /// getObjCInterfaceType - Return the unique reference to the type for the 5532 /// specified ObjC interface decl. The list of protocols is optional. 5533 QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl, 5534 ObjCInterfaceDecl *PrevDecl) const { 5535 if (Decl->TypeForDecl) 5536 return QualType(Decl->TypeForDecl, 0); 5537 5538 if (PrevDecl) { 5539 assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl"); 5540 Decl->TypeForDecl = PrevDecl->TypeForDecl; 5541 return QualType(PrevDecl->TypeForDecl, 0); 5542 } 5543 5544 // Prefer the definition, if there is one. 5545 if (const ObjCInterfaceDecl *Def = Decl->getDefinition()) 5546 Decl = Def; 5547 5548 void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment); 5549 auto *T = new (Mem) ObjCInterfaceType(Decl); 5550 Decl->TypeForDecl = T; 5551 Types.push_back(T); 5552 return QualType(T, 0); 5553 } 5554 5555 /// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique 5556 /// TypeOfExprType AST's (since expression's are never shared). For example, 5557 /// multiple declarations that refer to "typeof(x)" all contain different 5558 /// DeclRefExpr's. This doesn't effect the type checker, since it operates 5559 /// on canonical type's (which are always unique). 5560 QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const { 5561 TypeOfExprType *toe; 5562 if (tofExpr->isTypeDependent()) { 5563 llvm::FoldingSetNodeID ID; 5564 DependentTypeOfExprType::Profile(ID, *this, tofExpr); 5565 5566 void *InsertPos = nullptr; 5567 DependentTypeOfExprType *Canon 5568 = DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos); 5569 if (Canon) { 5570 // We already have a "canonical" version of an identical, dependent 5571 // typeof(expr) type. Use that as our canonical type. 5572 toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, 5573 QualType((TypeOfExprType*)Canon, 0)); 5574 } else { 5575 // Build a new, canonical typeof(expr) type. 5576 Canon 5577 = new (*this, TypeAlignment) DependentTypeOfExprType(*this, tofExpr); 5578 DependentTypeOfExprTypes.InsertNode(Canon, InsertPos); 5579 toe = Canon; 5580 } 5581 } else { 5582 QualType Canonical = getCanonicalType(tofExpr->getType()); 5583 toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, Canonical); 5584 } 5585 Types.push_back(toe); 5586 return QualType(toe, 0); 5587 } 5588 5589 /// getTypeOfType - Unlike many "get<Type>" functions, we don't unique 5590 /// TypeOfType nodes. The only motivation to unique these nodes would be 5591 /// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be 5592 /// an issue. 
This doesn't affect the type checker, since it operates 5593 /// on canonical types (which are always unique). 5594 QualType ASTContext::getTypeOfType(QualType tofType) const { 5595 QualType Canonical = getCanonicalType(tofType); 5596 auto *tot = new (*this, TypeAlignment) TypeOfType(tofType, Canonical); 5597 Types.push_back(tot); 5598 return QualType(tot, 0); 5599 } 5600 5601 /// getReferenceQualifiedType - Given an expr, will return the type for 5602 /// that expression, as in [dcl.type.simple]p4 but without taking id-expressions 5603 /// and class member access into account. 5604 QualType ASTContext::getReferenceQualifiedType(const Expr *E) const { 5605 // C++11 [dcl.type.simple]p4: 5606 // [...] 5607 QualType T = E->getType(); 5608 switch (E->getValueKind()) { 5609 // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the 5610 // type of e; 5611 case VK_XValue: 5612 return getRValueReferenceType(T); 5613 // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the 5614 // type of e; 5615 case VK_LValue: 5616 return getLValueReferenceType(T); 5617 // - otherwise, decltype(e) is the type of e. 5618 case VK_PRValue: 5619 return T; 5620 } 5621 llvm_unreachable("Unknown value kind"); 5622 } 5623 5624 /// Unlike many "get<Type>" functions, we don't unique DecltypeType 5625 /// nodes. This would never be helpful, since each such type has its own 5626 /// expression, and would not give a significant memory saving, since there 5627 /// is an Expr tree under each such type. 5628 QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const { 5629 DecltypeType *dt; 5630 5631 // C++11 [temp.type]p2: 5632 // If an expression e involves a template parameter, decltype(e) denotes a 5633 // unique dependent type. Two such decltype-specifiers refer to the same 5634 // type only if their expressions are equivalent (14.5.6.1). 5635 if (e->isInstantiationDependent()) { 5636 llvm::FoldingSetNodeID ID; 5637 DependentDecltypeType::Profile(ID, *this, e); 5638 5639 void *InsertPos = nullptr; 5640 DependentDecltypeType *Canon 5641 = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos); 5642 if (!Canon) { 5643 // Build a new, canonical decltype(expr) type. 5644 Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e); 5645 DependentDecltypeTypes.InsertNode(Canon, InsertPos); 5646 } 5647 dt = new (*this, TypeAlignment) 5648 DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0)); 5649 } else { 5650 dt = new (*this, TypeAlignment) 5651 DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType)); 5652 } 5653 Types.push_back(dt); 5654 return QualType(dt, 0); 5655 } 5656 5657 /// getUnaryTransformationType - We don't unique these, since the memory 5658 /// savings are minimal and these are rare. 5659 QualType ASTContext::getUnaryTransformType(QualType BaseType, 5660 QualType UnderlyingType, 5661 UnaryTransformType::UTTKind Kind) 5662 const { 5663 UnaryTransformType *ut = nullptr; 5664 5665 if (BaseType->isDependentType()) { 5666 // Look in the folding set for an existing type. 5667 llvm::FoldingSetNodeID ID; 5668 DependentUnaryTransformType::Profile(ID, getCanonicalType(BaseType), Kind); 5669 5670 void *InsertPos = nullptr; 5671 DependentUnaryTransformType *Canon 5672 = DependentUnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos); 5673 5674 if (!Canon) { 5675 // Build a new, canonical __underlying_type(type) type. 
5676 Canon = new (*this, TypeAlignment) 5677 DependentUnaryTransformType(*this, getCanonicalType(BaseType), 5678 Kind); 5679 DependentUnaryTransformTypes.InsertNode(Canon, InsertPos); 5680 } 5681 ut = new (*this, TypeAlignment) UnaryTransformType (BaseType, 5682 QualType(), Kind, 5683 QualType(Canon, 0)); 5684 } else { 5685 QualType CanonType = getCanonicalType(UnderlyingType); 5686 ut = new (*this, TypeAlignment) UnaryTransformType (BaseType, 5687 UnderlyingType, Kind, 5688 CanonType); 5689 } 5690 Types.push_back(ut); 5691 return QualType(ut, 0); 5692 } 5693 5694 QualType ASTContext::getAutoTypeInternal( 5695 QualType DeducedType, AutoTypeKeyword Keyword, bool IsDependent, 5696 bool IsPack, ConceptDecl *TypeConstraintConcept, 5697 ArrayRef<TemplateArgument> TypeConstraintArgs, bool IsCanon) const { 5698 if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto && 5699 !TypeConstraintConcept && !IsDependent) 5700 return getAutoDeductType(); 5701 5702 // Look in the folding set for an existing type. 5703 void *InsertPos = nullptr; 5704 llvm::FoldingSetNodeID ID; 5705 AutoType::Profile(ID, *this, DeducedType, Keyword, IsDependent, 5706 TypeConstraintConcept, TypeConstraintArgs); 5707 if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos)) 5708 return QualType(AT, 0); 5709 5710 QualType Canon; 5711 if (!IsCanon) { 5712 if (DeducedType.isNull()) { 5713 SmallVector<TemplateArgument, 4> CanonArgs; 5714 bool AnyNonCanonArgs = 5715 ::getCanonicalTemplateArguments(*this, TypeConstraintArgs, CanonArgs); 5716 if (AnyNonCanonArgs) { 5717 Canon = getAutoTypeInternal(QualType(), Keyword, IsDependent, IsPack, 5718 TypeConstraintConcept, CanonArgs, true); 5719 // Find the insert position again. 5720 AutoTypes.FindNodeOrInsertPos(ID, InsertPos); 5721 } 5722 } else { 5723 Canon = DeducedType.getCanonicalType(); 5724 } 5725 } 5726 5727 void *Mem = Allocate(sizeof(AutoType) + 5728 sizeof(TemplateArgument) * TypeConstraintArgs.size(), 5729 TypeAlignment); 5730 auto *AT = new (Mem) AutoType( 5731 DeducedType, Keyword, 5732 (IsDependent ? TypeDependence::DependentInstantiation 5733 : TypeDependence::None) | 5734 (IsPack ? TypeDependence::UnexpandedPack : TypeDependence::None), 5735 Canon, TypeConstraintConcept, TypeConstraintArgs); 5736 Types.push_back(AT); 5737 AutoTypes.InsertNode(AT, InsertPos); 5738 return QualType(AT, 0); 5739 } 5740 5741 /// getAutoType - Return the uniqued reference to the 'auto' type which has been 5742 /// deduced to the given type, or to the canonical undeduced 'auto' type, or the 5743 /// canonical deduced-but-dependent 'auto' type. 5744 QualType 5745 ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword, 5746 bool IsDependent, bool IsPack, 5747 ConceptDecl *TypeConstraintConcept, 5748 ArrayRef<TemplateArgument> TypeConstraintArgs) const { 5749 assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack"); 5750 assert((!IsDependent || DeducedType.isNull()) && 5751 "A dependent auto should be undeduced"); 5752 return getAutoTypeInternal(DeducedType, Keyword, IsDependent, IsPack, 5753 TypeConstraintConcept, TypeConstraintArgs); 5754 } 5755 5756 /// Return the uniqued reference to the deduced template specialization type 5757 /// which has been deduced to the given type, or to the canonical undeduced 5758 /// such type, or the canonical deduced-but-dependent such type. 
5759 QualType ASTContext::getDeducedTemplateSpecializationType( 5760 TemplateName Template, QualType DeducedType, bool IsDependent) const { 5761 // Look in the folding set for an existing type. 5762 void *InsertPos = nullptr; 5763 llvm::FoldingSetNodeID ID; 5764 DeducedTemplateSpecializationType::Profile(ID, Template, DeducedType, 5765 IsDependent); 5766 if (DeducedTemplateSpecializationType *DTST = 5767 DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos)) 5768 return QualType(DTST, 0); 5769 5770 auto *DTST = new (*this, TypeAlignment) 5771 DeducedTemplateSpecializationType(Template, DeducedType, IsDependent); 5772 llvm::FoldingSetNodeID TempID; 5773 DTST->Profile(TempID); 5774 assert(ID == TempID && "ID does not match"); 5775 Types.push_back(DTST); 5776 DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos); 5777 return QualType(DTST, 0); 5778 } 5779 5780 /// getAtomicType - Return the uniqued reference to the atomic type for 5781 /// the given value type. 5782 QualType ASTContext::getAtomicType(QualType T) const { 5783 // Unique pointers, to guarantee there is only one pointer of a particular 5784 // structure. 5785 llvm::FoldingSetNodeID ID; 5786 AtomicType::Profile(ID, T); 5787 5788 void *InsertPos = nullptr; 5789 if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos)) 5790 return QualType(AT, 0); 5791 5792 // If the atomic value type isn't canonical, this won't be a canonical type 5793 // either, so fill in the canonical type field. 5794 QualType Canonical; 5795 if (!T.isCanonical()) { 5796 Canonical = getAtomicType(getCanonicalType(T)); 5797 5798 // Get the new insert position for the node we care about. 5799 AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos); 5800 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 5801 } 5802 auto *New = new (*this, TypeAlignment) AtomicType(T, Canonical); 5803 Types.push_back(New); 5804 AtomicTypes.InsertNode(New, InsertPos); 5805 return QualType(New, 0); 5806 } 5807 5808 /// getAutoDeductType - Get type pattern for deducing against 'auto'. 5809 QualType ASTContext::getAutoDeductType() const { 5810 if (AutoDeductTy.isNull()) 5811 AutoDeductTy = QualType(new (*this, TypeAlignment) 5812 AutoType(QualType(), AutoTypeKeyword::Auto, 5813 TypeDependence::None, QualType(), 5814 /*concept*/ nullptr, /*args*/ {}), 5815 0); 5816 return AutoDeductTy; 5817 } 5818 5819 /// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'. 5820 QualType ASTContext::getAutoRRefDeductType() const { 5821 if (AutoRRefDeductTy.isNull()) 5822 AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType()); 5823 assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern"); 5824 return AutoRRefDeductTy; 5825 } 5826 5827 /// getTagDeclType - Return the unique reference to the type for the 5828 /// specified TagDecl (struct/union/class/enum) decl. 5829 QualType ASTContext::getTagDeclType(const TagDecl *Decl) const { 5830 assert(Decl); 5831 // FIXME: What is the design on getTagDeclType when it requires casting 5832 // away const? mutable? 5833 return getTypeDeclType(const_cast<TagDecl*>(Decl)); 5834 } 5835 5836 /// getSizeType - Return the unique type for "size_t" (C99 7.17), the result 5837 /// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and 5838 /// needs to agree with the definition in <stddef.h>. 
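/// The underlying integer kind is taken from TargetInfo, so the result can
/// differ from target to target.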
5839 CanQualType ASTContext::getSizeType() const { 5840 return getFromTargetType(Target->getSizeType()); 5841 } 5842 5843 /// Return the unique signed counterpart of the integer type 5844 /// corresponding to size_t. 5845 CanQualType ASTContext::getSignedSizeType() const { 5846 return getFromTargetType(Target->getSignedSizeType()); 5847 } 5848 5849 /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5). 5850 CanQualType ASTContext::getIntMaxType() const { 5851 return getFromTargetType(Target->getIntMaxType()); 5852 } 5853 5854 /// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5). 5855 CanQualType ASTContext::getUIntMaxType() const { 5856 return getFromTargetType(Target->getUIntMaxType()); 5857 } 5858 5859 /// getSignedWCharType - Return the type of "signed wchar_t". 5860 /// Used when in C++, as a GCC extension. 5861 QualType ASTContext::getSignedWCharType() const { 5862 // FIXME: derive from "Target" ? 5863 return WCharTy; 5864 } 5865 5866 /// getUnsignedWCharType - Return the type of "unsigned wchar_t". 5867 /// Used when in C++, as a GCC extension. 5868 QualType ASTContext::getUnsignedWCharType() const { 5869 // FIXME: derive from "Target" ? 5870 return UnsignedIntTy; 5871 } 5872 5873 QualType ASTContext::getIntPtrType() const { 5874 return getFromTargetType(Target->getIntPtrType()); 5875 } 5876 5877 QualType ASTContext::getUIntPtrType() const { 5878 return getCorrespondingUnsignedType(getIntPtrType()); 5879 } 5880 5881 /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17) 5882 /// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9). 5883 QualType ASTContext::getPointerDiffType() const { 5884 return getFromTargetType(Target->getPtrDiffType(0)); 5885 } 5886 5887 /// Return the unique unsigned counterpart of "ptrdiff_t" 5888 /// integer type. The standard (C11 7.21.6.1p7) refers to this type 5889 /// in the definition of %tu format specifier. 5890 QualType ASTContext::getUnsignedPointerDiffType() const { 5891 return getFromTargetType(Target->getUnsignedPtrDiffType(0)); 5892 } 5893 5894 /// Return the unique type for "pid_t" defined in 5895 /// <sys/types.h>. We need this to compute the correct type for vfork(). 5896 QualType ASTContext::getProcessIDType() const { 5897 return getFromTargetType(Target->getProcessIDType()); 5898 } 5899 5900 //===----------------------------------------------------------------------===// 5901 // Type Operators 5902 //===----------------------------------------------------------------------===// 5903 5904 CanQualType ASTContext::getCanonicalParamType(QualType T) const { 5905 // Push qualifiers into arrays, and then discard any remaining 5906 // qualifiers. 5907 T = getCanonicalType(T); 5908 T = getVariableArrayDecayedType(T); 5909 const Type *Ty = T.getTypePtr(); 5910 QualType Result; 5911 if (isa<ArrayType>(Ty)) { 5912 Result = getArrayDecayedType(QualType(Ty,0)); 5913 } else if (isa<FunctionType>(Ty)) { 5914 Result = getPointerType(QualType(Ty, 0)); 5915 } else { 5916 Result = QualType(Ty, 0); 5917 } 5918 5919 return CanQualType::CreateUnsafe(Result); 5920 } 5921 5922 QualType ASTContext::getUnqualifiedArrayType(QualType type, 5923 Qualifiers &quals) { 5924 SplitQualType splitType = type.getSplitUnqualifiedType(); 5925 5926 // FIXME: getSplitUnqualifiedType() actually walks all the way to 5927 // the unqualified desugared type and then drops it on the floor. 5928 // We then have to strip that sugar back off with 5929 // getUnqualifiedDesugaredType(), which is silly. 
5930 const auto *AT = 5931 dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType()); 5932 5933 // If we don't have an array, just use the results in splitType. 5934 if (!AT) { 5935 quals = splitType.Quals; 5936 return QualType(splitType.Ty, 0); 5937 } 5938 5939 // Otherwise, recurse on the array's element type. 5940 QualType elementType = AT->getElementType(); 5941 QualType unqualElementType = getUnqualifiedArrayType(elementType, quals); 5942 5943 // If that didn't change the element type, AT has no qualifiers, so we 5944 // can just use the results in splitType. 5945 if (elementType == unqualElementType) { 5946 assert(quals.empty()); // from the recursive call 5947 quals = splitType.Quals; 5948 return QualType(splitType.Ty, 0); 5949 } 5950 5951 // Otherwise, add in the qualifiers from the outermost type, then 5952 // build the type back up. 5953 quals.addConsistentQualifiers(splitType.Quals); 5954 5955 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) { 5956 return getConstantArrayType(unqualElementType, CAT->getSize(), 5957 CAT->getSizeExpr(), CAT->getSizeModifier(), 0); 5958 } 5959 5960 if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT)) { 5961 return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0); 5962 } 5963 5964 if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) { 5965 return getVariableArrayType(unqualElementType, 5966 VAT->getSizeExpr(), 5967 VAT->getSizeModifier(), 5968 VAT->getIndexTypeCVRQualifiers(), 5969 VAT->getBracketsRange()); 5970 } 5971 5972 const auto *DSAT = cast<DependentSizedArrayType>(AT); 5973 return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(), 5974 DSAT->getSizeModifier(), 0, 5975 SourceRange()); 5976 } 5977 5978 /// Attempt to unwrap two types that may both be array types with the same bound 5979 /// (or both be array types of unknown bound) for the purpose of comparing the 5980 /// cv-decomposition of two types per C++ [conv.qual]. 5981 /// 5982 /// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in 5983 /// C++20 [conv.qual], if permitted by the current language mode. 5984 void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2, 5985 bool AllowPiMismatch) { 5986 while (true) { 5987 auto *AT1 = getAsArrayType(T1); 5988 if (!AT1) 5989 return; 5990 5991 auto *AT2 = getAsArrayType(T2); 5992 if (!AT2) 5993 return; 5994 5995 // If we don't have two array types with the same constant bound nor two 5996 // incomplete array types, we've unwrapped everything we can. 5997 // C++20 also permits one type to be a constant array type and the other 5998 // to be an incomplete array type. 5999 // FIXME: Consider also unwrapping array of unknown bound and VLA. 6000 if (auto *CAT1 = dyn_cast<ConstantArrayType>(AT1)) { 6001 auto *CAT2 = dyn_cast<ConstantArrayType>(AT2); 6002 if (!((CAT2 && CAT1->getSize() == CAT2->getSize()) || 6003 (AllowPiMismatch && getLangOpts().CPlusPlus20 && 6004 isa<IncompleteArrayType>(AT2)))) 6005 return; 6006 } else if (isa<IncompleteArrayType>(AT1)) { 6007 if (!(isa<IncompleteArrayType>(AT2) || 6008 (AllowPiMismatch && getLangOpts().CPlusPlus20 && 6009 isa<ConstantArrayType>(AT2)))) 6010 return; 6011 } else { 6012 return; 6013 } 6014 6015 T1 = AT1->getElementType(); 6016 T2 = AT2->getElementType(); 6017 } 6018 } 6019 6020 /// Attempt to unwrap two types that may be similar (C++ [conv.qual]). 
6021 /// 6022 /// If T1 and T2 are both pointer types of the same kind, or both array types 6023 /// with the same bound, unwraps layers from T1 and T2 until a pointer type is 6024 /// unwrapped. Top-level qualifiers on T1 and T2 are ignored. 6025 /// 6026 /// This function will typically be called in a loop that successively 6027 /// "unwraps" pointer and pointer-to-member types to compare them at each 6028 /// level. 6029 /// 6030 /// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in 6031 /// C++20 [conv.qual], if permitted by the current language mode. 6032 /// 6033 /// \return \c true if a pointer type was unwrapped, \c false if we reached a 6034 /// pair of types that can't be unwrapped further. 6035 bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2, 6036 bool AllowPiMismatch) { 6037 UnwrapSimilarArrayTypes(T1, T2, AllowPiMismatch); 6038 6039 const auto *T1PtrType = T1->getAs<PointerType>(); 6040 const auto *T2PtrType = T2->getAs<PointerType>(); 6041 if (T1PtrType && T2PtrType) { 6042 T1 = T1PtrType->getPointeeType(); 6043 T2 = T2PtrType->getPointeeType(); 6044 return true; 6045 } 6046 6047 const auto *T1MPType = T1->getAs<MemberPointerType>(); 6048 const auto *T2MPType = T2->getAs<MemberPointerType>(); 6049 if (T1MPType && T2MPType && 6050 hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0), 6051 QualType(T2MPType->getClass(), 0))) { 6052 T1 = T1MPType->getPointeeType(); 6053 T2 = T2MPType->getPointeeType(); 6054 return true; 6055 } 6056 6057 if (getLangOpts().ObjC) { 6058 const auto *T1OPType = T1->getAs<ObjCObjectPointerType>(); 6059 const auto *T2OPType = T2->getAs<ObjCObjectPointerType>(); 6060 if (T1OPType && T2OPType) { 6061 T1 = T1OPType->getPointeeType(); 6062 T2 = T2OPType->getPointeeType(); 6063 return true; 6064 } 6065 } 6066 6067 // FIXME: Block pointers, too? 6068 6069 return false; 6070 } 6071 6072 bool ASTContext::hasSimilarType(QualType T1, QualType T2) { 6073 while (true) { 6074 Qualifiers Quals; 6075 T1 = getUnqualifiedArrayType(T1, Quals); 6076 T2 = getUnqualifiedArrayType(T2, Quals); 6077 if (hasSameType(T1, T2)) 6078 return true; 6079 if (!UnwrapSimilarTypes(T1, T2)) 6080 return false; 6081 } 6082 } 6083 6084 bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) { 6085 while (true) { 6086 Qualifiers Quals1, Quals2; 6087 T1 = getUnqualifiedArrayType(T1, Quals1); 6088 T2 = getUnqualifiedArrayType(T2, Quals2); 6089 6090 Quals1.removeCVRQualifiers(); 6091 Quals2.removeCVRQualifiers(); 6092 if (Quals1 != Quals2) 6093 return false; 6094 6095 if (hasSameType(T1, T2)) 6096 return true; 6097 6098 if (!UnwrapSimilarTypes(T1, T2, /*AllowPiMismatch*/ false)) 6099 return false; 6100 } 6101 } 6102 6103 DeclarationNameInfo 6104 ASTContext::getNameForTemplate(TemplateName Name, 6105 SourceLocation NameLoc) const { 6106 switch (Name.getKind()) { 6107 case TemplateName::QualifiedTemplate: 6108 case TemplateName::Template: 6109 // DNInfo work in progress: CHECKME: what about DNLoc? 6110 return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(), 6111 NameLoc); 6112 6113 case TemplateName::OverloadedTemplate: { 6114 OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate(); 6115 // DNInfo work in progress: CHECKME: what about DNLoc? 
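    // Every declaration in the overload set was found by the same name lookup,
    // so the first entry's name serves for all of them.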
6116 return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc); 6117 } 6118 6119 case TemplateName::AssumedTemplate: { 6120 AssumedTemplateStorage *Storage = Name.getAsAssumedTemplateName(); 6121 return DeclarationNameInfo(Storage->getDeclName(), NameLoc); 6122 } 6123 6124 case TemplateName::DependentTemplate: { 6125 DependentTemplateName *DTN = Name.getAsDependentTemplateName(); 6126 DeclarationName DName; 6127 if (DTN->isIdentifier()) { 6128 DName = DeclarationNames.getIdentifier(DTN->getIdentifier()); 6129 return DeclarationNameInfo(DName, NameLoc); 6130 } else { 6131 DName = DeclarationNames.getCXXOperatorName(DTN->getOperator()); 6132 // DNInfo work in progress: FIXME: source locations? 6133 DeclarationNameLoc DNLoc = 6134 DeclarationNameLoc::makeCXXOperatorNameLoc(SourceRange()); 6135 return DeclarationNameInfo(DName, NameLoc, DNLoc); 6136 } 6137 } 6138 6139 case TemplateName::SubstTemplateTemplateParm: { 6140 SubstTemplateTemplateParmStorage *subst 6141 = Name.getAsSubstTemplateTemplateParm(); 6142 return DeclarationNameInfo(subst->getParameter()->getDeclName(), 6143 NameLoc); 6144 } 6145 6146 case TemplateName::SubstTemplateTemplateParmPack: { 6147 SubstTemplateTemplateParmPackStorage *subst 6148 = Name.getAsSubstTemplateTemplateParmPack(); 6149 return DeclarationNameInfo(subst->getParameterPack()->getDeclName(), 6150 NameLoc); 6151 } 6152 case TemplateName::UsingTemplate: 6153 return DeclarationNameInfo(Name.getAsUsingShadowDecl()->getDeclName(), 6154 NameLoc); 6155 } 6156 6157 llvm_unreachable("bad template name kind!"); 6158 } 6159 6160 TemplateName 6161 ASTContext::getCanonicalTemplateName(const TemplateName &Name) const { 6162 switch (Name.getKind()) { 6163 case TemplateName::UsingTemplate: 6164 case TemplateName::QualifiedTemplate: 6165 case TemplateName::Template: { 6166 TemplateDecl *Template = Name.getAsTemplateDecl(); 6167 if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Template)) 6168 Template = getCanonicalTemplateTemplateParmDecl(TTP); 6169 6170 // The canonical template name is the canonical template declaration. 
6171 return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl())); 6172 } 6173 6174 case TemplateName::OverloadedTemplate: 6175 case TemplateName::AssumedTemplate: 6176 llvm_unreachable("cannot canonicalize unresolved template"); 6177 6178 case TemplateName::DependentTemplate: { 6179 DependentTemplateName *DTN = Name.getAsDependentTemplateName(); 6180 assert(DTN && "Non-dependent template names must refer to template decls."); 6181 return DTN->CanonicalTemplateName; 6182 } 6183 6184 case TemplateName::SubstTemplateTemplateParm: { 6185 SubstTemplateTemplateParmStorage *subst 6186 = Name.getAsSubstTemplateTemplateParm(); 6187 return getCanonicalTemplateName(subst->getReplacement()); 6188 } 6189 6190 case TemplateName::SubstTemplateTemplateParmPack: { 6191 SubstTemplateTemplateParmPackStorage *subst 6192 = Name.getAsSubstTemplateTemplateParmPack(); 6193 TemplateTemplateParmDecl *canonParameter 6194 = getCanonicalTemplateTemplateParmDecl(subst->getParameterPack()); 6195 TemplateArgument canonArgPack 6196 = getCanonicalTemplateArgument(subst->getArgumentPack()); 6197 return getSubstTemplateTemplateParmPack(canonParameter, canonArgPack); 6198 } 6199 } 6200 6201 llvm_unreachable("bad template name!"); 6202 } 6203 6204 bool ASTContext::hasSameTemplateName(const TemplateName &X, 6205 const TemplateName &Y) const { 6206 return getCanonicalTemplateName(X).getAsVoidPointer() == 6207 getCanonicalTemplateName(Y).getAsVoidPointer(); 6208 } 6209 6210 bool ASTContext::isSameTemplateParameter(const NamedDecl *X, 6211 const NamedDecl *Y) { 6212 if (X->getKind() != Y->getKind()) 6213 return false; 6214 6215 if (auto *TX = dyn_cast<TemplateTypeParmDecl>(X)) { 6216 auto *TY = cast<TemplateTypeParmDecl>(Y); 6217 if (TX->isParameterPack() != TY->isParameterPack()) 6218 return false; 6219 if (TX->hasTypeConstraint() != TY->hasTypeConstraint()) 6220 return false; 6221 const TypeConstraint *TXTC = TX->getTypeConstraint(); 6222 const TypeConstraint *TYTC = TY->getTypeConstraint(); 6223 if (!TXTC != !TYTC) 6224 return false; 6225 if (TXTC && TYTC) { 6226 auto *NCX = TXTC->getNamedConcept(); 6227 auto *NCY = TYTC->getNamedConcept(); 6228 if (!NCX || !NCY || !isSameEntity(NCX, NCY)) 6229 return false; 6230 if (TXTC->hasExplicitTemplateArgs() != TYTC->hasExplicitTemplateArgs()) 6231 return false; 6232 if (TXTC->hasExplicitTemplateArgs()) { 6233 auto *TXTCArgs = TXTC->getTemplateArgsAsWritten(); 6234 auto *TYTCArgs = TYTC->getTemplateArgsAsWritten(); 6235 if (TXTCArgs->NumTemplateArgs != TYTCArgs->NumTemplateArgs) 6236 return false; 6237 llvm::FoldingSetNodeID XID, YID; 6238 for (auto &ArgLoc : TXTCArgs->arguments()) 6239 ArgLoc.getArgument().Profile(XID, X->getASTContext()); 6240 for (auto &ArgLoc : TYTCArgs->arguments()) 6241 ArgLoc.getArgument().Profile(YID, Y->getASTContext()); 6242 if (XID != YID) 6243 return false; 6244 } 6245 } 6246 return true; 6247 } 6248 6249 if (auto *TX = dyn_cast<NonTypeTemplateParmDecl>(X)) { 6250 auto *TY = cast<NonTypeTemplateParmDecl>(Y); 6251 return TX->isParameterPack() == TY->isParameterPack() && 6252 TX->getASTContext().hasSameType(TX->getType(), TY->getType()); 6253 } 6254 6255 auto *TX = cast<TemplateTemplateParmDecl>(X); 6256 auto *TY = cast<TemplateTemplateParmDecl>(Y); 6257 return TX->isParameterPack() == TY->isParameterPack() && 6258 isSameTemplateParameterList(TX->getTemplateParameters(), 6259 TY->getTemplateParameters()); 6260 } 6261 6262 bool ASTContext::isSameTemplateParameterList(const TemplateParameterList *X, 6263 const TemplateParameterList *Y) { 6264 if 
(X->size() != Y->size()) 6265 return false; 6266 6267 for (unsigned I = 0, N = X->size(); I != N; ++I) 6268 if (!isSameTemplateParameter(X->getParam(I), Y->getParam(I))) 6269 return false; 6270 6271 const Expr *XRC = X->getRequiresClause(); 6272 const Expr *YRC = Y->getRequiresClause(); 6273 if (!XRC != !YRC) 6274 return false; 6275 if (XRC) { 6276 llvm::FoldingSetNodeID XRCID, YRCID; 6277 XRC->Profile(XRCID, *this, /*Canonical=*/true); 6278 YRC->Profile(YRCID, *this, /*Canonical=*/true); 6279 if (XRCID != YRCID) 6280 return false; 6281 } 6282 6283 return true; 6284 } 6285 6286 static NamespaceDecl *getNamespace(const NestedNameSpecifier *X) { 6287 if (auto *NS = X->getAsNamespace()) 6288 return NS; 6289 if (auto *NAS = X->getAsNamespaceAlias()) 6290 return NAS->getNamespace(); 6291 return nullptr; 6292 } 6293 6294 static bool isSameQualifier(const NestedNameSpecifier *X, 6295 const NestedNameSpecifier *Y) { 6296 if (auto *NSX = getNamespace(X)) { 6297 auto *NSY = getNamespace(Y); 6298 if (!NSY || NSX->getCanonicalDecl() != NSY->getCanonicalDecl()) 6299 return false; 6300 } else if (X->getKind() != Y->getKind()) 6301 return false; 6302 6303 // FIXME: For namespaces and types, we're permitted to check that the entity 6304 // is named via the same tokens. We should probably do so. 6305 switch (X->getKind()) { 6306 case NestedNameSpecifier::Identifier: 6307 if (X->getAsIdentifier() != Y->getAsIdentifier()) 6308 return false; 6309 break; 6310 case NestedNameSpecifier::Namespace: 6311 case NestedNameSpecifier::NamespaceAlias: 6312 // We've already checked that we named the same namespace. 6313 break; 6314 case NestedNameSpecifier::TypeSpec: 6315 case NestedNameSpecifier::TypeSpecWithTemplate: 6316 if (X->getAsType()->getCanonicalTypeInternal() != 6317 Y->getAsType()->getCanonicalTypeInternal()) 6318 return false; 6319 break; 6320 case NestedNameSpecifier::Global: 6321 case NestedNameSpecifier::Super: 6322 return true; 6323 } 6324 6325 // Recurse into earlier portion of NNS, if any. 6326 auto *PX = X->getPrefix(); 6327 auto *PY = Y->getPrefix(); 6328 if (PX && PY) 6329 return isSameQualifier(PX, PY); 6330 return !PX && !PY; 6331 } 6332 6333 /// Determine whether the attributes we can overload on are identical for A and 6334 /// B. Will ignore any overloadable attrs represented in the type of A and B. 6335 static bool hasSameOverloadableAttrs(const FunctionDecl *A, 6336 const FunctionDecl *B) { 6337 // Note that pass_object_size attributes are represented in the function's 6338 // ExtParameterInfo, so we don't need to check them here. 6339 6340 llvm::FoldingSetNodeID Cand1ID, Cand2ID; 6341 auto AEnableIfAttrs = A->specific_attrs<EnableIfAttr>(); 6342 auto BEnableIfAttrs = B->specific_attrs<EnableIfAttr>(); 6343 6344 for (auto Pair : zip_longest(AEnableIfAttrs, BEnableIfAttrs)) { 6345 Optional<EnableIfAttr *> Cand1A = std::get<0>(Pair); 6346 Optional<EnableIfAttr *> Cand2A = std::get<1>(Pair); 6347 6348 // Return false if the number of enable_if attributes is different. 6349 if (!Cand1A || !Cand2A) 6350 return false; 6351 6352 Cand1ID.clear(); 6353 Cand2ID.clear(); 6354 6355 (*Cand1A)->getCond()->Profile(Cand1ID, A->getASTContext(), true); 6356 (*Cand2A)->getCond()->Profile(Cand2ID, B->getASTContext(), true); 6357 6358 // Return false if any of the enable_if expressions of A and B are 6359 // different. 
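    // Illustrative example (not from the original source): for
    //   void f(int x) __attribute__((enable_if(x > 0, "")));
    // a redeclaration whose condition is written as 'x >= 1' profiles to a
    // different FoldingSetNodeID and is therefore treated as a distinct
    // overload, even though the two conditions are logically equivalent.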
6360 if (Cand1ID != Cand2ID) 6361 return false; 6362 } 6363 return true; 6364 } 6365 6366 bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) { 6367 if (X == Y) 6368 return true; 6369 6370 if (X->getDeclName() != Y->getDeclName()) 6371 return false; 6372 6373 // Must be in the same context. 6374 // 6375 // Note that we can't use DeclContext::Equals here, because the DeclContexts 6376 // could be two different declarations of the same function. (We will fix the 6377 // semantic DC to refer to the primary definition after merging.) 6378 if (!declaresSameEntity(cast<Decl>(X->getDeclContext()->getRedeclContext()), 6379 cast<Decl>(Y->getDeclContext()->getRedeclContext()))) 6380 return false; 6381 6382 // Two typedefs refer to the same entity if they have the same underlying 6383 // type. 6384 if (const auto *TypedefX = dyn_cast<TypedefNameDecl>(X)) 6385 if (const auto *TypedefY = dyn_cast<TypedefNameDecl>(Y)) 6386 return hasSameType(TypedefX->getUnderlyingType(), 6387 TypedefY->getUnderlyingType()); 6388 6389 // Must have the same kind. 6390 if (X->getKind() != Y->getKind()) 6391 return false; 6392 6393 // Objective-C classes and protocols with the same name always match. 6394 if (isa<ObjCInterfaceDecl>(X) || isa<ObjCProtocolDecl>(X)) 6395 return true; 6396 6397 if (isa<ClassTemplateSpecializationDecl>(X)) { 6398 // No need to handle these here: we merge them when adding them to the 6399 // template. 6400 return false; 6401 } 6402 6403 // Compatible tags match. 6404 if (const auto *TagX = dyn_cast<TagDecl>(X)) { 6405 const auto *TagY = cast<TagDecl>(Y); 6406 return (TagX->getTagKind() == TagY->getTagKind()) || 6407 ((TagX->getTagKind() == TTK_Struct || 6408 TagX->getTagKind() == TTK_Class || 6409 TagX->getTagKind() == TTK_Interface) && 6410 (TagY->getTagKind() == TTK_Struct || 6411 TagY->getTagKind() == TTK_Class || 6412 TagY->getTagKind() == TTK_Interface)); 6413 } 6414 6415 // Functions with the same type and linkage match. 6416 // FIXME: This needs to cope with merging of prototyped/non-prototyped 6417 // functions, etc. 6418 if (const auto *FuncX = dyn_cast<FunctionDecl>(X)) { 6419 const auto *FuncY = cast<FunctionDecl>(Y); 6420 if (const auto *CtorX = dyn_cast<CXXConstructorDecl>(X)) { 6421 const auto *CtorY = cast<CXXConstructorDecl>(Y); 6422 if (CtorX->getInheritedConstructor() && 6423 !isSameEntity(CtorX->getInheritedConstructor().getConstructor(), 6424 CtorY->getInheritedConstructor().getConstructor())) 6425 return false; 6426 } 6427 6428 if (FuncX->isMultiVersion() != FuncY->isMultiVersion()) 6429 return false; 6430 6431 // Multiversioned functions with different feature strings are represented 6432 // as separate declarations. 6433 if (FuncX->isMultiVersion()) { 6434 const auto *TAX = FuncX->getAttr<TargetAttr>(); 6435 const auto *TAY = FuncY->getAttr<TargetAttr>(); 6436 assert(TAX && TAY && "Multiversion Function without target attribute"); 6437 6438 if (TAX->getFeaturesStr() != TAY->getFeaturesStr()) 6439 return false; 6440 } 6441 6442 const Expr *XRC = FuncX->getTrailingRequiresClause(); 6443 const Expr *YRC = FuncY->getTrailingRequiresClause(); 6444 if (!XRC != !YRC) 6445 return false; 6446 if (XRC) { 6447 llvm::FoldingSetNodeID XRCID, YRCID; 6448 XRC->Profile(XRCID, *this, /*Canonical=*/true); 6449 YRC->Profile(YRCID, *this, /*Canonical=*/true); 6450 if (XRCID != YRCID) 6451 return false; 6452 } 6453 6454 auto GetTypeAsWritten = [](const FunctionDecl *FD) { 6455 // Map to the first declaration that we've already merged into this one. 
      // The TSI of redeclarations might not match (due to calling conventions
      // being inherited onto the type but not the TSI), but the TSI type of
      // the first declaration of the function should match across modules.
      FD = FD->getCanonicalDecl();
      return FD->getTypeSourceInfo() ? FD->getTypeSourceInfo()->getType()
                                     : FD->getType();
    };
    QualType XT = GetTypeAsWritten(FuncX), YT = GetTypeAsWritten(FuncY);
    if (!hasSameType(XT, YT)) {
      // We can get functions with different types on the redecl chain in C++17
      // if they have differing exception specifications and at least one of
      // the exception specs is unresolved.
      auto *XFPT = XT->getAs<FunctionProtoType>();
      auto *YFPT = YT->getAs<FunctionProtoType>();
      if (getLangOpts().CPlusPlus17 && XFPT && YFPT &&
          (isUnresolvedExceptionSpec(XFPT->getExceptionSpecType()) ||
           isUnresolvedExceptionSpec(YFPT->getExceptionSpecType())) &&
          // FIXME: We could make isSameEntity const after we make
          // hasSameFunctionTypeIgnoringExceptionSpec const.
          hasSameFunctionTypeIgnoringExceptionSpec(XT, YT))
        return true;
      return false;
    }

    return FuncX->getLinkageInternal() == FuncY->getLinkageInternal() &&
           hasSameOverloadableAttrs(FuncX, FuncY);
  }

  // Variables with the same type and linkage match.
  if (const auto *VarX = dyn_cast<VarDecl>(X)) {
    const auto *VarY = cast<VarDecl>(Y);
    if (VarX->getLinkageInternal() == VarY->getLinkageInternal()) {
      if (hasSameType(VarX->getType(), VarY->getType()))
        return true;

      // We can get decls with different types on the redecl chain. E.g.
      //   template <typename T> struct S { static T Var[]; };  // #1
      //   template <typename T> T S<T>::Var[sizeof(T)];        // #2
      // This should only happen when completing an incomplete array type. In
      // that case, when comparing #1 and #2, we should go through their
      // element type.
      const ArrayType *VarXTy = getAsArrayType(VarX->getType());
      const ArrayType *VarYTy = getAsArrayType(VarY->getType());
      if (!VarXTy || !VarYTy)
        return false;
      if (VarXTy->isIncompleteArrayType() || VarYTy->isIncompleteArrayType())
        return hasSameType(VarXTy->getElementType(), VarYTy->getElementType());
    }
    return false;
  }

  // Namespaces with the same name and inlinedness match.
  if (const auto *NamespaceX = dyn_cast<NamespaceDecl>(X)) {
    const auto *NamespaceY = cast<NamespaceDecl>(Y);
    return NamespaceX->isInline() == NamespaceY->isInline();
  }

  // Identical template names and kinds match if their template parameter lists
  // and patterns match.
  if (const auto *TemplateX = dyn_cast<TemplateDecl>(X)) {
    const auto *TemplateY = cast<TemplateDecl>(Y);
    return isSameEntity(TemplateX->getTemplatedDecl(),
                        TemplateY->getTemplatedDecl()) &&
           isSameTemplateParameterList(TemplateX->getTemplateParameters(),
                                       TemplateY->getTemplateParameters());
  }

  // Fields with the same name and the same type match.
  if (const auto *FDX = dyn_cast<FieldDecl>(X)) {
    const auto *FDY = cast<FieldDecl>(Y);
    // FIXME: Also check the bitwidth is odr-equivalent, if any.
    return hasSameType(FDX->getType(), FDY->getType());
  }

  // Indirect fields with the same target field match.
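  // (IndirectFieldDecls arise from anonymous members; for example, in
  //    struct S { union { int a; }; };
  //  'a' is named through an IndirectFieldDecl whose anonymous chain leads to
  //  the field inside the unnamed union.)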
6530 if (const auto *IFDX = dyn_cast<IndirectFieldDecl>(X)) { 6531 const auto *IFDY = cast<IndirectFieldDecl>(Y); 6532 return IFDX->getAnonField()->getCanonicalDecl() == 6533 IFDY->getAnonField()->getCanonicalDecl(); 6534 } 6535 6536 // Enumerators with the same name match. 6537 if (isa<EnumConstantDecl>(X)) 6538 // FIXME: Also check the value is odr-equivalent. 6539 return true; 6540 6541 // Using shadow declarations with the same target match. 6542 if (const auto *USX = dyn_cast<UsingShadowDecl>(X)) { 6543 const auto *USY = cast<UsingShadowDecl>(Y); 6544 return USX->getTargetDecl() == USY->getTargetDecl(); 6545 } 6546 6547 // Using declarations with the same qualifier match. (We already know that 6548 // the name matches.) 6549 if (const auto *UX = dyn_cast<UsingDecl>(X)) { 6550 const auto *UY = cast<UsingDecl>(Y); 6551 return isSameQualifier(UX->getQualifier(), UY->getQualifier()) && 6552 UX->hasTypename() == UY->hasTypename() && 6553 UX->isAccessDeclaration() == UY->isAccessDeclaration(); 6554 } 6555 if (const auto *UX = dyn_cast<UnresolvedUsingValueDecl>(X)) { 6556 const auto *UY = cast<UnresolvedUsingValueDecl>(Y); 6557 return isSameQualifier(UX->getQualifier(), UY->getQualifier()) && 6558 UX->isAccessDeclaration() == UY->isAccessDeclaration(); 6559 } 6560 if (const auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(X)) { 6561 return isSameQualifier( 6562 UX->getQualifier(), 6563 cast<UnresolvedUsingTypenameDecl>(Y)->getQualifier()); 6564 } 6565 6566 // Using-pack declarations are only created by instantiation, and match if 6567 // they're instantiated from matching UnresolvedUsing...Decls. 6568 if (const auto *UX = dyn_cast<UsingPackDecl>(X)) { 6569 return declaresSameEntity( 6570 UX->getInstantiatedFromUsingDecl(), 6571 cast<UsingPackDecl>(Y)->getInstantiatedFromUsingDecl()); 6572 } 6573 6574 // Namespace alias definitions with the same target match. 
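  // (E.g., 'namespace M = N;' seen in two translation units describes the
  //  same entity as long as both aliases refer to the same namespace N.)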
  if (const auto *NAX = dyn_cast<NamespaceAliasDecl>(X)) {
    const auto *NAY = cast<NamespaceAliasDecl>(Y);
    return NAX->getNamespace()->Equals(NAY->getNamespace());
  }

  return false;
}

TemplateArgument
ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
  switch (Arg.getKind()) {
  case TemplateArgument::Null:
    return Arg;

  case TemplateArgument::Expression:
    return Arg;

  case TemplateArgument::Declaration: {
    auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl());
    return TemplateArgument(D, Arg.getParamTypeForDecl());
  }

  case TemplateArgument::NullPtr:
    return TemplateArgument(getCanonicalType(Arg.getNullPtrType()),
                            /*isNullPtr*/true);

  case TemplateArgument::Template:
    return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()));

  case TemplateArgument::TemplateExpansion:
    return TemplateArgument(
        getCanonicalTemplateName(Arg.getAsTemplateOrTemplatePattern()),
        Arg.getNumTemplateExpansions());

  case TemplateArgument::Integral:
    return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType()));

  case TemplateArgument::Type:
    return TemplateArgument(getCanonicalType(Arg.getAsType()));

  case TemplateArgument::Pack: {
    if (Arg.pack_size() == 0)
      return Arg;

    auto *CanonArgs = new (*this) TemplateArgument[Arg.pack_size()];
    unsigned Idx = 0;
    for (TemplateArgument::pack_iterator A = Arg.pack_begin(),
                                         AEnd = Arg.pack_end();
         A != AEnd; (void)++A, ++Idx)
      CanonArgs[Idx] = getCanonicalTemplateArgument(*A);

    return TemplateArgument(llvm::makeArrayRef(CanonArgs, Arg.pack_size()));
  }
  }

  // Silence GCC warning
  llvm_unreachable("Unhandled template argument kind");
}

NestedNameSpecifier *
ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
  if (!NNS)
    return nullptr;

  switch (NNS->getKind()) {
  case NestedNameSpecifier::Identifier:
    // Canonicalize the prefix but keep the identifier the same.
    return NestedNameSpecifier::Create(
        *this, getCanonicalNestedNameSpecifier(NNS->getPrefix()),
        NNS->getAsIdentifier());

  case NestedNameSpecifier::Namespace:
    // A namespace is canonical; build a nested-name-specifier with
    // this namespace and no prefix.
    return NestedNameSpecifier::Create(
        *this, nullptr, NNS->getAsNamespace()->getOriginalNamespace());

  case NestedNameSpecifier::NamespaceAlias:
    // A namespace alias canonicalizes to the namespace it names; build a
    // nested-name-specifier with that namespace and no prefix.
    return NestedNameSpecifier::Create(
        *this, nullptr,
        NNS->getAsNamespaceAlias()->getNamespace()->getOriginalNamespace());

  // The difference between TypeSpec and TypeSpecWithTemplate is that the
  // latter will have the 'template' keyword when printed.
  case NestedNameSpecifier::TypeSpec:
  case NestedNameSpecifier::TypeSpecWithTemplate: {
    const Type *T = getCanonicalType(NNS->getAsType());

    // If we have some kind of dependent-named type (e.g., "typename T::type"),
    // break it apart into its prefix and identifier, then reconstitute those
    // as the canonical nested-name-specifier. This is required to canonicalize
    // a dependent nested-name-specifier involving typedefs of dependent-name
    // types, e.g.,
    //   typedef typename T::type T1;
    //   typedef typename T1::type T2;
    if (const auto *DNT = T->getAs<DependentNameType>())
      return NestedNameSpecifier::Create(
          *this, DNT->getQualifier(),
          const_cast<IdentifierInfo *>(DNT->getIdentifier()));
    if (const auto *DTST = T->getAs<DependentTemplateSpecializationType>())
      return NestedNameSpecifier::Create(*this, DTST->getQualifier(), true,
                                         const_cast<Type *>(T));

    // TODO: Set 'Template' parameter to true for other template types.
    return NestedNameSpecifier::Create(*this, nullptr, false,
                                       const_cast<Type *>(T));
  }

  case NestedNameSpecifier::Global:
  case NestedNameSpecifier::Super:
    // The global specifier and __super specifier are canonical and unique.
    return NNS;
  }

  llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
}

const ArrayType *ASTContext::getAsArrayType(QualType T) const {
  // Handle the non-qualified case efficiently.
  if (!T.hasLocalQualifiers()) {
    // Handle the common positive case fast.
    if (const auto *AT = dyn_cast<ArrayType>(T))
      return AT;
  }

  // Handle the common negative case fast.
  if (!isa<ArrayType>(T.getCanonicalType()))
    return nullptr;

  // Apply any qualifiers from the array type to the element type. This
  // implements C99 6.7.3p8: "If the specification of an array type includes
  // any type qualifiers, the element type is so qualified, not the array
  // type."

  // If we get here, we either have type qualifiers on the type, or we have
  // sugar such as a typedef in the way. If we have type qualifiers on the
  // type, we must propagate them down into the element type.

  SplitQualType split = T.getSplitDesugaredType();
  Qualifiers qs = split.Quals;

  // If we have a simple case, just return now.
  const auto *ATy = dyn_cast<ArrayType>(split.Ty);
  if (!ATy || qs.empty())
    return ATy;

  // Otherwise, we have an array and we have qualifiers on it. Push the
  // qualifiers into the array element type and return a new array type.
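  // For example (illustrative): given
  //   typedef int A[4];
  //   typedef const A CA;
  // desugaring 'CA' leaves a 'const' local qualifier on an array of int; the
  // code below pushes it down and rebuilds the type as 'const int [4]', per
  // C99 6.7.3p8.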
6724 QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs); 6725 6726 if (const auto *CAT = dyn_cast<ConstantArrayType>(ATy)) 6727 return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(), 6728 CAT->getSizeExpr(), 6729 CAT->getSizeModifier(), 6730 CAT->getIndexTypeCVRQualifiers())); 6731 if (const auto *IAT = dyn_cast<IncompleteArrayType>(ATy)) 6732 return cast<ArrayType>(getIncompleteArrayType(NewEltTy, 6733 IAT->getSizeModifier(), 6734 IAT->getIndexTypeCVRQualifiers())); 6735 6736 if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(ATy)) 6737 return cast<ArrayType>( 6738 getDependentSizedArrayType(NewEltTy, 6739 DSAT->getSizeExpr(), 6740 DSAT->getSizeModifier(), 6741 DSAT->getIndexTypeCVRQualifiers(), 6742 DSAT->getBracketsRange())); 6743 6744 const auto *VAT = cast<VariableArrayType>(ATy); 6745 return cast<ArrayType>(getVariableArrayType(NewEltTy, 6746 VAT->getSizeExpr(), 6747 VAT->getSizeModifier(), 6748 VAT->getIndexTypeCVRQualifiers(), 6749 VAT->getBracketsRange())); 6750 } 6751 6752 QualType ASTContext::getAdjustedParameterType(QualType T) const { 6753 if (T->isArrayType() || T->isFunctionType()) 6754 return getDecayedType(T); 6755 return T; 6756 } 6757 6758 QualType ASTContext::getSignatureParameterType(QualType T) const { 6759 T = getVariableArrayDecayedType(T); 6760 T = getAdjustedParameterType(T); 6761 return T.getUnqualifiedType(); 6762 } 6763 6764 QualType ASTContext::getExceptionObjectType(QualType T) const { 6765 // C++ [except.throw]p3: 6766 // A throw-expression initializes a temporary object, called the exception 6767 // object, the type of which is determined by removing any top-level 6768 // cv-qualifiers from the static type of the operand of throw and adjusting 6769 // the type from "array of T" or "function returning T" to "pointer to T" 6770 // or "pointer to function returning T", [...] 6771 T = getVariableArrayDecayedType(T); 6772 if (T->isArrayType() || T->isFunctionType()) 6773 T = getDecayedType(T); 6774 return T.getUnqualifiedType(); 6775 } 6776 6777 /// getArrayDecayedType - Return the properly qualified result of decaying the 6778 /// specified array type to a pointer. This operation is non-trivial when 6779 /// handling typedefs etc. The canonical type of "T" must be an array type, 6780 /// this returns a pointer to a properly qualified element of the array. 6781 /// 6782 /// See C99 6.7.5.3p7 and C99 6.3.2.1p3. 6783 QualType ASTContext::getArrayDecayedType(QualType Ty) const { 6784 // Get the element type with 'getAsArrayType' so that we don't lose any 6785 // typedefs in the element type of the array. This also handles propagation 6786 // of type qualifiers from the array type into the element type if present 6787 // (C99 6.7.3p8). 
6788 const ArrayType *PrettyArrayType = getAsArrayType(Ty); 6789 assert(PrettyArrayType && "Not an array type!"); 6790 6791 QualType PtrTy = getPointerType(PrettyArrayType->getElementType()); 6792 6793 // int x[restrict 4] -> int *restrict 6794 QualType Result = getQualifiedType(PtrTy, 6795 PrettyArrayType->getIndexTypeQualifiers()); 6796 6797 // int x[_Nullable] -> int * _Nullable 6798 if (auto Nullability = Ty->getNullability(*this)) { 6799 Result = const_cast<ASTContext *>(this)->getAttributedType( 6800 AttributedType::getNullabilityAttrKind(*Nullability), Result, Result); 6801 } 6802 return Result; 6803 } 6804 6805 QualType ASTContext::getBaseElementType(const ArrayType *array) const { 6806 return getBaseElementType(array->getElementType()); 6807 } 6808 6809 QualType ASTContext::getBaseElementType(QualType type) const { 6810 Qualifiers qs; 6811 while (true) { 6812 SplitQualType split = type.getSplitDesugaredType(); 6813 const ArrayType *array = split.Ty->getAsArrayTypeUnsafe(); 6814 if (!array) break; 6815 6816 type = array->getElementType(); 6817 qs.addConsistentQualifiers(split.Quals); 6818 } 6819 6820 return getQualifiedType(type, qs); 6821 } 6822 6823 /// getConstantArrayElementCount - Returns number of constant array elements. 6824 uint64_t 6825 ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const { 6826 uint64_t ElementCount = 1; 6827 do { 6828 ElementCount *= CA->getSize().getZExtValue(); 6829 CA = dyn_cast_or_null<ConstantArrayType>( 6830 CA->getElementType()->getAsArrayTypeUnsafe()); 6831 } while (CA); 6832 return ElementCount; 6833 } 6834 6835 /// getFloatingRank - Return a relative rank for floating point types. 6836 /// This routine will assert if passed a built-in type that isn't a float. 6837 static FloatingRank getFloatingRank(QualType T) { 6838 if (const auto *CT = T->getAs<ComplexType>()) 6839 return getFloatingRank(CT->getElementType()); 6840 6841 switch (T->castAs<BuiltinType>()->getKind()) { 6842 default: llvm_unreachable("getFloatingRank(): not a floating type"); 6843 case BuiltinType::Float16: return Float16Rank; 6844 case BuiltinType::Half: return HalfRank; 6845 case BuiltinType::Float: return FloatRank; 6846 case BuiltinType::Double: return DoubleRank; 6847 case BuiltinType::LongDouble: return LongDoubleRank; 6848 case BuiltinType::Float128: return Float128Rank; 6849 case BuiltinType::BFloat16: return BFloat16Rank; 6850 case BuiltinType::Ibm128: return Ibm128Rank; 6851 } 6852 } 6853 6854 /// getFloatingTypeOrder - Compare the rank of the two specified floating 6855 /// point types, ignoring the domain of the type (i.e. 'double' == 6856 /// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If 6857 /// LHS < RHS, return -1. 6858 int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const { 6859 FloatingRank LHSR = getFloatingRank(LHS); 6860 FloatingRank RHSR = getFloatingRank(RHS); 6861 6862 if (LHSR == RHSR) 6863 return 0; 6864 if (LHSR > RHSR) 6865 return 1; 6866 return -1; 6867 } 6868 6869 int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const { 6870 if (&getFloatTypeSemantics(LHS) == &getFloatTypeSemantics(RHS)) 6871 return 0; 6872 return getFloatingTypeOrder(LHS, RHS); 6873 } 6874 6875 /// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This 6876 /// routine will assert if passed a built-in type that isn't an integer or enum, 6877 /// or if it is not canonicalized. 
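/// The returned rank packs the type's bit-width into the bits above the low
/// three (width << 3) and uses the low bits as a tie-breaker, so a wider type
/// always outranks a narrower one; at equal width the tie-breaker orders
/// bool < char < short < int < long < long long < __int128, and a _BitInt
/// (tie-breaker 0) ranks below any standard integer type of the same width.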
6878 unsigned ASTContext::getIntegerRank(const Type *T) const { 6879 assert(T->isCanonicalUnqualified() && "T should be canonicalized"); 6880 6881 // Results in this 'losing' to any type of the same size, but winning if 6882 // larger. 6883 if (const auto *EIT = dyn_cast<BitIntType>(T)) 6884 return 0 + (EIT->getNumBits() << 3); 6885 6886 switch (cast<BuiltinType>(T)->getKind()) { 6887 default: llvm_unreachable("getIntegerRank(): not a built-in integer"); 6888 case BuiltinType::Bool: 6889 return 1 + (getIntWidth(BoolTy) << 3); 6890 case BuiltinType::Char_S: 6891 case BuiltinType::Char_U: 6892 case BuiltinType::SChar: 6893 case BuiltinType::UChar: 6894 return 2 + (getIntWidth(CharTy) << 3); 6895 case BuiltinType::Short: 6896 case BuiltinType::UShort: 6897 return 3 + (getIntWidth(ShortTy) << 3); 6898 case BuiltinType::Int: 6899 case BuiltinType::UInt: 6900 return 4 + (getIntWidth(IntTy) << 3); 6901 case BuiltinType::Long: 6902 case BuiltinType::ULong: 6903 return 5 + (getIntWidth(LongTy) << 3); 6904 case BuiltinType::LongLong: 6905 case BuiltinType::ULongLong: 6906 return 6 + (getIntWidth(LongLongTy) << 3); 6907 case BuiltinType::Int128: 6908 case BuiltinType::UInt128: 6909 return 7 + (getIntWidth(Int128Ty) << 3); 6910 } 6911 } 6912 6913 /// Whether this is a promotable bitfield reference according 6914 /// to C99 6.3.1.1p2, bullet 2 (and GCC extensions). 6915 /// 6916 /// \returns the type this bit-field will promote to, or NULL if no 6917 /// promotion occurs. 6918 QualType ASTContext::isPromotableBitField(Expr *E) const { 6919 if (E->isTypeDependent() || E->isValueDependent()) 6920 return {}; 6921 6922 // C++ [conv.prom]p5: 6923 // If the bit-field has an enumerated type, it is treated as any other 6924 // value of that type for promotion purposes. 6925 if (getLangOpts().CPlusPlus && E->getType()->isEnumeralType()) 6926 return {}; 6927 6928 // FIXME: We should not do this unless E->refersToBitField() is true. This 6929 // matters in C where getSourceBitField() will find bit-fields for various 6930 // cases where the source expression is not a bit-field designator. 6931 6932 FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields? 6933 if (!Field) 6934 return {}; 6935 6936 QualType FT = Field->getType(); 6937 6938 uint64_t BitWidth = Field->getBitWidthValue(*this); 6939 uint64_t IntSize = getTypeSize(IntTy); 6940 // C++ [conv.prom]p5: 6941 // A prvalue for an integral bit-field can be converted to a prvalue of type 6942 // int if int can represent all the values of the bit-field; otherwise, it 6943 // can be converted to unsigned int if unsigned int can represent all the 6944 // values of the bit-field. If the bit-field is larger yet, no integral 6945 // promotion applies to it. 6946 // C11 6.3.1.1/2: 6947 // [For a bit-field of type _Bool, int, signed int, or unsigned int:] 6948 // If an int can represent all values of the original type (as restricted by 6949 // the width, for a bit-field), the value is converted to an int; otherwise, 6950 // it is converted to an unsigned int. 6951 // 6952 // FIXME: C does not permit promotion of a 'long : 3' bitfield to int. 6953 // We perform that promotion here to match GCC and C++. 6954 // FIXME: C does not permit promotion of an enum bit-field whose rank is 6955 // greater than that of 'int'. We perform that promotion to match GCC. 6956 if (BitWidth < IntSize) 6957 return IntTy; 6958 6959 if (BitWidth == IntSize) 6960 return FT->isSignedIntegerType() ? 
IntTy : UnsignedIntTy; 6961 6962 // Bit-fields wider than int are not subject to promotions, and therefore act 6963 // like the base type. GCC has some weird bugs in this area that we 6964 // deliberately do not follow (GCC follows a pre-standard resolution to 6965 // C's DR315 which treats bit-width as being part of the type, and this leaks 6966 // into their semantics in some cases). 6967 return {}; 6968 } 6969 6970 /// getPromotedIntegerType - Returns the type that Promotable will 6971 /// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable 6972 /// integer type. 6973 QualType ASTContext::getPromotedIntegerType(QualType Promotable) const { 6974 assert(!Promotable.isNull()); 6975 assert(Promotable->isPromotableIntegerType()); 6976 if (const auto *ET = Promotable->getAs<EnumType>()) 6977 return ET->getDecl()->getPromotionType(); 6978 6979 if (const auto *BT = Promotable->getAs<BuiltinType>()) { 6980 // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t 6981 // (3.9.1) can be converted to a prvalue of the first of the following 6982 // types that can represent all the values of its underlying type: 6983 // int, unsigned int, long int, unsigned long int, long long int, or 6984 // unsigned long long int [...] 6985 // FIXME: Is there some better way to compute this? 6986 if (BT->getKind() == BuiltinType::WChar_S || 6987 BT->getKind() == BuiltinType::WChar_U || 6988 BT->getKind() == BuiltinType::Char8 || 6989 BT->getKind() == BuiltinType::Char16 || 6990 BT->getKind() == BuiltinType::Char32) { 6991 bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S; 6992 uint64_t FromSize = getTypeSize(BT); 6993 QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy, 6994 LongLongTy, UnsignedLongLongTy }; 6995 for (size_t Idx = 0; Idx < llvm::array_lengthof(PromoteTypes); ++Idx) { 6996 uint64_t ToSize = getTypeSize(PromoteTypes[Idx]); 6997 if (FromSize < ToSize || 6998 (FromSize == ToSize && 6999 FromIsSigned == PromoteTypes[Idx]->isSignedIntegerType())) 7000 return PromoteTypes[Idx]; 7001 } 7002 llvm_unreachable("char type should fit into long long"); 7003 } 7004 } 7005 7006 // At this point, we should have a signed or unsigned integer type. 7007 if (Promotable->isSignedIntegerType()) 7008 return IntTy; 7009 uint64_t PromotableSize = getIntWidth(Promotable); 7010 uint64_t IntSize = getIntWidth(IntTy); 7011 assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize); 7012 return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy; 7013 } 7014 7015 /// Recurses in pointer/array types until it finds an objc retainable 7016 /// type and returns its ownership. 7017 Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const { 7018 while (!T.isNull()) { 7019 if (T.getObjCLifetime() != Qualifiers::OCL_None) 7020 return T.getObjCLifetime(); 7021 if (T->isArrayType()) 7022 T = getBaseElementType(T); 7023 else if (const auto *PT = T->getAs<PointerType>()) 7024 T = PT->getPointeeType(); 7025 else if (const auto *RT = T->getAs<ReferenceType>()) 7026 T = RT->getPointeeType(); 7027 else 7028 break; 7029 } 7030 7031 return Qualifiers::OCL_None; 7032 } 7033 7034 static const Type *getIntegerTypeForEnum(const EnumType *ET) { 7035 // Incomplete enum types are not treated as integer types. 7036 // FIXME: In C++, enum types are never integer types. 
  if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
    return ET->getDecl()->getIntegerType().getTypePtr();
  return nullptr;
}

/// getIntegerTypeOrder - Returns the highest ranked integer type:
/// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If
/// LHS < RHS, return -1.
int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const {
  const Type *LHSC = getCanonicalType(LHS).getTypePtr();
  const Type *RHSC = getCanonicalType(RHS).getTypePtr();

  // Unwrap enums to their underlying type.
  if (const auto *ET = dyn_cast<EnumType>(LHSC))
    LHSC = getIntegerTypeForEnum(ET);
  if (const auto *ET = dyn_cast<EnumType>(RHSC))
    RHSC = getIntegerTypeForEnum(ET);

  if (LHSC == RHSC) return 0;

  bool LHSUnsigned = LHSC->isUnsignedIntegerType();
  bool RHSUnsigned = RHSC->isUnsignedIntegerType();

  unsigned LHSRank = getIntegerRank(LHSC);
  unsigned RHSRank = getIntegerRank(RHSC);

  if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned.
    if (LHSRank == RHSRank) return 0;
    return LHSRank > RHSRank ? 1 : -1;
  }

  // Otherwise, the LHS is signed and the RHS is unsigned, or vice versa.
  if (LHSUnsigned) {
    // If the unsigned [LHS] type is larger, return it.
    if (LHSRank >= RHSRank)
      return 1;

    // If the signed type can represent all values of the unsigned type, it
    // wins. Because we are dealing with 2's complement and types that are
    // powers of two larger than each other, this is always safe.
    return -1;
  }

  // If the unsigned [RHS] type is larger, return it.
  if (RHSRank >= LHSRank)
    return -1;

  // If the signed type can represent all values of the unsigned type, it
  // wins. Because we are dealing with 2's complement and types that are
  // powers of two larger than each other, this is always safe.
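  // Worked example (illustrative): comparing LHS = 'long' against RHS =
  // 'unsigned int' on an LP64 target reaches this point with a signed LHS of
  // strictly greater rank, which can represent every 'unsigned int' value, so
  // the signed type wins and we return 1.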
7087 return 1; 7088 } 7089 7090 TypedefDecl *ASTContext::getCFConstantStringDecl() const { 7091 if (CFConstantStringTypeDecl) 7092 return CFConstantStringTypeDecl; 7093 7094 assert(!CFConstantStringTagDecl && 7095 "tag and typedef should be initialized together"); 7096 CFConstantStringTagDecl = buildImplicitRecord("__NSConstantString_tag"); 7097 CFConstantStringTagDecl->startDefinition(); 7098 7099 struct { 7100 QualType Type; 7101 const char *Name; 7102 } Fields[5]; 7103 unsigned Count = 0; 7104 7105 /// Objective-C ABI 7106 /// 7107 /// typedef struct __NSConstantString_tag { 7108 /// const int *isa; 7109 /// int flags; 7110 /// const char *str; 7111 /// long length; 7112 /// } __NSConstantString; 7113 /// 7114 /// Swift ABI (4.1, 4.2) 7115 /// 7116 /// typedef struct __NSConstantString_tag { 7117 /// uintptr_t _cfisa; 7118 /// uintptr_t _swift_rc; 7119 /// _Atomic(uint64_t) _cfinfoa; 7120 /// const char *_ptr; 7121 /// uint32_t _length; 7122 /// } __NSConstantString; 7123 /// 7124 /// Swift ABI (5.0) 7125 /// 7126 /// typedef struct __NSConstantString_tag { 7127 /// uintptr_t _cfisa; 7128 /// uintptr_t _swift_rc; 7129 /// _Atomic(uint64_t) _cfinfoa; 7130 /// const char *_ptr; 7131 /// uintptr_t _length; 7132 /// } __NSConstantString; 7133 7134 const auto CFRuntime = getLangOpts().CFRuntime; 7135 if (static_cast<unsigned>(CFRuntime) < 7136 static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift)) { 7137 Fields[Count++] = { getPointerType(IntTy.withConst()), "isa" }; 7138 Fields[Count++] = { IntTy, "flags" }; 7139 Fields[Count++] = { getPointerType(CharTy.withConst()), "str" }; 7140 Fields[Count++] = { LongTy, "length" }; 7141 } else { 7142 Fields[Count++] = { getUIntPtrType(), "_cfisa" }; 7143 Fields[Count++] = { getUIntPtrType(), "_swift_rc" }; 7144 Fields[Count++] = { getFromTargetType(Target->getUInt64Type()), "_swift_rc" }; 7145 Fields[Count++] = { getPointerType(CharTy.withConst()), "_ptr" }; 7146 if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 || 7147 CFRuntime == LangOptions::CoreFoundationABI::Swift4_2) 7148 Fields[Count++] = { IntTy, "_ptr" }; 7149 else 7150 Fields[Count++] = { getUIntPtrType(), "_ptr" }; 7151 } 7152 7153 // Create fields 7154 for (unsigned i = 0; i < Count; ++i) { 7155 FieldDecl *Field = 7156 FieldDecl::Create(*this, CFConstantStringTagDecl, SourceLocation(), 7157 SourceLocation(), &Idents.get(Fields[i].Name), 7158 Fields[i].Type, /*TInfo=*/nullptr, 7159 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); 7160 Field->setAccess(AS_public); 7161 CFConstantStringTagDecl->addDecl(Field); 7162 } 7163 7164 CFConstantStringTagDecl->completeDefinition(); 7165 // This type is designed to be compatible with NSConstantString, but cannot 7166 // use the same name, since NSConstantString is an interface. 7167 auto tagType = getTagDeclType(CFConstantStringTagDecl); 7168 CFConstantStringTypeDecl = 7169 buildImplicitTypedef(tagType, "__NSConstantString"); 7170 7171 return CFConstantStringTypeDecl; 7172 } 7173 7174 RecordDecl *ASTContext::getCFConstantStringTagDecl() const { 7175 if (!CFConstantStringTagDecl) 7176 getCFConstantStringDecl(); // Build the tag and the typedef. 7177 return CFConstantStringTagDecl; 7178 } 7179 7180 // getCFConstantStringType - Return the type used for constant CFStrings. 
7181 QualType ASTContext::getCFConstantStringType() const { 7182 return getTypedefType(getCFConstantStringDecl()); 7183 } 7184 7185 QualType ASTContext::getObjCSuperType() const { 7186 if (ObjCSuperType.isNull()) { 7187 RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord("objc_super"); 7188 getTranslationUnitDecl()->addDecl(ObjCSuperTypeDecl); 7189 ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl); 7190 } 7191 return ObjCSuperType; 7192 } 7193 7194 void ASTContext::setCFConstantStringType(QualType T) { 7195 const auto *TD = T->castAs<TypedefType>(); 7196 CFConstantStringTypeDecl = cast<TypedefDecl>(TD->getDecl()); 7197 const auto *TagType = 7198 CFConstantStringTypeDecl->getUnderlyingType()->castAs<RecordType>(); 7199 CFConstantStringTagDecl = TagType->getDecl(); 7200 } 7201 7202 QualType ASTContext::getBlockDescriptorType() const { 7203 if (BlockDescriptorType) 7204 return getTagDeclType(BlockDescriptorType); 7205 7206 RecordDecl *RD; 7207 // FIXME: Needs the FlagAppleBlock bit. 7208 RD = buildImplicitRecord("__block_descriptor"); 7209 RD->startDefinition(); 7210 7211 QualType FieldTypes[] = { 7212 UnsignedLongTy, 7213 UnsignedLongTy, 7214 }; 7215 7216 static const char *const FieldNames[] = { 7217 "reserved", 7218 "Size" 7219 }; 7220 7221 for (size_t i = 0; i < 2; ++i) { 7222 FieldDecl *Field = FieldDecl::Create( 7223 *this, RD, SourceLocation(), SourceLocation(), 7224 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, 7225 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); 7226 Field->setAccess(AS_public); 7227 RD->addDecl(Field); 7228 } 7229 7230 RD->completeDefinition(); 7231 7232 BlockDescriptorType = RD; 7233 7234 return getTagDeclType(BlockDescriptorType); 7235 } 7236 7237 QualType ASTContext::getBlockDescriptorExtendedType() const { 7238 if (BlockDescriptorExtendedType) 7239 return getTagDeclType(BlockDescriptorExtendedType); 7240 7241 RecordDecl *RD; 7242 // FIXME: Needs the FlagAppleBlock bit. 
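  // (Note: this is the descriptor variant carrying copy/dispose helper
  // pointers; it is the layout used when the block's captures need copy or
  // destroy helpers, cf. BlockRequiresCopying() below.)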
7243 RD = buildImplicitRecord("__block_descriptor_withcopydispose"); 7244 RD->startDefinition(); 7245 7246 QualType FieldTypes[] = { 7247 UnsignedLongTy, 7248 UnsignedLongTy, 7249 getPointerType(VoidPtrTy), 7250 getPointerType(VoidPtrTy) 7251 }; 7252 7253 static const char *const FieldNames[] = { 7254 "reserved", 7255 "Size", 7256 "CopyFuncPtr", 7257 "DestroyFuncPtr" 7258 }; 7259 7260 for (size_t i = 0; i < 4; ++i) { 7261 FieldDecl *Field = FieldDecl::Create( 7262 *this, RD, SourceLocation(), SourceLocation(), 7263 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, 7264 /*BitWidth=*/nullptr, 7265 /*Mutable=*/false, ICIS_NoInit); 7266 Field->setAccess(AS_public); 7267 RD->addDecl(Field); 7268 } 7269 7270 RD->completeDefinition(); 7271 7272 BlockDescriptorExtendedType = RD; 7273 return getTagDeclType(BlockDescriptorExtendedType); 7274 } 7275 7276 OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const { 7277 const auto *BT = dyn_cast<BuiltinType>(T); 7278 7279 if (!BT) { 7280 if (isa<PipeType>(T)) 7281 return OCLTK_Pipe; 7282 7283 return OCLTK_Default; 7284 } 7285 7286 switch (BT->getKind()) { 7287 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 7288 case BuiltinType::Id: \ 7289 return OCLTK_Image; 7290 #include "clang/Basic/OpenCLImageTypes.def" 7291 7292 case BuiltinType::OCLClkEvent: 7293 return OCLTK_ClkEvent; 7294 7295 case BuiltinType::OCLEvent: 7296 return OCLTK_Event; 7297 7298 case BuiltinType::OCLQueue: 7299 return OCLTK_Queue; 7300 7301 case BuiltinType::OCLReserveID: 7302 return OCLTK_ReserveID; 7303 7304 case BuiltinType::OCLSampler: 7305 return OCLTK_Sampler; 7306 7307 default: 7308 return OCLTK_Default; 7309 } 7310 } 7311 7312 LangAS ASTContext::getOpenCLTypeAddrSpace(const Type *T) const { 7313 return Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)); 7314 } 7315 7316 /// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty" 7317 /// requires copy/dispose. Note that this must match the logic 7318 /// in buildByrefHelpers. 7319 bool ASTContext::BlockRequiresCopying(QualType Ty, 7320 const VarDecl *D) { 7321 if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) { 7322 const Expr *copyExpr = getBlockVarCopyInit(D).getCopyExpr(); 7323 if (!copyExpr && record->hasTrivialDestructor()) return false; 7324 7325 return true; 7326 } 7327 7328 // The block needs copy/destroy helpers if Ty is non-trivial to destructively 7329 // move or destroy. 7330 if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType()) 7331 return true; 7332 7333 if (!Ty->isObjCRetainableType()) return false; 7334 7335 Qualifiers qs = Ty.getQualifiers(); 7336 7337 // If we have lifetime, that dominates. 7338 if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) { 7339 switch (lifetime) { 7340 case Qualifiers::OCL_None: llvm_unreachable("impossible"); 7341 7342 // These are just bits as far as the runtime is concerned. 7343 case Qualifiers::OCL_ExplicitNone: 7344 case Qualifiers::OCL_Autoreleasing: 7345 return false; 7346 7347 // These cases should have been taken care of when checking the type's 7348 // non-triviality. 
7349 case Qualifiers::OCL_Weak: 7350 case Qualifiers::OCL_Strong: 7351 llvm_unreachable("impossible"); 7352 } 7353 llvm_unreachable("fell out of lifetime switch!"); 7354 } 7355 return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) || 7356 Ty->isObjCObjectPointerType()); 7357 } 7358 7359 bool ASTContext::getByrefLifetime(QualType Ty, 7360 Qualifiers::ObjCLifetime &LifeTime, 7361 bool &HasByrefExtendedLayout) const { 7362 if (!getLangOpts().ObjC || 7363 getLangOpts().getGC() != LangOptions::NonGC) 7364 return false; 7365 7366 HasByrefExtendedLayout = false; 7367 if (Ty->isRecordType()) { 7368 HasByrefExtendedLayout = true; 7369 LifeTime = Qualifiers::OCL_None; 7370 } else if ((LifeTime = Ty.getObjCLifetime())) { 7371 // Honor the ARC qualifiers. 7372 } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) { 7373 // The MRR rule. 7374 LifeTime = Qualifiers::OCL_ExplicitNone; 7375 } else { 7376 LifeTime = Qualifiers::OCL_None; 7377 } 7378 return true; 7379 } 7380 7381 CanQualType ASTContext::getNSUIntegerType() const { 7382 assert(Target && "Expected target to be initialized"); 7383 const llvm::Triple &T = Target->getTriple(); 7384 // Windows is LLP64 rather than LP64 7385 if (T.isOSWindows() && T.isArch64Bit()) 7386 return UnsignedLongLongTy; 7387 return UnsignedLongTy; 7388 } 7389 7390 CanQualType ASTContext::getNSIntegerType() const { 7391 assert(Target && "Expected target to be initialized"); 7392 const llvm::Triple &T = Target->getTriple(); 7393 // Windows is LLP64 rather than LP64 7394 if (T.isOSWindows() && T.isArch64Bit()) 7395 return LongLongTy; 7396 return LongTy; 7397 } 7398 7399 TypedefDecl *ASTContext::getObjCInstanceTypeDecl() { 7400 if (!ObjCInstanceTypeDecl) 7401 ObjCInstanceTypeDecl = 7402 buildImplicitTypedef(getObjCIdType(), "instancetype"); 7403 return ObjCInstanceTypeDecl; 7404 } 7405 7406 // This returns true if a type has been typedefed to BOOL: 7407 // typedef <type> BOOL; 7408 static bool isTypeTypedefedAsBOOL(QualType T) { 7409 if (const auto *TT = dyn_cast<TypedefType>(T)) 7410 if (IdentifierInfo *II = TT->getDecl()->getIdentifier()) 7411 return II->isStr("BOOL"); 7412 7413 return false; 7414 } 7415 7416 /// getObjCEncodingTypeSize returns size of type for objective-c encoding 7417 /// purpose. 7418 CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const { 7419 if (!type->isIncompleteArrayType() && type->isIncompleteType()) 7420 return CharUnits::Zero(); 7421 7422 CharUnits sz = getTypeSizeInChars(type); 7423 7424 // Make all integer and enum types at least as large as an int 7425 if (sz.isPositive() && type->isIntegralOrEnumerationType()) 7426 sz = std::max(sz, getTypeSizeInChars(IntTy)); 7427 // Treat arrays as pointers, since that's how they're passed in. 7428 else if (type->isArrayType()) 7429 sz = getTypeSizeInChars(VoidPtrTy); 7430 return sz; 7431 } 7432 7433 bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const { 7434 return getTargetInfo().getCXXABI().isMicrosoft() && 7435 VD->isStaticDataMember() && 7436 VD->getType()->isIntegralOrEnumerationType() && 7437 !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit(); 7438 } 7439 7440 ASTContext::InlineVariableDefinitionKind 7441 ASTContext::getInlineVariableDefinitionKind(const VarDecl *VD) const { 7442 if (!VD->isInline()) 7443 return InlineVariableDefinitionKind::None; 7444 7445 // In almost all cases, it's a weak definition. 
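  // E.g. (illustrative): a namespace-scope 'inline int v = 0;' is reported as
  // Weak right away below, while an implicitly-inline 'static constexpr' data
  // member is only known to be a Strong definition once a file-scope
  // redeclaration of it has been seen; otherwise the answer is WeakUnknown.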
7446 auto *First = VD->getFirstDecl(); 7447 if (First->isInlineSpecified() || !First->isStaticDataMember()) 7448 return InlineVariableDefinitionKind::Weak; 7449 7450 // If there's a file-context declaration in this translation unit, it's a 7451 // non-discardable definition. 7452 for (auto *D : VD->redecls()) 7453 if (D->getLexicalDeclContext()->isFileContext() && 7454 !D->isInlineSpecified() && (D->isConstexpr() || First->isConstexpr())) 7455 return InlineVariableDefinitionKind::Strong; 7456 7457 // If we've not seen one yet, we don't know. 7458 return InlineVariableDefinitionKind::WeakUnknown; 7459 } 7460 7461 static std::string charUnitsToString(const CharUnits &CU) { 7462 return llvm::itostr(CU.getQuantity()); 7463 } 7464 7465 /// getObjCEncodingForBlock - Return the encoded type for this block 7466 /// declaration. 7467 std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const { 7468 std::string S; 7469 7470 const BlockDecl *Decl = Expr->getBlockDecl(); 7471 QualType BlockTy = 7472 Expr->getType()->castAs<BlockPointerType>()->getPointeeType(); 7473 QualType BlockReturnTy = BlockTy->castAs<FunctionType>()->getReturnType(); 7474 // Encode result type. 7475 if (getLangOpts().EncodeExtendedBlockSig) 7476 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, BlockReturnTy, S, 7477 true /*Extended*/); 7478 else 7479 getObjCEncodingForType(BlockReturnTy, S); 7480 // Compute size of all parameters. 7481 // Start with computing size of a pointer in number of bytes. 7482 // FIXME: There might(should) be a better way of doing this computation! 7483 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 7484 CharUnits ParmOffset = PtrSize; 7485 for (auto PI : Decl->parameters()) { 7486 QualType PType = PI->getType(); 7487 CharUnits sz = getObjCEncodingTypeSize(PType); 7488 if (sz.isZero()) 7489 continue; 7490 assert(sz.isPositive() && "BlockExpr - Incomplete param type"); 7491 ParmOffset += sz; 7492 } 7493 // Size of the argument frame 7494 S += charUnitsToString(ParmOffset); 7495 // Block pointer and offset. 7496 S += "@?0"; 7497 7498 // Argument types. 7499 ParmOffset = PtrSize; 7500 for (auto PVDecl : Decl->parameters()) { 7501 QualType PType = PVDecl->getOriginalType(); 7502 if (const auto *AT = 7503 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7504 // Use array's original type only if it has known number of 7505 // elements. 7506 if (!isa<ConstantArrayType>(AT)) 7507 PType = PVDecl->getType(); 7508 } else if (PType->isFunctionType()) 7509 PType = PVDecl->getType(); 7510 if (getLangOpts().EncodeExtendedBlockSig) 7511 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, PType, 7512 S, true /*Extended*/); 7513 else 7514 getObjCEncodingForType(PType, S); 7515 S += charUnitsToString(ParmOffset); 7516 ParmOffset += getObjCEncodingTypeSize(PType); 7517 } 7518 7519 return S; 7520 } 7521 7522 std::string 7523 ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const { 7524 std::string S; 7525 // Encode result type. 7526 getObjCEncodingForType(Decl->getReturnType(), S); 7527 CharUnits ParmOffset; 7528 // Compute size of all parameters. 7529 for (auto PI : Decl->parameters()) { 7530 QualType PType = PI->getType(); 7531 CharUnits sz = getObjCEncodingTypeSize(PType); 7532 if (sz.isZero()) 7533 continue; 7534 7535 assert(sz.isPositive() && 7536 "getObjCEncodingForFunctionDecl - Incomplete param type"); 7537 ParmOffset += sz; 7538 } 7539 S += charUnitsToString(ParmOffset); 7540 ParmOffset = CharUnits::Zero(); 7541 7542 // Argument types. 
7543 for (auto PVDecl : Decl->parameters()) { 7544 QualType PType = PVDecl->getOriginalType(); 7545 if (const auto *AT = 7546 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7547 // Use array's original type only if it has known number of 7548 // elements. 7549 if (!isa<ConstantArrayType>(AT)) 7550 PType = PVDecl->getType(); 7551 } else if (PType->isFunctionType()) 7552 PType = PVDecl->getType(); 7553 getObjCEncodingForType(PType, S); 7554 S += charUnitsToString(ParmOffset); 7555 ParmOffset += getObjCEncodingTypeSize(PType); 7556 } 7557 7558 return S; 7559 } 7560 7561 /// getObjCEncodingForMethodParameter - Return the encoded type for a single 7562 /// method parameter or return type. If Extended, include class names and 7563 /// block object types. 7564 void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT, 7565 QualType T, std::string& S, 7566 bool Extended) const { 7567 // Encode type qualifier, 'in', 'inout', etc. for the parameter. 7568 getObjCEncodingForTypeQualifier(QT, S); 7569 // Encode parameter type. 7570 ObjCEncOptions Options = ObjCEncOptions() 7571 .setExpandPointedToStructures() 7572 .setExpandStructures() 7573 .setIsOutermostType(); 7574 if (Extended) 7575 Options.setEncodeBlockParameters().setEncodeClassNames(); 7576 getObjCEncodingForTypeImpl(T, S, Options, /*Field=*/nullptr); 7577 } 7578 7579 /// getObjCEncodingForMethodDecl - Return the encoded type for this method 7580 /// declaration. 7581 std::string ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl, 7582 bool Extended) const { 7583 // FIXME: This is not very efficient. 7584 // Encode return type. 7585 std::string S; 7586 getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(), 7587 Decl->getReturnType(), S, Extended); 7588 // Compute size of all parameters. 7589 // Start with computing size of a pointer in number of bytes. 7590 // FIXME: There might(should) be a better way of doing this computation! 7591 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 7592 // The first two arguments (self and _cmd) are pointers; account for 7593 // their size. 7594 CharUnits ParmOffset = 2 * PtrSize; 7595 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 7596 E = Decl->sel_param_end(); PI != E; ++PI) { 7597 QualType PType = (*PI)->getType(); 7598 CharUnits sz = getObjCEncodingTypeSize(PType); 7599 if (sz.isZero()) 7600 continue; 7601 7602 assert(sz.isPositive() && 7603 "getObjCEncodingForMethodDecl - Incomplete param type"); 7604 ParmOffset += sz; 7605 } 7606 S += charUnitsToString(ParmOffset); 7607 S += "@0:"; 7608 S += charUnitsToString(PtrSize); 7609 7610 // Argument types. 7611 ParmOffset = 2 * PtrSize; 7612 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 7613 E = Decl->sel_param_end(); PI != E; ++PI) { 7614 const ParmVarDecl *PVDecl = *PI; 7615 QualType PType = PVDecl->getOriginalType(); 7616 if (const auto *AT = 7617 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7618 // Use array's original type only if it has known number of 7619 // elements. 
7620 if (!isa<ConstantArrayType>(AT)) 7621 PType = PVDecl->getType(); 7622 } else if (PType->isFunctionType()) 7623 PType = PVDecl->getType(); 7624 getObjCEncodingForMethodParameter(PVDecl->getObjCDeclQualifier(), 7625 PType, S, Extended); 7626 S += charUnitsToString(ParmOffset); 7627 ParmOffset += getObjCEncodingTypeSize(PType); 7628 } 7629 7630 return S; 7631 } 7632 7633 ObjCPropertyImplDecl * 7634 ASTContext::getObjCPropertyImplDeclForPropertyDecl( 7635 const ObjCPropertyDecl *PD, 7636 const Decl *Container) const { 7637 if (!Container) 7638 return nullptr; 7639 if (const auto *CID = dyn_cast<ObjCCategoryImplDecl>(Container)) { 7640 for (auto *PID : CID->property_impls()) 7641 if (PID->getPropertyDecl() == PD) 7642 return PID; 7643 } else { 7644 const auto *OID = cast<ObjCImplementationDecl>(Container); 7645 for (auto *PID : OID->property_impls()) 7646 if (PID->getPropertyDecl() == PD) 7647 return PID; 7648 } 7649 return nullptr; 7650 } 7651 7652 /// getObjCEncodingForPropertyDecl - Return the encoded type for this 7653 /// property declaration. If non-NULL, Container must be either an 7654 /// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be 7655 /// NULL when getting encodings for protocol properties. 7656 /// Property attributes are stored as a comma-delimited C string. The simple 7657 /// attributes readonly and bycopy are encoded as single characters. The 7658 /// parametrized attributes, getter=name, setter=name, and ivar=name, are 7659 /// encoded as single characters, followed by an identifier. Property types 7660 /// are also encoded as a parametrized attribute. The characters used to encode 7661 /// these attributes are defined by the following enumeration: 7662 /// @code 7663 /// enum PropertyAttributes { 7664 /// kPropertyReadOnly = 'R', // property is read-only. 7665 /// kPropertyBycopy = 'C', // property is a copy of the value last assigned 7666 /// kPropertyByref = '&', // property is a reference to the value last assigned 7667 /// kPropertyDynamic = 'D', // property is dynamic 7668 /// kPropertyGetter = 'G', // followed by getter selector name 7669 /// kPropertySetter = 'S', // followed by setter selector name 7670 /// kPropertyInstanceVariable = 'V' // followed by instance variable name 7671 /// kPropertyType = 'T' // followed by old-style type encoding. 7672 /// kPropertyWeak = 'W' // 'weak' property 7673 /// kPropertyStrong = 'P' // property GC'able 7674 /// kPropertyNonAtomic = 'N' // property non-atomic 7675 /// }; 7676 /// @endcode 7677 std::string 7678 ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD, 7679 const Decl *Container) const { 7680 // Collect information from the property implementation decl(s). 7681 bool Dynamic = false; 7682 ObjCPropertyImplDecl *SynthesizePID = nullptr; 7683 7684 if (ObjCPropertyImplDecl *PropertyImpDecl = 7685 getObjCPropertyImplDeclForPropertyDecl(PD, Container)) { 7686 if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) 7687 Dynamic = true; 7688 else 7689 SynthesizePID = PropertyImpDecl; 7690 } 7691 7692 // FIXME: This is not very efficient. 7693 std::string S = "T"; 7694 7695 // Encode result type. 7696 // GCC has some special rules regarding encoding of properties which 7697 // closely resembles encoding of ivars. 
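  // Illustrative example (assumed, not from this file): for
  //   @property (nonatomic, copy) NSString *name;
  // synthesized with the ivar '_name', the string produced here is roughly
  //   T@"NSString",C,N,V_name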
7698 getObjCEncodingForPropertyType(PD->getType(), S); 7699 7700 if (PD->isReadOnly()) { 7701 S += ",R"; 7702 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy) 7703 S += ",C"; 7704 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain) 7705 S += ",&"; 7706 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak) 7707 S += ",W"; 7708 } else { 7709 switch (PD->getSetterKind()) { 7710 case ObjCPropertyDecl::Assign: break; 7711 case ObjCPropertyDecl::Copy: S += ",C"; break; 7712 case ObjCPropertyDecl::Retain: S += ",&"; break; 7713 case ObjCPropertyDecl::Weak: S += ",W"; break; 7714 } 7715 } 7716 7717 // It really isn't clear at all what this means, since properties 7718 // are "dynamic by default". 7719 if (Dynamic) 7720 S += ",D"; 7721 7722 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic) 7723 S += ",N"; 7724 7725 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) { 7726 S += ",G"; 7727 S += PD->getGetterName().getAsString(); 7728 } 7729 7730 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) { 7731 S += ",S"; 7732 S += PD->getSetterName().getAsString(); 7733 } 7734 7735 if (SynthesizePID) { 7736 const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl(); 7737 S += ",V"; 7738 S += OID->getNameAsString(); 7739 } 7740 7741 // FIXME: OBJCGC: weak & strong 7742 return S; 7743 } 7744 7745 /// getLegacyIntegralTypeEncoding - 7746 /// Another legacy compatibility encoding: 32-bit longs are encoded as 7747 /// 'l' or 'L' , but not always. For typedefs, we need to use 7748 /// 'i' or 'I' instead if encoding a struct field, or a pointer! 7749 void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const { 7750 if (isa<TypedefType>(PointeeTy.getTypePtr())) { 7751 if (const auto *BT = PointeeTy->getAs<BuiltinType>()) { 7752 if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32) 7753 PointeeTy = UnsignedIntTy; 7754 else 7755 if (BT->getKind() == BuiltinType::Long && getIntWidth(PointeeTy) == 32) 7756 PointeeTy = IntTy; 7757 } 7758 } 7759 } 7760 7761 void ASTContext::getObjCEncodingForType(QualType T, std::string& S, 7762 const FieldDecl *Field, 7763 QualType *NotEncodedT) const { 7764 // We follow the behavior of gcc, expanding structures which are 7765 // directly pointed to, and expanding embedded structures. Note that 7766 // these rules are sufficient to prevent recursive encoding of the 7767 // same type. 7768 getObjCEncodingForTypeImpl(T, S, 7769 ObjCEncOptions() 7770 .setExpandPointedToStructures() 7771 .setExpandStructures() 7772 .setIsOutermostType(), 7773 Field, NotEncodedT); 7774 } 7775 7776 void ASTContext::getObjCEncodingForPropertyType(QualType T, 7777 std::string& S) const { 7778 // Encode result type. 7779 // GCC has some special rules regarding encoding of properties which 7780 // closely resembles encoding of ivars. 
7781 getObjCEncodingForTypeImpl(T, S, 7782 ObjCEncOptions() 7783 .setExpandPointedToStructures() 7784 .setExpandStructures() 7785 .setIsOutermostType() 7786 .setEncodingProperty(), 7787 /*Field=*/nullptr); 7788 } 7789 7790 static char getObjCEncodingForPrimitiveType(const ASTContext *C, 7791 const BuiltinType *BT) { 7792 BuiltinType::Kind kind = BT->getKind(); 7793 switch (kind) { 7794 case BuiltinType::Void: return 'v'; 7795 case BuiltinType::Bool: return 'B'; 7796 case BuiltinType::Char8: 7797 case BuiltinType::Char_U: 7798 case BuiltinType::UChar: return 'C'; 7799 case BuiltinType::Char16: 7800 case BuiltinType::UShort: return 'S'; 7801 case BuiltinType::Char32: 7802 case BuiltinType::UInt: return 'I'; 7803 case BuiltinType::ULong: 7804 return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q'; 7805 case BuiltinType::UInt128: return 'T'; 7806 case BuiltinType::ULongLong: return 'Q'; 7807 case BuiltinType::Char_S: 7808 case BuiltinType::SChar: return 'c'; 7809 case BuiltinType::Short: return 's'; 7810 case BuiltinType::WChar_S: 7811 case BuiltinType::WChar_U: 7812 case BuiltinType::Int: return 'i'; 7813 case BuiltinType::Long: 7814 return C->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q'; 7815 case BuiltinType::LongLong: return 'q'; 7816 case BuiltinType::Int128: return 't'; 7817 case BuiltinType::Float: return 'f'; 7818 case BuiltinType::Double: return 'd'; 7819 case BuiltinType::LongDouble: return 'D'; 7820 case BuiltinType::NullPtr: return '*'; // like char* 7821 7822 case BuiltinType::BFloat16: 7823 case BuiltinType::Float16: 7824 case BuiltinType::Float128: 7825 case BuiltinType::Ibm128: 7826 case BuiltinType::Half: 7827 case BuiltinType::ShortAccum: 7828 case BuiltinType::Accum: 7829 case BuiltinType::LongAccum: 7830 case BuiltinType::UShortAccum: 7831 case BuiltinType::UAccum: 7832 case BuiltinType::ULongAccum: 7833 case BuiltinType::ShortFract: 7834 case BuiltinType::Fract: 7835 case BuiltinType::LongFract: 7836 case BuiltinType::UShortFract: 7837 case BuiltinType::UFract: 7838 case BuiltinType::ULongFract: 7839 case BuiltinType::SatShortAccum: 7840 case BuiltinType::SatAccum: 7841 case BuiltinType::SatLongAccum: 7842 case BuiltinType::SatUShortAccum: 7843 case BuiltinType::SatUAccum: 7844 case BuiltinType::SatULongAccum: 7845 case BuiltinType::SatShortFract: 7846 case BuiltinType::SatFract: 7847 case BuiltinType::SatLongFract: 7848 case BuiltinType::SatUShortFract: 7849 case BuiltinType::SatUFract: 7850 case BuiltinType::SatULongFract: 7851 // FIXME: potentially need @encodes for these! 7852 return ' '; 7853 7854 #define SVE_TYPE(Name, Id, SingletonId) \ 7855 case BuiltinType::Id: 7856 #include "clang/Basic/AArch64SVEACLETypes.def" 7857 #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: 7858 #include "clang/Basic/RISCVVTypes.def" 7859 { 7860 DiagnosticsEngine &Diags = C->getDiagnostics(); 7861 unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, 7862 "cannot yet @encode type %0"); 7863 Diags.Report(DiagID) << BT->getName(C->getPrintingPolicy()); 7864 return ' '; 7865 } 7866 7867 case BuiltinType::ObjCId: 7868 case BuiltinType::ObjCClass: 7869 case BuiltinType::ObjCSel: 7870 llvm_unreachable("@encoding ObjC primitive type"); 7871 7872 // OpenCL and placeholder types don't need @encodings. 
7873 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 7874 case BuiltinType::Id: 7875 #include "clang/Basic/OpenCLImageTypes.def" 7876 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 7877 case BuiltinType::Id: 7878 #include "clang/Basic/OpenCLExtensionTypes.def" 7879 case BuiltinType::OCLEvent: 7880 case BuiltinType::OCLClkEvent: 7881 case BuiltinType::OCLQueue: 7882 case BuiltinType::OCLReserveID: 7883 case BuiltinType::OCLSampler: 7884 case BuiltinType::Dependent: 7885 #define PPC_VECTOR_TYPE(Name, Id, Size) \ 7886 case BuiltinType::Id: 7887 #include "clang/Basic/PPCTypes.def" 7888 #define BUILTIN_TYPE(KIND, ID) 7889 #define PLACEHOLDER_TYPE(KIND, ID) \ 7890 case BuiltinType::KIND: 7891 #include "clang/AST/BuiltinTypes.def" 7892 llvm_unreachable("invalid builtin type for @encode"); 7893 } 7894 llvm_unreachable("invalid BuiltinType::Kind value"); 7895 } 7896 7897 static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) { 7898 EnumDecl *Enum = ET->getDecl(); 7899 7900 // The encoding of an non-fixed enum type is always 'i', regardless of size. 7901 if (!Enum->isFixed()) 7902 return 'i'; 7903 7904 // The encoding of a fixed enum type matches its fixed underlying type. 7905 const auto *BT = Enum->getIntegerType()->castAs<BuiltinType>(); 7906 return getObjCEncodingForPrimitiveType(C, BT); 7907 } 7908 7909 static void EncodeBitField(const ASTContext *Ctx, std::string& S, 7910 QualType T, const FieldDecl *FD) { 7911 assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl"); 7912 S += 'b'; 7913 // The NeXT runtime encodes bit fields as b followed by the number of bits. 7914 // The GNU runtime requires more information; bitfields are encoded as b, 7915 // then the offset (in bits) of the first element, then the type of the 7916 // bitfield, then the size in bits. For example, in this structure: 7917 // 7918 // struct 7919 // { 7920 // int integer; 7921 // int flags:2; 7922 // }; 7923 // On a 32-bit system, the encoding for flags would be b2 for the NeXT 7924 // runtime, but b32i2 for the GNU runtime. The reason for this extra 7925 // information is not especially sensible, but we're stuck with it for 7926 // compatibility with GCC, although providing it breaks anything that 7927 // actually uses runtime introspection and wants to work on both runtimes... 7928 if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) { 7929 uint64_t Offset; 7930 7931 if (const auto *IVD = dyn_cast<ObjCIvarDecl>(FD)) { 7932 Offset = Ctx->lookupFieldBitOffset(IVD->getContainingInterface(), nullptr, 7933 IVD); 7934 } else { 7935 const RecordDecl *RD = FD->getParent(); 7936 const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD); 7937 Offset = RL.getFieldOffset(FD->getFieldIndex()); 7938 } 7939 7940 S += llvm::utostr(Offset); 7941 7942 if (const auto *ET = T->getAs<EnumType>()) 7943 S += ObjCEncodingForEnumType(Ctx, ET); 7944 else { 7945 const auto *BT = T->castAs<BuiltinType>(); 7946 S += getObjCEncodingForPrimitiveType(Ctx, BT); 7947 } 7948 } 7949 S += llvm::utostr(FD->getBitWidthValue(*Ctx)); 7950 } 7951 7952 // Helper function for determining whether the encoded type string would include 7953 // a template specialization type. 
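// For example (illustrative), a class with a `std::vector<int>` member has a
// template specialization in its encoded string, so the pointer case below
// emits just "^v" for pointers to it unless
// LangOpts.EncodeCXXClassTemplateSpec is enabled.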
7954 static bool hasTemplateSpecializationInEncodedString(const Type *T, 7955 bool VisitBasesAndFields) { 7956 T = T->getBaseElementTypeUnsafe(); 7957 7958 if (auto *PT = T->getAs<PointerType>()) 7959 return hasTemplateSpecializationInEncodedString( 7960 PT->getPointeeType().getTypePtr(), false); 7961 7962 auto *CXXRD = T->getAsCXXRecordDecl(); 7963 7964 if (!CXXRD) 7965 return false; 7966 7967 if (isa<ClassTemplateSpecializationDecl>(CXXRD)) 7968 return true; 7969 7970 if (!CXXRD->hasDefinition() || !VisitBasesAndFields) 7971 return false; 7972 7973 for (auto B : CXXRD->bases()) 7974 if (hasTemplateSpecializationInEncodedString(B.getType().getTypePtr(), 7975 true)) 7976 return true; 7977 7978 for (auto *FD : CXXRD->fields()) 7979 if (hasTemplateSpecializationInEncodedString(FD->getType().getTypePtr(), 7980 true)) 7981 return true; 7982 7983 return false; 7984 } 7985 7986 // FIXME: Use SmallString for accumulating string. 7987 void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S, 7988 const ObjCEncOptions Options, 7989 const FieldDecl *FD, 7990 QualType *NotEncodedT) const { 7991 CanQualType CT = getCanonicalType(T); 7992 switch (CT->getTypeClass()) { 7993 case Type::Builtin: 7994 case Type::Enum: 7995 if (FD && FD->isBitField()) 7996 return EncodeBitField(this, S, T, FD); 7997 if (const auto *BT = dyn_cast<BuiltinType>(CT)) 7998 S += getObjCEncodingForPrimitiveType(this, BT); 7999 else 8000 S += ObjCEncodingForEnumType(this, cast<EnumType>(CT)); 8001 return; 8002 8003 case Type::Complex: 8004 S += 'j'; 8005 getObjCEncodingForTypeImpl(T->castAs<ComplexType>()->getElementType(), S, 8006 ObjCEncOptions(), 8007 /*Field=*/nullptr); 8008 return; 8009 8010 case Type::Atomic: 8011 S += 'A'; 8012 getObjCEncodingForTypeImpl(T->castAs<AtomicType>()->getValueType(), S, 8013 ObjCEncOptions(), 8014 /*Field=*/nullptr); 8015 return; 8016 8017 // encoding for pointer or reference types. 8018 case Type::Pointer: 8019 case Type::LValueReference: 8020 case Type::RValueReference: { 8021 QualType PointeeTy; 8022 if (isa<PointerType>(CT)) { 8023 const auto *PT = T->castAs<PointerType>(); 8024 if (PT->isObjCSelType()) { 8025 S += ':'; 8026 return; 8027 } 8028 PointeeTy = PT->getPointeeType(); 8029 } else { 8030 PointeeTy = T->castAs<ReferenceType>()->getPointeeType(); 8031 } 8032 8033 bool isReadOnly = false; 8034 // For historical/compatibility reasons, the read-only qualifier of the 8035 // pointee gets emitted _before_ the '^'. The read-only qualifier of 8036 // the pointer itself gets ignored, _unless_ we are looking at a typedef! 8037 // Also, do not emit the 'r' for anything but the outermost type! 8038 if (isa<TypedefType>(T.getTypePtr())) { 8039 if (Options.IsOutermostType() && T.isConstQualified()) { 8040 isReadOnly = true; 8041 S += 'r'; 8042 } 8043 } else if (Options.IsOutermostType()) { 8044 QualType P = PointeeTy; 8045 while (auto PT = P->getAs<PointerType>()) 8046 P = PT->getPointeeType(); 8047 if (P.isConstQualified()) { 8048 isReadOnly = true; 8049 S += 'r'; 8050 } 8051 } 8052 if (isReadOnly) { 8053 // Another legacy compatibility encoding. Some ObjC qualifier and type 8054 // combinations need to be rearranged. 8055 // Rewrite "in const" from "nr" to "rn" 8056 if (StringRef(S).endswith("nr")) 8057 S.replace(S.end()-2, S.end(), "rn"); 8058 } 8059 8060 if (PointeeTy->isCharType()) { 8061 // char pointer types should be encoded as '*' unless it is a 8062 // type that has been typedef'd to 'BOOL'. 
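      // (So, illustratively, a plain `char *` encodes as "*", while a pointer
      // to a BOOL typedef of signed char falls through and encodes as "^c".)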
8063 if (!isTypeTypedefedAsBOOL(PointeeTy)) { 8064 S += '*'; 8065 return; 8066 } 8067 } else if (const auto *RTy = PointeeTy->getAs<RecordType>()) { 8068 // GCC binary compat: Need to convert "struct objc_class *" to "#". 8069 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_class")) { 8070 S += '#'; 8071 return; 8072 } 8073 // GCC binary compat: Need to convert "struct objc_object *" to "@". 8074 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_object")) { 8075 S += '@'; 8076 return; 8077 } 8078 // If the encoded string for the class includes template names, just emit 8079 // "^v" for pointers to the class. 8080 if (getLangOpts().CPlusPlus && 8081 (!getLangOpts().EncodeCXXClassTemplateSpec && 8082 hasTemplateSpecializationInEncodedString( 8083 RTy, Options.ExpandPointedToStructures()))) { 8084 S += "^v"; 8085 return; 8086 } 8087 // fall through... 8088 } 8089 S += '^'; 8090 getLegacyIntegralTypeEncoding(PointeeTy); 8091 8092 ObjCEncOptions NewOptions; 8093 if (Options.ExpandPointedToStructures()) 8094 NewOptions.setExpandStructures(); 8095 getObjCEncodingForTypeImpl(PointeeTy, S, NewOptions, 8096 /*Field=*/nullptr, NotEncodedT); 8097 return; 8098 } 8099 8100 case Type::ConstantArray: 8101 case Type::IncompleteArray: 8102 case Type::VariableArray: { 8103 const auto *AT = cast<ArrayType>(CT); 8104 8105 if (isa<IncompleteArrayType>(AT) && !Options.IsStructField()) { 8106 // Incomplete arrays are encoded as a pointer to the array element. 8107 S += '^'; 8108 8109 getObjCEncodingForTypeImpl( 8110 AT->getElementType(), S, 8111 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD); 8112 } else { 8113 S += '['; 8114 8115 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) 8116 S += llvm::utostr(CAT->getSize().getZExtValue()); 8117 else { 8118 //Variable length arrays are encoded as a regular array with 0 elements. 8119 assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) && 8120 "Unknown array type!"); 8121 S += '0'; 8122 } 8123 8124 getObjCEncodingForTypeImpl( 8125 AT->getElementType(), S, 8126 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD, 8127 NotEncodedT); 8128 S += ']'; 8129 } 8130 return; 8131 } 8132 8133 case Type::FunctionNoProto: 8134 case Type::FunctionProto: 8135 S += '?'; 8136 return; 8137 8138 case Type::Record: { 8139 RecordDecl *RDecl = cast<RecordType>(CT)->getDecl(); 8140 S += RDecl->isUnion() ? '(' : '{'; 8141 // Anonymous structures print as '?' 8142 if (const IdentifierInfo *II = RDecl->getIdentifier()) { 8143 S += II->getName(); 8144 if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(RDecl)) { 8145 const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs(); 8146 llvm::raw_string_ostream OS(S); 8147 printTemplateArgumentList(OS, TemplateArgs.asArray(), 8148 getPrintingPolicy()); 8149 } 8150 } else { 8151 S += '?'; 8152 } 8153 if (Options.ExpandStructures()) { 8154 S += '='; 8155 if (!RDecl->isUnion()) { 8156 getObjCEncodingForStructureImpl(RDecl, S, FD, true, NotEncodedT); 8157 } else { 8158 for (const auto *Field : RDecl->fields()) { 8159 if (FD) { 8160 S += '"'; 8161 S += Field->getNameAsString(); 8162 S += '"'; 8163 } 8164 8165 // Special case bit-fields. 
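          // (These ultimately go through EncodeBitField above; e.g. on the
          // NeXT runtime an `int flags : 2` member contributes just "b2".)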
8166 if (Field->isBitField()) { 8167 getObjCEncodingForTypeImpl(Field->getType(), S, 8168 ObjCEncOptions().setExpandStructures(), 8169 Field); 8170 } else { 8171 QualType qt = Field->getType(); 8172 getLegacyIntegralTypeEncoding(qt); 8173 getObjCEncodingForTypeImpl( 8174 qt, S, 8175 ObjCEncOptions().setExpandStructures().setIsStructField(), FD, 8176 NotEncodedT); 8177 } 8178 } 8179 } 8180 } 8181 S += RDecl->isUnion() ? ')' : '}'; 8182 return; 8183 } 8184 8185 case Type::BlockPointer: { 8186 const auto *BT = T->castAs<BlockPointerType>(); 8187 S += "@?"; // Unlike a pointer-to-function, which is "^?". 8188 if (Options.EncodeBlockParameters()) { 8189 const auto *FT = BT->getPointeeType()->castAs<FunctionType>(); 8190 8191 S += '<'; 8192 // Block return type 8193 getObjCEncodingForTypeImpl(FT->getReturnType(), S, 8194 Options.forComponentType(), FD, NotEncodedT); 8195 // Block self 8196 S += "@?"; 8197 // Block parameters 8198 if (const auto *FPT = dyn_cast<FunctionProtoType>(FT)) { 8199 for (const auto &I : FPT->param_types()) 8200 getObjCEncodingForTypeImpl(I, S, Options.forComponentType(), FD, 8201 NotEncodedT); 8202 } 8203 S += '>'; 8204 } 8205 return; 8206 } 8207 8208 case Type::ObjCObject: { 8209 // hack to match legacy encoding of *id and *Class 8210 QualType Ty = getObjCObjectPointerType(CT); 8211 if (Ty->isObjCIdType()) { 8212 S += "{objc_object=}"; 8213 return; 8214 } 8215 else if (Ty->isObjCClassType()) { 8216 S += "{objc_class=}"; 8217 return; 8218 } 8219 // TODO: Double check to make sure this intentionally falls through. 8220 LLVM_FALLTHROUGH; 8221 } 8222 8223 case Type::ObjCInterface: { 8224 // Ignore protocol qualifiers when mangling at this level. 8225 // @encode(class_name) 8226 ObjCInterfaceDecl *OI = T->castAs<ObjCObjectType>()->getInterface(); 8227 S += '{'; 8228 S += OI->getObjCRuntimeNameAsString(); 8229 if (Options.ExpandStructures()) { 8230 S += '='; 8231 SmallVector<const ObjCIvarDecl*, 32> Ivars; 8232 DeepCollectObjCIvars(OI, true, Ivars); 8233 for (unsigned i = 0, e = Ivars.size(); i != e; ++i) { 8234 const FieldDecl *Field = Ivars[i]; 8235 if (Field->isBitField()) 8236 getObjCEncodingForTypeImpl(Field->getType(), S, 8237 ObjCEncOptions().setExpandStructures(), 8238 Field); 8239 else 8240 getObjCEncodingForTypeImpl(Field->getType(), S, 8241 ObjCEncOptions().setExpandStructures(), FD, 8242 NotEncodedT); 8243 } 8244 } 8245 S += '}'; 8246 return; 8247 } 8248 8249 case Type::ObjCObjectPointer: { 8250 const auto *OPT = T->castAs<ObjCObjectPointerType>(); 8251 if (OPT->isObjCIdType()) { 8252 S += '@'; 8253 return; 8254 } 8255 8256 if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) { 8257 // FIXME: Consider if we need to output qualifiers for 'Class<p>'. 8258 // Since this is a binary compatibility issue, need to consult with 8259 // runtime folks. Fortunately, this is a *very* obscure construct. 8260 S += '#'; 8261 return; 8262 } 8263 8264 if (OPT->isObjCQualifiedIdType()) { 8265 getObjCEncodingForTypeImpl( 8266 getObjCIdType(), S, 8267 Options.keepingOnly(ObjCEncOptions() 8268 .setExpandPointedToStructures() 8269 .setExpandStructures()), 8270 FD); 8271 if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) { 8272 // Note that we do extended encoding of protocol qualifier list 8273 // Only when doing ivar or property encoding. 
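      // (Illustratively, an ivar of type id<NSCopying> is then encoded as
      // @"<NSCopying>" instead of a bare '@'.)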
8274 S += '"'; 8275 for (const auto *I : OPT->quals()) { 8276 S += '<'; 8277 S += I->getObjCRuntimeNameAsString(); 8278 S += '>'; 8279 } 8280 S += '"'; 8281 } 8282 return; 8283 } 8284 8285 S += '@'; 8286 if (OPT->getInterfaceDecl() && 8287 (FD || Options.EncodingProperty() || Options.EncodeClassNames())) { 8288 S += '"'; 8289 S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString(); 8290 for (const auto *I : OPT->quals()) { 8291 S += '<'; 8292 S += I->getObjCRuntimeNameAsString(); 8293 S += '>'; 8294 } 8295 S += '"'; 8296 } 8297 return; 8298 } 8299 8300 // gcc just blithely ignores member pointers. 8301 // FIXME: we should do better than that. 'M' is available. 8302 case Type::MemberPointer: 8303 // This matches gcc's encoding, even though technically it is insufficient. 8304 //FIXME. We should do a better job than gcc. 8305 case Type::Vector: 8306 case Type::ExtVector: 8307 // Until we have a coherent encoding of these three types, issue warning. 8308 if (NotEncodedT) 8309 *NotEncodedT = T; 8310 return; 8311 8312 case Type::ConstantMatrix: 8313 if (NotEncodedT) 8314 *NotEncodedT = T; 8315 return; 8316 8317 case Type::BitInt: 8318 if (NotEncodedT) 8319 *NotEncodedT = T; 8320 return; 8321 8322 // We could see an undeduced auto type here during error recovery. 8323 // Just ignore it. 8324 case Type::Auto: 8325 case Type::DeducedTemplateSpecialization: 8326 return; 8327 8328 case Type::Pipe: 8329 #define ABSTRACT_TYPE(KIND, BASE) 8330 #define TYPE(KIND, BASE) 8331 #define DEPENDENT_TYPE(KIND, BASE) \ 8332 case Type::KIND: 8333 #define NON_CANONICAL_TYPE(KIND, BASE) \ 8334 case Type::KIND: 8335 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \ 8336 case Type::KIND: 8337 #include "clang/AST/TypeNodes.inc" 8338 llvm_unreachable("@encode for dependent type!"); 8339 } 8340 llvm_unreachable("bad type kind!"); 8341 } 8342 8343 void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl, 8344 std::string &S, 8345 const FieldDecl *FD, 8346 bool includeVBases, 8347 QualType *NotEncodedT) const { 8348 assert(RDecl && "Expected non-null RecordDecl"); 8349 assert(!RDecl->isUnion() && "Should not be called for unions"); 8350 if (!RDecl->getDefinition() || RDecl->getDefinition()->isInvalidDecl()) 8351 return; 8352 8353 const auto *CXXRec = dyn_cast<CXXRecordDecl>(RDecl); 8354 std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets; 8355 const ASTRecordLayout &layout = getASTRecordLayout(RDecl); 8356 8357 if (CXXRec) { 8358 for (const auto &BI : CXXRec->bases()) { 8359 if (!BI.isVirtual()) { 8360 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); 8361 if (base->isEmpty()) 8362 continue; 8363 uint64_t offs = toBits(layout.getBaseClassOffset(base)); 8364 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8365 std::make_pair(offs, base)); 8366 } 8367 } 8368 } 8369 8370 unsigned i = 0; 8371 for (FieldDecl *Field : RDecl->fields()) { 8372 if (!Field->isZeroLengthBitField(*this) && Field->isZeroSize(*this)) 8373 continue; 8374 uint64_t offs = layout.getFieldOffset(i); 8375 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8376 std::make_pair(offs, Field)); 8377 ++i; 8378 } 8379 8380 if (CXXRec && includeVBases) { 8381 for (const auto &BI : CXXRec->vbases()) { 8382 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); 8383 if (base->isEmpty()) 8384 continue; 8385 uint64_t offs = toBits(layout.getVBaseClassOffset(base)); 8386 if (offs >= uint64_t(toBits(layout.getNonVirtualSize())) && 8387 FieldOrBaseOffsets.find(offs) == FieldOrBaseOffsets.end()) 8388 
FieldOrBaseOffsets.insert(FieldOrBaseOffsets.end(), 8389 std::make_pair(offs, base)); 8390 } 8391 } 8392 8393 CharUnits size; 8394 if (CXXRec) { 8395 size = includeVBases ? layout.getSize() : layout.getNonVirtualSize(); 8396 } else { 8397 size = layout.getSize(); 8398 } 8399 8400 #ifndef NDEBUG 8401 uint64_t CurOffs = 0; 8402 #endif 8403 std::multimap<uint64_t, NamedDecl *>::iterator 8404 CurLayObj = FieldOrBaseOffsets.begin(); 8405 8406 if (CXXRec && CXXRec->isDynamicClass() && 8407 (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) { 8408 if (FD) { 8409 S += "\"_vptr$"; 8410 std::string recname = CXXRec->getNameAsString(); 8411 if (recname.empty()) recname = "?"; 8412 S += recname; 8413 S += '"'; 8414 } 8415 S += "^^?"; 8416 #ifndef NDEBUG 8417 CurOffs += getTypeSize(VoidPtrTy); 8418 #endif 8419 } 8420 8421 if (!RDecl->hasFlexibleArrayMember()) { 8422 // Mark the end of the structure. 8423 uint64_t offs = toBits(size); 8424 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8425 std::make_pair(offs, nullptr)); 8426 } 8427 8428 for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) { 8429 #ifndef NDEBUG 8430 assert(CurOffs <= CurLayObj->first); 8431 if (CurOffs < CurLayObj->first) { 8432 uint64_t padding = CurLayObj->first - CurOffs; 8433 // FIXME: There doesn't seem to be a way to indicate in the encoding that 8434 // packing/alignment of members is different that normal, in which case 8435 // the encoding will be out-of-sync with the real layout. 8436 // If the runtime switches to just consider the size of types without 8437 // taking into account alignment, we could make padding explicit in the 8438 // encoding (e.g. using arrays of chars). The encoding strings would be 8439 // longer then though. 8440 CurOffs += padding; 8441 } 8442 #endif 8443 8444 NamedDecl *dcl = CurLayObj->second; 8445 if (!dcl) 8446 break; // reached end of structure. 8447 8448 if (auto *base = dyn_cast<CXXRecordDecl>(dcl)) { 8449 // We expand the bases without their virtual bases since those are going 8450 // in the initial structure. Note that this differs from gcc which 8451 // expands virtual bases each time one is encountered in the hierarchy, 8452 // making the encoding type bigger than it really is. 
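      // (Illustratively, for `struct B { int b; }; struct D : B { int d; };`
      // the base's field is spliced in at its offset, so D encodes roughly as
      // "{D=ii}" rather than nesting a separate "{B=i}".)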
8453 getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false, 8454 NotEncodedT); 8455 assert(!base->isEmpty()); 8456 #ifndef NDEBUG 8457 CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize()); 8458 #endif 8459 } else { 8460 const auto *field = cast<FieldDecl>(dcl); 8461 if (FD) { 8462 S += '"'; 8463 S += field->getNameAsString(); 8464 S += '"'; 8465 } 8466 8467 if (field->isBitField()) { 8468 EncodeBitField(this, S, field->getType(), field); 8469 #ifndef NDEBUG 8470 CurOffs += field->getBitWidthValue(*this); 8471 #endif 8472 } else { 8473 QualType qt = field->getType(); 8474 getLegacyIntegralTypeEncoding(qt); 8475 getObjCEncodingForTypeImpl( 8476 qt, S, ObjCEncOptions().setExpandStructures().setIsStructField(), 8477 FD, NotEncodedT); 8478 #ifndef NDEBUG 8479 CurOffs += getTypeSize(field->getType()); 8480 #endif 8481 } 8482 } 8483 } 8484 } 8485 8486 void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT, 8487 std::string& S) const { 8488 if (QT & Decl::OBJC_TQ_In) 8489 S += 'n'; 8490 if (QT & Decl::OBJC_TQ_Inout) 8491 S += 'N'; 8492 if (QT & Decl::OBJC_TQ_Out) 8493 S += 'o'; 8494 if (QT & Decl::OBJC_TQ_Bycopy) 8495 S += 'O'; 8496 if (QT & Decl::OBJC_TQ_Byref) 8497 S += 'R'; 8498 if (QT & Decl::OBJC_TQ_Oneway) 8499 S += 'V'; 8500 } 8501 8502 TypedefDecl *ASTContext::getObjCIdDecl() const { 8503 if (!ObjCIdDecl) { 8504 QualType T = getObjCObjectType(ObjCBuiltinIdTy, {}, {}); 8505 T = getObjCObjectPointerType(T); 8506 ObjCIdDecl = buildImplicitTypedef(T, "id"); 8507 } 8508 return ObjCIdDecl; 8509 } 8510 8511 TypedefDecl *ASTContext::getObjCSelDecl() const { 8512 if (!ObjCSelDecl) { 8513 QualType T = getPointerType(ObjCBuiltinSelTy); 8514 ObjCSelDecl = buildImplicitTypedef(T, "SEL"); 8515 } 8516 return ObjCSelDecl; 8517 } 8518 8519 TypedefDecl *ASTContext::getObjCClassDecl() const { 8520 if (!ObjCClassDecl) { 8521 QualType T = getObjCObjectType(ObjCBuiltinClassTy, {}, {}); 8522 T = getObjCObjectPointerType(T); 8523 ObjCClassDecl = buildImplicitTypedef(T, "Class"); 8524 } 8525 return ObjCClassDecl; 8526 } 8527 8528 ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const { 8529 if (!ObjCProtocolClassDecl) { 8530 ObjCProtocolClassDecl 8531 = ObjCInterfaceDecl::Create(*this, getTranslationUnitDecl(), 8532 SourceLocation(), 8533 &Idents.get("Protocol"), 8534 /*typeParamList=*/nullptr, 8535 /*PrevDecl=*/nullptr, 8536 SourceLocation(), true); 8537 } 8538 8539 return ObjCProtocolClassDecl; 8540 } 8541 8542 //===----------------------------------------------------------------------===// 8543 // __builtin_va_list Construction Functions 8544 //===----------------------------------------------------------------------===// 8545 8546 static TypedefDecl *CreateCharPtrNamedVaListDecl(const ASTContext *Context, 8547 StringRef Name) { 8548 // typedef char* __builtin[_ms]_va_list; 8549 QualType T = Context->getPointerType(Context->CharTy); 8550 return Context->buildImplicitTypedef(T, Name); 8551 } 8552 8553 static TypedefDecl *CreateMSVaListDecl(const ASTContext *Context) { 8554 return CreateCharPtrNamedVaListDecl(Context, "__builtin_ms_va_list"); 8555 } 8556 8557 static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) { 8558 return CreateCharPtrNamedVaListDecl(Context, "__builtin_va_list"); 8559 } 8560 8561 static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) { 8562 // typedef void* __builtin_va_list; 8563 QualType T = Context->getPointerType(Context->VoidTy); 8564 return Context->buildImplicitTypedef(T, 
"__builtin_va_list"); 8565 } 8566 8567 static TypedefDecl * 8568 CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) { 8569 // struct __va_list 8570 RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list"); 8571 if (Context->getLangOpts().CPlusPlus) { 8572 // namespace std { struct __va_list { 8573 auto *NS = NamespaceDecl::Create( 8574 const_cast<ASTContext &>(*Context), Context->getTranslationUnitDecl(), 8575 /*Inline*/ false, SourceLocation(), SourceLocation(), 8576 &Context->Idents.get("std"), 8577 /*PrevDecl*/ nullptr); 8578 NS->setImplicit(); 8579 VaListTagDecl->setDeclContext(NS); 8580 } 8581 8582 VaListTagDecl->startDefinition(); 8583 8584 const size_t NumFields = 5; 8585 QualType FieldTypes[NumFields]; 8586 const char *FieldNames[NumFields]; 8587 8588 // void *__stack; 8589 FieldTypes[0] = Context->getPointerType(Context->VoidTy); 8590 FieldNames[0] = "__stack"; 8591 8592 // void *__gr_top; 8593 FieldTypes[1] = Context->getPointerType(Context->VoidTy); 8594 FieldNames[1] = "__gr_top"; 8595 8596 // void *__vr_top; 8597 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8598 FieldNames[2] = "__vr_top"; 8599 8600 // int __gr_offs; 8601 FieldTypes[3] = Context->IntTy; 8602 FieldNames[3] = "__gr_offs"; 8603 8604 // int __vr_offs; 8605 FieldTypes[4] = Context->IntTy; 8606 FieldNames[4] = "__vr_offs"; 8607 8608 // Create fields 8609 for (unsigned i = 0; i < NumFields; ++i) { 8610 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8611 VaListTagDecl, 8612 SourceLocation(), 8613 SourceLocation(), 8614 &Context->Idents.get(FieldNames[i]), 8615 FieldTypes[i], /*TInfo=*/nullptr, 8616 /*BitWidth=*/nullptr, 8617 /*Mutable=*/false, 8618 ICIS_NoInit); 8619 Field->setAccess(AS_public); 8620 VaListTagDecl->addDecl(Field); 8621 } 8622 VaListTagDecl->completeDefinition(); 8623 Context->VaListTagDecl = VaListTagDecl; 8624 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8625 8626 // } __builtin_va_list; 8627 return Context->buildImplicitTypedef(VaListTagType, "__builtin_va_list"); 8628 } 8629 8630 static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) { 8631 // typedef struct __va_list_tag { 8632 RecordDecl *VaListTagDecl; 8633 8634 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8635 VaListTagDecl->startDefinition(); 8636 8637 const size_t NumFields = 5; 8638 QualType FieldTypes[NumFields]; 8639 const char *FieldNames[NumFields]; 8640 8641 // unsigned char gpr; 8642 FieldTypes[0] = Context->UnsignedCharTy; 8643 FieldNames[0] = "gpr"; 8644 8645 // unsigned char fpr; 8646 FieldTypes[1] = Context->UnsignedCharTy; 8647 FieldNames[1] = "fpr"; 8648 8649 // unsigned short reserved; 8650 FieldTypes[2] = Context->UnsignedShortTy; 8651 FieldNames[2] = "reserved"; 8652 8653 // void* overflow_arg_area; 8654 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8655 FieldNames[3] = "overflow_arg_area"; 8656 8657 // void* reg_save_area; 8658 FieldTypes[4] = Context->getPointerType(Context->VoidTy); 8659 FieldNames[4] = "reg_save_area"; 8660 8661 // Create fields 8662 for (unsigned i = 0; i < NumFields; ++i) { 8663 FieldDecl *Field = FieldDecl::Create(*Context, VaListTagDecl, 8664 SourceLocation(), 8665 SourceLocation(), 8666 &Context->Idents.get(FieldNames[i]), 8667 FieldTypes[i], /*TInfo=*/nullptr, 8668 /*BitWidth=*/nullptr, 8669 /*Mutable=*/false, 8670 ICIS_NoInit); 8671 Field->setAccess(AS_public); 8672 VaListTagDecl->addDecl(Field); 8673 } 8674 VaListTagDecl->completeDefinition(); 8675 
Context->VaListTagDecl = VaListTagDecl; 8676 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8677 8678 // } __va_list_tag; 8679 TypedefDecl *VaListTagTypedefDecl = 8680 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); 8681 8682 QualType VaListTagTypedefType = 8683 Context->getTypedefType(VaListTagTypedefDecl); 8684 8685 // typedef __va_list_tag __builtin_va_list[1]; 8686 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8687 QualType VaListTagArrayType 8688 = Context->getConstantArrayType(VaListTagTypedefType, 8689 Size, nullptr, ArrayType::Normal, 0); 8690 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8691 } 8692 8693 static TypedefDecl * 8694 CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) { 8695 // struct __va_list_tag { 8696 RecordDecl *VaListTagDecl; 8697 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8698 VaListTagDecl->startDefinition(); 8699 8700 const size_t NumFields = 4; 8701 QualType FieldTypes[NumFields]; 8702 const char *FieldNames[NumFields]; 8703 8704 // unsigned gp_offset; 8705 FieldTypes[0] = Context->UnsignedIntTy; 8706 FieldNames[0] = "gp_offset"; 8707 8708 // unsigned fp_offset; 8709 FieldTypes[1] = Context->UnsignedIntTy; 8710 FieldNames[1] = "fp_offset"; 8711 8712 // void* overflow_arg_area; 8713 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8714 FieldNames[2] = "overflow_arg_area"; 8715 8716 // void* reg_save_area; 8717 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8718 FieldNames[3] = "reg_save_area"; 8719 8720 // Create fields 8721 for (unsigned i = 0; i < NumFields; ++i) { 8722 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8723 VaListTagDecl, 8724 SourceLocation(), 8725 SourceLocation(), 8726 &Context->Idents.get(FieldNames[i]), 8727 FieldTypes[i], /*TInfo=*/nullptr, 8728 /*BitWidth=*/nullptr, 8729 /*Mutable=*/false, 8730 ICIS_NoInit); 8731 Field->setAccess(AS_public); 8732 VaListTagDecl->addDecl(Field); 8733 } 8734 VaListTagDecl->completeDefinition(); 8735 Context->VaListTagDecl = VaListTagDecl; 8736 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8737 8738 // }; 8739 8740 // typedef struct __va_list_tag __builtin_va_list[1]; 8741 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8742 QualType VaListTagArrayType = Context->getConstantArrayType( 8743 VaListTagType, Size, nullptr, ArrayType::Normal, 0); 8744 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8745 } 8746 8747 static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) { 8748 // typedef int __builtin_va_list[4]; 8749 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 4); 8750 QualType IntArrayType = Context->getConstantArrayType( 8751 Context->IntTy, Size, nullptr, ArrayType::Normal, 0); 8752 return Context->buildImplicitTypedef(IntArrayType, "__builtin_va_list"); 8753 } 8754 8755 static TypedefDecl * 8756 CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) { 8757 // struct __va_list 8758 RecordDecl *VaListDecl = Context->buildImplicitRecord("__va_list"); 8759 if (Context->getLangOpts().CPlusPlus) { 8760 // namespace std { struct __va_list { 8761 NamespaceDecl *NS; 8762 NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context), 8763 Context->getTranslationUnitDecl(), 8764 /*Inline*/false, SourceLocation(), 8765 SourceLocation(), &Context->Idents.get("std"), 8766 /*PrevDecl*/ nullptr); 8767 NS->setImplicit(); 8768 
VaListDecl->setDeclContext(NS); 8769 } 8770 8771 VaListDecl->startDefinition(); 8772 8773 // void * __ap; 8774 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8775 VaListDecl, 8776 SourceLocation(), 8777 SourceLocation(), 8778 &Context->Idents.get("__ap"), 8779 Context->getPointerType(Context->VoidTy), 8780 /*TInfo=*/nullptr, 8781 /*BitWidth=*/nullptr, 8782 /*Mutable=*/false, 8783 ICIS_NoInit); 8784 Field->setAccess(AS_public); 8785 VaListDecl->addDecl(Field); 8786 8787 // }; 8788 VaListDecl->completeDefinition(); 8789 Context->VaListTagDecl = VaListDecl; 8790 8791 // typedef struct __va_list __builtin_va_list; 8792 QualType T = Context->getRecordType(VaListDecl); 8793 return Context->buildImplicitTypedef(T, "__builtin_va_list"); 8794 } 8795 8796 static TypedefDecl * 8797 CreateSystemZBuiltinVaListDecl(const ASTContext *Context) { 8798 // struct __va_list_tag { 8799 RecordDecl *VaListTagDecl; 8800 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8801 VaListTagDecl->startDefinition(); 8802 8803 const size_t NumFields = 4; 8804 QualType FieldTypes[NumFields]; 8805 const char *FieldNames[NumFields]; 8806 8807 // long __gpr; 8808 FieldTypes[0] = Context->LongTy; 8809 FieldNames[0] = "__gpr"; 8810 8811 // long __fpr; 8812 FieldTypes[1] = Context->LongTy; 8813 FieldNames[1] = "__fpr"; 8814 8815 // void *__overflow_arg_area; 8816 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8817 FieldNames[2] = "__overflow_arg_area"; 8818 8819 // void *__reg_save_area; 8820 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8821 FieldNames[3] = "__reg_save_area"; 8822 8823 // Create fields 8824 for (unsigned i = 0; i < NumFields; ++i) { 8825 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8826 VaListTagDecl, 8827 SourceLocation(), 8828 SourceLocation(), 8829 &Context->Idents.get(FieldNames[i]), 8830 FieldTypes[i], /*TInfo=*/nullptr, 8831 /*BitWidth=*/nullptr, 8832 /*Mutable=*/false, 8833 ICIS_NoInit); 8834 Field->setAccess(AS_public); 8835 VaListTagDecl->addDecl(Field); 8836 } 8837 VaListTagDecl->completeDefinition(); 8838 Context->VaListTagDecl = VaListTagDecl; 8839 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8840 8841 // }; 8842 8843 // typedef __va_list_tag __builtin_va_list[1]; 8844 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8845 QualType VaListTagArrayType = Context->getConstantArrayType( 8846 VaListTagType, Size, nullptr, ArrayType::Normal, 0); 8847 8848 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8849 } 8850 8851 static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) { 8852 // typedef struct __va_list_tag { 8853 RecordDecl *VaListTagDecl; 8854 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8855 VaListTagDecl->startDefinition(); 8856 8857 const size_t NumFields = 3; 8858 QualType FieldTypes[NumFields]; 8859 const char *FieldNames[NumFields]; 8860 8861 // void *CurrentSavedRegisterArea; 8862 FieldTypes[0] = Context->getPointerType(Context->VoidTy); 8863 FieldNames[0] = "__current_saved_reg_area_pointer"; 8864 8865 // void *SavedRegAreaEnd; 8866 FieldTypes[1] = Context->getPointerType(Context->VoidTy); 8867 FieldNames[1] = "__saved_reg_area_end_pointer"; 8868 8869 // void *OverflowArea; 8870 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8871 FieldNames[2] = "__overflow_area_pointer"; 8872 8873 // Create fields 8874 for (unsigned i = 0; i < NumFields; ++i) { 8875 FieldDecl *Field = FieldDecl::Create( 
8876 const_cast<ASTContext &>(*Context), VaListTagDecl, SourceLocation(), 8877 SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i], 8878 /*TInfo=*/nullptr, 8879 /*BitWidth=*/nullptr, 8880 /*Mutable=*/false, ICIS_NoInit); 8881 Field->setAccess(AS_public); 8882 VaListTagDecl->addDecl(Field); 8883 } 8884 VaListTagDecl->completeDefinition(); 8885 Context->VaListTagDecl = VaListTagDecl; 8886 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8887 8888 // } __va_list_tag; 8889 TypedefDecl *VaListTagTypedefDecl = 8890 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); 8891 8892 QualType VaListTagTypedefType = Context->getTypedefType(VaListTagTypedefDecl); 8893 8894 // typedef __va_list_tag __builtin_va_list[1]; 8895 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8896 QualType VaListTagArrayType = Context->getConstantArrayType( 8897 VaListTagTypedefType, Size, nullptr, ArrayType::Normal, 0); 8898 8899 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8900 } 8901 8902 static TypedefDecl *CreateVaListDecl(const ASTContext *Context, 8903 TargetInfo::BuiltinVaListKind Kind) { 8904 switch (Kind) { 8905 case TargetInfo::CharPtrBuiltinVaList: 8906 return CreateCharPtrBuiltinVaListDecl(Context); 8907 case TargetInfo::VoidPtrBuiltinVaList: 8908 return CreateVoidPtrBuiltinVaListDecl(Context); 8909 case TargetInfo::AArch64ABIBuiltinVaList: 8910 return CreateAArch64ABIBuiltinVaListDecl(Context); 8911 case TargetInfo::PowerABIBuiltinVaList: 8912 return CreatePowerABIBuiltinVaListDecl(Context); 8913 case TargetInfo::X86_64ABIBuiltinVaList: 8914 return CreateX86_64ABIBuiltinVaListDecl(Context); 8915 case TargetInfo::PNaClABIBuiltinVaList: 8916 return CreatePNaClABIBuiltinVaListDecl(Context); 8917 case TargetInfo::AAPCSABIBuiltinVaList: 8918 return CreateAAPCSABIBuiltinVaListDecl(Context); 8919 case TargetInfo::SystemZBuiltinVaList: 8920 return CreateSystemZBuiltinVaListDecl(Context); 8921 case TargetInfo::HexagonBuiltinVaList: 8922 return CreateHexagonBuiltinVaListDecl(Context); 8923 } 8924 8925 llvm_unreachable("Unhandled __builtin_va_list type kind"); 8926 } 8927 8928 TypedefDecl *ASTContext::getBuiltinVaListDecl() const { 8929 if (!BuiltinVaListDecl) { 8930 BuiltinVaListDecl = CreateVaListDecl(this, Target->getBuiltinVaListKind()); 8931 assert(BuiltinVaListDecl->isImplicit()); 8932 } 8933 8934 return BuiltinVaListDecl; 8935 } 8936 8937 Decl *ASTContext::getVaListTagDecl() const { 8938 // Force the creation of VaListTagDecl by building the __builtin_va_list 8939 // declaration. 8940 if (!VaListTagDecl) 8941 (void)getBuiltinVaListDecl(); 8942 8943 return VaListTagDecl; 8944 } 8945 8946 TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const { 8947 if (!BuiltinMSVaListDecl) 8948 BuiltinMSVaListDecl = CreateMSVaListDecl(this); 8949 8950 return BuiltinMSVaListDecl; 8951 } 8952 8953 bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl *FD) const { 8954 return BuiltinInfo.canBeRedeclared(FD->getBuiltinID()); 8955 } 8956 8957 void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) { 8958 assert(ObjCConstantStringType.isNull() && 8959 "'NSConstantString' type already set!"); 8960 8961 ObjCConstantStringType = getObjCInterfaceType(Decl); 8962 } 8963 8964 /// Retrieve the template name that corresponds to a non-empty 8965 /// lookup. 
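/// For example, when a lookup finds several function templates (or
/// using-declarations of them), the returned TemplateName stores them all in
/// a single OverloadedTemplateStorage.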
8966 TemplateName 8967 ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin, 8968 UnresolvedSetIterator End) const { 8969 unsigned size = End - Begin; 8970 assert(size > 1 && "set is not overloaded!"); 8971 8972 void *memory = Allocate(sizeof(OverloadedTemplateStorage) + 8973 size * sizeof(FunctionTemplateDecl*)); 8974 auto *OT = new (memory) OverloadedTemplateStorage(size); 8975 8976 NamedDecl **Storage = OT->getStorage(); 8977 for (UnresolvedSetIterator I = Begin; I != End; ++I) { 8978 NamedDecl *D = *I; 8979 assert(isa<FunctionTemplateDecl>(D) || 8980 isa<UnresolvedUsingValueDecl>(D) || 8981 (isa<UsingShadowDecl>(D) && 8982 isa<FunctionTemplateDecl>(D->getUnderlyingDecl()))); 8983 *Storage++ = D; 8984 } 8985 8986 return TemplateName(OT); 8987 } 8988 8989 /// Retrieve a template name representing an unqualified-id that has been 8990 /// assumed to name a template for ADL purposes. 8991 TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const { 8992 auto *OT = new (*this) AssumedTemplateStorage(Name); 8993 return TemplateName(OT); 8994 } 8995 8996 /// Retrieve the template name that represents a qualified 8997 /// template name such as \c std::vector. 8998 TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS, 8999 bool TemplateKeyword, 9000 TemplateName Template) const { 9001 assert(NNS && "Missing nested-name-specifier in qualified template name"); 9002 9003 // FIXME: Canonicalization? 9004 llvm::FoldingSetNodeID ID; 9005 QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template); 9006 9007 void *InsertPos = nullptr; 9008 QualifiedTemplateName *QTN = 9009 QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9010 if (!QTN) { 9011 QTN = new (*this, alignof(QualifiedTemplateName)) 9012 QualifiedTemplateName(NNS, TemplateKeyword, Template); 9013 QualifiedTemplateNames.InsertNode(QTN, InsertPos); 9014 } 9015 9016 return TemplateName(QTN); 9017 } 9018 9019 /// Retrieve the template name that represents a dependent 9020 /// template name such as \c MetaFun::template apply. 9021 TemplateName 9022 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 9023 const IdentifierInfo *Name) const { 9024 assert((!NNS || NNS->isDependent()) && 9025 "Nested name specifier must be dependent"); 9026 9027 llvm::FoldingSetNodeID ID; 9028 DependentTemplateName::Profile(ID, NNS, Name); 9029 9030 void *InsertPos = nullptr; 9031 DependentTemplateName *QTN = 9032 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9033 9034 if (QTN) 9035 return TemplateName(QTN); 9036 9037 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 9038 if (CanonNNS == NNS) { 9039 QTN = new (*this, alignof(DependentTemplateName)) 9040 DependentTemplateName(NNS, Name); 9041 } else { 9042 TemplateName Canon = getDependentTemplateName(CanonNNS, Name); 9043 QTN = new (*this, alignof(DependentTemplateName)) 9044 DependentTemplateName(NNS, Name, Canon); 9045 DependentTemplateName *CheckQTN = 9046 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9047 assert(!CheckQTN && "Dependent type name canonicalization broken"); 9048 (void)CheckQTN; 9049 } 9050 9051 DependentTemplateNames.InsertNode(QTN, InsertPos); 9052 return TemplateName(QTN); 9053 } 9054 9055 /// Retrieve the template name that represents a dependent 9056 /// template name such as \c MetaFun::template operator+. 
9057 TemplateName 9058 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 9059 OverloadedOperatorKind Operator) const { 9060 assert((!NNS || NNS->isDependent()) && 9061 "Nested name specifier must be dependent"); 9062 9063 llvm::FoldingSetNodeID ID; 9064 DependentTemplateName::Profile(ID, NNS, Operator); 9065 9066 void *InsertPos = nullptr; 9067 DependentTemplateName *QTN 9068 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9069 9070 if (QTN) 9071 return TemplateName(QTN); 9072 9073 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 9074 if (CanonNNS == NNS) { 9075 QTN = new (*this, alignof(DependentTemplateName)) 9076 DependentTemplateName(NNS, Operator); 9077 } else { 9078 TemplateName Canon = getDependentTemplateName(CanonNNS, Operator); 9079 QTN = new (*this, alignof(DependentTemplateName)) 9080 DependentTemplateName(NNS, Operator, Canon); 9081 9082 DependentTemplateName *CheckQTN 9083 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9084 assert(!CheckQTN && "Dependent template name canonicalization broken"); 9085 (void)CheckQTN; 9086 } 9087 9088 DependentTemplateNames.InsertNode(QTN, InsertPos); 9089 return TemplateName(QTN); 9090 } 9091 9092 TemplateName 9093 ASTContext::getSubstTemplateTemplateParm(TemplateTemplateParmDecl *param, 9094 TemplateName replacement) const { 9095 llvm::FoldingSetNodeID ID; 9096 SubstTemplateTemplateParmStorage::Profile(ID, param, replacement); 9097 9098 void *insertPos = nullptr; 9099 SubstTemplateTemplateParmStorage *subst 9100 = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, insertPos); 9101 9102 if (!subst) { 9103 subst = new (*this) SubstTemplateTemplateParmStorage(param, replacement); 9104 SubstTemplateTemplateParms.InsertNode(subst, insertPos); 9105 } 9106 9107 return TemplateName(subst); 9108 } 9109 9110 TemplateName 9111 ASTContext::getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param, 9112 const TemplateArgument &ArgPack) const { 9113 auto &Self = const_cast<ASTContext &>(*this); 9114 llvm::FoldingSetNodeID ID; 9115 SubstTemplateTemplateParmPackStorage::Profile(ID, Self, Param, ArgPack); 9116 9117 void *InsertPos = nullptr; 9118 SubstTemplateTemplateParmPackStorage *Subst 9119 = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos); 9120 9121 if (!Subst) { 9122 Subst = new (*this) SubstTemplateTemplateParmPackStorage(Param, 9123 ArgPack.pack_size(), 9124 ArgPack.pack_begin()); 9125 SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos); 9126 } 9127 9128 return TemplateName(Subst); 9129 } 9130 9131 /// getFromTargetType - Given one of the integer types provided by 9132 /// TargetInfo, produce the corresponding type. The unsigned @p Type 9133 /// is actually a value of type @c TargetInfo::IntType. 
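/// For example, TargetInfo::SignedLong yields LongTy and
/// TargetInfo::UnsignedInt yields UnsignedIntTy, while TargetInfo::NoInt
/// yields a null type.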
9134 CanQualType ASTContext::getFromTargetType(unsigned Type) const { 9135 switch (Type) { 9136 case TargetInfo::NoInt: return {}; 9137 case TargetInfo::SignedChar: return SignedCharTy; 9138 case TargetInfo::UnsignedChar: return UnsignedCharTy; 9139 case TargetInfo::SignedShort: return ShortTy; 9140 case TargetInfo::UnsignedShort: return UnsignedShortTy; 9141 case TargetInfo::SignedInt: return IntTy; 9142 case TargetInfo::UnsignedInt: return UnsignedIntTy; 9143 case TargetInfo::SignedLong: return LongTy; 9144 case TargetInfo::UnsignedLong: return UnsignedLongTy; 9145 case TargetInfo::SignedLongLong: return LongLongTy; 9146 case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy; 9147 } 9148 9149 llvm_unreachable("Unhandled TargetInfo::IntType value"); 9150 } 9151 9152 //===----------------------------------------------------------------------===// 9153 // Type Predicates. 9154 //===----------------------------------------------------------------------===// 9155 9156 /// getObjCGCAttr - Returns one of GCNone, Weak or Strong objc's 9157 /// garbage collection attribute. 9158 /// 9159 Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const { 9160 if (getLangOpts().getGC() == LangOptions::NonGC) 9161 return Qualifiers::GCNone; 9162 9163 assert(getLangOpts().ObjC); 9164 Qualifiers::GC GCAttrs = Ty.getObjCGCAttr(); 9165 9166 // Default behaviour under objective-C's gc is for ObjC pointers 9167 // (or pointers to them) be treated as though they were declared 9168 // as __strong. 9169 if (GCAttrs == Qualifiers::GCNone) { 9170 if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) 9171 return Qualifiers::Strong; 9172 else if (Ty->isPointerType()) 9173 return getObjCGCAttrKind(Ty->castAs<PointerType>()->getPointeeType()); 9174 } else { 9175 // It's not valid to set GC attributes on anything that isn't a 9176 // pointer. 9177 #ifndef NDEBUG 9178 QualType CT = Ty->getCanonicalTypeInternal(); 9179 while (const auto *AT = dyn_cast<ArrayType>(CT)) 9180 CT = AT->getElementType(); 9181 assert(CT->isAnyPointerType() || CT->isBlockPointerType()); 9182 #endif 9183 } 9184 return GCAttrs; 9185 } 9186 9187 //===----------------------------------------------------------------------===// 9188 // Type Compatibility Testing 9189 //===----------------------------------------------------------------------===// 9190 9191 /// areCompatVectorTypes - Return true if the two specified vector types are 9192 /// compatible. 9193 static bool areCompatVectorTypes(const VectorType *LHS, 9194 const VectorType *RHS) { 9195 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); 9196 return LHS->getElementType() == RHS->getElementType() && 9197 LHS->getNumElements() == RHS->getNumElements(); 9198 } 9199 9200 /// areCompatMatrixTypes - Return true if the two specified matrix types are 9201 /// compatible. 
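/// Illustratively, both the element type and both dimensions must match, so
/// types declared with matrix_type(3, 4) and matrix_type(4, 3) over the same
/// element type are not compatible.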
9202 static bool areCompatMatrixTypes(const ConstantMatrixType *LHS, 9203 const ConstantMatrixType *RHS) { 9204 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); 9205 return LHS->getElementType() == RHS->getElementType() && 9206 LHS->getNumRows() == RHS->getNumRows() && 9207 LHS->getNumColumns() == RHS->getNumColumns(); 9208 } 9209 9210 bool ASTContext::areCompatibleVectorTypes(QualType FirstVec, 9211 QualType SecondVec) { 9212 assert(FirstVec->isVectorType() && "FirstVec should be a vector type"); 9213 assert(SecondVec->isVectorType() && "SecondVec should be a vector type"); 9214 9215 if (hasSameUnqualifiedType(FirstVec, SecondVec)) 9216 return true; 9217 9218 // Treat Neon vector types and most AltiVec vector types as if they are the 9219 // equivalent GCC vector types. 9220 const auto *First = FirstVec->castAs<VectorType>(); 9221 const auto *Second = SecondVec->castAs<VectorType>(); 9222 if (First->getNumElements() == Second->getNumElements() && 9223 hasSameType(First->getElementType(), Second->getElementType()) && 9224 First->getVectorKind() != VectorType::AltiVecPixel && 9225 First->getVectorKind() != VectorType::AltiVecBool && 9226 Second->getVectorKind() != VectorType::AltiVecPixel && 9227 Second->getVectorKind() != VectorType::AltiVecBool && 9228 First->getVectorKind() != VectorType::SveFixedLengthDataVector && 9229 First->getVectorKind() != VectorType::SveFixedLengthPredicateVector && 9230 Second->getVectorKind() != VectorType::SveFixedLengthDataVector && 9231 Second->getVectorKind() != VectorType::SveFixedLengthPredicateVector) 9232 return true; 9233 9234 return false; 9235 } 9236 9237 /// getSVETypeSize - Return SVE vector or predicate register size. 9238 static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty) { 9239 assert(Ty->isVLSTBuiltinType() && "Invalid SVE Type"); 9240 return Ty->getKind() == BuiltinType::SveBool 9241 ? (Context.getLangOpts().VScaleMin * 128) / Context.getCharWidth() 9242 : Context.getLangOpts().VScaleMin * 128; 9243 } 9244 9245 bool ASTContext::areCompatibleSveTypes(QualType FirstType, 9246 QualType SecondType) { 9247 assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) || 9248 (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) && 9249 "Expected SVE builtin type and vector type!"); 9250 9251 auto IsValidCast = [this](QualType FirstType, QualType SecondType) { 9252 if (const auto *BT = FirstType->getAs<BuiltinType>()) { 9253 if (const auto *VT = SecondType->getAs<VectorType>()) { 9254 // Predicates have the same representation as uint8 so we also have to 9255 // check the kind to make these types incompatible. 
9256 if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) 9257 return BT->getKind() == BuiltinType::SveBool; 9258 else if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) 9259 return VT->getElementType().getCanonicalType() == 9260 FirstType->getSveEltType(*this); 9261 else if (VT->getVectorKind() == VectorType::GenericVector) 9262 return getTypeSize(SecondType) == getSVETypeSize(*this, BT) && 9263 hasSameType(VT->getElementType(), 9264 getBuiltinVectorTypeInfo(BT).ElementType); 9265 } 9266 } 9267 return false; 9268 }; 9269 9270 return IsValidCast(FirstType, SecondType) || 9271 IsValidCast(SecondType, FirstType); 9272 } 9273 9274 bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType, 9275 QualType SecondType) { 9276 assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) || 9277 (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) && 9278 "Expected SVE builtin type and vector type!"); 9279 9280 auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) { 9281 const auto *BT = FirstType->getAs<BuiltinType>(); 9282 if (!BT) 9283 return false; 9284 9285 const auto *VecTy = SecondType->getAs<VectorType>(); 9286 if (VecTy && 9287 (VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector || 9288 VecTy->getVectorKind() == VectorType::GenericVector)) { 9289 const LangOptions::LaxVectorConversionKind LVCKind = 9290 getLangOpts().getLaxVectorConversions(); 9291 9292 // Can not convert between sve predicates and sve vectors because of 9293 // different size. 9294 if (BT->getKind() == BuiltinType::SveBool && 9295 VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector) 9296 return false; 9297 9298 // If __ARM_FEATURE_SVE_BITS != N do not allow GNU vector lax conversion. 9299 // "Whenever __ARM_FEATURE_SVE_BITS==N, GNUT implicitly 9300 // converts to VLAT and VLAT implicitly converts to GNUT." 9301 // ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and 9302 // predicates. 9303 if (VecTy->getVectorKind() == VectorType::GenericVector && 9304 getTypeSize(SecondType) != getSVETypeSize(*this, BT)) 9305 return false; 9306 9307 // If -flax-vector-conversions=all is specified, the types are 9308 // certainly compatible. 9309 if (LVCKind == LangOptions::LaxVectorConversionKind::All) 9310 return true; 9311 9312 // If -flax-vector-conversions=integer is specified, the types are 9313 // compatible if the elements are integer types. 9314 if (LVCKind == LangOptions::LaxVectorConversionKind::Integer) 9315 return VecTy->getElementType().getCanonicalType()->isIntegerType() && 9316 FirstType->getSveEltType(*this)->isIntegerType(); 9317 } 9318 9319 return false; 9320 }; 9321 9322 return IsLaxCompatible(FirstType, SecondType) || 9323 IsLaxCompatible(SecondType, FirstType); 9324 } 9325 9326 bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const { 9327 while (true) { 9328 // __strong id 9329 if (const AttributedType *Attr = dyn_cast<AttributedType>(Ty)) { 9330 if (Attr->getAttrKind() == attr::ObjCOwnership) 9331 return true; 9332 9333 Ty = Attr->getModifiedType(); 9334 9335 // X *__strong (...) 9336 } else if (const ParenType *Paren = dyn_cast<ParenType>(Ty)) { 9337 Ty = Paren->getInnerType(); 9338 9339 // We do not want to look through typedefs, typeof(expr), 9340 // typeof(type), or any other way that the type is somehow 9341 // abstracted. 
9342 } else { 9343 return false; 9344 } 9345 } 9346 } 9347 9348 //===----------------------------------------------------------------------===// 9349 // ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's. 9350 //===----------------------------------------------------------------------===// 9351 9352 /// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the 9353 /// inheritance hierarchy of 'rProto'. 9354 bool 9355 ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto, 9356 ObjCProtocolDecl *rProto) const { 9357 if (declaresSameEntity(lProto, rProto)) 9358 return true; 9359 for (auto *PI : rProto->protocols()) 9360 if (ProtocolCompatibleWithProtocol(lProto, PI)) 9361 return true; 9362 return false; 9363 } 9364 9365 /// ObjCQualifiedClassTypesAreCompatible - compare Class<pr,...> and 9366 /// Class<pr1, ...>. 9367 bool ASTContext::ObjCQualifiedClassTypesAreCompatible( 9368 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs) { 9369 for (auto *lhsProto : lhs->quals()) { 9370 bool match = false; 9371 for (auto *rhsProto : rhs->quals()) { 9372 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto)) { 9373 match = true; 9374 break; 9375 } 9376 } 9377 if (!match) 9378 return false; 9379 } 9380 return true; 9381 } 9382 9383 /// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an 9384 /// ObjCQualifiedIDType. 9385 bool ASTContext::ObjCQualifiedIdTypesAreCompatible( 9386 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs, 9387 bool compare) { 9388 // Allow id<P..> and an 'id' in all cases. 9389 if (lhs->isObjCIdType() || rhs->isObjCIdType()) 9390 return true; 9391 9392 // Don't allow id<P..> to convert to Class or Class<P..> in either direction. 9393 if (lhs->isObjCClassType() || lhs->isObjCQualifiedClassType() || 9394 rhs->isObjCClassType() || rhs->isObjCQualifiedClassType()) 9395 return false; 9396 9397 if (lhs->isObjCQualifiedIdType()) { 9398 if (rhs->qual_empty()) { 9399 // If the RHS is a unqualified interface pointer "NSString*", 9400 // make sure we check the class hierarchy. 9401 if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { 9402 for (auto *I : lhs->quals()) { 9403 // when comparing an id<P> on lhs with a static type on rhs, 9404 // see if static class implements all of id's protocols, directly or 9405 // through its super class and categories. 9406 if (!rhsID->ClassImplementsProtocol(I, true)) 9407 return false; 9408 } 9409 } 9410 // If there are no qualifiers and no interface, we have an 'id'. 9411 return true; 9412 } 9413 // Both the right and left sides have qualifiers. 9414 for (auto *lhsProto : lhs->quals()) { 9415 bool match = false; 9416 9417 // when comparing an id<P> on lhs with a static type on rhs, 9418 // see if static class implements all of id's protocols, directly or 9419 // through its super class and categories. 9420 for (auto *rhsProto : rhs->quals()) { 9421 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9422 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9423 match = true; 9424 break; 9425 } 9426 } 9427 // If the RHS is a qualified interface pointer "NSString<P>*", 9428 // make sure we check the class hierarchy. 9429 if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { 9430 for (auto *I : lhs->quals()) { 9431 // when comparing an id<P> on lhs with a static type on rhs, 9432 // see if static class implements all of id's protocols, directly or 9433 // through its super class and categories. 
9434 if (rhsID->ClassImplementsProtocol(I, true)) { 9435 match = true; 9436 break; 9437 } 9438 } 9439 } 9440 if (!match) 9441 return false; 9442 } 9443 9444 return true; 9445 } 9446 9447 assert(rhs->isObjCQualifiedIdType() && "One of the LHS/RHS should be id<x>"); 9448 9449 if (lhs->getInterfaceType()) { 9450 // If both the right and left sides have qualifiers. 9451 for (auto *lhsProto : lhs->quals()) { 9452 bool match = false; 9453 9454 // when comparing an id<P> on rhs with a static type on lhs, 9455 // see if static class implements all of id's protocols, directly or 9456 // through its super class and categories. 9457 // First, lhs protocols in the qualifier list must be found, direct 9458 // or indirect in rhs's qualifier list or it is a mismatch. 9459 for (auto *rhsProto : rhs->quals()) { 9460 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9461 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9462 match = true; 9463 break; 9464 } 9465 } 9466 if (!match) 9467 return false; 9468 } 9469 9470 // Static class's protocols, or its super class or category protocols 9471 // must be found, direct or indirect in rhs's qualifier list or it is a mismatch. 9472 if (ObjCInterfaceDecl *lhsID = lhs->getInterfaceDecl()) { 9473 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols; 9474 CollectInheritedProtocols(lhsID, LHSInheritedProtocols); 9475 // This is rather dubious but matches gcc's behavior. If lhs has 9476 // no type qualifier and its class has no static protocol(s) 9477 // assume that it is mismatch. 9478 if (LHSInheritedProtocols.empty() && lhs->qual_empty()) 9479 return false; 9480 for (auto *lhsProto : LHSInheritedProtocols) { 9481 bool match = false; 9482 for (auto *rhsProto : rhs->quals()) { 9483 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9484 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9485 match = true; 9486 break; 9487 } 9488 } 9489 if (!match) 9490 return false; 9491 } 9492 } 9493 return true; 9494 } 9495 return false; 9496 } 9497 9498 /// canAssignObjCInterfaces - Return true if the two interface types are 9499 /// compatible for assignment from RHS to LHS. This handles validation of any 9500 /// protocol qualifiers on the LHS or RHS. 9501 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT, 9502 const ObjCObjectPointerType *RHSOPT) { 9503 const ObjCObjectType* LHS = LHSOPT->getObjectType(); 9504 const ObjCObjectType* RHS = RHSOPT->getObjectType(); 9505 9506 // If either type represents the built-in 'id' type, return true. 9507 if (LHS->isObjCUnqualifiedId() || RHS->isObjCUnqualifiedId()) 9508 return true; 9509 9510 // Function object that propagates a successful result or handles 9511 // __kindof types. 9512 auto finish = [&](bool succeeded) -> bool { 9513 if (succeeded) 9514 return true; 9515 9516 if (!RHS->isKindOfType()) 9517 return false; 9518 9519 // Strip off __kindof and protocol qualifiers, then check whether 9520 // we can assign the other way. 9521 return canAssignObjCInterfaces(RHSOPT->stripObjCKindOfTypeAndQuals(*this), 9522 LHSOPT->stripObjCKindOfTypeAndQuals(*this)); 9523 }; 9524 9525 // Casts from or to id<P> are allowed when the other side has compatible 9526 // protocols. 9527 if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) { 9528 return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false)); 9529 } 9530 9531 // Verify protocol compatibility for casts from Class<P1> to Class<P2>. 
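  // For instance (illustrative, with unrelated protocols P1 and P2):
  //   Class<P1> dst = (Class<P1, P2>)src;   // OK: every LHS protocol is matched
  //   Class<P1, P2> dst = (Class<P1>)src;   // rejected: P2 is not satisfied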
9532 if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) { 9533 return finish(ObjCQualifiedClassTypesAreCompatible(LHSOPT, RHSOPT)); 9534 } 9535 9536 // Casts from Class to Class<Foo>, or vice-versa, are allowed. 9537 if (LHS->isObjCClass() && RHS->isObjCClass()) { 9538 return true; 9539 } 9540 9541 // If we have 2 user-defined types, fall into that path. 9542 if (LHS->getInterface() && RHS->getInterface()) { 9543 return finish(canAssignObjCInterfaces(LHS, RHS)); 9544 } 9545 9546 return false; 9547 } 9548 9549 /// canAssignObjCInterfacesInBlockPointer - This routine is specifically written 9550 /// for providing type-safety for objective-c pointers used to pass/return 9551 /// arguments in block literals. When passed as arguments, passing 'A*' where 9552 /// 'id' is expected is not OK. Passing 'Sub *" where 'Super *" is expected is 9553 /// not OK. For the return type, the opposite is not OK. 9554 bool ASTContext::canAssignObjCInterfacesInBlockPointer( 9555 const ObjCObjectPointerType *LHSOPT, 9556 const ObjCObjectPointerType *RHSOPT, 9557 bool BlockReturnType) { 9558 9559 // Function object that propagates a successful result or handles 9560 // __kindof types. 9561 auto finish = [&](bool succeeded) -> bool { 9562 if (succeeded) 9563 return true; 9564 9565 const ObjCObjectPointerType *Expected = BlockReturnType ? RHSOPT : LHSOPT; 9566 if (!Expected->isKindOfType()) 9567 return false; 9568 9569 // Strip off __kindof and protocol qualifiers, then check whether 9570 // we can assign the other way. 9571 return canAssignObjCInterfacesInBlockPointer( 9572 RHSOPT->stripObjCKindOfTypeAndQuals(*this), 9573 LHSOPT->stripObjCKindOfTypeAndQuals(*this), 9574 BlockReturnType); 9575 }; 9576 9577 if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType()) 9578 return true; 9579 9580 if (LHSOPT->isObjCBuiltinType()) { 9581 return finish(RHSOPT->isObjCBuiltinType() || 9582 RHSOPT->isObjCQualifiedIdType()); 9583 } 9584 9585 if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) { 9586 if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking) 9587 // Use for block parameters previous type checking for compatibility. 9588 return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false) || 9589 // Or corrected type checking as in non-compat mode. 9590 (!BlockReturnType && 9591 ObjCQualifiedIdTypesAreCompatible(RHSOPT, LHSOPT, false))); 9592 else 9593 return finish(ObjCQualifiedIdTypesAreCompatible( 9594 (BlockReturnType ? LHSOPT : RHSOPT), 9595 (BlockReturnType ? RHSOPT : LHSOPT), false)); 9596 } 9597 9598 const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType(); 9599 const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType(); 9600 if (LHS && RHS) { // We have 2 user-defined types. 9601 if (LHS != RHS) { 9602 if (LHS->getDecl()->isSuperClassOf(RHS->getDecl())) 9603 return finish(BlockReturnType); 9604 if (RHS->getDecl()->isSuperClassOf(LHS->getDecl())) 9605 return finish(!BlockReturnType); 9606 } 9607 else 9608 return true; 9609 } 9610 return false; 9611 } 9612 9613 /// Comparison routine for Objective-C protocols to be used with 9614 /// llvm::array_pod_sort. 9615 static int compareObjCProtocolsByName(ObjCProtocolDecl * const *lhs, 9616 ObjCProtocolDecl * const *rhs) { 9617 return (*lhs)->getName().compare((*rhs)->getName()); 9618 } 9619 9620 /// getIntersectionOfProtocols - This routine finds the intersection of set 9621 /// of protocols inherited from two distinct objective-c pointer objects with 9622 /// the given common base. 
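/// (For instance, illustratively: for 'Base<P0, P1> *' and 'Base<P0, P2> *'
/// with common base 'Base', the intersection is {P0}, minus any protocols
/// that 'Base' itself already adopts.)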
9623 /// It is used to build composite qualifier list of the composite type of 9624 /// the conditional expression involving two objective-c pointer objects. 9625 static 9626 void getIntersectionOfProtocols(ASTContext &Context, 9627 const ObjCInterfaceDecl *CommonBase, 9628 const ObjCObjectPointerType *LHSOPT, 9629 const ObjCObjectPointerType *RHSOPT, 9630 SmallVectorImpl<ObjCProtocolDecl *> &IntersectionSet) { 9631 9632 const ObjCObjectType* LHS = LHSOPT->getObjectType(); 9633 const ObjCObjectType* RHS = RHSOPT->getObjectType(); 9634 assert(LHS->getInterface() && "LHS must have an interface base"); 9635 assert(RHS->getInterface() && "RHS must have an interface base"); 9636 9637 // Add all of the protocols for the LHS. 9638 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSProtocolSet; 9639 9640 // Start with the protocol qualifiers. 9641 for (auto proto : LHS->quals()) { 9642 Context.CollectInheritedProtocols(proto, LHSProtocolSet); 9643 } 9644 9645 // Also add the protocols associated with the LHS interface. 9646 Context.CollectInheritedProtocols(LHS->getInterface(), LHSProtocolSet); 9647 9648 // Add all of the protocols for the RHS. 9649 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet; 9650 9651 // Start with the protocol qualifiers. 9652 for (auto proto : RHS->quals()) { 9653 Context.CollectInheritedProtocols(proto, RHSProtocolSet); 9654 } 9655 9656 // Also add the protocols associated with the RHS interface. 9657 Context.CollectInheritedProtocols(RHS->getInterface(), RHSProtocolSet); 9658 9659 // Compute the intersection of the collected protocol sets. 9660 for (auto proto : LHSProtocolSet) { 9661 if (RHSProtocolSet.count(proto)) 9662 IntersectionSet.push_back(proto); 9663 } 9664 9665 // Compute the set of protocols that is implied by either the common type or 9666 // the protocols within the intersection. 9667 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> ImpliedProtocols; 9668 Context.CollectInheritedProtocols(CommonBase, ImpliedProtocols); 9669 9670 // Remove any implied protocols from the list of inherited protocols. 9671 if (!ImpliedProtocols.empty()) { 9672 llvm::erase_if(IntersectionSet, [&](ObjCProtocolDecl *proto) -> bool { 9673 return ImpliedProtocols.contains(proto); 9674 }); 9675 } 9676 9677 // Sort the remaining protocols by name. 9678 llvm::array_pod_sort(IntersectionSet.begin(), IntersectionSet.end(), 9679 compareObjCProtocolsByName); 9680 } 9681 9682 /// Determine whether the first type is a subtype of the second. 9683 static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs, 9684 QualType rhs) { 9685 // Common case: two object pointers. 9686 const auto *lhsOPT = lhs->getAs<ObjCObjectPointerType>(); 9687 const auto *rhsOPT = rhs->getAs<ObjCObjectPointerType>(); 9688 if (lhsOPT && rhsOPT) 9689 return ctx.canAssignObjCInterfaces(lhsOPT, rhsOPT); 9690 9691 // Two block pointers. 9692 const auto *lhsBlock = lhs->getAs<BlockPointerType>(); 9693 const auto *rhsBlock = rhs->getAs<BlockPointerType>(); 9694 if (lhsBlock && rhsBlock) 9695 return ctx.typesAreBlockPointerCompatible(lhs, rhs); 9696 9697 // If either is an unqualified 'id' and the other is a block, it's 9698 // acceptable. 9699 if ((lhsOPT && lhsOPT->isObjCIdType() && rhsBlock) || 9700 (rhsOPT && rhsOPT->isObjCIdType() && lhsBlock)) 9701 return true; 9702 9703 return false; 9704 } 9705 9706 // Check that the given Objective-C type argument lists are equivalent. 
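// For example (illustrative, relying on Foundation declaring NSArray's element
// parameter __covariant): NSArray<NSMutableString *> matches where
// NSArray<NSString *> is expected, while an invariant parameter would require
// the written arguments to be identical (modulo __kindof stripping when
// 'stripKindOf' is set).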
9707 static bool sameObjCTypeArgs(ASTContext &ctx, 9708 const ObjCInterfaceDecl *iface, 9709 ArrayRef<QualType> lhsArgs, 9710 ArrayRef<QualType> rhsArgs, 9711 bool stripKindOf) { 9712 if (lhsArgs.size() != rhsArgs.size()) 9713 return false; 9714 9715 ObjCTypeParamList *typeParams = iface->getTypeParamList(); 9716 for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) { 9717 if (ctx.hasSameType(lhsArgs[i], rhsArgs[i])) 9718 continue; 9719 9720 switch (typeParams->begin()[i]->getVariance()) { 9721 case ObjCTypeParamVariance::Invariant: 9722 if (!stripKindOf || 9723 !ctx.hasSameType(lhsArgs[i].stripObjCKindOfType(ctx), 9724 rhsArgs[i].stripObjCKindOfType(ctx))) { 9725 return false; 9726 } 9727 break; 9728 9729 case ObjCTypeParamVariance::Covariant: 9730 if (!canAssignObjCObjectTypes(ctx, lhsArgs[i], rhsArgs[i])) 9731 return false; 9732 break; 9733 9734 case ObjCTypeParamVariance::Contravariant: 9735 if (!canAssignObjCObjectTypes(ctx, rhsArgs[i], lhsArgs[i])) 9736 return false; 9737 break; 9738 } 9739 } 9740 9741 return true; 9742 } 9743 9744 QualType ASTContext::areCommonBaseCompatible( 9745 const ObjCObjectPointerType *Lptr, 9746 const ObjCObjectPointerType *Rptr) { 9747 const ObjCObjectType *LHS = Lptr->getObjectType(); 9748 const ObjCObjectType *RHS = Rptr->getObjectType(); 9749 const ObjCInterfaceDecl* LDecl = LHS->getInterface(); 9750 const ObjCInterfaceDecl* RDecl = RHS->getInterface(); 9751 9752 if (!LDecl || !RDecl) 9753 return {}; 9754 9755 // When either LHS or RHS is a kindof type, we should return a kindof type. 9756 // For example, for common base of kindof(ASub1) and kindof(ASub2), we return 9757 // kindof(A). 9758 bool anyKindOf = LHS->isKindOfType() || RHS->isKindOfType(); 9759 9760 // Follow the left-hand side up the class hierarchy until we either hit a 9761 // root or find the RHS. Record the ancestors in case we don't find it. 9762 llvm::SmallDenseMap<const ObjCInterfaceDecl *, const ObjCObjectType *, 4> 9763 LHSAncestors; 9764 while (true) { 9765 // Record this ancestor. We'll need this if the common type isn't in the 9766 // path from the LHS to the root. 9767 LHSAncestors[LHS->getInterface()->getCanonicalDecl()] = LHS; 9768 9769 if (declaresSameEntity(LHS->getInterface(), RDecl)) { 9770 // Get the type arguments. 9771 ArrayRef<QualType> LHSTypeArgs = LHS->getTypeArgsAsWritten(); 9772 bool anyChanges = false; 9773 if (LHS->isSpecialized() && RHS->isSpecialized()) { 9774 // Both have type arguments, compare them. 9775 if (!sameObjCTypeArgs(*this, LHS->getInterface(), 9776 LHS->getTypeArgs(), RHS->getTypeArgs(), 9777 /*stripKindOf=*/true)) 9778 return {}; 9779 } else if (LHS->isSpecialized() != RHS->isSpecialized()) { 9780 // If only one has type arguments, the result will not have type 9781 // arguments. 9782 LHSTypeArgs = {}; 9783 anyChanges = true; 9784 } 9785 9786 // Compute the intersection of protocols. 9787 SmallVector<ObjCProtocolDecl *, 8> Protocols; 9788 getIntersectionOfProtocols(*this, LHS->getInterface(), Lptr, Rptr, 9789 Protocols); 9790 if (!Protocols.empty()) 9791 anyChanges = true; 9792 9793 // If anything in the LHS will have changed, build a new result type. 9794 // If we need to return a kindof type but LHS is not a kindof type, we 9795 // build a new result type. 
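      // For instance (illustrative): the common type of '__kindof Sub *' and
      // 'Base *', where 'Sub' derives from 'Base', is '__kindof Base *'.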
9796 if (anyChanges || LHS->isKindOfType() != anyKindOf) { 9797 QualType Result = getObjCInterfaceType(LHS->getInterface()); 9798 Result = getObjCObjectType(Result, LHSTypeArgs, Protocols, 9799 anyKindOf || LHS->isKindOfType()); 9800 return getObjCObjectPointerType(Result); 9801 } 9802 9803 return getObjCObjectPointerType(QualType(LHS, 0)); 9804 } 9805 9806 // Find the superclass. 9807 QualType LHSSuperType = LHS->getSuperClassType(); 9808 if (LHSSuperType.isNull()) 9809 break; 9810 9811 LHS = LHSSuperType->castAs<ObjCObjectType>(); 9812 } 9813 9814 // We didn't find anything by following the LHS to its root; now check 9815 // the RHS against the cached set of ancestors. 9816 while (true) { 9817 auto KnownLHS = LHSAncestors.find(RHS->getInterface()->getCanonicalDecl()); 9818 if (KnownLHS != LHSAncestors.end()) { 9819 LHS = KnownLHS->second; 9820 9821 // Get the type arguments. 9822 ArrayRef<QualType> RHSTypeArgs = RHS->getTypeArgsAsWritten(); 9823 bool anyChanges = false; 9824 if (LHS->isSpecialized() && RHS->isSpecialized()) { 9825 // Both have type arguments, compare them. 9826 if (!sameObjCTypeArgs(*this, LHS->getInterface(), 9827 LHS->getTypeArgs(), RHS->getTypeArgs(), 9828 /*stripKindOf=*/true)) 9829 return {}; 9830 } else if (LHS->isSpecialized() != RHS->isSpecialized()) { 9831 // If only one has type arguments, the result will not have type 9832 // arguments. 9833 RHSTypeArgs = {}; 9834 anyChanges = true; 9835 } 9836 9837 // Compute the intersection of protocols. 9838 SmallVector<ObjCProtocolDecl *, 8> Protocols; 9839 getIntersectionOfProtocols(*this, RHS->getInterface(), Lptr, Rptr, 9840 Protocols); 9841 if (!Protocols.empty()) 9842 anyChanges = true; 9843 9844 // If we need to return a kindof type but RHS is not a kindof type, we 9845 // build a new result type. 9846 if (anyChanges || RHS->isKindOfType() != anyKindOf) { 9847 QualType Result = getObjCInterfaceType(RHS->getInterface()); 9848 Result = getObjCObjectType(Result, RHSTypeArgs, Protocols, 9849 anyKindOf || RHS->isKindOfType()); 9850 return getObjCObjectPointerType(Result); 9851 } 9852 9853 return getObjCObjectPointerType(QualType(RHS, 0)); 9854 } 9855 9856 // Find the superclass of the RHS. 9857 QualType RHSSuperType = RHS->getSuperClassType(); 9858 if (RHSSuperType.isNull()) 9859 break; 9860 9861 RHS = RHSSuperType->castAs<ObjCObjectType>(); 9862 } 9863 9864 return {}; 9865 } 9866 9867 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS, 9868 const ObjCObjectType *RHS) { 9869 assert(LHS->getInterface() && "LHS is not an interface type"); 9870 assert(RHS->getInterface() && "RHS is not an interface type"); 9871 9872 // Verify that the base decls are compatible: the RHS must be a subclass of 9873 // the LHS. 9874 ObjCInterfaceDecl *LHSInterface = LHS->getInterface(); 9875 bool IsSuperClass = LHSInterface->isSuperClassOf(RHS->getInterface()); 9876 if (!IsSuperClass) 9877 return false; 9878 9879 // If the LHS has protocol qualifiers, determine whether all of them are 9880 // satisfied by the RHS (i.e., the RHS has a superset of the protocols in the 9881 // LHS). 9882 if (LHS->getNumProtocols() > 0) { 9883 // OK if conversion of LHS to SuperClass results in narrowing of types 9884 // ; i.e., SuperClass may implement at least one of the protocols 9885 // in LHS's protocol list. Example, SuperObj<P1> = lhs<P1,P2> is ok. 9886 // But not SuperObj<P1,P2,P3> = lhs<P1,P2>. 
    llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols;
    CollectInheritedProtocols(RHS->getInterface(), SuperClassInheritedProtocols);
    // Also, if RHS has explicit qualifiers, include them for comparing with
    // LHS's qualifiers.
    for (auto *RHSPI : RHS->quals())
      CollectInheritedProtocols(RHSPI, SuperClassInheritedProtocols);
    // If there are no protocols associated with RHS, it is not a match.
    if (SuperClassInheritedProtocols.empty())
      return false;

    for (const auto *LHSProto : LHS->quals()) {
      bool SuperImplementsProtocol = false;
      for (auto *SuperClassProto : SuperClassInheritedProtocols)
        if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) {
          SuperImplementsProtocol = true;
          break;
        }
      if (!SuperImplementsProtocol)
        return false;
    }
  }

  // If the LHS is specialized, we may need to check type arguments.
  if (LHS->isSpecialized()) {
    // Follow the superclass chain until we've matched the LHS class in the
    // hierarchy. This substitutes type arguments through.
    const ObjCObjectType *RHSSuper = RHS;
    while (!declaresSameEntity(RHSSuper->getInterface(), LHSInterface))
      RHSSuper = RHSSuper->getSuperClassType()->castAs<ObjCObjectType>();

    // If the RHS is specialized, compare type arguments.
    if (RHSSuper->isSpecialized() &&
        !sameObjCTypeArgs(*this, LHS->getInterface(),
                          LHS->getTypeArgs(), RHSSuper->getTypeArgs(),
                          /*stripKindOf=*/true)) {
      return false;
    }
  }

  return true;
}

bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) {
  // Get the "pointed to" types.
  const auto *LHSOPT = LHS->getAs<ObjCObjectPointerType>();
  const auto *RHSOPT = RHS->getAs<ObjCObjectPointerType>();

  if (!LHSOPT || !RHSOPT)
    return false;

  return canAssignObjCInterfaces(LHSOPT, RHSOPT) ||
         canAssignObjCInterfaces(RHSOPT, LHSOPT);
}

bool ASTContext::canBindObjCObjectType(QualType To, QualType From) {
  return canAssignObjCInterfaces(
      getObjCObjectPointerType(To)->castAs<ObjCObjectPointerType>(),
      getObjCObjectPointerType(From)->castAs<ObjCObjectPointerType>());
}

/// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible,
/// both shall have the identically qualified version of a compatible type.
/// C99 6.2.7p1: Two types have compatible types if their types are the
/// same. See 6.7.[2,3,5] for additional rules.
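/// For example (illustrative, C mode): 'int[]' is compatible with 'int[10]',
/// and 'enum E' is compatible with its implementation-chosen underlying
/// integer type, while 'int *' and 'const int *' are not compatible because
/// their pointee types are differently qualified.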
9951 bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS, 9952 bool CompareUnqualified) { 9953 if (getLangOpts().CPlusPlus) 9954 return hasSameType(LHS, RHS); 9955 9956 return !mergeTypes(LHS, RHS, false, CompareUnqualified).isNull(); 9957 } 9958 9959 bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) { 9960 return typesAreCompatible(LHS, RHS); 9961 } 9962 9963 bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) { 9964 return !mergeTypes(LHS, RHS, true).isNull(); 9965 } 9966 9967 /// mergeTransparentUnionType - if T is a transparent union type and a member 9968 /// of T is compatible with SubType, return the merged type, else return 9969 /// QualType() 9970 QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType, 9971 bool OfBlockPointer, 9972 bool Unqualified) { 9973 if (const RecordType *UT = T->getAsUnionType()) { 9974 RecordDecl *UD = UT->getDecl(); 9975 if (UD->hasAttr<TransparentUnionAttr>()) { 9976 for (const auto *I : UD->fields()) { 9977 QualType ET = I->getType().getUnqualifiedType(); 9978 QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified); 9979 if (!MT.isNull()) 9980 return MT; 9981 } 9982 } 9983 } 9984 9985 return {}; 9986 } 9987 9988 /// mergeFunctionParameterTypes - merge two types which appear as function 9989 /// parameter types 9990 QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs, 9991 bool OfBlockPointer, 9992 bool Unqualified) { 9993 // GNU extension: two types are compatible if they appear as a function 9994 // argument, one of the types is a transparent union type and the other 9995 // type is compatible with a union member 9996 QualType lmerge = mergeTransparentUnionType(lhs, rhs, OfBlockPointer, 9997 Unqualified); 9998 if (!lmerge.isNull()) 9999 return lmerge; 10000 10001 QualType rmerge = mergeTransparentUnionType(rhs, lhs, OfBlockPointer, 10002 Unqualified); 10003 if (!rmerge.isNull()) 10004 return rmerge; 10005 10006 return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified); 10007 } 10008 10009 QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs, 10010 bool OfBlockPointer, bool Unqualified, 10011 bool AllowCXX) { 10012 const auto *lbase = lhs->castAs<FunctionType>(); 10013 const auto *rbase = rhs->castAs<FunctionType>(); 10014 const auto *lproto = dyn_cast<FunctionProtoType>(lbase); 10015 const auto *rproto = dyn_cast<FunctionProtoType>(rbase); 10016 bool allLTypes = true; 10017 bool allRTypes = true; 10018 10019 // Check return type 10020 QualType retType; 10021 if (OfBlockPointer) { 10022 QualType RHS = rbase->getReturnType(); 10023 QualType LHS = lbase->getReturnType(); 10024 bool UnqualifiedResult = Unqualified; 10025 if (!UnqualifiedResult) 10026 UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers()); 10027 retType = mergeTypes(LHS, RHS, true, UnqualifiedResult, true); 10028 } 10029 else 10030 retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), false, 10031 Unqualified); 10032 if (retType.isNull()) 10033 return {}; 10034 10035 if (Unqualified) 10036 retType = retType.getUnqualifiedType(); 10037 10038 CanQualType LRetType = getCanonicalType(lbase->getReturnType()); 10039 CanQualType RRetType = getCanonicalType(rbase->getReturnType()); 10040 if (Unqualified) { 10041 LRetType = LRetType.getUnqualifiedType(); 10042 RRetType = RRetType.getUnqualifiedType(); 10043 } 10044 10045 if (getCanonicalType(retType) != LRetType) 10046 allLTypes = false; 10047 if (getCanonicalType(retType) != RRetType) 10048 
allRTypes = false; 10049 10050 // FIXME: double check this 10051 // FIXME: should we error if lbase->getRegParmAttr() != 0 && 10052 // rbase->getRegParmAttr() != 0 && 10053 // lbase->getRegParmAttr() != rbase->getRegParmAttr()? 10054 FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo(); 10055 FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo(); 10056 10057 // Compatible functions must have compatible calling conventions 10058 if (lbaseInfo.getCC() != rbaseInfo.getCC()) 10059 return {}; 10060 10061 // Regparm is part of the calling convention. 10062 if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm()) 10063 return {}; 10064 if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm()) 10065 return {}; 10066 10067 if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult()) 10068 return {}; 10069 if (lbaseInfo.getNoCallerSavedRegs() != rbaseInfo.getNoCallerSavedRegs()) 10070 return {}; 10071 if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck()) 10072 return {}; 10073 10074 // FIXME: some uses, e.g. conditional exprs, really want this to be 'both'. 10075 bool NoReturn = lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn(); 10076 10077 if (lbaseInfo.getNoReturn() != NoReturn) 10078 allLTypes = false; 10079 if (rbaseInfo.getNoReturn() != NoReturn) 10080 allRTypes = false; 10081 10082 FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn); 10083 10084 if (lproto && rproto) { // two C99 style function prototypes 10085 assert((AllowCXX || 10086 (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) && 10087 "C++ shouldn't be here"); 10088 // Compatible functions must have the same number of parameters 10089 if (lproto->getNumParams() != rproto->getNumParams()) 10090 return {}; 10091 10092 // Variadic and non-variadic functions aren't compatible 10093 if (lproto->isVariadic() != rproto->isVariadic()) 10094 return {}; 10095 10096 if (lproto->getMethodQuals() != rproto->getMethodQuals()) 10097 return {}; 10098 10099 SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos; 10100 bool canUseLeft, canUseRight; 10101 if (!mergeExtParameterInfo(lproto, rproto, canUseLeft, canUseRight, 10102 newParamInfos)) 10103 return {}; 10104 10105 if (!canUseLeft) 10106 allLTypes = false; 10107 if (!canUseRight) 10108 allRTypes = false; 10109 10110 // Check parameter type compatibility 10111 SmallVector<QualType, 10> types; 10112 for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) { 10113 QualType lParamType = lproto->getParamType(i).getUnqualifiedType(); 10114 QualType rParamType = rproto->getParamType(i).getUnqualifiedType(); 10115 QualType paramType = mergeFunctionParameterTypes( 10116 lParamType, rParamType, OfBlockPointer, Unqualified); 10117 if (paramType.isNull()) 10118 return {}; 10119 10120 if (Unqualified) 10121 paramType = paramType.getUnqualifiedType(); 10122 10123 types.push_back(paramType); 10124 if (Unqualified) { 10125 lParamType = lParamType.getUnqualifiedType(); 10126 rParamType = rParamType.getUnqualifiedType(); 10127 } 10128 10129 if (getCanonicalType(paramType) != getCanonicalType(lParamType)) 10130 allLTypes = false; 10131 if (getCanonicalType(paramType) != getCanonicalType(rParamType)) 10132 allRTypes = false; 10133 } 10134 10135 if (allLTypes) return lhs; 10136 if (allRTypes) return rhs; 10137 10138 FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo(); 10139 EPI.ExtInfo = einfo; 10140 EPI.ExtParameterInfos = 10141 newParamInfos.empty() ? 
nullptr : newParamInfos.data(); 10142 return getFunctionType(retType, types, EPI); 10143 } 10144 10145 if (lproto) allRTypes = false; 10146 if (rproto) allLTypes = false; 10147 10148 const FunctionProtoType *proto = lproto ? lproto : rproto; 10149 if (proto) { 10150 assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here"); 10151 if (proto->isVariadic()) 10152 return {}; 10153 // Check that the types are compatible with the types that 10154 // would result from default argument promotions (C99 6.7.5.3p15). 10155 // The only types actually affected are promotable integer 10156 // types and floats, which would be passed as a different 10157 // type depending on whether the prototype is visible. 10158 for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) { 10159 QualType paramTy = proto->getParamType(i); 10160 10161 // Look at the converted type of enum types, since that is the type used 10162 // to pass enum values. 10163 if (const auto *Enum = paramTy->getAs<EnumType>()) { 10164 paramTy = Enum->getDecl()->getIntegerType(); 10165 if (paramTy.isNull()) 10166 return {}; 10167 } 10168 10169 if (paramTy->isPromotableIntegerType() || 10170 getCanonicalType(paramTy).getUnqualifiedType() == FloatTy) 10171 return {}; 10172 } 10173 10174 if (allLTypes) return lhs; 10175 if (allRTypes) return rhs; 10176 10177 FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo(); 10178 EPI.ExtInfo = einfo; 10179 return getFunctionType(retType, proto->getParamTypes(), EPI); 10180 } 10181 10182 if (allLTypes) return lhs; 10183 if (allRTypes) return rhs; 10184 return getFunctionNoProtoType(retType, einfo); 10185 } 10186 10187 /// Given that we have an enum type and a non-enum type, try to merge them. 10188 static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET, 10189 QualType other, bool isBlockReturnType) { 10190 // C99 6.7.2.2p4: Each enumerated type shall be compatible with char, 10191 // a signed integer type, or an unsigned integer type. 10192 // Compatibility is based on the underlying type, not the promotion 10193 // type. 10194 QualType underlyingType = ET->getDecl()->getIntegerType(); 10195 if (underlyingType.isNull()) 10196 return {}; 10197 if (Context.hasSameType(underlyingType, other)) 10198 return other; 10199 10200 // In block return types, we're more permissive and accept any 10201 // integral type of the same size. 10202 if (isBlockReturnType && other->isIntegerType() && 10203 Context.getTypeSize(underlyingType) == Context.getTypeSize(other)) 10204 return other; 10205 10206 return {}; 10207 } 10208 10209 QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, 10210 bool OfBlockPointer, 10211 bool Unqualified, bool BlockReturnType) { 10212 // For C++ we will not reach this code with reference types (see below), 10213 // for OpenMP variant call overloading we might. 10214 // 10215 // C++ [expr]: If an expression initially has the type "reference to T", the 10216 // type is adjusted to "T" prior to any further analysis, the expression 10217 // designates the object or function denoted by the reference, and the 10218 // expression is an lvalue unless the reference is an rvalue reference and 10219 // the expression is a function call (possibly inside parentheses). 
10220 auto *LHSRefTy = LHS->getAs<ReferenceType>(); 10221 auto *RHSRefTy = RHS->getAs<ReferenceType>(); 10222 if (LangOpts.OpenMP && LHSRefTy && RHSRefTy && 10223 LHS->getTypeClass() == RHS->getTypeClass()) 10224 return mergeTypes(LHSRefTy->getPointeeType(), RHSRefTy->getPointeeType(), 10225 OfBlockPointer, Unqualified, BlockReturnType); 10226 if (LHSRefTy || RHSRefTy) 10227 return {}; 10228 10229 if (Unqualified) { 10230 LHS = LHS.getUnqualifiedType(); 10231 RHS = RHS.getUnqualifiedType(); 10232 } 10233 10234 QualType LHSCan = getCanonicalType(LHS), 10235 RHSCan = getCanonicalType(RHS); 10236 10237 // If two types are identical, they are compatible. 10238 if (LHSCan == RHSCan) 10239 return LHS; 10240 10241 // If the qualifiers are different, the types aren't compatible... mostly. 10242 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 10243 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 10244 if (LQuals != RQuals) { 10245 // If any of these qualifiers are different, we have a type 10246 // mismatch. 10247 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 10248 LQuals.getAddressSpace() != RQuals.getAddressSpace() || 10249 LQuals.getObjCLifetime() != RQuals.getObjCLifetime() || 10250 LQuals.hasUnaligned() != RQuals.hasUnaligned()) 10251 return {}; 10252 10253 // Exactly one GC qualifier difference is allowed: __strong is 10254 // okay if the other type has no GC qualifier but is an Objective 10255 // C object pointer (i.e. implicitly strong by default). We fix 10256 // this by pretending that the unqualified type was actually 10257 // qualified __strong. 10258 Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); 10259 Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); 10260 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); 10261 10262 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) 10263 return {}; 10264 10265 if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) { 10266 return mergeTypes(LHS, getObjCGCQualType(RHS, Qualifiers::Strong)); 10267 } 10268 if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) { 10269 return mergeTypes(getObjCGCQualType(LHS, Qualifiers::Strong), RHS); 10270 } 10271 return {}; 10272 } 10273 10274 // Okay, qualifiers are equal. 10275 10276 Type::TypeClass LHSClass = LHSCan->getTypeClass(); 10277 Type::TypeClass RHSClass = RHSCan->getTypeClass(); 10278 10279 // We want to consider the two function types to be the same for these 10280 // comparisons, just force one to the other. 10281 if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto; 10282 if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto; 10283 10284 // Same as above for arrays 10285 if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray) 10286 LHSClass = Type::ConstantArray; 10287 if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray) 10288 RHSClass = Type::ConstantArray; 10289 10290 // ObjCInterfaces are just specialized ObjCObjects. 10291 if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject; 10292 if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject; 10293 10294 // Canonicalize ExtVector -> Vector. 10295 if (LHSClass == Type::ExtVector) LHSClass = Type::Vector; 10296 if (RHSClass == Type::ExtVector) RHSClass = Type::Vector; 10297 10298 // If the canonical type classes don't match. 10299 if (LHSClass != RHSClass) { 10300 // Note that we only have special rules for turning block enum 10301 // returns into block int returns, not vice-versa. 
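    // For instance (illustrative): merging 'enum E' whose underlying type is
    // 'unsigned int' with 'unsigned int' succeeds and yields 'unsigned int';
    // in a block return position any integer type of the same width is also
    // accepted.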
10302 if (const auto *ETy = LHS->getAs<EnumType>()) { 10303 return mergeEnumWithInteger(*this, ETy, RHS, false); 10304 } 10305 if (const EnumType* ETy = RHS->getAs<EnumType>()) { 10306 return mergeEnumWithInteger(*this, ETy, LHS, BlockReturnType); 10307 } 10308 // allow block pointer type to match an 'id' type. 10309 if (OfBlockPointer && !BlockReturnType) { 10310 if (LHS->isObjCIdType() && RHS->isBlockPointerType()) 10311 return LHS; 10312 if (RHS->isObjCIdType() && LHS->isBlockPointerType()) 10313 return RHS; 10314 } 10315 // Allow __auto_type to match anything; it merges to the type with more 10316 // information. 10317 if (const auto *AT = LHS->getAs<AutoType>()) { 10318 if (AT->isGNUAutoType()) 10319 return RHS; 10320 } 10321 if (const auto *AT = RHS->getAs<AutoType>()) { 10322 if (AT->isGNUAutoType()) 10323 return LHS; 10324 } 10325 return {}; 10326 } 10327 10328 // The canonical type classes match. 10329 switch (LHSClass) { 10330 #define TYPE(Class, Base) 10331 #define ABSTRACT_TYPE(Class, Base) 10332 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: 10333 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 10334 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 10335 #include "clang/AST/TypeNodes.inc" 10336 llvm_unreachable("Non-canonical and dependent types shouldn't get here"); 10337 10338 case Type::Auto: 10339 case Type::DeducedTemplateSpecialization: 10340 case Type::LValueReference: 10341 case Type::RValueReference: 10342 case Type::MemberPointer: 10343 llvm_unreachable("C++ should never be in mergeTypes"); 10344 10345 case Type::ObjCInterface: 10346 case Type::IncompleteArray: 10347 case Type::VariableArray: 10348 case Type::FunctionProto: 10349 case Type::ExtVector: 10350 llvm_unreachable("Types are eliminated above"); 10351 10352 case Type::Pointer: 10353 { 10354 // Merge two pointer types, while trying to preserve typedef info 10355 QualType LHSPointee = LHS->castAs<PointerType>()->getPointeeType(); 10356 QualType RHSPointee = RHS->castAs<PointerType>()->getPointeeType(); 10357 if (Unqualified) { 10358 LHSPointee = LHSPointee.getUnqualifiedType(); 10359 RHSPointee = RHSPointee.getUnqualifiedType(); 10360 } 10361 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, false, 10362 Unqualified); 10363 if (ResultType.isNull()) 10364 return {}; 10365 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 10366 return LHS; 10367 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 10368 return RHS; 10369 return getPointerType(ResultType); 10370 } 10371 case Type::BlockPointer: 10372 { 10373 // Merge two block pointer types, while trying to preserve typedef info 10374 QualType LHSPointee = LHS->castAs<BlockPointerType>()->getPointeeType(); 10375 QualType RHSPointee = RHS->castAs<BlockPointerType>()->getPointeeType(); 10376 if (Unqualified) { 10377 LHSPointee = LHSPointee.getUnqualifiedType(); 10378 RHSPointee = RHSPointee.getUnqualifiedType(); 10379 } 10380 if (getLangOpts().OpenCL) { 10381 Qualifiers LHSPteeQual = LHSPointee.getQualifiers(); 10382 Qualifiers RHSPteeQual = RHSPointee.getQualifiers(); 10383 // Blocks can't be an expression in a ternary operator (OpenCL v2.0 10384 // 6.12.5) thus the following check is asymmetric. 
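      // For instance (illustrative): a '__generic' pointee on the LHS may
      // absorb a '__private' pointee on the RHS, since '__generic' covers the
      // private, local and global address spaces, but not the other way
      // around.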
10385 if (!LHSPteeQual.isAddressSpaceSupersetOf(RHSPteeQual)) 10386 return {}; 10387 LHSPteeQual.removeAddressSpace(); 10388 RHSPteeQual.removeAddressSpace(); 10389 LHSPointee = 10390 QualType(LHSPointee.getTypePtr(), LHSPteeQual.getAsOpaqueValue()); 10391 RHSPointee = 10392 QualType(RHSPointee.getTypePtr(), RHSPteeQual.getAsOpaqueValue()); 10393 } 10394 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, OfBlockPointer, 10395 Unqualified); 10396 if (ResultType.isNull()) 10397 return {}; 10398 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 10399 return LHS; 10400 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 10401 return RHS; 10402 return getBlockPointerType(ResultType); 10403 } 10404 case Type::Atomic: 10405 { 10406 // Merge two pointer types, while trying to preserve typedef info 10407 QualType LHSValue = LHS->castAs<AtomicType>()->getValueType(); 10408 QualType RHSValue = RHS->castAs<AtomicType>()->getValueType(); 10409 if (Unqualified) { 10410 LHSValue = LHSValue.getUnqualifiedType(); 10411 RHSValue = RHSValue.getUnqualifiedType(); 10412 } 10413 QualType ResultType = mergeTypes(LHSValue, RHSValue, false, 10414 Unqualified); 10415 if (ResultType.isNull()) 10416 return {}; 10417 if (getCanonicalType(LHSValue) == getCanonicalType(ResultType)) 10418 return LHS; 10419 if (getCanonicalType(RHSValue) == getCanonicalType(ResultType)) 10420 return RHS; 10421 return getAtomicType(ResultType); 10422 } 10423 case Type::ConstantArray: 10424 { 10425 const ConstantArrayType* LCAT = getAsConstantArrayType(LHS); 10426 const ConstantArrayType* RCAT = getAsConstantArrayType(RHS); 10427 if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize()) 10428 return {}; 10429 10430 QualType LHSElem = getAsArrayType(LHS)->getElementType(); 10431 QualType RHSElem = getAsArrayType(RHS)->getElementType(); 10432 if (Unqualified) { 10433 LHSElem = LHSElem.getUnqualifiedType(); 10434 RHSElem = RHSElem.getUnqualifiedType(); 10435 } 10436 10437 QualType ResultType = mergeTypes(LHSElem, RHSElem, false, Unqualified); 10438 if (ResultType.isNull()) 10439 return {}; 10440 10441 const VariableArrayType* LVAT = getAsVariableArrayType(LHS); 10442 const VariableArrayType* RVAT = getAsVariableArrayType(RHS); 10443 10444 // If either side is a variable array, and both are complete, check whether 10445 // the current dimension is definite. 
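    // For instance (illustrative): 'int[n]' merges with 'int[10]' when the
    // VLA bound is not a visible integer constant; if both bounds fold to
    // constants, they must be equal or the merge fails.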
10446 if (LVAT || RVAT) { 10447 auto SizeFetch = [this](const VariableArrayType* VAT, 10448 const ConstantArrayType* CAT) 10449 -> std::pair<bool,llvm::APInt> { 10450 if (VAT) { 10451 Optional<llvm::APSInt> TheInt; 10452 Expr *E = VAT->getSizeExpr(); 10453 if (E && (TheInt = E->getIntegerConstantExpr(*this))) 10454 return std::make_pair(true, *TheInt); 10455 return std::make_pair(false, llvm::APSInt()); 10456 } 10457 if (CAT) 10458 return std::make_pair(true, CAT->getSize()); 10459 return std::make_pair(false, llvm::APInt()); 10460 }; 10461 10462 bool HaveLSize, HaveRSize; 10463 llvm::APInt LSize, RSize; 10464 std::tie(HaveLSize, LSize) = SizeFetch(LVAT, LCAT); 10465 std::tie(HaveRSize, RSize) = SizeFetch(RVAT, RCAT); 10466 if (HaveLSize && HaveRSize && !llvm::APInt::isSameValue(LSize, RSize)) 10467 return {}; // Definite, but unequal, array dimension 10468 } 10469 10470 if (LCAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 10471 return LHS; 10472 if (RCAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 10473 return RHS; 10474 if (LCAT) 10475 return getConstantArrayType(ResultType, LCAT->getSize(), 10476 LCAT->getSizeExpr(), 10477 ArrayType::ArraySizeModifier(), 0); 10478 if (RCAT) 10479 return getConstantArrayType(ResultType, RCAT->getSize(), 10480 RCAT->getSizeExpr(), 10481 ArrayType::ArraySizeModifier(), 0); 10482 if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 10483 return LHS; 10484 if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 10485 return RHS; 10486 if (LVAT) { 10487 // FIXME: This isn't correct! But tricky to implement because 10488 // the array's size has to be the size of LHS, but the type 10489 // has to be different. 10490 return LHS; 10491 } 10492 if (RVAT) { 10493 // FIXME: This isn't correct! But tricky to implement because 10494 // the array's size has to be the size of RHS, but the type 10495 // has to be different. 10496 return RHS; 10497 } 10498 if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS; 10499 if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS; 10500 return getIncompleteArrayType(ResultType, 10501 ArrayType::ArraySizeModifier(), 0); 10502 } 10503 case Type::FunctionNoProto: 10504 return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified); 10505 case Type::Record: 10506 case Type::Enum: 10507 return {}; 10508 case Type::Builtin: 10509 // Only exactly equal builtin types are compatible, which is tested above. 10510 return {}; 10511 case Type::Complex: 10512 // Distinct complex types are incompatible. 10513 return {}; 10514 case Type::Vector: 10515 // FIXME: The merged type should be an ExtVector! 10516 if (areCompatVectorTypes(LHSCan->castAs<VectorType>(), 10517 RHSCan->castAs<VectorType>())) 10518 return LHS; 10519 return {}; 10520 case Type::ConstantMatrix: 10521 if (areCompatMatrixTypes(LHSCan->castAs<ConstantMatrixType>(), 10522 RHSCan->castAs<ConstantMatrixType>())) 10523 return LHS; 10524 return {}; 10525 case Type::ObjCObject: { 10526 // Check if the types are assignment compatible. 10527 // FIXME: This should be type compatibility, e.g. whether 10528 // "LHS x; RHS x;" at global scope is legal. 
10529 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectType>(), 10530 RHS->castAs<ObjCObjectType>())) 10531 return LHS; 10532 return {}; 10533 } 10534 case Type::ObjCObjectPointer: 10535 if (OfBlockPointer) { 10536 if (canAssignObjCInterfacesInBlockPointer( 10537 LHS->castAs<ObjCObjectPointerType>(), 10538 RHS->castAs<ObjCObjectPointerType>(), BlockReturnType)) 10539 return LHS; 10540 return {}; 10541 } 10542 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectPointerType>(), 10543 RHS->castAs<ObjCObjectPointerType>())) 10544 return LHS; 10545 return {}; 10546 case Type::Pipe: 10547 assert(LHS != RHS && 10548 "Equivalent pipe types should have already been handled!"); 10549 return {}; 10550 case Type::BitInt: { 10551 // Merge two bit-precise int types, while trying to preserve typedef info. 10552 bool LHSUnsigned = LHS->castAs<BitIntType>()->isUnsigned(); 10553 bool RHSUnsigned = RHS->castAs<BitIntType>()->isUnsigned(); 10554 unsigned LHSBits = LHS->castAs<BitIntType>()->getNumBits(); 10555 unsigned RHSBits = RHS->castAs<BitIntType>()->getNumBits(); 10556 10557 // Like unsigned/int, shouldn't have a type if they don't match. 10558 if (LHSUnsigned != RHSUnsigned) 10559 return {}; 10560 10561 if (LHSBits != RHSBits) 10562 return {}; 10563 return LHS; 10564 } 10565 } 10566 10567 llvm_unreachable("Invalid Type::Class!"); 10568 } 10569 10570 bool ASTContext::mergeExtParameterInfo( 10571 const FunctionProtoType *FirstFnType, const FunctionProtoType *SecondFnType, 10572 bool &CanUseFirst, bool &CanUseSecond, 10573 SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &NewParamInfos) { 10574 assert(NewParamInfos.empty() && "param info list not empty"); 10575 CanUseFirst = CanUseSecond = true; 10576 bool FirstHasInfo = FirstFnType->hasExtParameterInfos(); 10577 bool SecondHasInfo = SecondFnType->hasExtParameterInfos(); 10578 10579 // Fast path: if the first type doesn't have ext parameter infos, 10580 // we match if and only if the second type also doesn't have them. 10581 if (!FirstHasInfo && !SecondHasInfo) 10582 return true; 10583 10584 bool NeedParamInfo = false; 10585 size_t E = FirstHasInfo ? FirstFnType->getExtParameterInfos().size() 10586 : SecondFnType->getExtParameterInfos().size(); 10587 10588 for (size_t I = 0; I < E; ++I) { 10589 FunctionProtoType::ExtParameterInfo FirstParam, SecondParam; 10590 if (FirstHasInfo) 10591 FirstParam = FirstFnType->getExtParameterInfo(I); 10592 if (SecondHasInfo) 10593 SecondParam = SecondFnType->getExtParameterInfo(I); 10594 10595 // Cannot merge unless everything except the noescape flag matches. 10596 if (FirstParam.withIsNoEscape(false) != SecondParam.withIsNoEscape(false)) 10597 return false; 10598 10599 bool FirstNoEscape = FirstParam.isNoEscape(); 10600 bool SecondNoEscape = SecondParam.isNoEscape(); 10601 bool IsNoEscape = FirstNoEscape && SecondNoEscape; 10602 NewParamInfos.push_back(FirstParam.withIsNoEscape(IsNoEscape)); 10603 if (NewParamInfos.back().getOpaqueValue()) 10604 NeedParamInfo = true; 10605 if (FirstNoEscape != IsNoEscape) 10606 CanUseFirst = false; 10607 if (SecondNoEscape != IsNoEscape) 10608 CanUseSecond = false; 10609 } 10610 10611 if (!NeedParamInfo) 10612 NewParamInfos.clear(); 10613 10614 return true; 10615 } 10616 10617 void ASTContext::ResetObjCLayout(const ObjCContainerDecl *CD) { 10618 ObjCLayouts[CD] = nullptr; 10619 } 10620 10621 /// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and 10622 /// 'RHS' attributes and returns the merged version; including for function 10623 /// return types. 
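/// For example (illustrative, under Objective-C GC): merging 'id' with
/// '__strong id' keeps the '__strong' qualifier, since an unqualified object
/// pointer is implicitly strong; the same rule is applied through function
/// return types, e.g. 'id foo();' redeclared as '__strong id foo();'.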
10624 QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) { 10625 QualType LHSCan = getCanonicalType(LHS), 10626 RHSCan = getCanonicalType(RHS); 10627 // If two types are identical, they are compatible. 10628 if (LHSCan == RHSCan) 10629 return LHS; 10630 if (RHSCan->isFunctionType()) { 10631 if (!LHSCan->isFunctionType()) 10632 return {}; 10633 QualType OldReturnType = 10634 cast<FunctionType>(RHSCan.getTypePtr())->getReturnType(); 10635 QualType NewReturnType = 10636 cast<FunctionType>(LHSCan.getTypePtr())->getReturnType(); 10637 QualType ResReturnType = 10638 mergeObjCGCQualifiers(NewReturnType, OldReturnType); 10639 if (ResReturnType.isNull()) 10640 return {}; 10641 if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) { 10642 // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo(); 10643 // In either case, use OldReturnType to build the new function type. 10644 const auto *F = LHS->castAs<FunctionType>(); 10645 if (const auto *FPT = cast<FunctionProtoType>(F)) { 10646 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 10647 EPI.ExtInfo = getFunctionExtInfo(LHS); 10648 QualType ResultType = 10649 getFunctionType(OldReturnType, FPT->getParamTypes(), EPI); 10650 return ResultType; 10651 } 10652 } 10653 return {}; 10654 } 10655 10656 // If the qualifiers are different, the types can still be merged. 10657 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 10658 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 10659 if (LQuals != RQuals) { 10660 // If any of these qualifiers are different, we have a type mismatch. 10661 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 10662 LQuals.getAddressSpace() != RQuals.getAddressSpace()) 10663 return {}; 10664 10665 // Exactly one GC qualifier difference is allowed: __strong is 10666 // okay if the other type has no GC qualifier but is an Objective 10667 // C object pointer (i.e. implicitly strong by default). We fix 10668 // this by pretending that the unqualified type was actually 10669 // qualified __strong. 
    Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
    Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
    assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");

    if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
      return {};

    if (GC_L == Qualifiers::Strong)
      return LHS;
    if (GC_R == Qualifiers::Strong)
      return RHS;
    return {};
  }

  if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) {
    QualType LHSBaseQT = LHS->castAs<ObjCObjectPointerType>()->getPointeeType();
    QualType RHSBaseQT = RHS->castAs<ObjCObjectPointerType>()->getPointeeType();
    QualType ResQT = mergeObjCGCQualifiers(LHSBaseQT, RHSBaseQT);
    if (ResQT == LHSBaseQT)
      return LHS;
    if (ResQT == RHSBaseQT)
      return RHS;
  }
  return {};
}

//===----------------------------------------------------------------------===//
//                         Integer Predicates
//===----------------------------------------------------------------------===//

unsigned ASTContext::getIntWidth(QualType T) const {
  if (const auto *ET = T->getAs<EnumType>())
    T = ET->getDecl()->getIntegerType();
  if (T->isBooleanType())
    return 1;
  if (const auto *EIT = T->getAs<BitIntType>())
    return EIT->getNumBits();
  // For builtin types, just use the standard type sizing method.
  return (unsigned)getTypeSize(T);
}

QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
  assert((T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) &&
         "Unexpected type");

  // Turn <4 x signed int> -> <4 x unsigned int>.
  if (const auto *VTy = T->getAs<VectorType>())
    return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()),
                         VTy->getNumElements(), VTy->getVectorKind());

  // For _BitInt, return an unsigned _BitInt with the same width.
  if (const auto *EITy = T->getAs<BitIntType>())
    return getBitIntType(/*Unsigned=*/true, EITy->getNumBits());

  // For enums, get the underlying integer type of the enum, and let the
  // general integer type sign-changing code handle it.
  if (const auto *ETy = T->getAs<EnumType>())
    T = ETy->getDecl()->getIntegerType();

  switch (T->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
    return UnsignedCharTy;
  case BuiltinType::Short:
    return UnsignedShortTy;
  case BuiltinType::Int:
    return UnsignedIntTy;
  case BuiltinType::Long:
    return UnsignedLongTy;
  case BuiltinType::LongLong:
    return UnsignedLongLongTy;
  case BuiltinType::Int128:
    return UnsignedInt128Ty;
  // wchar_t is special. It is either signed or not, but when it's signed,
  // there's no matching "unsigned wchar_t". Therefore we return the unsigned
  // version of its underlying type instead.
  case BuiltinType::WChar_S:
    return getUnsignedWCharType();

  case BuiltinType::ShortAccum:
    return UnsignedShortAccumTy;
  case BuiltinType::Accum:
    return UnsignedAccumTy;
  case BuiltinType::LongAccum:
    return UnsignedLongAccumTy;
  case BuiltinType::SatShortAccum:
    return SatUnsignedShortAccumTy;
  case BuiltinType::SatAccum:
    return SatUnsignedAccumTy;
  case BuiltinType::SatLongAccum:
    return SatUnsignedLongAccumTy;
  case BuiltinType::ShortFract:
    return UnsignedShortFractTy;
  case BuiltinType::Fract:
    return UnsignedFractTy;
  case BuiltinType::LongFract:
    return UnsignedLongFractTy;
  case BuiltinType::SatShortFract:
    return SatUnsignedShortFractTy;
  case BuiltinType::SatFract:
    return SatUnsignedFractTy;
  case BuiltinType::SatLongFract:
    return SatUnsignedLongFractTy;
  default:
    llvm_unreachable("Unexpected signed integer or fixed point type");
  }
}

QualType ASTContext::getCorrespondingSignedType(QualType T) const {
  assert((T->hasUnsignedIntegerRepresentation() ||
          T->isUnsignedFixedPointType()) &&
         "Unexpected type");

  // Turn <4 x unsigned int> -> <4 x signed int>.
  if (const auto *VTy = T->getAs<VectorType>())
    return getVectorType(getCorrespondingSignedType(VTy->getElementType()),
                         VTy->getNumElements(), VTy->getVectorKind());

  // For _BitInt, return a signed _BitInt with the same width.
  if (const auto *EITy = T->getAs<BitIntType>())
    return getBitIntType(/*Unsigned=*/false, EITy->getNumBits());

  // For enums, get the underlying integer type of the enum, and let the
  // general integer type sign-changing code handle it.
  if (const auto *ETy = T->getAs<EnumType>())
    T = ETy->getDecl()->getIntegerType();

  switch (T->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::Char_U:
  case BuiltinType::UChar:
    return SignedCharTy;
  case BuiltinType::UShort:
    return ShortTy;
  case BuiltinType::UInt:
    return IntTy;
  case BuiltinType::ULong:
    return LongTy;
  case BuiltinType::ULongLong:
    return LongLongTy;
  case BuiltinType::UInt128:
    return Int128Ty;
  // wchar_t is special. It is either unsigned or not, but when it's unsigned,
  // there's no matching "signed wchar_t". Therefore we return the signed
  // version of its underlying type instead.
10814 case BuiltinType::WChar_U: 10815 return getSignedWCharType(); 10816 10817 case BuiltinType::UShortAccum: 10818 return ShortAccumTy; 10819 case BuiltinType::UAccum: 10820 return AccumTy; 10821 case BuiltinType::ULongAccum: 10822 return LongAccumTy; 10823 case BuiltinType::SatUShortAccum: 10824 return SatShortAccumTy; 10825 case BuiltinType::SatUAccum: 10826 return SatAccumTy; 10827 case BuiltinType::SatULongAccum: 10828 return SatLongAccumTy; 10829 case BuiltinType::UShortFract: 10830 return ShortFractTy; 10831 case BuiltinType::UFract: 10832 return FractTy; 10833 case BuiltinType::ULongFract: 10834 return LongFractTy; 10835 case BuiltinType::SatUShortFract: 10836 return SatShortFractTy; 10837 case BuiltinType::SatUFract: 10838 return SatFractTy; 10839 case BuiltinType::SatULongFract: 10840 return SatLongFractTy; 10841 default: 10842 llvm_unreachable("Unexpected unsigned integer or fixed point type"); 10843 } 10844 } 10845 10846 ASTMutationListener::~ASTMutationListener() = default; 10847 10848 void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD, 10849 QualType ReturnType) {} 10850 10851 //===----------------------------------------------------------------------===// 10852 // Builtin Type Computation 10853 //===----------------------------------------------------------------------===// 10854 10855 /// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the 10856 /// pointer over the consumed characters. This returns the resultant type. If 10857 /// AllowTypeModifiers is false then modifier like * are not parsed, just basic 10858 /// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of 10859 /// a vector of "i*". 10860 /// 10861 /// RequiresICE is filled in on return to indicate whether the value is required 10862 /// to be an Integer Constant Expression. 10863 static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context, 10864 ASTContext::GetBuiltinTypeError &Error, 10865 bool &RequiresICE, 10866 bool AllowTypeModifiers) { 10867 // Modifiers. 10868 int HowLong = 0; 10869 bool Signed = false, Unsigned = false; 10870 RequiresICE = false; 10871 10872 // Read the prefixed modifiers first. 10873 bool Done = false; 10874 #ifndef NDEBUG 10875 bool IsSpecial = false; 10876 #endif 10877 while (!Done) { 10878 switch (*Str++) { 10879 default: Done = true; --Str; break; 10880 case 'I': 10881 RequiresICE = true; 10882 break; 10883 case 'S': 10884 assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!"); 10885 assert(!Signed && "Can't use 'S' modifier multiple times!"); 10886 Signed = true; 10887 break; 10888 case 'U': 10889 assert(!Signed && "Can't use both 'S' and 'U' modifiers!"); 10890 assert(!Unsigned && "Can't use 'U' modifier multiple times!"); 10891 Unsigned = true; 10892 break; 10893 case 'L': 10894 assert(!IsSpecial && "Can't use 'L' with 'W', 'N', 'Z' or 'O' modifiers"); 10895 assert(HowLong <= 2 && "Can't have LLLL modifier"); 10896 ++HowLong; 10897 break; 10898 case 'N': 10899 // 'N' behaves like 'L' for all non LP64 targets and 'int' otherwise. 10900 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 10901 assert(HowLong == 0 && "Can't use both 'L' and 'N' modifiers!"); 10902 #ifndef NDEBUG 10903 IsSpecial = true; 10904 #endif 10905 if (Context.getTargetInfo().getLongWidth() == 32) 10906 ++HowLong; 10907 break; 10908 case 'W': 10909 // This modifier represents int64 type. 
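    // For instance (illustrative): on an LP64 target where int64_t is 'long',
    // 'W' behaves like a single 'L'; on an LLP64 target such as 64-bit
    // Windows, where int64_t is 'long long', it behaves like 'LL'.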
10910 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 10911 assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!"); 10912 #ifndef NDEBUG 10913 IsSpecial = true; 10914 #endif 10915 switch (Context.getTargetInfo().getInt64Type()) { 10916 default: 10917 llvm_unreachable("Unexpected integer type"); 10918 case TargetInfo::SignedLong: 10919 HowLong = 1; 10920 break; 10921 case TargetInfo::SignedLongLong: 10922 HowLong = 2; 10923 break; 10924 } 10925 break; 10926 case 'Z': 10927 // This modifier represents int32 type. 10928 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 10929 assert(HowLong == 0 && "Can't use both 'L' and 'Z' modifiers!"); 10930 #ifndef NDEBUG 10931 IsSpecial = true; 10932 #endif 10933 switch (Context.getTargetInfo().getIntTypeByWidth(32, true)) { 10934 default: 10935 llvm_unreachable("Unexpected integer type"); 10936 case TargetInfo::SignedInt: 10937 HowLong = 0; 10938 break; 10939 case TargetInfo::SignedLong: 10940 HowLong = 1; 10941 break; 10942 case TargetInfo::SignedLongLong: 10943 HowLong = 2; 10944 break; 10945 } 10946 break; 10947 case 'O': 10948 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 10949 assert(HowLong == 0 && "Can't use both 'L' and 'O' modifiers!"); 10950 #ifndef NDEBUG 10951 IsSpecial = true; 10952 #endif 10953 if (Context.getLangOpts().OpenCL) 10954 HowLong = 1; 10955 else 10956 HowLong = 2; 10957 break; 10958 } 10959 } 10960 10961 QualType Type; 10962 10963 // Read the base type. 10964 switch (*Str++) { 10965 default: llvm_unreachable("Unknown builtin type letter!"); 10966 case 'x': 10967 assert(HowLong == 0 && !Signed && !Unsigned && 10968 "Bad modifiers used with 'x'!"); 10969 Type = Context.Float16Ty; 10970 break; 10971 case 'y': 10972 assert(HowLong == 0 && !Signed && !Unsigned && 10973 "Bad modifiers used with 'y'!"); 10974 Type = Context.BFloat16Ty; 10975 break; 10976 case 'v': 10977 assert(HowLong == 0 && !Signed && !Unsigned && 10978 "Bad modifiers used with 'v'!"); 10979 Type = Context.VoidTy; 10980 break; 10981 case 'h': 10982 assert(HowLong == 0 && !Signed && !Unsigned && 10983 "Bad modifiers used with 'h'!"); 10984 Type = Context.HalfTy; 10985 break; 10986 case 'f': 10987 assert(HowLong == 0 && !Signed && !Unsigned && 10988 "Bad modifiers used with 'f'!"); 10989 Type = Context.FloatTy; 10990 break; 10991 case 'd': 10992 assert(HowLong < 3 && !Signed && !Unsigned && 10993 "Bad modifiers used with 'd'!"); 10994 if (HowLong == 1) 10995 Type = Context.LongDoubleTy; 10996 else if (HowLong == 2) 10997 Type = Context.Float128Ty; 10998 else 10999 Type = Context.DoubleTy; 11000 break; 11001 case 's': 11002 assert(HowLong == 0 && "Bad modifiers used with 's'!"); 11003 if (Unsigned) 11004 Type = Context.UnsignedShortTy; 11005 else 11006 Type = Context.ShortTy; 11007 break; 11008 case 'i': 11009 if (HowLong == 3) 11010 Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty; 11011 else if (HowLong == 2) 11012 Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy; 11013 else if (HowLong == 1) 11014 Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy; 11015 else 11016 Type = Unsigned ? 
Context.UnsignedIntTy : Context.IntTy; 11017 break; 11018 case 'c': 11019 assert(HowLong == 0 && "Bad modifiers used with 'c'!"); 11020 if (Signed) 11021 Type = Context.SignedCharTy; 11022 else if (Unsigned) 11023 Type = Context.UnsignedCharTy; 11024 else 11025 Type = Context.CharTy; 11026 break; 11027 case 'b': // boolean 11028 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!"); 11029 Type = Context.BoolTy; 11030 break; 11031 case 'z': // size_t. 11032 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!"); 11033 Type = Context.getSizeType(); 11034 break; 11035 case 'w': // wchar_t. 11036 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'w'!"); 11037 Type = Context.getWideCharType(); 11038 break; 11039 case 'F': 11040 Type = Context.getCFConstantStringType(); 11041 break; 11042 case 'G': 11043 Type = Context.getObjCIdType(); 11044 break; 11045 case 'H': 11046 Type = Context.getObjCSelType(); 11047 break; 11048 case 'M': 11049 Type = Context.getObjCSuperType(); 11050 break; 11051 case 'a': 11052 Type = Context.getBuiltinVaListType(); 11053 assert(!Type.isNull() && "builtin va list type not initialized!"); 11054 break; 11055 case 'A': 11056 // This is a "reference" to a va_list; however, what exactly 11057 // this means depends on how va_list is defined. There are two 11058 // different kinds of va_list: ones passed by value, and ones 11059 // passed by reference. An example of a by-value va_list is 11060 // x86, where va_list is a char*. An example of by-ref va_list 11061 // is x86-64, where va_list is a __va_list_tag[1]. For x86, 11062 // we want this argument to be a char*&; for x86-64, we want 11063 // it to be a __va_list_tag*. 11064 Type = Context.getBuiltinVaListType(); 11065 assert(!Type.isNull() && "builtin va list type not initialized!"); 11066 if (Type->isArrayType()) 11067 Type = Context.getArrayDecayedType(Type); 11068 else 11069 Type = Context.getLValueReferenceType(Type); 11070 break; 11071 case 'q': { 11072 char *End; 11073 unsigned NumElements = strtoul(Str, &End, 10); 11074 assert(End != Str && "Missing vector size"); 11075 Str = End; 11076 11077 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, 11078 RequiresICE, false); 11079 assert(!RequiresICE && "Can't require vector ICE"); 11080 11081 Type = Context.getScalableVectorType(ElementType, NumElements); 11082 break; 11083 } 11084 case 'V': { 11085 char *End; 11086 unsigned NumElements = strtoul(Str, &End, 10); 11087 assert(End != Str && "Missing vector size"); 11088 Str = End; 11089 11090 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, 11091 RequiresICE, false); 11092 assert(!RequiresICE && "Can't require vector ICE"); 11093 11094 // TODO: No way to make AltiVec vectors in builtins yet. 
11095 Type = Context.getVectorType(ElementType, NumElements, 11096 VectorType::GenericVector); 11097 break; 11098 } 11099 case 'E': { 11100 char *End; 11101 11102 unsigned NumElements = strtoul(Str, &End, 10); 11103 assert(End != Str && "Missing vector size"); 11104 11105 Str = End; 11106 11107 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, 11108 false); 11109 Type = Context.getExtVectorType(ElementType, NumElements); 11110 break; 11111 } 11112 case 'X': { 11113 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, 11114 false); 11115 assert(!RequiresICE && "Can't require complex ICE"); 11116 Type = Context.getComplexType(ElementType); 11117 break; 11118 } 11119 case 'Y': 11120 Type = Context.getPointerDiffType(); 11121 break; 11122 case 'P': 11123 Type = Context.getFILEType(); 11124 if (Type.isNull()) { 11125 Error = ASTContext::GE_Missing_stdio; 11126 return {}; 11127 } 11128 break; 11129 case 'J': 11130 if (Signed) 11131 Type = Context.getsigjmp_bufType(); 11132 else 11133 Type = Context.getjmp_bufType(); 11134 11135 if (Type.isNull()) { 11136 Error = ASTContext::GE_Missing_setjmp; 11137 return {}; 11138 } 11139 break; 11140 case 'K': 11141 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!"); 11142 Type = Context.getucontext_tType(); 11143 11144 if (Type.isNull()) { 11145 Error = ASTContext::GE_Missing_ucontext; 11146 return {}; 11147 } 11148 break; 11149 case 'p': 11150 Type = Context.getProcessIDType(); 11151 break; 11152 } 11153 11154 // If there are modifiers and if we're allowed to parse them, go for it. 11155 Done = !AllowTypeModifiers; 11156 while (!Done) { 11157 switch (char c = *Str++) { 11158 default: Done = true; --Str; break; 11159 case '*': 11160 case '&': { 11161 // Both pointers and references can have their pointee types 11162 // qualified with an address space. 11163 char *End; 11164 unsigned AddrSpace = strtoul(Str, &End, 10); 11165 if (End != Str) { 11166 // Note AddrSpace == 0 is not the same as an unspecified address space. 11167 Type = Context.getAddrSpaceQualType( 11168 Type, 11169 Context.getLangASForBuiltinAddressSpace(AddrSpace)); 11170 Str = End; 11171 } 11172 if (c == '*') 11173 Type = Context.getPointerType(Type); 11174 else 11175 Type = Context.getLValueReferenceType(Type); 11176 break; 11177 } 11178 // FIXME: There's no way to have a built-in with an rvalue ref arg. 11179 case 'C': 11180 Type = Type.withConst(); 11181 break; 11182 case 'D': 11183 Type = Context.getVolatileType(Type); 11184 break; 11185 case 'R': 11186 Type = Type.withRestrict(); 11187 break; 11188 } 11189 } 11190 11191 assert((!RequiresICE || Type->isIntegralOrEnumerationType()) && 11192 "Integer constant 'I' type must be an integer"); 11193 11194 return Type; 11195 } 11196 11197 // On some targets such as PowerPC, some of the builtins are defined with custom 11198 // type descriptors for target-dependent types. These descriptors are decoded in 11199 // other functions, but it may be useful to be able to fall back to default 11200 // descriptor decoding to define builtins mixing target-dependent and target- 11201 // independent types. This function allows decoding one type descriptor with 11202 // default decoding. 
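//
// Illustrative sketch (not part of the original source; 'Ctx' stands for an
// ASTContext). Using only the descriptor letters handled by DecodeTypeFromStr
// above, the descriptor "vC*" decodes as follows: 'v' yields 'void', the
// trailing 'C' adds 'const', and '*' wraps the result in a pointer, producing
// 'const void *':
//
//   const char *Desc = "vC*";
//   ASTContext::GetBuiltinTypeError Err = ASTContext::GE_None;
//   bool IsICE = false;
//   QualType Ty = Ctx.DecodeTypeStr(Desc, Ctx, Err, IsICE,
//                                   /*AllowTypeModifiers=*/true);
//   // Ty is 'const void *'; Desc now points past the consumed characters.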
11203 QualType ASTContext::DecodeTypeStr(const char *&Str, const ASTContext &Context, 11204 GetBuiltinTypeError &Error, bool &RequireICE, 11205 bool AllowTypeModifiers) const { 11206 return DecodeTypeFromStr(Str, Context, Error, RequireICE, AllowTypeModifiers); 11207 } 11208 11209 /// GetBuiltinType - Return the type for the specified builtin. 11210 QualType ASTContext::GetBuiltinType(unsigned Id, 11211 GetBuiltinTypeError &Error, 11212 unsigned *IntegerConstantArgs) const { 11213 const char *TypeStr = BuiltinInfo.getTypeString(Id); 11214 if (TypeStr[0] == '\0') { 11215 Error = GE_Missing_type; 11216 return {}; 11217 } 11218 11219 SmallVector<QualType, 8> ArgTypes; 11220 11221 bool RequiresICE = false; 11222 Error = GE_None; 11223 QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error, 11224 RequiresICE, true); 11225 if (Error != GE_None) 11226 return {}; 11227 11228 assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE"); 11229 11230 while (TypeStr[0] && TypeStr[0] != '.') { 11231 QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true); 11232 if (Error != GE_None) 11233 return {}; 11234 11235 // If this argument is required to be an IntegerConstantExpression and the 11236 // caller cares, fill in the bitmask we return. 11237 if (RequiresICE && IntegerConstantArgs) 11238 *IntegerConstantArgs |= 1 << ArgTypes.size(); 11239 11240 // Do array -> pointer decay. The builtin should use the decayed type. 11241 if (Ty->isArrayType()) 11242 Ty = getArrayDecayedType(Ty); 11243 11244 ArgTypes.push_back(Ty); 11245 } 11246 11247 if (Id == Builtin::BI__GetExceptionInfo) 11248 return {}; 11249 11250 assert((TypeStr[0] != '.' || TypeStr[1] == 0) && 11251 "'.' should only occur at end of builtin type list!"); 11252 11253 bool Variadic = (TypeStr[0] == '.'); 11254 11255 FunctionType::ExtInfo EI(getDefaultCallingConvention( 11256 Variadic, /*IsCXXMethod=*/false, /*IsBuiltin=*/true)); 11257 if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true); 11258 11259 11260 // We really shouldn't be making a no-proto type here. 11261 if (ArgTypes.empty() && Variadic && !getLangOpts().requiresStrictPrototypes()) 11262 return getFunctionNoProtoType(ResType, EI); 11263 11264 FunctionProtoType::ExtProtoInfo EPI; 11265 EPI.ExtInfo = EI; 11266 EPI.Variadic = Variadic; 11267 if (getLangOpts().CPlusPlus && BuiltinInfo.isNoThrow(Id)) 11268 EPI.ExceptionSpec.Type = 11269 getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone; 11270 11271 return getFunctionType(ResType, ArgTypes, EPI); 11272 } 11273 11274 static GVALinkage basicGVALinkageForFunction(const ASTContext &Context, 11275 const FunctionDecl *FD) { 11276 if (!FD->isExternallyVisible()) 11277 return GVA_Internal; 11278 11279 // Non-user-provided functions get emitted as weak definitions with every 11280 // use, no matter whether they've been explicitly instantiated etc. 
11281 if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
11282 if (!MD->isUserProvided())
11283 return GVA_DiscardableODR;
11284
11285 GVALinkage External;
11286 switch (FD->getTemplateSpecializationKind()) {
11287 case TSK_Undeclared:
11288 case TSK_ExplicitSpecialization:
11289 External = GVA_StrongExternal;
11290 break;
11291
11292 case TSK_ExplicitInstantiationDefinition:
11293 return GVA_StrongODR;
11294
11295 // C++11 [temp.explicit]p10:
11296 // [ Note: The intent is that an inline function that is the subject of
11297 // an explicit instantiation declaration will still be implicitly
11298 // instantiated when used so that the body can be considered for
11299 // inlining, but that no out-of-line copy of the inline function would be
11300 // generated in the translation unit. -- end note ]
11301 case TSK_ExplicitInstantiationDeclaration:
11302 return GVA_AvailableExternally;
11303
11304 case TSK_ImplicitInstantiation:
11305 External = GVA_DiscardableODR;
11306 break;
11307 }
11308
11309 if (!FD->isInlined())
11310 return External;
11311
11312 if ((!Context.getLangOpts().CPlusPlus &&
11313 !Context.getTargetInfo().getCXXABI().isMicrosoft() &&
11314 !FD->hasAttr<DLLExportAttr>()) ||
11315 FD->hasAttr<GNUInlineAttr>()) {
11316 // FIXME: This doesn't match gcc's behavior for dllexport inline functions.
11317
11318 // GNU or C99 inline semantics. Determine whether this symbol should be
11319 // externally visible.
11320 if (FD->isInlineDefinitionExternallyVisible())
11321 return External;
11322
11323 // C99 inline semantics, where the symbol is not externally visible.
11324 return GVA_AvailableExternally;
11325 }
11326
11327 // Functions specified with extern and inline in -fms-compatibility mode
11328 // forcibly get emitted. While the body of the function cannot be later
11329 // replaced, the function definition cannot be discarded.
11330 if (FD->isMSExternInline())
11331 return GVA_StrongODR;
11332
11333 return GVA_DiscardableODR;
11334 }
11335
11336 static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context,
11337 const Decl *D, GVALinkage L) {
11338 // See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx
11339 // dllexport/dllimport on inline functions.
11340 if (D->hasAttr<DLLImportAttr>()) {
11341 if (L == GVA_DiscardableODR || L == GVA_StrongODR)
11342 return GVA_AvailableExternally;
11343 } else if (D->hasAttr<DLLExportAttr>()) {
11344 if (L == GVA_DiscardableODR)
11345 return GVA_StrongODR;
11346 } else if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice) {
11347 // Device-side functions with __global__ attribute must always be
11348 // visible externally so they can be launched from host.
11349 if (D->hasAttr<CUDAGlobalAttr>() &&
11350 (L == GVA_DiscardableODR || L == GVA_Internal))
11351 return GVA_StrongODR;
11352 // Single-source offloading languages like CUDA/HIP need to be able to
11353 // access static device variables from host code of the same compilation
11354 // unit. This is done by externalizing the static variable with a name
11355 // shared between the host and device compilations; the name is the same
11356 // within a compilation unit but differs between different compilation
11357 // units.
11358 if (Context.shouldExternalize(D))
11359 return GVA_StrongExternal;
11360 }
11361 return L;
11362 }
11363
11364 /// Adjust the GVALinkage for a declaration based on what an external AST source
11365 /// knows about whether there can be other definitions of this declaration.
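//
// Illustrative note (not part of the original source; 'Ctx' and 'FD' stand for
// the context and function in question): GetGVALinkageForFunction below
// composes the three adjustment steps; an equivalent sketch is
//
//   GVALinkage L = basicGVALinkageForFunction(Ctx, FD);
//   L = adjustGVALinkageForAttributes(Ctx, FD, L);
//   L = adjustGVALinkageForExternalDefinitionKind(Ctx, FD, L);
//
// i.e. attribute-based adjustments are applied before consulting the external
// AST source.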
11366 static GVALinkage 11367 adjustGVALinkageForExternalDefinitionKind(const ASTContext &Ctx, const Decl *D, 11368 GVALinkage L) { 11369 ExternalASTSource *Source = Ctx.getExternalSource(); 11370 if (!Source) 11371 return L; 11372 11373 switch (Source->hasExternalDefinitions(D)) { 11374 case ExternalASTSource::EK_Never: 11375 // Other translation units rely on us to provide the definition. 11376 if (L == GVA_DiscardableODR) 11377 return GVA_StrongODR; 11378 break; 11379 11380 case ExternalASTSource::EK_Always: 11381 return GVA_AvailableExternally; 11382 11383 case ExternalASTSource::EK_ReplyHazy: 11384 break; 11385 } 11386 return L; 11387 } 11388 11389 GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const { 11390 return adjustGVALinkageForExternalDefinitionKind(*this, FD, 11391 adjustGVALinkageForAttributes(*this, FD, 11392 basicGVALinkageForFunction(*this, FD))); 11393 } 11394 11395 static GVALinkage basicGVALinkageForVariable(const ASTContext &Context, 11396 const VarDecl *VD) { 11397 if (!VD->isExternallyVisible()) 11398 return GVA_Internal; 11399 11400 if (VD->isStaticLocal()) { 11401 const DeclContext *LexicalContext = VD->getParentFunctionOrMethod(); 11402 while (LexicalContext && !isa<FunctionDecl>(LexicalContext)) 11403 LexicalContext = LexicalContext->getLexicalParent(); 11404 11405 // ObjC Blocks can create local variables that don't have a FunctionDecl 11406 // LexicalContext. 11407 if (!LexicalContext) 11408 return GVA_DiscardableODR; 11409 11410 // Otherwise, let the static local variable inherit its linkage from the 11411 // nearest enclosing function. 11412 auto StaticLocalLinkage = 11413 Context.GetGVALinkageForFunction(cast<FunctionDecl>(LexicalContext)); 11414 11415 // Itanium ABI 5.2.2: "Each COMDAT group [for a static local variable] must 11416 // be emitted in any object with references to the symbol for the object it 11417 // contains, whether inline or out-of-line." 11418 // Similar behavior is observed with MSVC. An alternative ABI could use 11419 // StrongODR/AvailableExternally to match the function, but none are 11420 // known/supported currently. 11421 if (StaticLocalLinkage == GVA_StrongODR || 11422 StaticLocalLinkage == GVA_AvailableExternally) 11423 return GVA_DiscardableODR; 11424 return StaticLocalLinkage; 11425 } 11426 11427 // MSVC treats in-class initialized static data members as definitions. 11428 // By giving them non-strong linkage, out-of-line definitions won't 11429 // cause link errors. 11430 if (Context.isMSStaticDataMemberInlineDefinition(VD)) 11431 return GVA_DiscardableODR; 11432 11433 // Most non-template variables have strong linkage; inline variables are 11434 // linkonce_odr or (occasionally, for compatibility) weak_odr. 11435 GVALinkage StrongLinkage; 11436 switch (Context.getInlineVariableDefinitionKind(VD)) { 11437 case ASTContext::InlineVariableDefinitionKind::None: 11438 StrongLinkage = GVA_StrongExternal; 11439 break; 11440 case ASTContext::InlineVariableDefinitionKind::Weak: 11441 case ASTContext::InlineVariableDefinitionKind::WeakUnknown: 11442 StrongLinkage = GVA_DiscardableODR; 11443 break; 11444 case ASTContext::InlineVariableDefinitionKind::Strong: 11445 StrongLinkage = GVA_StrongODR; 11446 break; 11447 } 11448 11449 switch (VD->getTemplateSpecializationKind()) { 11450 case TSK_Undeclared: 11451 return StrongLinkage; 11452 11453 case TSK_ExplicitSpecialization: 11454 return Context.getTargetInfo().getCXXABI().isMicrosoft() && 11455 VD->isStaticDataMember() 11456 ? 
GVA_StrongODR 11457 : StrongLinkage; 11458 11459 case TSK_ExplicitInstantiationDefinition: 11460 return GVA_StrongODR; 11461 11462 case TSK_ExplicitInstantiationDeclaration: 11463 return GVA_AvailableExternally; 11464 11465 case TSK_ImplicitInstantiation: 11466 return GVA_DiscardableODR; 11467 } 11468 11469 llvm_unreachable("Invalid Linkage!"); 11470 } 11471 11472 GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) { 11473 return adjustGVALinkageForExternalDefinitionKind(*this, VD, 11474 adjustGVALinkageForAttributes(*this, VD, 11475 basicGVALinkageForVariable(*this, VD))); 11476 } 11477 11478 bool ASTContext::DeclMustBeEmitted(const Decl *D) { 11479 if (const auto *VD = dyn_cast<VarDecl>(D)) { 11480 if (!VD->isFileVarDecl()) 11481 return false; 11482 // Global named register variables (GNU extension) are never emitted. 11483 if (VD->getStorageClass() == SC_Register) 11484 return false; 11485 if (VD->getDescribedVarTemplate() || 11486 isa<VarTemplatePartialSpecializationDecl>(VD)) 11487 return false; 11488 } else if (const auto *FD = dyn_cast<FunctionDecl>(D)) { 11489 // We never need to emit an uninstantiated function template. 11490 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate) 11491 return false; 11492 } else if (isa<PragmaCommentDecl>(D)) 11493 return true; 11494 else if (isa<PragmaDetectMismatchDecl>(D)) 11495 return true; 11496 else if (isa<OMPRequiresDecl>(D)) 11497 return true; 11498 else if (isa<OMPThreadPrivateDecl>(D)) 11499 return !D->getDeclContext()->isDependentContext(); 11500 else if (isa<OMPAllocateDecl>(D)) 11501 return !D->getDeclContext()->isDependentContext(); 11502 else if (isa<OMPDeclareReductionDecl>(D) || isa<OMPDeclareMapperDecl>(D)) 11503 return !D->getDeclContext()->isDependentContext(); 11504 else if (isa<ImportDecl>(D)) 11505 return true; 11506 else 11507 return false; 11508 11509 // If this is a member of a class template, we do not need to emit it. 11510 if (D->getDeclContext()->isDependentContext()) 11511 return false; 11512 11513 // Weak references don't produce any output by themselves. 11514 if (D->hasAttr<WeakRefAttr>()) 11515 return false; 11516 11517 // Aliases and used decls are required. 11518 if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>()) 11519 return true; 11520 11521 if (const auto *FD = dyn_cast<FunctionDecl>(D)) { 11522 // Forward declarations aren't required. 11523 if (!FD->doesThisDeclarationHaveABody()) 11524 return FD->doesDeclarationForceExternallyVisibleDefinition(); 11525 11526 // Constructors and destructors are required. 11527 if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>()) 11528 return true; 11529 11530 // The key function for a class is required. This rule only comes 11531 // into play when inline functions can be key functions, though. 11532 if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) { 11533 if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) { 11534 const CXXRecordDecl *RD = MD->getParent(); 11535 if (MD->isOutOfLine() && RD->isDynamicClass()) { 11536 const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD); 11537 if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl()) 11538 return true; 11539 } 11540 } 11541 } 11542 11543 GVALinkage Linkage = GetGVALinkageForFunction(FD); 11544 11545 // static, static inline, always_inline, and extern inline functions can 11546 // always be deferred. Normal inline functions can be deferred in C99/C++. 11547 // Implicit template instantiations can also be deferred in C++. 
11548 return !isDiscardableGVALinkage(Linkage);
11549 }
11550
11551 const auto *VD = cast<VarDecl>(D);
11552 assert(VD->isFileVarDecl() && "Expected file scoped var");
11553
11554 // If the decl is marked as `declare target to`, it should be emitted for the
11555 // host and for the device.
11556 if (LangOpts.OpenMP &&
11557 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
11558 return true;
11559
11560 if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly &&
11561 !isMSStaticDataMemberInlineDefinition(VD))
11562 return false;
11563
11564 // Variables that can be needed in other TUs are required.
11565 auto Linkage = GetGVALinkageForVariable(VD);
11566 if (!isDiscardableGVALinkage(Linkage))
11567 return true;
11568
11569 // We never need to emit a variable that is available in another TU.
11570 if (Linkage == GVA_AvailableExternally)
11571 return false;
11572
11573 // Variables that have destruction with side-effects are required.
11574 if (VD->needsDestruction(*this))
11575 return true;
11576
11577 // Variables that have initialization with side-effects are required.
11578 if (VD->getInit() && VD->getInit()->HasSideEffects(*this) &&
11579 // We can get a value-dependent initializer during error recovery.
11580 (VD->getInit()->isValueDependent() || !VD->evaluateValue()))
11581 return true;
11582
11583 // Likewise, variables with tuple-like bindings are required if their
11584 // bindings have side-effects.
11585 if (const auto *DD = dyn_cast<DecompositionDecl>(VD))
11586 for (const auto *BD : DD->bindings())
11587 if (const auto *BindingVD = BD->getHoldingVar())
11588 if (DeclMustBeEmitted(BindingVD))
11589 return true;
11590
11591 return false;
11592 }
11593
11594 void ASTContext::forEachMultiversionedFunctionVersion(
11595 const FunctionDecl *FD,
11596 llvm::function_ref<void(FunctionDecl *)> Pred) const {
11597 assert(FD->isMultiVersion() && "Only valid for multiversioned functions");
11598 llvm::SmallDenseSet<const FunctionDecl*, 4> SeenDecls;
11599 FD = FD->getMostRecentDecl();
11600 // FIXME: The order of traversal here matters and depends on the order of
11601 // lookup results, which happens to be (mostly) oldest-to-newest, but we
11602 // shouldn't rely on that.
11603 for (auto *CurDecl :
11604 FD->getDeclContext()->getRedeclContext()->lookup(FD->getDeclName())) {
11605 FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl();
11606 if (CurFD && hasSameType(CurFD->getType(), FD->getType()) &&
11607 std::end(SeenDecls) == llvm::find(SeenDecls, CurFD)) {
11608 SeenDecls.insert(CurFD);
11609 Pred(CurFD);
11610 }
11611 }
11612 }
11613
11614 CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic,
11615 bool IsCXXMethod,
11616 bool IsBuiltin) const {
11617 // Pass through to the C++ ABI object
11618 if (IsCXXMethod)
11619 return ABI->getDefaultMethodCallConv(IsVariadic);
11620
11621 // Builtins ignore the user-specified default calling convention and always
11622 // use the target's default calling convention.
11623 if (!IsBuiltin) {
11624 switch (LangOpts.getDefaultCallingConv()) {
11625 case LangOptions::DCC_None:
11626 break;
11627 case LangOptions::DCC_CDecl:
11628 return CC_C;
11629 case LangOptions::DCC_FastCall:
11630 if (getTargetInfo().hasFeature("sse2") && !IsVariadic)
11631 return CC_X86FastCall;
11632 break;
11633 case LangOptions::DCC_StdCall:
11634 if (!IsVariadic)
11635 return CC_X86StdCall;
11636 break;
11637 case LangOptions::DCC_VectorCall:
11638 // __vectorcall cannot be applied to variadic functions.
11639 if (!IsVariadic) 11640 return CC_X86VectorCall; 11641 break; 11642 case LangOptions::DCC_RegCall: 11643 // __regcall cannot be applied to variadic functions. 11644 if (!IsVariadic) 11645 return CC_X86RegCall; 11646 break; 11647 } 11648 } 11649 return Target->getDefaultCallingConv(); 11650 } 11651 11652 bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const { 11653 // Pass through to the C++ ABI object 11654 return ABI->isNearlyEmpty(RD); 11655 } 11656 11657 VTableContextBase *ASTContext::getVTableContext() { 11658 if (!VTContext.get()) { 11659 auto ABI = Target->getCXXABI(); 11660 if (ABI.isMicrosoft()) 11661 VTContext.reset(new MicrosoftVTableContext(*this)); 11662 else { 11663 auto ComponentLayout = getLangOpts().RelativeCXXABIVTables 11664 ? ItaniumVTableContext::Relative 11665 : ItaniumVTableContext::Pointer; 11666 VTContext.reset(new ItaniumVTableContext(*this, ComponentLayout)); 11667 } 11668 } 11669 return VTContext.get(); 11670 } 11671 11672 MangleContext *ASTContext::createMangleContext(const TargetInfo *T) { 11673 if (!T) 11674 T = Target; 11675 switch (T->getCXXABI().getKind()) { 11676 case TargetCXXABI::AppleARM64: 11677 case TargetCXXABI::Fuchsia: 11678 case TargetCXXABI::GenericAArch64: 11679 case TargetCXXABI::GenericItanium: 11680 case TargetCXXABI::GenericARM: 11681 case TargetCXXABI::GenericMIPS: 11682 case TargetCXXABI::iOS: 11683 case TargetCXXABI::WebAssembly: 11684 case TargetCXXABI::WatchOS: 11685 case TargetCXXABI::XL: 11686 return ItaniumMangleContext::create(*this, getDiagnostics()); 11687 case TargetCXXABI::Microsoft: 11688 return MicrosoftMangleContext::create(*this, getDiagnostics()); 11689 } 11690 llvm_unreachable("Unsupported ABI"); 11691 } 11692 11693 MangleContext *ASTContext::createDeviceMangleContext(const TargetInfo &T) { 11694 assert(T.getCXXABI().getKind() != TargetCXXABI::Microsoft && 11695 "Device mangle context does not support Microsoft mangling."); 11696 switch (T.getCXXABI().getKind()) { 11697 case TargetCXXABI::AppleARM64: 11698 case TargetCXXABI::Fuchsia: 11699 case TargetCXXABI::GenericAArch64: 11700 case TargetCXXABI::GenericItanium: 11701 case TargetCXXABI::GenericARM: 11702 case TargetCXXABI::GenericMIPS: 11703 case TargetCXXABI::iOS: 11704 case TargetCXXABI::WebAssembly: 11705 case TargetCXXABI::WatchOS: 11706 case TargetCXXABI::XL: 11707 return ItaniumMangleContext::create( 11708 *this, getDiagnostics(), 11709 [](ASTContext &, const NamedDecl *ND) -> llvm::Optional<unsigned> { 11710 if (const auto *RD = dyn_cast<CXXRecordDecl>(ND)) 11711 return RD->getDeviceLambdaManglingNumber(); 11712 return llvm::None; 11713 }, 11714 /*IsAux=*/true); 11715 case TargetCXXABI::Microsoft: 11716 return MicrosoftMangleContext::create(*this, getDiagnostics(), 11717 /*IsAux=*/true); 11718 } 11719 llvm_unreachable("Unsupported ABI"); 11720 } 11721 11722 CXXABI::~CXXABI() = default; 11723 11724 size_t ASTContext::getSideTableAllocatedMemory() const { 11725 return ASTRecordLayouts.getMemorySize() + 11726 llvm::capacity_in_bytes(ObjCLayouts) + 11727 llvm::capacity_in_bytes(KeyFunctions) + 11728 llvm::capacity_in_bytes(ObjCImpls) + 11729 llvm::capacity_in_bytes(BlockVarCopyInits) + 11730 llvm::capacity_in_bytes(DeclAttrs) + 11731 llvm::capacity_in_bytes(TemplateOrInstantiation) + 11732 llvm::capacity_in_bytes(InstantiatedFromUsingDecl) + 11733 llvm::capacity_in_bytes(InstantiatedFromUsingShadowDecl) + 11734 llvm::capacity_in_bytes(InstantiatedFromUnnamedFieldDecl) + 11735 llvm::capacity_in_bytes(OverriddenMethods) + 11736 
llvm::capacity_in_bytes(Types) +
11737 llvm::capacity_in_bytes(VariableArrayTypes);
11738 }
11739
11740 /// getIntTypeForBitwidth -
11741 /// sets the integer QualType according to the specified details:
11742 /// bitwidth, signed/unsigned.
11743 /// Returns an empty type if there is no appropriate target type.
11744 QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth,
11745 unsigned Signed) const {
11746 TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(DestWidth, Signed);
11747 CanQualType QualTy = getFromTargetType(Ty);
11748 if (!QualTy && DestWidth == 128)
11749 return Signed ? Int128Ty : UnsignedInt128Ty;
11750 return QualTy;
11751 }
11752
11753 /// getRealTypeForBitwidth -
11754 /// sets the floating-point QualType according to the specified bitwidth.
11755 /// Returns an empty type if there is no appropriate target type.
11756 QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth,
11757 FloatModeKind ExplicitType) const {
11758 FloatModeKind Ty =
11759 getTargetInfo().getRealTypeByWidth(DestWidth, ExplicitType);
11760 switch (Ty) {
11761 case FloatModeKind::Float:
11762 return FloatTy;
11763 case FloatModeKind::Double:
11764 return DoubleTy;
11765 case FloatModeKind::LongDouble:
11766 return LongDoubleTy;
11767 case FloatModeKind::Float128:
11768 return Float128Ty;
11769 case FloatModeKind::Ibm128:
11770 return Ibm128Ty;
11771 case FloatModeKind::NoFloat:
11772 return {};
11773 }
11774
11775 llvm_unreachable("Unhandled FloatModeKind value");
11776 }
11777
11778 void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) {
11779 if (Number > 1)
11780 MangleNumbers[ND] = Number;
11781 }
11782
11783 unsigned ASTContext::getManglingNumber(const NamedDecl *ND,
11784 bool ForAuxTarget) const {
11785 auto I = MangleNumbers.find(ND);
11786 unsigned Res = I != MangleNumbers.end() ? I->second : 1;
11787 // CUDA/HIP host compilation encodes host and device mangling numbers
11788 // as the lower and upper halves of a 32-bit integer.
11789 if (LangOpts.CUDA && !LangOpts.CUDAIsDevice) {
11790 Res = ForAuxTarget ? Res >> 16 : Res & 0xFFFF;
11791 } else {
11792 assert(!ForAuxTarget && "Only CUDA/HIP host compilation supports mangling "
11793 "number for aux target");
11794 }
11795 return Res > 1 ? Res : 1;
11796 }
11797
11798 void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) {
11799 if (Number > 1)
11800 StaticLocalNumbers[VD] = Number;
11801 }
11802
11803 unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const {
11804 auto I = StaticLocalNumbers.find(VD);
11805 return I != StaticLocalNumbers.end() ? I->second : 1;
11806 }
11807
11808 MangleNumberingContext &
11809 ASTContext::getManglingNumberContext(const DeclContext *DC) {
11810 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
11811 std::unique_ptr<MangleNumberingContext> &MCtx = MangleNumberingContexts[DC];
11812 if (!MCtx)
11813 MCtx = createMangleNumberingContext();
11814 return *MCtx;
11815 }
11816
11817 MangleNumberingContext &
11818 ASTContext::getManglingNumberContext(NeedExtraManglingDecl_t, const Decl *D) {
11819 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
11820 std::unique_ptr<MangleNumberingContext> &MCtx = 11821 ExtraMangleNumberingContexts[D]; 11822 if (!MCtx) 11823 MCtx = createMangleNumberingContext(); 11824 return *MCtx; 11825 } 11826 11827 std::unique_ptr<MangleNumberingContext> 11828 ASTContext::createMangleNumberingContext() const { 11829 return ABI->createMangleNumberingContext(); 11830 } 11831 11832 const CXXConstructorDecl * 11833 ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) { 11834 return ABI->getCopyConstructorForExceptionObject( 11835 cast<CXXRecordDecl>(RD->getFirstDecl())); 11836 } 11837 11838 void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD, 11839 CXXConstructorDecl *CD) { 11840 return ABI->addCopyConstructorForExceptionObject( 11841 cast<CXXRecordDecl>(RD->getFirstDecl()), 11842 cast<CXXConstructorDecl>(CD->getFirstDecl())); 11843 } 11844 11845 void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD, 11846 TypedefNameDecl *DD) { 11847 return ABI->addTypedefNameForUnnamedTagDecl(TD, DD); 11848 } 11849 11850 TypedefNameDecl * 11851 ASTContext::getTypedefNameForUnnamedTagDecl(const TagDecl *TD) { 11852 return ABI->getTypedefNameForUnnamedTagDecl(TD); 11853 } 11854 11855 void ASTContext::addDeclaratorForUnnamedTagDecl(TagDecl *TD, 11856 DeclaratorDecl *DD) { 11857 return ABI->addDeclaratorForUnnamedTagDecl(TD, DD); 11858 } 11859 11860 DeclaratorDecl *ASTContext::getDeclaratorForUnnamedTagDecl(const TagDecl *TD) { 11861 return ABI->getDeclaratorForUnnamedTagDecl(TD); 11862 } 11863 11864 void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) { 11865 ParamIndices[D] = index; 11866 } 11867 11868 unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const { 11869 ParameterIndexTable::const_iterator I = ParamIndices.find(D); 11870 assert(I != ParamIndices.end() && 11871 "ParmIndices lacks entry set by ParmVarDecl"); 11872 return I->second; 11873 } 11874 11875 QualType ASTContext::getStringLiteralArrayType(QualType EltTy, 11876 unsigned Length) const { 11877 // A C++ string literal has a const-qualified element type (C++ 2.13.4p1). 11878 if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings) 11879 EltTy = EltTy.withConst(); 11880 11881 EltTy = adjustStringLiteralBaseType(EltTy); 11882 11883 // Get an array type for the string, according to C99 6.4.5. This includes 11884 // the null terminator character. 
11885 return getConstantArrayType(EltTy, llvm::APInt(32, Length + 1), nullptr,
11886 ArrayType::Normal, /*IndexTypeQuals*/ 0);
11887 }
11888
11889 StringLiteral *
11890 ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const {
11891 StringLiteral *&Result = StringLiteralCache[Key];
11892 if (!Result)
11893 Result = StringLiteral::Create(
11894 *this, Key, StringLiteral::Ascii,
11895 /*Pascal*/ false, getStringLiteralArrayType(CharTy, Key.size()),
11896 SourceLocation());
11897 return Result;
11898 }
11899
11900 MSGuidDecl *
11901 ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const {
11902 assert(MSGuidTagDecl && "building MS GUID without MS extensions?");
11903
11904 llvm::FoldingSetNodeID ID;
11905 MSGuidDecl::Profile(ID, Parts);
11906
11907 void *InsertPos;
11908 if (MSGuidDecl *Existing = MSGuidDecls.FindNodeOrInsertPos(ID, InsertPos))
11909 return Existing;
11910
11911 QualType GUIDType = getMSGuidType().withConst();
11912 MSGuidDecl *New = MSGuidDecl::Create(*this, GUIDType, Parts);
11913 MSGuidDecls.InsertNode(New, InsertPos);
11914 return New;
11915 }
11916
11917 UnnamedGlobalConstantDecl *
11918 ASTContext::getUnnamedGlobalConstantDecl(QualType Ty,
11919 const APValue &APVal) const {
11920 llvm::FoldingSetNodeID ID;
11921 UnnamedGlobalConstantDecl::Profile(ID, Ty, APVal);
11922
11923 void *InsertPos;
11924 if (UnnamedGlobalConstantDecl *Existing =
11925 UnnamedGlobalConstantDecls.FindNodeOrInsertPos(ID, InsertPos))
11926 return Existing;
11927
11928 UnnamedGlobalConstantDecl *New =
11929 UnnamedGlobalConstantDecl::Create(*this, Ty, APVal);
11930 UnnamedGlobalConstantDecls.InsertNode(New, InsertPos);
11931 return New;
11932 }
11933
11934 TemplateParamObjectDecl *
11935 ASTContext::getTemplateParamObjectDecl(QualType T, const APValue &V) const {
11936 assert(T->isRecordType() && "template param object of unexpected type");
11937
11938 // C++ [temp.param]p8:
11939 // [...] a static storage duration object of type 'const T' [...]
11940 T.addConst();
11941
11942 llvm::FoldingSetNodeID ID;
11943 TemplateParamObjectDecl::Profile(ID, T, V);
11944
11945 void *InsertPos;
11946 if (TemplateParamObjectDecl *Existing =
11947 TemplateParamObjectDecls.FindNodeOrInsertPos(ID, InsertPos))
11948 return Existing;
11949
11950 TemplateParamObjectDecl *New = TemplateParamObjectDecl::Create(*this, T, V);
11951 TemplateParamObjectDecls.InsertNode(New, InsertPos);
11952 return New;
11953 }
11954
11955 bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
11956 const llvm::Triple &T = getTargetInfo().getTriple();
11957 if (!T.isOSDarwin())
11958 return false;
11959
11960 if (!(T.isiOS() && T.isOSVersionLT(7)) &&
11961 !(T.isMacOSX() && T.isOSVersionLT(10, 9)))
11962 return false;
11963
11964 QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
11965 CharUnits sizeChars = getTypeSizeInChars(AtomicTy);
11966 uint64_t Size = sizeChars.getQuantity();
11967 CharUnits alignChars = getTypeAlignInChars(AtomicTy);
11968 unsigned Align = alignChars.getQuantity();
11969 unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth();
11970 return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits);
11971 }
11972
11973 bool
11974 ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl,
11975 const ObjCMethodDecl *MethodImpl) {
11976 // No point trying to match an unavailable/deprecated method.
11977 if (MethodDecl->hasAttr<UnavailableAttr>() 11978 || MethodDecl->hasAttr<DeprecatedAttr>()) 11979 return false; 11980 if (MethodDecl->getObjCDeclQualifier() != 11981 MethodImpl->getObjCDeclQualifier()) 11982 return false; 11983 if (!hasSameType(MethodDecl->getReturnType(), MethodImpl->getReturnType())) 11984 return false; 11985 11986 if (MethodDecl->param_size() != MethodImpl->param_size()) 11987 return false; 11988 11989 for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(), 11990 IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(), 11991 EF = MethodDecl->param_end(); 11992 IM != EM && IF != EF; ++IM, ++IF) { 11993 const ParmVarDecl *DeclVar = (*IF); 11994 const ParmVarDecl *ImplVar = (*IM); 11995 if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier()) 11996 return false; 11997 if (!hasSameType(DeclVar->getType(), ImplVar->getType())) 11998 return false; 11999 } 12000 12001 return (MethodDecl->isVariadic() == MethodImpl->isVariadic()); 12002 } 12003 12004 uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const { 12005 LangAS AS; 12006 if (QT->getUnqualifiedDesugaredType()->isNullPtrType()) 12007 AS = LangAS::Default; 12008 else 12009 AS = QT->getPointeeType().getAddressSpace(); 12010 12011 return getTargetInfo().getNullPointerValue(AS); 12012 } 12013 12014 unsigned ASTContext::getTargetAddressSpace(QualType T) const { 12015 // Return the address space for the type. If the type is a 12016 // function type without an address space qualifier, the 12017 // program address space is used. Otherwise, the target picks 12018 // the best address space based on the type information 12019 return T->isFunctionType() && !T.hasAddressSpace() 12020 ? getTargetInfo().getProgramAddressSpace() 12021 : getTargetAddressSpace(T.getQualifiers()); 12022 } 12023 12024 unsigned ASTContext::getTargetAddressSpace(Qualifiers Q) const { 12025 return getTargetAddressSpace(Q.getAddressSpace()); 12026 } 12027 12028 unsigned ASTContext::getTargetAddressSpace(LangAS AS) const { 12029 if (isTargetAddressSpace(AS)) 12030 return toTargetAddressSpace(AS); 12031 else 12032 return (*AddrSpaceMap)[(unsigned)AS]; 12033 } 12034 12035 QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const { 12036 assert(Ty->isFixedPointType()); 12037 12038 if (Ty->isSaturatedFixedPointType()) return Ty; 12039 12040 switch (Ty->castAs<BuiltinType>()->getKind()) { 12041 default: 12042 llvm_unreachable("Not a fixed point type!"); 12043 case BuiltinType::ShortAccum: 12044 return SatShortAccumTy; 12045 case BuiltinType::Accum: 12046 return SatAccumTy; 12047 case BuiltinType::LongAccum: 12048 return SatLongAccumTy; 12049 case BuiltinType::UShortAccum: 12050 return SatUnsignedShortAccumTy; 12051 case BuiltinType::UAccum: 12052 return SatUnsignedAccumTy; 12053 case BuiltinType::ULongAccum: 12054 return SatUnsignedLongAccumTy; 12055 case BuiltinType::ShortFract: 12056 return SatShortFractTy; 12057 case BuiltinType::Fract: 12058 return SatFractTy; 12059 case BuiltinType::LongFract: 12060 return SatLongFractTy; 12061 case BuiltinType::UShortFract: 12062 return SatUnsignedShortFractTy; 12063 case BuiltinType::UFract: 12064 return SatUnsignedFractTy; 12065 case BuiltinType::ULongFract: 12066 return SatUnsignedLongFractTy; 12067 } 12068 } 12069 12070 LangAS ASTContext::getLangASForBuiltinAddressSpace(unsigned AS) const { 12071 if (LangOpts.OpenCL) 12072 return getTargetInfo().getOpenCLBuiltinAddressSpace(AS); 12073 12074 if (LangOpts.CUDA) 12075 return 
getTargetInfo().getCUDABuiltinAddressSpace(AS); 12076 12077 return getLangASFromTargetAS(AS); 12078 } 12079 12080 // Explicitly instantiate this in case a Redeclarable<T> is used from a TU that 12081 // doesn't include ASTContext.h 12082 template 12083 clang::LazyGenerationalUpdatePtr< 12084 const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::ValueType 12085 clang::LazyGenerationalUpdatePtr< 12086 const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue( 12087 const clang::ASTContext &Ctx, Decl *Value); 12088 12089 unsigned char ASTContext::getFixedPointScale(QualType Ty) const { 12090 assert(Ty->isFixedPointType()); 12091 12092 const TargetInfo &Target = getTargetInfo(); 12093 switch (Ty->castAs<BuiltinType>()->getKind()) { 12094 default: 12095 llvm_unreachable("Not a fixed point type!"); 12096 case BuiltinType::ShortAccum: 12097 case BuiltinType::SatShortAccum: 12098 return Target.getShortAccumScale(); 12099 case BuiltinType::Accum: 12100 case BuiltinType::SatAccum: 12101 return Target.getAccumScale(); 12102 case BuiltinType::LongAccum: 12103 case BuiltinType::SatLongAccum: 12104 return Target.getLongAccumScale(); 12105 case BuiltinType::UShortAccum: 12106 case BuiltinType::SatUShortAccum: 12107 return Target.getUnsignedShortAccumScale(); 12108 case BuiltinType::UAccum: 12109 case BuiltinType::SatUAccum: 12110 return Target.getUnsignedAccumScale(); 12111 case BuiltinType::ULongAccum: 12112 case BuiltinType::SatULongAccum: 12113 return Target.getUnsignedLongAccumScale(); 12114 case BuiltinType::ShortFract: 12115 case BuiltinType::SatShortFract: 12116 return Target.getShortFractScale(); 12117 case BuiltinType::Fract: 12118 case BuiltinType::SatFract: 12119 return Target.getFractScale(); 12120 case BuiltinType::LongFract: 12121 case BuiltinType::SatLongFract: 12122 return Target.getLongFractScale(); 12123 case BuiltinType::UShortFract: 12124 case BuiltinType::SatUShortFract: 12125 return Target.getUnsignedShortFractScale(); 12126 case BuiltinType::UFract: 12127 case BuiltinType::SatUFract: 12128 return Target.getUnsignedFractScale(); 12129 case BuiltinType::ULongFract: 12130 case BuiltinType::SatULongFract: 12131 return Target.getUnsignedLongFractScale(); 12132 } 12133 } 12134 12135 unsigned char ASTContext::getFixedPointIBits(QualType Ty) const { 12136 assert(Ty->isFixedPointType()); 12137 12138 const TargetInfo &Target = getTargetInfo(); 12139 switch (Ty->castAs<BuiltinType>()->getKind()) { 12140 default: 12141 llvm_unreachable("Not a fixed point type!"); 12142 case BuiltinType::ShortAccum: 12143 case BuiltinType::SatShortAccum: 12144 return Target.getShortAccumIBits(); 12145 case BuiltinType::Accum: 12146 case BuiltinType::SatAccum: 12147 return Target.getAccumIBits(); 12148 case BuiltinType::LongAccum: 12149 case BuiltinType::SatLongAccum: 12150 return Target.getLongAccumIBits(); 12151 case BuiltinType::UShortAccum: 12152 case BuiltinType::SatUShortAccum: 12153 return Target.getUnsignedShortAccumIBits(); 12154 case BuiltinType::UAccum: 12155 case BuiltinType::SatUAccum: 12156 return Target.getUnsignedAccumIBits(); 12157 case BuiltinType::ULongAccum: 12158 case BuiltinType::SatULongAccum: 12159 return Target.getUnsignedLongAccumIBits(); 12160 case BuiltinType::ShortFract: 12161 case BuiltinType::SatShortFract: 12162 case BuiltinType::Fract: 12163 case BuiltinType::SatFract: 12164 case BuiltinType::LongFract: 12165 case BuiltinType::SatLongFract: 12166 case BuiltinType::UShortFract: 12167 case BuiltinType::SatUShortFract: 12168 case BuiltinType::UFract: 12169 
case BuiltinType::SatUFract: 12170 case BuiltinType::ULongFract: 12171 case BuiltinType::SatULongFract: 12172 return 0; 12173 } 12174 } 12175 12176 llvm::FixedPointSemantics 12177 ASTContext::getFixedPointSemantics(QualType Ty) const { 12178 assert((Ty->isFixedPointType() || Ty->isIntegerType()) && 12179 "Can only get the fixed point semantics for a " 12180 "fixed point or integer type."); 12181 if (Ty->isIntegerType()) 12182 return llvm::FixedPointSemantics::GetIntegerSemantics( 12183 getIntWidth(Ty), Ty->isSignedIntegerType()); 12184 12185 bool isSigned = Ty->isSignedFixedPointType(); 12186 return llvm::FixedPointSemantics( 12187 static_cast<unsigned>(getTypeSize(Ty)), getFixedPointScale(Ty), isSigned, 12188 Ty->isSaturatedFixedPointType(), 12189 !isSigned && getTargetInfo().doUnsignedFixedPointTypesHavePadding()); 12190 } 12191 12192 llvm::APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const { 12193 assert(Ty->isFixedPointType()); 12194 return llvm::APFixedPoint::getMax(getFixedPointSemantics(Ty)); 12195 } 12196 12197 llvm::APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const { 12198 assert(Ty->isFixedPointType()); 12199 return llvm::APFixedPoint::getMin(getFixedPointSemantics(Ty)); 12200 } 12201 12202 QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const { 12203 assert(Ty->isUnsignedFixedPointType() && 12204 "Expected unsigned fixed point type"); 12205 12206 switch (Ty->castAs<BuiltinType>()->getKind()) { 12207 case BuiltinType::UShortAccum: 12208 return ShortAccumTy; 12209 case BuiltinType::UAccum: 12210 return AccumTy; 12211 case BuiltinType::ULongAccum: 12212 return LongAccumTy; 12213 case BuiltinType::SatUShortAccum: 12214 return SatShortAccumTy; 12215 case BuiltinType::SatUAccum: 12216 return SatAccumTy; 12217 case BuiltinType::SatULongAccum: 12218 return SatLongAccumTy; 12219 case BuiltinType::UShortFract: 12220 return ShortFractTy; 12221 case BuiltinType::UFract: 12222 return FractTy; 12223 case BuiltinType::ULongFract: 12224 return LongFractTy; 12225 case BuiltinType::SatUShortFract: 12226 return SatShortFractTy; 12227 case BuiltinType::SatUFract: 12228 return SatFractTy; 12229 case BuiltinType::SatULongFract: 12230 return SatLongFractTy; 12231 default: 12232 llvm_unreachable("Unexpected unsigned fixed point type"); 12233 } 12234 } 12235 12236 ParsedTargetAttr 12237 ASTContext::filterFunctionTargetAttrs(const TargetAttr *TD) const { 12238 assert(TD != nullptr); 12239 ParsedTargetAttr ParsedAttr = TD->parse(); 12240 12241 llvm::erase_if(ParsedAttr.Features, [&](const std::string &Feat) { 12242 return !Target->isValidFeatureName(StringRef{Feat}.substr(1)); 12243 }); 12244 return ParsedAttr; 12245 } 12246 12247 void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap, 12248 const FunctionDecl *FD) const { 12249 if (FD) 12250 getFunctionFeatureMap(FeatureMap, GlobalDecl().getWithDecl(FD)); 12251 else 12252 Target->initFeatureMap(FeatureMap, getDiagnostics(), 12253 Target->getTargetOpts().CPU, 12254 Target->getTargetOpts().Features); 12255 } 12256 12257 // Fills in the supplied string map with the set of target features for the 12258 // passed in function. 
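//
// Illustrative example (not from the original source): for a declaration such
// as
//
//   __attribute__((target("avx2"))) void f();
//
// the features parsed from the attribute are appended after the command-line
// features (FeaturesAsWritten), so the attribute's features can override them
// when the map is built; a function without any target-related attribute
// simply receives a copy of Target->getTargetOpts().FeatureMap.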
12259 void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
12260 GlobalDecl GD) const {
12261 StringRef TargetCPU = Target->getTargetOpts().CPU;
12262 const FunctionDecl *FD = GD.getDecl()->getAsFunction();
12263 if (const auto *TD = FD->getAttr<TargetAttr>()) {
12264 ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD);
12265
12266 // Insert the features passed on the command line at the beginning of the
12267 // list so that the additional features from the attribute can override them.
12268 ParsedAttr.Features.insert(
12269 ParsedAttr.Features.begin(),
12270 Target->getTargetOpts().FeaturesAsWritten.begin(),
12271 Target->getTargetOpts().FeaturesAsWritten.end());
12272
12273 if (ParsedAttr.Architecture != "" &&
12274 Target->isValidCPUName(ParsedAttr.Architecture))
12275 TargetCPU = ParsedAttr.Architecture;
12276
12277 // Now populate the feature map, first with the TargetCPU which is either
12278 // the default or a new one from the target attribute string. Then we'll use
12279 // the passed in features (FeaturesAsWritten) along with the new ones from
12280 // the attribute.
12281 Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU,
12282 ParsedAttr.Features);
12283 } else if (const auto *SD = FD->getAttr<CPUSpecificAttr>()) {
12284 llvm::SmallVector<StringRef, 32> FeaturesTmp;
12285 Target->getCPUSpecificCPUDispatchFeatures(
12286 SD->getCPUName(GD.getMultiVersionIndex())->getName(), FeaturesTmp);
12287 std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end());
12288 Features.insert(Features.begin(),
12289 Target->getTargetOpts().FeaturesAsWritten.begin(),
12290 Target->getTargetOpts().FeaturesAsWritten.end());
12291 Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
12292 } else if (const auto *TC = FD->getAttr<TargetClonesAttr>()) {
12293 std::vector<std::string> Features;
12294 StringRef VersionStr = TC->getFeatureStr(GD.getMultiVersionIndex());
12295 if (VersionStr.startswith("arch="))
12296 TargetCPU = VersionStr.drop_front(sizeof("arch=") - 1);
12297 else if (VersionStr != "default")
12298 Features.push_back((StringRef{"+"} + VersionStr).str());
12299
12300 Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
12301 } else {
12302 FeatureMap = Target->getTargetOpts().FeatureMap;
12303 }
12304 }
12305
12306 OMPTraitInfo &ASTContext::getNewOMPTraitInfo() {
12307 OMPTraitInfoVector.emplace_back(new OMPTraitInfo());
12308 return *OMPTraitInfoVector.back();
12309 }
12310
12311 const StreamingDiagnostic &clang::
12312 operator<<(const StreamingDiagnostic &DB,
12313 const ASTContext::SectionInfo &Section) {
12314 if (Section.Decl)
12315 return DB << Section.Decl;
12316 return DB << "a prior #pragma section";
12317 }
12318
12319 bool ASTContext::mayExternalize(const Decl *D) const {
12320 bool IsStaticVar =
12321 isa<VarDecl>(D) && cast<VarDecl>(D)->getStorageClass() == SC_Static;
12322 bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() &&
12323 !D->getAttr<CUDADeviceAttr>()->isImplicit()) ||
12324 (D->hasAttr<CUDAConstantAttr>() &&
12325 !D->getAttr<CUDAConstantAttr>()->isImplicit());
12326 // CUDA/HIP: static managed variables need to be externalized since each is
12327 // emitted only as a declaration in IR and therefore cannot have internal
12328 // linkage. Kernels in an anonymous namespace need to be externalized to avoid duplicate symbols.
12329 return (IsStaticVar && 12330 (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar)) || 12331 (D->hasAttr<CUDAGlobalAttr>() && 12332 basicGVALinkageForFunction(*this, cast<FunctionDecl>(D)) == 12333 GVA_Internal); 12334 } 12335 12336 bool ASTContext::shouldExternalize(const Decl *D) const { 12337 return mayExternalize(D) && 12338 (D->hasAttr<HIPManagedAttr>() || D->hasAttr<CUDAGlobalAttr>() || 12339 CUDADeviceVarODRUsedByHost.count(cast<VarDecl>(D))); 12340 } 12341 12342 StringRef ASTContext::getCUIDHash() const { 12343 if (!CUIDHash.empty()) 12344 return CUIDHash; 12345 if (LangOpts.CUID.empty()) 12346 return StringRef(); 12347 CUIDHash = llvm::utohexstr(llvm::MD5Hash(LangOpts.CUID), /*LowerCase=*/true); 12348 return CUIDHash; 12349 } 12350