1 //===--- Sema.cpp - AST Builder and Semantic Analysis Implementation ------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements the actions class which performs semantic analysis and 10 // builds an AST out of a parse stream. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "UsedDeclVisitor.h" 15 #include "clang/AST/ASTContext.h" 16 #include "clang/AST/ASTDiagnostic.h" 17 #include "clang/AST/Decl.h" 18 #include "clang/AST/DeclCXX.h" 19 #include "clang/AST/DeclFriend.h" 20 #include "clang/AST/DeclObjC.h" 21 #include "clang/AST/Expr.h" 22 #include "clang/AST/ExprCXX.h" 23 #include "clang/AST/PrettyDeclStackTrace.h" 24 #include "clang/AST/StmtCXX.h" 25 #include "clang/Basic/DarwinSDKInfo.h" 26 #include "clang/Basic/DiagnosticOptions.h" 27 #include "clang/Basic/PartialDiagnostic.h" 28 #include "clang/Basic/SourceManager.h" 29 #include "clang/Basic/Stack.h" 30 #include "clang/Basic/TargetInfo.h" 31 #include "clang/Lex/HeaderSearch.h" 32 #include "clang/Lex/HeaderSearchOptions.h" 33 #include "clang/Lex/Preprocessor.h" 34 #include "clang/Sema/CXXFieldCollector.h" 35 #include "clang/Sema/DelayedDiagnostic.h" 36 #include "clang/Sema/ExternalSemaSource.h" 37 #include "clang/Sema/Initialization.h" 38 #include "clang/Sema/MultiplexExternalSemaSource.h" 39 #include "clang/Sema/ObjCMethodList.h" 40 #include "clang/Sema/Scope.h" 41 #include "clang/Sema/ScopeInfo.h" 42 #include "clang/Sema/SemaConsumer.h" 43 #include "clang/Sema/SemaInternal.h" 44 #include "clang/Sema/TemplateDeduction.h" 45 #include "clang/Sema/TemplateInstCallback.h" 46 #include "clang/Sema/TypoCorrection.h" 47 #include "llvm/ADT/DenseMap.h" 48 #include "llvm/ADT/SmallPtrSet.h" 49 #include "llvm/Support/TimeProfiler.h" 50 51 using namespace clang; 52 using namespace sema; 53 54 SourceLocation Sema::getLocForEndOfToken(SourceLocation Loc, unsigned Offset) { 55 return Lexer::getLocForEndOfToken(Loc, Offset, SourceMgr, LangOpts); 56 } 57 58 ModuleLoader &Sema::getModuleLoader() const { return PP.getModuleLoader(); } 59 60 DarwinSDKInfo * 61 Sema::getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc, 62 StringRef Platform) { 63 auto *SDKInfo = getDarwinSDKInfoForAvailabilityChecking(); 64 if (!SDKInfo && !WarnedDarwinSDKInfoMissing) { 65 Diag(Loc, diag::warn_missing_sdksettings_for_availability_checking) 66 << Platform; 67 WarnedDarwinSDKInfoMissing = true; 68 } 69 return SDKInfo; 70 } 71 72 DarwinSDKInfo *Sema::getDarwinSDKInfoForAvailabilityChecking() { 73 if (CachedDarwinSDKInfo) 74 return CachedDarwinSDKInfo->get(); 75 auto SDKInfo = parseDarwinSDKInfo( 76 PP.getFileManager().getVirtualFileSystem(), 77 PP.getHeaderSearchInfo().getHeaderSearchOpts().Sysroot); 78 if (SDKInfo && *SDKInfo) { 79 CachedDarwinSDKInfo = std::make_unique<DarwinSDKInfo>(std::move(**SDKInfo)); 80 return CachedDarwinSDKInfo->get(); 81 } 82 if (!SDKInfo) 83 llvm::consumeError(SDKInfo.takeError()); 84 CachedDarwinSDKInfo = std::unique_ptr<DarwinSDKInfo>(); 85 return nullptr; 86 } 87 88 IdentifierInfo * 89 Sema::InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName, 90 unsigned int Index) { 91 std::string InventedName; 92 llvm::raw_string_ostream OS(InventedName); 93 94 if (!ParamName) 95 OS << "auto:" << Index 
+ 1; 96 else 97 OS << ParamName->getName() << ":auto"; 98 99 OS.flush(); 100 return &Context.Idents.get(OS.str()); 101 } 102 103 PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context, 104 const Preprocessor &PP) { 105 PrintingPolicy Policy = Context.getPrintingPolicy(); 106 // In diagnostics, we print _Bool as bool if the latter is defined as the 107 // former. 108 Policy.Bool = Context.getLangOpts().Bool; 109 if (!Policy.Bool) { 110 if (const MacroInfo *BoolMacro = PP.getMacroInfo(Context.getBoolName())) { 111 Policy.Bool = BoolMacro->isObjectLike() && 112 BoolMacro->getNumTokens() == 1 && 113 BoolMacro->getReplacementToken(0).is(tok::kw__Bool); 114 } 115 } 116 117 return Policy; 118 } 119 120 void Sema::ActOnTranslationUnitScope(Scope *S) { 121 TUScope = S; 122 PushDeclContext(S, Context.getTranslationUnitDecl()); 123 } 124 125 namespace clang { 126 namespace sema { 127 128 class SemaPPCallbacks : public PPCallbacks { 129 Sema *S = nullptr; 130 llvm::SmallVector<SourceLocation, 8> IncludeStack; 131 132 public: 133 void set(Sema &S) { this->S = &S; } 134 135 void reset() { S = nullptr; } 136 137 virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason, 138 SrcMgr::CharacteristicKind FileType, 139 FileID PrevFID) override { 140 if (!S) 141 return; 142 switch (Reason) { 143 case EnterFile: { 144 SourceManager &SM = S->getSourceManager(); 145 SourceLocation IncludeLoc = SM.getIncludeLoc(SM.getFileID(Loc)); 146 if (IncludeLoc.isValid()) { 147 if (llvm::timeTraceProfilerEnabled()) { 148 const FileEntry *FE = SM.getFileEntryForID(SM.getFileID(Loc)); 149 llvm::timeTraceProfilerBegin( 150 "Source", FE != nullptr ? FE->getName() : StringRef("<unknown>")); 151 } 152 153 IncludeStack.push_back(IncludeLoc); 154 S->DiagnoseNonDefaultPragmaAlignPack( 155 Sema::PragmaAlignPackDiagnoseKind::NonDefaultStateAtInclude, 156 IncludeLoc); 157 } 158 break; 159 } 160 case ExitFile: 161 if (!IncludeStack.empty()) { 162 if (llvm::timeTraceProfilerEnabled()) 163 llvm::timeTraceProfilerEnd(); 164 165 S->DiagnoseNonDefaultPragmaAlignPack( 166 Sema::PragmaAlignPackDiagnoseKind::ChangedStateAtExit, 167 IncludeStack.pop_back_val()); 168 } 169 break; 170 default: 171 break; 172 } 173 } 174 }; 175 176 } // end namespace sema 177 } // end namespace clang 178 179 const unsigned Sema::MaxAlignmentExponent; 180 const uint64_t Sema::MaximumAlignment; 181 182 Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, 183 TranslationUnitKind TUKind, CodeCompleteConsumer *CodeCompleter) 184 : ExternalSource(nullptr), isMultiplexExternalSource(false), 185 CurFPFeatures(pp.getLangOpts()), LangOpts(pp.getLangOpts()), PP(pp), 186 Context(ctxt), Consumer(consumer), Diags(PP.getDiagnostics()), 187 SourceMgr(PP.getSourceManager()), CollectStats(false), 188 CodeCompleter(CodeCompleter), CurContext(nullptr), 189 OriginalLexicalContext(nullptr), MSStructPragmaOn(false), 190 MSPointerToMemberRepresentationMethod( 191 LangOpts.getMSPointerToMemberRepresentationMethod()), 192 VtorDispStack(LangOpts.getVtorDispMode()), 193 AlignPackStack(AlignPackInfo(getLangOpts().XLPragmaPack)), 194 DataSegStack(nullptr), BSSSegStack(nullptr), ConstSegStack(nullptr), 195 CodeSegStack(nullptr), FpPragmaStack(FPOptionsOverride()), 196 CurInitSeg(nullptr), VisContext(nullptr), 197 PragmaAttributeCurrentTargetDecl(nullptr), 198 IsBuildingRecoveryCallExpr(false), LateTemplateParser(nullptr), 199 LateTemplateParserCleanup(nullptr), OpaqueParser(nullptr), IdResolver(pp), 200 StdExperimentalNamespaceCache(nullptr), 
StdInitializerList(nullptr), 201 StdCoroutineTraitsCache(nullptr), CXXTypeInfoDecl(nullptr), 202 MSVCGuidDecl(nullptr), NSNumberDecl(nullptr), NSValueDecl(nullptr), 203 NSStringDecl(nullptr), StringWithUTF8StringMethod(nullptr), 204 ValueWithBytesObjCTypeMethod(nullptr), NSArrayDecl(nullptr), 205 ArrayWithObjectsMethod(nullptr), NSDictionaryDecl(nullptr), 206 DictionaryWithObjectsMethod(nullptr), GlobalNewDeleteDeclared(false), 207 TUKind(TUKind), NumSFINAEErrors(0), 208 FullyCheckedComparisonCategories( 209 static_cast<unsigned>(ComparisonCategoryType::Last) + 1), 210 SatisfactionCache(Context), AccessCheckingSFINAE(false), 211 InNonInstantiationSFINAEContext(false), NonInstantiationEntries(0), 212 ArgumentPackSubstitutionIndex(-1), CurrentInstantiationScope(nullptr), 213 DisableTypoCorrection(false), TyposCorrected(0), AnalysisWarnings(*this), 214 ThreadSafetyDeclCache(nullptr), VarDataSharingAttributesStack(nullptr), 215 CurScope(nullptr), Ident_super(nullptr), Ident___float128(nullptr) { 216 assert(pp.TUKind == TUKind); 217 TUScope = nullptr; 218 isConstantEvaluatedOverride = false; 219 220 LoadedExternalKnownNamespaces = false; 221 for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I) 222 NSNumberLiteralMethods[I] = nullptr; 223 224 if (getLangOpts().ObjC) 225 NSAPIObj.reset(new NSAPI(Context)); 226 227 if (getLangOpts().CPlusPlus) 228 FieldCollector.reset(new CXXFieldCollector()); 229 230 // Tell diagnostics how to render things from the AST library. 231 Diags.SetArgToStringFn(&FormatASTNodeDiagnosticArgument, &Context); 232 233 ExprEvalContexts.emplace_back( 234 ExpressionEvaluationContext::PotentiallyEvaluated, 0, CleanupInfo{}, 235 nullptr, ExpressionEvaluationContextRecord::EK_Other); 236 237 // Initialization of data sharing attributes stack for OpenMP 238 InitDataSharingAttributesStack(); 239 240 std::unique_ptr<sema::SemaPPCallbacks> Callbacks = 241 std::make_unique<sema::SemaPPCallbacks>(); 242 SemaPPCallbackHandler = Callbacks.get(); 243 PP.addPPCallbacks(std::move(Callbacks)); 244 SemaPPCallbackHandler->set(*this); 245 } 246 247 // Anchor Sema's type info to this TU. 248 void Sema::anchor() {} 249 250 void Sema::addImplicitTypedef(StringRef Name, QualType T) { 251 DeclarationName DN = &Context.Idents.get(Name); 252 if (IdResolver.begin(DN) == IdResolver.end()) 253 PushOnScopeChains(Context.buildImplicitTypedef(T, Name), TUScope); 254 } 255 256 void Sema::Initialize() { 257 if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer)) 258 SC->InitializeSema(*this); 259 260 // Tell the external Sema source about this Sema object. 261 if (ExternalSemaSource *ExternalSema 262 = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource())) 263 ExternalSema->InitializeSema(*this); 264 265 // This needs to happen after ExternalSemaSource::InitializeSema(this) or we 266 // will not be able to merge any duplicate __va_list_tag decls correctly. 267 VAListTagName = PP.getIdentifierInfo("__va_list_tag"); 268 269 if (!TUScope) 270 return; 271 272 // Initialize predefined 128-bit integer types, if needed. 273 if (Context.getTargetInfo().hasInt128Type() || 274 (Context.getAuxTargetInfo() && 275 Context.getAuxTargetInfo()->hasInt128Type())) { 276 // If either of the 128-bit integer types are unavailable to name lookup, 277 // define them now. 
278 DeclarationName Int128 = &Context.Idents.get("__int128_t"); 279 if (IdResolver.begin(Int128) == IdResolver.end()) 280 PushOnScopeChains(Context.getInt128Decl(), TUScope); 281 282 DeclarationName UInt128 = &Context.Idents.get("__uint128_t"); 283 if (IdResolver.begin(UInt128) == IdResolver.end()) 284 PushOnScopeChains(Context.getUInt128Decl(), TUScope); 285 } 286 287 288 // Initialize predefined Objective-C types: 289 if (getLangOpts().ObjC) { 290 // If 'SEL' does not yet refer to any declarations, make it refer to the 291 // predefined 'SEL'. 292 DeclarationName SEL = &Context.Idents.get("SEL"); 293 if (IdResolver.begin(SEL) == IdResolver.end()) 294 PushOnScopeChains(Context.getObjCSelDecl(), TUScope); 295 296 // If 'id' does not yet refer to any declarations, make it refer to the 297 // predefined 'id'. 298 DeclarationName Id = &Context.Idents.get("id"); 299 if (IdResolver.begin(Id) == IdResolver.end()) 300 PushOnScopeChains(Context.getObjCIdDecl(), TUScope); 301 302 // Create the built-in typedef for 'Class'. 303 DeclarationName Class = &Context.Idents.get("Class"); 304 if (IdResolver.begin(Class) == IdResolver.end()) 305 PushOnScopeChains(Context.getObjCClassDecl(), TUScope); 306 307 // Create the built-in forward declaration for 'Protocol'. 308 DeclarationName Protocol = &Context.Idents.get("Protocol"); 309 if (IdResolver.begin(Protocol) == IdResolver.end()) 310 PushOnScopeChains(Context.getObjCProtocolDecl(), TUScope); 311 } 312 313 // Create the internal type for the *StringMakeConstantString builtins. 314 DeclarationName ConstantString = &Context.Idents.get("__NSConstantString"); 315 if (IdResolver.begin(ConstantString) == IdResolver.end()) 316 PushOnScopeChains(Context.getCFConstantStringDecl(), TUScope); 317 318 // Initialize Microsoft "predefined C++ types". 319 if (getLangOpts().MSVCCompat) { 320 if (getLangOpts().CPlusPlus && 321 IdResolver.begin(&Context.Idents.get("type_info")) == IdResolver.end()) 322 PushOnScopeChains(Context.buildImplicitRecord("type_info", TTK_Class), 323 TUScope); 324 325 addImplicitTypedef("size_t", Context.getSizeType()); 326 } 327 328 // Initialize predefined OpenCL types and supported extensions and (optional) 329 // core features. 330 if (getLangOpts().OpenCL) { 331 getOpenCLOptions().addSupport( 332 Context.getTargetInfo().getSupportedOpenCLOpts(), getLangOpts()); 333 addImplicitTypedef("sampler_t", Context.OCLSamplerTy); 334 addImplicitTypedef("event_t", Context.OCLEventTy); 335 if (getLangOpts().getOpenCLCompatibleVersion() >= 200) { 336 addImplicitTypedef("clk_event_t", Context.OCLClkEventTy); 337 addImplicitTypedef("queue_t", Context.OCLQueueTy); 338 if (getLangOpts().OpenCLPipes) 339 addImplicitTypedef("reserve_id_t", Context.OCLReserveIDTy); 340 addImplicitTypedef("atomic_int", Context.getAtomicType(Context.IntTy)); 341 addImplicitTypedef("atomic_uint", 342 Context.getAtomicType(Context.UnsignedIntTy)); 343 addImplicitTypedef("atomic_float", 344 Context.getAtomicType(Context.FloatTy)); 345 // OpenCLC v2.0, s6.13.11.6 requires that atomic_flag is implemented as 346 // 32-bit integer and OpenCLC v2.0, s6.1.1 int is always 32-bit wide. 347 addImplicitTypedef("atomic_flag", Context.getAtomicType(Context.IntTy)); 348 349 350 // OpenCL v2.0 s6.13.11.6: 351 // - The atomic_long and atomic_ulong types are supported if the 352 // cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics 353 // extensions are supported.
354 // - The atomic_double type is only supported if double precision 355 // is supported and the cl_khr_int64_base_atomics and 356 // cl_khr_int64_extended_atomics extensions are supported. 357 // - If the device address space is 64-bits, the data types 358 // atomic_intptr_t, atomic_uintptr_t, atomic_size_t and 359 // atomic_ptrdiff_t are supported if the cl_khr_int64_base_atomics and 360 // cl_khr_int64_extended_atomics extensions are supported. 361 362 auto AddPointerSizeDependentTypes = [&]() { 363 auto AtomicSizeT = Context.getAtomicType(Context.getSizeType()); 364 auto AtomicIntPtrT = Context.getAtomicType(Context.getIntPtrType()); 365 auto AtomicUIntPtrT = Context.getAtomicType(Context.getUIntPtrType()); 366 auto AtomicPtrDiffT = 367 Context.getAtomicType(Context.getPointerDiffType()); 368 addImplicitTypedef("atomic_size_t", AtomicSizeT); 369 addImplicitTypedef("atomic_intptr_t", AtomicIntPtrT); 370 addImplicitTypedef("atomic_uintptr_t", AtomicUIntPtrT); 371 addImplicitTypedef("atomic_ptrdiff_t", AtomicPtrDiffT); 372 }; 373 374 if (Context.getTypeSize(Context.getSizeType()) == 32) { 375 AddPointerSizeDependentTypes(); 376 } 377 378 if (getOpenCLOptions().isSupported("cl_khr_fp16", getLangOpts())) { 379 auto AtomicHalfT = Context.getAtomicType(Context.HalfTy); 380 addImplicitTypedef("atomic_half", AtomicHalfT); 381 } 382 383 std::vector<QualType> Atomic64BitTypes; 384 if (getOpenCLOptions().isSupported("cl_khr_int64_base_atomics", 385 getLangOpts()) && 386 getOpenCLOptions().isSupported("cl_khr_int64_extended_atomics", 387 getLangOpts())) { 388 if (getOpenCLOptions().isSupported("cl_khr_fp64", getLangOpts())) { 389 auto AtomicDoubleT = Context.getAtomicType(Context.DoubleTy); 390 addImplicitTypedef("atomic_double", AtomicDoubleT); 391 Atomic64BitTypes.push_back(AtomicDoubleT); 392 } 393 auto AtomicLongT = Context.getAtomicType(Context.LongTy); 394 auto AtomicULongT = Context.getAtomicType(Context.UnsignedLongTy); 395 addImplicitTypedef("atomic_long", AtomicLongT); 396 addImplicitTypedef("atomic_ulong", AtomicULongT); 397 398 399 if (Context.getTypeSize(Context.getSizeType()) == 64) { 400 AddPointerSizeDependentTypes(); 401 } 402 } 403 } 404 405 406 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 407 if (getOpenCLOptions().isSupported(#Ext, getLangOpts())) { \ 408 addImplicitTypedef(#ExtType, Context.Id##Ty); \ 409 } 410 #include "clang/Basic/OpenCLExtensionTypes.def" 411 } 412 413 if (Context.getTargetInfo().hasAArch64SVETypes()) { 414 #define SVE_TYPE(Name, Id, SingletonId) \ 415 addImplicitTypedef(Name, Context.SingletonId); 416 #include "clang/Basic/AArch64SVEACLETypes.def" 417 } 418 419 if (Context.getTargetInfo().getTriple().isPPC64()) { 420 #define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \ 421 addImplicitTypedef(#Name, Context.Id##Ty); 422 #include "clang/Basic/PPCTypes.def" 423 #define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \ 424 addImplicitTypedef(#Name, Context.Id##Ty); 425 #include "clang/Basic/PPCTypes.def" 426 } 427 428 if (Context.getTargetInfo().hasRISCVVTypes()) { 429 #define RVV_TYPE(Name, Id, SingletonId) \ 430 addImplicitTypedef(Name, Context.SingletonId); 431 #include "clang/Basic/RISCVVTypes.def" 432 } 433 434 if (Context.getTargetInfo().hasBuiltinMSVaList()) { 435 DeclarationName MSVaList = &Context.Idents.get("__builtin_ms_va_list"); 436 if (IdResolver.begin(MSVaList) == IdResolver.end()) 437 PushOnScopeChains(Context.getBuiltinMSVaListDecl(), TUScope); 438 } 439 440 DeclarationName BuiltinVaList = &Context.Idents.get("__builtin_va_list"); 441 if 
(IdResolver.begin(BuiltinVaList) == IdResolver.end()) 442 PushOnScopeChains(Context.getBuiltinVaListDecl(), TUScope); 443 } 444 445 Sema::~Sema() { 446 assert(InstantiatingSpecializations.empty() && 447 "failed to clean up an InstantiatingTemplate?"); 448 449 if (VisContext) FreeVisContext(); 450 451 // Kill all the active scopes. 452 for (sema::FunctionScopeInfo *FSI : FunctionScopes) 453 delete FSI; 454 455 // Tell the SemaConsumer to forget about us; we're going out of scope. 456 if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer)) 457 SC->ForgetSema(); 458 459 // Detach from the external Sema source. 460 if (ExternalSemaSource *ExternalSema 461 = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource())) 462 ExternalSema->ForgetSema(); 463 464 // If Sema's ExternalSource is the multiplexer - we own it. 465 if (isMultiplexExternalSource) 466 delete ExternalSource; 467 468 // Delete cached satisfactions. 469 std::vector<ConstraintSatisfaction *> Satisfactions; 470 Satisfactions.reserve(Satisfactions.size()); 471 for (auto &Node : SatisfactionCache) 472 Satisfactions.push_back(&Node); 473 for (auto *Node : Satisfactions) 474 delete Node; 475 476 threadSafety::threadSafetyCleanup(ThreadSafetyDeclCache); 477 478 // Destroys data sharing attributes stack for OpenMP 479 DestroyDataSharingAttributesStack(); 480 481 // Detach from the PP callback handler which outlives Sema since it's owned 482 // by the preprocessor. 483 SemaPPCallbackHandler->reset(); 484 } 485 486 void Sema::warnStackExhausted(SourceLocation Loc) { 487 // Only warn about this once. 488 if (!WarnedStackExhausted) { 489 Diag(Loc, diag::warn_stack_exhausted); 490 WarnedStackExhausted = true; 491 } 492 } 493 494 void Sema::runWithSufficientStackSpace(SourceLocation Loc, 495 llvm::function_ref<void()> Fn) { 496 clang::runWithSufficientStackSpace([&] { warnStackExhausted(Loc); }, Fn); 497 } 498 499 /// makeUnavailableInSystemHeader - There is an error in the current 500 /// context. If we're still in a system header, and we can plausibly 501 /// make the relevant declaration unavailable instead of erroring, do 502 /// so and return true. 503 bool Sema::makeUnavailableInSystemHeader(SourceLocation loc, 504 UnavailableAttr::ImplicitReason reason) { 505 // If we're not in a function, it's an error. 506 FunctionDecl *fn = dyn_cast<FunctionDecl>(CurContext); 507 if (!fn) return false; 508 509 // If we're in template instantiation, it's an error. 510 if (inTemplateInstantiation()) 511 return false; 512 513 // If that function's not in a system header, it's an error. 514 if (!Context.getSourceManager().isInSystemHeader(loc)) 515 return false; 516 517 // If the function is already unavailable, it's not an error. 518 if (fn->hasAttr<UnavailableAttr>()) return true; 519 520 fn->addAttr(UnavailableAttr::CreateImplicit(Context, "", reason, loc)); 521 return true; 522 } 523 524 ASTMutationListener *Sema::getASTMutationListener() const { 525 return getASTConsumer().GetASTMutationListener(); 526 } 527 528 ///Registers an external source. If an external source already exists, 529 /// creates a multiplex external source and appends to it. 530 /// 531 ///\param[in] E - A non-null external sema source. 
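/// For illustration only: with a Sema instance S and two hypothetical
/// ExternalSemaSource implementations A and B (names invented here),
/// repeated registration ends up multiplexed:
/// \code
///   S.addExternalSource(&A); // A becomes the sole external source
///   S.addExternalSource(&B); // A and B are now wrapped in a
///                            // MultiplexExternalSemaSource owned by Sema
/// \endcode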
532 /// 533 void Sema::addExternalSource(ExternalSemaSource *E) { 534 assert(E && "Cannot use with NULL ptr"); 535 536 if (!ExternalSource) { 537 ExternalSource = E; 538 return; 539 } 540 541 if (isMultiplexExternalSource) 542 static_cast<MultiplexExternalSemaSource*>(ExternalSource)->addSource(*E); 543 else { 544 ExternalSource = new MultiplexExternalSemaSource(*ExternalSource, *E); 545 isMultiplexExternalSource = true; 546 } 547 } 548 549 /// Print out statistics about the semantic analysis. 550 void Sema::PrintStats() const { 551 llvm::errs() << "\n*** Semantic Analysis Stats:\n"; 552 llvm::errs() << NumSFINAEErrors << " SFINAE diagnostics trapped.\n"; 553 554 BumpAlloc.PrintStats(); 555 AnalysisWarnings.PrintStats(); 556 } 557 558 void Sema::diagnoseNullableToNonnullConversion(QualType DstType, 559 QualType SrcType, 560 SourceLocation Loc) { 561 Optional<NullabilityKind> ExprNullability = SrcType->getNullability(Context); 562 if (!ExprNullability || (*ExprNullability != NullabilityKind::Nullable && 563 *ExprNullability != NullabilityKind::NullableResult)) 564 return; 565 566 Optional<NullabilityKind> TypeNullability = DstType->getNullability(Context); 567 if (!TypeNullability || *TypeNullability != NullabilityKind::NonNull) 568 return; 569 570 Diag(Loc, diag::warn_nullability_lost) << SrcType << DstType; 571 } 572 573 void Sema::diagnoseZeroToNullptrConversion(CastKind Kind, const Expr* E) { 574 if (Diags.isIgnored(diag::warn_zero_as_null_pointer_constant, 575 E->getBeginLoc())) 576 return; 577 // nullptr only exists from C++11 on, so don't warn on its absence earlier. 578 if (!getLangOpts().CPlusPlus11) 579 return; 580 581 if (Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer) 582 return; 583 if (E->IgnoreParenImpCasts()->getType()->isNullPtrType()) 584 return; 585 586 // Don't diagnose the conversion from a 0 literal to a null pointer argument 587 // in a synthesized call to operator<=>. 588 if (!CodeSynthesisContexts.empty() && 589 CodeSynthesisContexts.back().Kind == 590 CodeSynthesisContext::RewritingOperatorAsSpaceship) 591 return; 592 593 // If it is a macro from system header, and if the macro name is not "NULL", 594 // do not warn. 595 SourceLocation MaybeMacroLoc = E->getBeginLoc(); 596 if (Diags.getSuppressSystemWarnings() && 597 SourceMgr.isInSystemMacro(MaybeMacroLoc) && 598 !findMacroSpelling(MaybeMacroLoc, "NULL")) 599 return; 600 601 Diag(E->getBeginLoc(), diag::warn_zero_as_null_pointer_constant) 602 << FixItHint::CreateReplacement(E->getSourceRange(), "nullptr"); 603 } 604 605 /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit cast. 606 /// If there is already an implicit cast, merge into the existing one. 607 /// The result is of the given category. 
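/// A minimal usage sketch (the operand and target type are illustrative):
/// \code
///   // Widen an 'int' prvalue E to 'long'; if E is already an ImplicitCastExpr
///   // of the same kind with no base path, it is reused and simply retyped.
///   ExprResult R =
///       ImpCastExprToType(E, Context.LongTy, CK_IntegralCast, VK_PRValue);
/// \endcode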
608 ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty, 609 CastKind Kind, ExprValueKind VK, 610 const CXXCastPath *BasePath, 611 CheckedConversionKind CCK) { 612 #ifndef NDEBUG 613 if (VK == VK_PRValue && !E->isPRValue()) { 614 switch (Kind) { 615 default: 616 llvm_unreachable( 617 ("can't implicitly cast glvalue to prvalue with this cast " 618 "kind: " + 619 std::string(CastExpr::getCastKindName(Kind))) 620 .c_str()); 621 case CK_Dependent: 622 case CK_LValueToRValue: 623 case CK_ArrayToPointerDecay: 624 case CK_FunctionToPointerDecay: 625 case CK_ToVoid: 626 case CK_NonAtomicToAtomic: 627 break; 628 } 629 } 630 assert((VK == VK_PRValue || Kind == CK_Dependent || !E->isPRValue()) && 631 "can't cast prvalue to glvalue"); 632 #endif 633 634 diagnoseNullableToNonnullConversion(Ty, E->getType(), E->getBeginLoc()); 635 diagnoseZeroToNullptrConversion(Kind, E); 636 637 QualType ExprTy = Context.getCanonicalType(E->getType()); 638 QualType TypeTy = Context.getCanonicalType(Ty); 639 640 if (ExprTy == TypeTy) 641 return E; 642 643 if (Kind == CK_ArrayToPointerDecay) { 644 // C++1z [conv.array]: The temporary materialization conversion is applied. 645 // We also use this to fuel C++ DR1213, which applies to C++11 onwards. 646 if (getLangOpts().CPlusPlus && E->isPRValue()) { 647 // The temporary is an lvalue in C++98 and an xvalue otherwise. 648 ExprResult Materialized = CreateMaterializeTemporaryExpr( 649 E->getType(), E, !getLangOpts().CPlusPlus11); 650 if (Materialized.isInvalid()) 651 return ExprError(); 652 E = Materialized.get(); 653 } 654 // C17 6.7.1p6 footnote 124: The implementation can treat any register 655 // declaration simply as an auto declaration. However, whether or not 656 // addressable storage is actually used, the address of any part of an 657 // object declared with storage-class specifier register cannot be 658 // computed, either explicitly(by use of the unary & operator as discussed 659 // in 6.5.3.2) or implicitly(by converting an array name to a pointer as 660 // discussed in 6.3.2.1).Thus, the only operator that can be applied to an 661 // array declared with storage-class specifier register is sizeof. 662 if (VK == VK_PRValue && !getLangOpts().CPlusPlus && !E->isPRValue()) { 663 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) { 664 if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) { 665 if (VD->getStorageClass() == SC_Register) { 666 Diag(E->getExprLoc(), diag::err_typecheck_address_of) 667 << /*register variable*/ 3 << E->getSourceRange(); 668 return ExprError(); 669 } 670 } 671 } 672 } 673 } 674 675 if (ImplicitCastExpr *ImpCast = dyn_cast<ImplicitCastExpr>(E)) { 676 if (ImpCast->getCastKind() == Kind && (!BasePath || BasePath->empty())) { 677 ImpCast->setType(Ty); 678 ImpCast->setValueKind(VK); 679 return E; 680 } 681 } 682 683 return ImplicitCastExpr::Create(Context, Ty, Kind, E, BasePath, VK, 684 CurFPFeatureOverrides()); 685 } 686 687 /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding 688 /// to the conversion from scalar type ScalarTy to the Boolean type. 
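/// For example (illustrative): an integral operand maps to
/// CK_IntegralToBoolean and a pointer operand to CK_PointerToBoolean,
/// mirroring the switch below:
/// \code
///   CastKind CK = ScalarTypeToBooleanCastKind(Context.FloatTy);
///   // CK == CK_FloatingToBoolean
/// \endcode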
689 CastKind Sema::ScalarTypeToBooleanCastKind(QualType ScalarTy) { 690 switch (ScalarTy->getScalarTypeKind()) { 691 case Type::STK_Bool: return CK_NoOp; 692 case Type::STK_CPointer: return CK_PointerToBoolean; 693 case Type::STK_BlockPointer: return CK_PointerToBoolean; 694 case Type::STK_ObjCObjectPointer: return CK_PointerToBoolean; 695 case Type::STK_MemberPointer: return CK_MemberPointerToBoolean; 696 case Type::STK_Integral: return CK_IntegralToBoolean; 697 case Type::STK_Floating: return CK_FloatingToBoolean; 698 case Type::STK_IntegralComplex: return CK_IntegralComplexToBoolean; 699 case Type::STK_FloatingComplex: return CK_FloatingComplexToBoolean; 700 case Type::STK_FixedPoint: return CK_FixedPointToBoolean; 701 } 702 llvm_unreachable("unknown scalar type kind"); 703 } 704 705 /// Used to prune the decls of Sema's UnusedFileScopedDecls vector. 706 static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) { 707 if (D->getMostRecentDecl()->isUsed()) 708 return true; 709 710 if (D->isExternallyVisible()) 711 return true; 712 713 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 714 // If this is a function template and none of its specializations is used, 715 // we should warn. 716 if (FunctionTemplateDecl *Template = FD->getDescribedFunctionTemplate()) 717 for (const auto *Spec : Template->specializations()) 718 if (ShouldRemoveFromUnused(SemaRef, Spec)) 719 return true; 720 721 // UnusedFileScopedDecls stores the first declaration. 722 // The declaration may have become definition so check again. 723 const FunctionDecl *DeclToCheck; 724 if (FD->hasBody(DeclToCheck)) 725 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck); 726 727 // Later redecls may add new information resulting in not having to warn, 728 // so check again. 729 DeclToCheck = FD->getMostRecentDecl(); 730 if (DeclToCheck != FD) 731 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck); 732 } 733 734 if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { 735 // If a variable usable in constant expressions is referenced, 736 // don't warn if it isn't used: if the value of a variable is required 737 // for the computation of a constant expression, it doesn't make sense to 738 // warn even if the variable isn't odr-used. (isReferenced doesn't 739 // precisely reflect that, but it's a decent approximation.) 740 if (VD->isReferenced() && 741 VD->mightBeUsableInConstantExpressions(SemaRef->Context)) 742 return true; 743 744 if (VarTemplateDecl *Template = VD->getDescribedVarTemplate()) 745 // If this is a variable template and none of its specializations is used, 746 // we should warn. 747 for (const auto *Spec : Template->specializations()) 748 if (ShouldRemoveFromUnused(SemaRef, Spec)) 749 return true; 750 751 // UnusedFileScopedDecls stores the first declaration. 752 // The declaration may have become definition so check again. 753 const VarDecl *DeclToCheck = VD->getDefinition(); 754 if (DeclToCheck) 755 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck); 756 757 // Later redecls may add new information resulting in not having to warn, 758 // so check again. 
759 DeclToCheck = VD->getMostRecentDecl(); 760 if (DeclToCheck != VD) 761 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck); 762 } 763 764 return false; 765 } 766 767 static bool isFunctionOrVarDeclExternC(NamedDecl *ND) { 768 if (auto *FD = dyn_cast<FunctionDecl>(ND)) 769 return FD->isExternC(); 770 return cast<VarDecl>(ND)->isExternC(); 771 } 772 773 /// Determine whether ND is an external-linkage function or variable whose 774 /// type has no linkage. 775 bool Sema::isExternalWithNoLinkageType(ValueDecl *VD) { 776 // Note: it's not quite enough to check whether VD has UniqueExternalLinkage, 777 // because we also want to catch the case where its type has VisibleNoLinkage, 778 // which does not affect the linkage of VD. 779 return getLangOpts().CPlusPlus && VD->hasExternalFormalLinkage() && 780 !isExternalFormalLinkage(VD->getType()->getLinkage()) && 781 !isFunctionOrVarDeclExternC(VD); 782 } 783 784 /// Obtains a sorted list of functions and variables that are undefined but 785 /// ODR-used. 786 void Sema::getUndefinedButUsed( 787 SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined) { 788 for (const auto &UndefinedUse : UndefinedButUsed) { 789 NamedDecl *ND = UndefinedUse.first; 790 791 // Ignore attributes that have become invalid. 792 if (ND->isInvalidDecl()) continue; 793 794 // __attribute__((weakref)) is basically a definition. 795 if (ND->hasAttr<WeakRefAttr>()) continue; 796 797 if (isa<CXXDeductionGuideDecl>(ND)) 798 continue; 799 800 if (ND->hasAttr<DLLImportAttr>() || ND->hasAttr<DLLExportAttr>()) { 801 // An exported function will always be emitted when defined, so even if 802 // the function is inline, it doesn't have to be emitted in this TU. An 803 // imported function implies that it has been exported somewhere else. 804 continue; 805 } 806 807 if (FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) { 808 if (FD->isDefined()) 809 continue; 810 if (FD->isExternallyVisible() && 811 !isExternalWithNoLinkageType(FD) && 812 !FD->getMostRecentDecl()->isInlined() && 813 !FD->hasAttr<ExcludeFromExplicitInstantiationAttr>()) 814 continue; 815 if (FD->getBuiltinID()) 816 continue; 817 } else { 818 auto *VD = cast<VarDecl>(ND); 819 if (VD->hasDefinition() != VarDecl::DeclarationOnly) 820 continue; 821 if (VD->isExternallyVisible() && 822 !isExternalWithNoLinkageType(VD) && 823 !VD->getMostRecentDecl()->isInline() && 824 !VD->hasAttr<ExcludeFromExplicitInstantiationAttr>()) 825 continue; 826 827 // Skip VarDecls that lack formal definitions but which we know are in 828 // fact defined somewhere. 829 if (VD->isKnownToBeDefined()) 830 continue; 831 } 832 833 Undefined.push_back(std::make_pair(ND, UndefinedUse.second)); 834 } 835 } 836 837 /// checkUndefinedButUsed - Check for undefined objects with internal linkage 838 /// or that are inline. 839 static void checkUndefinedButUsed(Sema &S) { 840 if (S.UndefinedButUsed.empty()) return; 841 842 // Collect all the still-undefined entities with internal linkage. 
843 SmallVector<std::pair<NamedDecl *, SourceLocation>, 16> Undefined; 844 S.getUndefinedButUsed(Undefined); 845 if (Undefined.empty()) return; 846 847 for (auto Undef : Undefined) { 848 ValueDecl *VD = cast<ValueDecl>(Undef.first); 849 SourceLocation UseLoc = Undef.second; 850 851 if (S.isExternalWithNoLinkageType(VD)) { 852 // C++ [basic.link]p8: 853 // A type without linkage shall not be used as the type of a variable 854 // or function with external linkage unless 855 // -- the entity has C language linkage 856 // -- the entity is not odr-used or is defined in the same TU 857 // 858 // As an extension, accept this in cases where the type is externally 859 // visible, since the function or variable actually can be defined in 860 // another translation unit in that case. 861 S.Diag(VD->getLocation(), isExternallyVisible(VD->getType()->getLinkage()) 862 ? diag::ext_undefined_internal_type 863 : diag::err_undefined_internal_type) 864 << isa<VarDecl>(VD) << VD; 865 } else if (!VD->isExternallyVisible()) { 866 // FIXME: We can promote this to an error. The function or variable can't 867 // be defined anywhere else, so the program must necessarily violate the 868 // one definition rule. 869 bool IsImplicitBase = false; 870 if (const auto *BaseD = dyn_cast<FunctionDecl>(VD)) { 871 auto *DVAttr = BaseD->getAttr<OMPDeclareVariantAttr>(); 872 if (DVAttr && !DVAttr->getTraitInfo().isExtensionActive( 873 llvm::omp::TraitProperty:: 874 implementation_extension_disable_implicit_base)) { 875 const auto *Func = cast<FunctionDecl>( 876 cast<DeclRefExpr>(DVAttr->getVariantFuncRef())->getDecl()); 877 IsImplicitBase = BaseD->isImplicit() && 878 Func->getIdentifier()->isMangledOpenMPVariantName(); 879 } 880 } 881 if (!S.getLangOpts().OpenMP || !IsImplicitBase) 882 S.Diag(VD->getLocation(), diag::warn_undefined_internal) 883 << isa<VarDecl>(VD) << VD; 884 } else if (auto *FD = dyn_cast<FunctionDecl>(VD)) { 885 (void)FD; 886 assert(FD->getMostRecentDecl()->isInlined() && 887 "used object requires definition but isn't inline or internal?"); 888 // FIXME: This is ill-formed; we should reject. 889 S.Diag(VD->getLocation(), diag::warn_undefined_inline) << VD; 890 } else { 891 assert(cast<VarDecl>(VD)->getMostRecentDecl()->isInline() && 892 "used var requires definition but isn't inline or internal?"); 893 S.Diag(VD->getLocation(), diag::err_undefined_inline_var) << VD; 894 } 895 if (UseLoc.isValid()) 896 S.Diag(UseLoc, diag::note_used_here); 897 } 898 899 S.UndefinedButUsed.clear(); 900 } 901 902 void Sema::LoadExternalWeakUndeclaredIdentifiers() { 903 if (!ExternalSource) 904 return; 905 906 SmallVector<std::pair<IdentifierInfo *, WeakInfo>, 4> WeakIDs; 907 ExternalSource->ReadWeakUndeclaredIdentifiers(WeakIDs); 908 for (auto &WeakID : WeakIDs) 909 WeakUndeclaredIdentifiers.insert(WeakID); 910 } 911 912 913 typedef llvm::DenseMap<const CXXRecordDecl*, bool> RecordCompleteMap; 914 915 /// Returns true, if all methods and nested classes of the given 916 /// CXXRecordDecl are defined in this translation unit. 917 /// 918 /// Should only be called from ActOnEndOfTranslationUnit so that all 919 /// definitions are actually read. 
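/// Illustrative sketch of what counts as complete here:
/// \code
///   struct A { void f() {} };           // complete: f is defined in this TU
///   struct B { virtual void g() = 0; }; // complete: g is pure (non-dtor)
///   struct C { void h(); };             // incomplete: h is never defined
/// \endcode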
920 static bool MethodsAndNestedClassesComplete(const CXXRecordDecl *RD, 921 RecordCompleteMap &MNCComplete) { 922 RecordCompleteMap::iterator Cache = MNCComplete.find(RD); 923 if (Cache != MNCComplete.end()) 924 return Cache->second; 925 if (!RD->isCompleteDefinition()) 926 return false; 927 bool Complete = true; 928 for (DeclContext::decl_iterator I = RD->decls_begin(), 929 E = RD->decls_end(); 930 I != E && Complete; ++I) { 931 if (const CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(*I)) 932 Complete = M->isDefined() || M->isDefaulted() || 933 (M->isPure() && !isa<CXXDestructorDecl>(M)); 934 else if (const FunctionTemplateDecl *F = dyn_cast<FunctionTemplateDecl>(*I)) 935 // If the template function is marked as late template parsed at this 936 // point, it has not been instantiated and therefore we have not 937 // performed semantic analysis on it yet, so we cannot know if the type 938 // can be considered complete. 939 Complete = !F->getTemplatedDecl()->isLateTemplateParsed() && 940 F->getTemplatedDecl()->isDefined(); 941 else if (const CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(*I)) { 942 if (R->isInjectedClassName()) 943 continue; 944 if (R->hasDefinition()) 945 Complete = MethodsAndNestedClassesComplete(R->getDefinition(), 946 MNCComplete); 947 else 948 Complete = false; 949 } 950 } 951 MNCComplete[RD] = Complete; 952 return Complete; 953 } 954 955 /// Returns true, if the given CXXRecordDecl is fully defined in this 956 /// translation unit, i.e. all methods are defined or pure virtual and all 957 /// friends, friend functions and nested classes are fully defined in this 958 /// translation unit. 959 /// 960 /// Should only be called from ActOnEndOfTranslationUnit so that all 961 /// definitions are actually read. 962 static bool IsRecordFullyDefined(const CXXRecordDecl *RD, 963 RecordCompleteMap &RecordsComplete, 964 RecordCompleteMap &MNCComplete) { 965 RecordCompleteMap::iterator Cache = RecordsComplete.find(RD); 966 if (Cache != RecordsComplete.end()) 967 return Cache->second; 968 bool Complete = MethodsAndNestedClassesComplete(RD, MNCComplete); 969 for (CXXRecordDecl::friend_iterator I = RD->friend_begin(), 970 E = RD->friend_end(); 971 I != E && Complete; ++I) { 972 // Check if friend classes and methods are complete. 973 if (TypeSourceInfo *TSI = (*I)->getFriendType()) { 974 // Friend classes are available as the TypeSourceInfo of the FriendDecl. 975 if (CXXRecordDecl *FriendD = TSI->getType()->getAsCXXRecordDecl()) 976 Complete = MethodsAndNestedClassesComplete(FriendD, MNCComplete); 977 else 978 Complete = false; 979 } else { 980 // Friend functions are available through the NamedDecl of FriendDecl. 981 if (const FunctionDecl *FD = 982 dyn_cast<FunctionDecl>((*I)->getFriendDecl())) 983 Complete = FD->isDefined(); 984 else 985 // This is a template friend, give up. 986 Complete = false; 987 } 988 } 989 RecordsComplete[RD] = Complete; 990 return Complete; 991 } 992 993 void Sema::emitAndClearUnusedLocalTypedefWarnings() { 994 if (ExternalSource) 995 ExternalSource->ReadUnusedLocalTypedefNameCandidates( 996 UnusedLocalTypedefNameCandidates); 997 for (const TypedefNameDecl *TD : UnusedLocalTypedefNameCandidates) { 998 if (TD->isReferenced()) 999 continue; 1000 Diag(TD->getLocation(), diag::warn_unused_local_typedef) 1001 << isa<TypeAliasDecl>(TD) << TD->getDeclName(); 1002 } 1003 UnusedLocalTypedefNameCandidates.clear(); 1004 } 1005 1006 /// This is called before the very first declaration in the translation unit 1007 /// is parsed. 
Note that the ASTContext may have already injected some 1008 /// declarations. 1009 void Sema::ActOnStartOfTranslationUnit() { 1010 if (getLangOpts().ModulesTS && 1011 (getLangOpts().getCompilingModule() == LangOptions::CMK_ModuleInterface || 1012 getLangOpts().getCompilingModule() == LangOptions::CMK_None)) { 1013 // We start in an implied global module fragment. 1014 SourceLocation StartOfTU = 1015 SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID()); 1016 ActOnGlobalModuleFragmentDecl(StartOfTU); 1017 ModuleScopes.back().ImplicitGlobalModuleFragment = true; 1018 } 1019 } 1020 1021 void Sema::ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind) { 1022 // No explicit actions are required at the end of the global module fragment. 1023 if (Kind == TUFragmentKind::Global) 1024 return; 1025 1026 // Transfer late parsed template instantiations over to the pending template 1027 // instantiation list. During normal compilation, the late template parser 1028 // will be installed and instantiating these templates will succeed. 1029 // 1030 // If we are building a TU prefix for serialization, it is also safe to 1031 // transfer these over, even though they are not parsed. The end of the TU 1032 // should be outside of any eager template instantiation scope, so when this 1033 // AST is deserialized, these templates will not be parsed until the end of 1034 // the combined TU. 1035 PendingInstantiations.insert(PendingInstantiations.end(), 1036 LateParsedInstantiations.begin(), 1037 LateParsedInstantiations.end()); 1038 LateParsedInstantiations.clear(); 1039 1040 // If DefinedUsedVTables ends up marking any virtual member functions it 1041 // might lead to more pending template instantiations, which we then need 1042 // to instantiate. 1043 DefineUsedVTables(); 1044 1045 // C++: Perform implicit template instantiations. 1046 // 1047 // FIXME: When we perform these implicit instantiations, we do not 1048 // carefully keep track of the point of instantiation (C++ [temp.point]). 1049 // This means that name lookup that occurs within the template 1050 // instantiation will always happen at the end of the translation unit, 1051 // so it will find some names that are not required to be found. This is 1052 // valid, but we could do better by diagnosing if an instantiation uses a 1053 // name that was not visible at its first point of instantiation. 1054 if (ExternalSource) { 1055 // Load pending instantiations from the external source. 1056 SmallVector<PendingImplicitInstantiation, 4> Pending; 1057 ExternalSource->ReadPendingInstantiations(Pending); 1058 for (auto PII : Pending) 1059 if (auto Func = dyn_cast<FunctionDecl>(PII.first)) 1060 Func->setInstantiationIsPending(true); 1061 PendingInstantiations.insert(PendingInstantiations.begin(), 1062 Pending.begin(), Pending.end()); 1063 } 1064 1065 { 1066 llvm::TimeTraceScope TimeScope("PerformPendingInstantiations"); 1067 PerformPendingInstantiations(); 1068 } 1069 1070 emitDeferredDiags(); 1071 1072 assert(LateParsedInstantiations.empty() && 1073 "end of TU template instantiation should not create more " 1074 "late-parsed templates"); 1075 1076 // Report diagnostics for uncorrected delayed typos. Ideally all of them 1077 // should have been corrected by that time, but it is very hard to cover all 1078 // cases in practice. 1079 for (const auto &Typo : DelayedTypos) { 1080 // We pass an empty TypoCorrection to indicate no correction was performed. 
1081 Typo.second.DiagHandler(TypoCorrection()); 1082 } 1083 DelayedTypos.clear(); 1084 } 1085 1086 /// ActOnEndOfTranslationUnit - This is called at the very end of the 1087 /// translation unit when EOF is reached and all but the top-level scope is 1088 /// popped. 1089 void Sema::ActOnEndOfTranslationUnit() { 1090 assert(DelayedDiagnostics.getCurrentPool() == nullptr 1091 && "reached end of translation unit with a pool attached?"); 1092 1093 // If code completion is enabled, don't perform any end-of-translation-unit 1094 // work. 1095 if (PP.isCodeCompletionEnabled()) 1096 return; 1097 1098 // Complete translation units and modules define vtables and perform implicit 1099 // instantiations. PCH files do not. 1100 if (TUKind != TU_Prefix) { 1101 DiagnoseUseOfUnimplementedSelectors(); 1102 1103 ActOnEndOfTranslationUnitFragment( 1104 !ModuleScopes.empty() && ModuleScopes.back().Module->Kind == 1105 Module::PrivateModuleFragment 1106 ? TUFragmentKind::Private 1107 : TUFragmentKind::Normal); 1108 1109 if (LateTemplateParserCleanup) 1110 LateTemplateParserCleanup(OpaqueParser); 1111 1112 CheckDelayedMemberExceptionSpecs(); 1113 } else { 1114 // If we are building a TU prefix for serialization, it is safe to transfer 1115 // these over, even though they are not parsed. The end of the TU should be 1116 // outside of any eager template instantiation scope, so when this AST is 1117 // deserialized, these templates will not be parsed until the end of the 1118 // combined TU. 1119 PendingInstantiations.insert(PendingInstantiations.end(), 1120 LateParsedInstantiations.begin(), 1121 LateParsedInstantiations.end()); 1122 LateParsedInstantiations.clear(); 1123 1124 if (LangOpts.PCHInstantiateTemplates) { 1125 llvm::TimeTraceScope TimeScope("PerformPendingInstantiations"); 1126 PerformPendingInstantiations(); 1127 } 1128 } 1129 1130 DiagnoseUnterminatedPragmaAlignPack(); 1131 DiagnoseUnterminatedPragmaAttribute(); 1132 1133 // All delayed member exception specs should be checked or we end up accepting 1134 // incompatible declarations. 1135 assert(DelayedOverridingExceptionSpecChecks.empty()); 1136 assert(DelayedEquivalentExceptionSpecChecks.empty()); 1137 1138 // All dllexport classes should have been processed already. 1139 assert(DelayedDllExportClasses.empty()); 1140 assert(DelayedDllExportMemberFunctions.empty()); 1141 1142 // Remove file scoped decls that turned out to be used. 1143 UnusedFileScopedDecls.erase( 1144 std::remove_if(UnusedFileScopedDecls.begin(ExternalSource, true), 1145 UnusedFileScopedDecls.end(), 1146 [this](const DeclaratorDecl *DD) { 1147 return ShouldRemoveFromUnused(this, DD); 1148 }), 1149 UnusedFileScopedDecls.end()); 1150 1151 if (TUKind == TU_Prefix) { 1152 // Translation unit prefixes don't need any of the checking below.
1153 if (!PP.isIncrementalProcessingEnabled()) 1154 TUScope = nullptr; 1155 return; 1156 } 1157 1158 // Check for #pragma weak identifiers that were never declared 1159 LoadExternalWeakUndeclaredIdentifiers(); 1160 for (auto WeakID : WeakUndeclaredIdentifiers) { 1161 if (WeakID.second.getUsed()) 1162 continue; 1163 1164 Decl *PrevDecl = LookupSingleName(TUScope, WeakID.first, SourceLocation(), 1165 LookupOrdinaryName); 1166 if (PrevDecl != nullptr && 1167 !(isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl))) 1168 Diag(WeakID.second.getLocation(), diag::warn_attribute_wrong_decl_type) 1169 << "'weak'" << ExpectedVariableOrFunction; 1170 else 1171 Diag(WeakID.second.getLocation(), diag::warn_weak_identifier_undeclared) 1172 << WeakID.first; 1173 } 1174 1175 if (LangOpts.CPlusPlus11 && 1176 !Diags.isIgnored(diag::warn_delegating_ctor_cycle, SourceLocation())) 1177 CheckDelegatingCtorCycles(); 1178 1179 if (!Diags.hasErrorOccurred()) { 1180 if (ExternalSource) 1181 ExternalSource->ReadUndefinedButUsed(UndefinedButUsed); 1182 checkUndefinedButUsed(*this); 1183 } 1184 1185 // A global-module-fragment is only permitted within a module unit. 1186 bool DiagnosedMissingModuleDeclaration = false; 1187 if (!ModuleScopes.empty() && 1188 ModuleScopes.back().Module->Kind == Module::GlobalModuleFragment && 1189 !ModuleScopes.back().ImplicitGlobalModuleFragment) { 1190 Diag(ModuleScopes.back().BeginLoc, 1191 diag::err_module_declaration_missing_after_global_module_introducer); 1192 DiagnosedMissingModuleDeclaration = true; 1193 } 1194 1195 if (TUKind == TU_Module) { 1196 // If we are building a module interface unit, we need to have seen the 1197 // module declaration by now. 1198 if (getLangOpts().getCompilingModule() == 1199 LangOptions::CMK_ModuleInterface && 1200 (ModuleScopes.empty() || 1201 !ModuleScopes.back().Module->isModulePurview()) && 1202 !DiagnosedMissingModuleDeclaration) { 1203 // FIXME: Make a better guess as to where to put the module declaration. 1204 Diag(getSourceManager().getLocForStartOfFile( 1205 getSourceManager().getMainFileID()), 1206 diag::err_module_declaration_missing); 1207 } 1208 1209 // If we are building a module, resolve all of the exported declarations 1210 // now. 1211 if (Module *CurrentModule = PP.getCurrentModule()) { 1212 ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap(); 1213 1214 SmallVector<Module *, 2> Stack; 1215 Stack.push_back(CurrentModule); 1216 while (!Stack.empty()) { 1217 Module *Mod = Stack.pop_back_val(); 1218 1219 // Resolve the exported declarations and conflicts. 1220 // FIXME: Actually complain, once we figure out how to teach the 1221 // diagnostic client to deal with complaints in the module map at this 1222 // point. 1223 ModMap.resolveExports(Mod, /*Complain=*/false); 1224 ModMap.resolveUses(Mod, /*Complain=*/false); 1225 ModMap.resolveConflicts(Mod, /*Complain=*/false); 1226 1227 // Queue the submodules, so their exports will also be resolved. 1228 Stack.append(Mod->submodule_begin(), Mod->submodule_end()); 1229 } 1230 } 1231 1232 // Warnings emitted in ActOnEndOfTranslationUnit() should be emitted for 1233 // modules when they are built, not every time they are used. 1234 emitAndClearUnusedLocalTypedefWarnings(); 1235 } 1236 1237 // C99 6.9.2p2: 1238 // A declaration of an identifier for an object that has file 1239 // scope without an initializer, and without a storage-class 1240 // specifier or with the storage-class specifier static, 1241 // constitutes a tentative definition. 
If a translation unit 1242 // contains one or more tentative definitions for an identifier, 1243 // and the translation unit contains no external definition for 1244 // that identifier, then the behavior is exactly as if the 1245 // translation unit contains a file scope declaration of that 1246 // identifier, with the composite type as of the end of the 1247 // translation unit, with an initializer equal to 0. 1248 llvm::SmallSet<VarDecl *, 32> Seen; 1249 for (TentativeDefinitionsType::iterator 1250 T = TentativeDefinitions.begin(ExternalSource), 1251 TEnd = TentativeDefinitions.end(); 1252 T != TEnd; ++T) { 1253 VarDecl *VD = (*T)->getActingDefinition(); 1254 1255 // If the tentative definition was completed, getActingDefinition() returns 1256 // null. If we've already seen this variable before, insert()'s second 1257 // return value is false. 1258 if (!VD || VD->isInvalidDecl() || !Seen.insert(VD).second) 1259 continue; 1260 1261 if (const IncompleteArrayType *ArrayT 1262 = Context.getAsIncompleteArrayType(VD->getType())) { 1263 // Set the length of the array to 1 (C99 6.9.2p5). 1264 Diag(VD->getLocation(), diag::warn_tentative_incomplete_array); 1265 llvm::APInt One(Context.getTypeSize(Context.getSizeType()), true); 1266 QualType T = Context.getConstantArrayType(ArrayT->getElementType(), One, 1267 nullptr, ArrayType::Normal, 0); 1268 VD->setType(T); 1269 } else if (RequireCompleteType(VD->getLocation(), VD->getType(), 1270 diag::err_tentative_def_incomplete_type)) 1271 VD->setInvalidDecl(); 1272 1273 // No initialization is performed for a tentative definition. 1274 CheckCompleteVariableDeclaration(VD); 1275 1276 // Notify the consumer that we've completed a tentative definition. 1277 if (!VD->isInvalidDecl()) 1278 Consumer.CompleteTentativeDefinition(VD); 1279 } 1280 1281 for (auto D : ExternalDeclarations) { 1282 if (!D || D->isInvalidDecl() || D->getPreviousDecl() || !D->isUsed()) 1283 continue; 1284 1285 Consumer.CompleteExternalDeclaration(D); 1286 } 1287 1288 // If there were errors, disable 'unused' warnings since they will mostly be 1289 // noise. Don't warn for a use from a module: either we should warn on all 1290 // file-scope declarations in modules or not at all, but whether the 1291 // declaration is used is immaterial. 1292 if (!Diags.hasErrorOccurred() && TUKind != TU_Module) { 1293 // Output warning for unused file scoped decls. 1294 for (UnusedFileScopedDeclsType::iterator 1295 I = UnusedFileScopedDecls.begin(ExternalSource), 1296 E = UnusedFileScopedDecls.end(); I != E; ++I) { 1297 if (ShouldRemoveFromUnused(this, *I)) 1298 continue; 1299 1300 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) { 1301 const FunctionDecl *DiagD; 1302 if (!FD->hasBody(DiagD)) 1303 DiagD = FD; 1304 if (DiagD->isDeleted()) 1305 continue; // Deleted functions are supposed to be unused. 
1306 if (DiagD->isReferenced()) { 1307 if (isa<CXXMethodDecl>(DiagD)) 1308 Diag(DiagD->getLocation(), diag::warn_unneeded_member_function) 1309 << DiagD; 1310 else { 1311 if (FD->getStorageClass() == SC_Static && 1312 !FD->isInlineSpecified() && 1313 !SourceMgr.isInMainFile( 1314 SourceMgr.getExpansionLoc(FD->getLocation()))) 1315 Diag(DiagD->getLocation(), 1316 diag::warn_unneeded_static_internal_decl) 1317 << DiagD; 1318 else 1319 Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl) 1320 << /*function*/ 0 << DiagD; 1321 } 1322 } else { 1323 if (FD->getDescribedFunctionTemplate()) 1324 Diag(DiagD->getLocation(), diag::warn_unused_template) 1325 << /*function*/ 0 << DiagD; 1326 else 1327 Diag(DiagD->getLocation(), isa<CXXMethodDecl>(DiagD) 1328 ? diag::warn_unused_member_function 1329 : diag::warn_unused_function) 1330 << DiagD; 1331 } 1332 } else { 1333 const VarDecl *DiagD = cast<VarDecl>(*I)->getDefinition(); 1334 if (!DiagD) 1335 DiagD = cast<VarDecl>(*I); 1336 if (DiagD->isReferenced()) { 1337 Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl) 1338 << /*variable*/ 1 << DiagD; 1339 } else if (DiagD->getType().isConstQualified()) { 1340 const SourceManager &SM = SourceMgr; 1341 if (SM.getMainFileID() != SM.getFileID(DiagD->getLocation()) || 1342 !PP.getLangOpts().IsHeaderFile) 1343 Diag(DiagD->getLocation(), diag::warn_unused_const_variable) 1344 << DiagD; 1345 } else { 1346 if (DiagD->getDescribedVarTemplate()) 1347 Diag(DiagD->getLocation(), diag::warn_unused_template) 1348 << /*variable*/ 1 << DiagD; 1349 else 1350 Diag(DiagD->getLocation(), diag::warn_unused_variable) << DiagD; 1351 } 1352 } 1353 } 1354 1355 emitAndClearUnusedLocalTypedefWarnings(); 1356 } 1357 1358 if (!Diags.isIgnored(diag::warn_unused_private_field, SourceLocation())) { 1359 // FIXME: Load additional unused private field candidates from the external 1360 // source. 1361 RecordCompleteMap RecordsComplete; 1362 RecordCompleteMap MNCComplete; 1363 for (NamedDeclSetType::iterator I = UnusedPrivateFields.begin(), 1364 E = UnusedPrivateFields.end(); I != E; ++I) { 1365 const NamedDecl *D = *I; 1366 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D->getDeclContext()); 1367 if (RD && !RD->isUnion() && 1368 IsRecordFullyDefined(RD, RecordsComplete, MNCComplete)) { 1369 Diag(D->getLocation(), diag::warn_unused_private_field) 1370 << D->getDeclName(); 1371 } 1372 } 1373 } 1374 1375 if (!Diags.isIgnored(diag::warn_mismatched_delete_new, SourceLocation())) { 1376 if (ExternalSource) 1377 ExternalSource->ReadMismatchingDeleteExpressions(DeleteExprs); 1378 for (const auto &DeletedFieldInfo : DeleteExprs) { 1379 for (const auto &DeleteExprLoc : DeletedFieldInfo.second) { 1380 AnalyzeDeleteExprMismatch(DeletedFieldInfo.first, DeleteExprLoc.first, 1381 DeleteExprLoc.second); 1382 } 1383 } 1384 } 1385 1386 // Check we've noticed that we're no longer parsing the initializer for every 1387 // variable. If we miss cases, then at best we have a performance issue and 1388 // at worst a rejects-valid bug. 1389 assert(ParsingInitForAutoVars.empty() && 1390 "Didn't unmark var as having its initializer parsed"); 1391 1392 if (!PP.isIncrementalProcessingEnabled()) 1393 TUScope = nullptr; 1394 } 1395 1396 1397 //===----------------------------------------------------------------------===// 1398 // Helper functions. 
1399 //===----------------------------------------------------------------------===// 1400 1401 DeclContext *Sema::getFunctionLevelDeclContext() { 1402 DeclContext *DC = CurContext; 1403 1404 while (true) { 1405 if (isa<BlockDecl>(DC) || isa<EnumDecl>(DC) || isa<CapturedDecl>(DC) || 1406 isa<RequiresExprBodyDecl>(DC)) { 1407 DC = DC->getParent(); 1408 } else if (isa<CXXMethodDecl>(DC) && 1409 cast<CXXMethodDecl>(DC)->getOverloadedOperator() == OO_Call && 1410 cast<CXXRecordDecl>(DC->getParent())->isLambda()) { 1411 DC = DC->getParent()->getParent(); 1412 } 1413 else break; 1414 } 1415 1416 return DC; 1417 } 1418 1419 /// getCurFunctionDecl - If inside of a function body, this returns a pointer 1420 /// to the function decl for the function being parsed. If we're currently 1421 /// in a 'block', this returns the containing context. 1422 FunctionDecl *Sema::getCurFunctionDecl() { 1423 DeclContext *DC = getFunctionLevelDeclContext(); 1424 return dyn_cast<FunctionDecl>(DC); 1425 } 1426 1427 ObjCMethodDecl *Sema::getCurMethodDecl() { 1428 DeclContext *DC = getFunctionLevelDeclContext(); 1429 while (isa<RecordDecl>(DC)) 1430 DC = DC->getParent(); 1431 return dyn_cast<ObjCMethodDecl>(DC); 1432 } 1433 1434 NamedDecl *Sema::getCurFunctionOrMethodDecl() { 1435 DeclContext *DC = getFunctionLevelDeclContext(); 1436 if (isa<ObjCMethodDecl>(DC) || isa<FunctionDecl>(DC)) 1437 return cast<NamedDecl>(DC); 1438 return nullptr; 1439 } 1440 1441 LangAS Sema::getDefaultCXXMethodAddrSpace() const { 1442 if (getLangOpts().OpenCL) 1443 return getASTContext().getDefaultOpenCLPointeeAddrSpace(); 1444 return LangAS::Default; 1445 } 1446 1447 void Sema::EmitCurrentDiagnostic(unsigned DiagID) { 1448 // FIXME: It doesn't make sense to me that DiagID is an incoming argument here 1449 // and yet we also use the current diag ID on the DiagnosticsEngine. This has 1450 // been made more painfully obvious by the refactor that introduced this 1451 // function, but it is possible that the incoming argument can be 1452 // eliminated. If it truly cannot be (for example, there is some reentrancy 1453 // issue I am not seeing yet), then there should at least be a clarifying 1454 // comment somewhere. 1455 if (Optional<TemplateDeductionInfo*> Info = isSFINAEContext()) { 1456 switch (DiagnosticIDs::getDiagnosticSFINAEResponse( 1457 Diags.getCurrentDiagID())) { 1458 case DiagnosticIDs::SFINAE_Report: 1459 // We'll report the diagnostic below. 1460 break; 1461 1462 case DiagnosticIDs::SFINAE_SubstitutionFailure: 1463 // Count this failure so that we know that template argument deduction 1464 // has failed. 1465 ++NumSFINAEErrors; 1466 1467 // Make a copy of this suppressed diagnostic and store it with the 1468 // template-deduction information. 1469 if (*Info && !(*Info)->hasSFINAEDiagnostic()) { 1470 Diagnostic DiagInfo(&Diags); 1471 (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(), 1472 PartialDiagnostic(DiagInfo, Context.getDiagAllocator())); 1473 } 1474 1475 Diags.setLastDiagnosticIgnored(true); 1476 Diags.Clear(); 1477 return; 1478 1479 case DiagnosticIDs::SFINAE_AccessControl: { 1480 // Per C++ Core Issue 1170, access control is part of SFINAE. 1481 // Additionally, the AccessCheckingSFINAE flag can be used to temporarily 1482 // make access control a part of SFINAE for the purposes of checking 1483 // type traits. 1484 if (!AccessCheckingSFINAE && !getLangOpts().CPlusPlus11) 1485 break; 1486 1487 SourceLocation Loc = Diags.getCurrentDiagLoc(); 1488 1489 // Suppress this diagnostic. 
1490 ++NumSFINAEErrors;
1491
1492 // Make a copy of this suppressed diagnostic and store it with the
1493 // template-deduction information.
1494 if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
1495 Diagnostic DiagInfo(&Diags);
1496 (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(),
1497 PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
1498 }
1499
1500 Diags.setLastDiagnosticIgnored(true);
1501 Diags.Clear();
1502
1503 // Now that the diagnostic state is clear, produce a C++98 compatibility
1504 // warning.
1505 Diag(Loc, diag::warn_cxx98_compat_sfinae_access_control);
1506
1507 // The last diagnostic which Sema produced was ignored. Suppress any
1508 // notes attached to it.
1509 Diags.setLastDiagnosticIgnored(true);
1510 return;
1511 }
1512
1513 case DiagnosticIDs::SFINAE_Suppress:
1514 // Make a copy of this suppressed diagnostic and store it with the
1515 // template-deduction information.
1516 if (*Info) {
1517 Diagnostic DiagInfo(&Diags);
1518 (*Info)->addSuppressedDiagnostic(DiagInfo.getLocation(),
1519 PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
1520 }
1521
1522 // Suppress this diagnostic.
1523 Diags.setLastDiagnosticIgnored(true);
1524 Diags.Clear();
1525 return;
1526 }
1527 }
1528
1529 // Copy the diagnostic printing policy over the ASTContext printing policy.
1530 // TODO: Stop doing that. See: https://reviews.llvm.org/D45093#1090292
1531 Context.setPrintingPolicy(getPrintingPolicy());
1532
1533 // Emit the diagnostic.
1534 if (!Diags.EmitCurrentDiagnostic())
1535 return;
1536
1537 // If this is not a note, and we're in a template instantiation
1538 // that is different from the last template instantiation where
1539 // we emitted an error, print a template instantiation
1540 // backtrace.
1541 if (!DiagnosticIDs::isBuiltinNote(DiagID))
1542 PrintContextStack();
1543 }
1544
1545 Sema::SemaDiagnosticBuilder
1546 Sema::Diag(SourceLocation Loc, const PartialDiagnostic &PD, bool DeferHint) {
1547 return Diag(Loc, PD.getDiagID(), DeferHint) << PD;
1548 }
1549
1550 bool Sema::hasUncompilableErrorOccurred() const {
1551 if (getDiagnostics().hasUncompilableErrorOccurred())
1552 return true;
1553 auto *FD = dyn_cast<FunctionDecl>(CurContext);
1554 if (!FD)
1555 return false;
1556 auto Loc = DeviceDeferredDiags.find(FD);
1557 if (Loc == DeviceDeferredDiags.end())
1558 return false;
1559 for (auto PDAt : Loc->second) {
1560 if (DiagnosticIDs::isDefaultMappingAsError(PDAt.second.getDiagID()))
1561 return true;
1562 }
1563 return false;
1564 }
1565
1566 // Print notes showing how we can reach FD starting from an a priori
1567 // known-callable function.
1568 static void emitCallStackNotes(Sema &S, FunctionDecl *FD) {
1569 auto FnIt = S.DeviceKnownEmittedFns.find(FD);
1570 while (FnIt != S.DeviceKnownEmittedFns.end()) {
1571 // Respect error limit.
1572 if (S.Diags.hasFatalErrorOccurred())
1573 return;
1574 DiagnosticBuilder Builder(
1575 S.Diags.Report(FnIt->second.Loc, diag::note_called_by));
1576 Builder << FnIt->second.FD;
1577 FnIt = S.DeviceKnownEmittedFns.find(FnIt->second.FD);
1578 }
1579 }
1580
1581 namespace {
1582
1583 /// Helper class that emits deferred diagnostic messages when an entity that
1584 /// directly or indirectly uses a function containing deferred diagnostics is
1585 /// known to be emitted.
1586 ///
1587 /// During parsing of the AST, certain diagnostic messages are recorded as
1588 /// deferred diagnostics, since it is not yet known whether the functions
1589 /// containing such diagnostics will be emitted.
1590 /// A list of potentially emitted functions and variables that may trigger
1591 /// emission of functions is also recorded. DeferredDiagnosticsEmitter then
1592 /// recursively visits the functions used by each of them to emit the deferred diagnostics.
1593 ///
1594 /// During the visit, certain OpenMP directives, or the initializers of
1595 /// variables with certain OpenMP attributes, cause any subsequently visited
1596 /// functions to enter a state which this implementation calls the OpenMP
1597 /// device context. The state is exited when the directive or initializer is
1598 /// exited. This state can change the emission states of subsequent uses
1599 /// of functions.
1600 ///
1601 /// Conceptually, the functions or variables to be visited form a use graph
1602 /// in which a parent node uses its child nodes. At any point of the visit,
1603 /// the nodes traversed from the root to the current node form a use
1604 /// stack. The emission state of the current node depends on two factors:
1605 /// 1. the emission state of the root node
1606 /// 2. whether the current node is in the OpenMP device context
1607 /// If a function is determined to be emitted, the deferred diagnostics it
1608 /// contains are emitted, together with information about the use stack.
1609 ///
1610 class DeferredDiagnosticsEmitter
1611 : public UsedDeclVisitor<DeferredDiagnosticsEmitter> {
1612 public:
1613 typedef UsedDeclVisitor<DeferredDiagnosticsEmitter> Inherited;
1614
1615 // Whether the function is already in the current use-path.
1616 llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> InUsePath;
1617
1618 // The current use-path.
1619 llvm::SmallVector<CanonicalDeclPtr<FunctionDecl>, 4> UsePath;
1620
1621 // Whether visiting of the function has already been done. Done[0] is for the
1622 // case not in the OpenMP device context; Done[1] is for the case in the
1623 // OpenMP device context. We need two sets because diagnostic emission may
1624 // differ depending on whether we are in the OpenMP device context.
1625 llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> DoneMap[2];
1626
1627 // Emission state of the root node of the current use graph.
1628 bool ShouldEmitRootNode;
1629
1630 // Current OpenMP device context level. It starts at 0; each entry into a
1631 // device context increments it by 1 and each exit decrements it by 1. A
1632 // non-zero value indicates that we are currently in a device context.
1633 unsigned InOMPDeviceContext;
1634
1635 DeferredDiagnosticsEmitter(Sema &S)
1636 : Inherited(S), ShouldEmitRootNode(false), InOMPDeviceContext(0) {}
1637
1638 bool shouldVisitDiscardedStmt() const { return false; }
1639
1640 void VisitOMPTargetDirective(OMPTargetDirective *Node) {
1641 ++InOMPDeviceContext;
1642 Inherited::VisitOMPTargetDirective(Node);
1643 --InOMPDeviceContext;
1644 }
1645
1646 void visitUsedDecl(SourceLocation Loc, Decl *D) {
1647 if (isa<VarDecl>(D))
1648 return;
1649 if (auto *FD = dyn_cast<FunctionDecl>(D))
1650 checkFunc(Loc, FD);
1651 else
1652 Inherited::visitUsedDecl(Loc, D);
1653 }
1654
1655 void checkVar(VarDecl *VD) {
1656 assert(VD->isFileVarDecl() &&
1657 "Should only check file-scope variables");
1658 if (auto *Init = VD->getInit()) {
1659 auto DevTy = OMPDeclareTargetDeclAttr::getDeviceType(VD);
1660 bool IsDev = DevTy && (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost ||
1661 *DevTy == OMPDeclareTargetDeclAttr::DT_Any);
1662 if (IsDev)
1663 ++InOMPDeviceContext;
1664 this->Visit(Init);
1665 if (IsDev)
1666 --InOMPDeviceContext;
1667 }
1668 }
1669
1670 void checkFunc(SourceLocation Loc, FunctionDecl *FD) {
1671 auto &Done = DoneMap[InOMPDeviceContext > 0 ? 1 : 0];
1672 FunctionDecl *Caller = UsePath.empty() ? nullptr : UsePath.back();
1673 if ((!ShouldEmitRootNode && !S.getLangOpts().OpenMP && !Caller) ||
1674 S.shouldIgnoreInHostDeviceCheck(FD) || InUsePath.count(FD))
1675 return;
1676 // Finalize analysis of OpenMP-specific constructs.
1677 if (Caller && S.LangOpts.OpenMP && UsePath.size() == 1 &&
1678 (ShouldEmitRootNode || InOMPDeviceContext))
1679 S.finalizeOpenMPDelayedAnalysis(Caller, FD, Loc);
1680 if (Caller)
1681 S.DeviceKnownEmittedFns[FD] = {Caller, Loc};
1682 // Always emit deferred diagnostics for the direct users. This does not
1683 // lead to an explosion of diagnostics since each user is visited at most
1684 // twice.
1685 if (ShouldEmitRootNode || InOMPDeviceContext)
1686 emitDeferredDiags(FD, Caller);
1687 // Do not revisit a function if the function body has been completely
1688 // visited before.
1689 if (!Done.insert(FD).second)
1690 return;
1691 InUsePath.insert(FD);
1692 UsePath.push_back(FD);
1693 if (auto *S = FD->getBody()) {
1694 this->Visit(S);
1695 }
1696 UsePath.pop_back();
1697 InUsePath.erase(FD);
1698 }
1699
1700 void checkRecordedDecl(Decl *D) {
1701 if (auto *FD = dyn_cast<FunctionDecl>(D)) {
1702 ShouldEmitRootNode = S.getEmissionStatus(FD, /*Final=*/true) ==
1703 Sema::FunctionEmissionStatus::Emitted;
1704 checkFunc(SourceLocation(), FD);
1705 } else
1706 checkVar(cast<VarDecl>(D));
1707 }
1708
1709 // Emit any deferred diagnostics for FD.
1710 void emitDeferredDiags(FunctionDecl *FD, bool ShowCallStack) {
1711 auto It = S.DeviceDeferredDiags.find(FD);
1712 if (It == S.DeviceDeferredDiags.end())
1713 return;
1714 bool HasWarningOrError = false;
1715 bool FirstDiag = true;
1716 for (PartialDiagnosticAt &PDAt : It->second) {
1717 // Respect error limit.
1718 if (S.Diags.hasFatalErrorOccurred())
1719 return;
1720 const SourceLocation &Loc = PDAt.first;
1721 const PartialDiagnostic &PD = PDAt.second;
1722 HasWarningOrError |=
1723 S.getDiagnostics().getDiagnosticLevel(PD.getDiagID(), Loc) >=
1724 DiagnosticsEngine::Warning;
1725 {
1726 DiagnosticBuilder Builder(S.Diags.Report(Loc, PD.getDiagID()));
1727 PD.Emit(Builder);
1728 }
1729 // Emit the note on the first diagnostic, in case too many diagnostics
1730 // cause the note not to be emitted.
1731 if (FirstDiag && HasWarningOrError && ShowCallStack) { 1732 emitCallStackNotes(S, FD); 1733 FirstDiag = false; 1734 } 1735 } 1736 } 1737 }; 1738 } // namespace 1739 1740 void Sema::emitDeferredDiags() { 1741 if (ExternalSource) 1742 ExternalSource->ReadDeclsToCheckForDeferredDiags( 1743 DeclsToCheckForDeferredDiags); 1744 1745 if ((DeviceDeferredDiags.empty() && !LangOpts.OpenMP) || 1746 DeclsToCheckForDeferredDiags.empty()) 1747 return; 1748 1749 DeferredDiagnosticsEmitter DDE(*this); 1750 for (auto D : DeclsToCheckForDeferredDiags) 1751 DDE.checkRecordedDecl(D); 1752 } 1753 1754 // In CUDA, there are some constructs which may appear in semantically-valid 1755 // code, but trigger errors if we ever generate code for the function in which 1756 // they appear. Essentially every construct you're not allowed to use on the 1757 // device falls into this category, because you are allowed to use these 1758 // constructs in a __host__ __device__ function, but only if that function is 1759 // never codegen'ed on the device. 1760 // 1761 // To handle semantic checking for these constructs, we keep track of the set of 1762 // functions we know will be emitted, either because we could tell a priori that 1763 // they would be emitted, or because they were transitively called by a 1764 // known-emitted function. 1765 // 1766 // We also keep a partial call graph of which not-known-emitted functions call 1767 // which other not-known-emitted functions. 1768 // 1769 // When we see something which is illegal if the current function is emitted 1770 // (usually by way of CUDADiagIfDeviceCode, CUDADiagIfHostCode, or 1771 // CheckCUDACall), we first check if the current function is known-emitted. If 1772 // so, we immediately output the diagnostic. 1773 // 1774 // Otherwise, we "defer" the diagnostic. It sits in Sema::DeviceDeferredDiags 1775 // until we discover that the function is known-emitted, at which point we take 1776 // it out of this map and emit the diagnostic. 1777 1778 Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(Kind K, SourceLocation Loc, 1779 unsigned DiagID, 1780 FunctionDecl *Fn, Sema &S) 1781 : S(S), Loc(Loc), DiagID(DiagID), Fn(Fn), 1782 ShowCallStack(K == K_ImmediateWithCallStack || K == K_Deferred) { 1783 switch (K) { 1784 case K_Nop: 1785 break; 1786 case K_Immediate: 1787 case K_ImmediateWithCallStack: 1788 ImmediateDiag.emplace( 1789 ImmediateDiagBuilder(S.Diags.Report(Loc, DiagID), S, DiagID)); 1790 break; 1791 case K_Deferred: 1792 assert(Fn && "Must have a function to attach the deferred diag to."); 1793 auto &Diags = S.DeviceDeferredDiags[Fn]; 1794 PartialDiagId.emplace(Diags.size()); 1795 Diags.emplace_back(Loc, S.PDiag(DiagID)); 1796 break; 1797 } 1798 } 1799 1800 Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D) 1801 : S(D.S), Loc(D.Loc), DiagID(D.DiagID), Fn(D.Fn), 1802 ShowCallStack(D.ShowCallStack), ImmediateDiag(D.ImmediateDiag), 1803 PartialDiagId(D.PartialDiagId) { 1804 // Clean the previous diagnostics. 1805 D.ShowCallStack = false; 1806 D.ImmediateDiag.reset(); 1807 D.PartialDiagId.reset(); 1808 } 1809 1810 Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() { 1811 if (ImmediateDiag) { 1812 // Emit our diagnostic and, if it was a warning or error, output a callstack 1813 // if Fn isn't a priori known-emitted. 1814 bool IsWarningOrError = S.getDiagnostics().getDiagnosticLevel( 1815 DiagID, Loc) >= DiagnosticsEngine::Warning; 1816 ImmediateDiag.reset(); // Emit the immediate diag. 
1817 if (IsWarningOrError && ShowCallStack)
1818 emitCallStackNotes(S, Fn);
1819 } else {
1820 assert((!PartialDiagId || ShowCallStack) &&
1821 "Must always show call stack for deferred diags.");
1822 }
1823 }
1824
1825 Sema::SemaDiagnosticBuilder
1826 Sema::targetDiag(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD) {
1827 FD = FD ? FD : getCurFunctionDecl();
1828 if (LangOpts.OpenMP)
1829 return LangOpts.OpenMPIsDevice ? diagIfOpenMPDeviceCode(Loc, DiagID, FD)
1830 : diagIfOpenMPHostCode(Loc, DiagID, FD);
1831 if (getLangOpts().CUDA)
1832 return getLangOpts().CUDAIsDevice ? CUDADiagIfDeviceCode(Loc, DiagID)
1833 : CUDADiagIfHostCode(Loc, DiagID);
1834
1835 if (getLangOpts().SYCLIsDevice)
1836 return SYCLDiagIfDeviceCode(Loc, DiagID);
1837
1838 return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc, DiagID,
1839 FD, *this);
1840 }
1841
1842 Sema::SemaDiagnosticBuilder Sema::Diag(SourceLocation Loc, unsigned DiagID,
1843 bool DeferHint) {
1844 bool IsError = Diags.getDiagnosticIDs()->isDefaultMappingAsError(DiagID);
1845 bool ShouldDefer = getLangOpts().CUDA && LangOpts.GPUDeferDiag &&
1846 DiagnosticIDs::isDeferrable(DiagID) &&
1847 (DeferHint || DeferDiags || !IsError);
1848 auto SetIsLastErrorImmediate = [&](bool Flag) {
1849 if (IsError)
1850 IsLastErrorImmediate = Flag;
1851 };
1852 if (!ShouldDefer) {
1853 SetIsLastErrorImmediate(true);
1854 return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc,
1855 DiagID, getCurFunctionDecl(), *this);
1856 }
1857
1858 SemaDiagnosticBuilder DB = getLangOpts().CUDAIsDevice
1859 ? CUDADiagIfDeviceCode(Loc, DiagID)
1860 : CUDADiagIfHostCode(Loc, DiagID);
1861 SetIsLastErrorImmediate(DB.isImmediate());
1862 return DB;
1863 }
1864
1865 void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
1866 if (isUnevaluatedContext() || Ty.isNull())
1867 return;
1868
1869 // The original idea behind checkTypeSupport is that unused declarations can
1870 // be replaced with an array of bytes of the same size during codegen. Such a
1871 // replacement doesn't seem to be possible for types without a constant byte
1872 // size, like zero-length arrays, so do a deep check for SYCL.
1873 if (D && LangOpts.SYCLIsDevice) {
1874 llvm::DenseSet<QualType> Visited;
1875 deepTypeCheckForSYCLDevice(Loc, Visited, D);
1876 }
1877
1878 Decl *C = cast<Decl>(getCurLexicalContext());
1879
1880 // Memcpy operations for structs containing a member with an unsupported type
1881 // are OK, though.
1882 if (const auto *MD = dyn_cast<CXXMethodDecl>(C)) {
1883 if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
1884 MD->isTrivial())
1885 return;
1886
1887 if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(MD))
1888 if (Ctor->isCopyOrMoveConstructor() && Ctor->isTrivial())
1889 return;
1890 }
1891
1892 // Try to associate errors with the lexical context if it is a function;
1893 // otherwise, with the value declaration.
1894 FunctionDecl *FD = isa<FunctionDecl>(C) ?
cast<FunctionDecl>(C) 1895 : dyn_cast_or_null<FunctionDecl>(D); 1896 1897 auto CheckDeviceType = [&](QualType Ty) { 1898 if (Ty->isDependentType()) 1899 return; 1900 1901 if (Ty->isBitIntType()) { 1902 if (!Context.getTargetInfo().hasBitIntType()) { 1903 PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type); 1904 if (D) 1905 PD << D; 1906 else 1907 PD << "expression"; 1908 targetDiag(Loc, PD, FD) 1909 << false /*show bit size*/ << 0 /*bitsize*/ << false /*return*/ 1910 << Ty << Context.getTargetInfo().getTriple().str(); 1911 } 1912 return; 1913 } 1914 1915 // Check if we are dealing with two 'long double' but with different 1916 // semantics. 1917 bool LongDoubleMismatched = false; 1918 if (Ty->isRealFloatingType() && Context.getTypeSize(Ty) == 128) { 1919 const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(Ty); 1920 if ((&Sem != &llvm::APFloat::PPCDoubleDouble() && 1921 !Context.getTargetInfo().hasFloat128Type()) || 1922 (&Sem == &llvm::APFloat::PPCDoubleDouble() && 1923 !Context.getTargetInfo().hasIbm128Type())) 1924 LongDoubleMismatched = true; 1925 } 1926 1927 if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) || 1928 (Ty->isFloat128Type() && !Context.getTargetInfo().hasFloat128Type()) || 1929 (Ty->isIbm128Type() && !Context.getTargetInfo().hasIbm128Type()) || 1930 (Ty->isIntegerType() && Context.getTypeSize(Ty) == 128 && 1931 !Context.getTargetInfo().hasInt128Type()) || 1932 LongDoubleMismatched) { 1933 PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type); 1934 if (D) 1935 PD << D; 1936 else 1937 PD << "expression"; 1938 1939 if (targetDiag(Loc, PD, FD) 1940 << true /*show bit size*/ 1941 << static_cast<unsigned>(Context.getTypeSize(Ty)) << Ty 1942 << false /*return*/ << Context.getTargetInfo().getTriple().str()) { 1943 if (D) 1944 D->setInvalidDecl(); 1945 } 1946 if (D) 1947 targetDiag(D->getLocation(), diag::note_defined_here, FD) << D; 1948 } 1949 }; 1950 1951 auto CheckType = [&](QualType Ty, bool IsRetTy = false) { 1952 if (LangOpts.SYCLIsDevice || (LangOpts.OpenMP && LangOpts.OpenMPIsDevice) || 1953 LangOpts.CUDAIsDevice) 1954 CheckDeviceType(Ty); 1955 1956 QualType UnqualTy = Ty.getCanonicalType().getUnqualifiedType(); 1957 const TargetInfo &TI = Context.getTargetInfo(); 1958 if (!TI.hasLongDoubleType() && UnqualTy == Context.LongDoubleTy) { 1959 PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type); 1960 if (D) 1961 PD << D; 1962 else 1963 PD << "expression"; 1964 1965 if (Diag(Loc, PD, FD) 1966 << false /*show bit size*/ << 0 << Ty << false /*return*/ 1967 << Context.getTargetInfo().getTriple().str()) { 1968 if (D) 1969 D->setInvalidDecl(); 1970 } 1971 if (D) 1972 targetDiag(D->getLocation(), diag::note_defined_here, FD) << D; 1973 } 1974 1975 bool IsDouble = UnqualTy == Context.DoubleTy; 1976 bool IsFloat = UnqualTy == Context.FloatTy; 1977 if (IsRetTy && !TI.hasFPReturn() && (IsDouble || IsFloat)) { 1978 PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type); 1979 if (D) 1980 PD << D; 1981 else 1982 PD << "expression"; 1983 1984 if (Diag(Loc, PD, FD) 1985 << false /*show bit size*/ << 0 << Ty << true /*return*/ 1986 << Context.getTargetInfo().getTriple().str()) { 1987 if (D) 1988 D->setInvalidDecl(); 1989 } 1990 if (D) 1991 targetDiag(D->getLocation(), diag::note_defined_here, FD) << D; 1992 } 1993 }; 1994 1995 CheckType(Ty); 1996 if (const auto *FPTy = dyn_cast<FunctionProtoType>(Ty)) { 1997 for (const auto &ParamTy : FPTy->param_types()) 1998 CheckType(ParamTy); 1999 CheckType(FPTy->getReturnType(), 
/*IsRetTy=*/true);
2000 }
2001 if (const auto *FNPTy = dyn_cast<FunctionNoProtoType>(Ty))
2002 CheckType(FNPTy->getReturnType(), /*IsRetTy=*/true);
2003 }
2004
2005 /// Looks through the macro-expansion chain for the given
2006 /// location, looking for a macro expansion with the given name.
2007 /// If one is found, returns true and sets the location to that
2008 /// expansion loc.
2009 bool Sema::findMacroSpelling(SourceLocation &locref, StringRef name) {
2010 SourceLocation loc = locref;
2011 if (!loc.isMacroID()) return false;
2012
2013 // There's no good way right now to look at the intermediate
2014 // expansions, so just jump to the expansion location.
2015 loc = getSourceManager().getExpansionLoc(loc);
2016
2017 // If that's written with the name, stop here.
2018 SmallString<16> buffer;
2019 if (getPreprocessor().getSpelling(loc, buffer) == name) {
2020 locref = loc;
2021 return true;
2022 }
2023 return false;
2024 }
2025
2026 /// Determines the active Scope associated with the given declaration
2027 /// context.
2028 ///
2029 /// This routine maps a declaration context to the active Scope object that
2030 /// represents that declaration context in the parser. It is typically used
2031 /// from "scope-less" code (e.g., template instantiation, lazy creation of
2032 /// declarations) that injects a name for name-lookup purposes and, therefore,
2033 /// must update the Scope.
2034 ///
2035 /// \returns The scope corresponding to the given declaration context, or NULL
2036 /// if no such scope is open.
2037 Scope *Sema::getScopeForContext(DeclContext *Ctx) {
2038
2039 if (!Ctx)
2040 return nullptr;
2041
2042 Ctx = Ctx->getPrimaryContext();
2043 for (Scope *S = getCurScope(); S; S = S->getParent()) {
2044 // Ignore scopes that cannot have declarations. This is important for
2045 // out-of-line definitions of static class members.
2046 if (S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope))
2047 if (DeclContext *Entity = S->getEntity())
2048 if (Ctx == Entity->getPrimaryContext())
2049 return S;
2050 }
2051
2052 return nullptr;
2053 }
2054
2055 /// Enter a new function scope.
2056 void Sema::PushFunctionScope() {
2057 if (FunctionScopes.empty() && CachedFunctionScope) {
2058 // Use CachedFunctionScope to avoid allocating memory when possible.
2059 CachedFunctionScope->Clear();
2060 FunctionScopes.push_back(CachedFunctionScope.release());
2061 } else {
2062 FunctionScopes.push_back(new FunctionScopeInfo(getDiagnostics()));
2063 }
2064 if (LangOpts.OpenMP)
2065 pushOpenMPFunctionRegion();
2066 }
2067
2068 void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) {
2069 FunctionScopes.push_back(new BlockScopeInfo(getDiagnostics(),
2070 BlockScope, Block));
2071 }
2072
2073 LambdaScopeInfo *Sema::PushLambdaScope() {
2074 LambdaScopeInfo *const LSI = new LambdaScopeInfo(getDiagnostics());
2075 FunctionScopes.push_back(LSI);
2076 return LSI;
2077 }
2078
2079 void Sema::RecordParsingTemplateParameterDepth(unsigned Depth) {
2080 if (LambdaScopeInfo *const LSI = getCurLambda()) {
2081 LSI->AutoTemplateParameterDepth = Depth;
2082 return;
2083 }
2084 llvm_unreachable(
2085 "Remove assertion if intentionally called in a non-lambda context.");
2086 }
2087
2088 // Check that the type of the VarDecl has an accessible copy constructor and
2089 // resolve its destructor's exception specification.
2090 // This also performs initialization of block variables when they are moved
2091 // to the heap.
It uses the same rules as applicable for implicit moves 2092 // according to the C++ standard in effect ([class.copy.elision]p3). 2093 static void checkEscapingByref(VarDecl *VD, Sema &S) { 2094 QualType T = VD->getType(); 2095 EnterExpressionEvaluationContext scope( 2096 S, Sema::ExpressionEvaluationContext::PotentiallyEvaluated); 2097 SourceLocation Loc = VD->getLocation(); 2098 Expr *VarRef = 2099 new (S.Context) DeclRefExpr(S.Context, VD, false, T, VK_LValue, Loc); 2100 ExprResult Result; 2101 auto IE = InitializedEntity::InitializeBlock(Loc, T); 2102 if (S.getLangOpts().CPlusPlus2b) { 2103 auto *E = ImplicitCastExpr::Create(S.Context, T, CK_NoOp, VarRef, nullptr, 2104 VK_XValue, FPOptionsOverride()); 2105 Result = S.PerformCopyInitialization(IE, SourceLocation(), E); 2106 } else { 2107 Result = S.PerformMoveOrCopyInitialization( 2108 IE, Sema::NamedReturnInfo{VD, Sema::NamedReturnInfo::MoveEligible}, 2109 VarRef); 2110 } 2111 2112 if (!Result.isInvalid()) { 2113 Result = S.MaybeCreateExprWithCleanups(Result); 2114 Expr *Init = Result.getAs<Expr>(); 2115 S.Context.setBlockVarCopyInit(VD, Init, S.canThrow(Init)); 2116 } 2117 2118 // The destructor's exception specification is needed when IRGen generates 2119 // block copy/destroy functions. Resolve it here. 2120 if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl()) 2121 if (CXXDestructorDecl *DD = RD->getDestructor()) { 2122 auto *FPT = DD->getType()->getAs<FunctionProtoType>(); 2123 S.ResolveExceptionSpec(Loc, FPT); 2124 } 2125 } 2126 2127 static void markEscapingByrefs(const FunctionScopeInfo &FSI, Sema &S) { 2128 // Set the EscapingByref flag of __block variables captured by 2129 // escaping blocks. 2130 for (const BlockDecl *BD : FSI.Blocks) { 2131 for (const BlockDecl::Capture &BC : BD->captures()) { 2132 VarDecl *VD = BC.getVariable(); 2133 if (VD->hasAttr<BlocksAttr>()) { 2134 // Nothing to do if this is a __block variable captured by a 2135 // non-escaping block. 2136 if (BD->doesNotEscape()) 2137 continue; 2138 VD->setEscapingByref(); 2139 } 2140 // Check whether the captured variable is or contains an object of 2141 // non-trivial C union type. 2142 QualType CapType = BC.getVariable()->getType(); 2143 if (CapType.hasNonTrivialToPrimitiveDestructCUnion() || 2144 CapType.hasNonTrivialToPrimitiveCopyCUnion()) 2145 S.checkNonTrivialCUnion(BC.getVariable()->getType(), 2146 BD->getCaretLocation(), 2147 Sema::NTCUC_BlockCapture, 2148 Sema::NTCUK_Destruct|Sema::NTCUK_Copy); 2149 } 2150 } 2151 2152 for (VarDecl *VD : FSI.ByrefBlockVars) { 2153 // __block variables might require us to capture a copy-initializer. 2154 if (!VD->isEscapingByref()) 2155 continue; 2156 // It's currently invalid to ever have a __block variable with an 2157 // array type; should we diagnose that here? 2158 // Regardless, we don't want to ignore array nesting when 2159 // constructing this copy. 2160 if (VD->getType()->isStructureOrClassType()) 2161 checkEscapingByref(VD, S); 2162 } 2163 } 2164 2165 /// Pop a function (or block or lambda or captured region) scope from the stack. 2166 /// 2167 /// \param WP The warning policy to use for CFG-based warnings, or null if such 2168 /// warnings should not be produced. 2169 /// \param D The declaration corresponding to this function scope, if producing 2170 /// CFG-based warnings. 2171 /// \param BlockType The type of the block expression, if D is a BlockDecl. 
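/// \returns An owning handle to the popped FunctionScopeInfo; its deleter
/// stashes the scope in Sema's one-element cache for reuse when it is a plain
/// function scope and the cache is free, and deletes it otherwise (see
/// PoppedFunctionScopeDeleter below).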
2172 Sema::PoppedFunctionScopePtr 2173 Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP, 2174 const Decl *D, QualType BlockType) { 2175 assert(!FunctionScopes.empty() && "mismatched push/pop!"); 2176 2177 markEscapingByrefs(*FunctionScopes.back(), *this); 2178 2179 PoppedFunctionScopePtr Scope(FunctionScopes.pop_back_val(), 2180 PoppedFunctionScopeDeleter(this)); 2181 2182 if (LangOpts.OpenMP) 2183 popOpenMPFunctionRegion(Scope.get()); 2184 2185 // Issue any analysis-based warnings. 2186 if (WP && D) 2187 AnalysisWarnings.IssueWarnings(*WP, Scope.get(), D, BlockType); 2188 else 2189 for (const auto &PUD : Scope->PossiblyUnreachableDiags) 2190 Diag(PUD.Loc, PUD.PD); 2191 2192 return Scope; 2193 } 2194 2195 void Sema::PoppedFunctionScopeDeleter:: 2196 operator()(sema::FunctionScopeInfo *Scope) const { 2197 // Stash the function scope for later reuse if it's for a normal function. 2198 if (Scope->isPlainFunction() && !Self->CachedFunctionScope) 2199 Self->CachedFunctionScope.reset(Scope); 2200 else 2201 delete Scope; 2202 } 2203 2204 void Sema::PushCompoundScope(bool IsStmtExpr) { 2205 getCurFunction()->CompoundScopes.push_back(CompoundScopeInfo(IsStmtExpr)); 2206 } 2207 2208 void Sema::PopCompoundScope() { 2209 FunctionScopeInfo *CurFunction = getCurFunction(); 2210 assert(!CurFunction->CompoundScopes.empty() && "mismatched push/pop"); 2211 2212 CurFunction->CompoundScopes.pop_back(); 2213 } 2214 2215 /// Determine whether any errors occurred within this function/method/ 2216 /// block. 2217 bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const { 2218 return getCurFunction()->hasUnrecoverableErrorOccurred(); 2219 } 2220 2221 void Sema::setFunctionHasBranchIntoScope() { 2222 if (!FunctionScopes.empty()) 2223 FunctionScopes.back()->setHasBranchIntoScope(); 2224 } 2225 2226 void Sema::setFunctionHasBranchProtectedScope() { 2227 if (!FunctionScopes.empty()) 2228 FunctionScopes.back()->setHasBranchProtectedScope(); 2229 } 2230 2231 void Sema::setFunctionHasIndirectGoto() { 2232 if (!FunctionScopes.empty()) 2233 FunctionScopes.back()->setHasIndirectGoto(); 2234 } 2235 2236 void Sema::setFunctionHasMustTail() { 2237 if (!FunctionScopes.empty()) 2238 FunctionScopes.back()->setHasMustTail(); 2239 } 2240 2241 BlockScopeInfo *Sema::getCurBlock() { 2242 if (FunctionScopes.empty()) 2243 return nullptr; 2244 2245 auto CurBSI = dyn_cast<BlockScopeInfo>(FunctionScopes.back()); 2246 if (CurBSI && CurBSI->TheDecl && 2247 !CurBSI->TheDecl->Encloses(CurContext)) { 2248 // We have switched contexts due to template instantiation. 2249 assert(!CodeSynthesisContexts.empty()); 2250 return nullptr; 2251 } 2252 2253 return CurBSI; 2254 } 2255 2256 FunctionScopeInfo *Sema::getEnclosingFunction() const { 2257 if (FunctionScopes.empty()) 2258 return nullptr; 2259 2260 for (int e = FunctionScopes.size() - 1; e >= 0; --e) { 2261 if (isa<sema::BlockScopeInfo>(FunctionScopes[e])) 2262 continue; 2263 return FunctionScopes[e]; 2264 } 2265 return nullptr; 2266 } 2267 2268 LambdaScopeInfo *Sema::getEnclosingLambda() const { 2269 for (auto *Scope : llvm::reverse(FunctionScopes)) { 2270 if (auto *LSI = dyn_cast<sema::LambdaScopeInfo>(Scope)) { 2271 if (LSI->Lambda && !LSI->Lambda->Encloses(CurContext)) { 2272 // We have switched contexts due to template instantiation. 2273 // FIXME: We should swap out the FunctionScopes during code synthesis 2274 // so that we don't need to check for this. 
2275 assert(!CodeSynthesisContexts.empty()); 2276 return nullptr; 2277 } 2278 return LSI; 2279 } 2280 } 2281 return nullptr; 2282 } 2283 2284 LambdaScopeInfo *Sema::getCurLambda(bool IgnoreNonLambdaCapturingScope) { 2285 if (FunctionScopes.empty()) 2286 return nullptr; 2287 2288 auto I = FunctionScopes.rbegin(); 2289 if (IgnoreNonLambdaCapturingScope) { 2290 auto E = FunctionScopes.rend(); 2291 while (I != E && isa<CapturingScopeInfo>(*I) && !isa<LambdaScopeInfo>(*I)) 2292 ++I; 2293 if (I == E) 2294 return nullptr; 2295 } 2296 auto *CurLSI = dyn_cast<LambdaScopeInfo>(*I); 2297 if (CurLSI && CurLSI->Lambda && 2298 !CurLSI->Lambda->Encloses(CurContext)) { 2299 // We have switched contexts due to template instantiation. 2300 assert(!CodeSynthesisContexts.empty()); 2301 return nullptr; 2302 } 2303 2304 return CurLSI; 2305 } 2306 2307 // We have a generic lambda if we parsed auto parameters, or we have 2308 // an associated template parameter list. 2309 LambdaScopeInfo *Sema::getCurGenericLambda() { 2310 if (LambdaScopeInfo *LSI = getCurLambda()) { 2311 return (LSI->TemplateParams.size() || 2312 LSI->GLTemplateParameterList) ? LSI : nullptr; 2313 } 2314 return nullptr; 2315 } 2316 2317 2318 void Sema::ActOnComment(SourceRange Comment) { 2319 if (!LangOpts.RetainCommentsFromSystemHeaders && 2320 SourceMgr.isInSystemHeader(Comment.getBegin())) 2321 return; 2322 RawComment RC(SourceMgr, Comment, LangOpts.CommentOpts, false); 2323 if (RC.isAlmostTrailingComment()) { 2324 SourceRange MagicMarkerRange(Comment.getBegin(), 2325 Comment.getBegin().getLocWithOffset(3)); 2326 StringRef MagicMarkerText; 2327 switch (RC.getKind()) { 2328 case RawComment::RCK_OrdinaryBCPL: 2329 MagicMarkerText = "///<"; 2330 break; 2331 case RawComment::RCK_OrdinaryC: 2332 MagicMarkerText = "/**<"; 2333 break; 2334 default: 2335 llvm_unreachable("if this is an almost Doxygen comment, " 2336 "it should be ordinary"); 2337 } 2338 Diag(Comment.getBegin(), diag::warn_not_a_doxygen_trailing_member_comment) << 2339 FixItHint::CreateReplacement(MagicMarkerRange, MagicMarkerText); 2340 } 2341 Context.addComment(RC); 2342 } 2343 2344 // Pin this vtable to this file. 2345 ExternalSemaSource::~ExternalSemaSource() {} 2346 char ExternalSemaSource::ID; 2347 2348 void ExternalSemaSource::ReadMethodPool(Selector Sel) { } 2349 void ExternalSemaSource::updateOutOfDateSelector(Selector Sel) { } 2350 2351 void ExternalSemaSource::ReadKnownNamespaces( 2352 SmallVectorImpl<NamespaceDecl *> &Namespaces) { 2353 } 2354 2355 void ExternalSemaSource::ReadUndefinedButUsed( 2356 llvm::MapVector<NamedDecl *, SourceLocation> &Undefined) {} 2357 2358 void ExternalSemaSource::ReadMismatchingDeleteExpressions(llvm::MapVector< 2359 FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> &) {} 2360 2361 /// Figure out if an expression could be turned into a call. 2362 /// 2363 /// Use this when trying to recover from an error where the programmer may have 2364 /// written just the name of a function instead of actually calling it. 2365 /// 2366 /// \param E - The expression to examine. 2367 /// \param ZeroArgCallReturnTy - If the expression can be turned into a call 2368 /// with no arguments, this parameter is set to the type returned by such a 2369 /// call; otherwise, it is set to an empty QualType. 2370 /// \param OverloadSet - If the expression is an overloaded function 2371 /// name, this parameter is populated with the decls of the various overloads. 
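///
/// For example, if the programmer wrote just 'f' where both 'void f()' and
/// 'void f(int)' are declared, this returns true and sets
/// \p ZeroArgCallReturnTy to 'void', since the zero-argument call 'f()' would
/// be valid; both declarations are added to \p OverloadSet.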
2372 bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, 2373 UnresolvedSetImpl &OverloadSet) { 2374 ZeroArgCallReturnTy = QualType(); 2375 OverloadSet.clear(); 2376 2377 const OverloadExpr *Overloads = nullptr; 2378 bool IsMemExpr = false; 2379 if (E.getType() == Context.OverloadTy) { 2380 OverloadExpr::FindResult FR = OverloadExpr::find(const_cast<Expr*>(&E)); 2381 2382 // Ignore overloads that are pointer-to-member constants. 2383 if (FR.HasFormOfMemberPointer) 2384 return false; 2385 2386 Overloads = FR.Expression; 2387 } else if (E.getType() == Context.BoundMemberTy) { 2388 Overloads = dyn_cast<UnresolvedMemberExpr>(E.IgnoreParens()); 2389 IsMemExpr = true; 2390 } 2391 2392 bool Ambiguous = false; 2393 bool IsMV = false; 2394 2395 if (Overloads) { 2396 for (OverloadExpr::decls_iterator it = Overloads->decls_begin(), 2397 DeclsEnd = Overloads->decls_end(); it != DeclsEnd; ++it) { 2398 OverloadSet.addDecl(*it); 2399 2400 // Check whether the function is a non-template, non-member which takes no 2401 // arguments. 2402 if (IsMemExpr) 2403 continue; 2404 if (const FunctionDecl *OverloadDecl 2405 = dyn_cast<FunctionDecl>((*it)->getUnderlyingDecl())) { 2406 if (OverloadDecl->getMinRequiredArguments() == 0) { 2407 if (!ZeroArgCallReturnTy.isNull() && !Ambiguous && 2408 (!IsMV || !(OverloadDecl->isCPUDispatchMultiVersion() || 2409 OverloadDecl->isCPUSpecificMultiVersion()))) { 2410 ZeroArgCallReturnTy = QualType(); 2411 Ambiguous = true; 2412 } else { 2413 ZeroArgCallReturnTy = OverloadDecl->getReturnType(); 2414 IsMV = OverloadDecl->isCPUDispatchMultiVersion() || 2415 OverloadDecl->isCPUSpecificMultiVersion(); 2416 } 2417 } 2418 } 2419 } 2420 2421 // If it's not a member, use better machinery to try to resolve the call 2422 if (!IsMemExpr) 2423 return !ZeroArgCallReturnTy.isNull(); 2424 } 2425 2426 // Attempt to call the member with no arguments - this will correctly handle 2427 // member templates with defaults/deduction of template arguments, overloads 2428 // with default arguments, etc. 2429 if (IsMemExpr && !E.isTypeDependent()) { 2430 Sema::TentativeAnalysisScope Trap(*this); 2431 ExprResult R = BuildCallToMemberFunction(nullptr, &E, SourceLocation(), 2432 None, SourceLocation()); 2433 if (R.isUsable()) { 2434 ZeroArgCallReturnTy = R.get()->getType(); 2435 return true; 2436 } 2437 return false; 2438 } 2439 2440 if (const DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E.IgnoreParens())) { 2441 if (const FunctionDecl *Fun = dyn_cast<FunctionDecl>(DeclRef->getDecl())) { 2442 if (Fun->getMinRequiredArguments() == 0) 2443 ZeroArgCallReturnTy = Fun->getReturnType(); 2444 return true; 2445 } 2446 } 2447 2448 // We don't have an expression that's convenient to get a FunctionDecl from, 2449 // but we can at least check if the type is "function of 0 arguments". 2450 QualType ExprTy = E.getType(); 2451 const FunctionType *FunTy = nullptr; 2452 QualType PointeeTy = ExprTy->getPointeeType(); 2453 if (!PointeeTy.isNull()) 2454 FunTy = PointeeTy->getAs<FunctionType>(); 2455 if (!FunTy) 2456 FunTy = ExprTy->getAs<FunctionType>(); 2457 2458 if (const FunctionProtoType *FPT = 2459 dyn_cast_or_null<FunctionProtoType>(FunTy)) { 2460 if (FPT->getNumParams() == 0) 2461 ZeroArgCallReturnTy = FunTy->getReturnType(); 2462 return true; 2463 } 2464 return false; 2465 } 2466 2467 /// Give notes for a set of overloads. 2468 /// 2469 /// A companion to tryExprAsCall. 
In cases when the name that the programmer 2470 /// wrote was an overloaded function, we may be able to make some guesses about 2471 /// plausible overloads based on their return types; such guesses can be handed 2472 /// off to this method to be emitted as notes. 2473 /// 2474 /// \param Overloads - The overloads to note. 2475 /// \param FinalNoteLoc - If we've suppressed printing some overloads due to 2476 /// -fshow-overloads=best, this is the location to attach to the note about too 2477 /// many candidates. Typically this will be the location of the original 2478 /// ill-formed expression. 2479 static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads, 2480 const SourceLocation FinalNoteLoc) { 2481 unsigned ShownOverloads = 0; 2482 unsigned SuppressedOverloads = 0; 2483 for (UnresolvedSetImpl::iterator It = Overloads.begin(), 2484 DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) { 2485 if (ShownOverloads >= S.Diags.getNumOverloadCandidatesToShow()) { 2486 ++SuppressedOverloads; 2487 continue; 2488 } 2489 2490 NamedDecl *Fn = (*It)->getUnderlyingDecl(); 2491 // Don't print overloads for non-default multiversioned functions. 2492 if (const auto *FD = Fn->getAsFunction()) { 2493 if (FD->isMultiVersion() && FD->hasAttr<TargetAttr>() && 2494 !FD->getAttr<TargetAttr>()->isDefaultVersion()) 2495 continue; 2496 } 2497 S.Diag(Fn->getLocation(), diag::note_possible_target_of_call); 2498 ++ShownOverloads; 2499 } 2500 2501 S.Diags.overloadCandidatesShown(ShownOverloads); 2502 2503 if (SuppressedOverloads) 2504 S.Diag(FinalNoteLoc, diag::note_ovl_too_many_candidates) 2505 << SuppressedOverloads; 2506 } 2507 2508 static void notePlausibleOverloads(Sema &S, SourceLocation Loc, 2509 const UnresolvedSetImpl &Overloads, 2510 bool (*IsPlausibleResult)(QualType)) { 2511 if (!IsPlausibleResult) 2512 return noteOverloads(S, Overloads, Loc); 2513 2514 UnresolvedSet<2> PlausibleOverloads; 2515 for (OverloadExpr::decls_iterator It = Overloads.begin(), 2516 DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) { 2517 const FunctionDecl *OverloadDecl = cast<FunctionDecl>(*It); 2518 QualType OverloadResultTy = OverloadDecl->getReturnType(); 2519 if (IsPlausibleResult(OverloadResultTy)) 2520 PlausibleOverloads.addDecl(It.getDecl()); 2521 } 2522 noteOverloads(S, PlausibleOverloads, Loc); 2523 } 2524 2525 /// Determine whether the given expression can be called by just 2526 /// putting parentheses after it. Notably, expressions with unary 2527 /// operators can't be because the unary operator will start parsing 2528 /// outside the call. 
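///
/// For example, 'f' can be recovered as 'f()' by appending parentheses, but
/// '*fp' cannot, since '*fp()' parses as '*(fp())' rather than '(*fp)()'.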
2529 static bool IsCallableWithAppend(Expr *E) { 2530 E = E->IgnoreImplicit(); 2531 return (!isa<CStyleCastExpr>(E) && 2532 !isa<UnaryOperator>(E) && 2533 !isa<BinaryOperator>(E) && 2534 !isa<CXXOperatorCallExpr>(E)); 2535 } 2536 2537 static bool IsCPUDispatchCPUSpecificMultiVersion(const Expr *E) { 2538 if (const auto *UO = dyn_cast<UnaryOperator>(E)) 2539 E = UO->getSubExpr(); 2540 2541 if (const auto *ULE = dyn_cast<UnresolvedLookupExpr>(E)) { 2542 if (ULE->getNumDecls() == 0) 2543 return false; 2544 2545 const NamedDecl *ND = *ULE->decls_begin(); 2546 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) 2547 return FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion(); 2548 } 2549 return false; 2550 } 2551 2552 bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, 2553 bool ForceComplain, 2554 bool (*IsPlausibleResult)(QualType)) { 2555 SourceLocation Loc = E.get()->getExprLoc(); 2556 SourceRange Range = E.get()->getSourceRange(); 2557 2558 QualType ZeroArgCallTy; 2559 UnresolvedSet<4> Overloads; 2560 if (tryExprAsCall(*E.get(), ZeroArgCallTy, Overloads) && 2561 !ZeroArgCallTy.isNull() && 2562 (!IsPlausibleResult || IsPlausibleResult(ZeroArgCallTy))) { 2563 // At this point, we know E is potentially callable with 0 2564 // arguments and that it returns something of a reasonable type, 2565 // so we can emit a fixit and carry on pretending that E was 2566 // actually a CallExpr. 2567 SourceLocation ParenInsertionLoc = getLocForEndOfToken(Range.getEnd()); 2568 bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get()); 2569 Diag(Loc, PD) << /*zero-arg*/ 1 << IsMV << Range 2570 << (IsCallableWithAppend(E.get()) 2571 ? FixItHint::CreateInsertion(ParenInsertionLoc, "()") 2572 : FixItHint()); 2573 if (!IsMV) 2574 notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult); 2575 2576 // FIXME: Try this before emitting the fixit, and suppress diagnostics 2577 // while doing so. 2578 E = BuildCallExpr(nullptr, E.get(), Range.getEnd(), None, 2579 Range.getEnd().getLocWithOffset(1)); 2580 return true; 2581 } 2582 2583 if (!ForceComplain) return false; 2584 2585 bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get()); 2586 Diag(Loc, PD) << /*not zero-arg*/ 0 << IsMV << Range; 2587 if (!IsMV) 2588 notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult); 2589 E = ExprError(); 2590 return true; 2591 } 2592 2593 IdentifierInfo *Sema::getSuperIdentifier() const { 2594 if (!Ident_super) 2595 Ident_super = &Context.Idents.get("super"); 2596 return Ident_super; 2597 } 2598 2599 IdentifierInfo *Sema::getFloat128Identifier() const { 2600 if (!Ident___float128) 2601 Ident___float128 = &Context.Idents.get("__float128"); 2602 return Ident___float128; 2603 } 2604 2605 void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD, 2606 CapturedRegionKind K, 2607 unsigned OpenMPCaptureLevel) { 2608 auto *CSI = new CapturedRegionScopeInfo( 2609 getDiagnostics(), S, CD, RD, CD->getContextParam(), K, 2610 (getLangOpts().OpenMP && K == CR_OpenMP) ? getOpenMPNestingLevel() : 0, 2611 OpenMPCaptureLevel); 2612 CSI->ReturnType = Context.VoidTy; 2613 FunctionScopes.push_back(CSI); 2614 } 2615 2616 CapturedRegionScopeInfo *Sema::getCurCapturedRegion() { 2617 if (FunctionScopes.empty()) 2618 return nullptr; 2619 2620 return dyn_cast<CapturedRegionScopeInfo>(FunctionScopes.back()); 2621 } 2622 2623 const llvm::MapVector<FieldDecl *, Sema::DeleteLocs> & 2624 Sema::getMismatchingDeleteExpressions() const { 2625 return DeleteExprs; 2626 } 2627