1 //===--- Sema.cpp - AST Builder and Semantic Analysis Implementation ------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements the actions class which performs semantic analysis and 10 // builds an AST out of a parse stream. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "UsedDeclVisitor.h" 15 #include "clang/AST/ASTContext.h" 16 #include "clang/AST/ASTDiagnostic.h" 17 #include "clang/AST/Decl.h" 18 #include "clang/AST/DeclCXX.h" 19 #include "clang/AST/DeclFriend.h" 20 #include "clang/AST/DeclObjC.h" 21 #include "clang/AST/Expr.h" 22 #include "clang/AST/ExprCXX.h" 23 #include "clang/AST/PrettyDeclStackTrace.h" 24 #include "clang/AST/StmtCXX.h" 25 #include "clang/Basic/DarwinSDKInfo.h" 26 #include "clang/Basic/DiagnosticOptions.h" 27 #include "clang/Basic/PartialDiagnostic.h" 28 #include "clang/Basic/SourceManager.h" 29 #include "clang/Basic/Stack.h" 30 #include "clang/Basic/TargetInfo.h" 31 #include "clang/Lex/HeaderSearch.h" 32 #include "clang/Lex/HeaderSearchOptions.h" 33 #include "clang/Lex/Preprocessor.h" 34 #include "clang/Sema/CXXFieldCollector.h" 35 #include "clang/Sema/DelayedDiagnostic.h" 36 #include "clang/Sema/ExternalSemaSource.h" 37 #include "clang/Sema/Initialization.h" 38 #include "clang/Sema/MultiplexExternalSemaSource.h" 39 #include "clang/Sema/ObjCMethodList.h" 40 #include "clang/Sema/Scope.h" 41 #include "clang/Sema/ScopeInfo.h" 42 #include "clang/Sema/SemaConsumer.h" 43 #include "clang/Sema/SemaInternal.h" 44 #include "clang/Sema/TemplateDeduction.h" 45 #include "clang/Sema/TemplateInstCallback.h" 46 #include "clang/Sema/TypoCorrection.h" 47 #include "llvm/ADT/DenseMap.h" 48 #include "llvm/ADT/SmallPtrSet.h" 49 #include "llvm/Support/TimeProfiler.h" 50 51 using namespace clang; 52 using namespace sema; 53 54 SourceLocation Sema::getLocForEndOfToken(SourceLocation Loc, unsigned Offset) { 55 return Lexer::getLocForEndOfToken(Loc, Offset, SourceMgr, LangOpts); 56 } 57 58 ModuleLoader &Sema::getModuleLoader() const { return PP.getModuleLoader(); } 59 60 DarwinSDKInfo * 61 Sema::getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc, 62 StringRef Platform) { 63 auto *SDKInfo = getDarwinSDKInfoForAvailabilityChecking(); 64 if (!SDKInfo && !WarnedDarwinSDKInfoMissing) { 65 Diag(Loc, diag::warn_missing_sdksettings_for_availability_checking) 66 << Platform; 67 WarnedDarwinSDKInfoMissing = true; 68 } 69 return SDKInfo; 70 } 71 72 DarwinSDKInfo *Sema::getDarwinSDKInfoForAvailabilityChecking() { 73 if (CachedDarwinSDKInfo) 74 return CachedDarwinSDKInfo->get(); 75 auto SDKInfo = parseDarwinSDKInfo( 76 PP.getFileManager().getVirtualFileSystem(), 77 PP.getHeaderSearchInfo().getHeaderSearchOpts().Sysroot); 78 if (SDKInfo && *SDKInfo) { 79 CachedDarwinSDKInfo = std::make_unique<DarwinSDKInfo>(std::move(**SDKInfo)); 80 return CachedDarwinSDKInfo->get(); 81 } 82 if (!SDKInfo) 83 llvm::consumeError(SDKInfo.takeError()); 84 CachedDarwinSDKInfo = std::unique_ptr<DarwinSDKInfo>(); 85 return nullptr; 86 } 87 88 IdentifierInfo * 89 Sema::InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName, 90 unsigned int Index) { 91 std::string InventedName; 92 llvm::raw_string_ostream OS(InventedName); 93 94 if (!ParamName) 95 OS << "auto:" << Index 
+ 1; 96 else 97 OS << ParamName->getName() << ":auto"; 98 99 OS.flush(); 100 return &Context.Idents.get(OS.str()); 101 } 102 103 PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context, 104 const Preprocessor &PP) { 105 PrintingPolicy Policy = Context.getPrintingPolicy(); 106 // In diagnostics, we print _Bool as bool if the latter is defined as the 107 // former. 108 Policy.Bool = Context.getLangOpts().Bool; 109 if (!Policy.Bool) { 110 if (const MacroInfo *BoolMacro = PP.getMacroInfo(Context.getBoolName())) { 111 Policy.Bool = BoolMacro->isObjectLike() && 112 BoolMacro->getNumTokens() == 1 && 113 BoolMacro->getReplacementToken(0).is(tok::kw__Bool); 114 } 115 } 116 117 // Shorten the data output if needed 118 Policy.EntireContentsOfLargeArray = false; 119 120 return Policy; 121 } 122 123 void Sema::ActOnTranslationUnitScope(Scope *S) { 124 TUScope = S; 125 PushDeclContext(S, Context.getTranslationUnitDecl()); 126 } 127 128 namespace clang { 129 namespace sema { 130 131 class SemaPPCallbacks : public PPCallbacks { 132 Sema *S = nullptr; 133 llvm::SmallVector<SourceLocation, 8> IncludeStack; 134 135 public: 136 void set(Sema &S) { this->S = &S; } 137 138 void reset() { S = nullptr; } 139 140 virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason, 141 SrcMgr::CharacteristicKind FileType, 142 FileID PrevFID) override { 143 if (!S) 144 return; 145 switch (Reason) { 146 case EnterFile: { 147 SourceManager &SM = S->getSourceManager(); 148 SourceLocation IncludeLoc = SM.getIncludeLoc(SM.getFileID(Loc)); 149 if (IncludeLoc.isValid()) { 150 if (llvm::timeTraceProfilerEnabled()) { 151 const FileEntry *FE = SM.getFileEntryForID(SM.getFileID(Loc)); 152 llvm::timeTraceProfilerBegin( 153 "Source", FE != nullptr ? FE->getName() : StringRef("<unknown>")); 154 } 155 156 IncludeStack.push_back(IncludeLoc); 157 S->DiagnoseNonDefaultPragmaAlignPack( 158 Sema::PragmaAlignPackDiagnoseKind::NonDefaultStateAtInclude, 159 IncludeLoc); 160 } 161 break; 162 } 163 case ExitFile: 164 if (!IncludeStack.empty()) { 165 if (llvm::timeTraceProfilerEnabled()) 166 llvm::timeTraceProfilerEnd(); 167 168 S->DiagnoseNonDefaultPragmaAlignPack( 169 Sema::PragmaAlignPackDiagnoseKind::ChangedStateAtExit, 170 IncludeStack.pop_back_val()); 171 } 172 break; 173 default: 174 break; 175 } 176 } 177 }; 178 179 } // end namespace sema 180 } // end namespace clang 181 182 const unsigned Sema::MaxAlignmentExponent; 183 const uint64_t Sema::MaximumAlignment; 184 185 Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, 186 TranslationUnitKind TUKind, CodeCompleteConsumer *CodeCompleter) 187 : ExternalSource(nullptr), isMultiplexExternalSource(false), 188 CurFPFeatures(pp.getLangOpts()), LangOpts(pp.getLangOpts()), PP(pp), 189 Context(ctxt), Consumer(consumer), Diags(PP.getDiagnostics()), 190 SourceMgr(PP.getSourceManager()), CollectStats(false), 191 CodeCompleter(CodeCompleter), CurContext(nullptr), 192 OriginalLexicalContext(nullptr), MSStructPragmaOn(false), 193 MSPointerToMemberRepresentationMethod( 194 LangOpts.getMSPointerToMemberRepresentationMethod()), 195 VtorDispStack(LangOpts.getVtorDispMode()), 196 AlignPackStack(AlignPackInfo(getLangOpts().XLPragmaPack)), 197 DataSegStack(nullptr), BSSSegStack(nullptr), ConstSegStack(nullptr), 198 CodeSegStack(nullptr), FpPragmaStack(FPOptionsOverride()), 199 CurInitSeg(nullptr), VisContext(nullptr), 200 PragmaAttributeCurrentTargetDecl(nullptr), 201 IsBuildingRecoveryCallExpr(false), LateTemplateParser(nullptr), 202 LateTemplateParserCleanup(nullptr), 
OpaqueParser(nullptr), IdResolver(pp), 203 StdExperimentalNamespaceCache(nullptr), StdInitializerList(nullptr), 204 StdCoroutineTraitsCache(nullptr), CXXTypeInfoDecl(nullptr), 205 MSVCGuidDecl(nullptr), NSNumberDecl(nullptr), NSValueDecl(nullptr), 206 NSStringDecl(nullptr), StringWithUTF8StringMethod(nullptr), 207 ValueWithBytesObjCTypeMethod(nullptr), NSArrayDecl(nullptr), 208 ArrayWithObjectsMethod(nullptr), NSDictionaryDecl(nullptr), 209 DictionaryWithObjectsMethod(nullptr), GlobalNewDeleteDeclared(false), 210 TUKind(TUKind), NumSFINAEErrors(0), 211 FullyCheckedComparisonCategories( 212 static_cast<unsigned>(ComparisonCategoryType::Last) + 1), 213 SatisfactionCache(Context), AccessCheckingSFINAE(false), 214 InNonInstantiationSFINAEContext(false), NonInstantiationEntries(0), 215 ArgumentPackSubstitutionIndex(-1), CurrentInstantiationScope(nullptr), 216 DisableTypoCorrection(false), TyposCorrected(0), AnalysisWarnings(*this), 217 ThreadSafetyDeclCache(nullptr), VarDataSharingAttributesStack(nullptr), 218 CurScope(nullptr), Ident_super(nullptr), Ident___float128(nullptr) { 219 assert(pp.TUKind == TUKind); 220 TUScope = nullptr; 221 isConstantEvaluatedOverride = false; 222 223 LoadedExternalKnownNamespaces = false; 224 for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I) 225 NSNumberLiteralMethods[I] = nullptr; 226 227 if (getLangOpts().ObjC) 228 NSAPIObj.reset(new NSAPI(Context)); 229 230 if (getLangOpts().CPlusPlus) 231 FieldCollector.reset(new CXXFieldCollector()); 232 233 // Tell diagnostics how to render things from the AST library. 234 Diags.SetArgToStringFn(&FormatASTNodeDiagnosticArgument, &Context); 235 236 ExprEvalContexts.emplace_back( 237 ExpressionEvaluationContext::PotentiallyEvaluated, 0, CleanupInfo{}, 238 nullptr, ExpressionEvaluationContextRecord::EK_Other); 239 240 // Initialization of data sharing attributes stack for OpenMP 241 InitDataSharingAttributesStack(); 242 243 std::unique_ptr<sema::SemaPPCallbacks> Callbacks = 244 std::make_unique<sema::SemaPPCallbacks>(); 245 SemaPPCallbackHandler = Callbacks.get(); 246 PP.addPPCallbacks(std::move(Callbacks)); 247 SemaPPCallbackHandler->set(*this); 248 if (getLangOpts().getFPEvalMethod() == LangOptions::FEM_UnsetOnCommandLine) 249 // Use setting from TargetInfo. 250 PP.setCurrentFPEvalMethod(SourceLocation(), 251 ctxt.getTargetInfo().getFPEvalMethod()); 252 else 253 // Set initial value of __FLT_EVAL_METHOD__ from the command line. 254 PP.setCurrentFPEvalMethod(SourceLocation(), 255 getLangOpts().getFPEvalMethod()); 256 CurFPFeatures.setFPEvalMethod(PP.getCurrentFPEvalMethod()); 257 } 258 259 // Anchor Sema's type info to this TU. 260 void Sema::anchor() {} 261 262 void Sema::addImplicitTypedef(StringRef Name, QualType T) { 263 DeclarationName DN = &Context.Idents.get(Name); 264 if (IdResolver.begin(DN) == IdResolver.end()) 265 PushOnScopeChains(Context.buildImplicitTypedef(T, Name), TUScope); 266 } 267 268 void Sema::Initialize() { 269 if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer)) 270 SC->InitializeSema(*this); 271 272 // Tell the external Sema source about this Sema object. 273 if (ExternalSemaSource *ExternalSema 274 = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource())) 275 ExternalSema->InitializeSema(*this); 276 277 // This needs to happen after ExternalSemaSource::InitializeSema(this) or we 278 // will not be able to merge any duplicate __va_list_tag decls correctly. 
  VAListTagName = PP.getIdentifierInfo("__va_list_tag");

  if (!TUScope)
    return;

  // Initialize predefined 128-bit integer types, if needed.
  if (Context.getTargetInfo().hasInt128Type() ||
      (Context.getAuxTargetInfo() &&
       Context.getAuxTargetInfo()->hasInt128Type())) {
    // If either of the 128-bit integer types are unavailable to name lookup,
    // define them now.
    DeclarationName Int128 = &Context.Idents.get("__int128_t");
    if (IdResolver.begin(Int128) == IdResolver.end())
      PushOnScopeChains(Context.getInt128Decl(), TUScope);

    DeclarationName UInt128 = &Context.Idents.get("__uint128_t");
    if (IdResolver.begin(UInt128) == IdResolver.end())
      PushOnScopeChains(Context.getUInt128Decl(), TUScope);
  }

  // Initialize predefined Objective-C types:
  if (getLangOpts().ObjC) {
    // If 'SEL' does not yet refer to any declarations, make it refer to the
    // predefined 'SEL'.
    DeclarationName SEL = &Context.Idents.get("SEL");
    if (IdResolver.begin(SEL) == IdResolver.end())
      PushOnScopeChains(Context.getObjCSelDecl(), TUScope);

    // If 'id' does not yet refer to any declarations, make it refer to the
    // predefined 'id'.
    DeclarationName Id = &Context.Idents.get("id");
    if (IdResolver.begin(Id) == IdResolver.end())
      PushOnScopeChains(Context.getObjCIdDecl(), TUScope);

    // Create the built-in typedef for 'Class'.
    DeclarationName Class = &Context.Idents.get("Class");
    if (IdResolver.begin(Class) == IdResolver.end())
      PushOnScopeChains(Context.getObjCClassDecl(), TUScope);

    // Create the built-in forward declaration for 'Protocol'.
    DeclarationName Protocol = &Context.Idents.get("Protocol");
    if (IdResolver.begin(Protocol) == IdResolver.end())
      PushOnScopeChains(Context.getObjCProtocolDecl(), TUScope);
  }

  // Create the internal type for the *StringMakeConstantString builtins.
  DeclarationName ConstantString = &Context.Idents.get("__NSConstantString");
  if (IdResolver.begin(ConstantString) == IdResolver.end())
    PushOnScopeChains(Context.getCFConstantStringDecl(), TUScope);

  // Initialize Microsoft "predefined C++ types".
  if (getLangOpts().MSVCCompat) {
    if (getLangOpts().CPlusPlus &&
        IdResolver.begin(&Context.Idents.get("type_info")) == IdResolver.end())
      PushOnScopeChains(Context.buildImplicitRecord("type_info", TTK_Class),
                        TUScope);

    addImplicitTypedef("size_t", Context.getSizeType());
  }

  // Initialize predefined OpenCL types and supported extensions and (optional)
  // core features.
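  //
  // Illustrative example (an assumption for exposition, not upstream code):
  // when compiling with e.g. -cl-std=CL2.0, the OpenCL block below is what
  // lets a kernel use these names without including any header:
  //
  //   sampler_t Smp;       // from addImplicitTypedef("sampler_t", ...)
  //   atomic_int Counter;  // from addImplicitTypedef("atomic_int", ...)
  //
  // Note that addImplicitTypedef() only registers a name when ordinary
  // lookup does not already find a declaration for it.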
  if (getLangOpts().OpenCL) {
    getOpenCLOptions().addSupport(
        Context.getTargetInfo().getSupportedOpenCLOpts(), getLangOpts());
    addImplicitTypedef("sampler_t", Context.OCLSamplerTy);
    addImplicitTypedef("event_t", Context.OCLEventTy);
    auto OCLCompatibleVersion = getLangOpts().getOpenCLCompatibleVersion();
    if (OCLCompatibleVersion >= 200) {
      if (getLangOpts().OpenCLCPlusPlus || getLangOpts().Blocks) {
        addImplicitTypedef("clk_event_t", Context.OCLClkEventTy);
        addImplicitTypedef("queue_t", Context.OCLQueueTy);
      }
      if (getLangOpts().OpenCLPipes)
        addImplicitTypedef("reserve_id_t", Context.OCLReserveIDTy);
      addImplicitTypedef("atomic_int", Context.getAtomicType(Context.IntTy));
      addImplicitTypedef("atomic_uint",
                         Context.getAtomicType(Context.UnsignedIntTy));
      addImplicitTypedef("atomic_float",
                         Context.getAtomicType(Context.FloatTy));
      // OpenCL C v2.0 s6.13.11.6 requires that atomic_flag is implemented as
      // a 32-bit integer, and OpenCL C v2.0 s6.1.1 guarantees that int is
      // always 32 bits wide.
      addImplicitTypedef("atomic_flag", Context.getAtomicType(Context.IntTy));

      // OpenCL v2.0 s6.13.11.6:
      // - The atomic_long and atomic_ulong types are supported if the
      //   cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics
      //   extensions are supported.
      // - The atomic_double type is only supported if double precision
      //   is supported and the cl_khr_int64_base_atomics and
      //   cl_khr_int64_extended_atomics extensions are supported.
      // - If the device address space is 64-bits, the data types
      //   atomic_intptr_t, atomic_uintptr_t, atomic_size_t and
      //   atomic_ptrdiff_t are supported if the cl_khr_int64_base_atomics and
      //   cl_khr_int64_extended_atomics extensions are supported.
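      //
      // In the code below, the pointer-width-dependent atomic typedefs are
      // added immediately on 32-bit targets, but on 64-bit targets only after
      // both cl_khr_int64_* atomic extensions are known to be supported,
      // since they then require 64-bit atomic operations.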

      auto AddPointerSizeDependentTypes = [&]() {
        auto AtomicSizeT = Context.getAtomicType(Context.getSizeType());
        auto AtomicIntPtrT = Context.getAtomicType(Context.getIntPtrType());
        auto AtomicUIntPtrT = Context.getAtomicType(Context.getUIntPtrType());
        auto AtomicPtrDiffT =
            Context.getAtomicType(Context.getPointerDiffType());
        addImplicitTypedef("atomic_size_t", AtomicSizeT);
        addImplicitTypedef("atomic_intptr_t", AtomicIntPtrT);
        addImplicitTypedef("atomic_uintptr_t", AtomicUIntPtrT);
        addImplicitTypedef("atomic_ptrdiff_t", AtomicPtrDiffT);
      };

      if (Context.getTypeSize(Context.getSizeType()) == 32) {
        AddPointerSizeDependentTypes();
      }

      if (getOpenCLOptions().isSupported("cl_khr_fp16", getLangOpts())) {
        auto AtomicHalfT = Context.getAtomicType(Context.HalfTy);
        addImplicitTypedef("atomic_half", AtomicHalfT);
      }

      std::vector<QualType> Atomic64BitTypes;
      if (getOpenCLOptions().isSupported("cl_khr_int64_base_atomics",
                                         getLangOpts()) &&
          getOpenCLOptions().isSupported("cl_khr_int64_extended_atomics",
                                         getLangOpts())) {
        if (getOpenCLOptions().isSupported("cl_khr_fp64", getLangOpts())) {
          auto AtomicDoubleT = Context.getAtomicType(Context.DoubleTy);
          addImplicitTypedef("atomic_double", AtomicDoubleT);
          Atomic64BitTypes.push_back(AtomicDoubleT);
        }
        auto AtomicLongT = Context.getAtomicType(Context.LongTy);
        auto AtomicULongT = Context.getAtomicType(Context.UnsignedLongTy);
        addImplicitTypedef("atomic_long", AtomicLongT);
        addImplicitTypedef("atomic_ulong", AtomicULongT);

        if (Context.getTypeSize(Context.getSizeType()) == 64) {
          AddPointerSizeDependentTypes();
        }
      }
    }

#define EXT_OPAQUE_TYPE(ExtType, Id, Ext)                                      \
  if (getOpenCLOptions().isSupported(#Ext, getLangOpts())) {                   \
    addImplicitTypedef(#ExtType, Context.Id##Ty);                              \
  }
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  if (Context.getTargetInfo().hasAArch64SVETypes()) {
#define SVE_TYPE(Name, Id, SingletonId)                                        \
  addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/AArch64SVEACLETypes.def"
  }

  if (Context.getTargetInfo().getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size)                                    \
  addImplicitTypedef(#Name, Context.Id##Ty);
#include "clang/Basic/PPCTypes.def"
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size)                                    \
  addImplicitTypedef(#Name, Context.Id##Ty);
#include "clang/Basic/PPCTypes.def"
  }

  if (Context.getTargetInfo().hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId)                                        \
  addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/RISCVVTypes.def"
  }

  if (Context.getTargetInfo().hasBuiltinMSVaList()) {
    DeclarationName MSVaList = &Context.Idents.get("__builtin_ms_va_list");
    if (IdResolver.begin(MSVaList) == IdResolver.end())
      PushOnScopeChains(Context.getBuiltinMSVaListDecl(), TUScope);
  }

  DeclarationName BuiltinVaList = &Context.Idents.get("__builtin_va_list");
  if (IdResolver.begin(BuiltinVaList) == IdResolver.end())
    PushOnScopeChains(Context.getBuiltinVaListDecl(), TUScope);
}

Sema::~Sema() {
  assert(InstantiatingSpecializations.empty() &&
         "failed to clean up an InstantiatingTemplate?");

  if (VisContext) FreeVisContext();

  // Kill all the active scopes.
  for (sema::FunctionScopeInfo *FSI : FunctionScopes)
    delete FSI;

  // Tell the SemaConsumer to forget about us; we're going out of scope.
  if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
    SC->ForgetSema();

  // Detach from the external Sema source.
  if (ExternalSemaSource *ExternalSema
        = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
    ExternalSema->ForgetSema();

  // If Sema's ExternalSource is the multiplexer - we own it.
  if (isMultiplexExternalSource)
    delete ExternalSource;

  // Delete cached satisfactions.
  std::vector<ConstraintSatisfaction *> Satisfactions;
  Satisfactions.reserve(SatisfactionCache.size());
  for (auto &Node : SatisfactionCache)
    Satisfactions.push_back(&Node);
  for (auto *Node : Satisfactions)
    delete Node;

  threadSafety::threadSafetyCleanup(ThreadSafetyDeclCache);

  // Destroys data sharing attributes stack for OpenMP
  DestroyDataSharingAttributesStack();

  // Detach from the PP callback handler which outlives Sema since it's owned
  // by the preprocessor.
  SemaPPCallbackHandler->reset();
}

void Sema::warnStackExhausted(SourceLocation Loc) {
  // Only warn about this once.
  if (!WarnedStackExhausted) {
    Diag(Loc, diag::warn_stack_exhausted);
    WarnedStackExhausted = true;
  }
}

void Sema::runWithSufficientStackSpace(SourceLocation Loc,
                                       llvm::function_ref<void()> Fn) {
  clang::runWithSufficientStackSpace([&] { warnStackExhausted(Loc); }, Fn);
}

/// makeUnavailableInSystemHeader - There is an error in the current
/// context. If we're still in a system header, and we can plausibly
/// make the relevant declaration unavailable instead of erroring, do
/// so and return true.
bool Sema::makeUnavailableInSystemHeader(SourceLocation loc,
                                      UnavailableAttr::ImplicitReason reason) {
  // If we're not in a function, it's an error.
  FunctionDecl *fn = dyn_cast<FunctionDecl>(CurContext);
  if (!fn) return false;

  // If we're in template instantiation, it's an error.
  if (inTemplateInstantiation())
    return false;

  // If that function's not in a system header, it's an error.
  if (!Context.getSourceManager().isInSystemHeader(loc))
    return false;

  // If the function is already unavailable, it's not an error.
  if (fn->hasAttr<UnavailableAttr>()) return true;

  fn->addAttr(UnavailableAttr::CreateImplicit(Context, "", reason, loc));
  return true;
}

ASTMutationListener *Sema::getASTMutationListener() const {
  return getASTConsumer().GetASTMutationListener();
}

/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void Sema::addExternalSource(ExternalSemaSource *E) {
  assert(E && "Cannot use with NULL ptr");

  if (!ExternalSource) {
    ExternalSource = E;
    return;
  }

  if (isMultiplexExternalSource)
    static_cast<MultiplexExternalSemaSource*>(ExternalSource)->addSource(*E);
  else {
    ExternalSource = new MultiplexExternalSemaSource(*ExternalSource, *E);
    isMultiplexExternalSource = true;
  }
}

/// Print out statistics about the semantic analysis.
564 void Sema::PrintStats() const { 565 llvm::errs() << "\n*** Semantic Analysis Stats:\n"; 566 llvm::errs() << NumSFINAEErrors << " SFINAE diagnostics trapped.\n"; 567 568 BumpAlloc.PrintStats(); 569 AnalysisWarnings.PrintStats(); 570 } 571 572 void Sema::diagnoseNullableToNonnullConversion(QualType DstType, 573 QualType SrcType, 574 SourceLocation Loc) { 575 Optional<NullabilityKind> ExprNullability = SrcType->getNullability(Context); 576 if (!ExprNullability || (*ExprNullability != NullabilityKind::Nullable && 577 *ExprNullability != NullabilityKind::NullableResult)) 578 return; 579 580 Optional<NullabilityKind> TypeNullability = DstType->getNullability(Context); 581 if (!TypeNullability || *TypeNullability != NullabilityKind::NonNull) 582 return; 583 584 Diag(Loc, diag::warn_nullability_lost) << SrcType << DstType; 585 } 586 587 void Sema::diagnoseZeroToNullptrConversion(CastKind Kind, const Expr* E) { 588 if (Diags.isIgnored(diag::warn_zero_as_null_pointer_constant, 589 E->getBeginLoc())) 590 return; 591 // nullptr only exists from C++11 on, so don't warn on its absence earlier. 592 if (!getLangOpts().CPlusPlus11) 593 return; 594 595 if (Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer) 596 return; 597 if (E->IgnoreParenImpCasts()->getType()->isNullPtrType()) 598 return; 599 600 // Don't diagnose the conversion from a 0 literal to a null pointer argument 601 // in a synthesized call to operator<=>. 602 if (!CodeSynthesisContexts.empty() && 603 CodeSynthesisContexts.back().Kind == 604 CodeSynthesisContext::RewritingOperatorAsSpaceship) 605 return; 606 607 // If it is a macro from system header, and if the macro name is not "NULL", 608 // do not warn. 609 SourceLocation MaybeMacroLoc = E->getBeginLoc(); 610 if (Diags.getSuppressSystemWarnings() && 611 SourceMgr.isInSystemMacro(MaybeMacroLoc) && 612 !findMacroSpelling(MaybeMacroLoc, "NULL")) 613 return; 614 615 Diag(E->getBeginLoc(), diag::warn_zero_as_null_pointer_constant) 616 << FixItHint::CreateReplacement(E->getSourceRange(), "nullptr"); 617 } 618 619 /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit cast. 620 /// If there is already an implicit cast, merge into the existing one. 621 /// The result is of the given category. 622 ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty, 623 CastKind Kind, ExprValueKind VK, 624 const CXXCastPath *BasePath, 625 CheckedConversionKind CCK) { 626 #ifndef NDEBUG 627 if (VK == VK_PRValue && !E->isPRValue()) { 628 switch (Kind) { 629 default: 630 llvm_unreachable( 631 ("can't implicitly cast glvalue to prvalue with this cast " 632 "kind: " + 633 std::string(CastExpr::getCastKindName(Kind))) 634 .c_str()); 635 case CK_Dependent: 636 case CK_LValueToRValue: 637 case CK_ArrayToPointerDecay: 638 case CK_FunctionToPointerDecay: 639 case CK_ToVoid: 640 case CK_NonAtomicToAtomic: 641 break; 642 } 643 } 644 assert((VK == VK_PRValue || Kind == CK_Dependent || !E->isPRValue()) && 645 "can't cast prvalue to glvalue"); 646 #endif 647 648 diagnoseNullableToNonnullConversion(Ty, E->getType(), E->getBeginLoc()); 649 diagnoseZeroToNullptrConversion(Kind, E); 650 651 QualType ExprTy = Context.getCanonicalType(E->getType()); 652 QualType TypeTy = Context.getCanonicalType(Ty); 653 654 if (ExprTy == TypeTy) 655 return E; 656 657 if (Kind == CK_ArrayToPointerDecay) { 658 // C++1z [conv.array]: The temporary materialization conversion is applied. 659 // We also use this to fuel C++ DR1213, which applies to C++11 onwards. 
660 if (getLangOpts().CPlusPlus && E->isPRValue()) { 661 // The temporary is an lvalue in C++98 and an xvalue otherwise. 662 ExprResult Materialized = CreateMaterializeTemporaryExpr( 663 E->getType(), E, !getLangOpts().CPlusPlus11); 664 if (Materialized.isInvalid()) 665 return ExprError(); 666 E = Materialized.get(); 667 } 668 // C17 6.7.1p6 footnote 124: The implementation can treat any register 669 // declaration simply as an auto declaration. However, whether or not 670 // addressable storage is actually used, the address of any part of an 671 // object declared with storage-class specifier register cannot be 672 // computed, either explicitly(by use of the unary & operator as discussed 673 // in 6.5.3.2) or implicitly(by converting an array name to a pointer as 674 // discussed in 6.3.2.1).Thus, the only operator that can be applied to an 675 // array declared with storage-class specifier register is sizeof. 676 if (VK == VK_PRValue && !getLangOpts().CPlusPlus && !E->isPRValue()) { 677 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) { 678 if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) { 679 if (VD->getStorageClass() == SC_Register) { 680 Diag(E->getExprLoc(), diag::err_typecheck_address_of) 681 << /*register variable*/ 3 << E->getSourceRange(); 682 return ExprError(); 683 } 684 } 685 } 686 } 687 } 688 689 if (ImplicitCastExpr *ImpCast = dyn_cast<ImplicitCastExpr>(E)) { 690 if (ImpCast->getCastKind() == Kind && (!BasePath || BasePath->empty())) { 691 ImpCast->setType(Ty); 692 ImpCast->setValueKind(VK); 693 return E; 694 } 695 } 696 697 return ImplicitCastExpr::Create(Context, Ty, Kind, E, BasePath, VK, 698 CurFPFeatureOverrides()); 699 } 700 701 /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding 702 /// to the conversion from scalar type ScalarTy to the Boolean type. 703 CastKind Sema::ScalarTypeToBooleanCastKind(QualType ScalarTy) { 704 switch (ScalarTy->getScalarTypeKind()) { 705 case Type::STK_Bool: return CK_NoOp; 706 case Type::STK_CPointer: return CK_PointerToBoolean; 707 case Type::STK_BlockPointer: return CK_PointerToBoolean; 708 case Type::STK_ObjCObjectPointer: return CK_PointerToBoolean; 709 case Type::STK_MemberPointer: return CK_MemberPointerToBoolean; 710 case Type::STK_Integral: return CK_IntegralToBoolean; 711 case Type::STK_Floating: return CK_FloatingToBoolean; 712 case Type::STK_IntegralComplex: return CK_IntegralComplexToBoolean; 713 case Type::STK_FloatingComplex: return CK_FloatingComplexToBoolean; 714 case Type::STK_FixedPoint: return CK_FixedPointToBoolean; 715 } 716 llvm_unreachable("unknown scalar type kind"); 717 } 718 719 /// Used to prune the decls of Sema's UnusedFileScopedDecls vector. 720 static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) { 721 if (D->getMostRecentDecl()->isUsed()) 722 return true; 723 724 if (D->isExternallyVisible()) 725 return true; 726 727 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 728 // If this is a function template and none of its specializations is used, 729 // we should warn. 730 if (FunctionTemplateDecl *Template = FD->getDescribedFunctionTemplate()) 731 for (const auto *Spec : Template->specializations()) 732 if (ShouldRemoveFromUnused(SemaRef, Spec)) 733 return true; 734 735 // UnusedFileScopedDecls stores the first declaration. 736 // The declaration may have become definition so check again. 
737 const FunctionDecl *DeclToCheck; 738 if (FD->hasBody(DeclToCheck)) 739 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck); 740 741 // Later redecls may add new information resulting in not having to warn, 742 // so check again. 743 DeclToCheck = FD->getMostRecentDecl(); 744 if (DeclToCheck != FD) 745 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck); 746 } 747 748 if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { 749 // If a variable usable in constant expressions is referenced, 750 // don't warn if it isn't used: if the value of a variable is required 751 // for the computation of a constant expression, it doesn't make sense to 752 // warn even if the variable isn't odr-used. (isReferenced doesn't 753 // precisely reflect that, but it's a decent approximation.) 754 if (VD->isReferenced() && 755 VD->mightBeUsableInConstantExpressions(SemaRef->Context)) 756 return true; 757 758 if (VarTemplateDecl *Template = VD->getDescribedVarTemplate()) 759 // If this is a variable template and none of its specializations is used, 760 // we should warn. 761 for (const auto *Spec : Template->specializations()) 762 if (ShouldRemoveFromUnused(SemaRef, Spec)) 763 return true; 764 765 // UnusedFileScopedDecls stores the first declaration. 766 // The declaration may have become definition so check again. 767 const VarDecl *DeclToCheck = VD->getDefinition(); 768 if (DeclToCheck) 769 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck); 770 771 // Later redecls may add new information resulting in not having to warn, 772 // so check again. 773 DeclToCheck = VD->getMostRecentDecl(); 774 if (DeclToCheck != VD) 775 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck); 776 } 777 778 return false; 779 } 780 781 static bool isFunctionOrVarDeclExternC(NamedDecl *ND) { 782 if (auto *FD = dyn_cast<FunctionDecl>(ND)) 783 return FD->isExternC(); 784 return cast<VarDecl>(ND)->isExternC(); 785 } 786 787 /// Determine whether ND is an external-linkage function or variable whose 788 /// type has no linkage. 789 bool Sema::isExternalWithNoLinkageType(ValueDecl *VD) { 790 // Note: it's not quite enough to check whether VD has UniqueExternalLinkage, 791 // because we also want to catch the case where its type has VisibleNoLinkage, 792 // which does not affect the linkage of VD. 793 return getLangOpts().CPlusPlus && VD->hasExternalFormalLinkage() && 794 !isExternalFormalLinkage(VD->getType()->getLinkage()) && 795 !isFunctionOrVarDeclExternC(VD); 796 } 797 798 /// Obtains a sorted list of functions and variables that are undefined but 799 /// ODR-used. 800 void Sema::getUndefinedButUsed( 801 SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined) { 802 for (const auto &UndefinedUse : UndefinedButUsed) { 803 NamedDecl *ND = UndefinedUse.first; 804 805 // Ignore attributes that have become invalid. 806 if (ND->isInvalidDecl()) continue; 807 808 // __attribute__((weakref)) is basically a definition. 809 if (ND->hasAttr<WeakRefAttr>()) continue; 810 811 if (isa<CXXDeductionGuideDecl>(ND)) 812 continue; 813 814 if (ND->hasAttr<DLLImportAttr>() || ND->hasAttr<DLLExportAttr>()) { 815 // An exported function will always be emitted when defined, so even if 816 // the function is inline, it doesn't have to be emitted in this TU. An 817 // imported function implies that it has been exported somewhere else. 
818 continue; 819 } 820 821 if (FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) { 822 if (FD->isDefined()) 823 continue; 824 if (FD->isExternallyVisible() && 825 !isExternalWithNoLinkageType(FD) && 826 !FD->getMostRecentDecl()->isInlined() && 827 !FD->hasAttr<ExcludeFromExplicitInstantiationAttr>()) 828 continue; 829 if (FD->getBuiltinID()) 830 continue; 831 } else { 832 auto *VD = cast<VarDecl>(ND); 833 if (VD->hasDefinition() != VarDecl::DeclarationOnly) 834 continue; 835 if (VD->isExternallyVisible() && 836 !isExternalWithNoLinkageType(VD) && 837 !VD->getMostRecentDecl()->isInline() && 838 !VD->hasAttr<ExcludeFromExplicitInstantiationAttr>()) 839 continue; 840 841 // Skip VarDecls that lack formal definitions but which we know are in 842 // fact defined somewhere. 843 if (VD->isKnownToBeDefined()) 844 continue; 845 } 846 847 Undefined.push_back(std::make_pair(ND, UndefinedUse.second)); 848 } 849 } 850 851 /// checkUndefinedButUsed - Check for undefined objects with internal linkage 852 /// or that are inline. 853 static void checkUndefinedButUsed(Sema &S) { 854 if (S.UndefinedButUsed.empty()) return; 855 856 // Collect all the still-undefined entities with internal linkage. 857 SmallVector<std::pair<NamedDecl *, SourceLocation>, 16> Undefined; 858 S.getUndefinedButUsed(Undefined); 859 if (Undefined.empty()) return; 860 861 for (auto Undef : Undefined) { 862 ValueDecl *VD = cast<ValueDecl>(Undef.first); 863 SourceLocation UseLoc = Undef.second; 864 865 if (S.isExternalWithNoLinkageType(VD)) { 866 // C++ [basic.link]p8: 867 // A type without linkage shall not be used as the type of a variable 868 // or function with external linkage unless 869 // -- the entity has C language linkage 870 // -- the entity is not odr-used or is defined in the same TU 871 // 872 // As an extension, accept this in cases where the type is externally 873 // visible, since the function or variable actually can be defined in 874 // another translation unit in that case. 875 S.Diag(VD->getLocation(), isExternallyVisible(VD->getType()->getLinkage()) 876 ? diag::ext_undefined_internal_type 877 : diag::err_undefined_internal_type) 878 << isa<VarDecl>(VD) << VD; 879 } else if (!VD->isExternallyVisible()) { 880 // FIXME: We can promote this to an error. The function or variable can't 881 // be defined anywhere else, so the program must necessarily violate the 882 // one definition rule. 883 bool IsImplicitBase = false; 884 if (const auto *BaseD = dyn_cast<FunctionDecl>(VD)) { 885 auto *DVAttr = BaseD->getAttr<OMPDeclareVariantAttr>(); 886 if (DVAttr && !DVAttr->getTraitInfo().isExtensionActive( 887 llvm::omp::TraitProperty:: 888 implementation_extension_disable_implicit_base)) { 889 const auto *Func = cast<FunctionDecl>( 890 cast<DeclRefExpr>(DVAttr->getVariantFuncRef())->getDecl()); 891 IsImplicitBase = BaseD->isImplicit() && 892 Func->getIdentifier()->isMangledOpenMPVariantName(); 893 } 894 } 895 if (!S.getLangOpts().OpenMP || !IsImplicitBase) 896 S.Diag(VD->getLocation(), diag::warn_undefined_internal) 897 << isa<VarDecl>(VD) << VD; 898 } else if (auto *FD = dyn_cast<FunctionDecl>(VD)) { 899 (void)FD; 900 assert(FD->getMostRecentDecl()->isInlined() && 901 "used object requires definition but isn't inline or internal?"); 902 // FIXME: This is ill-formed; we should reject. 
903 S.Diag(VD->getLocation(), diag::warn_undefined_inline) << VD; 904 } else { 905 assert(cast<VarDecl>(VD)->getMostRecentDecl()->isInline() && 906 "used var requires definition but isn't inline or internal?"); 907 S.Diag(VD->getLocation(), diag::err_undefined_inline_var) << VD; 908 } 909 if (UseLoc.isValid()) 910 S.Diag(UseLoc, diag::note_used_here); 911 } 912 913 S.UndefinedButUsed.clear(); 914 } 915 916 void Sema::LoadExternalWeakUndeclaredIdentifiers() { 917 if (!ExternalSource) 918 return; 919 920 SmallVector<std::pair<IdentifierInfo *, WeakInfo>, 4> WeakIDs; 921 ExternalSource->ReadWeakUndeclaredIdentifiers(WeakIDs); 922 for (auto &WeakID : WeakIDs) 923 WeakUndeclaredIdentifiers.insert(WeakID); 924 } 925 926 927 typedef llvm::DenseMap<const CXXRecordDecl*, bool> RecordCompleteMap; 928 929 /// Returns true, if all methods and nested classes of the given 930 /// CXXRecordDecl are defined in this translation unit. 931 /// 932 /// Should only be called from ActOnEndOfTranslationUnit so that all 933 /// definitions are actually read. 934 static bool MethodsAndNestedClassesComplete(const CXXRecordDecl *RD, 935 RecordCompleteMap &MNCComplete) { 936 RecordCompleteMap::iterator Cache = MNCComplete.find(RD); 937 if (Cache != MNCComplete.end()) 938 return Cache->second; 939 if (!RD->isCompleteDefinition()) 940 return false; 941 bool Complete = true; 942 for (DeclContext::decl_iterator I = RD->decls_begin(), 943 E = RD->decls_end(); 944 I != E && Complete; ++I) { 945 if (const CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(*I)) 946 Complete = M->isDefined() || M->isDefaulted() || 947 (M->isPure() && !isa<CXXDestructorDecl>(M)); 948 else if (const FunctionTemplateDecl *F = dyn_cast<FunctionTemplateDecl>(*I)) 949 // If the template function is marked as late template parsed at this 950 // point, it has not been instantiated and therefore we have not 951 // performed semantic analysis on it yet, so we cannot know if the type 952 // can be considered complete. 953 Complete = !F->getTemplatedDecl()->isLateTemplateParsed() && 954 F->getTemplatedDecl()->isDefined(); 955 else if (const CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(*I)) { 956 if (R->isInjectedClassName()) 957 continue; 958 if (R->hasDefinition()) 959 Complete = MethodsAndNestedClassesComplete(R->getDefinition(), 960 MNCComplete); 961 else 962 Complete = false; 963 } 964 } 965 MNCComplete[RD] = Complete; 966 return Complete; 967 } 968 969 /// Returns true, if the given CXXRecordDecl is fully defined in this 970 /// translation unit, i.e. all methods are defined or pure virtual and all 971 /// friends, friend functions and nested classes are fully defined in this 972 /// translation unit. 973 /// 974 /// Should only be called from ActOnEndOfTranslationUnit so that all 975 /// definitions are actually read. 976 static bool IsRecordFullyDefined(const CXXRecordDecl *RD, 977 RecordCompleteMap &RecordsComplete, 978 RecordCompleteMap &MNCComplete) { 979 RecordCompleteMap::iterator Cache = RecordsComplete.find(RD); 980 if (Cache != RecordsComplete.end()) 981 return Cache->second; 982 bool Complete = MethodsAndNestedClassesComplete(RD, MNCComplete); 983 for (CXXRecordDecl::friend_iterator I = RD->friend_begin(), 984 E = RD->friend_end(); 985 I != E && Complete; ++I) { 986 // Check if friend classes and methods are complete. 987 if (TypeSourceInfo *TSI = (*I)->getFriendType()) { 988 // Friend classes are available as the TypeSourceInfo of the FriendDecl. 
      if (CXXRecordDecl *FriendD = TSI->getType()->getAsCXXRecordDecl())
        Complete = MethodsAndNestedClassesComplete(FriendD, MNCComplete);
      else
        Complete = false;
    } else {
      // Friend functions are available through the NamedDecl of FriendDecl.
      if (const FunctionDecl *FD =
              dyn_cast<FunctionDecl>((*I)->getFriendDecl()))
        Complete = FD->isDefined();
      else
        // This is a template friend, give up.
        Complete = false;
    }
  }
  RecordsComplete[RD] = Complete;
  return Complete;
}

void Sema::emitAndClearUnusedLocalTypedefWarnings() {
  if (ExternalSource)
    ExternalSource->ReadUnusedLocalTypedefNameCandidates(
        UnusedLocalTypedefNameCandidates);
  for (const TypedefNameDecl *TD : UnusedLocalTypedefNameCandidates) {
    if (TD->isReferenced())
      continue;
    Diag(TD->getLocation(), diag::warn_unused_local_typedef)
        << isa<TypeAliasDecl>(TD) << TD->getDeclName();
  }
  UnusedLocalTypedefNameCandidates.clear();
}

/// This is called before the very first declaration in the translation unit
/// is parsed. Note that the ASTContext may have already injected some
/// declarations.
void Sema::ActOnStartOfTranslationUnit() {
  if (getLangOpts().ModulesTS &&
      (getLangOpts().getCompilingModule() == LangOptions::CMK_ModuleInterface ||
       getLangOpts().getCompilingModule() == LangOptions::CMK_None)) {
    // We start in an implied global module fragment.
    SourceLocation StartOfTU =
        SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID());
    ActOnGlobalModuleFragmentDecl(StartOfTU);
    ModuleScopes.back().ImplicitGlobalModuleFragment = true;
  }
}

void Sema::ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind) {
  // No explicit actions are required at the end of the global module fragment.
  if (Kind == TUFragmentKind::Global)
    return;

  // Transfer late parsed template instantiations over to the pending template
  // instantiation list. During normal compilation, the late template parser
  // will be installed and instantiating these templates will succeed.
  //
  // If we are building a TU prefix for serialization, it is also safe to
  // transfer these over, even though they are not parsed. The end of the TU
  // should be outside of any eager template instantiation scope, so when this
  // AST is deserialized, these templates will not be parsed until the end of
  // the combined TU.
  PendingInstantiations.insert(PendingInstantiations.end(),
                               LateParsedInstantiations.begin(),
                               LateParsedInstantiations.end());
  LateParsedInstantiations.clear();

  // If DefineUsedVTables ends up marking any virtual member functions it
  // might lead to more pending template instantiations, which we then need
  // to instantiate.
  DefineUsedVTables();

  // C++: Perform implicit template instantiations.
  //
  // FIXME: When we perform these implicit instantiations, we do not
  // carefully keep track of the point of instantiation (C++ [temp.point]).
  // This means that name lookup that occurs within the template
  // instantiation will always happen at the end of the translation unit,
  // so it will find some names that are not required to be found. This is
  // valid, but we could do better by diagnosing if an instantiation uses a
  // name that was not visible at its first point of instantiation.
  if (ExternalSource) {
    // Load pending instantiations from the external source.
    SmallVector<PendingImplicitInstantiation, 4> Pending;
    ExternalSource->ReadPendingInstantiations(Pending);
    for (auto PII : Pending)
      if (auto Func = dyn_cast<FunctionDecl>(PII.first))
        Func->setInstantiationIsPending(true);
    PendingInstantiations.insert(PendingInstantiations.begin(),
                                 Pending.begin(), Pending.end());
  }

  {
    llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
    PerformPendingInstantiations();
  }

  emitDeferredDiags();

  assert(LateParsedInstantiations.empty() &&
         "end of TU template instantiation should not create more "
         "late-parsed templates");

  // Report diagnostics for uncorrected delayed typos. Ideally all of them
  // should have been corrected by that time, but it is very hard to cover all
  // cases in practice.
  for (const auto &Typo : DelayedTypos) {
    // We pass an empty TypoCorrection to indicate no correction was performed.
    Typo.second.DiagHandler(TypoCorrection());
  }
  DelayedTypos.clear();
}

/// ActOnEndOfTranslationUnit - This is called at the very end of the
/// translation unit when EOF is reached and all but the top-level scope is
/// popped.
void Sema::ActOnEndOfTranslationUnit() {
  assert(DelayedDiagnostics.getCurrentPool() == nullptr
         && "reached end of translation unit with a pool attached?");

  // If code completion is enabled, don't perform any end-of-translation-unit
  // work.
  if (PP.isCodeCompletionEnabled())
    return;

  // Complete translation units and modules define vtables and perform implicit
  // instantiations. PCH files do not.
  if (TUKind != TU_Prefix) {
    DiagnoseUseOfUnimplementedSelectors();

    ActOnEndOfTranslationUnitFragment(
        !ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
                                     Module::PrivateModuleFragment
            ? TUFragmentKind::Private
            : TUFragmentKind::Normal);

    if (LateTemplateParserCleanup)
      LateTemplateParserCleanup(OpaqueParser);

    CheckDelayedMemberExceptionSpecs();
  } else {
    // If we are building a TU prefix for serialization, it is safe to transfer
    // these over, even though they are not parsed. The end of the TU should be
    // outside of any eager template instantiation scope, so when this AST is
    // deserialized, these templates will not be parsed until the end of the
    // combined TU.
    PendingInstantiations.insert(PendingInstantiations.end(),
                                 LateParsedInstantiations.begin(),
                                 LateParsedInstantiations.end());
    LateParsedInstantiations.clear();

    if (LangOpts.PCHInstantiateTemplates) {
      llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
      PerformPendingInstantiations();
    }
  }

  DiagnoseUnterminatedPragmaAlignPack();
  DiagnoseUnterminatedPragmaAttribute();

  // All delayed member exception specs should be checked or we end up accepting
  // incompatible declarations.
  assert(DelayedOverridingExceptionSpecChecks.empty());
  assert(DelayedEquivalentExceptionSpecChecks.empty());

  // All dllexport classes should have been processed already.
  assert(DelayedDllExportClasses.empty());
  assert(DelayedDllExportMemberFunctions.empty());

  // Remove file scoped decls that turned out to be used.
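  // (ShouldRemoveFromUnused(), defined earlier in this file, is the predicate
  //  deciding whether a recorded declaration no longer deserves a warning;
  //  the erase-remove idiom below drops those entries.)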
  UnusedFileScopedDecls.erase(
      std::remove_if(UnusedFileScopedDecls.begin(ExternalSource),
                     UnusedFileScopedDecls.end(),
                     [this](const DeclaratorDecl *DD) {
                       return ShouldRemoveFromUnused(this, DD);
                     }),
      UnusedFileScopedDecls.end());

  if (TUKind == TU_Prefix) {
    // Translation unit prefixes don't need any of the checking below.
    if (!PP.isIncrementalProcessingEnabled())
      TUScope = nullptr;
    return;
  }

  // Check for #pragma weak identifiers that were never declared
  LoadExternalWeakUndeclaredIdentifiers();
  for (auto WeakID : WeakUndeclaredIdentifiers) {
    if (WeakID.second.getUsed())
      continue;

    Decl *PrevDecl = LookupSingleName(TUScope, WeakID.first, SourceLocation(),
                                      LookupOrdinaryName);
    if (PrevDecl != nullptr &&
        !(isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl)))
      Diag(WeakID.second.getLocation(), diag::warn_attribute_wrong_decl_type)
          << "'weak'" << ExpectedVariableOrFunction;
    else
      Diag(WeakID.second.getLocation(), diag::warn_weak_identifier_undeclared)
          << WeakID.first;
  }

  if (LangOpts.CPlusPlus11 &&
      !Diags.isIgnored(diag::warn_delegating_ctor_cycle, SourceLocation()))
    CheckDelegatingCtorCycles();

  if (!Diags.hasErrorOccurred()) {
    if (ExternalSource)
      ExternalSource->ReadUndefinedButUsed(UndefinedButUsed);
    checkUndefinedButUsed(*this);
  }

  // A global-module-fragment is only permitted within a module unit.
  bool DiagnosedMissingModuleDeclaration = false;
  if (!ModuleScopes.empty() &&
      ModuleScopes.back().Module->Kind == Module::GlobalModuleFragment &&
      !ModuleScopes.back().ImplicitGlobalModuleFragment) {
    Diag(ModuleScopes.back().BeginLoc,
         diag::err_module_declaration_missing_after_global_module_introducer);
    DiagnosedMissingModuleDeclaration = true;
  }

  if (TUKind == TU_Module) {
    // If we are building a module interface unit, we need to have seen the
    // module declaration by now.
    if (getLangOpts().getCompilingModule() ==
            LangOptions::CMK_ModuleInterface &&
        (ModuleScopes.empty() ||
         !ModuleScopes.back().Module->isModulePurview()) &&
        !DiagnosedMissingModuleDeclaration) {
      // FIXME: Make a better guess as to where to put the module declaration.
      Diag(getSourceManager().getLocForStartOfFile(
               getSourceManager().getMainFileID()),
           diag::err_module_declaration_missing);
    }

    // If we are building a module, resolve all of the exported declarations
    // now.
    if (Module *CurrentModule = PP.getCurrentModule()) {
      ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap();

      SmallVector<Module *, 2> Stack;
      Stack.push_back(CurrentModule);
      while (!Stack.empty()) {
        Module *Mod = Stack.pop_back_val();

        // Resolve the exported declarations and conflicts.
        // FIXME: Actually complain, once we figure out how to teach the
        // diagnostic client to deal with complaints in the module map at this
        // point.
        ModMap.resolveExports(Mod, /*Complain=*/false);
        ModMap.resolveUses(Mod, /*Complain=*/false);
        ModMap.resolveConflicts(Mod, /*Complain=*/false);

        // Queue the submodules, so their exports will also be resolved.
1242 Stack.append(Mod->submodule_begin(), Mod->submodule_end()); 1243 } 1244 } 1245 1246 // Warnings emitted in ActOnEndOfTranslationUnit() should be emitted for 1247 // modules when they are built, not every time they are used. 1248 emitAndClearUnusedLocalTypedefWarnings(); 1249 } 1250 1251 // C99 6.9.2p2: 1252 // A declaration of an identifier for an object that has file 1253 // scope without an initializer, and without a storage-class 1254 // specifier or with the storage-class specifier static, 1255 // constitutes a tentative definition. If a translation unit 1256 // contains one or more tentative definitions for an identifier, 1257 // and the translation unit contains no external definition for 1258 // that identifier, then the behavior is exactly as if the 1259 // translation unit contains a file scope declaration of that 1260 // identifier, with the composite type as of the end of the 1261 // translation unit, with an initializer equal to 0. 1262 llvm::SmallSet<VarDecl *, 32> Seen; 1263 for (TentativeDefinitionsType::iterator 1264 T = TentativeDefinitions.begin(ExternalSource), 1265 TEnd = TentativeDefinitions.end(); 1266 T != TEnd; ++T) { 1267 VarDecl *VD = (*T)->getActingDefinition(); 1268 1269 // If the tentative definition was completed, getActingDefinition() returns 1270 // null. If we've already seen this variable before, insert()'s second 1271 // return value is false. 1272 if (!VD || VD->isInvalidDecl() || !Seen.insert(VD).second) 1273 continue; 1274 1275 if (const IncompleteArrayType *ArrayT 1276 = Context.getAsIncompleteArrayType(VD->getType())) { 1277 // Set the length of the array to 1 (C99 6.9.2p5). 1278 Diag(VD->getLocation(), diag::warn_tentative_incomplete_array); 1279 llvm::APInt One(Context.getTypeSize(Context.getSizeType()), true); 1280 QualType T = Context.getConstantArrayType(ArrayT->getElementType(), One, 1281 nullptr, ArrayType::Normal, 0); 1282 VD->setType(T); 1283 } else if (RequireCompleteType(VD->getLocation(), VD->getType(), 1284 diag::err_tentative_def_incomplete_type)) 1285 VD->setInvalidDecl(); 1286 1287 // No initialization is performed for a tentative definition. 1288 CheckCompleteVariableDeclaration(VD); 1289 1290 // Notify the consumer that we've completed a tentative definition. 1291 if (!VD->isInvalidDecl()) 1292 Consumer.CompleteTentativeDefinition(VD); 1293 } 1294 1295 for (auto D : ExternalDeclarations) { 1296 if (!D || D->isInvalidDecl() || D->getPreviousDecl() || !D->isUsed()) 1297 continue; 1298 1299 Consumer.CompleteExternalDeclaration(D); 1300 } 1301 1302 // If there were errors, disable 'unused' warnings since they will mostly be 1303 // noise. Don't warn for a use from a module: either we should warn on all 1304 // file-scope declarations in modules or not at all, but whether the 1305 // declaration is used is immaterial. 1306 if (!Diags.hasErrorOccurred() && TUKind != TU_Module) { 1307 // Output warning for unused file scoped decls. 1308 for (UnusedFileScopedDeclsType::iterator 1309 I = UnusedFileScopedDecls.begin(ExternalSource), 1310 E = UnusedFileScopedDecls.end(); I != E; ++I) { 1311 if (ShouldRemoveFromUnused(this, *I)) 1312 continue; 1313 1314 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) { 1315 const FunctionDecl *DiagD; 1316 if (!FD->hasBody(DiagD)) 1317 DiagD = FD; 1318 if (DiagD->isDeleted()) 1319 continue; // Deleted functions are supposed to be unused. 
1320 if (DiagD->isReferenced()) { 1321 if (isa<CXXMethodDecl>(DiagD)) 1322 Diag(DiagD->getLocation(), diag::warn_unneeded_member_function) 1323 << DiagD; 1324 else { 1325 if (FD->getStorageClass() == SC_Static && 1326 !FD->isInlineSpecified() && 1327 !SourceMgr.isInMainFile( 1328 SourceMgr.getExpansionLoc(FD->getLocation()))) 1329 Diag(DiagD->getLocation(), 1330 diag::warn_unneeded_static_internal_decl) 1331 << DiagD; 1332 else 1333 Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl) 1334 << /*function*/ 0 << DiagD; 1335 } 1336 } else { 1337 if (FD->getDescribedFunctionTemplate()) 1338 Diag(DiagD->getLocation(), diag::warn_unused_template) 1339 << /*function*/ 0 << DiagD; 1340 else 1341 Diag(DiagD->getLocation(), isa<CXXMethodDecl>(DiagD) 1342 ? diag::warn_unused_member_function 1343 : diag::warn_unused_function) 1344 << DiagD; 1345 } 1346 } else { 1347 const VarDecl *DiagD = cast<VarDecl>(*I)->getDefinition(); 1348 if (!DiagD) 1349 DiagD = cast<VarDecl>(*I); 1350 if (DiagD->isReferenced()) { 1351 Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl) 1352 << /*variable*/ 1 << DiagD; 1353 } else if (DiagD->getType().isConstQualified()) { 1354 const SourceManager &SM = SourceMgr; 1355 if (SM.getMainFileID() != SM.getFileID(DiagD->getLocation()) || 1356 !PP.getLangOpts().IsHeaderFile) 1357 Diag(DiagD->getLocation(), diag::warn_unused_const_variable) 1358 << DiagD; 1359 } else { 1360 if (DiagD->getDescribedVarTemplate()) 1361 Diag(DiagD->getLocation(), diag::warn_unused_template) 1362 << /*variable*/ 1 << DiagD; 1363 else 1364 Diag(DiagD->getLocation(), diag::warn_unused_variable) << DiagD; 1365 } 1366 } 1367 } 1368 1369 emitAndClearUnusedLocalTypedefWarnings(); 1370 } 1371 1372 if (!Diags.isIgnored(diag::warn_unused_private_field, SourceLocation())) { 1373 // FIXME: Load additional unused private field candidates from the external 1374 // source. 1375 RecordCompleteMap RecordsComplete; 1376 RecordCompleteMap MNCComplete; 1377 for (NamedDeclSetType::iterator I = UnusedPrivateFields.begin(), 1378 E = UnusedPrivateFields.end(); I != E; ++I) { 1379 const NamedDecl *D = *I; 1380 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D->getDeclContext()); 1381 if (RD && !RD->isUnion() && 1382 IsRecordFullyDefined(RD, RecordsComplete, MNCComplete)) { 1383 Diag(D->getLocation(), diag::warn_unused_private_field) 1384 << D->getDeclName(); 1385 } 1386 } 1387 } 1388 1389 if (!Diags.isIgnored(diag::warn_mismatched_delete_new, SourceLocation())) { 1390 if (ExternalSource) 1391 ExternalSource->ReadMismatchingDeleteExpressions(DeleteExprs); 1392 for (const auto &DeletedFieldInfo : DeleteExprs) { 1393 for (const auto &DeleteExprLoc : DeletedFieldInfo.second) { 1394 AnalyzeDeleteExprMismatch(DeletedFieldInfo.first, DeleteExprLoc.first, 1395 DeleteExprLoc.second); 1396 } 1397 } 1398 } 1399 1400 // Check we've noticed that we're no longer parsing the initializer for every 1401 // variable. If we miss cases, then at best we have a performance issue and 1402 // at worst a rejects-valid bug. 1403 assert(ParsingInitForAutoVars.empty() && 1404 "Didn't unmark var as having its initializer parsed"); 1405 1406 if (!PP.isIncrementalProcessingEnabled()) 1407 TUScope = nullptr; 1408 } 1409 1410 1411 //===----------------------------------------------------------------------===// 1412 // Helper functions. 
1413 //===----------------------------------------------------------------------===// 1414 1415 DeclContext *Sema::getFunctionLevelDeclContext() { 1416 DeclContext *DC = CurContext; 1417 1418 while (true) { 1419 if (isa<BlockDecl>(DC) || isa<EnumDecl>(DC) || isa<CapturedDecl>(DC) || 1420 isa<RequiresExprBodyDecl>(DC)) { 1421 DC = DC->getParent(); 1422 } else if (isa<CXXMethodDecl>(DC) && 1423 cast<CXXMethodDecl>(DC)->getOverloadedOperator() == OO_Call && 1424 cast<CXXRecordDecl>(DC->getParent())->isLambda()) { 1425 DC = DC->getParent()->getParent(); 1426 } 1427 else break; 1428 } 1429 1430 return DC; 1431 } 1432 1433 /// getCurFunctionDecl - If inside of a function body, this returns a pointer 1434 /// to the function decl for the function being parsed. If we're currently 1435 /// in a 'block', this returns the containing context. 1436 FunctionDecl *Sema::getCurFunctionDecl() { 1437 DeclContext *DC = getFunctionLevelDeclContext(); 1438 return dyn_cast<FunctionDecl>(DC); 1439 } 1440 1441 ObjCMethodDecl *Sema::getCurMethodDecl() { 1442 DeclContext *DC = getFunctionLevelDeclContext(); 1443 while (isa<RecordDecl>(DC)) 1444 DC = DC->getParent(); 1445 return dyn_cast<ObjCMethodDecl>(DC); 1446 } 1447 1448 NamedDecl *Sema::getCurFunctionOrMethodDecl() { 1449 DeclContext *DC = getFunctionLevelDeclContext(); 1450 if (isa<ObjCMethodDecl>(DC) || isa<FunctionDecl>(DC)) 1451 return cast<NamedDecl>(DC); 1452 return nullptr; 1453 } 1454 1455 LangAS Sema::getDefaultCXXMethodAddrSpace() const { 1456 if (getLangOpts().OpenCL) 1457 return getASTContext().getDefaultOpenCLPointeeAddrSpace(); 1458 return LangAS::Default; 1459 } 1460 1461 void Sema::EmitCurrentDiagnostic(unsigned DiagID) { 1462 // FIXME: It doesn't make sense to me that DiagID is an incoming argument here 1463 // and yet we also use the current diag ID on the DiagnosticsEngine. This has 1464 // been made more painfully obvious by the refactor that introduced this 1465 // function, but it is possible that the incoming argument can be 1466 // eliminated. If it truly cannot be (for example, there is some reentrancy 1467 // issue I am not seeing yet), then there should at least be a clarifying 1468 // comment somewhere. 1469 if (Optional<TemplateDeductionInfo*> Info = isSFINAEContext()) { 1470 switch (DiagnosticIDs::getDiagnosticSFINAEResponse( 1471 Diags.getCurrentDiagID())) { 1472 case DiagnosticIDs::SFINAE_Report: 1473 // We'll report the diagnostic below. 1474 break; 1475 1476 case DiagnosticIDs::SFINAE_SubstitutionFailure: 1477 // Count this failure so that we know that template argument deduction 1478 // has failed. 1479 ++NumSFINAEErrors; 1480 1481 // Make a copy of this suppressed diagnostic and store it with the 1482 // template-deduction information. 1483 if (*Info && !(*Info)->hasSFINAEDiagnostic()) { 1484 Diagnostic DiagInfo(&Diags); 1485 (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(), 1486 PartialDiagnostic(DiagInfo, Context.getDiagAllocator())); 1487 } 1488 1489 Diags.setLastDiagnosticIgnored(true); 1490 Diags.Clear(); 1491 return; 1492 1493 case DiagnosticIDs::SFINAE_AccessControl: { 1494 // Per C++ Core Issue 1170, access control is part of SFINAE. 1495 // Additionally, the AccessCheckingSFINAE flag can be used to temporarily 1496 // make access control a part of SFINAE for the purposes of checking 1497 // type traits. 1498 if (!AccessCheckingSFINAE && !getLangOpts().CPlusPlus11) 1499 break; 1500 1501 SourceLocation Loc = Diags.getCurrentDiagLoc(); 1502 1503 // Suppress this diagnostic. 
1504 ++NumSFINAEErrors; 1505 1506 // Make a copy of this suppressed diagnostic and store it with the 1507 // template-deduction information. 1508 if (*Info && !(*Info)->hasSFINAEDiagnostic()) { 1509 Diagnostic DiagInfo(&Diags); 1510 (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(), 1511 PartialDiagnostic(DiagInfo, Context.getDiagAllocator())); 1512 } 1513 1514 Diags.setLastDiagnosticIgnored(true); 1515 Diags.Clear(); 1516 1517 // Now the diagnostic state is clear, produce a C++98 compatibility 1518 // warning. 1519 Diag(Loc, diag::warn_cxx98_compat_sfinae_access_control); 1520 1521 // The last diagnostic which Sema produced was ignored. Suppress any 1522 // notes attached to it. 1523 Diags.setLastDiagnosticIgnored(true); 1524 return; 1525 } 1526 1527 case DiagnosticIDs::SFINAE_Suppress: 1528 // Make a copy of this suppressed diagnostic and store it with the 1529 // template-deduction information; 1530 if (*Info) { 1531 Diagnostic DiagInfo(&Diags); 1532 (*Info)->addSuppressedDiagnostic(DiagInfo.getLocation(), 1533 PartialDiagnostic(DiagInfo, Context.getDiagAllocator())); 1534 } 1535 1536 // Suppress this diagnostic. 1537 Diags.setLastDiagnosticIgnored(true); 1538 Diags.Clear(); 1539 return; 1540 } 1541 } 1542 1543 // Copy the diagnostic printing policy over the ASTContext printing policy. 1544 // TODO: Stop doing that. See: https://reviews.llvm.org/D45093#1090292 1545 Context.setPrintingPolicy(getPrintingPolicy()); 1546 1547 // Emit the diagnostic. 1548 if (!Diags.EmitCurrentDiagnostic()) 1549 return; 1550 1551 // If this is not a note, and we're in a template instantiation 1552 // that is different from the last template instantiation where 1553 // we emitted an error, print a template instantiation 1554 // backtrace. 1555 if (!DiagnosticIDs::isBuiltinNote(DiagID)) 1556 PrintContextStack(); 1557 } 1558 1559 Sema::SemaDiagnosticBuilder 1560 Sema::Diag(SourceLocation Loc, const PartialDiagnostic &PD, bool DeferHint) { 1561 return Diag(Loc, PD.getDiagID(), DeferHint) << PD; 1562 } 1563 1564 bool Sema::hasUncompilableErrorOccurred() const { 1565 if (getDiagnostics().hasUncompilableErrorOccurred()) 1566 return true; 1567 auto *FD = dyn_cast<FunctionDecl>(CurContext); 1568 if (!FD) 1569 return false; 1570 auto Loc = DeviceDeferredDiags.find(FD); 1571 if (Loc == DeviceDeferredDiags.end()) 1572 return false; 1573 for (auto PDAt : Loc->second) { 1574 if (DiagnosticIDs::isDefaultMappingAsError(PDAt.second.getDiagID())) 1575 return true; 1576 } 1577 return false; 1578 } 1579 1580 // Print notes showing how we can reach FD starting from an a priori 1581 // known-callable function. 1582 static void emitCallStackNotes(Sema &S, FunctionDecl *FD) { 1583 auto FnIt = S.DeviceKnownEmittedFns.find(FD); 1584 while (FnIt != S.DeviceKnownEmittedFns.end()) { 1585 // Respect error limit. 1586 if (S.Diags.hasFatalErrorOccurred()) 1587 return; 1588 DiagnosticBuilder Builder( 1589 S.Diags.Report(FnIt->second.Loc, diag::note_called_by)); 1590 Builder << FnIt->second.FD; 1591 FnIt = S.DeviceKnownEmittedFns.find(FnIt->second.FD); 1592 } 1593 } 1594 1595 namespace { 1596 1597 /// Helper class that emits deferred diagnostic messages if an entity directly 1598 /// or indirectly using the function that causes the deferred diagnostic 1599 /// messages is known to be emitted. 1600 /// 1601 /// During parsing of AST, certain diagnostic messages are recorded as deferred 1602 /// diagnostics since it is unknown whether the functions containing such 1603 /// diagnostics will be emitted. 
/// A list of potentially emitted functions and variables that may trigger
/// emission of functions is also recorded. DeferredDiagnosticsEmitter
/// recursively visits the functions used by each function in order to emit
/// their deferred diagnostics.
///
/// During the visit, certain OpenMP directives, or the initializers of
/// variables with certain OpenMP attributes, cause any subsequently visited
/// functions to enter a state called the OpenMP device context in this
/// implementation. The state is exited when the directive or initializer is
/// exited. This state can change the emission state of subsequent uses of
/// functions.
///
/// Conceptually, the functions and variables to be visited form a use graph
/// in which a parent node uses its child nodes. At any point of the visit,
/// the nodes traversed from the root to the current node form a use stack.
/// The emission state of the current node depends on two factors:
///    1. the emission state of the root node
///    2. whether the current node is in the OpenMP device context
/// If a function is determined to be emitted, its recorded deferred
/// diagnostics are emitted, together with information about the use stack.
///
class DeferredDiagnosticsEmitter
    : public UsedDeclVisitor<DeferredDiagnosticsEmitter> {
public:
  typedef UsedDeclVisitor<DeferredDiagnosticsEmitter> Inherited;

  // Whether the function is already in the current use-path.
  llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> InUsePath;

  // The current use-path.
  llvm::SmallVector<CanonicalDeclPtr<FunctionDecl>, 4> UsePath;

  // Whether the visiting of the function has been done. Done[0] is for the
  // case not in OpenMP device context. Done[1] is for the case in OpenMP
  // device context. We need two sets because diagnostics emission may be
  // different depending on whether it is in OpenMP device context.
  llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> DoneMap[2];

  // Emission state of the root node of the current use graph.
  bool ShouldEmitRootNode;

  // Current OpenMP device context level. It is initialized to 0; each entry
  // into a device context increases it by 1 and each exit decreases it by 1.
  // A non-zero value indicates that we are currently in a device context.
1647 unsigned InOMPDeviceContext; 1648 1649 DeferredDiagnosticsEmitter(Sema &S) 1650 : Inherited(S), ShouldEmitRootNode(false), InOMPDeviceContext(0) {} 1651 1652 bool shouldVisitDiscardedStmt() const { return false; } 1653 1654 void VisitOMPTargetDirective(OMPTargetDirective *Node) { 1655 ++InOMPDeviceContext; 1656 Inherited::VisitOMPTargetDirective(Node); 1657 --InOMPDeviceContext; 1658 } 1659 1660 void visitUsedDecl(SourceLocation Loc, Decl *D) { 1661 if (isa<VarDecl>(D)) 1662 return; 1663 if (auto *FD = dyn_cast<FunctionDecl>(D)) 1664 checkFunc(Loc, FD); 1665 else 1666 Inherited::visitUsedDecl(Loc, D); 1667 } 1668 1669 void checkVar(VarDecl *VD) { 1670 assert(VD->isFileVarDecl() && 1671 "Should only check file-scope variables"); 1672 if (auto *Init = VD->getInit()) { 1673 auto DevTy = OMPDeclareTargetDeclAttr::getDeviceType(VD); 1674 bool IsDev = DevTy && (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost || 1675 *DevTy == OMPDeclareTargetDeclAttr::DT_Any); 1676 if (IsDev) 1677 ++InOMPDeviceContext; 1678 this->Visit(Init); 1679 if (IsDev) 1680 --InOMPDeviceContext; 1681 } 1682 } 1683 1684 void checkFunc(SourceLocation Loc, FunctionDecl *FD) { 1685 auto &Done = DoneMap[InOMPDeviceContext > 0 ? 1 : 0]; 1686 FunctionDecl *Caller = UsePath.empty() ? nullptr : UsePath.back(); 1687 if ((!ShouldEmitRootNode && !S.getLangOpts().OpenMP && !Caller) || 1688 S.shouldIgnoreInHostDeviceCheck(FD) || InUsePath.count(FD)) 1689 return; 1690 // Finalize analysis of OpenMP-specific constructs. 1691 if (Caller && S.LangOpts.OpenMP && UsePath.size() == 1 && 1692 (ShouldEmitRootNode || InOMPDeviceContext)) 1693 S.finalizeOpenMPDelayedAnalysis(Caller, FD, Loc); 1694 if (Caller) 1695 S.DeviceKnownEmittedFns[FD] = {Caller, Loc}; 1696 // Always emit deferred diagnostics for the direct users. This does not 1697 // lead to explosion of diagnostics since each user is visited at most 1698 // twice. 1699 if (ShouldEmitRootNode || InOMPDeviceContext) 1700 emitDeferredDiags(FD, Caller); 1701 // Do not revisit a function if the function body has been completely 1702 // visited before. 1703 if (!Done.insert(FD).second) 1704 return; 1705 InUsePath.insert(FD); 1706 UsePath.push_back(FD); 1707 if (auto *S = FD->getBody()) { 1708 this->Visit(S); 1709 } 1710 UsePath.pop_back(); 1711 InUsePath.erase(FD); 1712 } 1713 1714 void checkRecordedDecl(Decl *D) { 1715 if (auto *FD = dyn_cast<FunctionDecl>(D)) { 1716 ShouldEmitRootNode = S.getEmissionStatus(FD, /*Final=*/true) == 1717 Sema::FunctionEmissionStatus::Emitted; 1718 checkFunc(SourceLocation(), FD); 1719 } else 1720 checkVar(cast<VarDecl>(D)); 1721 } 1722 1723 // Emit any deferred diagnostics for FD 1724 void emitDeferredDiags(FunctionDecl *FD, bool ShowCallStack) { 1725 auto It = S.DeviceDeferredDiags.find(FD); 1726 if (It == S.DeviceDeferredDiags.end()) 1727 return; 1728 bool HasWarningOrError = false; 1729 bool FirstDiag = true; 1730 for (PartialDiagnosticAt &PDAt : It->second) { 1731 // Respect error limit. 1732 if (S.Diags.hasFatalErrorOccurred()) 1733 return; 1734 const SourceLocation &Loc = PDAt.first; 1735 const PartialDiagnostic &PD = PDAt.second; 1736 HasWarningOrError |= 1737 S.getDiagnostics().getDiagnosticLevel(PD.getDiagID(), Loc) >= 1738 DiagnosticsEngine::Warning; 1739 { 1740 DiagnosticBuilder Builder(S.Diags.Report(Loc, PD.getDiagID())); 1741 PD.Emit(Builder); 1742 } 1743 // Emit the note on the first diagnostic in case too many diagnostics 1744 // cause the note not emitted. 
1745 if (FirstDiag && HasWarningOrError && ShowCallStack) { 1746 emitCallStackNotes(S, FD); 1747 FirstDiag = false; 1748 } 1749 } 1750 } 1751 }; 1752 } // namespace 1753 1754 void Sema::emitDeferredDiags() { 1755 if (ExternalSource) 1756 ExternalSource->ReadDeclsToCheckForDeferredDiags( 1757 DeclsToCheckForDeferredDiags); 1758 1759 if ((DeviceDeferredDiags.empty() && !LangOpts.OpenMP) || 1760 DeclsToCheckForDeferredDiags.empty()) 1761 return; 1762 1763 DeferredDiagnosticsEmitter DDE(*this); 1764 for (auto D : DeclsToCheckForDeferredDiags) 1765 DDE.checkRecordedDecl(D); 1766 } 1767 1768 // In CUDA, there are some constructs which may appear in semantically-valid 1769 // code, but trigger errors if we ever generate code for the function in which 1770 // they appear. Essentially every construct you're not allowed to use on the 1771 // device falls into this category, because you are allowed to use these 1772 // constructs in a __host__ __device__ function, but only if that function is 1773 // never codegen'ed on the device. 1774 // 1775 // To handle semantic checking for these constructs, we keep track of the set of 1776 // functions we know will be emitted, either because we could tell a priori that 1777 // they would be emitted, or because they were transitively called by a 1778 // known-emitted function. 1779 // 1780 // We also keep a partial call graph of which not-known-emitted functions call 1781 // which other not-known-emitted functions. 1782 // 1783 // When we see something which is illegal if the current function is emitted 1784 // (usually by way of CUDADiagIfDeviceCode, CUDADiagIfHostCode, or 1785 // CheckCUDACall), we first check if the current function is known-emitted. If 1786 // so, we immediately output the diagnostic. 1787 // 1788 // Otherwise, we "defer" the diagnostic. It sits in Sema::DeviceDeferredDiags 1789 // until we discover that the function is known-emitted, at which point we take 1790 // it out of this map and emit the diagnostic. 1791 1792 Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(Kind K, SourceLocation Loc, 1793 unsigned DiagID, 1794 FunctionDecl *Fn, Sema &S) 1795 : S(S), Loc(Loc), DiagID(DiagID), Fn(Fn), 1796 ShowCallStack(K == K_ImmediateWithCallStack || K == K_Deferred) { 1797 switch (K) { 1798 case K_Nop: 1799 break; 1800 case K_Immediate: 1801 case K_ImmediateWithCallStack: 1802 ImmediateDiag.emplace( 1803 ImmediateDiagBuilder(S.Diags.Report(Loc, DiagID), S, DiagID)); 1804 break; 1805 case K_Deferred: 1806 assert(Fn && "Must have a function to attach the deferred diag to."); 1807 auto &Diags = S.DeviceDeferredDiags[Fn]; 1808 PartialDiagId.emplace(Diags.size()); 1809 Diags.emplace_back(Loc, S.PDiag(DiagID)); 1810 break; 1811 } 1812 } 1813 1814 Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D) 1815 : S(D.S), Loc(D.Loc), DiagID(D.DiagID), Fn(D.Fn), 1816 ShowCallStack(D.ShowCallStack), ImmediateDiag(D.ImmediateDiag), 1817 PartialDiagId(D.PartialDiagId) { 1818 // Clean the previous diagnostics. 1819 D.ShowCallStack = false; 1820 D.ImmediateDiag.reset(); 1821 D.PartialDiagId.reset(); 1822 } 1823 1824 Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() { 1825 if (ImmediateDiag) { 1826 // Emit our diagnostic and, if it was a warning or error, output a callstack 1827 // if Fn isn't a priori known-emitted. 1828 bool IsWarningOrError = S.getDiagnostics().getDiagnosticLevel( 1829 DiagID, Loc) >= DiagnosticsEngine::Warning; 1830 ImmediateDiag.reset(); // Emit the immediate diag. 
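    // Resetting ImmediateDiag destroys the wrapped diagnostic builder; it is
    // that destruction which actually flushes the diagnostic, so the
    // call-stack notes below end up attached after the diagnostic itself.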
    if (IsWarningOrError && ShowCallStack)
      emitCallStackNotes(S, Fn);
  } else {
    assert((!PartialDiagId || ShowCallStack) &&
           "Must always show call stack for deferred diags.");
  }
}

Sema::SemaDiagnosticBuilder
Sema::targetDiag(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD) {
  FD = FD ? FD : getCurFunctionDecl();
  if (LangOpts.OpenMP)
    return LangOpts.OpenMPIsDevice ? diagIfOpenMPDeviceCode(Loc, DiagID, FD)
                                   : diagIfOpenMPHostCode(Loc, DiagID, FD);
  if (getLangOpts().CUDA)
    return getLangOpts().CUDAIsDevice ? CUDADiagIfDeviceCode(Loc, DiagID)
                                      : CUDADiagIfHostCode(Loc, DiagID);

  if (getLangOpts().SYCLIsDevice)
    return SYCLDiagIfDeviceCode(Loc, DiagID);

  return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc, DiagID,
                               FD, *this);
}

Sema::SemaDiagnosticBuilder Sema::Diag(SourceLocation Loc, unsigned DiagID,
                                       bool DeferHint) {
  bool IsError = Diags.getDiagnosticIDs()->isDefaultMappingAsError(DiagID);
  bool ShouldDefer = getLangOpts().CUDA && LangOpts.GPUDeferDiag &&
                     DiagnosticIDs::isDeferrable(DiagID) &&
                     (DeferHint || DeferDiags || !IsError);
  auto SetIsLastErrorImmediate = [&](bool Flag) {
    if (IsError)
      IsLastErrorImmediate = Flag;
  };
  if (!ShouldDefer) {
    SetIsLastErrorImmediate(true);
    return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc,
                                 DiagID, getCurFunctionDecl(), *this);
  }

  SemaDiagnosticBuilder DB = getLangOpts().CUDAIsDevice
                                 ? CUDADiagIfDeviceCode(Loc, DiagID)
                                 : CUDADiagIfHostCode(Loc, DiagID);
  SetIsLastErrorImmediate(DB.isImmediate());
  return DB;
}

void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
  if (isUnevaluatedContext() || Ty.isNull())
    return;

  // The original idea behind the checkTypeSupport function is that an unused
  // declaration can be replaced with an array of bytes of the same size during
  // codegen. Such a replacement doesn't seem to be possible for types without
  // a constant byte size, such as zero-length arrays, so do a deep check for
  // SYCL.
  if (D && LangOpts.SYCLIsDevice) {
    llvm::DenseSet<QualType> Visited;
    deepTypeCheckForSYCLDevice(Loc, Visited, D);
  }

  Decl *C = cast<Decl>(getCurLexicalContext());

  // Memcpy operations for structs containing a member with unsupported type
  // are ok, though.
  if (const auto *MD = dyn_cast<CXXMethodDecl>(C)) {
    if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
        MD->isTrivial())
      return;

    if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(MD))
      if (Ctor->isCopyOrMoveConstructor() && Ctor->isTrivial())
        return;
  }

  // Try to associate errors with the lexical context, if that is a function,
  // or the value declaration otherwise.
  FunctionDecl *FD = isa<FunctionDecl>(C) ?
cast<FunctionDecl>(C) 1909 : dyn_cast_or_null<FunctionDecl>(D); 1910 1911 auto CheckDeviceType = [&](QualType Ty) { 1912 if (Ty->isDependentType()) 1913 return; 1914 1915 if (Ty->isBitIntType()) { 1916 if (!Context.getTargetInfo().hasBitIntType()) { 1917 PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type); 1918 if (D) 1919 PD << D; 1920 else 1921 PD << "expression"; 1922 targetDiag(Loc, PD, FD) 1923 << false /*show bit size*/ << 0 /*bitsize*/ << false /*return*/ 1924 << Ty << Context.getTargetInfo().getTriple().str(); 1925 } 1926 return; 1927 } 1928 1929 // Check if we are dealing with two 'long double' but with different 1930 // semantics. 1931 bool LongDoubleMismatched = false; 1932 if (Ty->isRealFloatingType() && Context.getTypeSize(Ty) == 128) { 1933 const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(Ty); 1934 if ((&Sem != &llvm::APFloat::PPCDoubleDouble() && 1935 !Context.getTargetInfo().hasFloat128Type()) || 1936 (&Sem == &llvm::APFloat::PPCDoubleDouble() && 1937 !Context.getTargetInfo().hasIbm128Type())) 1938 LongDoubleMismatched = true; 1939 } 1940 1941 if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) || 1942 (Ty->isFloat128Type() && !Context.getTargetInfo().hasFloat128Type()) || 1943 (Ty->isIbm128Type() && !Context.getTargetInfo().hasIbm128Type()) || 1944 (Ty->isIntegerType() && Context.getTypeSize(Ty) == 128 && 1945 !Context.getTargetInfo().hasInt128Type()) || 1946 LongDoubleMismatched) { 1947 PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type); 1948 if (D) 1949 PD << D; 1950 else 1951 PD << "expression"; 1952 1953 if (targetDiag(Loc, PD, FD) 1954 << true /*show bit size*/ 1955 << static_cast<unsigned>(Context.getTypeSize(Ty)) << Ty 1956 << false /*return*/ << Context.getTargetInfo().getTriple().str()) { 1957 if (D) 1958 D->setInvalidDecl(); 1959 } 1960 if (D) 1961 targetDiag(D->getLocation(), diag::note_defined_here, FD) << D; 1962 } 1963 }; 1964 1965 auto CheckType = [&](QualType Ty, bool IsRetTy = false) { 1966 if (LangOpts.SYCLIsDevice || (LangOpts.OpenMP && LangOpts.OpenMPIsDevice) || 1967 LangOpts.CUDAIsDevice) 1968 CheckDeviceType(Ty); 1969 1970 QualType UnqualTy = Ty.getCanonicalType().getUnqualifiedType(); 1971 const TargetInfo &TI = Context.getTargetInfo(); 1972 if (!TI.hasLongDoubleType() && UnqualTy == Context.LongDoubleTy) { 1973 PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type); 1974 if (D) 1975 PD << D; 1976 else 1977 PD << "expression"; 1978 1979 if (Diag(Loc, PD, FD) 1980 << false /*show bit size*/ << 0 << Ty << false /*return*/ 1981 << Context.getTargetInfo().getTriple().str()) { 1982 if (D) 1983 D->setInvalidDecl(); 1984 } 1985 if (D) 1986 targetDiag(D->getLocation(), diag::note_defined_here, FD) << D; 1987 } 1988 1989 bool IsDouble = UnqualTy == Context.DoubleTy; 1990 bool IsFloat = UnqualTy == Context.FloatTy; 1991 if (IsRetTy && !TI.hasFPReturn() && (IsDouble || IsFloat)) { 1992 PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type); 1993 if (D) 1994 PD << D; 1995 else 1996 PD << "expression"; 1997 1998 if (Diag(Loc, PD, FD) 1999 << false /*show bit size*/ << 0 << Ty << true /*return*/ 2000 << Context.getTargetInfo().getTriple().str()) { 2001 if (D) 2002 D->setInvalidDecl(); 2003 } 2004 if (D) 2005 targetDiag(D->getLocation(), diag::note_defined_here, FD) << D; 2006 } 2007 }; 2008 2009 CheckType(Ty); 2010 if (const auto *FPTy = dyn_cast<FunctionProtoType>(Ty)) { 2011 for (const auto &ParamTy : FPTy->param_types()) 2012 CheckType(ParamTy); 2013 CheckType(FPTy->getReturnType(), 
/*IsRetTy=*/true); 2014 } 2015 if (const auto *FNPTy = dyn_cast<FunctionNoProtoType>(Ty)) 2016 CheckType(FNPTy->getReturnType(), /*IsRetTy=*/true); 2017 } 2018 2019 /// Looks through the macro-expansion chain for the given 2020 /// location, looking for a macro expansion with the given name. 2021 /// If one is found, returns true and sets the location to that 2022 /// expansion loc. 2023 bool Sema::findMacroSpelling(SourceLocation &locref, StringRef name) { 2024 SourceLocation loc = locref; 2025 if (!loc.isMacroID()) return false; 2026 2027 // There's no good way right now to look at the intermediate 2028 // expansions, so just jump to the expansion location. 2029 loc = getSourceManager().getExpansionLoc(loc); 2030 2031 // If that's written with the name, stop here. 2032 SmallString<16> buffer; 2033 if (getPreprocessor().getSpelling(loc, buffer) == name) { 2034 locref = loc; 2035 return true; 2036 } 2037 return false; 2038 } 2039 2040 /// Determines the active Scope associated with the given declaration 2041 /// context. 2042 /// 2043 /// This routine maps a declaration context to the active Scope object that 2044 /// represents that declaration context in the parser. It is typically used 2045 /// from "scope-less" code (e.g., template instantiation, lazy creation of 2046 /// declarations) that injects a name for name-lookup purposes and, therefore, 2047 /// must update the Scope. 2048 /// 2049 /// \returns The scope corresponding to the given declaraion context, or NULL 2050 /// if no such scope is open. 2051 Scope *Sema::getScopeForContext(DeclContext *Ctx) { 2052 2053 if (!Ctx) 2054 return nullptr; 2055 2056 Ctx = Ctx->getPrimaryContext(); 2057 for (Scope *S = getCurScope(); S; S = S->getParent()) { 2058 // Ignore scopes that cannot have declarations. This is important for 2059 // out-of-line definitions of static class members. 2060 if (S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope)) 2061 if (DeclContext *Entity = S->getEntity()) 2062 if (Ctx == Entity->getPrimaryContext()) 2063 return S; 2064 } 2065 2066 return nullptr; 2067 } 2068 2069 /// Enter a new function scope 2070 void Sema::PushFunctionScope() { 2071 if (FunctionScopes.empty() && CachedFunctionScope) { 2072 // Use CachedFunctionScope to avoid allocating memory when possible. 2073 CachedFunctionScope->Clear(); 2074 FunctionScopes.push_back(CachedFunctionScope.release()); 2075 } else { 2076 FunctionScopes.push_back(new FunctionScopeInfo(getDiagnostics())); 2077 } 2078 if (LangOpts.OpenMP) 2079 pushOpenMPFunctionRegion(); 2080 } 2081 2082 void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) { 2083 FunctionScopes.push_back(new BlockScopeInfo(getDiagnostics(), 2084 BlockScope, Block)); 2085 } 2086 2087 LambdaScopeInfo *Sema::PushLambdaScope() { 2088 LambdaScopeInfo *const LSI = new LambdaScopeInfo(getDiagnostics()); 2089 FunctionScopes.push_back(LSI); 2090 return LSI; 2091 } 2092 2093 void Sema::RecordParsingTemplateParameterDepth(unsigned Depth) { 2094 if (LambdaScopeInfo *const LSI = getCurLambda()) { 2095 LSI->AutoTemplateParameterDepth = Depth; 2096 return; 2097 } 2098 llvm_unreachable( 2099 "Remove assertion if intentionally called in a non-lambda context."); 2100 } 2101 2102 // Check that the type of the VarDecl has an accessible copy constructor and 2103 // resolve its destructor's exception specification. 2104 // This also performs initialization of block variables when they are moved 2105 // to the heap. 
It uses the same rules as applicable for implicit moves 2106 // according to the C++ standard in effect ([class.copy.elision]p3). 2107 static void checkEscapingByref(VarDecl *VD, Sema &S) { 2108 QualType T = VD->getType(); 2109 EnterExpressionEvaluationContext scope( 2110 S, Sema::ExpressionEvaluationContext::PotentiallyEvaluated); 2111 SourceLocation Loc = VD->getLocation(); 2112 Expr *VarRef = 2113 new (S.Context) DeclRefExpr(S.Context, VD, false, T, VK_LValue, Loc); 2114 ExprResult Result; 2115 auto IE = InitializedEntity::InitializeBlock(Loc, T); 2116 if (S.getLangOpts().CPlusPlus2b) { 2117 auto *E = ImplicitCastExpr::Create(S.Context, T, CK_NoOp, VarRef, nullptr, 2118 VK_XValue, FPOptionsOverride()); 2119 Result = S.PerformCopyInitialization(IE, SourceLocation(), E); 2120 } else { 2121 Result = S.PerformMoveOrCopyInitialization( 2122 IE, Sema::NamedReturnInfo{VD, Sema::NamedReturnInfo::MoveEligible}, 2123 VarRef); 2124 } 2125 2126 if (!Result.isInvalid()) { 2127 Result = S.MaybeCreateExprWithCleanups(Result); 2128 Expr *Init = Result.getAs<Expr>(); 2129 S.Context.setBlockVarCopyInit(VD, Init, S.canThrow(Init)); 2130 } 2131 2132 // The destructor's exception specification is needed when IRGen generates 2133 // block copy/destroy functions. Resolve it here. 2134 if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl()) 2135 if (CXXDestructorDecl *DD = RD->getDestructor()) { 2136 auto *FPT = DD->getType()->getAs<FunctionProtoType>(); 2137 S.ResolveExceptionSpec(Loc, FPT); 2138 } 2139 } 2140 2141 static void markEscapingByrefs(const FunctionScopeInfo &FSI, Sema &S) { 2142 // Set the EscapingByref flag of __block variables captured by 2143 // escaping blocks. 2144 for (const BlockDecl *BD : FSI.Blocks) { 2145 for (const BlockDecl::Capture &BC : BD->captures()) { 2146 VarDecl *VD = BC.getVariable(); 2147 if (VD->hasAttr<BlocksAttr>()) { 2148 // Nothing to do if this is a __block variable captured by a 2149 // non-escaping block. 2150 if (BD->doesNotEscape()) 2151 continue; 2152 VD->setEscapingByref(); 2153 } 2154 // Check whether the captured variable is or contains an object of 2155 // non-trivial C union type. 2156 QualType CapType = BC.getVariable()->getType(); 2157 if (CapType.hasNonTrivialToPrimitiveDestructCUnion() || 2158 CapType.hasNonTrivialToPrimitiveCopyCUnion()) 2159 S.checkNonTrivialCUnion(BC.getVariable()->getType(), 2160 BD->getCaretLocation(), 2161 Sema::NTCUC_BlockCapture, 2162 Sema::NTCUK_Destruct|Sema::NTCUK_Copy); 2163 } 2164 } 2165 2166 for (VarDecl *VD : FSI.ByrefBlockVars) { 2167 // __block variables might require us to capture a copy-initializer. 2168 if (!VD->isEscapingByref()) 2169 continue; 2170 // It's currently invalid to ever have a __block variable with an 2171 // array type; should we diagnose that here? 2172 // Regardless, we don't want to ignore array nesting when 2173 // constructing this copy. 2174 if (VD->getType()->isStructureOrClassType()) 2175 checkEscapingByref(VD, S); 2176 } 2177 } 2178 2179 /// Pop a function (or block or lambda or captured region) scope from the stack. 2180 /// 2181 /// \param WP The warning policy to use for CFG-based warnings, or null if such 2182 /// warnings should not be produced. 2183 /// \param D The declaration corresponding to this function scope, if producing 2184 /// CFG-based warnings. 2185 /// \param BlockType The type of the block expression, if D is a BlockDecl. 
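///
/// Note that when either \p WP or \p D is null, the possibly-unreachable
/// diagnostics recorded in the scope are emitted directly instead of being
/// filtered through the CFG-based analysis.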
2186 Sema::PoppedFunctionScopePtr 2187 Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP, 2188 const Decl *D, QualType BlockType) { 2189 assert(!FunctionScopes.empty() && "mismatched push/pop!"); 2190 2191 markEscapingByrefs(*FunctionScopes.back(), *this); 2192 2193 PoppedFunctionScopePtr Scope(FunctionScopes.pop_back_val(), 2194 PoppedFunctionScopeDeleter(this)); 2195 2196 if (LangOpts.OpenMP) 2197 popOpenMPFunctionRegion(Scope.get()); 2198 2199 // Issue any analysis-based warnings. 2200 if (WP && D) 2201 AnalysisWarnings.IssueWarnings(*WP, Scope.get(), D, BlockType); 2202 else 2203 for (const auto &PUD : Scope->PossiblyUnreachableDiags) 2204 Diag(PUD.Loc, PUD.PD); 2205 2206 return Scope; 2207 } 2208 2209 void Sema::PoppedFunctionScopeDeleter:: 2210 operator()(sema::FunctionScopeInfo *Scope) const { 2211 // Stash the function scope for later reuse if it's for a normal function. 2212 if (Scope->isPlainFunction() && !Self->CachedFunctionScope) 2213 Self->CachedFunctionScope.reset(Scope); 2214 else 2215 delete Scope; 2216 } 2217 2218 void Sema::PushCompoundScope(bool IsStmtExpr) { 2219 getCurFunction()->CompoundScopes.push_back(CompoundScopeInfo(IsStmtExpr)); 2220 } 2221 2222 void Sema::PopCompoundScope() { 2223 FunctionScopeInfo *CurFunction = getCurFunction(); 2224 assert(!CurFunction->CompoundScopes.empty() && "mismatched push/pop"); 2225 2226 CurFunction->CompoundScopes.pop_back(); 2227 } 2228 2229 /// Determine whether any errors occurred within this function/method/ 2230 /// block. 2231 bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const { 2232 return getCurFunction()->hasUnrecoverableErrorOccurred(); 2233 } 2234 2235 void Sema::setFunctionHasBranchIntoScope() { 2236 if (!FunctionScopes.empty()) 2237 FunctionScopes.back()->setHasBranchIntoScope(); 2238 } 2239 2240 void Sema::setFunctionHasBranchProtectedScope() { 2241 if (!FunctionScopes.empty()) 2242 FunctionScopes.back()->setHasBranchProtectedScope(); 2243 } 2244 2245 void Sema::setFunctionHasIndirectGoto() { 2246 if (!FunctionScopes.empty()) 2247 FunctionScopes.back()->setHasIndirectGoto(); 2248 } 2249 2250 void Sema::setFunctionHasMustTail() { 2251 if (!FunctionScopes.empty()) 2252 FunctionScopes.back()->setHasMustTail(); 2253 } 2254 2255 BlockScopeInfo *Sema::getCurBlock() { 2256 if (FunctionScopes.empty()) 2257 return nullptr; 2258 2259 auto CurBSI = dyn_cast<BlockScopeInfo>(FunctionScopes.back()); 2260 if (CurBSI && CurBSI->TheDecl && 2261 !CurBSI->TheDecl->Encloses(CurContext)) { 2262 // We have switched contexts due to template instantiation. 2263 assert(!CodeSynthesisContexts.empty()); 2264 return nullptr; 2265 } 2266 2267 return CurBSI; 2268 } 2269 2270 FunctionScopeInfo *Sema::getEnclosingFunction() const { 2271 if (FunctionScopes.empty()) 2272 return nullptr; 2273 2274 for (int e = FunctionScopes.size() - 1; e >= 0; --e) { 2275 if (isa<sema::BlockScopeInfo>(FunctionScopes[e])) 2276 continue; 2277 return FunctionScopes[e]; 2278 } 2279 return nullptr; 2280 } 2281 2282 LambdaScopeInfo *Sema::getEnclosingLambda() const { 2283 for (auto *Scope : llvm::reverse(FunctionScopes)) { 2284 if (auto *LSI = dyn_cast<sema::LambdaScopeInfo>(Scope)) { 2285 if (LSI->Lambda && !LSI->Lambda->Encloses(CurContext)) { 2286 // We have switched contexts due to template instantiation. 2287 // FIXME: We should swap out the FunctionScopes during code synthesis 2288 // so that we don't need to check for this. 
2289 assert(!CodeSynthesisContexts.empty()); 2290 return nullptr; 2291 } 2292 return LSI; 2293 } 2294 } 2295 return nullptr; 2296 } 2297 2298 LambdaScopeInfo *Sema::getCurLambda(bool IgnoreNonLambdaCapturingScope) { 2299 if (FunctionScopes.empty()) 2300 return nullptr; 2301 2302 auto I = FunctionScopes.rbegin(); 2303 if (IgnoreNonLambdaCapturingScope) { 2304 auto E = FunctionScopes.rend(); 2305 while (I != E && isa<CapturingScopeInfo>(*I) && !isa<LambdaScopeInfo>(*I)) 2306 ++I; 2307 if (I == E) 2308 return nullptr; 2309 } 2310 auto *CurLSI = dyn_cast<LambdaScopeInfo>(*I); 2311 if (CurLSI && CurLSI->Lambda && 2312 !CurLSI->Lambda->Encloses(CurContext)) { 2313 // We have switched contexts due to template instantiation. 2314 assert(!CodeSynthesisContexts.empty()); 2315 return nullptr; 2316 } 2317 2318 return CurLSI; 2319 } 2320 2321 // We have a generic lambda if we parsed auto parameters, or we have 2322 // an associated template parameter list. 2323 LambdaScopeInfo *Sema::getCurGenericLambda() { 2324 if (LambdaScopeInfo *LSI = getCurLambda()) { 2325 return (LSI->TemplateParams.size() || 2326 LSI->GLTemplateParameterList) ? LSI : nullptr; 2327 } 2328 return nullptr; 2329 } 2330 2331 2332 void Sema::ActOnComment(SourceRange Comment) { 2333 if (!LangOpts.RetainCommentsFromSystemHeaders && 2334 SourceMgr.isInSystemHeader(Comment.getBegin())) 2335 return; 2336 RawComment RC(SourceMgr, Comment, LangOpts.CommentOpts, false); 2337 if (RC.isAlmostTrailingComment()) { 2338 SourceRange MagicMarkerRange(Comment.getBegin(), 2339 Comment.getBegin().getLocWithOffset(3)); 2340 StringRef MagicMarkerText; 2341 switch (RC.getKind()) { 2342 case RawComment::RCK_OrdinaryBCPL: 2343 MagicMarkerText = "///<"; 2344 break; 2345 case RawComment::RCK_OrdinaryC: 2346 MagicMarkerText = "/**<"; 2347 break; 2348 default: 2349 llvm_unreachable("if this is an almost Doxygen comment, " 2350 "it should be ordinary"); 2351 } 2352 Diag(Comment.getBegin(), diag::warn_not_a_doxygen_trailing_member_comment) << 2353 FixItHint::CreateReplacement(MagicMarkerRange, MagicMarkerText); 2354 } 2355 Context.addComment(RC); 2356 } 2357 2358 // Pin this vtable to this file. 2359 ExternalSemaSource::~ExternalSemaSource() {} 2360 char ExternalSemaSource::ID; 2361 2362 void ExternalSemaSource::ReadMethodPool(Selector Sel) { } 2363 void ExternalSemaSource::updateOutOfDateSelector(Selector Sel) { } 2364 2365 void ExternalSemaSource::ReadKnownNamespaces( 2366 SmallVectorImpl<NamespaceDecl *> &Namespaces) { 2367 } 2368 2369 void ExternalSemaSource::ReadUndefinedButUsed( 2370 llvm::MapVector<NamedDecl *, SourceLocation> &Undefined) {} 2371 2372 void ExternalSemaSource::ReadMismatchingDeleteExpressions(llvm::MapVector< 2373 FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> &) {} 2374 2375 /// Figure out if an expression could be turned into a call. 2376 /// 2377 /// Use this when trying to recover from an error where the programmer may have 2378 /// written just the name of a function instead of actually calling it. 2379 /// 2380 /// \param E - The expression to examine. 2381 /// \param ZeroArgCallReturnTy - If the expression can be turned into a call 2382 /// with no arguments, this parameter is set to the type returned by such a 2383 /// call; otherwise, it is set to an empty QualType. 2384 /// \param OverloadSet - If the expression is an overloaded function 2385 /// name, this parameter is populated with the decls of the various overloads. 
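///
/// A hypothetical example, for illustration only:
/// \code
///   int answer();
///   int x = answer;  // The programmer likely meant "answer()". Here
///                    // tryExprAsCall reports that a zero-argument call
///                    // would be valid and would return 'int'.
/// \endcode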
2386 bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, 2387 UnresolvedSetImpl &OverloadSet) { 2388 ZeroArgCallReturnTy = QualType(); 2389 OverloadSet.clear(); 2390 2391 const OverloadExpr *Overloads = nullptr; 2392 bool IsMemExpr = false; 2393 if (E.getType() == Context.OverloadTy) { 2394 OverloadExpr::FindResult FR = OverloadExpr::find(const_cast<Expr*>(&E)); 2395 2396 // Ignore overloads that are pointer-to-member constants. 2397 if (FR.HasFormOfMemberPointer) 2398 return false; 2399 2400 Overloads = FR.Expression; 2401 } else if (E.getType() == Context.BoundMemberTy) { 2402 Overloads = dyn_cast<UnresolvedMemberExpr>(E.IgnoreParens()); 2403 IsMemExpr = true; 2404 } 2405 2406 bool Ambiguous = false; 2407 bool IsMV = false; 2408 2409 if (Overloads) { 2410 for (OverloadExpr::decls_iterator it = Overloads->decls_begin(), 2411 DeclsEnd = Overloads->decls_end(); it != DeclsEnd; ++it) { 2412 OverloadSet.addDecl(*it); 2413 2414 // Check whether the function is a non-template, non-member which takes no 2415 // arguments. 2416 if (IsMemExpr) 2417 continue; 2418 if (const FunctionDecl *OverloadDecl 2419 = dyn_cast<FunctionDecl>((*it)->getUnderlyingDecl())) { 2420 if (OverloadDecl->getMinRequiredArguments() == 0) { 2421 if (!ZeroArgCallReturnTy.isNull() && !Ambiguous && 2422 (!IsMV || !(OverloadDecl->isCPUDispatchMultiVersion() || 2423 OverloadDecl->isCPUSpecificMultiVersion()))) { 2424 ZeroArgCallReturnTy = QualType(); 2425 Ambiguous = true; 2426 } else { 2427 ZeroArgCallReturnTy = OverloadDecl->getReturnType(); 2428 IsMV = OverloadDecl->isCPUDispatchMultiVersion() || 2429 OverloadDecl->isCPUSpecificMultiVersion(); 2430 } 2431 } 2432 } 2433 } 2434 2435 // If it's not a member, use better machinery to try to resolve the call 2436 if (!IsMemExpr) 2437 return !ZeroArgCallReturnTy.isNull(); 2438 } 2439 2440 // Attempt to call the member with no arguments - this will correctly handle 2441 // member templates with defaults/deduction of template arguments, overloads 2442 // with default arguments, etc. 2443 if (IsMemExpr && !E.isTypeDependent()) { 2444 Sema::TentativeAnalysisScope Trap(*this); 2445 ExprResult R = BuildCallToMemberFunction(nullptr, &E, SourceLocation(), 2446 None, SourceLocation()); 2447 if (R.isUsable()) { 2448 ZeroArgCallReturnTy = R.get()->getType(); 2449 return true; 2450 } 2451 return false; 2452 } 2453 2454 if (const DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E.IgnoreParens())) { 2455 if (const FunctionDecl *Fun = dyn_cast<FunctionDecl>(DeclRef->getDecl())) { 2456 if (Fun->getMinRequiredArguments() == 0) 2457 ZeroArgCallReturnTy = Fun->getReturnType(); 2458 return true; 2459 } 2460 } 2461 2462 // We don't have an expression that's convenient to get a FunctionDecl from, 2463 // but we can at least check if the type is "function of 0 arguments". 2464 QualType ExprTy = E.getType(); 2465 const FunctionType *FunTy = nullptr; 2466 QualType PointeeTy = ExprTy->getPointeeType(); 2467 if (!PointeeTy.isNull()) 2468 FunTy = PointeeTy->getAs<FunctionType>(); 2469 if (!FunTy) 2470 FunTy = ExprTy->getAs<FunctionType>(); 2471 2472 if (const FunctionProtoType *FPT = 2473 dyn_cast_or_null<FunctionProtoType>(FunTy)) { 2474 if (FPT->getNumParams() == 0) 2475 ZeroArgCallReturnTy = FunTy->getReturnType(); 2476 return true; 2477 } 2478 return false; 2479 } 2480 2481 /// Give notes for a set of overloads. 2482 /// 2483 /// A companion to tryExprAsCall. 
In cases when the name that the programmer 2484 /// wrote was an overloaded function, we may be able to make some guesses about 2485 /// plausible overloads based on their return types; such guesses can be handed 2486 /// off to this method to be emitted as notes. 2487 /// 2488 /// \param Overloads - The overloads to note. 2489 /// \param FinalNoteLoc - If we've suppressed printing some overloads due to 2490 /// -fshow-overloads=best, this is the location to attach to the note about too 2491 /// many candidates. Typically this will be the location of the original 2492 /// ill-formed expression. 2493 static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads, 2494 const SourceLocation FinalNoteLoc) { 2495 unsigned ShownOverloads = 0; 2496 unsigned SuppressedOverloads = 0; 2497 for (UnresolvedSetImpl::iterator It = Overloads.begin(), 2498 DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) { 2499 if (ShownOverloads >= S.Diags.getNumOverloadCandidatesToShow()) { 2500 ++SuppressedOverloads; 2501 continue; 2502 } 2503 2504 NamedDecl *Fn = (*It)->getUnderlyingDecl(); 2505 // Don't print overloads for non-default multiversioned functions. 2506 if (const auto *FD = Fn->getAsFunction()) { 2507 if (FD->isMultiVersion() && FD->hasAttr<TargetAttr>() && 2508 !FD->getAttr<TargetAttr>()->isDefaultVersion()) 2509 continue; 2510 } 2511 S.Diag(Fn->getLocation(), diag::note_possible_target_of_call); 2512 ++ShownOverloads; 2513 } 2514 2515 S.Diags.overloadCandidatesShown(ShownOverloads); 2516 2517 if (SuppressedOverloads) 2518 S.Diag(FinalNoteLoc, diag::note_ovl_too_many_candidates) 2519 << SuppressedOverloads; 2520 } 2521 2522 static void notePlausibleOverloads(Sema &S, SourceLocation Loc, 2523 const UnresolvedSetImpl &Overloads, 2524 bool (*IsPlausibleResult)(QualType)) { 2525 if (!IsPlausibleResult) 2526 return noteOverloads(S, Overloads, Loc); 2527 2528 UnresolvedSet<2> PlausibleOverloads; 2529 for (OverloadExpr::decls_iterator It = Overloads.begin(), 2530 DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) { 2531 const FunctionDecl *OverloadDecl = cast<FunctionDecl>(*It); 2532 QualType OverloadResultTy = OverloadDecl->getReturnType(); 2533 if (IsPlausibleResult(OverloadResultTy)) 2534 PlausibleOverloads.addDecl(It.getDecl()); 2535 } 2536 noteOverloads(S, PlausibleOverloads, Loc); 2537 } 2538 2539 /// Determine whether the given expression can be called by just 2540 /// putting parentheses after it. Notably, expressions with unary 2541 /// operators can't be because the unary operator will start parsing 2542 /// outside the call. 
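///
/// For illustration (hypothetical input):
/// \code
///   &func   // Appending "()" gives "&func()", which parses as "&(func())"
///           // rather than as a call to "&func".
/// \endcode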
2543 static bool IsCallableWithAppend(Expr *E) { 2544 E = E->IgnoreImplicit(); 2545 return (!isa<CStyleCastExpr>(E) && 2546 !isa<UnaryOperator>(E) && 2547 !isa<BinaryOperator>(E) && 2548 !isa<CXXOperatorCallExpr>(E)); 2549 } 2550 2551 static bool IsCPUDispatchCPUSpecificMultiVersion(const Expr *E) { 2552 if (const auto *UO = dyn_cast<UnaryOperator>(E)) 2553 E = UO->getSubExpr(); 2554 2555 if (const auto *ULE = dyn_cast<UnresolvedLookupExpr>(E)) { 2556 if (ULE->getNumDecls() == 0) 2557 return false; 2558 2559 const NamedDecl *ND = *ULE->decls_begin(); 2560 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) 2561 return FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion(); 2562 } 2563 return false; 2564 } 2565 2566 bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, 2567 bool ForceComplain, 2568 bool (*IsPlausibleResult)(QualType)) { 2569 SourceLocation Loc = E.get()->getExprLoc(); 2570 SourceRange Range = E.get()->getSourceRange(); 2571 UnresolvedSet<4> Overloads; 2572 2573 // If this is a SFINAE context, don't try anything that might trigger ADL 2574 // prematurely. 2575 if (!isSFINAEContext()) { 2576 QualType ZeroArgCallTy; 2577 if (tryExprAsCall(*E.get(), ZeroArgCallTy, Overloads) && 2578 !ZeroArgCallTy.isNull() && 2579 (!IsPlausibleResult || IsPlausibleResult(ZeroArgCallTy))) { 2580 // At this point, we know E is potentially callable with 0 2581 // arguments and that it returns something of a reasonable type, 2582 // so we can emit a fixit and carry on pretending that E was 2583 // actually a CallExpr. 2584 SourceLocation ParenInsertionLoc = getLocForEndOfToken(Range.getEnd()); 2585 bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get()); 2586 Diag(Loc, PD) << /*zero-arg*/ 1 << IsMV << Range 2587 << (IsCallableWithAppend(E.get()) 2588 ? FixItHint::CreateInsertion(ParenInsertionLoc, 2589 "()") 2590 : FixItHint()); 2591 if (!IsMV) 2592 notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult); 2593 2594 // FIXME: Try this before emitting the fixit, and suppress diagnostics 2595 // while doing so. 2596 E = BuildCallExpr(nullptr, E.get(), Range.getEnd(), None, 2597 Range.getEnd().getLocWithOffset(1)); 2598 return true; 2599 } 2600 } 2601 if (!ForceComplain) return false; 2602 2603 bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get()); 2604 Diag(Loc, PD) << /*not zero-arg*/ 0 << IsMV << Range; 2605 if (!IsMV) 2606 notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult); 2607 E = ExprError(); 2608 return true; 2609 } 2610 2611 IdentifierInfo *Sema::getSuperIdentifier() const { 2612 if (!Ident_super) 2613 Ident_super = &Context.Idents.get("super"); 2614 return Ident_super; 2615 } 2616 2617 IdentifierInfo *Sema::getFloat128Identifier() const { 2618 if (!Ident___float128) 2619 Ident___float128 = &Context.Idents.get("__float128"); 2620 return Ident___float128; 2621 } 2622 2623 void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD, 2624 CapturedRegionKind K, 2625 unsigned OpenMPCaptureLevel) { 2626 auto *CSI = new CapturedRegionScopeInfo( 2627 getDiagnostics(), S, CD, RD, CD->getContextParam(), K, 2628 (getLangOpts().OpenMP && K == CR_OpenMP) ? 
getOpenMPNestingLevel() : 0, 2629 OpenMPCaptureLevel); 2630 CSI->ReturnType = Context.VoidTy; 2631 FunctionScopes.push_back(CSI); 2632 } 2633 2634 CapturedRegionScopeInfo *Sema::getCurCapturedRegion() { 2635 if (FunctionScopes.empty()) 2636 return nullptr; 2637 2638 return dyn_cast<CapturedRegionScopeInfo>(FunctionScopes.back()); 2639 } 2640 2641 const llvm::MapVector<FieldDecl *, Sema::DeleteLocs> & 2642 Sema::getMismatchingDeleteExpressions() const { 2643 return DeleteExprs; 2644 } 2645 2646 Sema::FPFeaturesStateRAII::FPFeaturesStateRAII(Sema &S) 2647 : S(S), OldFPFeaturesState(S.CurFPFeatures), 2648 OldOverrides(S.FpPragmaStack.CurrentValue), 2649 OldEvalMethod(S.PP.getCurrentFPEvalMethod()), 2650 OldFPPragmaLocation(S.PP.getLastFPEvalPragmaLocation()) {} 2651 2652 Sema::FPFeaturesStateRAII::~FPFeaturesStateRAII() { 2653 S.CurFPFeatures = OldFPFeaturesState; 2654 S.FpPragmaStack.CurrentValue = OldOverrides; 2655 S.PP.setCurrentFPEvalMethod(OldFPPragmaLocation, OldEvalMethod); 2656 } 2657
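
// Illustrative (hypothetical) usage of FPFeaturesStateRAII: callers create an
// instance on the stack to snapshot the current floating-point state and have
// it restored automatically on scope exit, e.g.
//
//   {
//     Sema::FPFeaturesStateRAII SaveFP(SemaRef);
//     // ... parse or instantiate code that may change FP pragma state ...
//   } // CurFPFeatures, the FP pragma stack value, and the FP eval method
//     // are restored here.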