1 //===--- Sema.cpp - AST Builder and Semantic Analysis Implementation ------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements the actions class which performs semantic analysis and 10 // builds an AST out of a parse stream. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "UsedDeclVisitor.h" 15 #include "clang/AST/ASTContext.h" 16 #include "clang/AST/ASTDiagnostic.h" 17 #include "clang/AST/Decl.h" 18 #include "clang/AST/DeclCXX.h" 19 #include "clang/AST/DeclFriend.h" 20 #include "clang/AST/DeclObjC.h" 21 #include "clang/AST/Expr.h" 22 #include "clang/AST/ExprCXX.h" 23 #include "clang/AST/PrettyDeclStackTrace.h" 24 #include "clang/AST/StmtCXX.h" 25 #include "clang/Basic/DarwinSDKInfo.h" 26 #include "clang/Basic/DiagnosticOptions.h" 27 #include "clang/Basic/PartialDiagnostic.h" 28 #include "clang/Basic/SourceManager.h" 29 #include "clang/Basic/Stack.h" 30 #include "clang/Basic/TargetInfo.h" 31 #include "clang/Lex/HeaderSearch.h" 32 #include "clang/Lex/HeaderSearchOptions.h" 33 #include "clang/Lex/Preprocessor.h" 34 #include "clang/Sema/CXXFieldCollector.h" 35 #include "clang/Sema/DelayedDiagnostic.h" 36 #include "clang/Sema/ExternalSemaSource.h" 37 #include "clang/Sema/Initialization.h" 38 #include "clang/Sema/MultiplexExternalSemaSource.h" 39 #include "clang/Sema/ObjCMethodList.h" 40 #include "clang/Sema/Scope.h" 41 #include "clang/Sema/ScopeInfo.h" 42 #include "clang/Sema/SemaConsumer.h" 43 #include "clang/Sema/SemaInternal.h" 44 #include "clang/Sema/TemplateDeduction.h" 45 #include "clang/Sema/TemplateInstCallback.h" 46 #include "clang/Sema/TypoCorrection.h" 47 #include "llvm/ADT/DenseMap.h" 48 #include "llvm/ADT/SmallPtrSet.h" 49 #include "llvm/Support/TimeProfiler.h" 50 51 using namespace clang; 52 using namespace sema; 53 54 SourceLocation Sema::getLocForEndOfToken(SourceLocation Loc, unsigned Offset) { 55 return Lexer::getLocForEndOfToken(Loc, Offset, SourceMgr, LangOpts); 56 } 57 58 ModuleLoader &Sema::getModuleLoader() const { return PP.getModuleLoader(); } 59 60 DarwinSDKInfo * 61 Sema::getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc, 62 StringRef Platform) { 63 auto *SDKInfo = getDarwinSDKInfoForAvailabilityChecking(); 64 if (!SDKInfo && !WarnedDarwinSDKInfoMissing) { 65 Diag(Loc, diag::warn_missing_sdksettings_for_availability_checking) 66 << Platform; 67 WarnedDarwinSDKInfoMissing = true; 68 } 69 return SDKInfo; 70 } 71 72 DarwinSDKInfo *Sema::getDarwinSDKInfoForAvailabilityChecking() { 73 if (CachedDarwinSDKInfo) 74 return CachedDarwinSDKInfo->get(); 75 auto SDKInfo = parseDarwinSDKInfo( 76 PP.getFileManager().getVirtualFileSystem(), 77 PP.getHeaderSearchInfo().getHeaderSearchOpts().Sysroot); 78 if (SDKInfo && *SDKInfo) { 79 CachedDarwinSDKInfo = std::make_unique<DarwinSDKInfo>(std::move(**SDKInfo)); 80 return CachedDarwinSDKInfo->get(); 81 } 82 if (!SDKInfo) 83 llvm::consumeError(SDKInfo.takeError()); 84 CachedDarwinSDKInfo = std::unique_ptr<DarwinSDKInfo>(); 85 return nullptr; 86 } 87 88 IdentifierInfo * 89 Sema::InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName, 90 unsigned int Index) { 91 std::string InventedName; 92 llvm::raw_string_ostream OS(InventedName); 93 94 if (!ParamName) 95 OS << "auto:" << Index 
+ 1; 96 else 97 OS << ParamName->getName() << ":auto"; 98 99 OS.flush(); 100 return &Context.Idents.get(OS.str()); 101 } 102 103 PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context, 104 const Preprocessor &PP) { 105 PrintingPolicy Policy = Context.getPrintingPolicy(); 106 // In diagnostics, we print _Bool as bool if the latter is defined as the 107 // former. 108 Policy.Bool = Context.getLangOpts().Bool; 109 if (!Policy.Bool) { 110 if (const MacroInfo *BoolMacro = PP.getMacroInfo(Context.getBoolName())) { 111 Policy.Bool = BoolMacro->isObjectLike() && 112 BoolMacro->getNumTokens() == 1 && 113 BoolMacro->getReplacementToken(0).is(tok::kw__Bool); 114 } 115 } 116 117 // Shorten the data output if needed 118 Policy.EntireContentsOfLargeArray = false; 119 120 return Policy; 121 } 122 123 void Sema::ActOnTranslationUnitScope(Scope *S) { 124 TUScope = S; 125 PushDeclContext(S, Context.getTranslationUnitDecl()); 126 } 127 128 namespace clang { 129 namespace sema { 130 131 class SemaPPCallbacks : public PPCallbacks { 132 Sema *S = nullptr; 133 llvm::SmallVector<SourceLocation, 8> IncludeStack; 134 135 public: 136 void set(Sema &S) { this->S = &S; } 137 138 void reset() { S = nullptr; } 139 140 virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason, 141 SrcMgr::CharacteristicKind FileType, 142 FileID PrevFID) override { 143 if (!S) 144 return; 145 switch (Reason) { 146 case EnterFile: { 147 SourceManager &SM = S->getSourceManager(); 148 SourceLocation IncludeLoc = SM.getIncludeLoc(SM.getFileID(Loc)); 149 if (IncludeLoc.isValid()) { 150 if (llvm::timeTraceProfilerEnabled()) { 151 const FileEntry *FE = SM.getFileEntryForID(SM.getFileID(Loc)); 152 llvm::timeTraceProfilerBegin( 153 "Source", FE != nullptr ? FE->getName() : StringRef("<unknown>")); 154 } 155 156 IncludeStack.push_back(IncludeLoc); 157 S->DiagnoseNonDefaultPragmaAlignPack( 158 Sema::PragmaAlignPackDiagnoseKind::NonDefaultStateAtInclude, 159 IncludeLoc); 160 } 161 break; 162 } 163 case ExitFile: 164 if (!IncludeStack.empty()) { 165 if (llvm::timeTraceProfilerEnabled()) 166 llvm::timeTraceProfilerEnd(); 167 168 S->DiagnoseNonDefaultPragmaAlignPack( 169 Sema::PragmaAlignPackDiagnoseKind::ChangedStateAtExit, 170 IncludeStack.pop_back_val()); 171 } 172 break; 173 default: 174 break; 175 } 176 } 177 }; 178 179 } // end namespace sema 180 } // end namespace clang 181 182 const unsigned Sema::MaxAlignmentExponent; 183 const uint64_t Sema::MaximumAlignment; 184 185 Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, 186 TranslationUnitKind TUKind, CodeCompleteConsumer *CodeCompleter) 187 : ExternalSource(nullptr), isMultiplexExternalSource(false), 188 CurFPFeatures(pp.getLangOpts()), LangOpts(pp.getLangOpts()), PP(pp), 189 Context(ctxt), Consumer(consumer), Diags(PP.getDiagnostics()), 190 SourceMgr(PP.getSourceManager()), CollectStats(false), 191 CodeCompleter(CodeCompleter), CurContext(nullptr), 192 OriginalLexicalContext(nullptr), MSStructPragmaOn(false), 193 MSPointerToMemberRepresentationMethod( 194 LangOpts.getMSPointerToMemberRepresentationMethod()), 195 VtorDispStack(LangOpts.getVtorDispMode()), 196 AlignPackStack(AlignPackInfo(getLangOpts().XLPragmaPack)), 197 DataSegStack(nullptr), BSSSegStack(nullptr), ConstSegStack(nullptr), 198 CodeSegStack(nullptr), FpPragmaStack(FPOptionsOverride()), 199 CurInitSeg(nullptr), VisContext(nullptr), 200 PragmaAttributeCurrentTargetDecl(nullptr), 201 IsBuildingRecoveryCallExpr(false), LateTemplateParser(nullptr), 202 LateTemplateParserCleanup(nullptr), 
      OpaqueParser(nullptr), IdResolver(pp),
      StdExperimentalNamespaceCache(nullptr), StdInitializerList(nullptr),
      StdCoroutineTraitsCache(nullptr), CXXTypeInfoDecl(nullptr),
      MSVCGuidDecl(nullptr), StdSourceLocationImplDecl(nullptr),
      NSNumberDecl(nullptr), NSValueDecl(nullptr), NSStringDecl(nullptr),
      StringWithUTF8StringMethod(nullptr),
      ValueWithBytesObjCTypeMethod(nullptr), NSArrayDecl(nullptr),
      ArrayWithObjectsMethod(nullptr), NSDictionaryDecl(nullptr),
      DictionaryWithObjectsMethod(nullptr), GlobalNewDeleteDeclared(false),
      TUKind(TUKind), NumSFINAEErrors(0),
      FullyCheckedComparisonCategories(
          static_cast<unsigned>(ComparisonCategoryType::Last) + 1),
      SatisfactionCache(Context), AccessCheckingSFINAE(false),
      InNonInstantiationSFINAEContext(false), NonInstantiationEntries(0),
      ArgumentPackSubstitutionIndex(-1), CurrentInstantiationScope(nullptr),
      DisableTypoCorrection(false), TyposCorrected(0), AnalysisWarnings(*this),
      ThreadSafetyDeclCache(nullptr), VarDataSharingAttributesStack(nullptr),
      CurScope(nullptr), Ident_super(nullptr), Ident___float128(nullptr) {
  assert(pp.TUKind == TUKind);
  TUScope = nullptr;
  isConstantEvaluatedOverride = false;

  LoadedExternalKnownNamespaces = false;
  for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I)
    NSNumberLiteralMethods[I] = nullptr;

  if (getLangOpts().ObjC)
    NSAPIObj.reset(new NSAPI(Context));

  if (getLangOpts().CPlusPlus)
    FieldCollector.reset(new CXXFieldCollector());

  // Tell diagnostics how to render things from the AST library.
  Diags.SetArgToStringFn(&FormatASTNodeDiagnosticArgument, &Context);

  // This evaluation context exists to ensure that there's always at least one
  // valid evaluation context available. It is never removed from the
  // evaluation stack.
  ExprEvalContexts.emplace_back(
      ExpressionEvaluationContext::PotentiallyEvaluated, 0, CleanupInfo{},
      nullptr, ExpressionEvaluationContextRecord::EK_Other);

  // Initialization of data sharing attributes stack for OpenMP
  InitDataSharingAttributesStack();

  std::unique_ptr<sema::SemaPPCallbacks> Callbacks =
      std::make_unique<sema::SemaPPCallbacks>();
  SemaPPCallbackHandler = Callbacks.get();
  PP.addPPCallbacks(std::move(Callbacks));
  SemaPPCallbackHandler->set(*this);
  if (getLangOpts().getFPEvalMethod() == LangOptions::FEM_UnsetOnCommandLine)
    // Use setting from TargetInfo.
    PP.setCurrentFPEvalMethod(SourceLocation(),
                              ctxt.getTargetInfo().getFPEvalMethod());
  else
    // Set initial value of __FLT_EVAL_METHOD__ from the command line.
    PP.setCurrentFPEvalMethod(SourceLocation(),
                              getLangOpts().getFPEvalMethod());
  CurFPFeatures.setFPEvalMethod(PP.getCurrentFPEvalMethod());
  // When the `-ffast-math` option is enabled, it triggers several driver math
  // options to be enabled. Among those, only one of the following two modes
  // affects the eval-method: reciprocal or reassociate.
  if (getLangOpts().AllowFPReassoc || getLangOpts().AllowRecip)
    PP.setCurrentFPEvalMethod(SourceLocation(),
                              LangOptions::FEM_Indeterminable);
}

// Anchor Sema's type info to this TU.
void Sema::anchor() {}

void Sema::addImplicitTypedef(StringRef Name, QualType T) {
  DeclarationName DN = &Context.Idents.get(Name);
  if (IdResolver.begin(DN) == IdResolver.end())
    PushOnScopeChains(Context.buildImplicitTypedef(T, Name), TUScope);
}

void Sema::Initialize() {
  if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
    SC->InitializeSema(*this);

  // Tell the external Sema source about this Sema object.
  if (ExternalSemaSource *ExternalSema
        = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
    ExternalSema->InitializeSema(*this);

  // This needs to happen after ExternalSemaSource::InitializeSema(this) or we
  // will not be able to merge any duplicate __va_list_tag decls correctly.
  VAListTagName = PP.getIdentifierInfo("__va_list_tag");

  if (!TUScope)
    return;

  // Initialize predefined 128-bit integer types, if needed.
  if (Context.getTargetInfo().hasInt128Type() ||
      (Context.getAuxTargetInfo() &&
       Context.getAuxTargetInfo()->hasInt128Type())) {
    // If either of the 128-bit integer types is unavailable to name lookup,
    // define them now.
    DeclarationName Int128 = &Context.Idents.get("__int128_t");
    if (IdResolver.begin(Int128) == IdResolver.end())
      PushOnScopeChains(Context.getInt128Decl(), TUScope);

    DeclarationName UInt128 = &Context.Idents.get("__uint128_t");
    if (IdResolver.begin(UInt128) == IdResolver.end())
      PushOnScopeChains(Context.getUInt128Decl(), TUScope);
  }

  // Initialize predefined Objective-C types:
  if (getLangOpts().ObjC) {
    // If 'SEL' does not yet refer to any declarations, make it refer to the
    // predefined 'SEL'.
    DeclarationName SEL = &Context.Idents.get("SEL");
    if (IdResolver.begin(SEL) == IdResolver.end())
      PushOnScopeChains(Context.getObjCSelDecl(), TUScope);

    // If 'id' does not yet refer to any declarations, make it refer to the
    // predefined 'id'.
    DeclarationName Id = &Context.Idents.get("id");
    if (IdResolver.begin(Id) == IdResolver.end())
      PushOnScopeChains(Context.getObjCIdDecl(), TUScope);

    // Create the built-in typedef for 'Class'.
    DeclarationName Class = &Context.Idents.get("Class");
    if (IdResolver.begin(Class) == IdResolver.end())
      PushOnScopeChains(Context.getObjCClassDecl(), TUScope);

    // Create the built-in forward declaration for 'Protocol'.
    DeclarationName Protocol = &Context.Idents.get("Protocol");
    if (IdResolver.begin(Protocol) == IdResolver.end())
      PushOnScopeChains(Context.getObjCProtocolDecl(), TUScope);
  }

  // Create the internal type for the *StringMakeConstantString builtins.
  DeclarationName ConstantString = &Context.Idents.get("__NSConstantString");
  if (IdResolver.begin(ConstantString) == IdResolver.end())
    PushOnScopeChains(Context.getCFConstantStringDecl(), TUScope);

  // Initialize Microsoft "predefined C++ types".
  if (getLangOpts().MSVCCompat) {
    if (getLangOpts().CPlusPlus &&
        IdResolver.begin(&Context.Idents.get("type_info")) == IdResolver.end())
      PushOnScopeChains(Context.buildImplicitRecord("type_info", TTK_Class),
                        TUScope);

    addImplicitTypedef("size_t", Context.getSizeType());
  }

  // Initialize predefined OpenCL types and supported extensions and (optional)
  // core features.
  if (getLangOpts().OpenCL) {
    getOpenCLOptions().addSupport(
        Context.getTargetInfo().getSupportedOpenCLOpts(), getLangOpts());
    addImplicitTypedef("sampler_t", Context.OCLSamplerTy);
    addImplicitTypedef("event_t", Context.OCLEventTy);
    auto OCLCompatibleVersion = getLangOpts().getOpenCLCompatibleVersion();
    if (OCLCompatibleVersion >= 200) {
      if (getLangOpts().OpenCLCPlusPlus || getLangOpts().Blocks) {
        addImplicitTypedef("clk_event_t", Context.OCLClkEventTy);
        addImplicitTypedef("queue_t", Context.OCLQueueTy);
      }
      if (getLangOpts().OpenCLPipes)
        addImplicitTypedef("reserve_id_t", Context.OCLReserveIDTy);
      addImplicitTypedef("atomic_int", Context.getAtomicType(Context.IntTy));
      addImplicitTypedef("atomic_uint",
                         Context.getAtomicType(Context.UnsignedIntTy));
      addImplicitTypedef("atomic_float",
                         Context.getAtomicType(Context.FloatTy));
      // OpenCL C v2.0, s6.13.11.6 requires that atomic_flag be implemented as
      // a 32-bit integer, and per OpenCL C v2.0, s6.1.1 int is always 32 bits
      // wide.
      addImplicitTypedef("atomic_flag", Context.getAtomicType(Context.IntTy));

      // OpenCL v2.0 s6.13.11.6:
      // - The atomic_long and atomic_ulong types are supported if the
      //   cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics
      //   extensions are supported.
      // - The atomic_double type is only supported if double precision
      //   is supported and the cl_khr_int64_base_atomics and
      //   cl_khr_int64_extended_atomics extensions are supported.
      // - If the device address space is 64-bits, the data types
      //   atomic_intptr_t, atomic_uintptr_t, atomic_size_t and
      //   atomic_ptrdiff_t are supported if the cl_khr_int64_base_atomics and
      //   cl_khr_int64_extended_atomics extensions are supported.
386 387 auto AddPointerSizeDependentTypes = [&]() { 388 auto AtomicSizeT = Context.getAtomicType(Context.getSizeType()); 389 auto AtomicIntPtrT = Context.getAtomicType(Context.getIntPtrType()); 390 auto AtomicUIntPtrT = Context.getAtomicType(Context.getUIntPtrType()); 391 auto AtomicPtrDiffT = 392 Context.getAtomicType(Context.getPointerDiffType()); 393 addImplicitTypedef("atomic_size_t", AtomicSizeT); 394 addImplicitTypedef("atomic_intptr_t", AtomicIntPtrT); 395 addImplicitTypedef("atomic_uintptr_t", AtomicUIntPtrT); 396 addImplicitTypedef("atomic_ptrdiff_t", AtomicPtrDiffT); 397 }; 398 399 if (Context.getTypeSize(Context.getSizeType()) == 32) { 400 AddPointerSizeDependentTypes(); 401 } 402 403 if (getOpenCLOptions().isSupported("cl_khr_fp16", getLangOpts())) { 404 auto AtomicHalfT = Context.getAtomicType(Context.HalfTy); 405 addImplicitTypedef("atomic_half", AtomicHalfT); 406 } 407 408 std::vector<QualType> Atomic64BitTypes; 409 if (getOpenCLOptions().isSupported("cl_khr_int64_base_atomics", 410 getLangOpts()) && 411 getOpenCLOptions().isSupported("cl_khr_int64_extended_atomics", 412 getLangOpts())) { 413 if (getOpenCLOptions().isSupported("cl_khr_fp64", getLangOpts())) { 414 auto AtomicDoubleT = Context.getAtomicType(Context.DoubleTy); 415 addImplicitTypedef("atomic_double", AtomicDoubleT); 416 Atomic64BitTypes.push_back(AtomicDoubleT); 417 } 418 auto AtomicLongT = Context.getAtomicType(Context.LongTy); 419 auto AtomicULongT = Context.getAtomicType(Context.UnsignedLongTy); 420 addImplicitTypedef("atomic_long", AtomicLongT); 421 addImplicitTypedef("atomic_ulong", AtomicULongT); 422 423 424 if (Context.getTypeSize(Context.getSizeType()) == 64) { 425 AddPointerSizeDependentTypes(); 426 } 427 } 428 } 429 430 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 431 if (getOpenCLOptions().isSupported(#Ext, getLangOpts())) { \ 432 addImplicitTypedef(#ExtType, Context.Id##Ty); \ 433 } 434 #include "clang/Basic/OpenCLExtensionTypes.def" 435 } 436 437 if (Context.getTargetInfo().hasAArch64SVETypes()) { 438 #define SVE_TYPE(Name, Id, SingletonId) \ 439 addImplicitTypedef(Name, Context.SingletonId); 440 #include "clang/Basic/AArch64SVEACLETypes.def" 441 } 442 443 if (Context.getTargetInfo().getTriple().isPPC64()) { 444 #define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \ 445 addImplicitTypedef(#Name, Context.Id##Ty); 446 #include "clang/Basic/PPCTypes.def" 447 #define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \ 448 addImplicitTypedef(#Name, Context.Id##Ty); 449 #include "clang/Basic/PPCTypes.def" 450 } 451 452 if (Context.getTargetInfo().hasRISCVVTypes()) { 453 #define RVV_TYPE(Name, Id, SingletonId) \ 454 addImplicitTypedef(Name, Context.SingletonId); 455 #include "clang/Basic/RISCVVTypes.def" 456 } 457 458 if (Context.getTargetInfo().hasBuiltinMSVaList()) { 459 DeclarationName MSVaList = &Context.Idents.get("__builtin_ms_va_list"); 460 if (IdResolver.begin(MSVaList) == IdResolver.end()) 461 PushOnScopeChains(Context.getBuiltinMSVaListDecl(), TUScope); 462 } 463 464 DeclarationName BuiltinVaList = &Context.Idents.get("__builtin_va_list"); 465 if (IdResolver.begin(BuiltinVaList) == IdResolver.end()) 466 PushOnScopeChains(Context.getBuiltinVaListDecl(), TUScope); 467 } 468 469 Sema::~Sema() { 470 assert(InstantiatingSpecializations.empty() && 471 "failed to clean up an InstantiatingTemplate?"); 472 473 if (VisContext) FreeVisContext(); 474 475 // Kill all the active scopes. 
  for (sema::FunctionScopeInfo *FSI : FunctionScopes)
    delete FSI;

  // Tell the SemaConsumer to forget about us; we're going out of scope.
  if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
    SC->ForgetSema();

  // Detach from the external Sema source.
  if (ExternalSemaSource *ExternalSema
        = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
    ExternalSema->ForgetSema();

  // If Sema's ExternalSource is the multiplexer, we own it.
  if (isMultiplexExternalSource)
    delete ExternalSource;

  // Delete cached satisfactions.
  std::vector<ConstraintSatisfaction *> Satisfactions;
  Satisfactions.reserve(SatisfactionCache.size());
  for (auto &Node : SatisfactionCache)
    Satisfactions.push_back(&Node);
  for (auto *Node : Satisfactions)
    delete Node;

  threadSafety::threadSafetyCleanup(ThreadSafetyDeclCache);

  // Destroy the data sharing attributes stack for OpenMP.
  DestroyDataSharingAttributesStack();

  // Detach from the PP callback handler which outlives Sema since it's owned
  // by the preprocessor.
  SemaPPCallbackHandler->reset();
}

void Sema::warnStackExhausted(SourceLocation Loc) {
  // Only warn about this once.
  if (!WarnedStackExhausted) {
    Diag(Loc, diag::warn_stack_exhausted);
    WarnedStackExhausted = true;
  }
}

void Sema::runWithSufficientStackSpace(SourceLocation Loc,
                                       llvm::function_ref<void()> Fn) {
  clang::runWithSufficientStackSpace([&] { warnStackExhausted(Loc); }, Fn);
}

/// makeUnavailableInSystemHeader - There is an error in the current
/// context. If we're still in a system header, and we can plausibly
/// make the relevant declaration unavailable instead of erroring, do
/// so and return true.
bool Sema::makeUnavailableInSystemHeader(SourceLocation loc,
                                      UnavailableAttr::ImplicitReason reason) {
  // If we're not in a function, it's an error.
  FunctionDecl *fn = dyn_cast<FunctionDecl>(CurContext);
  if (!fn) return false;

  // If we're in template instantiation, it's an error.
  if (inTemplateInstantiation())
    return false;

  // If that function's not in a system header, it's an error.
  if (!Context.getSourceManager().isInSystemHeader(loc))
    return false;

  // If the function is already unavailable, it's not an error.
  if (fn->hasAttr<UnavailableAttr>()) return true;

  fn->addAttr(UnavailableAttr::CreateImplicit(Context, "", reason, loc));
  return true;
}

ASTMutationListener *Sema::getASTMutationListener() const {
  return getASTConsumer().GetASTMutationListener();
}

/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void Sema::addExternalSource(ExternalSemaSource *E) {
  assert(E && "Cannot use with NULL ptr");

  if (!ExternalSource) {
    ExternalSource = E;
    return;
  }

  if (isMultiplexExternalSource)
    static_cast<MultiplexExternalSemaSource *>(ExternalSource)->addSource(*E);
  else {
    ExternalSource = new MultiplexExternalSemaSource(*ExternalSource, *E);
    isMultiplexExternalSource = true;
  }
}

/// Print out statistics about the semantic analysis.
574 void Sema::PrintStats() const { 575 llvm::errs() << "\n*** Semantic Analysis Stats:\n"; 576 llvm::errs() << NumSFINAEErrors << " SFINAE diagnostics trapped.\n"; 577 578 BumpAlloc.PrintStats(); 579 AnalysisWarnings.PrintStats(); 580 } 581 582 void Sema::diagnoseNullableToNonnullConversion(QualType DstType, 583 QualType SrcType, 584 SourceLocation Loc) { 585 Optional<NullabilityKind> ExprNullability = SrcType->getNullability(Context); 586 if (!ExprNullability || (*ExprNullability != NullabilityKind::Nullable && 587 *ExprNullability != NullabilityKind::NullableResult)) 588 return; 589 590 Optional<NullabilityKind> TypeNullability = DstType->getNullability(Context); 591 if (!TypeNullability || *TypeNullability != NullabilityKind::NonNull) 592 return; 593 594 Diag(Loc, diag::warn_nullability_lost) << SrcType << DstType; 595 } 596 597 void Sema::diagnoseZeroToNullptrConversion(CastKind Kind, const Expr* E) { 598 if (Diags.isIgnored(diag::warn_zero_as_null_pointer_constant, 599 E->getBeginLoc())) 600 return; 601 // nullptr only exists from C++11 on, so don't warn on its absence earlier. 602 if (!getLangOpts().CPlusPlus11) 603 return; 604 605 if (Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer) 606 return; 607 if (E->IgnoreParenImpCasts()->getType()->isNullPtrType()) 608 return; 609 610 // Don't diagnose the conversion from a 0 literal to a null pointer argument 611 // in a synthesized call to operator<=>. 612 if (!CodeSynthesisContexts.empty() && 613 CodeSynthesisContexts.back().Kind == 614 CodeSynthesisContext::RewritingOperatorAsSpaceship) 615 return; 616 617 // If it is a macro from system header, and if the macro name is not "NULL", 618 // do not warn. 619 SourceLocation MaybeMacroLoc = E->getBeginLoc(); 620 if (Diags.getSuppressSystemWarnings() && 621 SourceMgr.isInSystemMacro(MaybeMacroLoc) && 622 !findMacroSpelling(MaybeMacroLoc, "NULL")) 623 return; 624 625 Diag(E->getBeginLoc(), diag::warn_zero_as_null_pointer_constant) 626 << FixItHint::CreateReplacement(E->getSourceRange(), "nullptr"); 627 } 628 629 /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit cast. 630 /// If there is already an implicit cast, merge into the existing one. 631 /// The result is of the given category. 632 ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty, 633 CastKind Kind, ExprValueKind VK, 634 const CXXCastPath *BasePath, 635 CheckedConversionKind CCK) { 636 #ifndef NDEBUG 637 if (VK == VK_PRValue && !E->isPRValue()) { 638 switch (Kind) { 639 default: 640 llvm_unreachable( 641 ("can't implicitly cast glvalue to prvalue with this cast " 642 "kind: " + 643 std::string(CastExpr::getCastKindName(Kind))) 644 .c_str()); 645 case CK_Dependent: 646 case CK_LValueToRValue: 647 case CK_ArrayToPointerDecay: 648 case CK_FunctionToPointerDecay: 649 case CK_ToVoid: 650 case CK_NonAtomicToAtomic: 651 break; 652 } 653 } 654 assert((VK == VK_PRValue || Kind == CK_Dependent || !E->isPRValue()) && 655 "can't cast prvalue to glvalue"); 656 #endif 657 658 diagnoseNullableToNonnullConversion(Ty, E->getType(), E->getBeginLoc()); 659 diagnoseZeroToNullptrConversion(Kind, E); 660 661 QualType ExprTy = Context.getCanonicalType(E->getType()); 662 QualType TypeTy = Context.getCanonicalType(Ty); 663 664 if (ExprTy == TypeTy) 665 return E; 666 667 if (Kind == CK_ArrayToPointerDecay) { 668 // C++1z [conv.array]: The temporary materialization conversion is applied. 669 // We also use this to fuel C++ DR1213, which applies to C++11 onwards. 
670 if (getLangOpts().CPlusPlus && E->isPRValue()) { 671 // The temporary is an lvalue in C++98 and an xvalue otherwise. 672 ExprResult Materialized = CreateMaterializeTemporaryExpr( 673 E->getType(), E, !getLangOpts().CPlusPlus11); 674 if (Materialized.isInvalid()) 675 return ExprError(); 676 E = Materialized.get(); 677 } 678 // C17 6.7.1p6 footnote 124: The implementation can treat any register 679 // declaration simply as an auto declaration. However, whether or not 680 // addressable storage is actually used, the address of any part of an 681 // object declared with storage-class specifier register cannot be 682 // computed, either explicitly(by use of the unary & operator as discussed 683 // in 6.5.3.2) or implicitly(by converting an array name to a pointer as 684 // discussed in 6.3.2.1).Thus, the only operator that can be applied to an 685 // array declared with storage-class specifier register is sizeof. 686 if (VK == VK_PRValue && !getLangOpts().CPlusPlus && !E->isPRValue()) { 687 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) { 688 if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) { 689 if (VD->getStorageClass() == SC_Register) { 690 Diag(E->getExprLoc(), diag::err_typecheck_address_of) 691 << /*register variable*/ 3 << E->getSourceRange(); 692 return ExprError(); 693 } 694 } 695 } 696 } 697 } 698 699 if (ImplicitCastExpr *ImpCast = dyn_cast<ImplicitCastExpr>(E)) { 700 if (ImpCast->getCastKind() == Kind && (!BasePath || BasePath->empty())) { 701 ImpCast->setType(Ty); 702 ImpCast->setValueKind(VK); 703 return E; 704 } 705 } 706 707 return ImplicitCastExpr::Create(Context, Ty, Kind, E, BasePath, VK, 708 CurFPFeatureOverrides()); 709 } 710 711 /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding 712 /// to the conversion from scalar type ScalarTy to the Boolean type. 713 CastKind Sema::ScalarTypeToBooleanCastKind(QualType ScalarTy) { 714 switch (ScalarTy->getScalarTypeKind()) { 715 case Type::STK_Bool: return CK_NoOp; 716 case Type::STK_CPointer: return CK_PointerToBoolean; 717 case Type::STK_BlockPointer: return CK_PointerToBoolean; 718 case Type::STK_ObjCObjectPointer: return CK_PointerToBoolean; 719 case Type::STK_MemberPointer: return CK_MemberPointerToBoolean; 720 case Type::STK_Integral: return CK_IntegralToBoolean; 721 case Type::STK_Floating: return CK_FloatingToBoolean; 722 case Type::STK_IntegralComplex: return CK_IntegralComplexToBoolean; 723 case Type::STK_FloatingComplex: return CK_FloatingComplexToBoolean; 724 case Type::STK_FixedPoint: return CK_FixedPointToBoolean; 725 } 726 llvm_unreachable("unknown scalar type kind"); 727 } 728 729 /// Used to prune the decls of Sema's UnusedFileScopedDecls vector. 730 static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) { 731 if (D->getMostRecentDecl()->isUsed()) 732 return true; 733 734 if (D->isExternallyVisible()) 735 return true; 736 737 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { 738 // If this is a function template and none of its specializations is used, 739 // we should warn. 740 if (FunctionTemplateDecl *Template = FD->getDescribedFunctionTemplate()) 741 for (const auto *Spec : Template->specializations()) 742 if (ShouldRemoveFromUnused(SemaRef, Spec)) 743 return true; 744 745 // UnusedFileScopedDecls stores the first declaration. 746 // The declaration may have become definition so check again. 
747 const FunctionDecl *DeclToCheck; 748 if (FD->hasBody(DeclToCheck)) 749 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck); 750 751 // Later redecls may add new information resulting in not having to warn, 752 // so check again. 753 DeclToCheck = FD->getMostRecentDecl(); 754 if (DeclToCheck != FD) 755 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck); 756 } 757 758 if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { 759 // If a variable usable in constant expressions is referenced, 760 // don't warn if it isn't used: if the value of a variable is required 761 // for the computation of a constant expression, it doesn't make sense to 762 // warn even if the variable isn't odr-used. (isReferenced doesn't 763 // precisely reflect that, but it's a decent approximation.) 764 if (VD->isReferenced() && 765 VD->mightBeUsableInConstantExpressions(SemaRef->Context)) 766 return true; 767 768 if (VarTemplateDecl *Template = VD->getDescribedVarTemplate()) 769 // If this is a variable template and none of its specializations is used, 770 // we should warn. 771 for (const auto *Spec : Template->specializations()) 772 if (ShouldRemoveFromUnused(SemaRef, Spec)) 773 return true; 774 775 // UnusedFileScopedDecls stores the first declaration. 776 // The declaration may have become definition so check again. 777 const VarDecl *DeclToCheck = VD->getDefinition(); 778 if (DeclToCheck) 779 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck); 780 781 // Later redecls may add new information resulting in not having to warn, 782 // so check again. 783 DeclToCheck = VD->getMostRecentDecl(); 784 if (DeclToCheck != VD) 785 return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck); 786 } 787 788 return false; 789 } 790 791 static bool isFunctionOrVarDeclExternC(NamedDecl *ND) { 792 if (auto *FD = dyn_cast<FunctionDecl>(ND)) 793 return FD->isExternC(); 794 return cast<VarDecl>(ND)->isExternC(); 795 } 796 797 /// Determine whether ND is an external-linkage function or variable whose 798 /// type has no linkage. 799 bool Sema::isExternalWithNoLinkageType(ValueDecl *VD) { 800 // Note: it's not quite enough to check whether VD has UniqueExternalLinkage, 801 // because we also want to catch the case where its type has VisibleNoLinkage, 802 // which does not affect the linkage of VD. 803 return getLangOpts().CPlusPlus && VD->hasExternalFormalLinkage() && 804 !isExternalFormalLinkage(VD->getType()->getLinkage()) && 805 !isFunctionOrVarDeclExternC(VD); 806 } 807 808 /// Obtains a sorted list of functions and variables that are undefined but 809 /// ODR-used. 810 void Sema::getUndefinedButUsed( 811 SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined) { 812 for (const auto &UndefinedUse : UndefinedButUsed) { 813 NamedDecl *ND = UndefinedUse.first; 814 815 // Ignore attributes that have become invalid. 816 if (ND->isInvalidDecl()) continue; 817 818 // __attribute__((weakref)) is basically a definition. 819 if (ND->hasAttr<WeakRefAttr>()) continue; 820 821 if (isa<CXXDeductionGuideDecl>(ND)) 822 continue; 823 824 if (ND->hasAttr<DLLImportAttr>() || ND->hasAttr<DLLExportAttr>()) { 825 // An exported function will always be emitted when defined, so even if 826 // the function is inline, it doesn't have to be emitted in this TU. An 827 // imported function implies that it has been exported somewhere else. 
828 continue; 829 } 830 831 if (FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) { 832 if (FD->isDefined()) 833 continue; 834 if (FD->isExternallyVisible() && 835 !isExternalWithNoLinkageType(FD) && 836 !FD->getMostRecentDecl()->isInlined() && 837 !FD->hasAttr<ExcludeFromExplicitInstantiationAttr>()) 838 continue; 839 if (FD->getBuiltinID()) 840 continue; 841 } else { 842 auto *VD = cast<VarDecl>(ND); 843 if (VD->hasDefinition() != VarDecl::DeclarationOnly) 844 continue; 845 if (VD->isExternallyVisible() && 846 !isExternalWithNoLinkageType(VD) && 847 !VD->getMostRecentDecl()->isInline() && 848 !VD->hasAttr<ExcludeFromExplicitInstantiationAttr>()) 849 continue; 850 851 // Skip VarDecls that lack formal definitions but which we know are in 852 // fact defined somewhere. 853 if (VD->isKnownToBeDefined()) 854 continue; 855 } 856 857 Undefined.push_back(std::make_pair(ND, UndefinedUse.second)); 858 } 859 } 860 861 /// checkUndefinedButUsed - Check for undefined objects with internal linkage 862 /// or that are inline. 863 static void checkUndefinedButUsed(Sema &S) { 864 if (S.UndefinedButUsed.empty()) return; 865 866 // Collect all the still-undefined entities with internal linkage. 867 SmallVector<std::pair<NamedDecl *, SourceLocation>, 16> Undefined; 868 S.getUndefinedButUsed(Undefined); 869 if (Undefined.empty()) return; 870 871 for (auto Undef : Undefined) { 872 ValueDecl *VD = cast<ValueDecl>(Undef.first); 873 SourceLocation UseLoc = Undef.second; 874 875 if (S.isExternalWithNoLinkageType(VD)) { 876 // C++ [basic.link]p8: 877 // A type without linkage shall not be used as the type of a variable 878 // or function with external linkage unless 879 // -- the entity has C language linkage 880 // -- the entity is not odr-used or is defined in the same TU 881 // 882 // As an extension, accept this in cases where the type is externally 883 // visible, since the function or variable actually can be defined in 884 // another translation unit in that case. 885 S.Diag(VD->getLocation(), isExternallyVisible(VD->getType()->getLinkage()) 886 ? diag::ext_undefined_internal_type 887 : diag::err_undefined_internal_type) 888 << isa<VarDecl>(VD) << VD; 889 } else if (!VD->isExternallyVisible()) { 890 // FIXME: We can promote this to an error. The function or variable can't 891 // be defined anywhere else, so the program must necessarily violate the 892 // one definition rule. 893 bool IsImplicitBase = false; 894 if (const auto *BaseD = dyn_cast<FunctionDecl>(VD)) { 895 auto *DVAttr = BaseD->getAttr<OMPDeclareVariantAttr>(); 896 if (DVAttr && !DVAttr->getTraitInfo().isExtensionActive( 897 llvm::omp::TraitProperty:: 898 implementation_extension_disable_implicit_base)) { 899 const auto *Func = cast<FunctionDecl>( 900 cast<DeclRefExpr>(DVAttr->getVariantFuncRef())->getDecl()); 901 IsImplicitBase = BaseD->isImplicit() && 902 Func->getIdentifier()->isMangledOpenMPVariantName(); 903 } 904 } 905 if (!S.getLangOpts().OpenMP || !IsImplicitBase) 906 S.Diag(VD->getLocation(), diag::warn_undefined_internal) 907 << isa<VarDecl>(VD) << VD; 908 } else if (auto *FD = dyn_cast<FunctionDecl>(VD)) { 909 (void)FD; 910 assert(FD->getMostRecentDecl()->isInlined() && 911 "used object requires definition but isn't inline or internal?"); 912 // FIXME: This is ill-formed; we should reject. 
913 S.Diag(VD->getLocation(), diag::warn_undefined_inline) << VD; 914 } else { 915 assert(cast<VarDecl>(VD)->getMostRecentDecl()->isInline() && 916 "used var requires definition but isn't inline or internal?"); 917 S.Diag(VD->getLocation(), diag::err_undefined_inline_var) << VD; 918 } 919 if (UseLoc.isValid()) 920 S.Diag(UseLoc, diag::note_used_here); 921 } 922 923 S.UndefinedButUsed.clear(); 924 } 925 926 void Sema::LoadExternalWeakUndeclaredIdentifiers() { 927 if (!ExternalSource) 928 return; 929 930 SmallVector<std::pair<IdentifierInfo *, WeakInfo>, 4> WeakIDs; 931 ExternalSource->ReadWeakUndeclaredIdentifiers(WeakIDs); 932 for (auto &WeakID : WeakIDs) 933 (void)WeakUndeclaredIdentifiers[WeakID.first].insert(WeakID.second); 934 } 935 936 937 typedef llvm::DenseMap<const CXXRecordDecl*, bool> RecordCompleteMap; 938 939 /// Returns true, if all methods and nested classes of the given 940 /// CXXRecordDecl are defined in this translation unit. 941 /// 942 /// Should only be called from ActOnEndOfTranslationUnit so that all 943 /// definitions are actually read. 944 static bool MethodsAndNestedClassesComplete(const CXXRecordDecl *RD, 945 RecordCompleteMap &MNCComplete) { 946 RecordCompleteMap::iterator Cache = MNCComplete.find(RD); 947 if (Cache != MNCComplete.end()) 948 return Cache->second; 949 if (!RD->isCompleteDefinition()) 950 return false; 951 bool Complete = true; 952 for (DeclContext::decl_iterator I = RD->decls_begin(), 953 E = RD->decls_end(); 954 I != E && Complete; ++I) { 955 if (const CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(*I)) 956 Complete = M->isDefined() || M->isDefaulted() || 957 (M->isPure() && !isa<CXXDestructorDecl>(M)); 958 else if (const FunctionTemplateDecl *F = dyn_cast<FunctionTemplateDecl>(*I)) 959 // If the template function is marked as late template parsed at this 960 // point, it has not been instantiated and therefore we have not 961 // performed semantic analysis on it yet, so we cannot know if the type 962 // can be considered complete. 963 Complete = !F->getTemplatedDecl()->isLateTemplateParsed() && 964 F->getTemplatedDecl()->isDefined(); 965 else if (const CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(*I)) { 966 if (R->isInjectedClassName()) 967 continue; 968 if (R->hasDefinition()) 969 Complete = MethodsAndNestedClassesComplete(R->getDefinition(), 970 MNCComplete); 971 else 972 Complete = false; 973 } 974 } 975 MNCComplete[RD] = Complete; 976 return Complete; 977 } 978 979 /// Returns true, if the given CXXRecordDecl is fully defined in this 980 /// translation unit, i.e. all methods are defined or pure virtual and all 981 /// friends, friend functions and nested classes are fully defined in this 982 /// translation unit. 983 /// 984 /// Should only be called from ActOnEndOfTranslationUnit so that all 985 /// definitions are actually read. 986 static bool IsRecordFullyDefined(const CXXRecordDecl *RD, 987 RecordCompleteMap &RecordsComplete, 988 RecordCompleteMap &MNCComplete) { 989 RecordCompleteMap::iterator Cache = RecordsComplete.find(RD); 990 if (Cache != RecordsComplete.end()) 991 return Cache->second; 992 bool Complete = MethodsAndNestedClassesComplete(RD, MNCComplete); 993 for (CXXRecordDecl::friend_iterator I = RD->friend_begin(), 994 E = RD->friend_end(); 995 I != E && Complete; ++I) { 996 // Check if friend classes and methods are complete. 997 if (TypeSourceInfo *TSI = (*I)->getFriendType()) { 998 // Friend classes are available as the TypeSourceInfo of the FriendDecl. 
999 if (CXXRecordDecl *FriendD = TSI->getType()->getAsCXXRecordDecl()) 1000 Complete = MethodsAndNestedClassesComplete(FriendD, MNCComplete); 1001 else 1002 Complete = false; 1003 } else { 1004 // Friend functions are available through the NamedDecl of FriendDecl. 1005 if (const FunctionDecl *FD = 1006 dyn_cast<FunctionDecl>((*I)->getFriendDecl())) 1007 Complete = FD->isDefined(); 1008 else 1009 // This is a template friend, give up. 1010 Complete = false; 1011 } 1012 } 1013 RecordsComplete[RD] = Complete; 1014 return Complete; 1015 } 1016 1017 void Sema::emitAndClearUnusedLocalTypedefWarnings() { 1018 if (ExternalSource) 1019 ExternalSource->ReadUnusedLocalTypedefNameCandidates( 1020 UnusedLocalTypedefNameCandidates); 1021 for (const TypedefNameDecl *TD : UnusedLocalTypedefNameCandidates) { 1022 if (TD->isReferenced()) 1023 continue; 1024 Diag(TD->getLocation(), diag::warn_unused_local_typedef) 1025 << isa<TypeAliasDecl>(TD) << TD->getDeclName(); 1026 } 1027 UnusedLocalTypedefNameCandidates.clear(); 1028 } 1029 1030 /// This is called before the very first declaration in the translation unit 1031 /// is parsed. Note that the ASTContext may have already injected some 1032 /// declarations. 1033 void Sema::ActOnStartOfTranslationUnit() { 1034 if (getLangOpts().CPlusPlusModules && 1035 getLangOpts().getCompilingModule() == LangOptions::CMK_HeaderUnit) 1036 HandleStartOfHeaderUnit(); 1037 else if (getLangOpts().ModulesTS && 1038 (getLangOpts().getCompilingModule() == 1039 LangOptions::CMK_ModuleInterface || 1040 getLangOpts().getCompilingModule() == LangOptions::CMK_None)) { 1041 // We start in an implied global module fragment. 1042 SourceLocation StartOfTU = 1043 SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID()); 1044 ActOnGlobalModuleFragmentDecl(StartOfTU); 1045 ModuleScopes.back().ImplicitGlobalModuleFragment = true; 1046 } 1047 } 1048 1049 void Sema::ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind) { 1050 // No explicit actions are required at the end of the global module fragment. 1051 if (Kind == TUFragmentKind::Global) 1052 return; 1053 1054 // Transfer late parsed template instantiations over to the pending template 1055 // instantiation list. During normal compilation, the late template parser 1056 // will be installed and instantiating these templates will succeed. 1057 // 1058 // If we are building a TU prefix for serialization, it is also safe to 1059 // transfer these over, even though they are not parsed. The end of the TU 1060 // should be outside of any eager template instantiation scope, so when this 1061 // AST is deserialized, these templates will not be parsed until the end of 1062 // the combined TU. 1063 PendingInstantiations.insert(PendingInstantiations.end(), 1064 LateParsedInstantiations.begin(), 1065 LateParsedInstantiations.end()); 1066 LateParsedInstantiations.clear(); 1067 1068 // If DefinedUsedVTables ends up marking any virtual member functions it 1069 // might lead to more pending template instantiations, which we then need 1070 // to instantiate. 1071 DefineUsedVTables(); 1072 1073 // C++: Perform implicit template instantiations. 1074 // 1075 // FIXME: When we perform these implicit instantiations, we do not 1076 // carefully keep track of the point of instantiation (C++ [temp.point]). 1077 // This means that name lookup that occurs within the template 1078 // instantiation will always happen at the end of the translation unit, 1079 // so it will find some names that are not required to be found. 
This is 1080 // valid, but we could do better by diagnosing if an instantiation uses a 1081 // name that was not visible at its first point of instantiation. 1082 if (ExternalSource) { 1083 // Load pending instantiations from the external source. 1084 SmallVector<PendingImplicitInstantiation, 4> Pending; 1085 ExternalSource->ReadPendingInstantiations(Pending); 1086 for (auto PII : Pending) 1087 if (auto Func = dyn_cast<FunctionDecl>(PII.first)) 1088 Func->setInstantiationIsPending(true); 1089 PendingInstantiations.insert(PendingInstantiations.begin(), 1090 Pending.begin(), Pending.end()); 1091 } 1092 1093 { 1094 llvm::TimeTraceScope TimeScope("PerformPendingInstantiations"); 1095 PerformPendingInstantiations(); 1096 } 1097 1098 emitDeferredDiags(); 1099 1100 assert(LateParsedInstantiations.empty() && 1101 "end of TU template instantiation should not create more " 1102 "late-parsed templates"); 1103 1104 // Report diagnostics for uncorrected delayed typos. Ideally all of them 1105 // should have been corrected by that time, but it is very hard to cover all 1106 // cases in practice. 1107 for (const auto &Typo : DelayedTypos) { 1108 // We pass an empty TypoCorrection to indicate no correction was performed. 1109 Typo.second.DiagHandler(TypoCorrection()); 1110 } 1111 DelayedTypos.clear(); 1112 } 1113 1114 /// ActOnEndOfTranslationUnit - This is called at the very end of the 1115 /// translation unit when EOF is reached and all but the top-level scope is 1116 /// popped. 1117 void Sema::ActOnEndOfTranslationUnit() { 1118 assert(DelayedDiagnostics.getCurrentPool() == nullptr 1119 && "reached end of translation unit with a pool attached?"); 1120 1121 // If code completion is enabled, don't perform any end-of-translation-unit 1122 // work. 1123 if (PP.isCodeCompletionEnabled()) 1124 return; 1125 1126 // Complete translation units and modules define vtables and perform implicit 1127 // instantiations. PCH files do not. 1128 if (TUKind != TU_Prefix) { 1129 DiagnoseUseOfUnimplementedSelectors(); 1130 1131 ActOnEndOfTranslationUnitFragment( 1132 !ModuleScopes.empty() && ModuleScopes.back().Module->Kind == 1133 Module::PrivateModuleFragment 1134 ? TUFragmentKind::Private 1135 : TUFragmentKind::Normal); 1136 1137 if (LateTemplateParserCleanup) 1138 LateTemplateParserCleanup(OpaqueParser); 1139 1140 CheckDelayedMemberExceptionSpecs(); 1141 } else { 1142 // If we are building a TU prefix for serialization, it is safe to transfer 1143 // these over, even though they are not parsed. The end of the TU should be 1144 // outside of any eager template instantiation scope, so when this AST is 1145 // deserialized, these templates will not be parsed until the end of the 1146 // combined TU. 1147 PendingInstantiations.insert(PendingInstantiations.end(), 1148 LateParsedInstantiations.begin(), 1149 LateParsedInstantiations.end()); 1150 LateParsedInstantiations.clear(); 1151 1152 if (LangOpts.PCHInstantiateTemplates) { 1153 llvm::TimeTraceScope TimeScope("PerformPendingInstantiations"); 1154 PerformPendingInstantiations(); 1155 } 1156 } 1157 1158 DiagnoseUnterminatedPragmaAlignPack(); 1159 DiagnoseUnterminatedPragmaAttribute(); 1160 DiagnoseUnterminatedOpenMPDeclareTarget(); 1161 1162 // All delayed member exception specs should be checked or we end up accepting 1163 // incompatible declarations. 1164 assert(DelayedOverridingExceptionSpecChecks.empty()); 1165 assert(DelayedEquivalentExceptionSpecChecks.empty()); 1166 1167 // All dllexport classes should have been processed already. 
1168 assert(DelayedDllExportClasses.empty()); 1169 assert(DelayedDllExportMemberFunctions.empty()); 1170 1171 // Remove file scoped decls that turned out to be used. 1172 UnusedFileScopedDecls.erase( 1173 std::remove_if(UnusedFileScopedDecls.begin(nullptr, true), 1174 UnusedFileScopedDecls.end(), 1175 [this](const DeclaratorDecl *DD) { 1176 return ShouldRemoveFromUnused(this, DD); 1177 }), 1178 UnusedFileScopedDecls.end()); 1179 1180 if (TUKind == TU_Prefix) { 1181 // Translation unit prefixes don't need any of the checking below. 1182 if (!PP.isIncrementalProcessingEnabled()) 1183 TUScope = nullptr; 1184 return; 1185 } 1186 1187 // Check for #pragma weak identifiers that were never declared 1188 LoadExternalWeakUndeclaredIdentifiers(); 1189 for (const auto &WeakIDs : WeakUndeclaredIdentifiers) { 1190 if (WeakIDs.second.empty()) 1191 continue; 1192 1193 Decl *PrevDecl = LookupSingleName(TUScope, WeakIDs.first, SourceLocation(), 1194 LookupOrdinaryName); 1195 if (PrevDecl != nullptr && 1196 !(isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl))) 1197 for (const auto &WI : WeakIDs.second) 1198 Diag(WI.getLocation(), diag::warn_attribute_wrong_decl_type) 1199 << "'weak'" << ExpectedVariableOrFunction; 1200 else 1201 for (const auto &WI : WeakIDs.second) 1202 Diag(WI.getLocation(), diag::warn_weak_identifier_undeclared) 1203 << WeakIDs.first; 1204 } 1205 1206 if (LangOpts.CPlusPlus11 && 1207 !Diags.isIgnored(diag::warn_delegating_ctor_cycle, SourceLocation())) 1208 CheckDelegatingCtorCycles(); 1209 1210 if (!Diags.hasErrorOccurred()) { 1211 if (ExternalSource) 1212 ExternalSource->ReadUndefinedButUsed(UndefinedButUsed); 1213 checkUndefinedButUsed(*this); 1214 } 1215 1216 // A global-module-fragment is only permitted within a module unit. 1217 bool DiagnosedMissingModuleDeclaration = false; 1218 if (!ModuleScopes.empty() && 1219 ModuleScopes.back().Module->Kind == Module::GlobalModuleFragment && 1220 !ModuleScopes.back().ImplicitGlobalModuleFragment) { 1221 Diag(ModuleScopes.back().BeginLoc, 1222 diag::err_module_declaration_missing_after_global_module_introducer); 1223 DiagnosedMissingModuleDeclaration = true; 1224 } 1225 1226 if (TUKind == TU_Module) { 1227 // If we are building a module interface unit, we need to have seen the 1228 // module declaration by now. 1229 if (getLangOpts().getCompilingModule() == 1230 LangOptions::CMK_ModuleInterface && 1231 (ModuleScopes.empty() || 1232 !ModuleScopes.back().Module->isModulePurview()) && 1233 !DiagnosedMissingModuleDeclaration) { 1234 // FIXME: Make a better guess as to where to put the module declaration. 1235 Diag(getSourceManager().getLocForStartOfFile( 1236 getSourceManager().getMainFileID()), 1237 diag::err_module_declaration_missing); 1238 } 1239 1240 // If we are building a module, resolve all of the exported declarations 1241 // now. 1242 if (Module *CurrentModule = PP.getCurrentModule()) { 1243 ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap(); 1244 1245 SmallVector<Module *, 2> Stack; 1246 Stack.push_back(CurrentModule); 1247 while (!Stack.empty()) { 1248 Module *Mod = Stack.pop_back_val(); 1249 1250 // Resolve the exported declarations and conflicts. 1251 // FIXME: Actually complain, once we figure out how to teach the 1252 // diagnostic client to deal with complaints in the module map at this 1253 // point. 
1254 ModMap.resolveExports(Mod, /*Complain=*/false); 1255 ModMap.resolveUses(Mod, /*Complain=*/false); 1256 ModMap.resolveConflicts(Mod, /*Complain=*/false); 1257 1258 // Queue the submodules, so their exports will also be resolved. 1259 Stack.append(Mod->submodule_begin(), Mod->submodule_end()); 1260 } 1261 } 1262 1263 // Warnings emitted in ActOnEndOfTranslationUnit() should be emitted for 1264 // modules when they are built, not every time they are used. 1265 emitAndClearUnusedLocalTypedefWarnings(); 1266 } 1267 1268 // C99 6.9.2p2: 1269 // A declaration of an identifier for an object that has file 1270 // scope without an initializer, and without a storage-class 1271 // specifier or with the storage-class specifier static, 1272 // constitutes a tentative definition. If a translation unit 1273 // contains one or more tentative definitions for an identifier, 1274 // and the translation unit contains no external definition for 1275 // that identifier, then the behavior is exactly as if the 1276 // translation unit contains a file scope declaration of that 1277 // identifier, with the composite type as of the end of the 1278 // translation unit, with an initializer equal to 0. 1279 llvm::SmallSet<VarDecl *, 32> Seen; 1280 for (TentativeDefinitionsType::iterator 1281 T = TentativeDefinitions.begin(ExternalSource), 1282 TEnd = TentativeDefinitions.end(); 1283 T != TEnd; ++T) { 1284 VarDecl *VD = (*T)->getActingDefinition(); 1285 1286 // If the tentative definition was completed, getActingDefinition() returns 1287 // null. If we've already seen this variable before, insert()'s second 1288 // return value is false. 1289 if (!VD || VD->isInvalidDecl() || !Seen.insert(VD).second) 1290 continue; 1291 1292 if (const IncompleteArrayType *ArrayT 1293 = Context.getAsIncompleteArrayType(VD->getType())) { 1294 // Set the length of the array to 1 (C99 6.9.2p5). 1295 Diag(VD->getLocation(), diag::warn_tentative_incomplete_array); 1296 llvm::APInt One(Context.getTypeSize(Context.getSizeType()), true); 1297 QualType T = Context.getConstantArrayType(ArrayT->getElementType(), One, 1298 nullptr, ArrayType::Normal, 0); 1299 VD->setType(T); 1300 } else if (RequireCompleteType(VD->getLocation(), VD->getType(), 1301 diag::err_tentative_def_incomplete_type)) 1302 VD->setInvalidDecl(); 1303 1304 // No initialization is performed for a tentative definition. 1305 CheckCompleteVariableDeclaration(VD); 1306 1307 // Notify the consumer that we've completed a tentative definition. 1308 if (!VD->isInvalidDecl()) 1309 Consumer.CompleteTentativeDefinition(VD); 1310 } 1311 1312 for (auto D : ExternalDeclarations) { 1313 if (!D || D->isInvalidDecl() || D->getPreviousDecl() || !D->isUsed()) 1314 continue; 1315 1316 Consumer.CompleteExternalDeclaration(D); 1317 } 1318 1319 // If there were errors, disable 'unused' warnings since they will mostly be 1320 // noise. Don't warn for a use from a module: either we should warn on all 1321 // file-scope declarations in modules or not at all, but whether the 1322 // declaration is used is immaterial. 1323 if (!Diags.hasErrorOccurred() && TUKind != TU_Module) { 1324 // Output warning for unused file scoped decls. 
1325 for (UnusedFileScopedDeclsType::iterator 1326 I = UnusedFileScopedDecls.begin(ExternalSource), 1327 E = UnusedFileScopedDecls.end(); I != E; ++I) { 1328 if (ShouldRemoveFromUnused(this, *I)) 1329 continue; 1330 1331 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) { 1332 const FunctionDecl *DiagD; 1333 if (!FD->hasBody(DiagD)) 1334 DiagD = FD; 1335 if (DiagD->isDeleted()) 1336 continue; // Deleted functions are supposed to be unused. 1337 if (DiagD->isReferenced()) { 1338 if (isa<CXXMethodDecl>(DiagD)) 1339 Diag(DiagD->getLocation(), diag::warn_unneeded_member_function) 1340 << DiagD; 1341 else { 1342 if (FD->getStorageClass() == SC_Static && 1343 !FD->isInlineSpecified() && 1344 !SourceMgr.isInMainFile( 1345 SourceMgr.getExpansionLoc(FD->getLocation()))) 1346 Diag(DiagD->getLocation(), 1347 diag::warn_unneeded_static_internal_decl) 1348 << DiagD; 1349 else 1350 Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl) 1351 << /*function*/ 0 << DiagD; 1352 } 1353 } else { 1354 if (FD->getDescribedFunctionTemplate()) 1355 Diag(DiagD->getLocation(), diag::warn_unused_template) 1356 << /*function*/ 0 << DiagD; 1357 else 1358 Diag(DiagD->getLocation(), isa<CXXMethodDecl>(DiagD) 1359 ? diag::warn_unused_member_function 1360 : diag::warn_unused_function) 1361 << DiagD; 1362 } 1363 } else { 1364 const VarDecl *DiagD = cast<VarDecl>(*I)->getDefinition(); 1365 if (!DiagD) 1366 DiagD = cast<VarDecl>(*I); 1367 if (DiagD->isReferenced()) { 1368 Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl) 1369 << /*variable*/ 1 << DiagD; 1370 } else if (DiagD->getType().isConstQualified()) { 1371 const SourceManager &SM = SourceMgr; 1372 if (SM.getMainFileID() != SM.getFileID(DiagD->getLocation()) || 1373 !PP.getLangOpts().IsHeaderFile) 1374 Diag(DiagD->getLocation(), diag::warn_unused_const_variable) 1375 << DiagD; 1376 } else { 1377 if (DiagD->getDescribedVarTemplate()) 1378 Diag(DiagD->getLocation(), diag::warn_unused_template) 1379 << /*variable*/ 1 << DiagD; 1380 else 1381 Diag(DiagD->getLocation(), diag::warn_unused_variable) << DiagD; 1382 } 1383 } 1384 } 1385 1386 emitAndClearUnusedLocalTypedefWarnings(); 1387 } 1388 1389 if (!Diags.isIgnored(diag::warn_unused_private_field, SourceLocation())) { 1390 // FIXME: Load additional unused private field candidates from the external 1391 // source. 1392 RecordCompleteMap RecordsComplete; 1393 RecordCompleteMap MNCComplete; 1394 for (NamedDeclSetType::iterator I = UnusedPrivateFields.begin(), 1395 E = UnusedPrivateFields.end(); I != E; ++I) { 1396 const NamedDecl *D = *I; 1397 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D->getDeclContext()); 1398 if (RD && !RD->isUnion() && 1399 IsRecordFullyDefined(RD, RecordsComplete, MNCComplete)) { 1400 Diag(D->getLocation(), diag::warn_unused_private_field) 1401 << D->getDeclName(); 1402 } 1403 } 1404 } 1405 1406 if (!Diags.isIgnored(diag::warn_mismatched_delete_new, SourceLocation())) { 1407 if (ExternalSource) 1408 ExternalSource->ReadMismatchingDeleteExpressions(DeleteExprs); 1409 for (const auto &DeletedFieldInfo : DeleteExprs) { 1410 for (const auto &DeleteExprLoc : DeletedFieldInfo.second) { 1411 AnalyzeDeleteExprMismatch(DeletedFieldInfo.first, DeleteExprLoc.first, 1412 DeleteExprLoc.second); 1413 } 1414 } 1415 } 1416 1417 // Check we've noticed that we're no longer parsing the initializer for every 1418 // variable. If we miss cases, then at best we have a performance issue and 1419 // at worst a rejects-valid bug. 
1420 assert(ParsingInitForAutoVars.empty() && 1421 "Didn't unmark var as having its initializer parsed"); 1422 1423 if (!PP.isIncrementalProcessingEnabled()) 1424 TUScope = nullptr; 1425 } 1426 1427 1428 //===----------------------------------------------------------------------===// 1429 // Helper functions. 1430 //===----------------------------------------------------------------------===// 1431 1432 DeclContext *Sema::getFunctionLevelDeclContext(bool AllowLambda) { 1433 DeclContext *DC = CurContext; 1434 1435 while (true) { 1436 if (isa<BlockDecl>(DC) || isa<EnumDecl>(DC) || isa<CapturedDecl>(DC) || 1437 isa<RequiresExprBodyDecl>(DC)) { 1438 DC = DC->getParent(); 1439 } else if (!AllowLambda && isa<CXXMethodDecl>(DC) && 1440 cast<CXXMethodDecl>(DC)->getOverloadedOperator() == OO_Call && 1441 cast<CXXRecordDecl>(DC->getParent())->isLambda()) { 1442 DC = DC->getParent()->getParent(); 1443 } else break; 1444 } 1445 1446 return DC; 1447 } 1448 1449 /// getCurFunctionDecl - If inside of a function body, this returns a pointer 1450 /// to the function decl for the function being parsed. If we're currently 1451 /// in a 'block', this returns the containing context. 1452 FunctionDecl *Sema::getCurFunctionDecl(bool AllowLambda) { 1453 DeclContext *DC = getFunctionLevelDeclContext(AllowLambda); 1454 return dyn_cast<FunctionDecl>(DC); 1455 } 1456 1457 ObjCMethodDecl *Sema::getCurMethodDecl() { 1458 DeclContext *DC = getFunctionLevelDeclContext(); 1459 while (isa<RecordDecl>(DC)) 1460 DC = DC->getParent(); 1461 return dyn_cast<ObjCMethodDecl>(DC); 1462 } 1463 1464 NamedDecl *Sema::getCurFunctionOrMethodDecl() { 1465 DeclContext *DC = getFunctionLevelDeclContext(); 1466 if (isa<ObjCMethodDecl>(DC) || isa<FunctionDecl>(DC)) 1467 return cast<NamedDecl>(DC); 1468 return nullptr; 1469 } 1470 1471 LangAS Sema::getDefaultCXXMethodAddrSpace() const { 1472 if (getLangOpts().OpenCL) 1473 return getASTContext().getDefaultOpenCLPointeeAddrSpace(); 1474 return LangAS::Default; 1475 } 1476 1477 void Sema::EmitCurrentDiagnostic(unsigned DiagID) { 1478 // FIXME: It doesn't make sense to me that DiagID is an incoming argument here 1479 // and yet we also use the current diag ID on the DiagnosticsEngine. This has 1480 // been made more painfully obvious by the refactor that introduced this 1481 // function, but it is possible that the incoming argument can be 1482 // eliminated. If it truly cannot be (for example, there is some reentrancy 1483 // issue I am not seeing yet), then there should at least be a clarifying 1484 // comment somewhere. 1485 if (Optional<TemplateDeductionInfo*> Info = isSFINAEContext()) { 1486 switch (DiagnosticIDs::getDiagnosticSFINAEResponse( 1487 Diags.getCurrentDiagID())) { 1488 case DiagnosticIDs::SFINAE_Report: 1489 // We'll report the diagnostic below. 1490 break; 1491 1492 case DiagnosticIDs::SFINAE_SubstitutionFailure: 1493 // Count this failure so that we know that template argument deduction 1494 // has failed. 1495 ++NumSFINAEErrors; 1496 1497 // Make a copy of this suppressed diagnostic and store it with the 1498 // template-deduction information. 1499 if (*Info && !(*Info)->hasSFINAEDiagnostic()) { 1500 Diagnostic DiagInfo(&Diags); 1501 (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(), 1502 PartialDiagnostic(DiagInfo, Context.getDiagAllocator())); 1503 } 1504 1505 Diags.setLastDiagnosticIgnored(true); 1506 Diags.Clear(); 1507 return; 1508 1509 case DiagnosticIDs::SFINAE_AccessControl: { 1510 // Per C++ Core Issue 1170, access control is part of SFINAE. 
      // Additionally, the AccessCheckingSFINAE flag can be used to temporarily
      // make access control a part of SFINAE for the purposes of checking
      // type traits.
      if (!AccessCheckingSFINAE && !getLangOpts().CPlusPlus11)
        break;

      SourceLocation Loc = Diags.getCurrentDiagLoc();

      // Suppress this diagnostic.
      ++NumSFINAEErrors;

      // Make a copy of this suppressed diagnostic and store it with the
      // template-deduction information.
      if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
        Diagnostic DiagInfo(&Diags);
        (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(),
                       PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
      }

      Diags.setLastDiagnosticIgnored(true);
      Diags.Clear();

      // Now that the diagnostic state is clear, produce a C++98 compatibility
      // warning.
      Diag(Loc, diag::warn_cxx98_compat_sfinae_access_control);

      // The last diagnostic which Sema produced was ignored. Suppress any
      // notes attached to it.
      Diags.setLastDiagnosticIgnored(true);
      return;
    }

    case DiagnosticIDs::SFINAE_Suppress:
      // Make a copy of this suppressed diagnostic and store it with the
      // template-deduction information.
      if (*Info) {
        Diagnostic DiagInfo(&Diags);
        (*Info)->addSuppressedDiagnostic(DiagInfo.getLocation(),
                       PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
      }

      // Suppress this diagnostic.
      Diags.setLastDiagnosticIgnored(true);
      Diags.Clear();
      return;
    }
  }

  // Copy the diagnostic printing policy over the ASTContext printing policy.
  // TODO: Stop doing that. See: https://reviews.llvm.org/D45093#1090292
  Context.setPrintingPolicy(getPrintingPolicy());

  // Emit the diagnostic.
  if (!Diags.EmitCurrentDiagnostic())
    return;

  // If this is not a note, and we're in a template instantiation
  // that is different from the last template instantiation where
  // we emitted an error, print a template instantiation
  // backtrace.
  if (!DiagnosticIDs::isBuiltinNote(DiagID))
    PrintContextStack();
}

Sema::SemaDiagnosticBuilder
Sema::Diag(SourceLocation Loc, const PartialDiagnostic &PD, bool DeferHint) {
  return Diag(Loc, PD.getDiagID(), DeferHint) << PD;
}

bool Sema::hasUncompilableErrorOccurred() const {
  if (getDiagnostics().hasUncompilableErrorOccurred())
    return true;
  auto *FD = dyn_cast<FunctionDecl>(CurContext);
  if (!FD)
    return false;
  auto Loc = DeviceDeferredDiags.find(FD);
  if (Loc == DeviceDeferredDiags.end())
    return false;
  for (auto PDAt : Loc->second) {
    if (DiagnosticIDs::isDefaultMappingAsError(PDAt.second.getDiagID()))
      return true;
  }
  return false;
}

// Print notes showing how we can reach FD starting from an a priori
// known-callable function.
static void emitCallStackNotes(Sema &S, FunctionDecl *FD) {
  auto FnIt = S.DeviceKnownEmittedFns.find(FD);
  while (FnIt != S.DeviceKnownEmittedFns.end()) {
    // Respect error limit.
    if (S.Diags.hasFatalErrorOccurred())
      return;
    DiagnosticBuilder Builder(
        S.Diags.Report(FnIt->second.Loc, diag::note_called_by));
    Builder << FnIt->second.FD;
    FnIt = S.DeviceKnownEmittedFns.find(FnIt->second.FD);
  }
}

namespace {

/// Helper class that emits deferred diagnostic messages if an entity that
/// directly or indirectly uses the function containing those deferred
/// diagnostics is known to be emitted.
///
/// During parsing of the AST, certain diagnostic messages are recorded as
/// deferred diagnostics, since it is not yet known whether the functions
/// containing them will be emitted. A list of potentially emitted functions,
/// and of variables that may trigger the emission of functions, is also
/// recorded. DeferredDiagnosticsEmitter recursively visits the functions used
/// by each of these functions in order to emit the deferred diagnostics.
///
/// During the visit, certain OpenMP directives, or the initializers of
/// variables with certain OpenMP attributes, cause subsequent visits of
/// functions to enter a state that this implementation calls the OpenMP
/// device context. The state is exited when the directive or initializer is
/// exited. This state can change the emission state of subsequent uses of
/// functions.
///
/// Conceptually, the functions or variables to be visited form a use graph
/// in which a parent node uses its child nodes. At any point of the visit,
/// the nodes traversed from the root to the current node form a use stack.
/// The emission state of the current node depends on two factors:
///   1. the emission state of the root node, and
///   2. whether the current node is in the OpenMP device context.
/// If a function is determined to be emitted, the deferred diagnostics it
/// contains are emitted, together with information about the use stack.
///
class DeferredDiagnosticsEmitter
    : public UsedDeclVisitor<DeferredDiagnosticsEmitter> {
public:
  typedef UsedDeclVisitor<DeferredDiagnosticsEmitter> Inherited;

  // Whether the function is already in the current use-path.
  llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> InUsePath;

  // The current use-path.
  llvm::SmallVector<CanonicalDeclPtr<FunctionDecl>, 4> UsePath;

  // Whether the visiting of the function has been done. Done[0] is for the
  // case not in OpenMP device context. Done[1] is for the case in OpenMP
  // device context. We need two sets because diagnostics emission may be
  // different depending on whether it is in OpenMP device context.
  llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> DoneMap[2];

  // Emission state of the root node of the current use graph.
  bool ShouldEmitRootNode;

  // Current OpenMP device context level. It is initialized to 0; each entry
  // into a device context increases it by 1 and each exit decreases it by 1.
  // A non-zero value indicates that we are currently in a device context.
  unsigned InOMPDeviceContext;

  DeferredDiagnosticsEmitter(Sema &S)
      : Inherited(S), ShouldEmitRootNode(false), InOMPDeviceContext(0) {}

  bool shouldVisitDiscardedStmt() const { return false; }

  void VisitOMPTargetDirective(OMPTargetDirective *Node) {
    ++InOMPDeviceContext;
    Inherited::VisitOMPTargetDirective(Node);
    --InOMPDeviceContext;
  }

  void visitUsedDecl(SourceLocation Loc, Decl *D) {
    if (isa<VarDecl>(D))
      return;
    if (auto *FD = dyn_cast<FunctionDecl>(D))
      checkFunc(Loc, FD);
    else
      Inherited::visitUsedDecl(Loc, D);
  }

  void checkVar(VarDecl *VD) {
    assert(VD->isFileVarDecl() &&
           "Should only check file-scope variables");
    if (auto *Init = VD->getInit()) {
      auto DevTy = OMPDeclareTargetDeclAttr::getDeviceType(VD);
      bool IsDev = DevTy && (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost ||
                             *DevTy == OMPDeclareTargetDeclAttr::DT_Any);
      if (IsDev)
        ++InOMPDeviceContext;
      this->Visit(Init);
      if (IsDev)
        --InOMPDeviceContext;
    }
  }

  void checkFunc(SourceLocation Loc, FunctionDecl *FD) {
    auto &Done = DoneMap[InOMPDeviceContext > 0 ? 1 : 0];
    FunctionDecl *Caller = UsePath.empty() ? nullptr : UsePath.back();
    if ((!ShouldEmitRootNode && !S.getLangOpts().OpenMP && !Caller) ||
        S.shouldIgnoreInHostDeviceCheck(FD) || InUsePath.count(FD))
      return;
    // Finalize analysis of OpenMP-specific constructs.
    if (Caller && S.LangOpts.OpenMP && UsePath.size() == 1 &&
        (ShouldEmitRootNode || InOMPDeviceContext))
      S.finalizeOpenMPDelayedAnalysis(Caller, FD, Loc);
    if (Caller)
      S.DeviceKnownEmittedFns[FD] = {Caller, Loc};
    // Always emit deferred diagnostics for the direct users. This does not
    // lead to an explosion of diagnostics since each user is visited at most
    // twice.
    if (ShouldEmitRootNode || InOMPDeviceContext)
      emitDeferredDiags(FD, Caller);
    // Do not revisit a function if the function body has been completely
    // visited before.
    if (!Done.insert(FD).second)
      return;
    InUsePath.insert(FD);
    UsePath.push_back(FD);
    if (auto *S = FD->getBody()) {
      this->Visit(S);
    }
    UsePath.pop_back();
    InUsePath.erase(FD);
  }

  void checkRecordedDecl(Decl *D) {
    if (auto *FD = dyn_cast<FunctionDecl>(D)) {
      ShouldEmitRootNode = S.getEmissionStatus(FD, /*Final=*/true) ==
                           Sema::FunctionEmissionStatus::Emitted;
      checkFunc(SourceLocation(), FD);
    } else
      checkVar(cast<VarDecl>(D));
  }

  // Emit any deferred diagnostics for FD.
  void emitDeferredDiags(FunctionDecl *FD, bool ShowCallStack) {
    auto It = S.DeviceDeferredDiags.find(FD);
    if (It == S.DeviceDeferredDiags.end())
      return;
    bool HasWarningOrError = false;
    bool FirstDiag = true;
    for (PartialDiagnosticAt &PDAt : It->second) {
      // Respect error limit.
      if (S.Diags.hasFatalErrorOccurred())
        return;
      const SourceLocation &Loc = PDAt.first;
      const PartialDiagnostic &PD = PDAt.second;
      HasWarningOrError |=
          S.getDiagnostics().getDiagnosticLevel(PD.getDiagID(), Loc) >=
          DiagnosticsEngine::Warning;
      {
        DiagnosticBuilder Builder(S.Diags.Report(Loc, PD.getDiagID()));
        PD.Emit(Builder);
      }
      // Emit the call-stack note on the first diagnostic, so that it is not
      // lost if later diagnostics are suppressed by the error limit.
      if (FirstDiag && HasWarningOrError && ShowCallStack) {
        emitCallStackNotes(S, FD);
        FirstDiag = false;
      }
    }
  }
};
} // namespace

void Sema::emitDeferredDiags() {
  if (ExternalSource)
    ExternalSource->ReadDeclsToCheckForDeferredDiags(
        DeclsToCheckForDeferredDiags);

  if ((DeviceDeferredDiags.empty() && !LangOpts.OpenMP) ||
      DeclsToCheckForDeferredDiags.empty())
    return;

  DeferredDiagnosticsEmitter DDE(*this);
  for (auto D : DeclsToCheckForDeferredDiags)
    DDE.checkRecordedDecl(D);
}

// In CUDA, there are some constructs which may appear in semantically-valid
// code, but trigger errors if we ever generate code for the function in which
// they appear. Essentially every construct you're not allowed to use on the
// device falls into this category, because you are allowed to use these
// constructs in a __host__ __device__ function, but only if that function is
// never codegen'ed on the device.
//
// To handle semantic checking for these constructs, we keep track of the set
// of functions we know will be emitted, either because we could tell a priori
// that they would be emitted, or because they were transitively called by a
// known-emitted function.
//
// We also keep a partial call graph of which not-known-emitted functions call
// which other not-known-emitted functions.
//
// When we see something which is illegal if the current function is emitted
// (usually by way of CUDADiagIfDeviceCode, CUDADiagIfHostCode, or
// CheckCUDACall), we first check if the current function is known-emitted. If
// so, we immediately output the diagnostic.
//
// Otherwise, we "defer" the diagnostic. It sits in Sema::DeviceDeferredDiags
// until we discover that the function is known-emitted, at which point we take
// it out of this map and emit the diagnostic.

Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(Kind K, SourceLocation Loc,
                                                   unsigned DiagID,
                                                   FunctionDecl *Fn, Sema &S)
    : S(S), Loc(Loc), DiagID(DiagID), Fn(Fn),
      ShowCallStack(K == K_ImmediateWithCallStack || K == K_Deferred) {
  switch (K) {
  case K_Nop:
    break;
  case K_Immediate:
  case K_ImmediateWithCallStack:
    ImmediateDiag.emplace(
        ImmediateDiagBuilder(S.Diags.Report(Loc, DiagID), S, DiagID));
    break;
  case K_Deferred:
    assert(Fn && "Must have a function to attach the deferred diag to.");
    auto &Diags = S.DeviceDeferredDiags[Fn];
    PartialDiagId.emplace(Diags.size());
    Diags.emplace_back(Loc, S.PDiag(DiagID));
    break;
  }
}

Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D)
    : S(D.S), Loc(D.Loc), DiagID(D.DiagID), Fn(D.Fn),
      ShowCallStack(D.ShowCallStack), ImmediateDiag(D.ImmediateDiag),
      PartialDiagId(D.PartialDiagId) {
  // Clean the previous diagnostics.
  D.ShowCallStack = false;
  D.ImmediateDiag.reset();
  D.PartialDiagId.reset();
}

Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() {
  if (ImmediateDiag) {
    // Emit our diagnostic and, if it was a warning or error, output a call
    // stack if Fn isn't a priori known-emitted.
    bool IsWarningOrError = S.getDiagnostics().getDiagnosticLevel(
                                DiagID, Loc) >= DiagnosticsEngine::Warning;
    ImmediateDiag.reset(); // Emit the immediate diag.
    if (IsWarningOrError && ShowCallStack)
      emitCallStackNotes(S, Fn);
  } else {
    assert((!PartialDiagId || ShowCallStack) &&
           "Must always show call stack for deferred diags.");
  }
}

Sema::SemaDiagnosticBuilder
Sema::targetDiag(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD) {
  FD = FD ? FD : getCurFunctionDecl();
  if (LangOpts.OpenMP)
    return LangOpts.OpenMPIsDevice ? diagIfOpenMPDeviceCode(Loc, DiagID, FD)
                                   : diagIfOpenMPHostCode(Loc, DiagID, FD);
  if (getLangOpts().CUDA)
    return getLangOpts().CUDAIsDevice ? CUDADiagIfDeviceCode(Loc, DiagID)
                                      : CUDADiagIfHostCode(Loc, DiagID);

  if (getLangOpts().SYCLIsDevice)
    return SYCLDiagIfDeviceCode(Loc, DiagID);

  return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc, DiagID,
                               FD, *this);
}

Sema::SemaDiagnosticBuilder Sema::Diag(SourceLocation Loc, unsigned DiagID,
                                       bool DeferHint) {
  bool IsError = Diags.getDiagnosticIDs()->isDefaultMappingAsError(DiagID);
  bool ShouldDefer = getLangOpts().CUDA && LangOpts.GPUDeferDiag &&
                     DiagnosticIDs::isDeferrable(DiagID) &&
                     (DeferHint || DeferDiags || !IsError);
  auto SetIsLastErrorImmediate = [&](bool Flag) {
    if (IsError)
      IsLastErrorImmediate = Flag;
  };
  if (!ShouldDefer) {
    SetIsLastErrorImmediate(true);
    return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc,
                                 DiagID, getCurFunctionDecl(), *this);
  }

  SemaDiagnosticBuilder DB = getLangOpts().CUDAIsDevice
                                 ? CUDADiagIfDeviceCode(Loc, DiagID)
                                 : CUDADiagIfHostCode(Loc, DiagID);
  SetIsLastErrorImmediate(DB.isImmediate());
  return DB;
}

void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
  if (isUnevaluatedContext() || Ty.isNull())
    return;

  // The original idea behind the checkTypeSupport function is that unused
  // declarations can be replaced with an array of bytes of the same size
  // during codegen; such a replacement doesn't seem to be possible for types
  // without a constant byte size, such as zero-length arrays. So, do a deep
  // check for SYCL.
  if (D && LangOpts.SYCLIsDevice) {
    llvm::DenseSet<QualType> Visited;
    deepTypeCheckForSYCLDevice(Loc, Visited, D);
  }

  Decl *C = cast<Decl>(getCurLexicalContext());

  // Memcpy operations for structs containing a member with unsupported type
  // are ok, though.
  if (const auto *MD = dyn_cast<CXXMethodDecl>(C)) {
    if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
        MD->isTrivial())
      return;

    if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(MD))
      if (Ctor->isCopyOrMoveConstructor() && Ctor->isTrivial())
        return;
  }

  // Try to associate errors with the lexical context, if that is a function,
  // or with the value declaration otherwise.
  FunctionDecl *FD = isa<FunctionDecl>(C) ? cast<FunctionDecl>(C)
                                          : dyn_cast_or_null<FunctionDecl>(D);

  auto CheckDeviceType = [&](QualType Ty) {
    if (Ty->isDependentType())
      return;

    if (Ty->isBitIntType()) {
      if (!Context.getTargetInfo().hasBitIntType()) {
        PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
        if (D)
          PD << D;
        else
          PD << "expression";
        targetDiag(Loc, PD, FD)
            << false /*show bit size*/ << 0 /*bitsize*/ << false /*return*/
            << Ty << Context.getTargetInfo().getTriple().str();
      }
      return;
    }

    // Check if we are dealing with two 'long double' but with different
    // semantics.
    bool LongDoubleMismatched = false;
    if (Ty->isRealFloatingType() && Context.getTypeSize(Ty) == 128) {
      const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(Ty);
      if ((&Sem != &llvm::APFloat::PPCDoubleDouble() &&
           !Context.getTargetInfo().hasFloat128Type()) ||
          (&Sem == &llvm::APFloat::PPCDoubleDouble() &&
           !Context.getTargetInfo().hasIbm128Type()))
        LongDoubleMismatched = true;
    }

    if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) ||
        (Ty->isFloat128Type() && !Context.getTargetInfo().hasFloat128Type()) ||
        (Ty->isIbm128Type() && !Context.getTargetInfo().hasIbm128Type()) ||
        (Ty->isIntegerType() && Context.getTypeSize(Ty) == 128 &&
         !Context.getTargetInfo().hasInt128Type()) ||
        LongDoubleMismatched) {
      PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
      if (D)
        PD << D;
      else
        PD << "expression";

      if (targetDiag(Loc, PD, FD)
          << true /*show bit size*/
          << static_cast<unsigned>(Context.getTypeSize(Ty)) << Ty
          << false /*return*/ << Context.getTargetInfo().getTriple().str()) {
        if (D)
          D->setInvalidDecl();
      }
      if (D)
        targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
    }
  };

  auto CheckType = [&](QualType Ty, bool IsRetTy = false) {
    if (LangOpts.SYCLIsDevice || (LangOpts.OpenMP && LangOpts.OpenMPIsDevice) ||
        LangOpts.CUDAIsDevice)
      CheckDeviceType(Ty);

    QualType UnqualTy = Ty.getCanonicalType().getUnqualifiedType();
    const TargetInfo &TI = Context.getTargetInfo();
    if (!TI.hasLongDoubleType() && UnqualTy == Context.LongDoubleTy) {
      PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
      if (D)
        PD << D;
      else
        PD << "expression";

      if (Diag(Loc, PD, FD)
          << false /*show bit size*/ << 0 << Ty << false /*return*/
          << Context.getTargetInfo().getTriple().str()) {
        if (D)
          D->setInvalidDecl();
      }
      if (D)
        targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
    }

    bool IsDouble = UnqualTy == Context.DoubleTy;
    bool IsFloat = UnqualTy == Context.FloatTy;
    if (IsRetTy && !TI.hasFPReturn() && (IsDouble || IsFloat)) {
      PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
      if (D)
        PD << D;
      else
        PD << "expression";

      if (Diag(Loc, PD, FD)
          << false /*show bit size*/ << 0 << Ty << true /*return*/
          << Context.getTargetInfo().getTriple().str()) {
        if (D)
          D->setInvalidDecl();
      }
      if (D)
        targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
    }
  };

  CheckType(Ty);
  if (const auto *FPTy = dyn_cast<FunctionProtoType>(Ty)) {
    for (const auto &ParamTy : FPTy->param_types())
      CheckType(ParamTy);
    CheckType(FPTy->getReturnType(), /*IsRetTy=*/true);
  }
  if (const auto *FNPTy = dyn_cast<FunctionNoProtoType>(Ty))
    CheckType(FNPTy->getReturnType(), /*IsRetTy=*/true);
}

/// Looks through the macro-expansion chain for the given
/// location, looking for a macro expansion with the given name.
/// If one is found, returns true and sets the location to that
/// expansion loc.
bool Sema::findMacroSpelling(SourceLocation &locref, StringRef name) {
  SourceLocation loc = locref;
  if (!loc.isMacroID()) return false;

  // There's no good way right now to look at the intermediate
  // expansions, so just jump to the expansion location.
  loc = getSourceManager().getExpansionLoc(loc);

  // If that's written with the name, stop here.
  SmallString<16> buffer;
  if (getPreprocessor().getSpelling(loc, buffer) == name) {
    locref = loc;
    return true;
  }
  return false;
}

/// Determines the active Scope associated with the given declaration
/// context.
///
/// This routine maps a declaration context to the active Scope object that
/// represents that declaration context in the parser. It is typically used
/// from "scope-less" code (e.g., template instantiation, lazy creation of
/// declarations) that injects a name for name-lookup purposes and, therefore,
/// must update the Scope.
///
/// \returns The scope corresponding to the given declaration context, or NULL
/// if no such scope is open.
Scope *Sema::getScopeForContext(DeclContext *Ctx) {

  if (!Ctx)
    return nullptr;

  Ctx = Ctx->getPrimaryContext();
  for (Scope *S = getCurScope(); S; S = S->getParent()) {
    // Ignore scopes that cannot have declarations. This is important for
    // out-of-line definitions of static class members.
    if (S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope))
      if (DeclContext *Entity = S->getEntity())
        if (Ctx == Entity->getPrimaryContext())
          return S;
  }

  return nullptr;
}

/// Enter a new function scope.
void Sema::PushFunctionScope() {
  if (FunctionScopes.empty() && CachedFunctionScope) {
    // Use CachedFunctionScope to avoid allocating memory when possible.
    CachedFunctionScope->Clear();
    FunctionScopes.push_back(CachedFunctionScope.release());
  } else {
    FunctionScopes.push_back(new FunctionScopeInfo(getDiagnostics()));
  }
  if (LangOpts.OpenMP)
    pushOpenMPFunctionRegion();
}

void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) {
  FunctionScopes.push_back(new BlockScopeInfo(getDiagnostics(),
                                              BlockScope, Block));
}

LambdaScopeInfo *Sema::PushLambdaScope() {
  LambdaScopeInfo *const LSI = new LambdaScopeInfo(getDiagnostics());
  FunctionScopes.push_back(LSI);
  return LSI;
}

void Sema::RecordParsingTemplateParameterDepth(unsigned Depth) {
  if (LambdaScopeInfo *const LSI = getCurLambda()) {
    LSI->AutoTemplateParameterDepth = Depth;
    return;
  }
  llvm_unreachable(
      "Remove assertion if intentionally called in a non-lambda context.");
}

// Check that the type of the VarDecl has an accessible copy constructor and
// resolve its destructor's exception specification.
// This also performs initialization of block variables when they are moved
// to the heap.
// It uses the same rules as applicable for implicit moves
// according to the C++ standard in effect ([class.copy.elision]p3).
static void checkEscapingByref(VarDecl *VD, Sema &S) {
  QualType T = VD->getType();
  EnterExpressionEvaluationContext scope(
      S, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
  SourceLocation Loc = VD->getLocation();
  Expr *VarRef =
      new (S.Context) DeclRefExpr(S.Context, VD, false, T, VK_LValue, Loc);
  ExprResult Result;
  auto IE = InitializedEntity::InitializeBlock(Loc, T);
  if (S.getLangOpts().CPlusPlus2b) {
    auto *E = ImplicitCastExpr::Create(S.Context, T, CK_NoOp, VarRef, nullptr,
                                       VK_XValue, FPOptionsOverride());
    Result = S.PerformCopyInitialization(IE, SourceLocation(), E);
  } else {
    Result = S.PerformMoveOrCopyInitialization(
        IE, Sema::NamedReturnInfo{VD, Sema::NamedReturnInfo::MoveEligible},
        VarRef);
  }

  if (!Result.isInvalid()) {
    Result = S.MaybeCreateExprWithCleanups(Result);
    Expr *Init = Result.getAs<Expr>();
    S.Context.setBlockVarCopyInit(VD, Init, S.canThrow(Init));
  }

  // The destructor's exception specification is needed when IRGen generates
  // block copy/destroy functions. Resolve it here.
  if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
    if (CXXDestructorDecl *DD = RD->getDestructor()) {
      auto *FPT = DD->getType()->getAs<FunctionProtoType>();
      S.ResolveExceptionSpec(Loc, FPT);
    }
}

static void markEscapingByrefs(const FunctionScopeInfo &FSI, Sema &S) {
  // Set the EscapingByref flag of __block variables captured by
  // escaping blocks.
  for (const BlockDecl *BD : FSI.Blocks) {
    for (const BlockDecl::Capture &BC : BD->captures()) {
      VarDecl *VD = BC.getVariable();
      if (VD->hasAttr<BlocksAttr>()) {
        // Nothing to do if this is a __block variable captured by a
        // non-escaping block.
        if (BD->doesNotEscape())
          continue;
        VD->setEscapingByref();
      }
      // Check whether the captured variable is or contains an object of
      // non-trivial C union type.
      QualType CapType = BC.getVariable()->getType();
      if (CapType.hasNonTrivialToPrimitiveDestructCUnion() ||
          CapType.hasNonTrivialToPrimitiveCopyCUnion())
        S.checkNonTrivialCUnion(BC.getVariable()->getType(),
                                BD->getCaretLocation(),
                                Sema::NTCUC_BlockCapture,
                                Sema::NTCUK_Destruct | Sema::NTCUK_Copy);
    }
  }

  for (VarDecl *VD : FSI.ByrefBlockVars) {
    // __block variables might require us to capture a copy-initializer.
    if (!VD->isEscapingByref())
      continue;
    // It's currently invalid to ever have a __block variable with an
    // array type; should we diagnose that here?
    // Regardless, we don't want to ignore array nesting when
    // constructing this copy.
    if (VD->getType()->isStructureOrClassType())
      checkEscapingByref(VD, S);
  }
}

/// Pop a function (or block or lambda or captured region) scope from the
/// stack.
///
/// \param WP The warning policy to use for CFG-based warnings, or null if such
///        warnings should not be produced.
/// \param D The declaration corresponding to this function scope, if producing
///        CFG-based warnings.
/// \param BlockType The type of the block expression, if D is a BlockDecl.
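///
/// The returned PoppedFunctionScopePtr owns the popped scope; its deleter,
/// PoppedFunctionScopeDeleter (defined below), either caches the object for
/// reuse by a later PushFunctionScope() or deletes it.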
Sema::PoppedFunctionScopePtr
Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP,
                           const Decl *D, QualType BlockType) {
  assert(!FunctionScopes.empty() && "mismatched push/pop!");

  markEscapingByrefs(*FunctionScopes.back(), *this);

  PoppedFunctionScopePtr Scope(FunctionScopes.pop_back_val(),
                               PoppedFunctionScopeDeleter(this));

  if (LangOpts.OpenMP)
    popOpenMPFunctionRegion(Scope.get());

  // Issue any analysis-based warnings.
  if (WP && D)
    AnalysisWarnings.IssueWarnings(*WP, Scope.get(), D, BlockType);
  else
    for (const auto &PUD : Scope->PossiblyUnreachableDiags)
      Diag(PUD.Loc, PUD.PD);

  return Scope;
}

void Sema::PoppedFunctionScopeDeleter::
operator()(sema::FunctionScopeInfo *Scope) const {
  // Stash the function scope for later reuse if it's for a normal function.
  if (Scope->isPlainFunction() && !Self->CachedFunctionScope)
    Self->CachedFunctionScope.reset(Scope);
  else
    delete Scope;
}

void Sema::PushCompoundScope(bool IsStmtExpr) {
  getCurFunction()->CompoundScopes.push_back(CompoundScopeInfo(IsStmtExpr));
}

void Sema::PopCompoundScope() {
  FunctionScopeInfo *CurFunction = getCurFunction();
  assert(!CurFunction->CompoundScopes.empty() && "mismatched push/pop");

  CurFunction->CompoundScopes.pop_back();
}

/// Determine whether any errors occurred within this function/method/
/// block.
bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const {
  return getCurFunction()->hasUnrecoverableErrorOccurred();
}

void Sema::setFunctionHasBranchIntoScope() {
  if (!FunctionScopes.empty())
    FunctionScopes.back()->setHasBranchIntoScope();
}

void Sema::setFunctionHasBranchProtectedScope() {
  if (!FunctionScopes.empty())
    FunctionScopes.back()->setHasBranchProtectedScope();
}

void Sema::setFunctionHasIndirectGoto() {
  if (!FunctionScopes.empty())
    FunctionScopes.back()->setHasIndirectGoto();
}

void Sema::setFunctionHasMustTail() {
  if (!FunctionScopes.empty())
    FunctionScopes.back()->setHasMustTail();
}

BlockScopeInfo *Sema::getCurBlock() {
  if (FunctionScopes.empty())
    return nullptr;

  auto CurBSI = dyn_cast<BlockScopeInfo>(FunctionScopes.back());
  if (CurBSI && CurBSI->TheDecl &&
      !CurBSI->TheDecl->Encloses(CurContext)) {
    // We have switched contexts due to template instantiation.
    assert(!CodeSynthesisContexts.empty());
    return nullptr;
  }

  return CurBSI;
}

FunctionScopeInfo *Sema::getEnclosingFunction() const {
  if (FunctionScopes.empty())
    return nullptr;

  for (int e = FunctionScopes.size() - 1; e >= 0; --e) {
    if (isa<sema::BlockScopeInfo>(FunctionScopes[e]))
      continue;
    return FunctionScopes[e];
  }
  return nullptr;
}

LambdaScopeInfo *Sema::getEnclosingLambda() const {
  for (auto *Scope : llvm::reverse(FunctionScopes)) {
    if (auto *LSI = dyn_cast<sema::LambdaScopeInfo>(Scope)) {
      if (LSI->Lambda && !LSI->Lambda->Encloses(CurContext)) {
        // We have switched contexts due to template instantiation.
        // FIXME: We should swap out the FunctionScopes during code synthesis
        // so that we don't need to check for this.
        assert(!CodeSynthesisContexts.empty());
        return nullptr;
      }
      return LSI;
    }
  }
  return nullptr;
}

LambdaScopeInfo *Sema::getCurLambda(bool IgnoreNonLambdaCapturingScope) {
  if (FunctionScopes.empty())
    return nullptr;

  auto I = FunctionScopes.rbegin();
  if (IgnoreNonLambdaCapturingScope) {
    auto E = FunctionScopes.rend();
    while (I != E && isa<CapturingScopeInfo>(*I) && !isa<LambdaScopeInfo>(*I))
      ++I;
    if (I == E)
      return nullptr;
  }
  auto *CurLSI = dyn_cast<LambdaScopeInfo>(*I);
  if (CurLSI && CurLSI->Lambda &&
      !CurLSI->Lambda->Encloses(CurContext)) {
    // We have switched contexts due to template instantiation.
    assert(!CodeSynthesisContexts.empty());
    return nullptr;
  }

  return CurLSI;
}

// We have a generic lambda if we parsed auto parameters, or we have
// an associated template parameter list.
LambdaScopeInfo *Sema::getCurGenericLambda() {
  if (LambdaScopeInfo *LSI = getCurLambda()) {
    return (LSI->TemplateParams.size() ||
            LSI->GLTemplateParameterList) ? LSI : nullptr;
  }
  return nullptr;
}


void Sema::ActOnComment(SourceRange Comment) {
  if (!LangOpts.RetainCommentsFromSystemHeaders &&
      SourceMgr.isInSystemHeader(Comment.getBegin()))
    return;
  RawComment RC(SourceMgr, Comment, LangOpts.CommentOpts, false);
  if (RC.isAlmostTrailingComment()) {
    SourceRange MagicMarkerRange(Comment.getBegin(),
                                 Comment.getBegin().getLocWithOffset(3));
    StringRef MagicMarkerText;
    switch (RC.getKind()) {
    case RawComment::RCK_OrdinaryBCPL:
      MagicMarkerText = "///<";
      break;
    case RawComment::RCK_OrdinaryC:
      MagicMarkerText = "/**<";
      break;
    default:
      llvm_unreachable("if this is an almost Doxygen comment, "
                       "it should be ordinary");
    }
    Diag(Comment.getBegin(), diag::warn_not_a_doxygen_trailing_member_comment)
        << FixItHint::CreateReplacement(MagicMarkerRange, MagicMarkerText);
  }
  Context.addComment(RC);
}

// Pin this vtable to this file.
ExternalSemaSource::~ExternalSemaSource() {}
char ExternalSemaSource::ID;

void ExternalSemaSource::ReadMethodPool(Selector Sel) { }
void ExternalSemaSource::updateOutOfDateSelector(Selector Sel) { }

void ExternalSemaSource::ReadKnownNamespaces(
    SmallVectorImpl<NamespaceDecl *> &Namespaces) {
}

void ExternalSemaSource::ReadUndefinedButUsed(
    llvm::MapVector<NamedDecl *, SourceLocation> &Undefined) {}

void ExternalSemaSource::ReadMismatchingDeleteExpressions(llvm::MapVector<
    FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> &) {}

/// Figure out if an expression could be turned into a call.
///
/// Use this when trying to recover from an error where the programmer may have
/// written just the name of a function instead of actually calling it.
///
/// \param E - The expression to examine.
/// \param ZeroArgCallReturnTy - If the expression can be turned into a call
/// with no arguments, this parameter is set to the type returned by such a
/// call; otherwise, it is set to an empty QualType.
/// \param OverloadSet - If the expression is an overloaded function
/// name, this parameter is populated with the decls of the various overloads.
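///
/// For example (illustrative only): given 'int f();', a use of the bare name
/// 'f' where an 'int' is expected can be reported as callable with zero
/// arguments, with ZeroArgCallReturnTy set to 'int', so the caller can
/// suggest appending "()".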
bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
                         UnresolvedSetImpl &OverloadSet) {
  ZeroArgCallReturnTy = QualType();
  OverloadSet.clear();

  const OverloadExpr *Overloads = nullptr;
  bool IsMemExpr = false;
  if (E.getType() == Context.OverloadTy) {
    OverloadExpr::FindResult FR = OverloadExpr::find(const_cast<Expr*>(&E));

    // Ignore overloads that are pointer-to-member constants.
    if (FR.HasFormOfMemberPointer)
      return false;

    Overloads = FR.Expression;
  } else if (E.getType() == Context.BoundMemberTy) {
    Overloads = dyn_cast<UnresolvedMemberExpr>(E.IgnoreParens());
    IsMemExpr = true;
  }

  bool Ambiguous = false;
  bool IsMV = false;

  if (Overloads) {
    for (OverloadExpr::decls_iterator it = Overloads->decls_begin(),
                                      DeclsEnd = Overloads->decls_end();
         it != DeclsEnd; ++it) {
      OverloadSet.addDecl(*it);

      // Check whether the function is a non-template, non-member which takes
      // no arguments.
      if (IsMemExpr)
        continue;
      if (const FunctionDecl *OverloadDecl
            = dyn_cast<FunctionDecl>((*it)->getUnderlyingDecl())) {
        if (OverloadDecl->getMinRequiredArguments() == 0) {
          if (!ZeroArgCallReturnTy.isNull() && !Ambiguous &&
              (!IsMV || !(OverloadDecl->isCPUDispatchMultiVersion() ||
                          OverloadDecl->isCPUSpecificMultiVersion()))) {
            ZeroArgCallReturnTy = QualType();
            Ambiguous = true;
          } else {
            ZeroArgCallReturnTy = OverloadDecl->getReturnType();
            IsMV = OverloadDecl->isCPUDispatchMultiVersion() ||
                   OverloadDecl->isCPUSpecificMultiVersion();
          }
        }
      }
    }

    // If it's not a member, use better machinery to try to resolve the call.
    if (!IsMemExpr)
      return !ZeroArgCallReturnTy.isNull();
  }

  // Attempt to call the member with no arguments - this will correctly handle
  // member templates with defaults/deduction of template arguments, overloads
  // with default arguments, etc.
  if (IsMemExpr && !E.isTypeDependent()) {
    Sema::TentativeAnalysisScope Trap(*this);
    ExprResult R = BuildCallToMemberFunction(nullptr, &E, SourceLocation(),
                                             None, SourceLocation());
    if (R.isUsable()) {
      ZeroArgCallReturnTy = R.get()->getType();
      return true;
    }
    return false;
  }

  if (const DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E.IgnoreParens())) {
    if (const FunctionDecl *Fun = dyn_cast<FunctionDecl>(DeclRef->getDecl())) {
      if (Fun->getMinRequiredArguments() == 0)
        ZeroArgCallReturnTy = Fun->getReturnType();
      return true;
    }
  }

  // We don't have an expression that's convenient to get a FunctionDecl from,
  // but we can at least check if the type is "function of 0 arguments".
  QualType ExprTy = E.getType();
  const FunctionType *FunTy = nullptr;
  QualType PointeeTy = ExprTy->getPointeeType();
  if (!PointeeTy.isNull())
    FunTy = PointeeTy->getAs<FunctionType>();
  if (!FunTy)
    FunTy = ExprTy->getAs<FunctionType>();

  if (const FunctionProtoType *FPT =
        dyn_cast_or_null<FunctionProtoType>(FunTy)) {
    if (FPT->getNumParams() == 0)
      ZeroArgCallReturnTy = FunTy->getReturnType();
    return true;
  }
  return false;
}

/// Give notes for a set of overloads.
///
/// A companion to tryExprAsCall. In cases when the name that the programmer
/// wrote was an overloaded function, we may be able to make some guesses about
/// plausible overloads based on their return types; such guesses can be handed
/// off to this method to be emitted as notes.
///
/// \param Overloads - The overloads to note.
/// \param FinalNoteLoc - If we've suppressed printing some overloads due to
/// -fshow-overloads=best, this is the location to attach to the note about too
/// many candidates. Typically this will be the location of the original
/// ill-formed expression.
static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads,
                          const SourceLocation FinalNoteLoc) {
  unsigned ShownOverloads = 0;
  unsigned SuppressedOverloads = 0;
  for (UnresolvedSetImpl::iterator It = Overloads.begin(),
                                   DeclsEnd = Overloads.end();
       It != DeclsEnd; ++It) {
    if (ShownOverloads >= S.Diags.getNumOverloadCandidatesToShow()) {
      ++SuppressedOverloads;
      continue;
    }

    NamedDecl *Fn = (*It)->getUnderlyingDecl();
    // Don't print overloads for non-default multiversioned functions.
    if (const auto *FD = Fn->getAsFunction()) {
      if (FD->isMultiVersion() && FD->hasAttr<TargetAttr>() &&
          !FD->getAttr<TargetAttr>()->isDefaultVersion())
        continue;
    }
    S.Diag(Fn->getLocation(), diag::note_possible_target_of_call);
    ++ShownOverloads;
  }

  S.Diags.overloadCandidatesShown(ShownOverloads);

  if (SuppressedOverloads)
    S.Diag(FinalNoteLoc, diag::note_ovl_too_many_candidates)
        << SuppressedOverloads;
}

static void notePlausibleOverloads(Sema &S, SourceLocation Loc,
                                   const UnresolvedSetImpl &Overloads,
                                   bool (*IsPlausibleResult)(QualType)) {
  if (!IsPlausibleResult)
    return noteOverloads(S, Overloads, Loc);

  UnresolvedSet<2> PlausibleOverloads;
  for (OverloadExpr::decls_iterator It = Overloads.begin(),
                                    DeclsEnd = Overloads.end();
       It != DeclsEnd; ++It) {
    const FunctionDecl *OverloadDecl = cast<FunctionDecl>(*It);
    QualType OverloadResultTy = OverloadDecl->getReturnType();
    if (IsPlausibleResult(OverloadResultTy))
      PlausibleOverloads.addDecl(It.getDecl());
  }
  noteOverloads(S, PlausibleOverloads, Loc);
}

/// Determine whether the given expression can be called by just
/// putting parentheses after it.  Notably, expressions with unary
/// operators can't be because the unary operator will start parsing
/// outside the call.
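///
/// For example, '*fp' cannot be fixed up by appending "()", since the result
/// would parse as '*(fp())' rather than '(*fp)()'.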
static bool IsCallableWithAppend(Expr *E) {
  E = E->IgnoreImplicit();
  return (!isa<CStyleCastExpr>(E) &&
          !isa<UnaryOperator>(E) &&
          !isa<BinaryOperator>(E) &&
          !isa<CXXOperatorCallExpr>(E));
}

static bool IsCPUDispatchCPUSpecificMultiVersion(const Expr *E) {
  if (const auto *UO = dyn_cast<UnaryOperator>(E))
    E = UO->getSubExpr();

  if (const auto *ULE = dyn_cast<UnresolvedLookupExpr>(E)) {
    if (ULE->getNumDecls() == 0)
      return false;

    const NamedDecl *ND = *ULE->decls_begin();
    if (const auto *FD = dyn_cast<FunctionDecl>(ND))
      return FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion();
  }
  return false;
}

bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
                                bool ForceComplain,
                                bool (*IsPlausibleResult)(QualType)) {
  SourceLocation Loc = E.get()->getExprLoc();
  SourceRange Range = E.get()->getSourceRange();
  UnresolvedSet<4> Overloads;

  // If this is a SFINAE context, don't try anything that might trigger ADL
  // prematurely.
  if (!isSFINAEContext()) {
    QualType ZeroArgCallTy;
    if (tryExprAsCall(*E.get(), ZeroArgCallTy, Overloads) &&
        !ZeroArgCallTy.isNull() &&
        (!IsPlausibleResult || IsPlausibleResult(ZeroArgCallTy))) {
      // At this point, we know E is potentially callable with 0
      // arguments and that it returns something of a reasonable type,
      // so we can emit a fixit and carry on pretending that E was
      // actually a CallExpr.
      SourceLocation ParenInsertionLoc = getLocForEndOfToken(Range.getEnd());
      bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get());
      Diag(Loc, PD) << /*zero-arg*/ 1 << IsMV << Range
                    << (IsCallableWithAppend(E.get())
                            ? FixItHint::CreateInsertion(ParenInsertionLoc,
                                                         "()")
                            : FixItHint());
      if (!IsMV)
        notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);

      // FIXME: Try this before emitting the fixit, and suppress diagnostics
      // while doing so.
      E = BuildCallExpr(nullptr, E.get(), Range.getEnd(), None,
                        Range.getEnd().getLocWithOffset(1));
      return true;
    }
  }
  if (!ForceComplain) return false;

  bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get());
  Diag(Loc, PD) << /*not zero-arg*/ 0 << IsMV << Range;
  if (!IsMV)
    notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);
  E = ExprError();
  return true;
}

IdentifierInfo *Sema::getSuperIdentifier() const {
  if (!Ident_super)
    Ident_super = &Context.Idents.get("super");
  return Ident_super;
}

IdentifierInfo *Sema::getFloat128Identifier() const {
  if (!Ident___float128)
    Ident___float128 = &Context.Idents.get("__float128");
  return Ident___float128;
}

void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD,
                                   CapturedRegionKind K,
                                   unsigned OpenMPCaptureLevel) {
  auto *CSI = new CapturedRegionScopeInfo(
      getDiagnostics(), S, CD, RD, CD->getContextParam(), K,
      (getLangOpts().OpenMP && K == CR_OpenMP) ? getOpenMPNestingLevel() : 0,
      OpenMPCaptureLevel);
  CSI->ReturnType = Context.VoidTy;
  FunctionScopes.push_back(CSI);
}

CapturedRegionScopeInfo *Sema::getCurCapturedRegion() {
  if (FunctionScopes.empty())
    return nullptr;

  return dyn_cast<CapturedRegionScopeInfo>(FunctionScopes.back());
}

const llvm::MapVector<FieldDecl *, Sema::DeleteLocs> &
Sema::getMismatchingDeleteExpressions() const {
  return DeleteExprs;
}

Sema::FPFeaturesStateRAII::FPFeaturesStateRAII(Sema &S)
    : S(S), OldFPFeaturesState(S.CurFPFeatures),
      OldOverrides(S.FpPragmaStack.CurrentValue),
      OldEvalMethod(S.PP.getCurrentFPEvalMethod()),
      OldFPPragmaLocation(S.PP.getLastFPEvalPragmaLocation()) {}

Sema::FPFeaturesStateRAII::~FPFeaturesStateRAII() {
  S.CurFPFeatures = OldFPFeaturesState;
  S.FpPragmaStack.CurrentValue = OldOverrides;
  S.PP.setCurrentFPEvalMethod(OldFPPragmaLocation, OldEvalMethod);
}
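
// A minimal usage sketch for FPFeaturesStateRAII (illustrative only; 'SemaRef'
// is a hypothetical Sema reference): construct the RAII object around code
// that temporarily overrides the floating-point state, so that CurFPFeatures,
// the FP pragma stack override, and the current FP eval method are restored on
// scope exit.
//
//   {
//     Sema::FPFeaturesStateRAII FPGuard(SemaRef);
//     // ... temporarily adjust SemaRef.CurFPFeatures here ...
//   } // previous state restored by ~FPFeaturesStateRAII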