1 //===--- Sema.cpp - AST Builder and Semantic Analysis Implementation ------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements the actions class which performs semantic analysis and 10 // builds an AST out of a parse stream. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "UsedDeclVisitor.h" 15 #include "clang/AST/ASTContext.h" 16 #include "clang/AST/ASTDiagnostic.h" 17 #include "clang/AST/Decl.h" 18 #include "clang/AST/DeclCXX.h" 19 #include "clang/AST/DeclFriend.h" 20 #include "clang/AST/DeclObjC.h" 21 #include "clang/AST/Expr.h" 22 #include "clang/AST/ExprCXX.h" 23 #include "clang/AST/PrettyDeclStackTrace.h" 24 #include "clang/AST/StmtCXX.h" 25 #include "clang/Basic/DiagnosticOptions.h" 26 #include "clang/Basic/PartialDiagnostic.h" 27 #include "clang/Basic/SourceManager.h" 28 #include "clang/Basic/Stack.h" 29 #include "clang/Basic/TargetInfo.h" 30 #include "clang/Lex/HeaderSearch.h" 31 #include "clang/Lex/Preprocessor.h" 32 #include "clang/Sema/CXXFieldCollector.h" 33 #include "clang/Sema/DelayedDiagnostic.h" 34 #include "clang/Sema/ExternalSemaSource.h" 35 #include "clang/Sema/Initialization.h" 36 #include "clang/Sema/MultiplexExternalSemaSource.h" 37 #include "clang/Sema/ObjCMethodList.h" 38 #include "clang/Sema/Scope.h" 39 #include "clang/Sema/ScopeInfo.h" 40 #include "clang/Sema/SemaConsumer.h" 41 #include "clang/Sema/SemaInternal.h" 42 #include "clang/Sema/TemplateDeduction.h" 43 #include "clang/Sema/TemplateInstCallback.h" 44 #include "clang/Sema/TypoCorrection.h" 45 #include "llvm/ADT/DenseMap.h" 46 #include "llvm/ADT/SmallPtrSet.h" 47 #include "llvm/Support/TimeProfiler.h" 48 49 using namespace clang; 50 using 
namespace sema;

// Compute the location just past the end of the token at Loc; used when
// building fix-it hints and precise diagnostic ranges.
SourceLocation Sema::getLocForEndOfToken(SourceLocation Loc, unsigned Offset) {
  return Lexer::getLocForEndOfToken(Loc, Offset, SourceMgr, LangOpts);
}

// The module loader is owned by the preprocessor; Sema only forwards to it.
ModuleLoader &Sema::getModuleLoader() const { return PP.getModuleLoader(); }

// Invent a display name for the template parameter synthesized for an
// abbreviated function template: "auto:<index>" when the parameter is
// unnamed, "<name>:auto" otherwise.
IdentifierInfo *
Sema::InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
                                                 unsigned int Index) {
  std::string InventedName;
  llvm::raw_string_ostream OS(InventedName);

  if (!ParamName)
    OS << "auto:" << Index + 1;
  else
    OS << ParamName->getName() << ":auto";

  OS.flush();
  return &Context.Idents.get(OS.str());
}

PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context,
                                       const Preprocessor &PP) {
  PrintingPolicy Policy = Context.getPrintingPolicy();
  // In diagnostics, we print _Bool as bool if the latter is defined as the
  // former.
  Policy.Bool = Context.getLangOpts().Bool;
  if (!Policy.Bool) {
    // Only print "bool" if the macro expands to exactly the _Bool keyword.
    if (const MacroInfo *BoolMacro = PP.getMacroInfo(Context.getBoolName())) {
      Policy.Bool = BoolMacro->isObjectLike() &&
                    BoolMacro->getNumTokens() == 1 &&
                    BoolMacro->getReplacementToken(0).is(tok::kw__Bool);
    }
  }

  return Policy;
}

// Record the translation-unit scope and enter the TU decl context.
void Sema::ActOnTranslationUnitScope(Scope *S) {
  TUScope = S;
  PushDeclContext(S, Context.getTranslationUnitDecl());
}

namespace clang {
namespace sema {

// Preprocessor callback that mirrors file enter/exit events into Sema:
// maintains a stack of include locations, emits time-trace "Source" events
// when profiling is enabled, and diagnoses non-default #pragma pack/align
// state at include boundaries.
class SemaPPCallbacks : public PPCallbacks {
  Sema *S = nullptr;                              // null once reset() is called
  llvm::SmallVector<SourceLocation, 8> IncludeStack;

public:
  void set(Sema &S) { this->S = &S; }

  void reset() { S = nullptr; }

  virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason,
                           SrcMgr::CharacteristicKind FileType,
                           FileID PrevFID) override {
    // Sema may already have been destroyed (reset() was called); ignore.
    if (!S)
      return;
    switch (Reason) {
    case EnterFile: {
      SourceManager &SM = S->getSourceManager();
      SourceLocation IncludeLoc = SM.getIncludeLoc(SM.getFileID(Loc));
      if (IncludeLoc.isValid()) {
        if (llvm::timeTraceProfilerEnabled()) {
          const FileEntry *FE = SM.getFileEntryForID(SM.getFileID(Loc));
          llvm::timeTraceProfilerBegin(
              "Source", FE != nullptr ? FE->getName() : StringRef("<unknown>"));
        }

        IncludeStack.push_back(IncludeLoc);
        S->DiagnoseNonDefaultPragmaAlignPack(
            Sema::PragmaAlignPackDiagnoseKind::NonDefaultStateAtInclude,
            IncludeLoc);
      }
      break;
    }
    case ExitFile:
      if (!IncludeStack.empty()) {
        if (llvm::timeTraceProfilerEnabled())
          llvm::timeTraceProfilerEnd();

        S->DiagnoseNonDefaultPragmaAlignPack(
            Sema::PragmaAlignPackDiagnoseKind::ChangedStateAtExit,
            IncludeStack.pop_back_val());
      }
      break;
    default:
      break;
    }
  }
};

} // end namespace sema
} // end namespace clang

// Out-of-line definitions for the static constants declared in Sema, so
// ODR-uses of them link correctly.
const unsigned Sema::MaxAlignmentExponent;
const unsigned Sema::MaximumAlignment;

Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
           TranslationUnitKind TUKind, CodeCompleteConsumer *CodeCompleter)
    : ExternalSource(nullptr), isMultiplexExternalSource(false),
      CurFPFeatures(pp.getLangOpts()), LangOpts(pp.getLangOpts()), PP(pp),
      Context(ctxt), Consumer(consumer), Diags(PP.getDiagnostics()),
      SourceMgr(PP.getSourceManager()), CollectStats(false),
      CodeCompleter(CodeCompleter), CurContext(nullptr),
      OriginalLexicalContext(nullptr), MSStructPragmaOn(false),
      MSPointerToMemberRepresentationMethod(
          LangOpts.getMSPointerToMemberRepresentationMethod()),
      VtorDispStack(LangOpts.getVtorDispMode()),
      AlignPackStack(AlignPackInfo(getLangOpts().XLPragmaPack)),
      DataSegStack(nullptr), BSSSegStack(nullptr), ConstSegStack(nullptr),
      CodeSegStack(nullptr), FpPragmaStack(FPOptionsOverride()),
      CurInitSeg(nullptr), VisContext(nullptr),
      PragmaAttributeCurrentTargetDecl(nullptr),
      IsBuildingRecoveryCallExpr(false), Cleanup{}, LateTemplateParser(nullptr),
      LateTemplateParserCleanup(nullptr), OpaqueParser(nullptr),
      IdResolver(pp),
      // Cached declarations and lookup results below start out null and are
      // populated lazily on first use.
      StdExperimentalNamespaceCache(nullptr), StdInitializerList(nullptr),
      StdCoroutineTraitsCache(nullptr), CXXTypeInfoDecl(nullptr),
      MSVCGuidDecl(nullptr), NSNumberDecl(nullptr), NSValueDecl(nullptr),
      NSStringDecl(nullptr), StringWithUTF8StringMethod(nullptr),
      ValueWithBytesObjCTypeMethod(nullptr), NSArrayDecl(nullptr),
      ArrayWithObjectsMethod(nullptr), NSDictionaryDecl(nullptr),
      DictionaryWithObjectsMethod(nullptr), GlobalNewDeleteDeclared(false),
      TUKind(TUKind), NumSFINAEErrors(0),
      FullyCheckedComparisonCategories(
          static_cast<unsigned>(ComparisonCategoryType::Last) + 1),
      SatisfactionCache(Context), AccessCheckingSFINAE(false),
      InNonInstantiationSFINAEContext(false), NonInstantiationEntries(0),
      ArgumentPackSubstitutionIndex(-1), CurrentInstantiationScope(nullptr),
      DisableTypoCorrection(false), TyposCorrected(0), AnalysisWarnings(*this),
      ThreadSafetyDeclCache(nullptr), VarDataSharingAttributesStack(nullptr),
      CurScope(nullptr), Ident_super(nullptr), Ident___float128(nullptr) {
  TUScope = nullptr;
  isConstantEvaluatedOverride = false;

  LoadedExternalKnownNamespaces = false;
  for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I)
    NSNumberLiteralMethods[I] = nullptr;

  if (getLangOpts().ObjC)
    NSAPIObj.reset(new NSAPI(Context));

  if (getLangOpts().CPlusPlus)
    FieldCollector.reset(new CXXFieldCollector());

  // Tell diagnostics how to render things from the AST library.
  Diags.SetArgToStringFn(&FormatASTNodeDiagnosticArgument, &Context);

  // Seed the expression-evaluation-context stack with the default
  // potentially-evaluated context.
  ExprEvalContexts.emplace_back(
      ExpressionEvaluationContext::PotentiallyEvaluated, 0, CleanupInfo{},
      nullptr, ExpressionEvaluationContextRecord::EK_Other);

  // Initialization of data sharing attributes stack for OpenMP
  InitDataSharingAttributesStack();

  // Register a PP callback so Sema hears about file changes; the callback
  // object is owned by the preprocessor and outlives this Sema (see ~Sema,
  // which calls reset() on it).
  std::unique_ptr<sema::SemaPPCallbacks> Callbacks =
      std::make_unique<sema::SemaPPCallbacks>();
  SemaPPCallbackHandler = Callbacks.get();
  PP.addPPCallbacks(std::move(Callbacks));
  SemaPPCallbackHandler->set(*this);
}

// Anchor Sema's type info to this TU.
void Sema::anchor() {}

// Declare an implicit typedef Name for type T at translation-unit scope,
// unless some declaration with that name already exists.
void Sema::addImplicitTypedef(StringRef Name, QualType T) {
  DeclarationName DN = &Context.Idents.get(Name);
  if (IdResolver.begin(DN) == IdResolver.end())
    PushOnScopeChains(Context.buildImplicitTypedef(T, Name), TUScope);
}

/// Set up Sema after the consumer and external sources exist: notifies the
/// consumer/external source, then (when a TU scope is present) predeclares
/// the builtin types and typedefs required by the active language options.
void Sema::Initialize() {
  if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
    SC->InitializeSema(*this);

  // Tell the external Sema source about this Sema object.
  if (ExternalSemaSource *ExternalSema
        = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
    ExternalSema->InitializeSema(*this);

  // This needs to happen after ExternalSemaSource::InitializeSema(this) or we
  // will not be able to merge any duplicate __va_list_tag decls correctly.
  VAListTagName = PP.getIdentifierInfo("__va_list_tag");

  if (!TUScope)
    return;

  // Initialize predefined 128-bit integer types, if needed.
  if (Context.getTargetInfo().hasInt128Type() ||
      (Context.getAuxTargetInfo() &&
       Context.getAuxTargetInfo()->hasInt128Type())) {
    // If either of the 128-bit integer types are unavailable to name lookup,
    // define them now.
    DeclarationName Int128 = &Context.Idents.get("__int128_t");
    if (IdResolver.begin(Int128) == IdResolver.end())
      PushOnScopeChains(Context.getInt128Decl(), TUScope);

    DeclarationName UInt128 = &Context.Idents.get("__uint128_t");
    if (IdResolver.begin(UInt128) == IdResolver.end())
      PushOnScopeChains(Context.getUInt128Decl(), TUScope);
  }

  // Initialize predefined Objective-C types:
  if (getLangOpts().ObjC) {
    // If 'SEL' does not yet refer to any declarations, make it refer to the
    // predefined 'SEL'.
    DeclarationName SEL = &Context.Idents.get("SEL");
    if (IdResolver.begin(SEL) == IdResolver.end())
      PushOnScopeChains(Context.getObjCSelDecl(), TUScope);

    // If 'id' does not yet refer to any declarations, make it refer to the
    // predefined 'id'.
    DeclarationName Id = &Context.Idents.get("id");
    if (IdResolver.begin(Id) == IdResolver.end())
      PushOnScopeChains(Context.getObjCIdDecl(), TUScope);

    // Create the built-in typedef for 'Class'.
    DeclarationName Class = &Context.Idents.get("Class");
    if (IdResolver.begin(Class) == IdResolver.end())
      PushOnScopeChains(Context.getObjCClassDecl(), TUScope);

    // Create the built-in forward declaration for 'Protocol'.
    DeclarationName Protocol = &Context.Idents.get("Protocol");
    if (IdResolver.begin(Protocol) == IdResolver.end())
      PushOnScopeChains(Context.getObjCProtocolDecl(), TUScope);
  }

  // Create the internal type for the *StringMakeConstantString builtins.
  DeclarationName ConstantString = &Context.Idents.get("__NSConstantString");
  if (IdResolver.begin(ConstantString) == IdResolver.end())
    PushOnScopeChains(Context.getCFConstantStringDecl(), TUScope);

  // Initialize Microsoft "predefined C++ types".
  if (getLangOpts().MSVCCompat) {
    if (getLangOpts().CPlusPlus &&
        IdResolver.begin(&Context.Idents.get("type_info")) == IdResolver.end())
      PushOnScopeChains(Context.buildImplicitRecord("type_info", TTK_Class),
                        TUScope);

    addImplicitTypedef("size_t", Context.getSizeType());
  }

  // Initialize predefined OpenCL types and supported extensions and (optional)
  // core features.
  if (getLangOpts().OpenCL) {
    getOpenCLOptions().addSupport(
        Context.getTargetInfo().getSupportedOpenCLOpts(), getLangOpts());
    getOpenCLOptions().enableSupportedCore(getLangOpts());
    addImplicitTypedef("sampler_t", Context.OCLSamplerTy);
    addImplicitTypedef("event_t", Context.OCLEventTy);
    // The types below only exist in OpenCL C 2.0 / OpenCL C++.
    if (getLangOpts().OpenCLCPlusPlus || getLangOpts().OpenCLVersion >= 200) {
      addImplicitTypedef("clk_event_t", Context.OCLClkEventTy);
      addImplicitTypedef("queue_t", Context.OCLQueueTy);
      addImplicitTypedef("reserve_id_t", Context.OCLReserveIDTy);
      addImplicitTypedef("atomic_int", Context.getAtomicType(Context.IntTy));
      addImplicitTypedef("atomic_uint",
                         Context.getAtomicType(Context.UnsignedIntTy));
      auto AtomicLongT = Context.getAtomicType(Context.LongTy);
      addImplicitTypedef("atomic_long", AtomicLongT);
      auto AtomicULongT = Context.getAtomicType(Context.UnsignedLongTy);
      addImplicitTypedef("atomic_ulong", AtomicULongT);
      addImplicitTypedef("atomic_float",
                         Context.getAtomicType(Context.FloatTy));
      auto AtomicDoubleT = Context.getAtomicType(Context.DoubleTy);
      addImplicitTypedef("atomic_double", AtomicDoubleT);
      // OpenCLC v2.0, s6.13.11.6 requires that atomic_flag is implemented as
      // 32-bit integer and OpenCLC v2.0, s6.1.1 int is always 32-bit wide.
      addImplicitTypedef("atomic_flag", Context.getAtomicType(Context.IntTy));
      auto AtomicIntPtrT = Context.getAtomicType(Context.getIntPtrType());
      addImplicitTypedef("atomic_intptr_t", AtomicIntPtrT);
      auto AtomicUIntPtrT = Context.getAtomicType(Context.getUIntPtrType());
      addImplicitTypedef("atomic_uintptr_t", AtomicUIntPtrT);
      auto AtomicSizeT = Context.getAtomicType(Context.getSizeType());
      addImplicitTypedef("atomic_size_t", AtomicSizeT);
      auto AtomicPtrDiffT = Context.getAtomicType(Context.getPointerDiffType());
      addImplicitTypedef("atomic_ptrdiff_t", AtomicPtrDiffT);

      // OpenCL v2.0 s6.13.11.6:
      // - The atomic_long and atomic_ulong types are supported if the
      //   cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics
      //   extensions are supported.
      // - The atomic_double type is only supported if double precision
      //   is supported and the cl_khr_int64_base_atomics and
      //   cl_khr_int64_extended_atomics extensions are supported.
      // - If the device address space is 64-bits, the data types
      //   atomic_intptr_t, atomic_uintptr_t, atomic_size_t and
      //   atomic_ptrdiff_t are supported if the cl_khr_int64_base_atomics and
      //   cl_khr_int64_extended_atomics extensions are supported.
      std::vector<QualType> Atomic64BitTypes;
      Atomic64BitTypes.push_back(AtomicLongT);
      Atomic64BitTypes.push_back(AtomicULongT);
      Atomic64BitTypes.push_back(AtomicDoubleT);
      if (Context.getTypeSize(AtomicSizeT) == 64) {
        Atomic64BitTypes.push_back(AtomicSizeT);
        Atomic64BitTypes.push_back(AtomicIntPtrT);
        Atomic64BitTypes.push_back(AtomicUIntPtrT);
        Atomic64BitTypes.push_back(AtomicPtrDiffT);
      }
      for (auto &I : Atomic64BitTypes)
        setOpenCLExtensionForType(I,
            "cl_khr_int64_base_atomics cl_khr_int64_extended_atomics");

      setOpenCLExtensionForType(AtomicDoubleT, "cl_khr_fp64");
    }

    setOpenCLExtensionForType(Context.DoubleTy, "cl_khr_fp64");

#define GENERIC_IMAGE_TYPE_EXT(Type, Id, Ext) \
  setOpenCLExtensionForType(Context.Id, Ext);
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
  addImplicitTypedef(#ExtType, Context.Id##Ty); \
  setOpenCLExtensionForType(Context.Id##Ty, #Ext);
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  // AArch64 SVE ACLE builtin types.
  if (Context.getTargetInfo().hasAArch64SVETypes()) {
#define SVE_TYPE(Name, Id, SingletonId) \
  addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/AArch64SVEACLETypes.def"
  }

  // PPC MMA/VSX vector builtin types (gated on target features).
  if (Context.getTargetInfo().getTriple().isPPC64() &&
      Context.getTargetInfo().hasFeature("paired-vector-memops")) {
    if (Context.getTargetInfo().hasFeature("mma")) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
  addImplicitTypedef(#Name, Context.Id##Ty);
#include "clang/Basic/PPCTypes.def"
    }
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
  addImplicitTypedef(#Name, Context.Id##Ty);
#include "clang/Basic/PPCTypes.def"
  }

  // RISC-V 'V' extension builtin types.
  if (Context.getTargetInfo().hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId) \
  addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/RISCVVTypes.def"
  }

  if
(Context.getTargetInfo().hasBuiltinMSVaList()) { 396 DeclarationName MSVaList = &Context.Idents.get("__builtin_ms_va_list"); 397 if (IdResolver.begin(MSVaList) == IdResolver.end()) 398 PushOnScopeChains(Context.getBuiltinMSVaListDecl(), TUScope); 399 } 400 401 DeclarationName BuiltinVaList = &Context.Idents.get("__builtin_va_list"); 402 if (IdResolver.begin(BuiltinVaList) == IdResolver.end()) 403 PushOnScopeChains(Context.getBuiltinVaListDecl(), TUScope); 404 } 405 406 Sema::~Sema() { 407 assert(InstantiatingSpecializations.empty() && 408 "failed to clean up an InstantiatingTemplate?"); 409 410 if (VisContext) FreeVisContext(); 411 412 // Kill all the active scopes. 413 for (sema::FunctionScopeInfo *FSI : FunctionScopes) 414 delete FSI; 415 416 // Tell the SemaConsumer to forget about us; we're going out of scope. 417 if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer)) 418 SC->ForgetSema(); 419 420 // Detach from the external Sema source. 421 if (ExternalSemaSource *ExternalSema 422 = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource())) 423 ExternalSema->ForgetSema(); 424 425 // If Sema's ExternalSource is the multiplexer - we own it. 426 if (isMultiplexExternalSource) 427 delete ExternalSource; 428 429 // Delete cached satisfactions. 430 std::vector<ConstraintSatisfaction *> Satisfactions; 431 Satisfactions.reserve(Satisfactions.size()); 432 for (auto &Node : SatisfactionCache) 433 Satisfactions.push_back(&Node); 434 for (auto *Node : Satisfactions) 435 delete Node; 436 437 threadSafety::threadSafetyCleanup(ThreadSafetyDeclCache); 438 439 // Destroys data sharing attributes stack for OpenMP 440 DestroyDataSharingAttributesStack(); 441 442 // Detach from the PP callback handler which outlives Sema since it's owned 443 // by the preprocessor. 444 SemaPPCallbackHandler->reset(); 445 } 446 447 void Sema::warnStackExhausted(SourceLocation Loc) { 448 // Only warn about this once. 
  if (!WarnedStackExhausted) {
    Diag(Loc, diag::warn_stack_exhausted);
    WarnedStackExhausted = true;
  }
}

// Run Fn, spawning a new thread with more stack if needed; warns at Loc if
// stack space ran low (see warnStackExhausted).
void Sema::runWithSufficientStackSpace(SourceLocation Loc,
                                       llvm::function_ref<void()> Fn) {
  clang::runWithSufficientStackSpace([&] { warnStackExhausted(Loc); }, Fn);
}

/// makeUnavailableInSystemHeader - There is an error in the current
/// context. If we're still in a system header, and we can plausibly
/// make the relevant declaration unavailable instead of erroring, do
/// so and return true.
bool Sema::makeUnavailableInSystemHeader(SourceLocation loc,
                                      UnavailableAttr::ImplicitReason reason) {
  // If we're not in a function, it's an error.
  FunctionDecl *fn = dyn_cast<FunctionDecl>(CurContext);
  if (!fn) return false;

  // If we're in template instantiation, it's an error.
  if (inTemplateInstantiation())
    return false;

  // If that function's not in a system header, it's an error.
  if (!Context.getSourceManager().isInSystemHeader(loc))
    return false;

  // If the function is already unavailable, it's not an error.
  if (fn->hasAttr<UnavailableAttr>()) return true;

  fn->addAttr(UnavailableAttr::CreateImplicit(Context, "", reason, loc));
  return true;
}

ASTMutationListener *Sema::getASTMutationListener() const {
  return getASTConsumer().GetASTMutationListener();
}

/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void Sema::addExternalSource(ExternalSemaSource *E) {
  assert(E && "Cannot use with NULL ptr");

  // First source registered: store it directly.
  if (!ExternalSource) {
    ExternalSource = E;
    return;
  }

  // Subsequent sources: wrap in (or append to) a multiplexer, which Sema
  // then owns (see isMultiplexExternalSource handling in ~Sema).
  if (isMultiplexExternalSource)
    static_cast<MultiplexExternalSemaSource*>(ExternalSource)->addSource(*E);
  else {
    ExternalSource = new MultiplexExternalSemaSource(*ExternalSource, *E);
    isMultiplexExternalSource = true;
  }
}

/// Print out statistics about the semantic analysis.
void Sema::PrintStats() const {
  llvm::errs() << "\n*** Semantic Analysis Stats:\n";
  llvm::errs() << NumSFINAEErrors << " SFINAE diagnostics trapped.\n";

  BumpAlloc.PrintStats();
  AnalysisWarnings.PrintStats();
}

// Warn when a nullable (or nullable_result) source value converts to a
// nonnull destination type, losing the nullability guarantee.
void Sema::diagnoseNullableToNonnullConversion(QualType DstType,
                                               QualType SrcType,
                                               SourceLocation Loc) {
  Optional<NullabilityKind> ExprNullability = SrcType->getNullability(Context);
  if (!ExprNullability || (*ExprNullability != NullabilityKind::Nullable &&
                           *ExprNullability != NullabilityKind::NullableResult))
    return;

  Optional<NullabilityKind> TypeNullability = DstType->getNullability(Context);
  if (!TypeNullability || *TypeNullability != NullabilityKind::NonNull)
    return;

  Diag(Loc, diag::warn_nullability_lost) << SrcType << DstType;
}

// Emit -Wzero-as-null-pointer-constant (with a "nullptr" fix-it) when a
// literal 0 is implicitly converted to a (member) pointer in C++11+.
void Sema::diagnoseZeroToNullptrConversion(CastKind Kind, const Expr* E) {
  if (Diags.isIgnored(diag::warn_zero_as_null_pointer_constant,
                      E->getBeginLoc()))
    return;
  // nullptr only exists from C++11 on, so don't warn on its absence earlier.
  if (!getLangOpts().CPlusPlus11)
    return;

  if (Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer)
    return;
  if (E->IgnoreParenImpCasts()->getType()->isNullPtrType())
    return;

  // Don't diagnose the conversion from a 0 literal to a null pointer argument
  // in a synthesized call to operator<=>.
  if (!CodeSynthesisContexts.empty() &&
      CodeSynthesisContexts.back().Kind ==
          CodeSynthesisContext::RewritingOperatorAsSpaceship)
    return;

  // If it is a macro from system header, and if the macro name is not "NULL",
  // do not warn.
  SourceLocation MaybeMacroLoc = E->getBeginLoc();
  if (Diags.getSuppressSystemWarnings() &&
      SourceMgr.isInSystemMacro(MaybeMacroLoc) &&
      !findMacroSpelling(MaybeMacroLoc, "NULL"))
    return;

  Diag(E->getBeginLoc(), diag::warn_zero_as_null_pointer_constant)
      << FixItHint::CreateReplacement(E->getSourceRange(), "nullptr");
}

/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit cast.
/// If there is already an implicit cast, merge into the existing one.
/// The result is of the given category.
ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
                                   CastKind Kind, ExprValueKind VK,
                                   const CXXCastPath *BasePath,
                                   CheckedConversionKind CCK) {
#ifndef NDEBUG
  // Sanity-check value-category transitions: only a fixed set of cast kinds
  // may turn an lvalue into an rvalue.
  if (VK == VK_RValue && !E->isRValue()) {
    switch (Kind) {
    default:
      llvm_unreachable(("can't implicitly cast lvalue to rvalue with this cast "
                        "kind: " +
                        std::string(CastExpr::getCastKindName(Kind)))
                           .c_str());
    case CK_Dependent:
    case CK_LValueToRValue:
    case CK_ArrayToPointerDecay:
    case CK_FunctionToPointerDecay:
    case CK_ToVoid:
    case CK_NonAtomicToAtomic:
      break;
    }
  }
  assert((VK == VK_RValue || Kind == CK_Dependent || !E->isRValue()) &&
         "can't cast rvalue to lvalue");
#endif

  diagnoseNullableToNonnullConversion(Ty, E->getType(), E->getBeginLoc());
  diagnoseZeroToNullptrConversion(Kind, E);

  QualType ExprTy = Context.getCanonicalType(E->getType());
  QualType TypeTy = Context.getCanonicalType(Ty);

  // No cast needed when the canonical types already match.
  if (ExprTy == TypeTy)
    return E;

  // C++1z [conv.array]: The temporary materialization conversion is applied.
  // We also use this to fuel C++ DR1213, which applies to C++11 onwards.
  if (Kind == CK_ArrayToPointerDecay && getLangOpts().CPlusPlus &&
      E->getValueKind() == VK_RValue) {
    // The temporary is an lvalue in C++98 and an xvalue otherwise.
    ExprResult Materialized = CreateMaterializeTemporaryExpr(
        E->getType(), E, !getLangOpts().CPlusPlus11);
    if (Materialized.isInvalid())
      return ExprError();
    E = Materialized.get();
  }

  // Merge into an existing implicit cast of the same kind instead of
  // stacking a second one (only when no base path is involved).
  if (ImplicitCastExpr *ImpCast = dyn_cast<ImplicitCastExpr>(E)) {
    if (ImpCast->getCastKind() == Kind && (!BasePath || BasePath->empty())) {
      ImpCast->setType(Ty);
      ImpCast->setValueKind(VK);
      return E;
    }
  }

  return ImplicitCastExpr::Create(Context, Ty, Kind, E, BasePath, VK,
                                  CurFPFeatureOverrides());
}

/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
CastKind Sema::ScalarTypeToBooleanCastKind(QualType ScalarTy) {
  switch (ScalarTy->getScalarTypeKind()) {
  case Type::STK_Bool: return CK_NoOp;
  case Type::STK_CPointer: return CK_PointerToBoolean;
  case Type::STK_BlockPointer: return CK_PointerToBoolean;
  case Type::STK_ObjCObjectPointer: return CK_PointerToBoolean;
  case Type::STK_MemberPointer: return CK_MemberPointerToBoolean;
  case Type::STK_Integral: return CK_IntegralToBoolean;
  case Type::STK_Floating: return CK_FloatingToBoolean;
  case Type::STK_IntegralComplex: return CK_IntegralComplexToBoolean;
  case Type::STK_FloatingComplex: return CK_FloatingComplexToBoolean;
  case Type::STK_FixedPoint: return CK_FixedPointToBoolean;
  }
  llvm_unreachable("unknown scalar type kind");
}

/// Used to prune the decls of Sema's UnusedFileScopedDecls vector.
// Returns true when D should no longer be tracked as an unused file-scoped
// declaration (it is used, externally visible, or would not be warned about).
static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) {
  if (D->getMostRecentDecl()->isUsed())
    return true;

  if (D->isExternallyVisible())
    return true;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    // If this is a function template and none of its specializations is used,
    // we should warn.
    if (FunctionTemplateDecl *Template = FD->getDescribedFunctionTemplate())
      for (const auto *Spec : Template->specializations())
        if (ShouldRemoveFromUnused(SemaRef, Spec))
          return true;

    // UnusedFileScopedDecls stores the first declaration.
    // The declaration may have become definition so check again.
    const FunctionDecl *DeclToCheck;
    if (FD->hasBody(DeclToCheck))
      return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);

    // Later redecls may add new information resulting in not having to warn,
    // so check again.
    DeclToCheck = FD->getMostRecentDecl();
    if (DeclToCheck != FD)
      return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    // If a variable usable in constant expressions is referenced,
    // don't warn if it isn't used: if the value of a variable is required
    // for the computation of a constant expression, it doesn't make sense to
    // warn even if the variable isn't odr-used.  (isReferenced doesn't
    // precisely reflect that, but it's a decent approximation.)
    if (VD->isReferenced() &&
        VD->mightBeUsableInConstantExpressions(SemaRef->Context))
      return true;

    if (VarTemplateDecl *Template = VD->getDescribedVarTemplate())
      // If this is a variable template and none of its specializations is used,
      // we should warn.
      for (const auto *Spec : Template->specializations())
        if (ShouldRemoveFromUnused(SemaRef, Spec))
          return true;

    // UnusedFileScopedDecls stores the first declaration.
    // The declaration may have become definition so check again.
    const VarDecl *DeclToCheck = VD->getDefinition();
    if (DeclToCheck)
      return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);

    // Later redecls may add new information resulting in not having to warn,
    // so check again.
    DeclToCheck = VD->getMostRecentDecl();
    if (DeclToCheck != VD)
      return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
  }

  return false;
}

// ND must be a FunctionDecl or VarDecl; returns its C-linkage status.
static bool isFunctionOrVarDeclExternC(NamedDecl *ND) {
  if (auto *FD = dyn_cast<FunctionDecl>(ND))
    return FD->isExternC();
  return cast<VarDecl>(ND)->isExternC();
}

/// Determine whether ND is an external-linkage function or variable whose
/// type has no linkage.
bool Sema::isExternalWithNoLinkageType(ValueDecl *VD) {
  // Note: it's not quite enough to check whether VD has UniqueExternalLinkage,
  // because we also want to catch the case where its type has VisibleNoLinkage,
  // which does not affect the linkage of VD.
  return getLangOpts().CPlusPlus && VD->hasExternalFormalLinkage() &&
         !isExternalFormalLinkage(VD->getType()->getLinkage()) &&
         !isFunctionOrVarDeclExternC(VD);
}

/// Obtains a sorted list of functions and variables that are undefined but
/// ODR-used.
void Sema::getUndefinedButUsed(
    SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined) {
  for (const auto &UndefinedUse : UndefinedButUsed) {
    NamedDecl *ND = UndefinedUse.first;

    // Ignore attributes that have become invalid.
    if (ND->isInvalidDecl()) continue;

    // __attribute__((weakref)) is basically a definition.
    if (ND->hasAttr<WeakRefAttr>()) continue;

    if (isa<CXXDeductionGuideDecl>(ND))
      continue;

    if (ND->hasAttr<DLLImportAttr>() || ND->hasAttr<DLLExportAttr>()) {
      // An exported function will always be emitted when defined, so even if
      // the function is inline, it doesn't have to be emitted in this TU. An
      // imported function implies that it has been exported somewhere else.
      continue;
    }

    if (FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
      if (FD->isDefined())
        continue;
      // Externally-visible, non-inline functions may legitimately be defined
      // in another TU.
      if (FD->isExternallyVisible() &&
          !isExternalWithNoLinkageType(FD) &&
          !FD->getMostRecentDecl()->isInlined() &&
          !FD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
        continue;
      if (FD->getBuiltinID())
        continue;
    } else {
      auto *VD = cast<VarDecl>(ND);
      if (VD->hasDefinition() != VarDecl::DeclarationOnly)
        continue;
      if (VD->isExternallyVisible() &&
          !isExternalWithNoLinkageType(VD) &&
          !VD->getMostRecentDecl()->isInline() &&
          !VD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
        continue;

      // Skip VarDecls that lack formal definitions but which we know are in
      // fact defined somewhere.
      if (VD->isKnownToBeDefined())
        continue;
    }

    Undefined.push_back(std::make_pair(ND, UndefinedUse.second));
  }
}

/// checkUndefinedButUsed - Check for undefined objects with internal linkage
/// or that are inline.
static void checkUndefinedButUsed(Sema &S) {
  if (S.UndefinedButUsed.empty()) return;

  // Collect all the still-undefined entities with internal linkage.
  SmallVector<std::pair<NamedDecl *, SourceLocation>, 16> Undefined;
  S.getUndefinedButUsed(Undefined);
  if (Undefined.empty()) return;

  for (auto Undef : Undefined) {
    ValueDecl *VD = cast<ValueDecl>(Undef.first);
    SourceLocation UseLoc = Undef.second;

    if (S.isExternalWithNoLinkageType(VD)) {
      // C++ [basic.link]p8:
      //   A type without linkage shall not be used as the type of a variable
      //   or function with external linkage unless
      //    -- the entity has C language linkage
      //    -- the entity is not odr-used or is defined in the same TU
      //
      // As an extension, accept this in cases where the type is externally
      // visible, since the function or variable actually can be defined in
      // another translation unit in that case.
      S.Diag(VD->getLocation(), isExternallyVisible(VD->getType()->getLinkage())
                                    ? diag::ext_undefined_internal_type
                                    : diag::err_undefined_internal_type)
          << isa<VarDecl>(VD) << VD;
    } else if (!VD->isExternallyVisible()) {
      // FIXME: We can promote this to an error. The function or variable can't
      // be defined anywhere else, so the program must necessarily violate the
      // one definition rule.
      S.Diag(VD->getLocation(), diag::warn_undefined_internal)
          << isa<VarDecl>(VD) << VD;
    } else if (auto *FD = dyn_cast<FunctionDecl>(VD)) {
      (void)FD;
      assert(FD->getMostRecentDecl()->isInlined() &&
             "used object requires definition but isn't inline or internal?");
      // FIXME: This is ill-formed; we should reject.
      S.Diag(VD->getLocation(), diag::warn_undefined_inline) << VD;
    } else {
      assert(cast<VarDecl>(VD)->getMostRecentDecl()->isInline() &&
             "used var requires definition but isn't inline or internal?");
      S.Diag(VD->getLocation(), diag::err_undefined_inline_var) << VD;
    }
    if (UseLoc.isValid())
      S.Diag(UseLoc, diag::note_used_here);
  }

  S.UndefinedButUsed.clear();
}

// Pull weak-undeclared identifiers from the external source (e.g. a PCH or
// module) into WeakUndeclaredIdentifiers.
void Sema::LoadExternalWeakUndeclaredIdentifiers() {
  if (!ExternalSource)
    return;

  SmallVector<std::pair<IdentifierInfo *, WeakInfo>, 4> WeakIDs;
  ExternalSource->ReadWeakUndeclaredIdentifiers(WeakIDs);
  for (auto &WeakID : WeakIDs)
    WeakUndeclaredIdentifiers.insert(WeakID);
}


typedef llvm::DenseMap<const CXXRecordDecl*, bool> RecordCompleteMap;

/// Returns true, if all methods and nested classes of the given
/// CXXRecordDecl are defined in this translation unit.
///
/// Should only be called from ActOnEndOfTranslationUnit so that all
/// definitions are actually read.
static bool MethodsAndNestedClassesComplete(const CXXRecordDecl *RD,
                                            RecordCompleteMap &MNCComplete) {
  // Memoized via MNCComplete to avoid re-walking shared records.
  RecordCompleteMap::iterator Cache = MNCComplete.find(RD);
  if (Cache != MNCComplete.end())
    return Cache->second;
  if (!RD->isCompleteDefinition())
    return false;
  bool Complete = true;
  for (DeclContext::decl_iterator I = RD->decls_begin(),
                                  E = RD->decls_end();
       I != E && Complete; ++I) {
    if (const CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(*I))
      Complete = M->isDefined() || M->isDefaulted() ||
                 (M->isPure() && !isa<CXXDestructorDecl>(M));
    else if (const FunctionTemplateDecl *F = dyn_cast<FunctionTemplateDecl>(*I))
      // If the template function is marked as late template parsed at this
      // point, it has not been instantiated and therefore we have not
      // performed semantic analysis on it yet, so we cannot know if the type
      // can be considered complete.
866 Complete = !F->getTemplatedDecl()->isLateTemplateParsed() && 867 F->getTemplatedDecl()->isDefined(); 868 else if (const CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(*I)) { 869 if (R->isInjectedClassName()) 870 continue; 871 if (R->hasDefinition()) 872 Complete = MethodsAndNestedClassesComplete(R->getDefinition(), 873 MNCComplete); 874 else 875 Complete = false; 876 } 877 } 878 MNCComplete[RD] = Complete; 879 return Complete; 880 } 881 882 /// Returns true, if the given CXXRecordDecl is fully defined in this 883 /// translation unit, i.e. all methods are defined or pure virtual and all 884 /// friends, friend functions and nested classes are fully defined in this 885 /// translation unit. 886 /// 887 /// Should only be called from ActOnEndOfTranslationUnit so that all 888 /// definitions are actually read. 889 static bool IsRecordFullyDefined(const CXXRecordDecl *RD, 890 RecordCompleteMap &RecordsComplete, 891 RecordCompleteMap &MNCComplete) { 892 RecordCompleteMap::iterator Cache = RecordsComplete.find(RD); 893 if (Cache != RecordsComplete.end()) 894 return Cache->second; 895 bool Complete = MethodsAndNestedClassesComplete(RD, MNCComplete); 896 for (CXXRecordDecl::friend_iterator I = RD->friend_begin(), 897 E = RD->friend_end(); 898 I != E && Complete; ++I) { 899 // Check if friend classes and methods are complete. 900 if (TypeSourceInfo *TSI = (*I)->getFriendType()) { 901 // Friend classes are available as the TypeSourceInfo of the FriendDecl. 902 if (CXXRecordDecl *FriendD = TSI->getType()->getAsCXXRecordDecl()) 903 Complete = MethodsAndNestedClassesComplete(FriendD, MNCComplete); 904 else 905 Complete = false; 906 } else { 907 // Friend functions are available through the NamedDecl of FriendDecl. 908 if (const FunctionDecl *FD = 909 dyn_cast<FunctionDecl>((*I)->getFriendDecl())) 910 Complete = FD->isDefined(); 911 else 912 // This is a template friend, give up. 
913 Complete = false; 914 } 915 } 916 RecordsComplete[RD] = Complete; 917 return Complete; 918 } 919 920 void Sema::emitAndClearUnusedLocalTypedefWarnings() { 921 if (ExternalSource) 922 ExternalSource->ReadUnusedLocalTypedefNameCandidates( 923 UnusedLocalTypedefNameCandidates); 924 for (const TypedefNameDecl *TD : UnusedLocalTypedefNameCandidates) { 925 if (TD->isReferenced()) 926 continue; 927 Diag(TD->getLocation(), diag::warn_unused_local_typedef) 928 << isa<TypeAliasDecl>(TD) << TD->getDeclName(); 929 } 930 UnusedLocalTypedefNameCandidates.clear(); 931 } 932 933 /// This is called before the very first declaration in the translation unit 934 /// is parsed. Note that the ASTContext may have already injected some 935 /// declarations. 936 void Sema::ActOnStartOfTranslationUnit() { 937 if (getLangOpts().ModulesTS && 938 (getLangOpts().getCompilingModule() == LangOptions::CMK_ModuleInterface || 939 getLangOpts().getCompilingModule() == LangOptions::CMK_None)) { 940 // We start in an implied global module fragment. 941 SourceLocation StartOfTU = 942 SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID()); 943 ActOnGlobalModuleFragmentDecl(StartOfTU); 944 ModuleScopes.back().ImplicitGlobalModuleFragment = true; 945 } 946 } 947 948 void Sema::ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind) { 949 // No explicit actions are required at the end of the global module fragment. 950 if (Kind == TUFragmentKind::Global) 951 return; 952 953 // Transfer late parsed template instantiations over to the pending template 954 // instantiation list. During normal compilation, the late template parser 955 // will be installed and instantiating these templates will succeed. 956 // 957 // If we are building a TU prefix for serialization, it is also safe to 958 // transfer these over, even though they are not parsed. 
The end of the TU 959 // should be outside of any eager template instantiation scope, so when this 960 // AST is deserialized, these templates will not be parsed until the end of 961 // the combined TU. 962 PendingInstantiations.insert(PendingInstantiations.end(), 963 LateParsedInstantiations.begin(), 964 LateParsedInstantiations.end()); 965 LateParsedInstantiations.clear(); 966 967 // If DefinedUsedVTables ends up marking any virtual member functions it 968 // might lead to more pending template instantiations, which we then need 969 // to instantiate. 970 DefineUsedVTables(); 971 972 // C++: Perform implicit template instantiations. 973 // 974 // FIXME: When we perform these implicit instantiations, we do not 975 // carefully keep track of the point of instantiation (C++ [temp.point]). 976 // This means that name lookup that occurs within the template 977 // instantiation will always happen at the end of the translation unit, 978 // so it will find some names that are not required to be found. This is 979 // valid, but we could do better by diagnosing if an instantiation uses a 980 // name that was not visible at its first point of instantiation. 981 if (ExternalSource) { 982 // Load pending instantiations from the external source. 983 SmallVector<PendingImplicitInstantiation, 4> Pending; 984 ExternalSource->ReadPendingInstantiations(Pending); 985 for (auto PII : Pending) 986 if (auto Func = dyn_cast<FunctionDecl>(PII.first)) 987 Func->setInstantiationIsPending(true); 988 PendingInstantiations.insert(PendingInstantiations.begin(), 989 Pending.begin(), Pending.end()); 990 } 991 992 { 993 llvm::TimeTraceScope TimeScope("PerformPendingInstantiations"); 994 PerformPendingInstantiations(); 995 } 996 997 emitDeferredDiags(); 998 999 assert(LateParsedInstantiations.empty() && 1000 "end of TU template instantiation should not create more " 1001 "late-parsed templates"); 1002 1003 // Report diagnostics for uncorrected delayed typos. 
Ideally all of them 1004 // should have been corrected by that time, but it is very hard to cover all 1005 // cases in practice. 1006 for (const auto &Typo : DelayedTypos) { 1007 // We pass an empty TypoCorrection to indicate no correction was performed. 1008 Typo.second.DiagHandler(TypoCorrection()); 1009 } 1010 DelayedTypos.clear(); 1011 } 1012 1013 /// ActOnEndOfTranslationUnit - This is called at the very end of the 1014 /// translation unit when EOF is reached and all but the top-level scope is 1015 /// popped. 1016 void Sema::ActOnEndOfTranslationUnit() { 1017 assert(DelayedDiagnostics.getCurrentPool() == nullptr 1018 && "reached end of translation unit with a pool attached?"); 1019 1020 // If code completion is enabled, don't perform any end-of-translation-unit 1021 // work. 1022 if (PP.isCodeCompletionEnabled()) 1023 return; 1024 1025 // Complete translation units and modules define vtables and perform implicit 1026 // instantiations. PCH files do not. 1027 if (TUKind != TU_Prefix) { 1028 DiagnoseUseOfUnimplementedSelectors(); 1029 1030 ActOnEndOfTranslationUnitFragment( 1031 !ModuleScopes.empty() && ModuleScopes.back().Module->Kind == 1032 Module::PrivateModuleFragment 1033 ? TUFragmentKind::Private 1034 : TUFragmentKind::Normal); 1035 1036 if (LateTemplateParserCleanup) 1037 LateTemplateParserCleanup(OpaqueParser); 1038 1039 CheckDelayedMemberExceptionSpecs(); 1040 } else { 1041 // If we are building a TU prefix for serialization, it is safe to transfer 1042 // these over, even though they are not parsed. The end of the TU should be 1043 // outside of any eager template instantiation scope, so when this AST is 1044 // deserialized, these templates will not be parsed until the end of the 1045 // combined TU. 
1046 PendingInstantiations.insert(PendingInstantiations.end(), 1047 LateParsedInstantiations.begin(), 1048 LateParsedInstantiations.end()); 1049 LateParsedInstantiations.clear(); 1050 1051 if (LangOpts.PCHInstantiateTemplates) { 1052 llvm::TimeTraceScope TimeScope("PerformPendingInstantiations"); 1053 PerformPendingInstantiations(); 1054 } 1055 } 1056 1057 DiagnoseUnterminatedPragmaAlignPack(); 1058 DiagnoseUnterminatedPragmaAttribute(); 1059 1060 // All delayed member exception specs should be checked or we end up accepting 1061 // incompatible declarations. 1062 assert(DelayedOverridingExceptionSpecChecks.empty()); 1063 assert(DelayedEquivalentExceptionSpecChecks.empty()); 1064 1065 // All dllexport classes should have been processed already. 1066 assert(DelayedDllExportClasses.empty()); 1067 assert(DelayedDllExportMemberFunctions.empty()); 1068 1069 // Remove file scoped decls that turned out to be used. 1070 UnusedFileScopedDecls.erase( 1071 std::remove_if(UnusedFileScopedDecls.begin(nullptr, true), 1072 UnusedFileScopedDecls.end(), 1073 [this](const DeclaratorDecl *DD) { 1074 return ShouldRemoveFromUnused(this, DD); 1075 }), 1076 UnusedFileScopedDecls.end()); 1077 1078 if (TUKind == TU_Prefix) { 1079 // Translation unit prefixes don't need any of the checking below. 
1080 if (!PP.isIncrementalProcessingEnabled()) 1081 TUScope = nullptr; 1082 return; 1083 } 1084 1085 // Check for #pragma weak identifiers that were never declared 1086 LoadExternalWeakUndeclaredIdentifiers(); 1087 for (auto WeakID : WeakUndeclaredIdentifiers) { 1088 if (WeakID.second.getUsed()) 1089 continue; 1090 1091 Decl *PrevDecl = LookupSingleName(TUScope, WeakID.first, SourceLocation(), 1092 LookupOrdinaryName); 1093 if (PrevDecl != nullptr && 1094 !(isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl))) 1095 Diag(WeakID.second.getLocation(), diag::warn_attribute_wrong_decl_type) 1096 << "'weak'" << ExpectedVariableOrFunction; 1097 else 1098 Diag(WeakID.second.getLocation(), diag::warn_weak_identifier_undeclared) 1099 << WeakID.first; 1100 } 1101 1102 if (LangOpts.CPlusPlus11 && 1103 !Diags.isIgnored(diag::warn_delegating_ctor_cycle, SourceLocation())) 1104 CheckDelegatingCtorCycles(); 1105 1106 if (!Diags.hasErrorOccurred()) { 1107 if (ExternalSource) 1108 ExternalSource->ReadUndefinedButUsed(UndefinedButUsed); 1109 checkUndefinedButUsed(*this); 1110 } 1111 1112 // A global-module-fragment is only permitted within a module unit. 1113 bool DiagnosedMissingModuleDeclaration = false; 1114 if (!ModuleScopes.empty() && 1115 ModuleScopes.back().Module->Kind == Module::GlobalModuleFragment && 1116 !ModuleScopes.back().ImplicitGlobalModuleFragment) { 1117 Diag(ModuleScopes.back().BeginLoc, 1118 diag::err_module_declaration_missing_after_global_module_introducer); 1119 DiagnosedMissingModuleDeclaration = true; 1120 } 1121 1122 if (TUKind == TU_Module) { 1123 // If we are building a module interface unit, we need to have seen the 1124 // module declaration by now. 1125 if (getLangOpts().getCompilingModule() == 1126 LangOptions::CMK_ModuleInterface && 1127 (ModuleScopes.empty() || 1128 !ModuleScopes.back().Module->isModulePurview()) && 1129 !DiagnosedMissingModuleDeclaration) { 1130 // FIXME: Make a better guess as to where to put the module declaration. 
1131 Diag(getSourceManager().getLocForStartOfFile( 1132 getSourceManager().getMainFileID()), 1133 diag::err_module_declaration_missing); 1134 } 1135 1136 // If we are building a module, resolve all of the exported declarations 1137 // now. 1138 if (Module *CurrentModule = PP.getCurrentModule()) { 1139 ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap(); 1140 1141 SmallVector<Module *, 2> Stack; 1142 Stack.push_back(CurrentModule); 1143 while (!Stack.empty()) { 1144 Module *Mod = Stack.pop_back_val(); 1145 1146 // Resolve the exported declarations and conflicts. 1147 // FIXME: Actually complain, once we figure out how to teach the 1148 // diagnostic client to deal with complaints in the module map at this 1149 // point. 1150 ModMap.resolveExports(Mod, /*Complain=*/false); 1151 ModMap.resolveUses(Mod, /*Complain=*/false); 1152 ModMap.resolveConflicts(Mod, /*Complain=*/false); 1153 1154 // Queue the submodules, so their exports will also be resolved. 1155 Stack.append(Mod->submodule_begin(), Mod->submodule_end()); 1156 } 1157 } 1158 1159 // Warnings emitted in ActOnEndOfTranslationUnit() should be emitted for 1160 // modules when they are built, not every time they are used. 1161 emitAndClearUnusedLocalTypedefWarnings(); 1162 } 1163 1164 // C99 6.9.2p2: 1165 // A declaration of an identifier for an object that has file 1166 // scope without an initializer, and without a storage-class 1167 // specifier or with the storage-class specifier static, 1168 // constitutes a tentative definition. If a translation unit 1169 // contains one or more tentative definitions for an identifier, 1170 // and the translation unit contains no external definition for 1171 // that identifier, then the behavior is exactly as if the 1172 // translation unit contains a file scope declaration of that 1173 // identifier, with the composite type as of the end of the 1174 // translation unit, with an initializer equal to 0. 
1175 llvm::SmallSet<VarDecl *, 32> Seen; 1176 for (TentativeDefinitionsType::iterator 1177 T = TentativeDefinitions.begin(ExternalSource), 1178 TEnd = TentativeDefinitions.end(); 1179 T != TEnd; ++T) { 1180 VarDecl *VD = (*T)->getActingDefinition(); 1181 1182 // If the tentative definition was completed, getActingDefinition() returns 1183 // null. If we've already seen this variable before, insert()'s second 1184 // return value is false. 1185 if (!VD || VD->isInvalidDecl() || !Seen.insert(VD).second) 1186 continue; 1187 1188 if (const IncompleteArrayType *ArrayT 1189 = Context.getAsIncompleteArrayType(VD->getType())) { 1190 // Set the length of the array to 1 (C99 6.9.2p5). 1191 Diag(VD->getLocation(), diag::warn_tentative_incomplete_array); 1192 llvm::APInt One(Context.getTypeSize(Context.getSizeType()), true); 1193 QualType T = Context.getConstantArrayType(ArrayT->getElementType(), One, 1194 nullptr, ArrayType::Normal, 0); 1195 VD->setType(T); 1196 } else if (RequireCompleteType(VD->getLocation(), VD->getType(), 1197 diag::err_tentative_def_incomplete_type)) 1198 VD->setInvalidDecl(); 1199 1200 // No initialization is performed for a tentative definition. 1201 CheckCompleteVariableDeclaration(VD); 1202 1203 // Notify the consumer that we've completed a tentative definition. 1204 if (!VD->isInvalidDecl()) 1205 Consumer.CompleteTentativeDefinition(VD); 1206 } 1207 1208 for (auto D : ExternalDeclarations) { 1209 if (!D || D->isInvalidDecl() || D->getPreviousDecl() || !D->isUsed()) 1210 continue; 1211 1212 Consumer.CompleteExternalDeclaration(D); 1213 } 1214 1215 // If there were errors, disable 'unused' warnings since they will mostly be 1216 // noise. Don't warn for a use from a module: either we should warn on all 1217 // file-scope declarations in modules or not at all, but whether the 1218 // declaration is used is immaterial. 1219 if (!Diags.hasErrorOccurred() && TUKind != TU_Module) { 1220 // Output warning for unused file scoped decls. 
1221 for (UnusedFileScopedDeclsType::iterator 1222 I = UnusedFileScopedDecls.begin(ExternalSource), 1223 E = UnusedFileScopedDecls.end(); I != E; ++I) { 1224 if (ShouldRemoveFromUnused(this, *I)) 1225 continue; 1226 1227 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) { 1228 const FunctionDecl *DiagD; 1229 if (!FD->hasBody(DiagD)) 1230 DiagD = FD; 1231 if (DiagD->isDeleted()) 1232 continue; // Deleted functions are supposed to be unused. 1233 if (DiagD->isReferenced()) { 1234 if (isa<CXXMethodDecl>(DiagD)) 1235 Diag(DiagD->getLocation(), diag::warn_unneeded_member_function) 1236 << DiagD; 1237 else { 1238 if (FD->getStorageClass() == SC_Static && 1239 !FD->isInlineSpecified() && 1240 !SourceMgr.isInMainFile( 1241 SourceMgr.getExpansionLoc(FD->getLocation()))) 1242 Diag(DiagD->getLocation(), 1243 diag::warn_unneeded_static_internal_decl) 1244 << DiagD; 1245 else 1246 Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl) 1247 << /*function*/ 0 << DiagD; 1248 } 1249 } else { 1250 if (FD->getDescribedFunctionTemplate()) 1251 Diag(DiagD->getLocation(), diag::warn_unused_template) 1252 << /*function*/ 0 << DiagD; 1253 else 1254 Diag(DiagD->getLocation(), isa<CXXMethodDecl>(DiagD) 1255 ? 
diag::warn_unused_member_function 1256 : diag::warn_unused_function) 1257 << DiagD; 1258 } 1259 } else { 1260 const VarDecl *DiagD = cast<VarDecl>(*I)->getDefinition(); 1261 if (!DiagD) 1262 DiagD = cast<VarDecl>(*I); 1263 if (DiagD->isReferenced()) { 1264 Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl) 1265 << /*variable*/ 1 << DiagD; 1266 } else if (DiagD->getType().isConstQualified()) { 1267 const SourceManager &SM = SourceMgr; 1268 if (SM.getMainFileID() != SM.getFileID(DiagD->getLocation()) || 1269 !PP.getLangOpts().IsHeaderFile) 1270 Diag(DiagD->getLocation(), diag::warn_unused_const_variable) 1271 << DiagD; 1272 } else { 1273 if (DiagD->getDescribedVarTemplate()) 1274 Diag(DiagD->getLocation(), diag::warn_unused_template) 1275 << /*variable*/ 1 << DiagD; 1276 else 1277 Diag(DiagD->getLocation(), diag::warn_unused_variable) << DiagD; 1278 } 1279 } 1280 } 1281 1282 emitAndClearUnusedLocalTypedefWarnings(); 1283 } 1284 1285 if (!Diags.isIgnored(diag::warn_unused_private_field, SourceLocation())) { 1286 // FIXME: Load additional unused private field candidates from the external 1287 // source. 
1288 RecordCompleteMap RecordsComplete; 1289 RecordCompleteMap MNCComplete; 1290 for (NamedDeclSetType::iterator I = UnusedPrivateFields.begin(), 1291 E = UnusedPrivateFields.end(); I != E; ++I) { 1292 const NamedDecl *D = *I; 1293 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D->getDeclContext()); 1294 if (RD && !RD->isUnion() && 1295 IsRecordFullyDefined(RD, RecordsComplete, MNCComplete)) { 1296 Diag(D->getLocation(), diag::warn_unused_private_field) 1297 << D->getDeclName(); 1298 } 1299 } 1300 } 1301 1302 if (!Diags.isIgnored(diag::warn_mismatched_delete_new, SourceLocation())) { 1303 if (ExternalSource) 1304 ExternalSource->ReadMismatchingDeleteExpressions(DeleteExprs); 1305 for (const auto &DeletedFieldInfo : DeleteExprs) { 1306 for (const auto &DeleteExprLoc : DeletedFieldInfo.second) { 1307 AnalyzeDeleteExprMismatch(DeletedFieldInfo.first, DeleteExprLoc.first, 1308 DeleteExprLoc.second); 1309 } 1310 } 1311 } 1312 1313 // Check we've noticed that we're no longer parsing the initializer for every 1314 // variable. If we miss cases, then at best we have a performance issue and 1315 // at worst a rejects-valid bug. 1316 assert(ParsingInitForAutoVars.empty() && 1317 "Didn't unmark var as having its initializer parsed"); 1318 1319 if (!PP.isIncrementalProcessingEnabled()) 1320 TUScope = nullptr; 1321 } 1322 1323 1324 //===----------------------------------------------------------------------===// 1325 // Helper functions. 
1326 //===----------------------------------------------------------------------===// 1327 1328 DeclContext *Sema::getFunctionLevelDeclContext() { 1329 DeclContext *DC = CurContext; 1330 1331 while (true) { 1332 if (isa<BlockDecl>(DC) || isa<EnumDecl>(DC) || isa<CapturedDecl>(DC) || 1333 isa<RequiresExprBodyDecl>(DC)) { 1334 DC = DC->getParent(); 1335 } else if (isa<CXXMethodDecl>(DC) && 1336 cast<CXXMethodDecl>(DC)->getOverloadedOperator() == OO_Call && 1337 cast<CXXRecordDecl>(DC->getParent())->isLambda()) { 1338 DC = DC->getParent()->getParent(); 1339 } 1340 else break; 1341 } 1342 1343 return DC; 1344 } 1345 1346 /// getCurFunctionDecl - If inside of a function body, this returns a pointer 1347 /// to the function decl for the function being parsed. If we're currently 1348 /// in a 'block', this returns the containing context. 1349 FunctionDecl *Sema::getCurFunctionDecl() { 1350 DeclContext *DC = getFunctionLevelDeclContext(); 1351 return dyn_cast<FunctionDecl>(DC); 1352 } 1353 1354 ObjCMethodDecl *Sema::getCurMethodDecl() { 1355 DeclContext *DC = getFunctionLevelDeclContext(); 1356 while (isa<RecordDecl>(DC)) 1357 DC = DC->getParent(); 1358 return dyn_cast<ObjCMethodDecl>(DC); 1359 } 1360 1361 NamedDecl *Sema::getCurFunctionOrMethodDecl() { 1362 DeclContext *DC = getFunctionLevelDeclContext(); 1363 if (isa<ObjCMethodDecl>(DC) || isa<FunctionDecl>(DC)) 1364 return cast<NamedDecl>(DC); 1365 return nullptr; 1366 } 1367 1368 LangAS Sema::getDefaultCXXMethodAddrSpace() const { 1369 if (getLangOpts().OpenCL) 1370 return LangAS::opencl_generic; 1371 return LangAS::Default; 1372 } 1373 1374 void Sema::EmitCurrentDiagnostic(unsigned DiagID) { 1375 // FIXME: It doesn't make sense to me that DiagID is an incoming argument here 1376 // and yet we also use the current diag ID on the DiagnosticsEngine. 
This has 1377 // been made more painfully obvious by the refactor that introduced this 1378 // function, but it is possible that the incoming argument can be 1379 // eliminated. If it truly cannot be (for example, there is some reentrancy 1380 // issue I am not seeing yet), then there should at least be a clarifying 1381 // comment somewhere. 1382 if (Optional<TemplateDeductionInfo*> Info = isSFINAEContext()) { 1383 switch (DiagnosticIDs::getDiagnosticSFINAEResponse( 1384 Diags.getCurrentDiagID())) { 1385 case DiagnosticIDs::SFINAE_Report: 1386 // We'll report the diagnostic below. 1387 break; 1388 1389 case DiagnosticIDs::SFINAE_SubstitutionFailure: 1390 // Count this failure so that we know that template argument deduction 1391 // has failed. 1392 ++NumSFINAEErrors; 1393 1394 // Make a copy of this suppressed diagnostic and store it with the 1395 // template-deduction information. 1396 if (*Info && !(*Info)->hasSFINAEDiagnostic()) { 1397 Diagnostic DiagInfo(&Diags); 1398 (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(), 1399 PartialDiagnostic(DiagInfo, Context.getDiagAllocator())); 1400 } 1401 1402 Diags.setLastDiagnosticIgnored(true); 1403 Diags.Clear(); 1404 return; 1405 1406 case DiagnosticIDs::SFINAE_AccessControl: { 1407 // Per C++ Core Issue 1170, access control is part of SFINAE. 1408 // Additionally, the AccessCheckingSFINAE flag can be used to temporarily 1409 // make access control a part of SFINAE for the purposes of checking 1410 // type traits. 1411 if (!AccessCheckingSFINAE && !getLangOpts().CPlusPlus11) 1412 break; 1413 1414 SourceLocation Loc = Diags.getCurrentDiagLoc(); 1415 1416 // Suppress this diagnostic. 1417 ++NumSFINAEErrors; 1418 1419 // Make a copy of this suppressed diagnostic and store it with the 1420 // template-deduction information. 
1421 if (*Info && !(*Info)->hasSFINAEDiagnostic()) { 1422 Diagnostic DiagInfo(&Diags); 1423 (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(), 1424 PartialDiagnostic(DiagInfo, Context.getDiagAllocator())); 1425 } 1426 1427 Diags.setLastDiagnosticIgnored(true); 1428 Diags.Clear(); 1429 1430 // Now the diagnostic state is clear, produce a C++98 compatibility 1431 // warning. 1432 Diag(Loc, diag::warn_cxx98_compat_sfinae_access_control); 1433 1434 // The last diagnostic which Sema produced was ignored. Suppress any 1435 // notes attached to it. 1436 Diags.setLastDiagnosticIgnored(true); 1437 return; 1438 } 1439 1440 case DiagnosticIDs::SFINAE_Suppress: 1441 // Make a copy of this suppressed diagnostic and store it with the 1442 // template-deduction information; 1443 if (*Info) { 1444 Diagnostic DiagInfo(&Diags); 1445 (*Info)->addSuppressedDiagnostic(DiagInfo.getLocation(), 1446 PartialDiagnostic(DiagInfo, Context.getDiagAllocator())); 1447 } 1448 1449 // Suppress this diagnostic. 1450 Diags.setLastDiagnosticIgnored(true); 1451 Diags.Clear(); 1452 return; 1453 } 1454 } 1455 1456 // Copy the diagnostic printing policy over the ASTContext printing policy. 1457 // TODO: Stop doing that. See: https://reviews.llvm.org/D45093#1090292 1458 Context.setPrintingPolicy(getPrintingPolicy()); 1459 1460 // Emit the diagnostic. 1461 if (!Diags.EmitCurrentDiagnostic()) 1462 return; 1463 1464 // If this is not a note, and we're in a template instantiation 1465 // that is different from the last template instantiation where 1466 // we emitted an error, print a template instantiation 1467 // backtrace. 
1468 if (!DiagnosticIDs::isBuiltinNote(DiagID)) 1469 PrintContextStack(); 1470 } 1471 1472 Sema::SemaDiagnosticBuilder 1473 Sema::Diag(SourceLocation Loc, const PartialDiagnostic &PD, bool DeferHint) { 1474 return Diag(Loc, PD.getDiagID(), DeferHint) << PD; 1475 } 1476 1477 bool Sema::hasUncompilableErrorOccurred() const { 1478 if (getDiagnostics().hasUncompilableErrorOccurred()) 1479 return true; 1480 auto *FD = dyn_cast<FunctionDecl>(CurContext); 1481 if (!FD) 1482 return false; 1483 auto Loc = DeviceDeferredDiags.find(FD); 1484 if (Loc == DeviceDeferredDiags.end()) 1485 return false; 1486 for (auto PDAt : Loc->second) { 1487 if (DiagnosticIDs::isDefaultMappingAsError(PDAt.second.getDiagID())) 1488 return true; 1489 } 1490 return false; 1491 } 1492 1493 // Print notes showing how we can reach FD starting from an a priori 1494 // known-callable function. 1495 static void emitCallStackNotes(Sema &S, FunctionDecl *FD) { 1496 auto FnIt = S.DeviceKnownEmittedFns.find(FD); 1497 while (FnIt != S.DeviceKnownEmittedFns.end()) { 1498 // Respect error limit. 1499 if (S.Diags.hasFatalErrorOccurred()) 1500 return; 1501 DiagnosticBuilder Builder( 1502 S.Diags.Report(FnIt->second.Loc, diag::note_called_by)); 1503 Builder << FnIt->second.FD; 1504 FnIt = S.DeviceKnownEmittedFns.find(FnIt->second.FD); 1505 } 1506 } 1507 1508 namespace { 1509 1510 /// Helper class that emits deferred diagnostic messages if an entity directly 1511 /// or indirectly using the function that causes the deferred diagnostic 1512 /// messages is known to be emitted. 1513 /// 1514 /// During parsing of AST, certain diagnostic messages are recorded as deferred 1515 /// diagnostics since it is unknown whether the functions containing such 1516 /// diagnostics will be emitted. A list of potentially emitted functions and 1517 /// variables that may potentially trigger emission of functions are also 1518 /// recorded. 
DeferredDiagnosticsEmitter recursively visits used functions 1519 /// by each function to emit deferred diagnostics. 1520 /// 1521 /// During the visit, certain OpenMP directives or initializer of variables 1522 /// with certain OpenMP attributes will cause subsequent visiting of any 1523 /// functions enter a state which is called OpenMP device context in this 1524 /// implementation. The state is exited when the directive or initializer is 1525 /// exited. This state can change the emission states of subsequent uses 1526 /// of functions. 1527 /// 1528 /// Conceptually the functions or variables to be visited form a use graph 1529 /// where the parent node uses the child node. At any point of the visit, 1530 /// the tree nodes traversed from the tree root to the current node form a use 1531 /// stack. The emission state of the current node depends on two factors: 1532 /// 1. the emission state of the root node 1533 /// 2. whether the current node is in OpenMP device context 1534 /// If the function is decided to be emitted, its contained deferred diagnostics 1535 /// are emitted, together with the information about the use stack. 1536 /// 1537 class DeferredDiagnosticsEmitter 1538 : public UsedDeclVisitor<DeferredDiagnosticsEmitter> { 1539 public: 1540 typedef UsedDeclVisitor<DeferredDiagnosticsEmitter> Inherited; 1541 1542 // Whether the function is already in the current use-path. 1543 llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> InUsePath; 1544 1545 // The current use-path. 1546 llvm::SmallVector<CanonicalDeclPtr<FunctionDecl>, 4> UsePath; 1547 1548 // Whether the visiting of the function has been done. Done[0] is for the 1549 // case not in OpenMP device context. Done[1] is for the case in OpenMP 1550 // device context. We need two sets because diagnostics emission may be 1551 // different depending on whether it is in OpenMP device context. 
  // Functions whose bodies have already been fully visited. Index 0 is used
  // for visits made outside an OpenMP device context, index 1 for visits made
  // inside one (see checkFunc), so a function may be traversed once per mode.
  llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> DoneMap[2];

  // Emission state of the root node of the current use graph.
  bool ShouldEmitRootNode;

  // Current OpenMP device context level. It is initialized to 0 and each
  // entering of device context increases it by 1 and each exit decreases
  // it by 1. Non-zero value indicates it is currently in device context.
  unsigned InOMPDeviceContext;

  DeferredDiagnosticsEmitter(Sema &S)
      : Inherited(S), ShouldEmitRootNode(false), InOMPDeviceContext(0) {}

  // Track entry/exit of an OpenMP target region while visiting its body.
  void VisitOMPTargetDirective(OMPTargetDirective *Node) {
    ++InOMPDeviceContext;
    Inherited::VisitOMPTargetDirective(Node);
    --InOMPDeviceContext;
  }

  // Dispatch a used declaration: functions go through checkFunc; variable
  // uses are ignored here (file-scope variables are handled via checkVar
  // from checkRecordedDecl instead).
  void visitUsedDecl(SourceLocation Loc, Decl *D) {
    if (isa<VarDecl>(D))
      return;
    if (auto *FD = dyn_cast<FunctionDecl>(D))
      checkFunc(Loc, FD);
    else
      Inherited::visitUsedDecl(Loc, D);
  }

  // Visit the initializer of a file-scope variable, entering a device
  // context if the variable is declared for device emission via
  // 'declare target'.
  void checkVar(VarDecl *VD) {
    assert(VD->isFileVarDecl() &&
           "Should only check file-scope variables");
    if (auto *Init = VD->getInit()) {
      auto DevTy = OMPDeclareTargetDeclAttr::getDeviceType(VD);
      bool IsDev = DevTy && (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost ||
                             *DevTy == OMPDeclareTargetDeclAttr::DT_Any);
      if (IsDev)
        ++InOMPDeviceContext;
      this->Visit(Init);
      if (IsDev)
        --InOMPDeviceContext;
    }
  }

  // Visit a function use: possibly emit its deferred diagnostics, record
  // the caller that caused it to be emitted, and recurse into its body
  // (at most once per device-context mode, guarded by DoneMap/InUsePath).
  void checkFunc(SourceLocation Loc, FunctionDecl *FD) {
    auto &Done = DoneMap[InOMPDeviceContext > 0 ? 1 : 0];
    FunctionDecl *Caller = UsePath.empty() ? nullptr : UsePath.back();
    if ((!ShouldEmitRootNode && !S.getLangOpts().OpenMP && !Caller) ||
        S.shouldIgnoreInHostDeviceCheck(FD) || InUsePath.count(FD))
      return;
    // Finalize analysis of OpenMP-specific constructs.
    if (Caller && S.LangOpts.OpenMP && UsePath.size() == 1 &&
        (ShouldEmitRootNode || InOMPDeviceContext))
      S.finalizeOpenMPDelayedAnalysis(Caller, FD, Loc);
    if (Caller)
      S.DeviceKnownEmittedFns[FD] = {Caller, Loc};
    // Always emit deferred diagnostics for the direct users. This does not
    // lead to explosion of diagnostics since each user is visited at most
    // twice.
    if (ShouldEmitRootNode || InOMPDeviceContext)
      emitDeferredDiags(FD, Caller);
    // Do not revisit a function if the function body has been completely
    // visited before.
    if (!Done.insert(FD).second)
      return;
    InUsePath.insert(FD);
    UsePath.push_back(FD);
    // Note: this 'S' (the body statement) shadows the Sema reference member
    // for the scope of the if.
    if (auto *S = FD->getBody()) {
      this->Visit(S);
    }
    UsePath.pop_back();
    InUsePath.erase(FD);
  }

  // Entry point for one recorded declaration: a function becomes the root
  // of a use graph (its final emission status decides ShouldEmitRootNode);
  // anything else is expected to be a file-scope variable.
  void checkRecordedDecl(Decl *D) {
    if (auto *FD = dyn_cast<FunctionDecl>(D)) {
      ShouldEmitRootNode = S.getEmissionStatus(FD, /*Final=*/true) ==
                           Sema::FunctionEmissionStatus::Emitted;
      checkFunc(SourceLocation(), FD);
    } else
      checkVar(cast<VarDecl>(D));
  }

  // Emit any deferred diagnostics for FD
  void emitDeferredDiags(FunctionDecl *FD, bool ShowCallStack) {
    auto It = S.DeviceDeferredDiags.find(FD);
    if (It == S.DeviceDeferredDiags.end())
      return;
    bool HasWarningOrError = false;
    bool FirstDiag = true;
    for (PartialDiagnosticAt &PDAt : It->second) {
      // Respect error limit.
      if (S.Diags.hasFatalErrorOccurred())
        return;
      const SourceLocation &Loc = PDAt.first;
      const PartialDiagnostic &PD = PDAt.second;
      HasWarningOrError |=
          S.getDiagnostics().getDiagnosticLevel(PD.getDiagID(), Loc) >=
          DiagnosticsEngine::Warning;
      {
        // Scope the builder so the diagnostic is flushed before the
        // call-stack note below.
        DiagnosticBuilder Builder(S.Diags.Report(Loc, PD.getDiagID()));
        PD.Emit(Builder);
      }
      // Emit the note on the first diagnostic in case too many diagnostics
      // cause the note not emitted.
      if (FirstDiag && HasWarningOrError && ShowCallStack) {
        emitCallStackNotes(S, FD);
        FirstDiag = false;
      }
    }
  }
};
} // namespace

/// Emit all deferred device diagnostics that have become emittable: pull in
/// any externally-recorded decls to check, then walk the use graph rooted at
/// each recorded decl.
void Sema::emitDeferredDiags() {
  if (ExternalSource)
    ExternalSource->ReadDeclsToCheckForDeferredDiags(
        DeclsToCheckForDeferredDiags);

  // Nothing to do when there are no deferred diags (outside OpenMP) or no
  // roots to walk.
  if ((DeviceDeferredDiags.empty() && !LangOpts.OpenMP) ||
      DeclsToCheckForDeferredDiags.empty())
    return;

  DeferredDiagnosticsEmitter DDE(*this);
  for (auto D : DeclsToCheckForDeferredDiags)
    DDE.checkRecordedDecl(D);
}

// In CUDA, there are some constructs which may appear in semantically-valid
// code, but trigger errors if we ever generate code for the function in which
// they appear. Essentially every construct you're not allowed to use on the
// device falls into this category, because you are allowed to use these
// constructs in a __host__ __device__ function, but only if that function is
// never codegen'ed on the device.
//
// To handle semantic checking for these constructs, we keep track of the set of
// functions we know will be emitted, either because we could tell a priori that
// they would be emitted, or because they were transitively called by a
// known-emitted function.
//
// We also keep a partial call graph of which not-known-emitted functions call
// which other not-known-emitted functions.
//
// When we see something which is illegal if the current function is emitted
// (usually by way of CUDADiagIfDeviceCode, CUDADiagIfHostCode, or
// CheckCUDACall), we first check if the current function is known-emitted. If
// so, we immediately output the diagnostic.
//
// Otherwise, we "defer" the diagnostic. It sits in Sema::DeviceDeferredDiags
// until we discover that the function is known-emitted, at which point we take
// it out of this map and emit the diagnostic.
/// Construct a diagnostic builder of the given kind: K_Nop emits nothing,
/// K_Immediate / K_ImmediateWithCallStack report right away, and K_Deferred
/// records a partial diagnostic against \p Fn in DeviceDeferredDiags to be
/// emitted later (if Fn turns out to be emitted).
Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(Kind K, SourceLocation Loc,
                                                   unsigned DiagID,
                                                   FunctionDecl *Fn, Sema &S)
    : S(S), Loc(Loc), DiagID(DiagID), Fn(Fn),
      ShowCallStack(K == K_ImmediateWithCallStack || K == K_Deferred) {
  switch (K) {
  case K_Nop:
    break;
  case K_Immediate:
  case K_ImmediateWithCallStack:
    ImmediateDiag.emplace(
        ImmediateDiagBuilder(S.Diags.Report(Loc, DiagID), S, DiagID));
    break;
  case K_Deferred:
    assert(Fn && "Must have a function to attach the deferred diag to.");
    auto &Diags = S.DeviceDeferredDiags[Fn];
    // Remember the index of the recorded partial diagnostic so operator<<
    // overloads can append arguments to it later.
    PartialDiagId.emplace(Diags.size());
    Diags.emplace_back(Loc, S.PDiag(DiagID));
    break;
  }
}

/// Move constructor: takes over the pending diagnostic state and disarms the
/// source so only one builder ever emits.
Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D)
    : S(D.S), Loc(D.Loc), DiagID(D.DiagID), Fn(D.Fn),
      ShowCallStack(D.ShowCallStack), ImmediateDiag(D.ImmediateDiag),
      PartialDiagId(D.PartialDiagId) {
  // Clean the previous diagnostics.
  D.ShowCallStack = false;
  D.ImmediateDiag.reset();
  D.PartialDiagId.reset();
}

Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() {
  if (ImmediateDiag) {
    // Emit our diagnostic and, if it was a warning or error, output a callstack
    // if Fn isn't a priori known-emitted.
    bool IsWarningOrError = S.getDiagnostics().getDiagnosticLevel(
                                DiagID, Loc) >= DiagnosticsEngine::Warning;
    ImmediateDiag.reset(); // Emit the immediate diag.
    if (IsWarningOrError && ShowCallStack)
      emitCallStackNotes(S, Fn);
  } else {
    assert((!PartialDiagId || ShowCallStack) &&
           "Must always show call stack for deferred diags.");
  }
}

/// Produce a diagnostic builder appropriate for offloading targets: routes
/// through the OpenMP, CUDA, or SYCL device/host diagnostic machinery when
/// those language modes are active, and emits immediately otherwise.
/// If \p FD is null, the current function declaration is used.
Sema::SemaDiagnosticBuilder
Sema::targetDiag(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD) {
  FD = FD ? FD : getCurFunctionDecl();
  if (LangOpts.OpenMP)
    return LangOpts.OpenMPIsDevice ? diagIfOpenMPDeviceCode(Loc, DiagID, FD)
                                   : diagIfOpenMPHostCode(Loc, DiagID, FD);
  if (getLangOpts().CUDA)
    return getLangOpts().CUDAIsDevice ? CUDADiagIfDeviceCode(Loc, DiagID)
                                      : CUDADiagIfHostCode(Loc, DiagID);

  if (getLangOpts().SYCLIsDevice)
    return SYCLDiagIfDeviceCode(Loc, DiagID);

  return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc, DiagID,
                               FD, *this);
}

/// Emit a diagnostic, possibly deferring it under CUDA -fgpu-defer-diag.
/// \param DeferHint When true, allows even default-error diagnostics to be
/// deferred; otherwise only non-error diagnostics are deferrable.
Sema::SemaDiagnosticBuilder Sema::Diag(SourceLocation Loc, unsigned DiagID,
                                       bool DeferHint) {
  bool IsError = Diags.getDiagnosticIDs()->isDefaultMappingAsError(DiagID);
  bool ShouldDefer = getLangOpts().CUDA && LangOpts.GPUDeferDiag &&
                     DiagnosticIDs::isDeferrable(DiagID) &&
                     (DeferHint || !IsError);
  // Track whether the most recent error was emitted immediately (only
  // updated for diagnostics whose default mapping is an error).
  auto SetIsLastErrorImmediate = [&](bool Flag) {
    if (IsError)
      IsLastErrorImmediate = Flag;
  };
  if (!ShouldDefer) {
    SetIsLastErrorImmediate(true);
    return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc,
                                 DiagID, getCurFunctionDecl(), *this);
  }

  SemaDiagnosticBuilder DB = getLangOpts().CUDAIsDevice
                                 ? CUDADiagIfDeviceCode(Loc, DiagID)
                                 : CUDADiagIfHostCode(Loc, DiagID);
  SetIsLastErrorImmediate(DB.isImmediate());
  return DB;
}

/// Check that the type of \p D (and, for function types, its parameter and
/// return types) is supported by the current offloading target; diagnose
/// via targetDiag otherwise.
void Sema::checkDeviceDecl(ValueDecl *D, SourceLocation Loc) {
  if (isUnevaluatedContext())
    return;

  Decl *C = cast<Decl>(getCurLexicalContext());

  // Memcpy operations for structs containing a member with unsupported type
  // are ok, though.
  if (const auto *MD = dyn_cast<CXXMethodDecl>(C)) {
    if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
        MD->isTrivial())
      return;

    if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(MD))
      if (Ctor->isCopyOrMoveConstructor() && Ctor->isTrivial())
        return;
  }

  // Try to associate errors with the lexical context, if that is a function, or
  // the value declaration otherwise.
  FunctionDecl *FD =
      isa<FunctionDecl>(C) ? cast<FunctionDecl>(C) : dyn_cast<FunctionDecl>(D);
  auto CheckType = [&](QualType Ty) {
    if (Ty->isDependentType())
      return;

    if (Ty->isExtIntType()) {
      if (!Context.getTargetInfo().hasExtIntType()) {
        targetDiag(Loc, diag::err_device_unsupported_type, FD)
            << D << false /*show bit size*/ << 0 /*bitsize*/
            << Ty << Context.getTargetInfo().getTriple().str();
      }
      return;
    }

    // _Float16, 128-bit floating point, and 128-bit integers are only
    // allowed when the target supports them.
    if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) ||
        ((Ty->isFloat128Type() ||
          (Ty->isRealFloatingType() && Context.getTypeSize(Ty) == 128)) &&
         !Context.getTargetInfo().hasFloat128Type()) ||
        (Ty->isIntegerType() && Context.getTypeSize(Ty) == 128 &&
         !Context.getTargetInfo().hasInt128Type())) {
      if (targetDiag(Loc, diag::err_device_unsupported_type, FD)
          << D << true /*show bit size*/
          << static_cast<unsigned>(Context.getTypeSize(Ty)) << Ty
          << Context.getTargetInfo().getTriple().str())
        D->setInvalidDecl();
      targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
    }
  };

  QualType Ty = D->getType();
  CheckType(Ty);

  if (const auto *FPTy = dyn_cast<FunctionProtoType>(Ty)) {
    for (const auto &ParamTy : FPTy->param_types())
      CheckType(ParamTy);
    CheckType(FPTy->getReturnType());
  }
  if (const auto *FNPTy = dyn_cast<FunctionNoProtoType>(Ty))
    CheckType(FNPTy->getReturnType());
}

/// Looks through the macro-expansion chain for the given
/// location, looking for a macro expansion with the given name.
/// If one is found, returns true and sets the location to that
/// expansion loc.
bool Sema::findMacroSpelling(SourceLocation &locref, StringRef name) {
  SourceLocation loc = locref;
  if (!loc.isMacroID()) return false;

  // There's no good way right now to look at the intermediate
  // expansions, so just jump to the expansion location.
  loc = getSourceManager().getExpansionLoc(loc);

  // If that's written with the name, stop here.
  SmallString<16> buffer;
  if (getPreprocessor().getSpelling(loc, buffer) == name) {
    locref = loc;
    return true;
  }
  return false;
}

/// Determines the active Scope associated with the given declaration
/// context.
///
/// This routine maps a declaration context to the active Scope object that
/// represents that declaration context in the parser. It is typically used
/// from "scope-less" code (e.g., template instantiation, lazy creation of
/// declarations) that injects a name for name-lookup purposes and, therefore,
/// must update the Scope.
///
/// \returns The scope corresponding to the given declaraion context, or NULL
/// if no such scope is open.
Scope *Sema::getScopeForContext(DeclContext *Ctx) {

  if (!Ctx)
    return nullptr;

  Ctx = Ctx->getPrimaryContext();
  for (Scope *S = getCurScope(); S; S = S->getParent()) {
    // Ignore scopes that cannot have declarations. This is important for
    // out-of-line definitions of static class members.
    if (S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope))
      if (DeclContext *Entity = S->getEntity())
        if (Ctx == Entity->getPrimaryContext())
          return S;
  }

  return nullptr;
}

/// Enter a new function scope
void Sema::PushFunctionScope() {
  if (FunctionScopes.empty() && CachedFunctionScope) {
    // Use CachedFunctionScope to avoid allocating memory when possible.
    CachedFunctionScope->Clear();
    FunctionScopes.push_back(CachedFunctionScope.release());
  } else {
    FunctionScopes.push_back(new FunctionScopeInfo(getDiagnostics()));
  }
  if (LangOpts.OpenMP)
    pushOpenMPFunctionRegion();
}

/// Enter a new block scope for the given block declaration.
void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) {
  FunctionScopes.push_back(new BlockScopeInfo(getDiagnostics(),
                                              BlockScope, Block));
}

/// Enter a new lambda scope and return it so the caller can populate it.
LambdaScopeInfo *Sema::PushLambdaScope() {
  LambdaScopeInfo *const LSI = new LambdaScopeInfo(getDiagnostics());
  FunctionScopes.push_back(LSI);
  return LSI;
}

/// Record the template parameter depth used for invented (auto) template
/// parameters of the current lambda.
void Sema::RecordParsingTemplateParameterDepth(unsigned Depth) {
  if (LambdaScopeInfo *const LSI = getCurLambda()) {
    LSI->AutoTemplateParameterDepth = Depth;
    return;
  }
  llvm_unreachable(
      "Remove assertion if intentionally called in a non-lambda context.");
}

// Check that the type of the VarDecl has an accessible copy constructor and
// resolve its destructor's exception specification.
/// Check that the type of \p VD (an escaping __block variable) has an
/// accessible copy constructor by building its block copy-initializer, and
/// resolve its destructor's exception specification for IRGen.
static void checkEscapingByref(VarDecl *VD, Sema &S) {
  QualType T = VD->getType();
  EnterExpressionEvaluationContext scope(
      S, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
  SourceLocation Loc = VD->getLocation();
  Expr *VarRef =
      new (S.Context) DeclRefExpr(S.Context, VD, false, T, VK_LValue, Loc);
  ExprResult Result = S.PerformMoveOrCopyInitialization(
      InitializedEntity::InitializeBlock(Loc, T, false), VD, VD->getType(),
      VarRef, /*AllowNRVO=*/true);
  if (!Result.isInvalid()) {
    Result = S.MaybeCreateExprWithCleanups(Result);
    Expr *Init = Result.getAs<Expr>();
    S.Context.setBlockVarCopyInit(VD, Init, S.canThrow(Init));
  }

  // The destructor's exception specification is needed when IRGen generates
  // block copy/destroy functions. Resolve it here.
  if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
    if (CXXDestructorDecl *DD = RD->getDestructor()) {
      auto *FPT = DD->getType()->getAs<FunctionProtoType>();
      S.ResolveExceptionSpec(Loc, FPT);
    }
}

/// Mark __block variables captured by escaping blocks in \p FSI as escaping,
/// check captured non-trivial C union types, and build copy-initializers for
/// escaping __block variables of class type.
static void markEscapingByrefs(const FunctionScopeInfo &FSI, Sema &S) {
  // Set the EscapingByref flag of __block variables captured by
  // escaping blocks.
  for (const BlockDecl *BD : FSI.Blocks) {
    for (const BlockDecl::Capture &BC : BD->captures()) {
      VarDecl *VD = BC.getVariable();
      if (VD->hasAttr<BlocksAttr>()) {
        // Nothing to do if this is a __block variable captured by a
        // non-escaping block.
        if (BD->doesNotEscape())
          continue;
        VD->setEscapingByref();
      }
      // Check whether the captured variable is or contains an object of
      // non-trivial C union type.
      QualType CapType = BC.getVariable()->getType();
      if (CapType.hasNonTrivialToPrimitiveDestructCUnion() ||
          CapType.hasNonTrivialToPrimitiveCopyCUnion())
        S.checkNonTrivialCUnion(BC.getVariable()->getType(),
                                BD->getCaretLocation(),
                                Sema::NTCUC_BlockCapture,
                                Sema::NTCUK_Destruct|Sema::NTCUK_Copy);
    }
  }

  for (VarDecl *VD : FSI.ByrefBlockVars) {
    // __block variables might require us to capture a copy-initializer.
    if (!VD->isEscapingByref())
      continue;
    // It's currently invalid to ever have a __block variable with an
    // array type; should we diagnose that here?
    // Regardless, we don't want to ignore array nesting when
    // constructing this copy.
    if (VD->getType()->isStructureOrClassType())
      checkEscapingByref(VD, S);
  }
}

/// Pop a function (or block or lambda or captured region) scope from the stack.
///
/// \param WP The warning policy to use for CFG-based warnings, or null if such
/// warnings should not be produced.
/// \param D The declaration corresponding to this function scope, if producing
/// CFG-based warnings.
/// \param BlockType The type of the block expression, if D is a BlockDecl.
Sema::PoppedFunctionScopePtr
Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP,
                           const Decl *D, QualType BlockType) {
  assert(!FunctionScopes.empty() && "mismatched push/pop!");

  markEscapingByrefs(*FunctionScopes.back(), *this);

  PoppedFunctionScopePtr Scope(FunctionScopes.pop_back_val(),
                               PoppedFunctionScopeDeleter(this));

  if (LangOpts.OpenMP)
    popOpenMPFunctionRegion(Scope.get());

  // Issue any analysis-based warnings.
  if (WP && D)
    AnalysisWarnings.IssueWarnings(*WP, Scope.get(), D, BlockType);
  else
    // No CFG analysis requested: flush the possibly-unreachable diagnostics
    // unconditionally.
    for (const auto &PUD : Scope->PossiblyUnreachableDiags)
      Diag(PUD.Loc, PUD.PD);

  return Scope;
}

void Sema::PoppedFunctionScopeDeleter::
operator()(sema::FunctionScopeInfo *Scope) const {
  // Stash the function scope for later reuse if it's for a normal function.
  if (Scope->isPlainFunction() && !Self->CachedFunctionScope)
    Self->CachedFunctionScope.reset(Scope);
  else
    delete Scope;
}

/// Enter a compound-statement scope in the current function scope.
void Sema::PushCompoundScope(bool IsStmtExpr) {
  getCurFunction()->CompoundScopes.push_back(CompoundScopeInfo(IsStmtExpr));
}

/// Leave the innermost compound-statement scope.
void Sema::PopCompoundScope() {
  FunctionScopeInfo *CurFunction = getCurFunction();
  assert(!CurFunction->CompoundScopes.empty() && "mismatched push/pop");

  CurFunction->CompoundScopes.pop_back();
}

/// Determine whether any errors occurred within this function/method/
/// block.
bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const {
  return getCurFunction()->hasUnrecoverableErrorOccurred();
}

void Sema::setFunctionHasBranchIntoScope() {
  if (!FunctionScopes.empty())
    FunctionScopes.back()->setHasBranchIntoScope();
}

void Sema::setFunctionHasBranchProtectedScope() {
  if (!FunctionScopes.empty())
    FunctionScopes.back()->setHasBranchProtectedScope();
}

void Sema::setFunctionHasIndirectGoto() {
  if (!FunctionScopes.empty())
    FunctionScopes.back()->setHasIndirectGoto();
}

/// Return the innermost function scope if it is a block scope that still
/// encloses the current context, null otherwise.
BlockScopeInfo *Sema::getCurBlock() {
  if (FunctionScopes.empty())
    return nullptr;

  auto CurBSI = dyn_cast<BlockScopeInfo>(FunctionScopes.back());
  if (CurBSI && CurBSI->TheDecl &&
      !CurBSI->TheDecl->Encloses(CurContext)) {
    // We have switched contexts due to template instantiation.
    assert(!CodeSynthesisContexts.empty());
    return nullptr;
  }

  return CurBSI;
}

/// Return the innermost non-block function scope, or null if none exists.
FunctionScopeInfo *Sema::getEnclosingFunction() const {
  if (FunctionScopes.empty())
    return nullptr;

  for (int e = FunctionScopes.size() - 1; e >= 0; --e) {
    if (isa<sema::BlockScopeInfo>(FunctionScopes[e]))
      continue;
    return FunctionScopes[e];
  }
  return nullptr;
}

/// Return the innermost lambda scope, or null if there is none or the
/// context has been switched by template instantiation.
LambdaScopeInfo *Sema::getEnclosingLambda() const {
  for (auto *Scope : llvm::reverse(FunctionScopes)) {
    if (auto *LSI = dyn_cast<sema::LambdaScopeInfo>(Scope)) {
      if (LSI->Lambda && !LSI->Lambda->Encloses(CurContext)) {
        // We have switched contexts due to template instantiation.
        // FIXME: We should swap out the FunctionScopes during code synthesis
        // so that we don't need to check for this.
        assert(!CodeSynthesisContexts.empty());
        return nullptr;
      }
      return LSI;
    }
  }
  return nullptr;
}

/// Return the current function scope if it is a lambda, or null.
/// \param IgnoreNonLambdaCapturingScope When true, skip over intervening
/// capturing scopes that are not lambdas (e.g. captured regions).
LambdaScopeInfo *Sema::getCurLambda(bool IgnoreNonLambdaCapturingScope) {
  if (FunctionScopes.empty())
    return nullptr;

  auto I = FunctionScopes.rbegin();
  if (IgnoreNonLambdaCapturingScope) {
    auto E = FunctionScopes.rend();
    while (I != E && isa<CapturingScopeInfo>(*I) && !isa<LambdaScopeInfo>(*I))
      ++I;
    if (I == E)
      return nullptr;
  }
  auto *CurLSI = dyn_cast<LambdaScopeInfo>(*I);
  if (CurLSI && CurLSI->Lambda &&
      !CurLSI->Lambda->Encloses(CurContext)) {
    // We have switched contexts due to template instantiation.
    assert(!CodeSynthesisContexts.empty());
    return nullptr;
  }

  return CurLSI;
}

// We have a generic lambda if we parsed auto parameters, or we have
// an associated template parameter list.
LambdaScopeInfo *Sema::getCurGenericLambda() {
  if (LambdaScopeInfo *LSI = getCurLambda()) {
    return (LSI->TemplateParams.size() ||
            LSI->GLTemplateParameterList) ? LSI : nullptr;
  }
  return nullptr;
}


/// Record a raw comment in the ASTContext, and diagnose "almost trailing"
/// doc comments (e.g. "//<") with a fix-it to the proper Doxygen marker.
void Sema::ActOnComment(SourceRange Comment) {
  if (!LangOpts.RetainCommentsFromSystemHeaders &&
      SourceMgr.isInSystemHeader(Comment.getBegin()))
    return;
  RawComment RC(SourceMgr, Comment, LangOpts.CommentOpts, false);
  if (RC.isAlmostTrailingComment()) {
    SourceRange MagicMarkerRange(Comment.getBegin(),
                                 Comment.getBegin().getLocWithOffset(3));
    StringRef MagicMarkerText;
    switch (RC.getKind()) {
    case RawComment::RCK_OrdinaryBCPL:
      MagicMarkerText = "///<";
      break;
    case RawComment::RCK_OrdinaryC:
      MagicMarkerText = "/**<";
      break;
    default:
      llvm_unreachable("if this is an almost Doxygen comment, "
                       "it should be ordinary");
    }
    Diag(Comment.getBegin(), diag::warn_not_a_doxygen_trailing_member_comment) <<
      FixItHint::CreateReplacement(MagicMarkerRange, MagicMarkerText);
  }
  Context.addComment(RC);
}

// Pin this vtable to this file.
ExternalSemaSource::~ExternalSemaSource() {}
char ExternalSemaSource::ID;

// Default no-op implementations for external source callbacks.
void ExternalSemaSource::ReadMethodPool(Selector Sel) { }
void ExternalSemaSource::updateOutOfDateSelector(Selector Sel) { }

void ExternalSemaSource::ReadKnownNamespaces(
    SmallVectorImpl<NamespaceDecl *> &Namespaces) {
}

void ExternalSemaSource::ReadUndefinedButUsed(
    llvm::MapVector<NamedDecl *, SourceLocation> &Undefined) {}

void ExternalSemaSource::ReadMismatchingDeleteExpressions(llvm::MapVector<
    FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> &) {}

/// Figure out if an expression could be turned into a call.
///
/// Use this when trying to recover from an error where the programmer may have
/// written just the name of a function instead of actually calling it.
///
/// \param E - The expression to examine.
/// \param ZeroArgCallReturnTy - If the expression can be turned into a call
/// with no arguments, this parameter is set to the type returned by such a
/// call; otherwise, it is set to an empty QualType.
/// \param OverloadSet - If the expression is an overloaded function
/// name, this parameter is populated with the decls of the various overloads.
bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
                         UnresolvedSetImpl &OverloadSet) {
  ZeroArgCallReturnTy = QualType();
  OverloadSet.clear();

  const OverloadExpr *Overloads = nullptr;
  bool IsMemExpr = false;
  if (E.getType() == Context.OverloadTy) {
    OverloadExpr::FindResult FR = OverloadExpr::find(const_cast<Expr*>(&E));

    // Ignore overloads that are pointer-to-member constants.
    if (FR.HasFormOfMemberPointer)
      return false;

    Overloads = FR.Expression;
  } else if (E.getType() == Context.BoundMemberTy) {
    Overloads = dyn_cast<UnresolvedMemberExpr>(E.IgnoreParens());
    IsMemExpr = true;
  }

  bool Ambiguous = false;
  bool IsMV = false;

  if (Overloads) {
    for (OverloadExpr::decls_iterator it = Overloads->decls_begin(),
         DeclsEnd = Overloads->decls_end(); it != DeclsEnd; ++it) {
      OverloadSet.addDecl(*it);

      // Check whether the function is a non-template, non-member which takes no
      // arguments.
      if (IsMemExpr)
        continue;
      if (const FunctionDecl *OverloadDecl
            = dyn_cast<FunctionDecl>((*it)->getUnderlyingDecl())) {
        if (OverloadDecl->getMinRequiredArguments() == 0) {
          // Multiple zero-argument candidates make the call ambiguous,
          // except when they belong to one cpu_dispatch/cpu_specific
          // multiversioned set.
          if (!ZeroArgCallReturnTy.isNull() && !Ambiguous &&
              (!IsMV || !(OverloadDecl->isCPUDispatchMultiVersion() ||
                          OverloadDecl->isCPUSpecificMultiVersion()))) {
            ZeroArgCallReturnTy = QualType();
            Ambiguous = true;
          } else {
            ZeroArgCallReturnTy = OverloadDecl->getReturnType();
            IsMV = OverloadDecl->isCPUDispatchMultiVersion() ||
                   OverloadDecl->isCPUSpecificMultiVersion();
          }
        }
      }
    }

    // If it's not a member, use better machinery to try to resolve the call
    if (!IsMemExpr)
      return !ZeroArgCallReturnTy.isNull();
  }

  // Attempt to call the member with no arguments - this will correctly handle
  // member templates with defaults/deduction of template arguments, overloads
  // with default arguments, etc.
  if (IsMemExpr && !E.isTypeDependent()) {
    Sema::TentativeAnalysisScope Trap(*this);
    ExprResult R = BuildCallToMemberFunction(nullptr, &E, SourceLocation(),
                                             None, SourceLocation());
    if (R.isUsable()) {
      ZeroArgCallReturnTy = R.get()->getType();
      return true;
    }
    return false;
  }

  if (const DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E.IgnoreParens())) {
    if (const FunctionDecl *Fun = dyn_cast<FunctionDecl>(DeclRef->getDecl())) {
      if (Fun->getMinRequiredArguments() == 0)
        ZeroArgCallReturnTy = Fun->getReturnType();
      return true;
    }
  }

  // We don't have an expression that's convenient to get a FunctionDecl from,
  // but we can at least check if the type is "function of 0 arguments".
  QualType ExprTy = E.getType();
  const FunctionType *FunTy = nullptr;
  QualType PointeeTy = ExprTy->getPointeeType();
  if (!PointeeTy.isNull())
    FunTy = PointeeTy->getAs<FunctionType>();
  if (!FunTy)
    FunTy = ExprTy->getAs<FunctionType>();

  if (const FunctionProtoType *FPT =
      dyn_cast_or_null<FunctionProtoType>(FunTy)) {
    if (FPT->getNumParams() == 0)
      ZeroArgCallReturnTy = FunTy->getReturnType();
    return true;
  }
  return false;
}

/// Give notes for a set of overloads.
///
/// A companion to tryExprAsCall. In cases when the name that the programmer
/// wrote was an overloaded function, we may be able to make some guesses about
/// plausible overloads based on their return types; such guesses can be handed
/// off to this method to be emitted as notes.
///
/// \param Overloads - The overloads to note.
/// \param FinalNoteLoc - If we've suppressed printing some overloads due to
/// -fshow-overloads=best, this is the location to attach to the note about too
/// many candidates. Typically this will be the location of the original
/// ill-formed expression.
static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads,
                          const SourceLocation FinalNoteLoc) {
  unsigned ShownOverloads = 0;
  unsigned SuppressedOverloads = 0;
  for (UnresolvedSetImpl::iterator It = Overloads.begin(),
       DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
    if (ShownOverloads >= S.Diags.getNumOverloadCandidatesToShow()) {
      ++SuppressedOverloads;
      continue;
    }

    NamedDecl *Fn = (*It)->getUnderlyingDecl();
    // Don't print overloads for non-default multiversioned functions.
    if (const auto *FD = Fn->getAsFunction()) {
      if (FD->isMultiVersion() && FD->hasAttr<TargetAttr>() &&
          !FD->getAttr<TargetAttr>()->isDefaultVersion())
        continue;
    }
    S.Diag(Fn->getLocation(), diag::note_possible_target_of_call);
    ++ShownOverloads;
  }

  S.Diags.overloadCandidatesShown(ShownOverloads);

  if (SuppressedOverloads)
    S.Diag(FinalNoteLoc, diag::note_ovl_too_many_candidates)
        << SuppressedOverloads;
}

/// Note only the overloads whose return type passes \p IsPlausibleResult
/// (or all of them when no predicate is given).
static void notePlausibleOverloads(Sema &S, SourceLocation Loc,
                                   const UnresolvedSetImpl &Overloads,
                                   bool (*IsPlausibleResult)(QualType)) {
  if (!IsPlausibleResult)
    return noteOverloads(S, Overloads, Loc);

  UnresolvedSet<2> PlausibleOverloads;
  for (OverloadExpr::decls_iterator It = Overloads.begin(),
       DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
    const FunctionDecl *OverloadDecl = cast<FunctionDecl>(*It);
    QualType OverloadResultTy = OverloadDecl->getReturnType();
    if (IsPlausibleResult(OverloadResultTy))
      PlausibleOverloads.addDecl(It.getDecl());
  }
  noteOverloads(S, PlausibleOverloads, Loc);
}

/// Determine whether the given expression can be called by just
/// putting parentheses after it. Notably, expressions with unary
/// operators can't be because the unary operator will start parsing
/// outside the call.
2359 static bool IsCallableWithAppend(Expr *E) { 2360 E = E->IgnoreImplicit(); 2361 return (!isa<CStyleCastExpr>(E) && 2362 !isa<UnaryOperator>(E) && 2363 !isa<BinaryOperator>(E) && 2364 !isa<CXXOperatorCallExpr>(E)); 2365 } 2366 2367 static bool IsCPUDispatchCPUSpecificMultiVersion(const Expr *E) { 2368 if (const auto *UO = dyn_cast<UnaryOperator>(E)) 2369 E = UO->getSubExpr(); 2370 2371 if (const auto *ULE = dyn_cast<UnresolvedLookupExpr>(E)) { 2372 if (ULE->getNumDecls() == 0) 2373 return false; 2374 2375 const NamedDecl *ND = *ULE->decls_begin(); 2376 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) 2377 return FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion(); 2378 } 2379 return false; 2380 } 2381 2382 bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, 2383 bool ForceComplain, 2384 bool (*IsPlausibleResult)(QualType)) { 2385 SourceLocation Loc = E.get()->getExprLoc(); 2386 SourceRange Range = E.get()->getSourceRange(); 2387 2388 QualType ZeroArgCallTy; 2389 UnresolvedSet<4> Overloads; 2390 if (tryExprAsCall(*E.get(), ZeroArgCallTy, Overloads) && 2391 !ZeroArgCallTy.isNull() && 2392 (!IsPlausibleResult || IsPlausibleResult(ZeroArgCallTy))) { 2393 // At this point, we know E is potentially callable with 0 2394 // arguments and that it returns something of a reasonable type, 2395 // so we can emit a fixit and carry on pretending that E was 2396 // actually a CallExpr. 2397 SourceLocation ParenInsertionLoc = getLocForEndOfToken(Range.getEnd()); 2398 bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get()); 2399 Diag(Loc, PD) << /*zero-arg*/ 1 << IsMV << Range 2400 << (IsCallableWithAppend(E.get()) 2401 ? FixItHint::CreateInsertion(ParenInsertionLoc, "()") 2402 : FixItHint()); 2403 if (!IsMV) 2404 notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult); 2405 2406 // FIXME: Try this before emitting the fixit, and suppress diagnostics 2407 // while doing so. 
2408 E = BuildCallExpr(nullptr, E.get(), Range.getEnd(), None, 2409 Range.getEnd().getLocWithOffset(1)); 2410 return true; 2411 } 2412 2413 if (!ForceComplain) return false; 2414 2415 bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get()); 2416 Diag(Loc, PD) << /*not zero-arg*/ 0 << IsMV << Range; 2417 if (!IsMV) 2418 notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult); 2419 E = ExprError(); 2420 return true; 2421 } 2422 2423 IdentifierInfo *Sema::getSuperIdentifier() const { 2424 if (!Ident_super) 2425 Ident_super = &Context.Idents.get("super"); 2426 return Ident_super; 2427 } 2428 2429 IdentifierInfo *Sema::getFloat128Identifier() const { 2430 if (!Ident___float128) 2431 Ident___float128 = &Context.Idents.get("__float128"); 2432 return Ident___float128; 2433 } 2434 2435 void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD, 2436 CapturedRegionKind K, 2437 unsigned OpenMPCaptureLevel) { 2438 auto *CSI = new CapturedRegionScopeInfo( 2439 getDiagnostics(), S, CD, RD, CD->getContextParam(), K, 2440 (getLangOpts().OpenMP && K == CR_OpenMP) ? 
getOpenMPNestingLevel() : 0, 2441 OpenMPCaptureLevel); 2442 CSI->ReturnType = Context.VoidTy; 2443 FunctionScopes.push_back(CSI); 2444 } 2445 2446 CapturedRegionScopeInfo *Sema::getCurCapturedRegion() { 2447 if (FunctionScopes.empty()) 2448 return nullptr; 2449 2450 return dyn_cast<CapturedRegionScopeInfo>(FunctionScopes.back()); 2451 } 2452 2453 const llvm::MapVector<FieldDecl *, Sema::DeleteLocs> & 2454 Sema::getMismatchingDeleteExpressions() const { 2455 return DeleteExprs; 2456 } 2457 2458 void Sema::setOpenCLExtensionForType(QualType T, llvm::StringRef ExtStr) { 2459 if (ExtStr.empty()) 2460 return; 2461 llvm::SmallVector<StringRef, 1> Exts; 2462 ExtStr.split(Exts, " ", /* limit */ -1, /* keep empty */ false); 2463 auto CanT = T.getCanonicalType().getTypePtr(); 2464 for (auto &I : Exts) 2465 OpenCLTypeExtMap[CanT].insert(I.str()); 2466 } 2467 2468 void Sema::setOpenCLExtensionForDecl(Decl *FD, StringRef ExtStr) { 2469 llvm::SmallVector<StringRef, 1> Exts; 2470 ExtStr.split(Exts, " ", /* limit */ -1, /* keep empty */ false); 2471 if (Exts.empty()) 2472 return; 2473 for (auto &I : Exts) 2474 OpenCLDeclExtMap[FD].insert(I.str()); 2475 } 2476 2477 void Sema::setCurrentOpenCLExtensionForType(QualType T) { 2478 if (CurrOpenCLExtension.empty()) 2479 return; 2480 setOpenCLExtensionForType(T, CurrOpenCLExtension); 2481 } 2482 2483 void Sema::setCurrentOpenCLExtensionForDecl(Decl *D) { 2484 if (CurrOpenCLExtension.empty()) 2485 return; 2486 setOpenCLExtensionForDecl(D, CurrOpenCLExtension); 2487 } 2488 2489 std::string Sema::getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD) { 2490 if (!OpenCLDeclExtMap.empty()) 2491 return getOpenCLExtensionsFromExtMap(FD, OpenCLDeclExtMap); 2492 2493 return ""; 2494 } 2495 2496 std::string Sema::getOpenCLExtensionsFromTypeExtMap(FunctionType *FT) { 2497 if (!OpenCLTypeExtMap.empty()) 2498 return getOpenCLExtensionsFromExtMap(FT, OpenCLTypeExtMap); 2499 2500 return ""; 2501 } 2502 2503 template <typename T, typename MapT> 2504 
std::string Sema::getOpenCLExtensionsFromExtMap(T *FDT, MapT &Map) { 2505 auto Loc = Map.find(FDT); 2506 return llvm::join(Loc->second, " "); 2507 } 2508 2509 bool Sema::isOpenCLDisabledDecl(Decl *FD) { 2510 auto Loc = OpenCLDeclExtMap.find(FD); 2511 if (Loc == OpenCLDeclExtMap.end()) 2512 return false; 2513 for (auto &I : Loc->second) { 2514 if (!getOpenCLOptions().isEnabled(I)) 2515 return true; 2516 } 2517 return false; 2518 } 2519 2520 template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> 2521 bool Sema::checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, 2522 DiagInfoT DiagInfo, MapT &Map, 2523 unsigned Selector, 2524 SourceRange SrcRange) { 2525 auto Loc = Map.find(D); 2526 if (Loc == Map.end()) 2527 return false; 2528 bool Disabled = false; 2529 for (auto &I : Loc->second) { 2530 if (I != CurrOpenCLExtension && !getOpenCLOptions().isEnabled(I)) { 2531 Diag(DiagLoc, diag::err_opencl_requires_extension) << Selector << DiagInfo 2532 << I << SrcRange; 2533 Disabled = true; 2534 } 2535 } 2536 return Disabled; 2537 } 2538 2539 bool Sema::checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType QT) { 2540 // Check extensions for declared types. 2541 Decl *Decl = nullptr; 2542 if (auto TypedefT = dyn_cast<TypedefType>(QT.getTypePtr())) 2543 Decl = TypedefT->getDecl(); 2544 if (auto TagT = dyn_cast<TagType>(QT.getCanonicalType().getTypePtr())) 2545 Decl = TagT->getDecl(); 2546 auto Loc = DS.getTypeSpecTypeLoc(); 2547 2548 // Check extensions for vector types. 2549 // e.g. double4 is not allowed when cl_khr_fp64 is absent. 2550 if (QT->isExtVectorType()) { 2551 auto TypePtr = QT->castAs<ExtVectorType>()->getElementType().getTypePtr(); 2552 return checkOpenCLDisabledTypeOrDecl(TypePtr, Loc, QT, OpenCLTypeExtMap); 2553 } 2554 2555 if (checkOpenCLDisabledTypeOrDecl(Decl, Loc, QT, OpenCLDeclExtMap)) 2556 return true; 2557 2558 // Check extensions for builtin types. 
2559 return checkOpenCLDisabledTypeOrDecl(QT.getCanonicalType().getTypePtr(), Loc, 2560 QT, OpenCLTypeExtMap); 2561 } 2562 2563 bool Sema::checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E) { 2564 IdentifierInfo *FnName = D.getIdentifier(); 2565 return checkOpenCLDisabledTypeOrDecl(&D, E.getBeginLoc(), FnName, 2566 OpenCLDeclExtMap, 1, D.getSourceRange()); 2567 } 2568