//===- Tokens.cpp - collect tokens from preprocessing --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "clang/Tooling/Syntax/Tokens.h"

#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TokenKinds.h"
#include "clang/Lex/PPCallbacks.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/Token.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <string>
#include <utility>
#include <vector>

using namespace clang;
using namespace clang::syntax;

namespace {
// Finds the smallest consecutive subsequence of Toks that covers R.
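// For example (illustrative note added for exposition, not part of the
// original comment): for spelled tokens `a b c d` and a range R whose begin
// is the location of `b` and whose end is the location of `c`, the two
// partition points below select `b` and one-past-`c`, so the returned slice
// is `b c`.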
llvm::ArrayRef<syntax::Token>
getTokensCovering(llvm::ArrayRef<syntax::Token> Toks, SourceRange R,
                  const SourceManager &SM) {
  if (R.isInvalid())
    return {};
  const syntax::Token *Begin =
      llvm::partition_point(Toks, [&](const syntax::Token &T) {
        return SM.isBeforeInTranslationUnit(T.location(), R.getBegin());
      });
  const syntax::Token *End =
      llvm::partition_point(Toks, [&](const syntax::Token &T) {
        return !SM.isBeforeInTranslationUnit(R.getEnd(), T.location());
      });
  if (Begin > End)
    return {};
  return {Begin, End};
}

// Finds the smallest expansion range that contains expanded tokens First and
// Last, e.g.:
// #define ID(x) x
// ID(ID(ID(a1) a2))
//          ~~       -> a1
//              ~~   -> a2
//       ~~~~~~~~~   -> a1 a2
SourceRange findCommonRangeForMacroArgs(const syntax::Token &First,
                                        const syntax::Token &Last,
                                        const SourceManager &SM) {
  SourceRange Res;
  auto FirstLoc = First.location(), LastLoc = Last.location();
  // Keep traversing up the spelling chain as long as the tokens are part of
  // the same expansion.
  while (!FirstLoc.isFileID() && !LastLoc.isFileID()) {
    auto ExpInfoFirst = SM.getSLocEntry(SM.getFileID(FirstLoc)).getExpansion();
    auto ExpInfoLast = SM.getSLocEntry(SM.getFileID(LastLoc)).getExpansion();
    // Stop if expansions have diverged.
    if (ExpInfoFirst.getExpansionLocStart() !=
        ExpInfoLast.getExpansionLocStart())
      break;
    // Do not continue into macro bodies.
    if (!ExpInfoFirst.isMacroArgExpansion() ||
        !ExpInfoLast.isMacroArgExpansion())
      break;
    FirstLoc = SM.getImmediateSpellingLoc(FirstLoc);
    LastLoc = SM.getImmediateSpellingLoc(LastLoc);
    // Update the result afterwards, as we want the tokens that triggered the
    // expansion.
    Res = {FirstLoc, LastLoc};
  }
  // Normally, mapping back to the expansion location here only changes the
  // FileID: we have already found some tokens expanded from the same macro
  // argument, and they should map to a consecutive subset of spelled tokens.
  // Unfortunately, SourceManager::isBeforeInTranslationUnit discriminates
  // source locations based on their FileID in addition to their offsets. So
  // even though we are referring to the same tokens, SourceManager might tell
  // us that one is before the other if they have different FileIDs.
  return SM.getExpansionRange(CharSourceRange(Res, true)).getAsRange();
}

} // namespace

syntax::Token::Token(SourceLocation Location, unsigned Length,
                     tok::TokenKind Kind)
    : Location(Location), Length(Length), Kind(Kind) {
  assert(Location.isValid());
}

syntax::Token::Token(const clang::Token &T)
    : Token(T.getLocation(), T.getLength(), T.getKind()) {
  assert(!T.isAnnotation());
}

llvm::StringRef syntax::Token::text(const SourceManager &SM) const {
  bool Invalid = false;
  const char *Start = SM.getCharacterData(location(), &Invalid);
  assert(!Invalid);
  return llvm::StringRef(Start, length());
}

FileRange syntax::Token::range(const SourceManager &SM) const {
  assert(location().isFileID() && "must be a spelled token");
  FileID File;
  unsigned StartOffset;
  std::tie(File, StartOffset) = SM.getDecomposedLoc(location());
  return FileRange(File, StartOffset, StartOffset + length());
}

FileRange syntax::Token::range(const SourceManager &SM,
                               const syntax::Token &First,
                               const syntax::Token &Last) {
  auto F = First.range(SM);
  auto L = Last.range(SM);
  assert(F.file() == L.file() && "tokens from different files");
  assert((F == L || F.endOffset() <= L.beginOffset()) &&
         "wrong order of tokens");
  return FileRange(F.file(), F.beginOffset(), L.endOffset());
}

llvm::raw_ostream &syntax::operator<<(llvm::raw_ostream &OS, const Token &T) {
  return OS << T.str();
}

FileRange::FileRange(FileID File, unsigned BeginOffset, unsigned EndOffset)
    : File(File), Begin(BeginOffset), End(EndOffset) {
  assert(File.isValid());
  assert(BeginOffset <= EndOffset);
}

FileRange::FileRange(const SourceManager &SM, SourceLocation BeginLoc,
                     unsigned Length) {
  assert(BeginLoc.isValid());
  assert(BeginLoc.isFileID());

  std::tie(File, Begin) = SM.getDecomposedLoc(BeginLoc);
  End = Begin + Length;
}

FileRange::FileRange(const SourceManager &SM, SourceLocation BeginLoc,
                     SourceLocation EndLoc) {
  assert(BeginLoc.isValid());
  assert(BeginLoc.isFileID());
  assert(EndLoc.isValid());
  assert(EndLoc.isFileID());
  assert(SM.getFileID(BeginLoc) == SM.getFileID(EndLoc));
  assert(SM.getFileOffset(BeginLoc) <= SM.getFileOffset(EndLoc));

  std::tie(File, Begin) = SM.getDecomposedLoc(BeginLoc);
  End = SM.getFileOffset(EndLoc);
}

llvm::raw_ostream &syntax::operator<<(llvm::raw_ostream &OS,
                                      const FileRange &R) {
  return OS << llvm::formatv("FileRange(file = {0}, offsets = {1}-{2})",
                             R.file().getHashValue(), R.beginOffset(),
                             R.endOffset());
}

llvm::StringRef FileRange::text(const SourceManager &SM) const {
  bool Invalid = false;
  StringRef Text = SM.getBufferData(File, &Invalid);
  if (Invalid)
    return "";
  assert(Begin <= Text.size());
  assert(End <= Text.size());
  return Text.substr(Begin, length());
}

llvm::ArrayRef<syntax::Token> TokenBuffer::expandedTokens(SourceRange R) const {
  return getTokensCovering(expandedTokens(), R, *SourceMgr);
}

CharSourceRange FileRange::toCharRange(const SourceManager &SM) const {
  return CharSourceRange(
      SourceRange(SM.getComposedLoc(File, Begin), SM.getComposedLoc(File, End)),
      /*IsTokenRange=*/false);
}

std::pair<const syntax::Token *, const TokenBuffer::Mapping *>
TokenBuffer::spelledForExpandedToken(const syntax::Token *Expanded) const {
  assert(Expanded);
  assert(ExpandedTokens.data() <= Expanded &&
         Expanded < ExpandedTokens.data() + ExpandedTokens.size());

  auto FileIt = Files.find(
      SourceMgr->getFileID(SourceMgr->getExpansionLoc(Expanded->location())));
  assert(FileIt != Files.end() && "no file for an expanded token");

  const MarkedFile &File = FileIt->second;

  unsigned ExpandedIndex = Expanded - ExpandedTokens.data();
  // Find the first mapping that produced tokens after \p Expanded.
  auto It = llvm::partition_point(File.Mappings, [&](const Mapping &M) {
    return M.BeginExpanded <= ExpandedIndex;
  });
  // Our token could only be produced by the previous mapping.
  if (It == File.Mappings.begin()) {
    // No previous mapping, no need to modify offsets.
    return {&File.SpelledTokens[ExpandedIndex - File.BeginExpanded], nullptr};
  }
  --It; // 'It' now points to the last mapping that started before our token.

  // Check if the token is part of the mapping.
  if (ExpandedIndex < It->EndExpanded)
    return {&File.SpelledTokens[It->BeginSpelled], /*Mapping*/ &*It};

  // Not part of the mapping; use the index from the previous mapping to
  // compute the corresponding spelled token.
  return {
      &File.SpelledTokens[It->EndSpelled + (ExpandedIndex - It->EndExpanded)],
      /*Mapping*/ nullptr};
}

llvm::ArrayRef<syntax::Token> TokenBuffer::spelledTokens(FileID FID) const {
  auto It = Files.find(FID);
  assert(It != Files.end());
  return It->second.SpelledTokens;
}

const syntax::Token *TokenBuffer::spelledTokenAt(SourceLocation Loc) const {
  assert(Loc.isFileID());
  const auto Spelled = spelledTokens(SourceMgr->getFileID(Loc));
  const auto *Tok = llvm::partition_point(
      Spelled,
      [&](const syntax::Token &Tok) { return Tok.location() < Loc; });
  // Guard against Loc pointing past the last spelled token.
  if (Tok == Spelled.end() || Tok->location() != Loc)
    return nullptr;
  return Tok;
}

std::string TokenBuffer::Mapping::str() const {
  return std::string(
      llvm::formatv("spelled tokens: [{0},{1}), expanded tokens: [{2},{3})",
                    BeginSpelled, EndSpelled, BeginExpanded, EndExpanded));
}
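
// Illustrative example for spelledForExpanded (added for exposition; the
// macro and variable names below are hypothetical): given
//   #define PLUS1(X) X + 1
//   int a = PLUS1(2);
// the expanded stream is `int a = 2 + 1 ;`. Mapping the expanded range
// `2 + 1` back to spelled tokens yields `PLUS1 ( 2 )`, since that range only
// makes sense as the whole macro invocation; mapping just the expanded `2`
// yields the spelled `2`, because it is a macro argument spelled directly in
// the file.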
llvm::Optional<llvm::ArrayRef<syntax::Token>>
TokenBuffer::spelledForExpanded(llvm::ArrayRef<syntax::Token> Expanded) const {
  // Mapping an empty range is ambiguous in case of empty mappings at either
  // end of the range; bail out in that case.
  if (Expanded.empty())
    return llvm::None;

  const syntax::Token *BeginSpelled;
  const Mapping *BeginMapping;
  std::tie(BeginSpelled, BeginMapping) =
      spelledForExpandedToken(&Expanded.front());

  const syntax::Token *LastSpelled;
  const Mapping *LastMapping;
  std::tie(LastSpelled, LastMapping) =
      spelledForExpandedToken(&Expanded.back());

  FileID FID = SourceMgr->getFileID(BeginSpelled->location());
  // FIXME: Handle multi-file changes by trying to map onto a common root.
  if (FID != SourceMgr->getFileID(LastSpelled->location()))
    return llvm::None;

  const MarkedFile &File = Files.find(FID)->second;

  // If both tokens are coming from a macro argument expansion, try to map to
  // the smallest part of the macro argument. The BeginMapping && LastMapping
  // check is only for performance; the mappings are a prerequisite for
  // Expanded.front() and Expanded.back() being part of a macro argument
  // expansion.
  if (BeginMapping && LastMapping &&
      SourceMgr->isMacroArgExpansion(Expanded.front().location()) &&
      SourceMgr->isMacroArgExpansion(Expanded.back().location())) {
    auto CommonRange = findCommonRangeForMacroArgs(Expanded.front(),
                                                   Expanded.back(), *SourceMgr);
    // The tokens might be arguments of different macro calls; in that case we
    // should continue with the logic below instead of returning an empty
    // range.
    if (CommonRange.isValid())
      return getTokensCovering(File.SpelledTokens, CommonRange, *SourceMgr);
  }

  // Do not allow changes that do not cover the full expansion.
  unsigned BeginExpanded = Expanded.begin() - ExpandedTokens.data();
  unsigned EndExpanded = Expanded.end() - ExpandedTokens.data();
  if (BeginMapping && BeginExpanded != BeginMapping->BeginExpanded)
    return llvm::None;
  if (LastMapping && LastMapping->EndExpanded != EndExpanded)
    return llvm::None;
  // All is good, return the result.
  return llvm::makeArrayRef(
      BeginMapping ? File.SpelledTokens.data() + BeginMapping->BeginSpelled
                   : BeginSpelled,
      LastMapping ? File.SpelledTokens.data() + LastMapping->EndSpelled
                  : LastSpelled + 1);
}
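
// Illustrative example for expansionStartingAt (added for exposition; the
// macro name is hypothetical): given
//   #define FORTY_TWO 42
//   int x = FORTY_TWO;
// calling expansionStartingAt on the spelled `FORTY_TWO` token returns an
// Expansion whose Spelled range is `FORTY_TWO` and whose Expanded range is
// `42`; calling it on any other spelled token returns llvm::None, since no
// mapping starts there.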
llvm::Optional<TokenBuffer::Expansion>
TokenBuffer::expansionStartingAt(const syntax::Token *Spelled) const {
  assert(Spelled);
  assert(Spelled->location().isFileID() && "not a spelled token");
  auto FileIt = Files.find(SourceMgr->getFileID(Spelled->location()));
  assert(FileIt != Files.end() && "file not tracked by token buffer");

  auto &File = FileIt->second;
  assert(File.SpelledTokens.data() <= Spelled &&
         Spelled < (File.SpelledTokens.data() + File.SpelledTokens.size()));

  unsigned SpelledIndex = Spelled - File.SpelledTokens.data();
  auto M = llvm::partition_point(File.Mappings, [&](const Mapping &M) {
    return M.BeginSpelled < SpelledIndex;
  });
  if (M == File.Mappings.end() || M->BeginSpelled != SpelledIndex)
    return llvm::None;

  Expansion E;
  E.Spelled = llvm::makeArrayRef(File.SpelledTokens.data() + M->BeginSpelled,
                                 File.SpelledTokens.data() + M->EndSpelled);
  E.Expanded = llvm::makeArrayRef(ExpandedTokens.data() + M->BeginExpanded,
                                  ExpandedTokens.data() + M->EndExpanded);
  return E;
}

llvm::ArrayRef<syntax::Token>
syntax::spelledTokensTouching(SourceLocation Loc,
                              llvm::ArrayRef<syntax::Token> Tokens) {
  assert(Loc.isFileID());

  auto *Right = llvm::partition_point(
      Tokens, [&](const syntax::Token &Tok) { return Tok.location() < Loc; });
  bool AcceptRight = Right != Tokens.end() && Right->location() <= Loc;
  bool AcceptLeft =
      Right != Tokens.begin() && (Right - 1)->endLocation() >= Loc;
  return llvm::makeArrayRef(Right - (AcceptLeft ? 1 : 0),
                            Right + (AcceptRight ? 1 : 0));
}

llvm::ArrayRef<syntax::Token>
syntax::spelledTokensTouching(SourceLocation Loc,
                              const syntax::TokenBuffer &Tokens) {
  return spelledTokensTouching(
      Loc, Tokens.spelledTokens(Tokens.sourceManager().getFileID(Loc)));
}

const syntax::Token *
syntax::spelledIdentifierTouching(SourceLocation Loc,
                                  llvm::ArrayRef<syntax::Token> Tokens) {
  for (const syntax::Token &Tok : spelledTokensTouching(Loc, Tokens)) {
    if (Tok.kind() == tok::identifier)
      return &Tok;
  }
  return nullptr;
}

const syntax::Token *
syntax::spelledIdentifierTouching(SourceLocation Loc,
                                  const syntax::TokenBuffer &Tokens) {
  return spelledIdentifierTouching(
      Loc, Tokens.spelledTokens(Tokens.sourceManager().getFileID(Loc)));
}

std::vector<const syntax::Token *>
TokenBuffer::macroExpansions(FileID FID) const {
  auto FileIt = Files.find(FID);
  assert(FileIt != Files.end() && "file not tracked by token buffer");
  auto &File = FileIt->second;
  std::vector<const syntax::Token *> Expansions;
  auto &Spelled = File.SpelledTokens;
  for (auto Mapping : File.Mappings) {
    const syntax::Token *Token = &Spelled[Mapping.BeginSpelled];
    if (Token->kind() == tok::TokenKind::identifier)
      Expansions.push_back(Token);
  }
  return Expansions;
}

std::vector<syntax::Token> syntax::tokenize(const FileRange &FR,
                                            const SourceManager &SM,
                                            const LangOptions &LO) {
  std::vector<syntax::Token> Tokens;
  IdentifierTable Identifiers(LO);
  auto AddToken = [&](clang::Token T) {
    // Fill the proper token kind for keywords, etc.
    if (T.getKind() == tok::raw_identifier && !T.needsCleaning() &&
        !T.hasUCN()) { // FIXME: support needsCleaning and hasUCN cases.
      clang::IdentifierInfo &II = Identifiers.get(T.getRawIdentifier());
      T.setIdentifierInfo(&II);
      T.setKind(II.getTokenID());
    }
    Tokens.push_back(syntax::Token(T));
  };

  auto SrcBuffer = SM.getBufferData(FR.file());
  Lexer L(SM.getLocForStartOfFile(FR.file()), LO, SrcBuffer.data(),
          SrcBuffer.data() + FR.beginOffset(),
          // We can't make BufEnd point to FR.endOffset, as the Lexer requires
          // a null-terminated buffer.
          SrcBuffer.data() + SrcBuffer.size());

  clang::Token T;
  while (!L.LexFromRawLexer(T) && L.getCurrentBufferOffset() < FR.endOffset())
    AddToken(T);
  // LexFromRawLexer returns true when it parses the last token of the file;
  // add it iff it starts within the range we are interested in.
  if (SM.getFileOffset(T.getLocation()) < FR.endOffset())
    AddToken(T);
  return Tokens;
}

std::vector<syntax::Token> syntax::tokenize(FileID FID, const SourceManager &SM,
                                            const LangOptions &LO) {
  return tokenize(syntax::FileRange(FID, 0, SM.getFileIDSize(FID)), SM, LO);
}
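
// Illustrative note on the tokenize() helpers above (added for exposition):
// tokenizing a file that contains
//   int a = 2; // initialize
// with default language options yields the raw tokens `int` (kw_int), `a`
// (identifier), `=` (equal), `2` (numeric_constant) and `;` (semi);
// whitespace and comments are not retained, and no preprocessing happens.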

/// Records information required to construct mappings for the token buffer
/// that we are collecting.
class TokenCollector::CollectPPExpansions : public PPCallbacks {
public:
  CollectPPExpansions(TokenCollector &C) : Collector(&C) {}

  /// A disabled instance will stop reporting anything to the TokenCollector.
  /// This ensures that uses of the preprocessor after TokenCollector::consume()
  /// is called do not access the (possibly invalid) collector instance.
  void disable() { Collector = nullptr; }

  void MacroExpands(const clang::Token &MacroNameTok, const MacroDefinition &MD,
                    SourceRange Range, const MacroArgs *Args) override {
    if (!Collector)
      return;
    // Only record top-level expansions, not those where:
    //   - the macro use is inside a macro body,
    //   - the macro appears in an argument to another macro.
    if (!MacroNameTok.getLocation().isFileID() ||
        (LastExpansionEnd.isValid() &&
         Collector->PP.getSourceManager().isBeforeInTranslationUnit(
             Range.getBegin(), LastExpansionEnd)))
      return;
    Collector->Expansions[Range.getBegin().getRawEncoding()] = Range.getEnd();
    LastExpansionEnd = Range.getEnd();
  }
  // FIXME: handle directives like #pragma, #include, etc.
private:
  TokenCollector *Collector;
  /// Used to detect recursive macro expansions.
  SourceLocation LastExpansionEnd;
};

/// Fills in the TokenBuffer by tracing the run of a preprocessor. The
/// implementation tracks the tokens, macro expansions and directives coming
/// from the preprocessor and:
/// - for each token, figures out if it is a part of an expanded token stream,
///   a spelled token stream or both, and stores the token appropriately;
/// - records mappings from the spelled to expanded token ranges, e.g. for
///   macro expansions.
/// FIXME: also properly record:
///   - #include directives,
///   - #pragma, #line and other PP directives,
///   - skipped pp regions,
///   - ...

TokenCollector::TokenCollector(Preprocessor &PP) : PP(PP) {
  // Collect the expanded token stream during preprocessing.
  PP.setTokenWatcher([this](const clang::Token &T) {
    if (T.isAnnotation())
      return;
    DEBUG_WITH_TYPE("collect-tokens", llvm::dbgs()
                                          << "Token: "
                                          << syntax::Token(T).dumpForTests(
                                                 this->PP.getSourceManager())
                                          << "\n");
    Expanded.push_back(syntax::Token(T));
  });
  // Also record the locations of macro calls, to properly recover their
  // boundaries in case of empty expansions.
  auto CB = std::make_unique<CollectPPExpansions>(*this);
  this->Collector = CB.get();
  PP.addPPCallbacks(std::move(CB));
}

/// Builds mappings and spelled tokens in the TokenBuffer based on the expanded
/// token stream.
class TokenCollector::Builder {
public:
  Builder(std::vector<syntax::Token> Expanded, PPExpansions CollectedExpansions,
          const SourceManager &SM, const LangOptions &LangOpts)
      : Result(SM), CollectedExpansions(std::move(CollectedExpansions)), SM(SM),
        LangOpts(LangOpts) {
    Result.ExpandedTokens = std::move(Expanded);
  }

  TokenBuffer build() && {
    assert(!Result.ExpandedTokens.empty());
    assert(Result.ExpandedTokens.back().kind() == tok::eof);

    // Tokenize every file that contributed tokens to the expanded stream.
    buildSpelledTokens();

    // The expanded token stream consists of runs of tokens that came from
    // the same source (a macro expansion, part of a file etc).
    // Between these runs are the logical positions of spelled tokens that
    // didn't expand to anything.
    while (NextExpanded < Result.ExpandedTokens.size() - 1 /* eof */) {
      // Create empty mappings for spelled tokens that expanded to nothing
      // here. May advance NextSpelled, but NextExpanded is unchanged.
      discard();
      // Create a mapping for a contiguous run of expanded tokens. Advances
      // NextExpanded past the run, and NextSpelled accordingly.
      unsigned OldPosition = NextExpanded;
      advance();
      if (NextExpanded == OldPosition)
        diagnoseAdvanceFailure();
    }
    // If any tokens remain in any of the files, they didn't expand to
    // anything. Create empty mappings up until the end of the file.
    for (const auto &File : Result.Files)
      discard(File.first);

    return std::move(Result);
  }
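
  // A rough illustration of what build() produces (added for exposition; the
  // macros are hypothetical). For a file containing
  //   #define EMPTY
  //   #define ID(X) X
  //   EMPTY int ID(a);
  // the spelled stream is `EMPTY int ID ( a ) ;`, the expanded stream is
  // `int a ;`, and the recorded mappings are roughly `EMPTY -> <nothing>`
  // and `ID ( a ) -> a`; plain file tokens such as `int` and `;` are copied
  // to the expanded stream without any mapping.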

private:
  // Consume a sequence of spelled tokens that didn't expand to anything.
  // In the simplest case, skips spelled tokens until finding one that produced
  // the NextExpanded token, and creates an empty mapping for them.
  // If Drain is provided, skips remaining tokens from that file instead.
  void discard(llvm::Optional<FileID> Drain = llvm::None) {
    SourceLocation Target =
        Drain ? SM.getLocForEndOfFile(*Drain)
              : SM.getExpansionLoc(
                    Result.ExpandedTokens[NextExpanded].location());
    FileID File = SM.getFileID(Target);
    const auto &SpelledTokens = Result.Files[File].SpelledTokens;
    auto &NextSpelled = this->NextSpelled[File];

    TokenBuffer::Mapping Mapping;
    Mapping.BeginSpelled = NextSpelled;
    // When dropping trailing tokens from a file, the empty mapping should
    // be positioned within the file's expanded-token range (at the end).
    Mapping.BeginExpanded = Mapping.EndExpanded =
        Drain ? Result.Files[*Drain].EndExpanded : NextExpanded;
    // We may want to split into several adjacent empty mappings.
    // FlushMapping() emits the current mapping and starts a new one.
    auto FlushMapping = [&, this] {
      Mapping.EndSpelled = NextSpelled;
      if (Mapping.BeginSpelled != Mapping.EndSpelled)
        Result.Files[File].Mappings.push_back(Mapping);
      Mapping.BeginSpelled = NextSpelled;
    };

    while (NextSpelled < SpelledTokens.size() &&
           SpelledTokens[NextSpelled].location() < Target) {
      // If we know mapping bounds at [NextSpelled, KnownEnd] (macro expansion)
      // then we want to partition our (empty) mapping:
      //   [Start, NextSpelled) [NextSpelled, KnownEnd] (KnownEnd, Target)
      SourceLocation KnownEnd = CollectedExpansions.lookup(
          SpelledTokens[NextSpelled].location().getRawEncoding());
      if (KnownEnd.isValid()) {
        FlushMapping(); // Emits [Start, NextSpelled)
        while (NextSpelled < SpelledTokens.size() &&
               SpelledTokens[NextSpelled].location() <= KnownEnd)
          ++NextSpelled;
        FlushMapping(); // Emits [NextSpelled, KnownEnd]
        // Now the loop continues and will emit (KnownEnd, Target).
      } else {
        ++NextSpelled;
      }
    }
    FlushMapping();
  }

  // Consumes the NextExpanded token and others that are part of the same run.
  // Increases NextExpanded and NextSpelled by at least one, and adds a mapping
  // (unless this is a run of file tokens, which we represent with no mapping).
  void advance() {
    const syntax::Token &Tok = Result.ExpandedTokens[NextExpanded];
    SourceLocation Expansion = SM.getExpansionLoc(Tok.location());
    FileID File = SM.getFileID(Expansion);
    const auto &SpelledTokens = Result.Files[File].SpelledTokens;
    auto &NextSpelled = this->NextSpelled[File];

    if (Tok.location().isFileID()) {
      // A run of file tokens continues while the expanded/spelled tokens
      // match.
      while (NextSpelled < SpelledTokens.size() &&
             NextExpanded < Result.ExpandedTokens.size() &&
             SpelledTokens[NextSpelled].location() ==
                 Result.ExpandedTokens[NextExpanded].location()) {
        ++NextSpelled;
        ++NextExpanded;
      }
      // We need no mapping for file tokens copied to the expanded stream.
    } else {
      // We found a new macro expansion. We should have its spelling bounds.
      auto End = CollectedExpansions.lookup(Expansion.getRawEncoding());
      assert(End.isValid() && "Macro expansion wasn't captured?");

      // Mapping starts here...
      TokenBuffer::Mapping Mapping;
      Mapping.BeginExpanded = NextExpanded;
      Mapping.BeginSpelled = NextSpelled;
      // ... consumes spelled tokens within bounds we captured ...
      while (NextSpelled < SpelledTokens.size() &&
             SpelledTokens[NextSpelled].location() <= End)
        ++NextSpelled;
      // ... consumes expanded tokens rooted at the same expansion ...
      while (NextExpanded < Result.ExpandedTokens.size() &&
             SM.getExpansionLoc(
                 Result.ExpandedTokens[NextExpanded].location()) == Expansion)
        ++NextExpanded;
      // ... and ends here.
      Mapping.EndExpanded = NextExpanded;
      Mapping.EndSpelled = NextSpelled;
      Result.Files[File].Mappings.push_back(Mapping);
    }
  }

  // advance() is supposed to consume at least one token - if not, we crash.
  void diagnoseAdvanceFailure() {
#ifndef NDEBUG
    // Show the failed-to-map token in context.
    for (unsigned I = (NextExpanded < 10) ? 0 : NextExpanded - 10;
         I < NextExpanded + 5 && I < Result.ExpandedTokens.size(); ++I) {
      const char *L =
          (I == NextExpanded) ? "!! " : (I < NextExpanded) ? "ok " : "   ";
      llvm::errs() << L << Result.ExpandedTokens[I].dumpForTests(SM) << "\n";
    }
#endif
    llvm_unreachable("Couldn't map expanded token to spelled tokens!");
  }

  /// Initializes TokenBuffer::Files and fills spelled tokens and expanded
  /// ranges for each of the files.
  void buildSpelledTokens() {
    for (unsigned I = 0; I < Result.ExpandedTokens.size(); ++I) {
      const auto &Tok = Result.ExpandedTokens[I];
      auto FID = SM.getFileID(SM.getExpansionLoc(Tok.location()));
      auto It = Result.Files.try_emplace(FID);
      TokenBuffer::MarkedFile &File = It.first->second;

      // The eof token should not be considered part of the main file's range.
      File.EndExpanded = Tok.kind() == tok::eof ? I : I + 1;

      if (!It.second)
        continue; // we have seen this file before.
      // This is the first time we see this file.
      File.BeginExpanded = I;
      File.SpelledTokens = tokenize(FID, SM, LangOpts);
    }
  }

  TokenBuffer Result;
  unsigned NextExpanded = 0;                    // cursor in ExpandedTokens
  llvm::DenseMap<FileID, unsigned> NextSpelled; // cursor in SpelledTokens
  PPExpansions CollectedExpansions;
  const SourceManager &SM;
  const LangOptions &LangOpts;
};

TokenBuffer TokenCollector::consume() && {
  PP.setTokenWatcher(nullptr);
  Collector->disable();
  return Builder(std::move(Expanded), std::move(Expansions),
                 PP.getSourceManager(), PP.getLangOpts())
      .build();
}

std::string syntax::Token::str() const {
  return std::string(llvm::formatv("Token({0}, length = {1})",
                                   tok::getTokenName(kind()), length()));
}

std::string syntax::Token::dumpForTests(const SourceManager &SM) const {
  return std::string(llvm::formatv("Token(`{0}`, {1}, length = {2})", text(SM),
                                   tok::getTokenName(kind()), length()));
}

std::string TokenBuffer::dumpForTests() const {
  auto PrintToken = [this](const syntax::Token &T) -> std::string {
    if (T.kind() == tok::eof)
      return "<eof>";
    return std::string(T.text(*SourceMgr));
  };

  auto DumpTokens = [this, &PrintToken](llvm::raw_ostream &OS,
                                        llvm::ArrayRef<syntax::Token> Tokens) {
    if (Tokens.empty()) {
      OS << "<empty>";
      return;
    }
    OS << Tokens[0].text(*SourceMgr);
    for (unsigned I = 1; I < Tokens.size(); ++I) {
      if (Tokens[I].kind() == tok::eof)
        continue;
      OS << " " << PrintToken(Tokens[I]);
    }
  };

  std::string Dump;
  llvm::raw_string_ostream OS(Dump);

  OS << "expanded tokens:\n"
     << "  ";
  // (!) we do not show '<eof>'.
  DumpTokens(OS, llvm::makeArrayRef(ExpandedTokens).drop_back());
  OS << "\n";

  std::vector<FileID> Keys;
  for (auto F : Files)
    Keys.push_back(F.first);
  llvm::sort(Keys);

  for (FileID ID : Keys) {
    const MarkedFile &File = Files.find(ID)->second;
    auto *Entry = SourceMgr->getFileEntryForID(ID);
    if (!Entry)
      continue; // Skip builtin files.
    OS << llvm::formatv("file '{0}'\n", Entry->getName())
       << "  spelled tokens:\n"
       << "    ";
    DumpTokens(OS, File.SpelledTokens);
    OS << "\n";

    if (File.Mappings.empty()) {
      OS << "  no mappings.\n";
      continue;
    }
    OS << "  mappings:\n";
    for (auto &M : File.Mappings) {
      OS << llvm::formatv(
          "    ['{0}'_{1}, '{2}'_{3}) => ['{4}'_{5}, '{6}'_{7})\n",
          PrintToken(File.SpelledTokens[M.BeginSpelled]), M.BeginSpelled,
          M.EndSpelled == File.SpelledTokens.size()
              ? "<eof>"
              : PrintToken(File.SpelledTokens[M.EndSpelled]),
          M.EndSpelled, PrintToken(ExpandedTokens[M.BeginExpanded]),
          M.BeginExpanded, PrintToken(ExpandedTokens[M.EndExpanded]),
          M.EndExpanded);
    }
  }
  return OS.str();
}