//===- Tokens.cpp - collect tokens from preprocessing ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "clang/Tooling/Syntax/Tokens.h"

#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TokenKinds.h"
#include "clang/Lex/PPCallbacks.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/Token.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <string>
#include <utility>
#include <vector>

using namespace clang;
using namespace clang::syntax;

namespace {
// Finds the smallest consecutive subsequence of Toks that covers R.
llvm::ArrayRef<syntax::Token>
getTokensCovering(llvm::ArrayRef<syntax::Token> Toks, SourceRange R,
                  const SourceManager &SM) {
  if (R.isInvalid())
    return {};
  const syntax::Token *Begin =
      llvm::partition_point(Toks, [&](const syntax::Token &T) {
        return SM.isBeforeInTranslationUnit(T.location(), R.getBegin());
      });
  const syntax::Token *End =
      llvm::partition_point(Toks, [&](const syntax::Token &T) {
        return !SM.isBeforeInTranslationUnit(R.getEnd(), T.location());
      });
  if (Begin > End)
    return {};
  return {Begin, End};
}
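
// Illustrative sketch of the covering semantics (names assumed): for spelled
// tokens `int main ( ) { }` and a SourceRange starting inside `main` and
// ending inside `)`, the smallest covering slice is [main, )]. Both endpoints
// are located with partition_point, so Toks must be ordered by location.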

// Finds the range within FID corresponding to expanded tokens [First, Last].
// Prev precedes First and Next follows Last; these must *not* be included.
// If no range satisfies the criteria, returns an invalid range.
//
//   #define ID(x) x
//   ID(ID(ID(a1) a2))
//            ~~        -> a1
//                ~~    -> a2
//           ~~~~~~~~~  -> a1 a2
SourceRange spelledForExpandedSlow(SourceLocation First, SourceLocation Last,
                                   SourceLocation Prev, SourceLocation Next,
                                   FileID TargetFile,
                                   const SourceManager &SM) {
  // There are two main parts to this algorithm:
  //  - identifying which spelled range covers the expanded tokens
  //  - validating that this range doesn't cover any extra tokens (First/Last)
  //
  // We do these in order. However as we transform the expanded range into the
  // spelled one, we adjust First/Last so the validation remains simple.

  assert(SM.getSLocEntry(TargetFile).isFile());
  // In most cases, to select First and Last we must return their expansion
  // range, i.e. the whole of any macros they are included in.
  //
  // When First and Last are part of the *same macro arg* of a macro written
  // in TargetFile, we want that slice of the arg, i.e. their spelling range.
  //
  // Unwrap such macro calls. If the target file has A(B(C)), the
  // SourceLocation stack of a token inside C shows us the expansion of A
  // first, then B, then any macros inside C's body, then C itself.
  // (This is the reverse of the order the PP applies the expansions in.)
  while (First.isMacroID() && Last.isMacroID()) {
    auto DecFirst = SM.getDecomposedLoc(First);
    auto DecLast = SM.getDecomposedLoc(Last);
    auto &ExpFirst = SM.getSLocEntry(DecFirst.first).getExpansion();
    auto &ExpLast = SM.getSLocEntry(DecLast.first).getExpansion();

    if (!ExpFirst.isMacroArgExpansion() || !ExpLast.isMacroArgExpansion())
      break;
    // Locations are in the same macro arg if they expand to the same place.
    // (They may still have different FileIDs - an arg can have >1 chunks!)
    if (ExpFirst.getExpansionLocStart() != ExpLast.getExpansionLocStart())
      break;
    // Careful, given:
    //   #define HIDE ID(ID(a))
    //   ID(ID(HIDE))
    // The token `a` is wrapped in 4 arg-expansions, we only want to unwrap 2.
    // We distinguish them by whether the macro expands into the target file.
    // Fortunately, the target file ones will always appear first.
    auto &ExpMacro =
        SM.getSLocEntry(SM.getFileID(ExpFirst.getExpansionLocStart()))
            .getExpansion();
    if (ExpMacro.getExpansionLocStart().isMacroID())
      break;
    // Replace each endpoint with its spelling inside the macro arg.
    // (This is getImmediateSpellingLoc without repeating lookups.)
    First = ExpFirst.getSpellingLoc().getLocWithOffset(DecFirst.second);
    Last = ExpLast.getSpellingLoc().getLocWithOffset(DecLast.second);

    // Now: how do we adjust the previous/next bounds? Three cases:
    //  A) If they are also part of the same macro arg, we translate them too.
    //     This will ensure that we don't select any macros nested within the
    //     macro arg that cover extra tokens. Critical case:
    //       #define ID(X) X
    //       ID(prev target)    // selecting 'target' succeeds
    //       #define LARGE ID(prev target)
    //       LARGE              // selecting 'target' fails.
    //  B) They are not in the macro at all; then their expansion range is a
    //     sibling to it, and we can safely substitute that.
    //       #define PREV prev
    //       #define ID(X) X
    //       PREV ID(target)    // selecting 'target' succeeds.
    //       #define LARGE PREV ID(target)
    //       LARGE              // selecting 'target' fails.
    //  C) They are in a different arg of this macro, or the macro body.
    //     Now selecting the whole macro arg is fine, but the whole macro is
    //     not. Model this by using the edge of the macro call as the bound.
    //       #define ID2(X, Y) X Y
    //       ID2(prev, target)    // selecting 'target' succeeds
    //       #define LARGE ID2(prev, target)
    //       LARGE                // selecting 'target' fails
    auto AdjustBound = [&](SourceLocation &Bound) {
      if (Bound.isInvalid() || !Bound.isMacroID()) // Non-macro must be case B.
        return;
      auto DecBound = SM.getDecomposedLoc(Bound);
      auto &ExpBound = SM.getSLocEntry(DecBound.first).getExpansion();
      if (ExpBound.isMacroArgExpansion() &&
          ExpBound.getExpansionLocStart() == ExpFirst.getExpansionLocStart()) {
        // Case A: translate to (spelling) loc within the macro arg.
        Bound = ExpBound.getSpellingLoc().getLocWithOffset(DecBound.second);
        return;
      }
      while (Bound.isMacroID()) {
        SourceRange Exp = SM.getImmediateExpansionRange(Bound).getAsRange();
        if (Exp.getBegin() == ExpMacro.getExpansionLocStart()) {
          // Case B: bounds become the macro call itself.
          Bound = (&Bound == &Prev) ? Exp.getBegin() : Exp.getEnd();
          return;
        }
        // Either case C, or expansion location will later find case B.
        // We choose the upper bound for Prev and the lower one for Next:
        //   ID(prev) target ID(next)
        //          ^          ^
        //      new-prev    new-next
        Bound = (&Bound == &Prev) ? Exp.getEnd() : Exp.getBegin();
      }
    };
    AdjustBound(Prev);
    AdjustBound(Next);
  }

  // In all remaining cases we need the full containing macros.
  // If this overlaps Prev or Next, then no range is possible.
  SourceRange Candidate =
      SM.getExpansionRange(SourceRange(First, Last)).getAsRange();
  auto DecFirst = SM.getDecomposedExpansionLoc(Candidate.getBegin());
  auto DecLast = SM.getDecomposedLoc(Candidate.getEnd());
  // Can end up in the wrong file due to bad input or token-pasting
  // shenanigans.
  if (Candidate.isInvalid() || DecFirst.first != TargetFile ||
      DecLast.first != TargetFile)
    return SourceRange();
  // Check bounds, which may still be inside macros.
  if (Prev.isValid()) {
    auto Dec = SM.getDecomposedLoc(SM.getExpansionRange(Prev).getBegin());
    if (Dec.first != DecFirst.first || Dec.second >= DecFirst.second)
      return SourceRange();
  }
  if (Next.isValid()) {
    auto Dec = SM.getDecomposedLoc(SM.getExpansionRange(Next).getEnd());
    if (Dec.first != DecLast.first || Dec.second <= DecLast.second)
      return SourceRange();
  }
  // Now we know that Candidate is a file range that covers [First, Last]
  // without encroaching on {Prev, Next}. Ship it!
  return Candidate;
}

} // namespace

syntax::Token::Token(SourceLocation Location, unsigned Length,
                     tok::TokenKind Kind)
    : Location(Location), Length(Length), Kind(Kind) {
  assert(Location.isValid());
}

syntax::Token::Token(const clang::Token &T)
    : Token(T.getLocation(), T.getLength(), T.getKind()) {
  assert(!T.isAnnotation());
}

llvm::StringRef syntax::Token::text(const SourceManager &SM) const {
  bool Invalid = false;
  const char *Start = SM.getCharacterData(location(), &Invalid);
  assert(!Invalid);
  return llvm::StringRef(Start, length());
}

FileRange syntax::Token::range(const SourceManager &SM) const {
  assert(location().isFileID() && "must be a spelled token");
  FileID File;
  unsigned StartOffset;
  std::tie(File, StartOffset) = SM.getDecomposedLoc(location());
  return FileRange(File, StartOffset, StartOffset + length());
}

FileRange syntax::Token::range(const SourceManager &SM,
                               const syntax::Token &First,
                               const syntax::Token &Last) {
  auto F = First.range(SM);
  auto L = Last.range(SM);
  assert(F.file() == L.file() && "tokens from different files");
  assert((F == L || F.endOffset() <= L.beginOffset()) &&
         "wrong order of tokens");
  return FileRange(F.file(), F.beginOffset(), L.endOffset());
}

llvm::raw_ostream &syntax::operator<<(llvm::raw_ostream &OS, const Token &T) {
  return OS << T.str();
}

FileRange::FileRange(FileID File, unsigned BeginOffset, unsigned EndOffset)
    : File(File), Begin(BeginOffset), End(EndOffset) {
  assert(File.isValid());
  assert(BeginOffset <= EndOffset);
}

FileRange::FileRange(const SourceManager &SM, SourceLocation BeginLoc,
                     unsigned Length) {
  assert(BeginLoc.isValid());
  assert(BeginLoc.isFileID());

  std::tie(File, Begin) = SM.getDecomposedLoc(BeginLoc);
  End = Begin + Length;
}
FileRange::FileRange(const SourceManager &SM, SourceLocation BeginLoc,
                     SourceLocation EndLoc) {
  assert(BeginLoc.isValid());
  assert(BeginLoc.isFileID());
  assert(EndLoc.isValid());
  assert(EndLoc.isFileID());
  assert(SM.getFileID(BeginLoc) == SM.getFileID(EndLoc));
  assert(SM.getFileOffset(BeginLoc) <= SM.getFileOffset(EndLoc));

  std::tie(File, Begin) = SM.getDecomposedLoc(BeginLoc);
  End = SM.getFileOffset(EndLoc);
}

llvm::raw_ostream &syntax::operator<<(llvm::raw_ostream &OS,
                                      const FileRange &R) {
  return OS << llvm::formatv("FileRange(file = {0}, offsets = {1}-{2})",
                             R.file().getHashValue(), R.beginOffset(),
                             R.endOffset());
}

llvm::StringRef FileRange::text(const SourceManager &SM) const {
  bool Invalid = false;
  StringRef Text = SM.getBufferData(File, &Invalid);
  if (Invalid)
    return "";
  assert(Begin <= Text.size());
  assert(End <= Text.size());
  return Text.substr(Begin, length());
}

void TokenBuffer::indexExpandedTokens() {
  // No-op if the index is already created.
  if (!ExpandedTokIndex.empty())
    return;
  ExpandedTokIndex.reserve(ExpandedTokens.size());
  // Index ExpandedTokens for faster lookups by SourceLocation.
  for (size_t I = 0, E = ExpandedTokens.size(); I != E; ++I) {
    SourceLocation Loc = ExpandedTokens[I].location();
    if (Loc.isValid())
      ExpandedTokIndex[Loc] = I;
  }
}

llvm::ArrayRef<syntax::Token> TokenBuffer::expandedTokens(SourceRange R) const {
  if (R.isInvalid())
    return {};
  if (!ExpandedTokIndex.empty()) {
    // Quick lookup if `R` is a token range.
    // This is a huge win since the majority of users use ranges provided by
    // an AST. Ranges in the AST are token ranges from the expanded token
    // stream.
    const auto B = ExpandedTokIndex.find(R.getBegin());
    const auto E = ExpandedTokIndex.find(R.getEnd());
    if (B != ExpandedTokIndex.end() && E != ExpandedTokIndex.end()) {
      const Token *L = ExpandedTokens.data() + B->getSecond();
      // Add 1 to End to make a half-open range.
      const Token *R = ExpandedTokens.data() + E->getSecond() + 1;
      if (L > R)
        return {};
      return {L, R};
    }
  }
  // Slow case. Use `isBeforeInTranslationUnit` to binary search for the
  // required range.
  return getTokensCovering(expandedTokens(), R, *SourceMgr);
}
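
// A typical call, sketched for illustration (`TB` and `E` are assumed to be a
// populated TokenBuffer and an AST node):
//   llvm::ArrayRef<syntax::Token> Toks =
//       TB.expandedTokens(E->getSourceRange());
// AST ranges are token ranges over the expanded stream, so such calls take
// the fast indexed path once indexExpandedTokens() has run.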

CharSourceRange FileRange::toCharRange(const SourceManager &SM) const {
  return CharSourceRange(
      SourceRange(SM.getComposedLoc(File, Begin), SM.getComposedLoc(File, End)),
      /*IsTokenRange=*/false);
}

std::pair<const syntax::Token *, const TokenBuffer::Mapping *>
TokenBuffer::spelledForExpandedToken(const syntax::Token *Expanded) const {
  assert(Expanded);
  assert(ExpandedTokens.data() <= Expanded &&
         Expanded < ExpandedTokens.data() + ExpandedTokens.size());

  auto FileIt = Files.find(
      SourceMgr->getFileID(SourceMgr->getExpansionLoc(Expanded->location())));
  assert(FileIt != Files.end() && "no file for an expanded token");

  const MarkedFile &File = FileIt->second;

  unsigned ExpandedIndex = Expanded - ExpandedTokens.data();
  // Find the first mapping that produced tokens after \p Expanded.
  auto It = llvm::partition_point(File.Mappings, [&](const Mapping &M) {
    return M.BeginExpanded <= ExpandedIndex;
  });
  // Our token could only be produced by the previous mapping.
  if (It == File.Mappings.begin()) {
    // No previous mapping, no need to modify offsets.
    return {&File.SpelledTokens[ExpandedIndex - File.BeginExpanded],
            /*Mapping=*/nullptr};
  }
  --It; // 'It' now points to the last mapping that started before our token.

  // Check if the token is part of the mapping.
  if (ExpandedIndex < It->EndExpanded)
    return {&File.SpelledTokens[It->BeginSpelled], /*Mapping=*/&*It};

  // Not part of the mapping; use the index from the previous mapping to
  // compute the corresponding spelled token.
  return {
      &File.SpelledTokens[It->EndSpelled + (ExpandedIndex - It->EndExpanded)],
      /*Mapping=*/nullptr};
}

const TokenBuffer::Mapping *
TokenBuffer::mappingStartingBeforeSpelled(const MarkedFile &F,
                                          const syntax::Token *Spelled) {
  assert(F.SpelledTokens.data() <= Spelled);
  unsigned SpelledI = Spelled - F.SpelledTokens.data();
  assert(SpelledI < F.SpelledTokens.size());

  auto It = llvm::partition_point(F.Mappings, [SpelledI](const Mapping &M) {
    return M.BeginSpelled <= SpelledI;
  });
  if (It == F.Mappings.begin())
    return nullptr;
  --It;
  return &*It;
}

llvm::SmallVector<llvm::ArrayRef<syntax::Token>, 1>
TokenBuffer::expandedForSpelled(llvm::ArrayRef<syntax::Token> Spelled) const {
  if (Spelled.empty())
    return {};
  const auto &File = fileForSpelled(Spelled);

  auto *FrontMapping = mappingStartingBeforeSpelled(File, &Spelled.front());
  unsigned SpelledFrontI = &Spelled.front() - File.SpelledTokens.data();
  assert(SpelledFrontI < File.SpelledTokens.size());
  unsigned ExpandedBegin;
  if (!FrontMapping) {
    // No mapping that starts before the first token of Spelled; we don't
    // have to modify offsets.
    ExpandedBegin = File.BeginExpanded + SpelledFrontI;
  } else if (SpelledFrontI < FrontMapping->EndSpelled) {
    // This mapping applies to Spelled tokens.
    if (SpelledFrontI != FrontMapping->BeginSpelled) {
      // Spelled tokens don't cover the entire mapping, returning empty result.
      return {}; // FIXME: support macro arguments.
    }
    // Spelled tokens start at the beginning of this mapping.
    ExpandedBegin = FrontMapping->BeginExpanded;
  } else {
    // Spelled tokens start after the mapping ends (they start in the hole
    // between 2 mappings, or between a mapping and the end of the file).
    ExpandedBegin =
        FrontMapping->EndExpanded + (SpelledFrontI - FrontMapping->EndSpelled);
  }

  auto *BackMapping = mappingStartingBeforeSpelled(File, &Spelled.back());
  unsigned SpelledBackI = &Spelled.back() - File.SpelledTokens.data();
  unsigned ExpandedEnd;
  if (!BackMapping) {
    // No mapping that starts before the last token of Spelled; we don't have
    // to modify offsets.
    ExpandedEnd = File.BeginExpanded + SpelledBackI + 1;
  } else if (SpelledBackI < BackMapping->EndSpelled) {
    // This mapping applies to Spelled tokens.
    if (SpelledBackI + 1 != BackMapping->EndSpelled) {
      // Spelled tokens don't cover the entire mapping, returning empty result.
      return {}; // FIXME: support macro arguments.
    }
    ExpandedEnd = BackMapping->EndExpanded;
  } else {
    // Spelled tokens end after the mapping ends.
    ExpandedEnd =
        BackMapping->EndExpanded + (SpelledBackI - BackMapping->EndSpelled) + 1;
  }

  assert(ExpandedBegin < ExpandedTokens.size());
  assert(ExpandedEnd < ExpandedTokens.size());
  // Avoid returning empty ranges.
  if (ExpandedBegin == ExpandedEnd)
    return {};
  return {llvm::makeArrayRef(ExpandedTokens.data() + ExpandedBegin,
                             ExpandedTokens.data() + ExpandedEnd)};
}
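
// For illustration, assuming:
//   #define INC(X) X + 1
//   int a = INC(b);
// the spelled tokens `INC ( b )` map to the expanded tokens `b + 1`, while
// `INC` alone covers only part of the mapping and yields an empty result.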

llvm::ArrayRef<syntax::Token> TokenBuffer::spelledTokens(FileID FID) const {
  auto It = Files.find(FID);
  assert(It != Files.end());
  return It->second.SpelledTokens;
}

const syntax::Token *TokenBuffer::spelledTokenAt(SourceLocation Loc) const {
  assert(Loc.isFileID());
  const auto *Tok = llvm::partition_point(
      spelledTokens(SourceMgr->getFileID(Loc)),
      [&](const syntax::Token &Tok) { return Tok.location() < Loc; });
  if (!Tok || Tok->location() != Loc)
    return nullptr;
  return Tok;
}
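
// Note the exact-match semantics: a Loc pointing into the middle of an
// identifier (rather than at its first character) yields nullptr.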

std::string TokenBuffer::Mapping::str() const {
  return std::string(
      llvm::formatv("spelled tokens: [{0},{1}), expanded tokens: [{2},{3})",
                    BeginSpelled, EndSpelled, BeginExpanded, EndExpanded));
}

llvm::Optional<llvm::ArrayRef<syntax::Token>>
TokenBuffer::spelledForExpanded(llvm::ArrayRef<syntax::Token> Expanded) const {
  // Mapping an empty range is ambiguous in case of empty mappings at either
  // end of the range; bail out in that case.
  if (Expanded.empty())
    return llvm::None;
  const syntax::Token *First = &Expanded.front();
  const syntax::Token *Last = &Expanded.back();
  const syntax::Token *FirstSpelled, *LastSpelled;
  const TokenBuffer::Mapping *FirstMapping, *LastMapping;
  std::tie(FirstSpelled, FirstMapping) = spelledForExpandedToken(First);
  std::tie(LastSpelled, LastMapping) = spelledForExpandedToken(Last);

  FileID FID = SourceMgr->getFileID(FirstSpelled->location());
  // FIXME: Handle multi-file changes by trying to map onto a common root.
  if (FID != SourceMgr->getFileID(LastSpelled->location()))
    return llvm::None;

  const MarkedFile &File = Files.find(FID)->second;

  // If the range is within one macro argument, the result may be only part of
  // a Mapping. We must use the general (SourceManager-based) algorithm.
  if (FirstMapping && FirstMapping == LastMapping &&
      SourceMgr->isMacroArgExpansion(First->location()) &&
      SourceMgr->isMacroArgExpansion(Last->location())) {
    // We use the excluded Prev/Next tokens for bounds checking.
    SourceLocation Prev = (First == &ExpandedTokens.front())
                              ? SourceLocation()
                              : (First - 1)->location();
    SourceLocation Next = (Last == &ExpandedTokens.back())
                              ? SourceLocation()
                              : (Last + 1)->location();
    SourceRange Range = spelledForExpandedSlow(
        First->location(), Last->location(), Prev, Next, FID, *SourceMgr);
    if (Range.isInvalid())
      return llvm::None;
    return getTokensCovering(File.SpelledTokens, Range, *SourceMgr);
  }

  // Otherwise, use the fast version based on Mappings.
  // Do not allow changes that don't cover a full expansion.
  unsigned FirstExpanded = Expanded.begin() - ExpandedTokens.data();
  unsigned LastExpanded = Expanded.end() - ExpandedTokens.data();
  if (FirstMapping && FirstExpanded != FirstMapping->BeginExpanded)
    return llvm::None;
  if (LastMapping && LastMapping->EndExpanded != LastExpanded)
    return llvm::None;
  return llvm::makeArrayRef(
      FirstMapping ? File.SpelledTokens.data() + FirstMapping->BeginSpelled
                   : FirstSpelled,
      LastMapping ? File.SpelledTokens.data() + LastMapping->EndSpelled
                  : LastSpelled + 1);
}
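
// For illustration, assuming:
//   #define FOO(X) X + 1
//   int a = FOO(42);
// mapping the full expansion `42 + 1` back yields the spelled invocation
// `FOO ( 42 )`, while mapping `42` alone takes the slow path above and
// yields the spelled `42` inside the macro argument.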

TokenBuffer::Expansion TokenBuffer::makeExpansion(const MarkedFile &F,
                                                  const Mapping &M) const {
  Expansion E;
  E.Spelled = llvm::makeArrayRef(F.SpelledTokens.data() + M.BeginSpelled,
                                 F.SpelledTokens.data() + M.EndSpelled);
  E.Expanded = llvm::makeArrayRef(ExpandedTokens.data() + M.BeginExpanded,
                                  ExpandedTokens.data() + M.EndExpanded);
  return E;
}

const TokenBuffer::MarkedFile &
TokenBuffer::fileForSpelled(llvm::ArrayRef<syntax::Token> Spelled) const {
  assert(!Spelled.empty());
  assert(Spelled.front().location().isFileID() && "not a spelled token");
  auto FileIt = Files.find(SourceMgr->getFileID(Spelled.front().location()));
  assert(FileIt != Files.end() && "file not tracked by token buffer");
  const auto &File = FileIt->second;
  assert(File.SpelledTokens.data() <= Spelled.data() &&
         Spelled.end() <=
             (File.SpelledTokens.data() + File.SpelledTokens.size()) &&
         "Tokens not in spelled range");
#ifndef NDEBUG
  auto T1 = Spelled.back().location();
  auto T2 = File.SpelledTokens.back().location();
  assert(T1 == T2 || sourceManager().isBeforeInTranslationUnit(T1, T2));
#endif
  return File;
}

llvm::Optional<TokenBuffer::Expansion>
TokenBuffer::expansionStartingAt(const syntax::Token *Spelled) const {
  assert(Spelled);
  const auto &File = fileForSpelled(*Spelled);

  unsigned SpelledIndex = Spelled - File.SpelledTokens.data();
  auto M = llvm::partition_point(File.Mappings, [&](const Mapping &M) {
    return M.BeginSpelled < SpelledIndex;
  });
  if (M == File.Mappings.end() || M->BeginSpelled != SpelledIndex)
    return llvm::None;
  return makeExpansion(File, *M);
}

std::vector<TokenBuffer::Expansion> TokenBuffer::expansionsOverlapping(
    llvm::ArrayRef<syntax::Token> Spelled) const {
  if (Spelled.empty())
    return {};
  const auto &File = fileForSpelled(Spelled);

  // Find the first overlapping range, and then copy until we stop overlapping.
  unsigned SpelledBeginIndex = Spelled.begin() - File.SpelledTokens.data();
  unsigned SpelledEndIndex = Spelled.end() - File.SpelledTokens.data();
  auto M = llvm::partition_point(File.Mappings, [&](const Mapping &M) {
    return M.EndSpelled <= SpelledBeginIndex;
  });
  std::vector<TokenBuffer::Expansion> Expansions;
  for (; M != File.Mappings.end() && M->BeginSpelled < SpelledEndIndex; ++M)
    Expansions.push_back(makeExpansion(File, *M));
  return Expansions;
}

llvm::ArrayRef<syntax::Token>
syntax::spelledTokensTouching(SourceLocation Loc,
                              llvm::ArrayRef<syntax::Token> Tokens) {
  assert(Loc.isFileID());

  auto *Right = llvm::partition_point(
      Tokens, [&](const syntax::Token &Tok) { return Tok.location() < Loc; });
  bool AcceptRight = Right != Tokens.end() && Right->location() <= Loc;
  bool AcceptLeft =
      Right != Tokens.begin() && (Right - 1)->endLocation() >= Loc;
  return llvm::makeArrayRef(Right - (AcceptLeft ? 1 : 0),
                            Right + (AcceptRight ? 1 : 0));
}
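
// E.g. for the spelled tokens of "a+b", a Loc at the `+` touches both `a`
// (which ends there) and `+` (which starts there), so the result is the
// two-token slice [a, +]; a Loc strictly inside an identifier touches only
// that identifier.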

llvm::ArrayRef<syntax::Token>
syntax::spelledTokensTouching(SourceLocation Loc,
                              const syntax::TokenBuffer &Tokens) {
  return spelledTokensTouching(
      Loc, Tokens.spelledTokens(Tokens.sourceManager().getFileID(Loc)));
}

const syntax::Token *
syntax::spelledIdentifierTouching(SourceLocation Loc,
                                  llvm::ArrayRef<syntax::Token> Tokens) {
  for (const syntax::Token &Tok : spelledTokensTouching(Loc, Tokens)) {
    if (Tok.kind() == tok::identifier)
      return &Tok;
  }
  return nullptr;
}

const syntax::Token *
syntax::spelledIdentifierTouching(SourceLocation Loc,
                                  const syntax::TokenBuffer &Tokens) {
  return spelledIdentifierTouching(
      Loc, Tokens.spelledTokens(Tokens.sourceManager().getFileID(Loc)));
}

std::vector<const syntax::Token *>
TokenBuffer::macroExpansions(FileID FID) const {
  auto FileIt = Files.find(FID);
  assert(FileIt != Files.end() && "file not tracked by token buffer");
  auto &File = FileIt->second;
  std::vector<const syntax::Token *> Expansions;
  auto &Spelled = File.SpelledTokens;
  for (auto Mapping : File.Mappings) {
    const syntax::Token *Token = &Spelled[Mapping.BeginSpelled];
    if (Token->kind() == tok::TokenKind::identifier)
      Expansions.push_back(Token);
  }
  return Expansions;
}
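
// E.g. with `#define FOO 1` and a file containing `FOO FOO`, the result
// holds pointers to both spelled `FOO` identifier tokens, one per recorded
// mapping.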

std::vector<syntax::Token> syntax::tokenize(const FileRange &FR,
                                            const SourceManager &SM,
                                            const LangOptions &LO) {
  std::vector<syntax::Token> Tokens;
  IdentifierTable Identifiers(LO);
  auto AddToken = [&](clang::Token T) {
    // Fill the proper token kind for keywords, etc.
    if (T.getKind() == tok::raw_identifier && !T.needsCleaning() &&
        !T.hasUCN()) { // FIXME: support needsCleaning and hasUCN cases.
      clang::IdentifierInfo &II = Identifiers.get(T.getRawIdentifier());
      T.setIdentifierInfo(&II);
      T.setKind(II.getTokenID());
    }
    Tokens.push_back(syntax::Token(T));
  };

  auto SrcBuffer = SM.getBufferData(FR.file());
  Lexer L(SM.getLocForStartOfFile(FR.file()), LO, SrcBuffer.data(),
          SrcBuffer.data() + FR.beginOffset(),
          // We can't make BufEnd point to FR.endOffset, as Lexer requires a
          // null-terminated buffer.
          SrcBuffer.data() + SrcBuffer.size());

  clang::Token T;
  while (!L.LexFromRawLexer(T) && L.getCurrentBufferOffset() < FR.endOffset())
    AddToken(T);
  // LexFromRawLexer returns true when it parses the last token of the file;
  // add it iff it starts within the range we are interested in.
  if (SM.getFileOffset(T.getLocation()) < FR.endOffset())
    AddToken(T);
  return Tokens;
}
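
// A minimal usage sketch (FID, SM and LangOpts are assumed to be in scope,
// and the offsets are illustrative):
//   std::vector<syntax::Token> Raw = syntax::tokenize(
//       syntax::FileRange(FID, /*BeginOffset=*/0, /*EndOffset=*/10), SM,
//       LangOpts);
// Note this is a raw lex: the tokens are not preprocessed, so macro names
// stay unexpanded and only keyword kinds are resolved via the
// IdentifierTable above.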

std::vector<syntax::Token> syntax::tokenize(FileID FID, const SourceManager &SM,
                                            const LangOptions &LO) {
  return tokenize(syntax::FileRange(FID, 0, SM.getFileIDSize(FID)), SM, LO);
}

/// Records information required to construct mappings for the token buffer
/// that we are collecting.
class TokenCollector::CollectPPExpansions : public PPCallbacks {
public:
  CollectPPExpansions(TokenCollector &C) : Collector(&C) {}

  /// A disabled instance stops reporting anything to the TokenCollector.
  /// This ensures that uses of the preprocessor after
  /// TokenCollector::consume() is called do not access the (possibly invalid)
  /// collector instance.
  void disable() { Collector = nullptr; }

  void MacroExpands(const clang::Token &MacroNameTok, const MacroDefinition &MD,
                    SourceRange Range, const MacroArgs *Args) override {
    if (!Collector)
      return;
    const auto &SM = Collector->PP.getSourceManager();
    // Only record top-level expansions that directly produce expanded tokens.
    // This excludes those where:
    //   - the macro use is inside a macro body,
    //   - the macro appears in an argument to another macro.
    // However macro expansion isn't really a tree, it's token rewrite rules,
    // so there are other cases, e.g.
    //   #define B(X) X
    //   #define A 1 + B
    //   A(2)
    // Both A and B produce expanded tokens, though the macro name 'B' comes
    // from an expansion. The best we can do is merge the mappings for both.

    // The *last* token of any top-level macro expansion must be in a file.
    // (In the example above, see the closing paren of the expansion of B.)
    if (!Range.getEnd().isFileID())
      return;
    // If there's a current expansion that encloses this one, this one can't
    // be top-level.
    if (LastExpansionEnd.isValid() &&
        !SM.isBeforeInTranslationUnit(LastExpansionEnd, Range.getEnd()))
      return;

    // If the macro invocation (B) starts in a macro (A) but ends in a file,
    // we'll create a merged mapping for A + B by overwriting the endpoint for
    // A's startpoint.
    if (!Range.getBegin().isFileID()) {
      Range.setBegin(SM.getExpansionLoc(Range.getBegin()));
      assert(Collector->Expansions.count(Range.getBegin()) &&
             "Overlapping macros should have same expansion location");
    }

    Collector->Expansions[Range.getBegin()] = Range.getEnd();
    LastExpansionEnd = Range.getEnd();
  }
  // FIXME: handle directives like #pragma, #include, etc.
private:
  TokenCollector *Collector;
  /// Used to detect recursive macro expansions.
  SourceLocation LastExpansionEnd;
};

/// Fills in the TokenBuffer by tracing the run of a preprocessor. The
/// implementation tracks the tokens, macro expansions and directives coming
/// from the preprocessor and:
/// - for each token, figures out if it is a part of an expanded token stream,
///   spelled token stream or both. Stores the tokens appropriately.
/// - records mappings from the spelled to expanded token ranges, e.g. for
///   macro expansions.
/// FIXME: also properly record:
///   - #include directives,
///   - #pragma, #line and other PP directives,
///   - skipped pp regions,
///   - ...

TokenCollector::TokenCollector(Preprocessor &PP) : PP(PP) {
  // Collect the expanded token stream during preprocessing.
  PP.setTokenWatcher([this](const clang::Token &T) {
    if (T.isAnnotation())
      return;
    DEBUG_WITH_TYPE("collect-tokens", llvm::dbgs()
                                          << "Token: "
                                          << syntax::Token(T).dumpForTests(
                                                 this->PP.getSourceManager())
                                          << "\n");
    Expanded.push_back(syntax::Token(T));
  });
  // And locations of macro calls, to properly recover boundaries of those in
  // case of empty expansions.
  auto CB = std::make_unique<CollectPPExpansions>(*this);
  this->Collector = CB.get();
  PP.addPPCallbacks(std::move(CB));
}
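
// Intended lifecycle, sketched (see Tokens.h for the authoritative contract):
//   TokenCollector Collector(PP);
//   // ... run the preprocessor over the whole translation unit ...
//   TokenBuffer Tokens = std::move(Collector).consume();
// The collector must be installed before preprocessing starts so that the
// token watcher and the PPCallbacks observe every token and expansion.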

/// Builds mappings and spelled tokens in the TokenBuffer based on the expanded
/// token stream.
class TokenCollector::Builder {
public:
  Builder(std::vector<syntax::Token> Expanded, PPExpansions CollectedExpansions,
          const SourceManager &SM, const LangOptions &LangOpts)
      : Result(SM), CollectedExpansions(std::move(CollectedExpansions)), SM(SM),
        LangOpts(LangOpts) {
    Result.ExpandedTokens = std::move(Expanded);
  }

  TokenBuffer build() && {
    assert(!Result.ExpandedTokens.empty());
    assert(Result.ExpandedTokens.back().kind() == tok::eof);

    // Tokenize every file that contributed tokens to the expanded stream.
    buildSpelledTokens();

    // The expanded token stream consists of runs of tokens that came from
    // the same source (a macro expansion, part of a file etc).
    // Between these runs are the logical positions of spelled tokens that
    // didn't expand to anything.
    while (NextExpanded < Result.ExpandedTokens.size() - 1 /* eof */) {
      // Create empty mappings for spelled tokens that expanded to nothing
      // here. May advance NextSpelled, but NextExpanded is unchanged.
      discard();
      // Create mapping for a contiguous run of expanded tokens.
      // Advances NextExpanded past the run, and NextSpelled accordingly.
      unsigned OldPosition = NextExpanded;
      advance();
      if (NextExpanded == OldPosition)
        diagnoseAdvanceFailure();
    }
    // If any tokens remain in any of the files, they didn't expand to
    // anything. Create empty mappings up until the end of the file.
    for (const auto &File : Result.Files)
      discard(File.first);

#ifndef NDEBUG
    for (auto &pair : Result.Files) {
      auto &mappings = pair.second.Mappings;
      assert(llvm::is_sorted(mappings, [](const TokenBuffer::Mapping &M1,
                                          const TokenBuffer::Mapping &M2) {
        return M1.BeginSpelled < M2.BeginSpelled &&
               M1.EndSpelled < M2.EndSpelled &&
               M1.BeginExpanded < M2.BeginExpanded &&
               M1.EndExpanded < M2.EndExpanded;
      }));
    }
#endif

    return std::move(Result);
  }

private:
  // Consume a sequence of spelled tokens that didn't expand to anything.
  // In the simplest case, skips spelled tokens until finding one that produced
  // the NextExpanded token, and creates an empty mapping for them.
  // If Drain is provided, skips remaining tokens from that file instead.
  void discard(llvm::Optional<FileID> Drain = llvm::None) {
    SourceLocation Target =
        Drain ? SM.getLocForEndOfFile(*Drain)
              : SM.getExpansionLoc(
                    Result.ExpandedTokens[NextExpanded].location());
    FileID File = SM.getFileID(Target);
    const auto &SpelledTokens = Result.Files[File].SpelledTokens;
    auto &NextSpelled = this->NextSpelled[File];

    TokenBuffer::Mapping Mapping;
    Mapping.BeginSpelled = NextSpelled;
    // When dropping trailing tokens from a file, the empty mapping should
    // be positioned within the file's expanded-token range (at the end).
    Mapping.BeginExpanded = Mapping.EndExpanded =
        Drain ? Result.Files[*Drain].EndExpanded : NextExpanded;
    // We may want to split into several adjacent empty mappings.
    // FlushMapping() emits the current mapping and starts a new one.
    auto FlushMapping = [&, this] {
      Mapping.EndSpelled = NextSpelled;
      if (Mapping.BeginSpelled != Mapping.EndSpelled)
        Result.Files[File].Mappings.push_back(Mapping);
      Mapping.BeginSpelled = NextSpelled;
    };

    while (NextSpelled < SpelledTokens.size() &&
           SpelledTokens[NextSpelled].location() < Target) {
      // If we know mapping bounds at [NextSpelled, KnownEnd] (macro expansion)
      // then we want to partition our (empty) mapping.
      //   [Start, NextSpelled) [NextSpelled, KnownEnd] (KnownEnd, Target)
      SourceLocation KnownEnd =
          CollectedExpansions.lookup(SpelledTokens[NextSpelled].location());
      if (KnownEnd.isValid()) {
        FlushMapping(); // Emits [Start, NextSpelled)
        while (NextSpelled < SpelledTokens.size() &&
               SpelledTokens[NextSpelled].location() <= KnownEnd)
          ++NextSpelled;
        FlushMapping(); // Emits [NextSpelled, KnownEnd]
        // Now the loop continues and will emit (KnownEnd, Target).
      } else {
        ++NextSpelled;
      }
    }
    FlushMapping();
  }

  // Consumes the NextExpanded token and others that are part of the same run.
  // Increases NextExpanded and NextSpelled by at least one, and adds a mapping
  // (unless this is a run of file tokens, which we represent with no mapping).
  void advance() {
    const syntax::Token &Tok = Result.ExpandedTokens[NextExpanded];
    SourceLocation Expansion = SM.getExpansionLoc(Tok.location());
    FileID File = SM.getFileID(Expansion);
    const auto &SpelledTokens = Result.Files[File].SpelledTokens;
    auto &NextSpelled = this->NextSpelled[File];

    if (Tok.location().isFileID()) {
      // A run of file tokens continues while the expanded/spelled tokens
      // match.
      while (NextSpelled < SpelledTokens.size() &&
             NextExpanded < Result.ExpandedTokens.size() &&
             SpelledTokens[NextSpelled].location() ==
                 Result.ExpandedTokens[NextExpanded].location()) {
        ++NextSpelled;
        ++NextExpanded;
      }
      // We need no mapping for file tokens copied to the expanded stream.
    } else {
      // We found a new macro expansion. We should have its spelling bounds.
      auto End = CollectedExpansions.lookup(Expansion);
      assert(End.isValid() && "Macro expansion wasn't captured?");

      // Mapping starts here...
      TokenBuffer::Mapping Mapping;
      Mapping.BeginExpanded = NextExpanded;
      Mapping.BeginSpelled = NextSpelled;
      // ... consumes spelled tokens within bounds we captured ...
      while (NextSpelled < SpelledTokens.size() &&
             SpelledTokens[NextSpelled].location() <= End)
        ++NextSpelled;
      // ... consumes expanded tokens rooted at the same expansion ...
      while (NextExpanded < Result.ExpandedTokens.size() &&
             SM.getExpansionLoc(
                 Result.ExpandedTokens[NextExpanded].location()) == Expansion)
        ++NextExpanded;
      // ... and ends here.
      Mapping.EndExpanded = NextExpanded;
      Mapping.EndSpelled = NextSpelled;
      Result.Files[File].Mappings.push_back(Mapping);
    }
  }

  // advance() is supposed to consume at least one token - if not, we crash.
  void diagnoseAdvanceFailure() {
#ifndef NDEBUG
    // Show the failed-to-map token in context.
    for (unsigned I = (NextExpanded < 10) ? 0 : NextExpanded - 10;
         I < NextExpanded + 5 && I < Result.ExpandedTokens.size(); ++I) {
      const char *L =
          (I == NextExpanded) ? "!! " : (I < NextExpanded) ? "ok " : "   ";
      llvm::errs() << L << Result.ExpandedTokens[I].dumpForTests(SM) << "\n";
    }
#endif
    llvm_unreachable("Couldn't map expanded token to spelled tokens!");
  }

  /// Initializes TokenBuffer::Files and fills spelled tokens and expanded
  /// ranges for each of the files.
  void buildSpelledTokens() {
    for (unsigned I = 0; I < Result.ExpandedTokens.size(); ++I) {
      const auto &Tok = Result.ExpandedTokens[I];
      auto FID = SM.getFileID(SM.getExpansionLoc(Tok.location()));
      auto It = Result.Files.try_emplace(FID);
      TokenBuffer::MarkedFile &File = It.first->second;

      // The eof token should not be considered part of the main-file's range.
      File.EndExpanded = Tok.kind() == tok::eof ? I : I + 1;

      if (!It.second)
        continue; // we have seen this file before.
      // This is the first time we see this file.
      File.BeginExpanded = I;
      File.SpelledTokens = tokenize(FID, SM, LangOpts);
    }
  }

  TokenBuffer Result;
  unsigned NextExpanded = 0;                    // cursor in ExpandedTokens
  llvm::DenseMap<FileID, unsigned> NextSpelled; // cursor in SpelledTokens
  PPExpansions CollectedExpansions;
  const SourceManager &SM;
  const LangOptions &LangOpts;
};

TokenBuffer TokenCollector::consume() && {
  PP.setTokenWatcher(nullptr);
  Collector->disable();
  return Builder(std::move(Expanded), std::move(Expansions),
                 PP.getSourceManager(), PP.getLangOpts())
      .build();
}

std::string syntax::Token::str() const {
  return std::string(llvm::formatv("Token({0}, length = {1})",
                                   tok::getTokenName(kind()), length()));
}

std::string syntax::Token::dumpForTests(const SourceManager &SM) const {
  return std::string(llvm::formatv("Token(`{0}`, {1}, length = {2})", text(SM),
                                   tok::getTokenName(kind()), length()));
}

std::string TokenBuffer::dumpForTests() const {
  auto PrintToken = [this](const syntax::Token &T) -> std::string {
    if (T.kind() == tok::eof)
      return "<eof>";
    return std::string(T.text(*SourceMgr));
  };

  auto DumpTokens = [this, &PrintToken](llvm::raw_ostream &OS,
                                        llvm::ArrayRef<syntax::Token> Tokens) {
    if (Tokens.empty()) {
      OS << "<empty>";
      return;
    }
    OS << Tokens[0].text(*SourceMgr);
    for (unsigned I = 1; I < Tokens.size(); ++I) {
      if (Tokens[I].kind() == tok::eof)
        continue;
      OS << " " << PrintToken(Tokens[I]);
    }
  };

  std::string Dump;
  llvm::raw_string_ostream OS(Dump);

  OS << "expanded tokens:\n"
     << "  ";
  // (!) we do not show '<eof>'.
  DumpTokens(OS, llvm::makeArrayRef(ExpandedTokens).drop_back());
  OS << "\n";

  std::vector<FileID> Keys;
  for (auto F : Files)
    Keys.push_back(F.first);
  llvm::sort(Keys);

  for (FileID ID : Keys) {
    const MarkedFile &File = Files.find(ID)->second;
    auto *Entry = SourceMgr->getFileEntryForID(ID);
    if (!Entry)
      continue; // Skip builtin files.
    OS << llvm::formatv("file '{0}'\n", Entry->getName())
       << "  spelled tokens:\n"
       << "    ";
    DumpTokens(OS, File.SpelledTokens);
    OS << "\n";

    if (File.Mappings.empty()) {
      OS << "  no mappings.\n";
      continue;
    }
    OS << "  mappings:\n";
    for (auto &M : File.Mappings) {
      OS << llvm::formatv(
          "    ['{0}'_{1}, '{2}'_{3}) => ['{4}'_{5}, '{6}'_{7})\n",
          PrintToken(File.SpelledTokens[M.BeginSpelled]), M.BeginSpelled,
          M.EndSpelled == File.SpelledTokens.size()
              ? "<eof>"
              : PrintToken(File.SpelledTokens[M.EndSpelled]),
          M.EndSpelled, PrintToken(ExpandedTokens[M.BeginExpanded]),
          M.BeginExpanded, PrintToken(ExpandedTokens[M.EndExpanded]),
          M.EndExpanded);
    }
  }
  return Dump;
}