//===--- FormatTokenLexer.cpp - Lex FormatTokens -------------*- C++ ----*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements FormatTokenLexer, which tokenizes a source file
/// into a FormatToken stream suitable for ClangFormat.
///
//===----------------------------------------------------------------------===//

#include "FormatTokenLexer.h"
#include "FormatToken.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Format/Format.h"
#include "llvm/Support/Regex.h"

namespace clang {
namespace format {

FormatTokenLexer::FormatTokenLexer(const SourceManager &SourceMgr, FileID ID,
                                   unsigned Column, const FormatStyle &Style,
                                   encoding::Encoding Encoding)
    : FormatTok(nullptr), IsFirstToken(true), StateStack({LexerState::NORMAL}),
      Column(Column), TrailingWhitespace(0), SourceMgr(SourceMgr), ID(ID),
      Style(Style), IdentTable(getFormattingLangOpts(Style)),
      Keywords(IdentTable), Encoding(Encoding), FirstInLineIndex(0),
      FormattingDisabled(false), MacroBlockBeginRegex(Style.MacroBlockBegin),
      MacroBlockEndRegex(Style.MacroBlockEnd) {
  Lex.reset(new Lexer(ID, SourceMgr.getBuffer(ID), SourceMgr,
                      getFormattingLangOpts(Style)));
  Lex->SetKeepWhitespaceMode(true);

  for (const std::string &ForEachMacro : Style.ForEachMacros)
    Macros.insert({&IdentTable.get(ForEachMacro), TT_ForEachMacro});
  for (const std::string &StatementMacro : Style.StatementMacros)
    Macros.insert({&IdentTable.get(StatementMacro), TT_StatementMacro});
  for (const std::string &TypenameMacro : Style.TypenameMacros)
    Macros.insert({&IdentTable.get(TypenameMacro), TT_TypenameMacro});
  for (const std::string &NamespaceMacro : Style.NamespaceMacros)
    Macros.insert({&IdentTable.get(NamespaceMacro), TT_NamespaceMacro});
}

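// Lexes the entire file into a stream of FormatTokens. After each raw token,
// the language-specific merge and re-lexing passes below are run so that
// multi-token constructs (merged operators, regex literals, template strings,
// line comments) end up as single tokens. The returned stream always ends
// with an eof token.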
ArrayRef<FormatToken *> FormatTokenLexer::lex() {
  assert(Tokens.empty());
  assert(FirstInLineIndex == 0);
  do {
    Tokens.push_back(getNextToken());
    if (Style.Language == FormatStyle::LK_JavaScript) {
      tryParseJSRegexLiteral();
      handleTemplateStrings();
    }
    if (Style.Language == FormatStyle::LK_TextProto)
      tryParsePythonComment();
    tryMergePreviousTokens();
    if (Style.isCSharp())
      // This needs to come after tokens have been merged so that C#
      // string literals are correctly identified.
      handleCSharpVerbatimAndInterpolatedStrings();
    if (Tokens.back()->NewlinesBefore > 0 || Tokens.back()->IsMultiline)
      FirstInLineIndex = Tokens.size() - 1;
  } while (Tokens.back()->Tok.isNot(tok::eof));
  return Tokens;
}

void FormatTokenLexer::tryMergePreviousTokens() {
  if (tryMerge_TMacro())
    return;
  if (tryMergeConflictMarkers())
    return;
  if (tryMergeLessLess())
    return;

  if (Style.isCSharp()) {
    if (tryMergeCSharpKeywordVariables())
      return;
    if (tryMergeCSharpStringLiteral())
      return;
    if (tryMergeCSharpDoubleQuestion())
      return;
    if (tryMergeCSharpNullConditional())
      return;
    if (tryTransformCSharpForEach())
      return;
    static const tok::TokenKind JSRightArrow[] = {tok::equal, tok::greater};
    if (tryMergeTokens(JSRightArrow, TT_JsFatArrow))
      return;
  }

  if (tryMergeNSStringLiteral())
    return;

  if (Style.Language == FormatStyle::LK_JavaScript) {
    static const tok::TokenKind JSIdentity[] = {tok::equalequal, tok::equal};
    static const tok::TokenKind JSNotIdentity[] = {tok::exclaimequal,
                                                   tok::equal};
    static const tok::TokenKind JSShiftEqual[] = {tok::greater, tok::greater,
                                                  tok::greaterequal};
    static const tok::TokenKind JSRightArrow[] = {tok::equal, tok::greater};
    static const tok::TokenKind JSExponentiation[] = {tok::star, tok::star};
    static const tok::TokenKind JSExponentiationEqual[] = {tok::star,
                                                           tok::starequal};
    static const tok::TokenKind JSNullPropagatingOperator[] = {tok::question,
                                                               tok::period};
    static const tok::TokenKind JSNullishOperator[] = {tok::question,
                                                       tok::question};

    // FIXME: Investigate what token type gives the correct operator priority.
    if (tryMergeTokens(JSIdentity, TT_BinaryOperator))
      return;
    if (tryMergeTokens(JSNotIdentity, TT_BinaryOperator))
      return;
    if (tryMergeTokens(JSShiftEqual, TT_BinaryOperator))
      return;
    if (tryMergeTokens(JSRightArrow, TT_JsFatArrow))
      return;
    if (tryMergeTokens(JSExponentiation, TT_JsExponentiation))
      return;
    if (tryMergeTokens(JSExponentiationEqual, TT_JsExponentiationEqual)) {
      Tokens.back()->Tok.setKind(tok::starequal);
      return;
    }
    if (tryMergeTokens(JSNullishOperator, TT_JsNullishCoalescingOperator)) {
      // Treat like the "||" operator (as opposed to the ternary ?).
      Tokens.back()->Tok.setKind(tok::pipepipe);
      return;
    }
    if (tryMergeTokens(JSNullPropagatingOperator,
                       TT_JsNullPropagatingOperator)) {
      // Treat like a regular "." access.
      Tokens.back()->Tok.setKind(tok::period);
      return;
    }
    if (tryMergeJSPrivateIdentifier())
      return;
  }

  if (Style.Language == FormatStyle::LK_Java) {
    static const tok::TokenKind JavaRightLogicalShiftAssign[] = {
        tok::greater, tok::greater, tok::greaterequal};
    if (tryMergeTokens(JavaRightLogicalShiftAssign, TT_BinaryOperator))
      return;
  }
}

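// Merges an '@' token immediately followed by a string literal (e.g. @"abc")
// into a single Objective-C string literal token (TT_ObjCStringLiteral).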
bool FormatTokenLexer::tryMergeNSStringLiteral() {
  if (Tokens.size() < 2)
    return false;
  auto &At = *(Tokens.end() - 2);
  auto &String = *(Tokens.end() - 1);
  if (!At->is(tok::at) || !String->is(tok::string_literal))
    return false;
  At->Tok.setKind(tok::string_literal);
  At->TokenText = StringRef(At->TokenText.begin(),
                            String->TokenText.end() - At->TokenText.begin());
  At->ColumnWidth += String->ColumnWidth;
  At->Type = TT_ObjCStringLiteral;
  Tokens.erase(Tokens.end() - 1);
  return true;
}

bool FormatTokenLexer::tryMergeJSPrivateIdentifier() {
  // Merges #identifier into a single identifier with the text #identifier
  // but the token kind tok::identifier.
  if (Tokens.size() < 2)
    return false;
  auto &Hash = *(Tokens.end() - 2);
  auto &Identifier = *(Tokens.end() - 1);
  if (!Hash->is(tok::hash) || !Identifier->is(tok::identifier))
    return false;
  Hash->Tok.setKind(tok::identifier);
  Hash->TokenText =
      StringRef(Hash->TokenText.begin(),
                Identifier->TokenText.end() - Hash->TokenText.begin());
  Hash->ColumnWidth += Identifier->ColumnWidth;
  Hash->Type = TT_JsPrivateIdentifier;
  Tokens.erase(Tokens.end() - 1);
  return true;
}

// Search for verbatim or interpolated string literals such as @"ABC" or
// $"aaaaa{abc}aaaaa" and mark the token as TT_CSharpStringLiteral to
// prevent splitting of @, $ and ".
// Merging of multiline verbatim strings with embedded '"' is handled in
// handleCSharpVerbatimAndInterpolatedStrings with lower-level lexing.
bool FormatTokenLexer::tryMergeCSharpStringLiteral() {
  if (Tokens.size() < 2)
    return false;

  // Interpolated strings could contain { } with " characters inside.
  // $"{x ?? "null"}"
  // should not be split into $"{x ?? ", null, "}" but should be treated as a
  // single string literal.
  //
  // We opt not to try to format expressions inside {} within a C#
  // interpolated string. Formatting expressions within an interpolated string
  // would require similar work as that done for JavaScript template strings
  // in `handleTemplateStrings()`.
  auto &CSharpInterpolatedString = *(Tokens.end() - 2);
  if (CSharpInterpolatedString->Type == TT_CSharpStringLiteral &&
      (CSharpInterpolatedString->TokenText.startswith(R"($")") ||
       CSharpInterpolatedString->TokenText.startswith(R"($@")"))) {
    int UnmatchedOpeningBraceCount = 0;

    auto TokenTextSize = CSharpInterpolatedString->TokenText.size();
    for (size_t Index = 0; Index < TokenTextSize; ++Index) {
      char C = CSharpInterpolatedString->TokenText[Index];
      if (C == '{') {
        // "{{" inside an interpolated string is an escaped '{' so skip it.
        if (Index + 1 < TokenTextSize &&
            CSharpInterpolatedString->TokenText[Index + 1] == '{') {
          ++Index;
          continue;
        }
        ++UnmatchedOpeningBraceCount;
      } else if (C == '}') {
        // "}}" inside an interpolated string is an escaped '}' so skip it.
        if (Index + 1 < TokenTextSize &&
            CSharpInterpolatedString->TokenText[Index + 1] == '}') {
          ++Index;
          continue;
        }
        --UnmatchedOpeningBraceCount;
      }
    }

    if (UnmatchedOpeningBraceCount > 0) {
      auto &NextToken = *(Tokens.end() - 1);
      CSharpInterpolatedString->TokenText =
          StringRef(CSharpInterpolatedString->TokenText.begin(),
                    NextToken->TokenText.end() -
                        CSharpInterpolatedString->TokenText.begin());
      CSharpInterpolatedString->ColumnWidth += NextToken->ColumnWidth;
      Tokens.erase(Tokens.end() - 1);
      return true;
    }
  }

  // Look for @"aaaaaa" or $"aaaaaa".
  auto &String = *(Tokens.end() - 1);
  if (!String->is(tok::string_literal))
    return false;

  auto &At = *(Tokens.end() - 2);
  if (!(At->is(tok::at) || At->TokenText == "$"))
    return false;

  if (Tokens.size() > 2 && At->is(tok::at)) {
    auto &Dollar = *(Tokens.end() - 3);
    if (Dollar->TokenText == "$") {
      // This looks like $@"aaaaa" so we need to combine all 3 tokens.
      Dollar->Tok.setKind(tok::string_literal);
      Dollar->TokenText =
          StringRef(Dollar->TokenText.begin(),
                    String->TokenText.end() - Dollar->TokenText.begin());
      Dollar->ColumnWidth += (At->ColumnWidth + String->ColumnWidth);
      Dollar->Type = TT_CSharpStringLiteral;
      Tokens.erase(Tokens.end() - 2);
      Tokens.erase(Tokens.end() - 1);
      return true;
    }
  }

  // Convert back into just a string_literal.
  At->Tok.setKind(tok::string_literal);
  At->TokenText = StringRef(At->TokenText.begin(),
                            String->TokenText.end() - At->TokenText.begin());
  At->ColumnWidth += String->ColumnWidth;
  At->Type = TT_CSharpStringLiteral;
  Tokens.erase(Tokens.end() - 1);
  return true;
}

// Valid C# attribute targets:
// https://docs.microsoft.com/en-us/dotnet/csharp/programming-guide/concepts/attributes/#attribute-targets
const llvm::StringSet<> FormatTokenLexer::CSharpAttributeTargets = {
    "assembly", "module",   "field",  "event", "method",
    "param",    "property", "return", "type",
};

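// Merges two consecutive '?' tokens produced by the raw lexer into a single
// TT_CSharpNullCoalescing token representing the C# '??' operator, e.g. in
// `var x = y ?? z;`.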
bool FormatTokenLexer::tryMergeCSharpDoubleQuestion() {
  if (Tokens.size() < 2)
    return false;
  auto &FirstQuestion = *(Tokens.end() - 2);
  auto &SecondQuestion = *(Tokens.end() - 1);
  if (!FirstQuestion->is(tok::question) || !SecondQuestion->is(tok::question))
    return false;
  FirstQuestion->Tok.setKind(tok::question); // no '??' in clang tokens.
  FirstQuestion->TokenText = StringRef(FirstQuestion->TokenText.begin(),
                                       SecondQuestion->TokenText.end() -
                                           FirstQuestion->TokenText.begin());
  FirstQuestion->ColumnWidth += SecondQuestion->ColumnWidth;
  FirstQuestion->Type = TT_CSharpNullCoalescing;
  Tokens.erase(Tokens.end() - 1);
  return true;
}

// Merge '?[' and '?.' pairs into single tokens.
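// These are C#'s null-conditional element access and member access operators,
// e.g. `customers?[0]` and `customer?.Name`.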
bool FormatTokenLexer::tryMergeCSharpNullConditional() {
  if (Tokens.size() < 2)
    return false;
  auto &Question = *(Tokens.end() - 2);
  auto &PeriodOrLSquare = *(Tokens.end() - 1);
  if (!Question->is(tok::question) ||
      !PeriodOrLSquare->isOneOf(tok::l_square, tok::period))
    return false;
  Question->TokenText =
      StringRef(Question->TokenText.begin(),
                PeriodOrLSquare->TokenText.end() - Question->TokenText.begin());
  Question->ColumnWidth += PeriodOrLSquare->ColumnWidth;

  if (PeriodOrLSquare->is(tok::l_square)) {
    Question->Tok.setKind(tok::question); // no '?[' in clang tokens.
    Question->Type = TT_CSharpNullConditionalLSquare;
  } else {
    Question->Tok.setKind(tok::question); // no '?.' in clang tokens.
    Question->Type = TT_CSharpNullConditional;
  }

  Tokens.erase(Tokens.end() - 1);
  return true;
}

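// In C#, '@' before a keyword makes it usable as an identifier (a "verbatim
// identifier"), e.g. `@class` or `@event`. Merge the '@' and the keyword into
// a single identifier token so they are never separated.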
bool FormatTokenLexer::tryMergeCSharpKeywordVariables() {
  if (Tokens.size() < 2)
    return false;
  auto &At = *(Tokens.end() - 2);
  auto &Keyword = *(Tokens.end() - 1);
  if (!At->is(tok::at))
    return false;
  if (!Keywords.isCSharpKeyword(*Keyword))
    return false;

  At->Tok.setKind(tok::identifier);
  At->TokenText = StringRef(At->TokenText.begin(),
                            Keyword->TokenText.end() - At->TokenText.begin());
  At->ColumnWidth += Keyword->ColumnWidth;
  At->Type = Keyword->Type;
  Tokens.erase(Tokens.end() - 1);
  return true;
}

// In C#, transform the identifier 'foreach' into a for keyword (kw_for) with
// type TT_ForEachMacro.
bool FormatTokenLexer::tryTransformCSharpForEach() {
  if (Tokens.empty())
    return false;
  auto &Identifier = *(Tokens.end() - 1);
  if (!Identifier->is(tok::identifier))
    return false;
  if (Identifier->TokenText != "foreach")
    return false;

  Identifier->Type = TT_ForEachMacro;
  Identifier->Tok.setKind(tok::kw_for);
  return true;
}

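// getNextToken() splits a raw '<<' token into two '<' tokens (see the
// tok::lessless handling there); this merges adjacent '<' '<' back into a
// single '<<' unless a neighbouring token is also '<' or there is whitespace
// between the two.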
bool FormatTokenLexer::tryMergeLessLess() {
  // Merge X,less,less,Y into X,lessless,Y unless X or Y is less.
  if (Tokens.size() < 3)
    return false;

  bool FourthTokenIsLess = false;
  if (Tokens.size() > 3)
    FourthTokenIsLess = (Tokens.end() - 4)[0]->is(tok::less);

  auto First = Tokens.end() - 3;
  if (First[2]->is(tok::less) || First[1]->isNot(tok::less) ||
      First[0]->isNot(tok::less) || FourthTokenIsLess)
    return false;

  // Only merge if there currently is no whitespace between the two "<".
  if (First[1]->WhitespaceRange.getBegin() !=
      First[1]->WhitespaceRange.getEnd())
    return false;

  First[0]->Tok.setKind(tok::lessless);
  First[0]->TokenText = "<<";
  First[0]->ColumnWidth += 1;
  Tokens.erase(Tokens.end() - 2);
  return true;
}

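// Merges the trailing tokens of the stream into a single token of type
// NewType if their kinds match Kinds exactly and there is no whitespace
// between them. Used above, e.g., to merge '=' '>' into a JavaScript fat
// arrow or '*' '*' into the exponentiation operator.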
bool FormatTokenLexer::tryMergeTokens(ArrayRef<tok::TokenKind> Kinds,
                                      TokenType NewType) {
  if (Tokens.size() < Kinds.size())
    return false;

  SmallVectorImpl<FormatToken *>::const_iterator First =
      Tokens.end() - Kinds.size();
  if (!First[0]->is(Kinds[0]))
    return false;
  unsigned AddLength = 0;
  for (unsigned i = 1; i < Kinds.size(); ++i) {
    if (!First[i]->is(Kinds[i]) || First[i]->WhitespaceRange.getBegin() !=
                                       First[i]->WhitespaceRange.getEnd())
      return false;
    AddLength += First[i]->TokenText.size();
  }
  Tokens.resize(Tokens.size() - Kinds.size() + 1);
  First[0]->TokenText = StringRef(First[0]->TokenText.data(),
                                  First[0]->TokenText.size() + AddLength);
  First[0]->ColumnWidth += AddLength;
  First[0]->Type = NewType;
  return true;
}

// Returns \c true if \p Tok can only be followed by an operand in JavaScript.
bool FormatTokenLexer::precedesOperand(FormatToken *Tok) {
  // NB: This is not entirely correct, as an r_paren can introduce an operand
  // location in e.g. `if (foo) /bar/.exec(...);`. That is a rare enough
  // corner case to not matter in practice, though.
  return Tok->isOneOf(tok::period, tok::l_paren, tok::comma, tok::l_brace,
                      tok::r_brace, tok::l_square, tok::semi, tok::exclaim,
                      tok::colon, tok::question, tok::tilde) ||
         Tok->isOneOf(tok::kw_return, tok::kw_do, tok::kw_case, tok::kw_throw,
                      tok::kw_else, tok::kw_new, tok::kw_delete, tok::kw_void,
                      tok::kw_typeof, Keywords.kw_instanceof, Keywords.kw_in) ||
         Tok->isBinaryOperator();
}

bool FormatTokenLexer::canPrecedeRegexLiteral(FormatToken *Prev) {
  if (!Prev)
    return true;

  // Regex literals can only follow after prefix unary operators, not after
  // postfix unary operators. If the '++' is followed by a non-operand
  // introducing token, the slash here is the operand and not the start of a
  // regex.
  // `!` is a unary prefix operator, but also a postfix operator that casts
  // away nullability, so the same check applies.
  if (Prev->isOneOf(tok::plusplus, tok::minusminus, tok::exclaim))
    return (Tokens.size() < 3 || precedesOperand(Tokens[Tokens.size() - 3]));

  // The previous token must introduce an operand location where regex
  // literals can occur.
  if (!precedesOperand(Prev))
    return false;

  return true;
}

// Tries to parse a JavaScript Regex literal starting at the current token,
// if that begins with a slash and is in a location where JavaScript allows
// regex literals. Changes the current token to a regex literal and updates
// its text if successful.
void FormatTokenLexer::tryParseJSRegexLiteral() {
  FormatToken *RegexToken = Tokens.back();
  if (!RegexToken->isOneOf(tok::slash, tok::slashequal))
    return;

  FormatToken *Prev = nullptr;
  for (auto I = Tokens.rbegin() + 1, E = Tokens.rend(); I != E; ++I) {
    // NB: Because previous pointers are not initialized yet, this cannot use
    // Token.getPreviousNonComment.
    if ((*I)->isNot(tok::comment)) {
      Prev = *I;
      break;
    }
  }

  if (!canPrecedeRegexLiteral(Prev))
    return;

  // 'Manually' lex ahead in the current file buffer.
  const char *Offset = Lex->getBufferLocation();
  const char *RegexBegin = Offset - RegexToken->TokenText.size();
  StringRef Buffer = Lex->getBuffer();
  bool InCharacterClass = false;
  bool HaveClosingSlash = false;
  for (; !HaveClosingSlash && Offset != Buffer.end(); ++Offset) {
    // Regular expressions are terminated with a '/', which can only be
    // escaped using '\' or a character class between '[' and ']'.
    // See http://www.ecma-international.org/ecma-262/5.1/#sec-7.8.5.
    switch (*Offset) {
    case '\\':
      // Skip the escaped character.
      ++Offset;
      break;
    case '[':
      InCharacterClass = true;
      break;
    case ']':
      InCharacterClass = false;
      break;
    case '/':
      if (!InCharacterClass)
        HaveClosingSlash = true;
      break;
    }
  }

  RegexToken->Type = TT_RegexLiteral;
  // Treat regex literals like other string_literals.
  RegexToken->Tok.setKind(tok::string_literal);
  RegexToken->TokenText = StringRef(RegexBegin, Offset - RegexBegin);
  RegexToken->ColumnWidth = RegexToken->TokenText.size();

  resetLexer(SourceMgr.getFileOffset(Lex->getSourceLocation(Offset)));
}

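// C# verbatim strings (@"...") and verbatim interpolated strings ($@"...")
// may span multiple lines and escape '"' as '""', which the raw lexer does
// not understand. Scan the buffer manually for the terminating quote, fold
// the whole literal into the current token, and restart the lexer after it.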
void FormatTokenLexer::handleCSharpVerbatimAndInterpolatedStrings() {
  FormatToken *CSharpStringLiteral = Tokens.back();

  if (CSharpStringLiteral->Type != TT_CSharpStringLiteral)
    return;

  // Deal with multiline strings.
  if (!(CSharpStringLiteral->TokenText.startswith(R"(@")") ||
        CSharpStringLiteral->TokenText.startswith(R"($@")")))
    return;

  const char *StrBegin =
      Lex->getBufferLocation() - CSharpStringLiteral->TokenText.size();
  const char *Offset = StrBegin;
  if (CSharpStringLiteral->TokenText.startswith(R"(@")"))
    Offset += 2;
  else // CSharpStringLiteral->TokenText.startswith(R"($@")")
    Offset += 3;

  // Look for a terminating '"' in the current file buffer.
  // Make no effort to format code within an interpolated or verbatim string.
  for (; Offset != Lex->getBuffer().end(); ++Offset) {
    if (Offset[0] == '"') {
      // "" within a verbatim string is an escaped double quote: skip it.
      if (Offset + 1 < Lex->getBuffer().end() && Offset[1] == '"')
        ++Offset;
      else
        break;
    }
  }

  // Make no attempt to format code properly if a verbatim string is
  // unterminated.
  if (Offset == Lex->getBuffer().end())
    return;

  StringRef LiteralText(StrBegin, Offset - StrBegin + 1);
  CSharpStringLiteral->TokenText = LiteralText;

  // Adjust width for potentially multiline string literals.
  size_t FirstBreak = LiteralText.find('\n');
  StringRef FirstLineText = FirstBreak == StringRef::npos
                                ? LiteralText
                                : LiteralText.substr(0, FirstBreak);
  CSharpStringLiteral->ColumnWidth = encoding::columnWidthWithTabs(
      FirstLineText, CSharpStringLiteral->OriginalColumn, Style.TabWidth,
      Encoding);
  size_t LastBreak = LiteralText.rfind('\n');
  if (LastBreak != StringRef::npos) {
    CSharpStringLiteral->IsMultiline = true;
    unsigned StartColumn = 0;
    CSharpStringLiteral->LastLineColumnWidth = encoding::columnWidthWithTabs(
        LiteralText.substr(LastBreak + 1, LiteralText.size()), StartColumn,
        Style.TabWidth, Encoding);
  }

  SourceLocation Loc = Offset < Lex->getBuffer().end()
                           ? Lex->getSourceLocation(Offset + 1)
                           : SourceMgr.getLocForEndOfFile(ID);
  resetLexer(SourceMgr.getFileOffset(Loc));
}

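// Tracks JavaScript template strings with a small state machine: a '`' pushes
// TEMPLATE_STRING, '${' inside the template pushes NORMAL for the embedded
// expression, and the matching '}' pops back into the template string. The
// text between the delimiters is folded into a single TT_TemplateString token
// by re-scanning the buffer.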
void FormatTokenLexer::handleTemplateStrings() {
  FormatToken *BacktickToken = Tokens.back();

  if (BacktickToken->is(tok::l_brace)) {
    StateStack.push(LexerState::NORMAL);
    return;
  }
  if (BacktickToken->is(tok::r_brace)) {
    if (StateStack.size() == 1)
      return;
    StateStack.pop();
    if (StateStack.top() != LexerState::TEMPLATE_STRING)
      return;
    // If back in TEMPLATE_STRING, fall through and continue lexing the
    // remainder of the template string below.
  } else if (BacktickToken->is(tok::unknown) &&
             BacktickToken->TokenText == "`") {
    StateStack.push(LexerState::TEMPLATE_STRING);
  } else {
    return; // Not actually a template string.
  }

  // 'Manually' lex ahead in the current file buffer.
  const char *Offset = Lex->getBufferLocation();
  const char *TmplBegin = Offset - BacktickToken->TokenText.size(); // at "`"
  for (; Offset != Lex->getBuffer().end(); ++Offset) {
    if (Offset[0] == '`') {
      StateStack.pop();
      break;
    }
    if (Offset[0] == '\\') {
      ++Offset; // Skip the escaped character.
    } else if (Offset + 1 < Lex->getBuffer().end() && Offset[0] == '$' &&
               Offset[1] == '{') {
      // '${' introduces an expression interpolation in the template string.
      StateStack.push(LexerState::NORMAL);
      ++Offset;
      break;
    }
  }

  StringRef LiteralText(TmplBegin, Offset - TmplBegin + 1);
  BacktickToken->Type = TT_TemplateString;
  BacktickToken->Tok.setKind(tok::string_literal);
  BacktickToken->TokenText = LiteralText;

  // Adjust width for potentially multiline string literals.
  size_t FirstBreak = LiteralText.find('\n');
  StringRef FirstLineText = FirstBreak == StringRef::npos
                                ? LiteralText
                                : LiteralText.substr(0, FirstBreak);
  BacktickToken->ColumnWidth = encoding::columnWidthWithTabs(
      FirstLineText, BacktickToken->OriginalColumn, Style.TabWidth, Encoding);
  size_t LastBreak = LiteralText.rfind('\n');
  if (LastBreak != StringRef::npos) {
    BacktickToken->IsMultiline = true;
    unsigned StartColumn = 0; // The template tail spans the entire line.
    BacktickToken->LastLineColumnWidth = encoding::columnWidthWithTabs(
        LiteralText.substr(LastBreak + 1, LiteralText.size()), StartColumn,
        Style.TabWidth, Encoding);
  }

  SourceLocation Loc = Offset < Lex->getBuffer().end()
                           ? Lex->getSourceLocation(Offset + 1)
                           : SourceMgr.getLocForEndOfFile(ID);
  resetLexer(SourceMgr.getFileOffset(Loc));
}

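// In text protos, '#' starts a Python-style line comment. Turn the '#' token
// and the remainder of the line into a single TT_LineComment token and
// restart the lexer after the newline.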
void FormatTokenLexer::tryParsePythonComment() {
  FormatToken *HashToken = Tokens.back();
  if (!HashToken->isOneOf(tok::hash, tok::hashhash))
    return;
  // Turn the remainder of this line into a comment.
  const char *CommentBegin =
      Lex->getBufferLocation() - HashToken->TokenText.size(); // at "#"
  size_t From = CommentBegin - Lex->getBuffer().begin();
  size_t To = Lex->getBuffer().find_first_of('\n', From);
  if (To == StringRef::npos)
    To = Lex->getBuffer().size();
  size_t Len = To - From;
  HashToken->Type = TT_LineComment;
  HashToken->Tok.setKind(tok::comment);
  HashToken->TokenText = Lex->getBuffer().substr(From, Len);
  SourceLocation Loc = To < Lex->getBuffer().size()
                           ? Lex->getSourceLocation(CommentBegin + Len)
                           : SourceMgr.getLocForEndOfFile(ID);
  resetLexer(SourceMgr.getFileOffset(Loc));
}

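// Merges the four tokens of a _T("...") invocation (e.g. the Windows TCHAR
// _T macro) into a single string literal token so the macro call is laid out
// as one unit.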
bool FormatTokenLexer::tryMerge_TMacro() {
  if (Tokens.size() < 4)
    return false;
  FormatToken *Last = Tokens.back();
  if (!Last->is(tok::r_paren))
    return false;

  FormatToken *String = Tokens[Tokens.size() - 2];
  if (!String->is(tok::string_literal) || String->IsMultiline)
    return false;

  if (!Tokens[Tokens.size() - 3]->is(tok::l_paren))
    return false;

  FormatToken *Macro = Tokens[Tokens.size() - 4];
  if (Macro->TokenText != "_T")
    return false;

  const char *Start = Macro->TokenText.data();
  const char *End = Last->TokenText.data() + Last->TokenText.size();
  String->TokenText = StringRef(Start, End - Start);
  String->IsFirst = Macro->IsFirst;
  String->LastNewlineOffset = Macro->LastNewlineOffset;
  String->WhitespaceRange = Macro->WhitespaceRange;
  String->OriginalColumn = Macro->OriginalColumn;
  String->ColumnWidth = encoding::columnWidthWithTabs(
      String->TokenText, String->OriginalColumn, Style.TabWidth, Encoding);
  String->NewlinesBefore = Macro->NewlinesBefore;
  String->HasUnescapedNewline = Macro->HasUnescapedNewline;

  Tokens.pop_back();
  Tokens.pop_back();
  Tokens.pop_back();
  Tokens.back() = String;
  return true;
}

bool FormatTokenLexer::tryMergeConflictMarkers() {
  if (Tokens.back()->NewlinesBefore == 0 && Tokens.back()->isNot(tok::eof))
    return false;

  // Conflict lines look like:
  // <marker> <text from the vcs>
  // For example:
  // >>>>>>> /file/in/file/system at revision 1234
  //
  // We merge all tokens in a line that starts with a conflict marker
  // into a single token with a special token type that the unwrapped line
  // parser will use to correctly rebuild the underlying code.

  FileID ID;
  // Get the position of the first token in the line.
  unsigned FirstInLineOffset;
  std::tie(ID, FirstInLineOffset) = SourceMgr.getDecomposedLoc(
      Tokens[FirstInLineIndex]->getStartOfNonWhitespace());
  StringRef Buffer = SourceMgr.getBuffer(ID)->getBuffer();
  // Calculate the offset of the start of the current line.
  auto LineOffset = Buffer.rfind('\n', FirstInLineOffset);
  if (LineOffset == StringRef::npos) {
    LineOffset = 0;
  } else {
    ++LineOffset;
  }

  auto FirstSpace = Buffer.find_first_of(" \n", LineOffset);
  StringRef LineStart;
  if (FirstSpace == StringRef::npos) {
    LineStart = Buffer.substr(LineOffset);
  } else {
    LineStart = Buffer.substr(LineOffset, FirstSpace - LineOffset);
  }

  TokenType Type = TT_Unknown;
  if (LineStart == "<<<<<<<" || LineStart == ">>>>") {
    Type = TT_ConflictStart;
  } else if (LineStart == "|||||||" || LineStart == "=======" ||
             LineStart == "====") {
    Type = TT_ConflictAlternative;
  } else if (LineStart == ">>>>>>>" || LineStart == "<<<<") {
    Type = TT_ConflictEnd;
  }

  if (Type != TT_Unknown) {
    FormatToken *Next = Tokens.back();

    Tokens.resize(FirstInLineIndex + 1);
    // We do not need to build a complete token here, as we will skip it
    // during parsing anyway (as we must not touch whitespace around conflict
    // markers).
    Tokens.back()->Type = Type;
    Tokens.back()->Tok.setKind(tok::kw___unknown_anytype);

    Tokens.push_back(Next);
    return true;
  }

  return false;
}

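// Produces the second half of a '<<' or '>>' token that getNextToken() split
// and stashed on the state stack: a synthesized '<' or '>' located one
// character after the first half, with an empty whitespace range.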
FormatToken *FormatTokenLexer::getStashedToken() {
  // Create a synthesized second '>' or '<' token.
  Token Tok = FormatTok->Tok;
  StringRef TokenText = FormatTok->TokenText;

  unsigned OriginalColumn = FormatTok->OriginalColumn;
  FormatTok = new (Allocator.Allocate()) FormatToken;
  FormatTok->Tok = Tok;
  SourceLocation TokLocation =
      FormatTok->Tok.getLocation().getLocWithOffset(Tok.getLength() - 1);
  FormatTok->Tok.setLocation(TokLocation);
  FormatTok->WhitespaceRange = SourceRange(TokLocation, TokLocation);
  FormatTok->TokenText = TokenText;
  FormatTok->ColumnWidth = 1;
  FormatTok->OriginalColumn = OriginalColumn + 1;

  return FormatTok;
}

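// Reads the next token from the raw lexer and turns it into a FormatToken:
// leading whitespace is consumed and recorded (newlines, column tracking,
// escaped newlines), raw identifiers are resolved to keywords, '<<'/'>>' are
// split for later re-merging, and the token's (possibly multiline) column
// width is computed.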
FormatToken *FormatTokenLexer::getNextToken() {
  if (StateStack.top() == LexerState::TOKEN_STASHED) {
    StateStack.pop();
    return getStashedToken();
  }

  FormatTok = new (Allocator.Allocate()) FormatToken;
  readRawToken(*FormatTok);
  SourceLocation WhitespaceStart =
      FormatTok->Tok.getLocation().getLocWithOffset(-TrailingWhitespace);
  FormatTok->IsFirst = IsFirstToken;
  IsFirstToken = false;

  // Consume and record whitespace until we find a significant token.
  unsigned WhitespaceLength = TrailingWhitespace;
  while (FormatTok->Tok.is(tok::unknown)) {
    StringRef Text = FormatTok->TokenText;
    auto EscapesNewline = [&](int pos) {
      // A '\r' here is just part of '\r\n'. Skip it.
      if (pos >= 0 && Text[pos] == '\r')
        --pos;
      // See whether there is an odd number of '\' before this.
      // FIXME: This is wrong. A '\' followed by a newline is always removed,
      // regardless of whether there is another '\' before it.
      // FIXME: Newlines can also be escaped by a '?' '?' '/' trigraph.
      unsigned count = 0;
      for (; pos >= 0; --pos, ++count)
        if (Text[pos] != '\\')
          break;
      return count & 1;
    };
    // FIXME: This miscounts tok::unknown tokens that are not just
    // whitespace, e.g. a '`' character.
    for (int i = 0, e = Text.size(); i != e; ++i) {
      switch (Text[i]) {
      case '\n':
        ++FormatTok->NewlinesBefore;
        FormatTok->HasUnescapedNewline = !EscapesNewline(i - 1);
        FormatTok->LastNewlineOffset = WhitespaceLength + i + 1;
        Column = 0;
        break;
      case '\r':
        FormatTok->LastNewlineOffset = WhitespaceLength + i + 1;
        Column = 0;
        break;
      case '\f':
      case '\v':
        Column = 0;
        break;
      case ' ':
        ++Column;
        break;
      case '\t':
        Column +=
            Style.TabWidth - (Style.TabWidth ? Column % Style.TabWidth : 0);
        break;
      case '\\':
        if (i + 1 == e || (Text[i + 1] != '\r' && Text[i + 1] != '\n'))
          FormatTok->Type = TT_ImplicitStringLiteral;
        break;
      default:
        FormatTok->Type = TT_ImplicitStringLiteral;
        break;
      }
      if (FormatTok->Type == TT_ImplicitStringLiteral)
        break;
    }

    if (FormatTok->is(TT_ImplicitStringLiteral))
      break;
    WhitespaceLength += FormatTok->Tok.getLength();

    readRawToken(*FormatTok);
  }

  // JavaScript and Java do not allow escaping the end of a line with a
  // backslash. Backslashes are syntax errors in plain source, but can occur
  // in comments. When a single line comment ends with a \, it'll cause the
  // next line of code to be lexed as a comment, breaking formatting. The code
  // below finds comments that contain a backslash followed by a line break,
  // truncates the comment token at the backslash, and resets the lexer to
  // restart behind the backslash.
  if ((Style.Language == FormatStyle::LK_JavaScript ||
       Style.Language == FormatStyle::LK_Java) &&
      FormatTok->is(tok::comment) && FormatTok->TokenText.startswith("//")) {
    size_t BackslashPos = FormatTok->TokenText.find('\\');
    while (BackslashPos != StringRef::npos) {
      if (BackslashPos + 1 < FormatTok->TokenText.size() &&
          FormatTok->TokenText[BackslashPos + 1] == '\n') {
        const char *Offset = Lex->getBufferLocation();
        Offset -= FormatTok->TokenText.size();
        Offset += BackslashPos + 1;
        resetLexer(SourceMgr.getFileOffset(Lex->getSourceLocation(Offset)));
        FormatTok->TokenText = FormatTok->TokenText.substr(0, BackslashPos + 1);
        FormatTok->ColumnWidth = encoding::columnWidthWithTabs(
            FormatTok->TokenText, FormatTok->OriginalColumn, Style.TabWidth,
            Encoding);
        break;
      }
      BackslashPos = FormatTok->TokenText.find('\\', BackslashPos + 1);
    }
  }

  // In case the token starts with escaped newlines, we want to
  // take them into account as whitespace - this pattern is quite frequent
  // in macro definitions.
  // FIXME: Add a more explicit test.
  while (FormatTok->TokenText.size() > 1 && FormatTok->TokenText[0] == '\\') {
    unsigned SkippedWhitespace = 0;
    if (FormatTok->TokenText.size() > 2 &&
        (FormatTok->TokenText[1] == '\r' && FormatTok->TokenText[2] == '\n'))
      SkippedWhitespace = 3;
    else if (FormatTok->TokenText[1] == '\n')
      SkippedWhitespace = 2;
    else
      break;

    ++FormatTok->NewlinesBefore;
    WhitespaceLength += SkippedWhitespace;
    FormatTok->LastNewlineOffset = SkippedWhitespace;
    Column = 0;
    FormatTok->TokenText = FormatTok->TokenText.substr(SkippedWhitespace);
  }

  FormatTok->WhitespaceRange = SourceRange(
      WhitespaceStart, WhitespaceStart.getLocWithOffset(WhitespaceLength));

  FormatTok->OriginalColumn = Column;

  TrailingWhitespace = 0;
  if (FormatTok->Tok.is(tok::comment)) {
    // FIXME: Add the trimmed whitespace to Column.
    StringRef UntrimmedText = FormatTok->TokenText;
    FormatTok->TokenText = FormatTok->TokenText.rtrim(" \t\v\f");
    TrailingWhitespace = UntrimmedText.size() - FormatTok->TokenText.size();
  } else if (FormatTok->Tok.is(tok::raw_identifier)) {
    IdentifierInfo &Info = IdentTable.get(FormatTok->TokenText);
    FormatTok->Tok.setIdentifierInfo(&Info);
    FormatTok->Tok.setKind(Info.getTokenID());
    if (Style.Language == FormatStyle::LK_Java &&
        FormatTok->isOneOf(tok::kw_struct, tok::kw_union, tok::kw_delete,
                           tok::kw_operator)) {
      FormatTok->Tok.setKind(tok::identifier);
      FormatTok->Tok.setIdentifierInfo(nullptr);
    } else if (Style.Language == FormatStyle::LK_JavaScript &&
               FormatTok->isOneOf(tok::kw_struct, tok::kw_union,
                                  tok::kw_operator)) {
      FormatTok->Tok.setKind(tok::identifier);
      FormatTok->Tok.setIdentifierInfo(nullptr);
    }
  } else if (FormatTok->Tok.is(tok::greatergreater)) {
    FormatTok->Tok.setKind(tok::greater);
    FormatTok->TokenText = FormatTok->TokenText.substr(0, 1);
    ++Column;
    StateStack.push(LexerState::TOKEN_STASHED);
  } else if (FormatTok->Tok.is(tok::lessless)) {
    FormatTok->Tok.setKind(tok::less);
    FormatTok->TokenText = FormatTok->TokenText.substr(0, 1);
    ++Column;
    StateStack.push(LexerState::TOKEN_STASHED);
  }

  // Now FormatTok is the next non-whitespace token.

  StringRef Text = FormatTok->TokenText;
  size_t FirstNewlinePos = Text.find('\n');
  if (FirstNewlinePos == StringRef::npos) {
    // FIXME: ColumnWidth actually depends on the start column, we need to
    // take this into account when the token is moved.
    FormatTok->ColumnWidth =
        encoding::columnWidthWithTabs(Text, Column, Style.TabWidth, Encoding);
    Column += FormatTok->ColumnWidth;
  } else {
    FormatTok->IsMultiline = true;
    // FIXME: ColumnWidth actually depends on the start column, we need to
    // take this into account when the token is moved.
    FormatTok->ColumnWidth = encoding::columnWidthWithTabs(
        Text.substr(0, FirstNewlinePos), Column, Style.TabWidth, Encoding);

    // The last line of the token always starts in column 0.
    // Thus, the length can be precomputed even in the presence of tabs.
    FormatTok->LastLineColumnWidth = encoding::columnWidthWithTabs(
        Text.substr(Text.find_last_of('\n') + 1), 0, Style.TabWidth, Encoding);
    Column = FormatTok->LastLineColumnWidth;
  }

  if (Style.isCpp()) {
    auto it = Macros.find(FormatTok->Tok.getIdentifierInfo());
    if (!(Tokens.size() > 0 && Tokens.back()->Tok.getIdentifierInfo() &&
          Tokens.back()->Tok.getIdentifierInfo()->getPPKeywordID() ==
              tok::pp_define) &&
        it != Macros.end()) {
      FormatTok->Type = it->second;
    } else if (FormatTok->is(tok::identifier)) {
      if (MacroBlockBeginRegex.match(Text)) {
        FormatTok->Type = TT_MacroBlockBegin;
      } else if (MacroBlockEndRegex.match(Text)) {
        FormatTok->Type = TT_MacroBlockEnd;
      }
    }
  }

  return FormatTok;
}

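// Lexes one raw token into Tok, applies language-specific kind corrections
// (e.g. treating unterminated strings and JavaScript/proto char constants as
// string literals), and toggles FormattingDisabled when a
// "clang-format off/on" comment is seen.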
void FormatTokenLexer::readRawToken(FormatToken &Tok) {
  Lex->LexFromRawLexer(Tok.Tok);
  Tok.TokenText = StringRef(SourceMgr.getCharacterData(Tok.Tok.getLocation()),
                            Tok.Tok.getLength());
  // For formatting, treat unterminated string literals like normal string
  // literals.
  if (Tok.is(tok::unknown)) {
    if (!Tok.TokenText.empty() && Tok.TokenText[0] == '"') {
      Tok.Tok.setKind(tok::string_literal);
      Tok.IsUnterminatedLiteral = true;
    } else if (Style.Language == FormatStyle::LK_JavaScript &&
               Tok.TokenText == "''") {
      Tok.Tok.setKind(tok::string_literal);
    }
  }

  if ((Style.Language == FormatStyle::LK_JavaScript ||
       Style.Language == FormatStyle::LK_Proto ||
       Style.Language == FormatStyle::LK_TextProto) &&
      Tok.is(tok::char_constant)) {
    Tok.Tok.setKind(tok::string_literal);
  }

  if (Tok.is(tok::comment) && (Tok.TokenText == "// clang-format on" ||
                               Tok.TokenText == "/* clang-format on */")) {
    FormattingDisabled = false;
  }

  Tok.Finalized = FormattingDisabled;

  if (Tok.is(tok::comment) && (Tok.TokenText == "// clang-format off" ||
                               Tok.TokenText == "/* clang-format off */")) {
    FormattingDisabled = true;
  }
}

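// Recreates the underlying Lexer at the given byte offset into the file.
// Used after the manual buffer scans above (regex literals, template strings,
// verbatim strings, line comments) so normal lexing resumes right after the
// construct that was consumed by hand.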
void FormatTokenLexer::resetLexer(unsigned Offset) {
  StringRef Buffer = SourceMgr.getBufferData(ID);
  Lex.reset(new Lexer(SourceMgr.getLocForStartOfFile(ID),
                      getFormattingLangOpts(Style), Buffer.begin(),
                      Buffer.begin() + Offset, Buffer.end()));
  Lex->SetKeepWhitespaceMode(true);
  TrailingWhitespace = 0;
}

} // namespace format
} // namespace clang