1 //===- Lexer.cpp - C Language Family Lexer --------------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements the Lexer and Token interfaces. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "clang/Lex/Lexer.h" 14 #include "UnicodeCharSets.h" 15 #include "clang/Basic/CharInfo.h" 16 #include "clang/Basic/Diagnostic.h" 17 #include "clang/Basic/IdentifierTable.h" 18 #include "clang/Basic/LLVM.h" 19 #include "clang/Basic/LangOptions.h" 20 #include "clang/Basic/SourceLocation.h" 21 #include "clang/Basic/SourceManager.h" 22 #include "clang/Basic/TokenKinds.h" 23 #include "clang/Lex/LexDiagnostic.h" 24 #include "clang/Lex/LiteralSupport.h" 25 #include "clang/Lex/MultipleIncludeOpt.h" 26 #include "clang/Lex/Preprocessor.h" 27 #include "clang/Lex/PreprocessorOptions.h" 28 #include "clang/Lex/Token.h" 29 #include "llvm/ADT/None.h" 30 #include "llvm/ADT/Optional.h" 31 #include "llvm/ADT/STLExtras.h" 32 #include "llvm/ADT/StringExtras.h" 33 #include "llvm/ADT/StringRef.h" 34 #include "llvm/ADT/StringSwitch.h" 35 #include "llvm/Support/Compiler.h" 36 #include "llvm/Support/ConvertUTF.h" 37 #include "llvm/Support/MathExtras.h" 38 #include "llvm/Support/MemoryBufferRef.h" 39 #include "llvm/Support/NativeFormatting.h" 40 #include "llvm/Support/UnicodeCharRanges.h" 41 #include <algorithm> 42 #include <cassert> 43 #include <cstddef> 44 #include <cstdint> 45 #include <cstring> 46 #include <string> 47 #include <tuple> 48 #include <utility> 49 50 using namespace clang; 51 52 //===----------------------------------------------------------------------===// 53 // Token Class Implementation 54 
//===----------------------------------------------------------------------===//

/// isObjCAtKeyword - Return true if we have an ObjC keyword identifier.
bool Token::isObjCAtKeyword(tok::ObjCKeywordKind objcKey) const {
  if (isAnnotation())
    return false;
  if (IdentifierInfo *II = getIdentifierInfo())
    return II->getObjCKeywordID() == objcKey;
  return false;
}

/// getObjCKeywordID - Return the ObjC keyword kind.
tok::ObjCKeywordKind Token::getObjCKeywordID() const {
  // Annotation tokens carry no identifier, so they can never be ObjC keywords.
  if (isAnnotation())
    return tok::objc_not_keyword;
  IdentifierInfo *specId = getIdentifierInfo();
  return specId ? specId->getObjCKeywordID() : tok::objc_not_keyword;
}

//===----------------------------------------------------------------------===//
// Lexer Class Implementation
//===----------------------------------------------------------------------===//

void Lexer::anchor() {}

/// InitLexer - Establish the initial lexer state for the buffer
/// [BufStart, BufEnd).  BufPtr is the position to start lexing from (it may
/// differ from BufStart when resuming mid-buffer).  Shared by all
/// constructors; the buffer must be NUL-terminated at BufEnd.
void Lexer::InitLexer(const char *BufStart, const char *BufPtr,
                      const char *BufEnd) {
  BufferStart = BufStart;
  BufferPtr = BufPtr;
  BufferEnd = BufEnd;

  assert(BufEnd[0] == 0 &&
         "We assume that the input buffer has a null character at the end"
         " to simplify lexing!");

  // Check whether we have a BOM in the beginning of the buffer. If yes - act
  // accordingly. Right now we support only UTF-8 with and without BOM, so, just
  // skip the UTF-8 BOM if it's present.  Only do this when lexing from the
  // very start of the buffer, not when resuming at BufPtr mid-buffer.
  if (BufferStart == BufferPtr) {
    // Determine the size of the BOM.
    StringRef Buf(BufferStart, BufferEnd - BufferStart);
    size_t BOMLength = llvm::StringSwitch<size_t>(Buf)
                           .StartsWith("\xEF\xBB\xBF", 3) // UTF-8 BOM
                           .Default(0);

    // Skip the BOM.
    BufferPtr += BOMLength;
  }

  Is_PragmaLexer = false;
  CurrentConflictMarkerState = CMK_None;

  // Start of the file is a start of line.
  IsAtStartOfLine = true;
  IsAtPhysicalStartOfLine = true;

  HasLeadingSpace = false;
  HasLeadingEmptyMacro = false;

  // We are not after parsing a #.
  ParsingPreprocessorDirective = false;

  // We are not after parsing #include.
  ParsingFilename = false;

  // We are not in raw mode.  Raw mode disables diagnostics and interpretation
  // of tokens (e.g. identifiers, thus disabling macro expansion).  It is used
  // to quickly lex the tokens of the buffer, e.g. when handling a "#if 0" block
  // or otherwise skipping over tokens.
  LexingRawMode = false;

  // Default to not keeping comments.
  ExtendedTokenMode = 0;

  // No newline seen yet in this buffer.
  NewLinePtr = nullptr;
}

/// Lexer constructor - Create a new lexer object for the specified buffer
/// with the specified preprocessor managing the lexing process.  This lexer
/// assumes that the associated file buffer and Preprocessor objects will
/// outlive it, so it doesn't take ownership of either of them.
Lexer::Lexer(FileID FID, const llvm::MemoryBufferRef &InputFile,
             Preprocessor &PP, bool IsFirstIncludeOfFile)
    : PreprocessorLexer(&PP, FID),
      FileLoc(PP.getSourceManager().getLocForStartOfFile(FID)),
      LangOpts(PP.getLangOpts()), LineComment(LangOpts.LineComment),
      IsFirstTimeLexingFile(IsFirstIncludeOfFile) {
  InitLexer(InputFile.getBufferStart(), InputFile.getBufferStart(),
            InputFile.getBufferEnd());

  // Comment/whitespace retention depends on preprocessor state, so set it
  // after the PP-backed lexer is fully initialized.
  resetExtendedTokenMode();
}

/// Lexer constructor - Create a new raw lexer object.  This object is only
/// suitable for calls to 'LexFromRawLexer'.  This lexer assumes that the text
/// range will outlive it, so it doesn't take ownership of it.
Lexer::Lexer(SourceLocation fileloc, const LangOptions &langOpts,
             const char *BufStart, const char *BufPtr, const char *BufEnd,
             bool IsFirstIncludeOfFile)
    : FileLoc(fileloc), LangOpts(langOpts), LineComment(LangOpts.LineComment),
      IsFirstTimeLexingFile(IsFirstIncludeOfFile) {
  InitLexer(BufStart, BufPtr, BufEnd);

  // We *are* in raw mode.
  LexingRawMode = true;
}

/// Lexer constructor - Create a new raw lexer object.  This object is only
/// suitable for calls to 'LexFromRawLexer'.  This lexer assumes that the text
/// range will outlive it, so it doesn't take ownership of it.
Lexer::Lexer(FileID FID, const llvm::MemoryBufferRef &FromFile,
             const SourceManager &SM, const LangOptions &langOpts,
             bool IsFirstIncludeOfFile)
    : Lexer(SM.getLocForStartOfFile(FID), langOpts, FromFile.getBufferStart(),
            FromFile.getBufferStart(), FromFile.getBufferEnd(),
            IsFirstIncludeOfFile) {}

/// Re-derive comment/whitespace retention from language options and the
/// preprocessor's current comment-retention state.
void Lexer::resetExtendedTokenMode() {
  assert(PP && "Cannot reset token mode without a preprocessor");
  if (LangOpts.TraditionalCPP)
    SetKeepWhitespaceMode(true);
  else
    SetCommentRetentionState(PP->getCommentRetentionState());
}

/// Create_PragmaLexer: Lexer constructor - Create a new lexer object for
/// _Pragma expansion.  This has a variety of magic semantics that this method
/// sets up.  It returns a new'd Lexer that must be delete'd when done.
///
/// On entrance to this routine, TokStartLoc is a macro location which has a
/// spelling loc that indicates the bytes to be lexed for the token and an
/// expansion location that indicates where all lexed tokens should be
/// "expanded from".
///
/// TODO: It would really be nice to make _Pragma just be a wrapper around a
/// normal lexer that remaps tokens as they fly by.  This would require making
/// Preprocessor::Lex virtual.  Given that, we could just dump in a magic lexer
/// interface that could handle this stuff.  This would pull GetMappedTokenLoc
/// out of the critical path of the lexer!
///
Lexer *Lexer::Create_PragmaLexer(SourceLocation SpellingLoc,
                                 SourceLocation ExpansionLocStart,
                                 SourceLocation ExpansionLocEnd,
                                 unsigned TokLen, Preprocessor &PP) {
  SourceManager &SM = PP.getSourceManager();

  // Create the lexer as if we were going to lex the file normally.
  FileID SpellingFID = SM.getFileID(SpellingLoc);
  llvm::MemoryBufferRef InputFile = SM.getBufferOrFake(SpellingFID);
  Lexer *L = new Lexer(SpellingFID, InputFile, PP);

  // Now that the lexer is created, change the start/end locations so that we
  // just lex the subsection of the file that we want.  This is lexing from a
  // scratch buffer.
  const char *StrData = SM.getCharacterData(SpellingLoc);

  L->BufferPtr = StrData;
  L->BufferEnd = StrData+TokLen;
  assert(L->BufferEnd[0] == 0 && "Buffer is not nul terminated!");

  // Set the SourceLocation with the remapping information.  This ensures that
  // GetMappedTokenLoc will remap the tokens as they are lexed.
  L->FileLoc = SM.createExpansionLoc(SM.getLocForStartOfFile(SpellingFID),
                                     ExpansionLocStart,
                                     ExpansionLocEnd, TokLen);

  // Ensure that the lexer thinks it is inside a directive, so that end \n will
  // return an EOD token.
  L->ParsingPreprocessorDirective = true;

  // This lexer really is for _Pragma.
  L->Is_PragmaLexer = true;
  return L;
}

/// Advance the buffer pointer by \p NumBytes, marking the new position as a
/// (physical) start of line.  Returns true if the skip would run past the end
/// of the buffer (in which case the position is left unchanged).
bool Lexer::skipOver(unsigned NumBytes) {
  IsAtPhysicalStartOfLine = true;
  IsAtStartOfLine = true;
  if ((BufferPtr + NumBytes) > BufferEnd)
    return true;
  BufferPtr += NumBytes;
  return false;
}

/// Escape \p Str in place so it can be embedded in a string or character
/// literal delimited by \p Quote: backslashes and the quote character are
/// backslash-escaped, and newlines/carriage returns become "\n".
template <typename T> static void StringifyImpl(T &Str, char Quote) {
  typename T::size_type i = 0, e = Str.size();
  while (i < e) {
    if (Str[i] == '\\' || Str[i] == Quote) {
      Str.insert(Str.begin() + i, '\\');
      i += 2;
      ++e;
    } else if (Str[i] == '\n' || Str[i] == '\r') {
      // Replace '\r\n' and '\n\r' to '\\' followed by 'n'.
      if ((i < e - 1) && (Str[i + 1] == '\n' || Str[i + 1] == '\r') &&
          Str[i] != Str[i + 1]) {
        Str[i] = '\\';
        Str[i + 1] = 'n';
      } else {
        // Replace '\n' and '\r' to '\\' followed by 'n'.
        Str[i] = '\\';
        Str.insert(Str.begin() + i + 1, 'n');
        ++e;
      }
      i += 2;
    } else
      ++i;
  }
}

std::string Lexer::Stringify(StringRef Str, bool Charify) {
  std::string Result = std::string(Str);
  char Quote = Charify ? '\'' : '"';
  StringifyImpl(Result, Quote);
  return Result;
}

void Lexer::Stringify(SmallVectorImpl<char> &Str) { StringifyImpl(Str, '"'); }

//===----------------------------------------------------------------------===//
// Token Spelling
//===----------------------------------------------------------------------===//

/// Slow case of getSpelling. Extract the characters comprising the
/// spelling of this token from the provided input buffer.
static size_t getSpellingSlow(const Token &Tok, const char *BufPtr,
                              const LangOptions &LangOpts, char *Spelling) {
  assert(Tok.needsCleaning() && "getSpellingSlow called on simple token");

  size_t Length = 0;
  const char *BufEnd = BufPtr + Tok.getLength();

  if (tok::isStringLiteral(Tok.getKind())) {
    // Munch the encoding-prefix and opening double-quote.
    while (BufPtr < BufEnd) {
      unsigned Size;
      Spelling[Length++] = Lexer::getCharAndSizeNoWarn(BufPtr, Size, LangOpts);
      BufPtr += Size;

      if (Spelling[Length - 1] == '"')
        break;
    }

    // Raw string literals need special handling; trigraph expansion and line
    // splicing do not occur within their d-char-sequence nor within their
    // r-char-sequence.
    if (Length >= 2 &&
        Spelling[Length - 2] == 'R' && Spelling[Length - 1] == '"') {
      // Search backwards from the end of the token to find the matching closing
      // quote.
      const char *RawEnd = BufEnd;
      do --RawEnd; while (*RawEnd != '"');
      size_t RawLength = RawEnd - BufPtr + 1;

      // Everything between the quotes is included verbatim in the spelling.
      memcpy(Spelling + Length, BufPtr, RawLength);
      Length += RawLength;
      BufPtr += RawLength;

      // The rest of the token is lexed normally.
    }
  }

  // Copy the remaining characters, collapsing trigraphs/escaped newlines.
  while (BufPtr < BufEnd) {
    unsigned Size;
    Spelling[Length++] = Lexer::getCharAndSizeNoWarn(BufPtr, Size, LangOpts);
    BufPtr += Size;
  }

  assert(Length < Tok.getLength() &&
         "NeedsCleaning flag set on token that didn't need cleaning!");
  return Length;
}

/// getSpelling() - Return the 'spelling' of this token.  The spelling of a
/// token are the characters used to represent the token in the source file
/// after trigraph expansion and escaped-newline folding.  In particular, this
/// wants to get the true, uncanonicalized, spelling of things like digraphs
/// UCNs, etc.
StringRef Lexer::getSpelling(SourceLocation loc,
                             SmallVectorImpl<char> &buffer,
                             const SourceManager &SM,
                             const LangOptions &options,
                             bool *invalid) {
  // Break down the source location.
  std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(loc);

  // Try to load the file buffer.
  bool invalidTemp = false;
  StringRef file = SM.getBufferData(locInfo.first, &invalidTemp);
  if (invalidTemp) {
    if (invalid) *invalid = true;
    return {};
  }

  const char *tokenBegin = file.data() + locInfo.second;

  // Lex from the start of the given location.
  Lexer lexer(SM.getLocForStartOfFile(locInfo.first), options,
              file.begin(), tokenBegin, file.end());
  Token token;
  lexer.LexFromRawLexer(token);

  unsigned length = token.getLength();

  // Common case: no need for cleaning.
  if (!token.needsCleaning())
    return StringRef(tokenBegin, length);

  // Hard case, we need to relex the characters into the string.
  buffer.resize(length);
  buffer.resize(getSpellingSlow(token, tokenBegin, options, buffer.data()));
  return StringRef(buffer.data(), buffer.size());
}

/// getSpelling() - Return the 'spelling' of this token.  The spelling of a
/// token are the characters used to represent the token in the source file
/// after trigraph expansion and escaped-newline folding.  In particular, this
/// wants to get the true, uncanonicalized, spelling of things like digraphs
/// UCNs, etc.
std::string Lexer::getSpelling(const Token &Tok, const SourceManager &SourceMgr,
                               const LangOptions &LangOpts, bool *Invalid) {
  assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");

  bool CharDataInvalid = false;
  const char *TokStart = SourceMgr.getCharacterData(Tok.getLocation(),
                                                    &CharDataInvalid);
  if (Invalid)
    *Invalid = CharDataInvalid;
  if (CharDataInvalid)
    return {};

  // If this token contains nothing interesting, return it directly.
  if (!Tok.needsCleaning())
    return std::string(TokStart, TokStart + Tok.getLength());

  std::string Result;
  Result.resize(Tok.getLength());
  Result.resize(getSpellingSlow(Tok, TokStart, LangOpts, &*Result.begin()));
  return Result;
}

/// getSpelling - This method is used to get the spelling of a token into a
/// preallocated buffer, instead of as an std::string.  The caller is required
/// to allocate enough space for the token, which is guaranteed to be at least
/// Tok.getLength() bytes long.  The actual length of the token is returned.
///
/// Note that this method may do two possible things: it may either fill in
/// the buffer specified with characters, or it may *change the input pointer*
/// to point to a constant buffer with the data already in it (avoiding a
/// copy).  The caller is not allowed to modify the returned buffer pointer
/// if an internal buffer is returned.
unsigned Lexer::getSpelling(const Token &Tok, const char *&Buffer,
                            const SourceManager &SourceMgr,
                            const LangOptions &LangOpts, bool *Invalid) {
  assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");

  const char *TokStart = nullptr;
  // NOTE: this has to be checked *before* testing for an IdentifierInfo.
  if (Tok.is(tok::raw_identifier))
    TokStart = Tok.getRawIdentifier().data();
  else if (!Tok.hasUCN()) {
    if (const IdentifierInfo *II = Tok.getIdentifierInfo()) {
      // Just return the string from the identifier table, which is very quick.
      Buffer = II->getNameStart();
      return II->getLength();
    }
  }

  // NOTE: this can be checked even after testing for an IdentifierInfo.
  if (Tok.isLiteral())
    TokStart = Tok.getLiteralData();

  if (!TokStart) {
    // Compute the start of the token in the input lexer buffer.
    bool CharDataInvalid = false;
    TokStart = SourceMgr.getCharacterData(Tok.getLocation(), &CharDataInvalid);
    if (Invalid)
      *Invalid = CharDataInvalid;
    if (CharDataInvalid) {
      Buffer = "";
      return 0;
    }
  }

  // If this token contains nothing interesting, return it directly.
  if (!Tok.needsCleaning()) {
    Buffer = TokStart;
    return Tok.getLength();
  }

  // Otherwise, hard case, relex the characters into the string.
  return getSpellingSlow(Tok, TokStart, LangOpts, const_cast<char*>(Buffer));
}

/// MeasureTokenLength - Relex the token at the specified location and return
/// its length in bytes in the input file.  If the token needs cleaning (e.g.
/// includes a trigraph or an escaped newline) then this count includes bytes
/// that are part of that.
unsigned Lexer::MeasureTokenLength(SourceLocation Loc,
                                   const SourceManager &SM,
                                   const LangOptions &LangOpts) {
  Token TheTok;
  if (getRawToken(Loc, TheTok, SM, LangOpts))
    return 0;
  return TheTok.getLength();
}

/// Relex the token at the specified location.
/// \returns true if there was a failure, false on success.
bool Lexer::getRawToken(SourceLocation Loc, Token &Result,
                        const SourceManager &SM,
                        const LangOptions &LangOpts,
                        bool IgnoreWhiteSpace) {
  // TODO: this could be special cased for common tokens like identifiers, ')',
  // etc to make this faster, if it mattered.  Just look at StrData[0] to handle
  // all obviously single-char tokens.  This could use
  // Lexer::isObviouslySimpleCharacter for example to handle identifiers or
  // something.

  // If this comes from a macro expansion, we really do want the macro name, not
  // the token this macro expanded to.
  Loc = SM.getExpansionLoc(Loc);
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
  bool Invalid = false;
  StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
  if (Invalid)
    return true;

  const char *StrData = Buffer.data()+LocInfo.second;

  // A location pointing at whitespace cannot be the start of a token.
  if (!IgnoreWhiteSpace && isWhitespace(StrData[0]))
    return true;

  // Create a lexer starting at the beginning of this token.
  Lexer TheLexer(SM.getLocForStartOfFile(LocInfo.first), LangOpts,
                 Buffer.begin(), StrData, Buffer.end());
  TheLexer.SetCommentRetentionState(true);
  TheLexer.LexFromRawLexer(Result);
  return false;
}

/// Returns the pointer that points to the beginning of line that contains
/// the given offset, or null if the offset is invalid.
static const char *findBeginningOfLine(StringRef Buffer, unsigned Offset) {
  const char *BufStart = Buffer.data();
  if (Offset >= Buffer.size())
    return nullptr;

  // Walk backwards until we find a newline that is not an escaped newline
  // (a backslash-newline continues the logical line).
  const char *LexStart = BufStart + Offset;
  for (; LexStart != BufStart; --LexStart) {
    if (isVerticalWhitespace(LexStart[0]) &&
        !Lexer::isNewLineEscaped(BufStart, LexStart)) {
      // LexStart should point at first character of logical line.
      ++LexStart;
      break;
    }
  }
  return LexStart;
}

/// Find the start of the token that contains \p Loc (a file location) by
/// backing up to the beginning of its logical line and relexing forward.
static SourceLocation getBeginningOfFileToken(SourceLocation Loc,
                                              const SourceManager &SM,
                                              const LangOptions &LangOpts) {
  assert(Loc.isFileID());
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
  if (LocInfo.first.isInvalid())
    return Loc;

  bool Invalid = false;
  StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
  if (Invalid)
    return Loc;

  // Back up from the current location until we hit the beginning of a line
  // (or the buffer).  We'll relex from that point.
  const char *StrData = Buffer.data() + LocInfo.second;
  const char *LexStart = findBeginningOfLine(Buffer, LocInfo.second);
  if (!LexStart || LexStart == StrData)
    return Loc;

  // Create a lexer starting at the beginning of this token.
  SourceLocation LexerStartLoc = Loc.getLocWithOffset(-LocInfo.second);
  Lexer TheLexer(LexerStartLoc, LangOpts, Buffer.data(), LexStart,
                 Buffer.end());
  TheLexer.SetCommentRetentionState(true);

  // Lex tokens until we find the token that contains the source location.
  Token TheTok;
  do {
    TheLexer.LexFromRawLexer(TheTok);

    if (TheLexer.getBufferLocation() > StrData) {
      // Lexing this token has taken the lexer past the source location we're
      // looking for.  If the current token encompasses our source location,
      // return the beginning of that token.
      if (TheLexer.getBufferLocation() - TheTok.getLength() <= StrData)
        return TheTok.getLocation();

      // We ended up skipping over the source location entirely, which means
      // that it points into whitespace.  We're done here.
      break;
    }
  } while (TheTok.getKind() != tok::eof);

  // We've passed our source location; just return the original source location.
  return Loc;
}

SourceLocation Lexer::GetBeginningOfToken(SourceLocation Loc,
                                          const SourceManager &SM,
                                          const LangOptions &LangOpts) {
  if (Loc.isFileID())
    return getBeginningOfFileToken(Loc, SM, LangOpts);

  if (!SM.isMacroArgExpansion(Loc))
    return Loc;

  // Find the token start in the spelling buffer, then translate the offset
  // delta back into the macro-argument expansion location.
  SourceLocation FileLoc = SM.getSpellingLoc(Loc);
  SourceLocation BeginFileLoc = getBeginningOfFileToken(FileLoc, SM, LangOpts);
  std::pair<FileID, unsigned> FileLocInfo = SM.getDecomposedLoc(FileLoc);
  std::pair<FileID, unsigned> BeginFileLocInfo =
      SM.getDecomposedLoc(BeginFileLoc);
  assert(FileLocInfo.first == BeginFileLocInfo.first &&
         FileLocInfo.second >= BeginFileLocInfo.second);
  return Loc.getLocWithOffset(BeginFileLocInfo.second - FileLocInfo.second);
}

namespace {

enum PreambleDirectiveKind {
  PDK_Skipped,
  PDK_Unknown
};

} // namespace

PreambleBounds Lexer::ComputePreamble(StringRef Buffer,
                                      const LangOptions &LangOpts,
                                      unsigned MaxLines) {
  // Create a lexer starting at the beginning of the file. Note that we use a
  // "fake" file source location at offset 1 so that the lexer will track our
  // position within the file.
  const SourceLocation::UIntTy StartOffset = 1;
  SourceLocation FileLoc = SourceLocation::getFromRawEncoding(StartOffset);
  Lexer TheLexer(FileLoc, LangOpts, Buffer.begin(), Buffer.begin(),
                 Buffer.end());
  TheLexer.SetCommentRetentionState(true);

  bool InPreprocessorDirective = false;
  Token TheTok;
  SourceLocation ActiveCommentLoc;

  // Translate the MaxLines line limit into a byte offset limit, so the token
  // loop below only has to compare offsets.
  unsigned MaxLineOffset = 0;
  if (MaxLines) {
    const char *CurPtr = Buffer.begin();
    unsigned CurLine = 0;
    while (CurPtr != Buffer.end()) {
      char ch = *CurPtr++;
      if (ch == '\n') {
        ++CurLine;
        if (CurLine == MaxLines)
          break;
      }
    }
    if (CurPtr != Buffer.end())
      MaxLineOffset = CurPtr - Buffer.begin();
  }

  do {
    TheLexer.LexFromRawLexer(TheTok);

    if (InPreprocessorDirective) {
      // If we've hit the end of the file, we're done.
      if (TheTok.getKind() == tok::eof) {
        break;
      }

      // If we haven't hit the end of the preprocessor directive, skip this
      // token.
      if (!TheTok.isAtStartOfLine())
        continue;

      // We've passed the end of the preprocessor directive, and will look
      // at this token again below.
      InPreprocessorDirective = false;
    }

    // Keep track of the # of lines in the preamble.
    if (TheTok.isAtStartOfLine()) {
      unsigned TokOffset = TheTok.getLocation().getRawEncoding() - StartOffset;

      // If we were asked to limit the number of lines in the preamble,
      // and we're about to exceed that limit, we're done.
      if (MaxLineOffset && TokOffset >= MaxLineOffset)
        break;
    }

    // Comments are okay; skip over them.
    if (TheTok.getKind() == tok::comment) {
      if (ActiveCommentLoc.isInvalid())
        ActiveCommentLoc = TheTok.getLocation();
      continue;
    }

    if (TheTok.isAtStartOfLine() && TheTok.getKind() == tok::hash) {
      // This is the start of a preprocessor directive.
      Token HashTok = TheTok;
      InPreprocessorDirective = true;
      ActiveCommentLoc = SourceLocation();

      // Figure out which directive this is. Since we're lexing raw tokens,
      // we don't have an identifier table available. Instead, just look at
      // the raw identifier to recognize and categorize preprocessor directives.
      TheLexer.LexFromRawLexer(TheTok);
      if (TheTok.getKind() == tok::raw_identifier && !TheTok.needsCleaning()) {
        StringRef Keyword = TheTok.getRawIdentifier();
        PreambleDirectiveKind PDK
          = llvm::StringSwitch<PreambleDirectiveKind>(Keyword)
              .Case("include", PDK_Skipped)
              .Case("__include_macros", PDK_Skipped)
              .Case("define", PDK_Skipped)
              .Case("undef", PDK_Skipped)
              .Case("line", PDK_Skipped)
              .Case("error", PDK_Skipped)
              .Case("pragma", PDK_Skipped)
              .Case("import", PDK_Skipped)
              .Case("include_next", PDK_Skipped)
              .Case("warning", PDK_Skipped)
              .Case("ident", PDK_Skipped)
              .Case("sccs", PDK_Skipped)
              .Case("assert", PDK_Skipped)
              .Case("unassert", PDK_Skipped)
              .Case("if", PDK_Skipped)
              .Case("ifdef", PDK_Skipped)
              .Case("ifndef", PDK_Skipped)
              .Case("elif", PDK_Skipped)
              .Case("elifdef", PDK_Skipped)
              .Case("elifndef", PDK_Skipped)
              .Case("else", PDK_Skipped)
              .Case("endif", PDK_Skipped)
              .Default(PDK_Unknown);

        switch (PDK) {
        case PDK_Skipped:
          continue;

        case PDK_Unknown:
          // We don't know what this directive is; stop at the '#'.
          break;
        }
      }

      // We only end up here if we didn't recognize the preprocessor
      // directive or it was one that can't occur in the preamble at this
      // point. Roll back the current token to the location of the '#'.
      TheTok = HashTok;
    }

    // We hit a token that we don't recognize as being in the
    // "preprocessing only" part of the file, so we're no longer in
    // the preamble.
    break;
  } while (true);

  SourceLocation End;
  if (ActiveCommentLoc.isValid())
    End = ActiveCommentLoc; // don't truncate a decl comment.
  else
    End = TheTok.getLocation();

  return PreambleBounds(End.getRawEncoding() - FileLoc.getRawEncoding(),
                        TheTok.isAtStartOfLine());
}

unsigned Lexer::getTokenPrefixLength(SourceLocation TokStart, unsigned CharNo,
                                     const SourceManager &SM,
                                     const LangOptions &LangOpts) {
  // Figure out how many physical characters away the specified expansion
  // character is.  This needs to take into consideration newlines and
  // trigraphs.
  bool Invalid = false;
  const char *TokPtr = SM.getCharacterData(TokStart, &Invalid);

  // If they request the first char of the token, we're trivially done.
  if (Invalid || (CharNo == 0 && Lexer::isObviouslySimpleCharacter(*TokPtr)))
    return 0;

  unsigned PhysOffset = 0;

  // The usual case is that tokens don't contain anything interesting.  Skip
  // over the uninteresting characters.  If a token only consists of simple
  // chars, this method is extremely fast.
  while (Lexer::isObviouslySimpleCharacter(*TokPtr)) {
    if (CharNo == 0)
      return PhysOffset;
    ++TokPtr;
    --CharNo;
    ++PhysOffset;
  }

  // If we have a character that may be a trigraph or escaped newline, use a
  // lexer to parse it correctly.
  for (; CharNo; --CharNo) {
    unsigned Size;
    Lexer::getCharAndSizeNoWarn(TokPtr, Size, LangOpts);
    TokPtr += Size;
    PhysOffset += Size;
  }

  // Final detail: if we end up on an escaped newline, we want to return the
  // location of the actual byte of the token.  For example foo\<newline>bar
  // advanced by 3 should return the location of b, not of \\.  One compounding
  // detail of this is that the escape may be made by a trigraph.
  if (!Lexer::isObviouslySimpleCharacter(*TokPtr))
    PhysOffset += Lexer::SkipEscapedNewLines(TokPtr)-TokPtr;

  return PhysOffset;
}

/// Computes the source location just past the end of the
/// token at this source location.
///
/// This routine can be used to produce a source location that
/// points just past the end of the token referenced by \p Loc, and
/// is generally used when a diagnostic needs to point just after a
/// token where it expected something different than it received.  If
/// the returned source location would not be meaningful (e.g., if
/// it points into a macro), this routine returns an invalid
/// source location.
///
/// \param Offset an offset from the end of the token, where the source
/// location should refer to.  The default offset (0) produces a source
/// location pointing just past the end of the token; an offset of 1 produces
/// a source location pointing to the last character in the token, etc.
SourceLocation Lexer::getLocForEndOfToken(SourceLocation Loc, unsigned Offset,
                                          const SourceManager &SM,
                                          const LangOptions &LangOpts) {
  if (Loc.isInvalid())
    return {};

  if (Loc.isMacroID()) {
    if (Offset > 0 || !isAtEndOfMacroExpansion(Loc, SM, LangOpts, &Loc))
      return {}; // Points inside the macro expansion.
  }

  unsigned Len = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
  if (Len > Offset)
    Len = Len - Offset;
  else
    return Loc;

  return Loc.getLocWithOffset(Len);
}

/// Returns true if the given MacroID location points at the first
/// token of the macro expansion.
bool Lexer::isAtStartOfMacroExpansion(SourceLocation loc,
                                      const SourceManager &SM,
                                      const LangOptions &LangOpts,
                                      SourceLocation *MacroBegin) {
  assert(loc.isValid() && loc.isMacroID() && "Expected a valid macro loc");

  SourceLocation expansionLoc;
  if (!SM.isAtStartOfImmediateMacroExpansion(loc, &expansionLoc))
    return false;

  if (expansionLoc.isFileID()) {
    // No other macro expansions, this is the first.
    if (MacroBegin)
      *MacroBegin = expansionLoc;
    return true;
  }

  // Recurse through nested macro expansions until we reach a file location.
  return isAtStartOfMacroExpansion(expansionLoc, SM, LangOpts, MacroBegin);
}

/// Returns true if the given MacroID location points at the last
/// token of the macro expansion.
bool Lexer::isAtEndOfMacroExpansion(SourceLocation loc,
                                    const SourceManager &SM,
                                    const LangOptions &LangOpts,
                                    SourceLocation *MacroEnd) {
  assert(loc.isValid() && loc.isMacroID() && "Expected a valid macro loc");

  SourceLocation spellLoc = SM.getSpellingLoc(loc);
  unsigned tokLen = MeasureTokenLength(spellLoc, SM, LangOpts);
  if (tokLen == 0)
    return false;

  SourceLocation afterLoc = loc.getLocWithOffset(tokLen);
  SourceLocation expansionLoc;
  if (!SM.isAtEndOfImmediateMacroExpansion(afterLoc, &expansionLoc))
    return false;

  if (expansionLoc.isFileID()) {
    // No other macro expansions.
    if (MacroEnd)
      *MacroEnd = expansionLoc;
    return true;
  }

  // Recurse through nested macro expansions until we reach a file location.
  return isAtEndOfMacroExpansion(expansionLoc, SM, LangOpts, MacroEnd);
}

/// Build a character range from a range whose endpoints are both file
/// locations, converting a token range's end to the location just past the
/// end of its final token.  Returns an invalid range on failure.
static CharSourceRange makeRangeFromFileLocs(CharSourceRange Range,
                                             const SourceManager &SM,
                                             const LangOptions &LangOpts) {
  SourceLocation Begin = Range.getBegin();
  SourceLocation End = Range.getEnd();
  assert(Begin.isFileID() && End.isFileID());
  if (Range.isTokenRange()) {
    End = Lexer::getLocForEndOfToken(End, 0, SM,LangOpts);
    if (End.isInvalid())
      return {};
  }

  // Break down the source locations.
  FileID FID;
  unsigned BeginOffs;
  std::tie(FID, BeginOffs) = SM.getDecomposedLoc(Begin);
  if (FID.isInvalid())
    return {};

  unsigned EndOffs;
  if (!SM.isInFileID(End, FID, &EndOffs) ||
      BeginOffs > EndOffs)
    return {};

  return CharSourceRange::getCharRange(Begin, End);
}

// Assumes that `Loc` is in an expansion.
static bool isInExpansionTokenRange(const SourceLocation Loc,
                                    const SourceManager &SM) {
  return SM.getSLocEntry(SM.getFileID(Loc))
      .getExpansion()
      .isExpansionTokenRange();
}

CharSourceRange Lexer::makeFileCharRange(CharSourceRange Range,
                                         const SourceManager &SM,
                                         const LangOptions &LangOpts) {
  SourceLocation Begin = Range.getBegin();
  SourceLocation End = Range.getEnd();
  if (Begin.isInvalid() || End.isInvalid())
    return {};

  if (Begin.isFileID() && End.isFileID())
    return makeRangeFromFileLocs(Range, SM, LangOpts);

  // Begin is in a macro; it can only map to a file range if it is at the very
  // start of its macro expansion.
  if (Begin.isMacroID() && End.isFileID()) {
    if (!isAtStartOfMacroExpansion(Begin, SM, LangOpts, &Begin))
      return {};
    Range.setBegin(Begin);
    return makeRangeFromFileLocs(Range, SM, LangOpts);
  }

  if (Begin.isFileID() && End.isMacroID()) {
    if (Range.isTokenRange()) {
      if (!isAtEndOfMacroExpansion(End, SM, LangOpts, &End))
        return {};
      // Use the *original* end, not the expanded one in `End`.
      Range.setTokenRange(isInExpansionTokenRange(Range.getEnd(), SM));
    } else if (!isAtStartOfMacroExpansion(End, SM, LangOpts, &End))
      return {};
    Range.setEnd(End);
    return makeRangeFromFileLocs(Range, SM, LangOpts);
  }

  assert(Begin.isMacroID() && End.isMacroID());
  SourceLocation MacroBegin, MacroEnd;
  if (isAtStartOfMacroExpansion(Begin, SM, LangOpts, &MacroBegin) &&
      ((Range.isTokenRange() && isAtEndOfMacroExpansion(End, SM, LangOpts,
                                                        &MacroEnd)) ||
       (Range.isCharRange() && isAtStartOfMacroExpansion(End, SM, LangOpts,
                                                         &MacroEnd)))) {
    Range.setBegin(MacroBegin);
    Range.setEnd(MacroEnd);
    // Use the *original* `End`, not the expanded one in `MacroEnd`.
    if (Range.isTokenRange())
      Range.setTokenRange(isInExpansionTokenRange(End, SM));
    return makeRangeFromFileLocs(Range, SM, LangOpts);
  }

  // If both endpoints are expansions of the same macro argument, retry with
  // their spelling locations inside that argument.
  bool Invalid = false;
  const SrcMgr::SLocEntry &BeginEntry = SM.getSLocEntry(SM.getFileID(Begin),
                                                        &Invalid);
  if (Invalid)
    return {};

  if (BeginEntry.getExpansion().isMacroArgExpansion()) {
    const SrcMgr::SLocEntry &EndEntry = SM.getSLocEntry(SM.getFileID(End),
                                                        &Invalid);
    if (Invalid)
      return {};

    if (EndEntry.getExpansion().isMacroArgExpansion() &&
        BeginEntry.getExpansion().getExpansionLocStart() ==
            EndEntry.getExpansion().getExpansionLocStart()) {
      Range.setBegin(SM.getImmediateSpellingLoc(Begin));
      Range.setEnd(SM.getImmediateSpellingLoc(End));
      return makeFileCharRange(Range, SM, LangOpts);
    }
  }

  return {};
}

StringRef Lexer::getSourceText(CharSourceRange Range,
                               const SourceManager &SM,
                               const LangOptions &LangOpts,
                               bool *Invalid) {
  Range = makeFileCharRange(Range, SM, LangOpts);
  if (Range.isInvalid()) {
    if (Invalid) *Invalid = true;
    return {};
  }

  // Break down the source location.
  std::pair<FileID, unsigned> beginInfo = SM.getDecomposedLoc(Range.getBegin());
  if (beginInfo.first.isInvalid()) {
    if (Invalid) *Invalid = true;
    return {};
  }

  unsigned EndOffs;
  if (!SM.isInFileID(Range.getEnd(), beginInfo.first, &EndOffs) ||
      beginInfo.second > EndOffs) {
    if (Invalid) *Invalid = true;
    return {};
  }

  // Try to load the file buffer.
  bool invalidTemp = false;
  StringRef file = SM.getBufferData(beginInfo.first, &invalidTemp);
  if (invalidTemp) {
    if (Invalid) *Invalid = true;
    return {};
  }

  if (Invalid) *Invalid = false;
  return file.substr(beginInfo.second, EndOffs - beginInfo.second);
}

StringRef Lexer::getImmediateMacroName(SourceLocation Loc,
                                       const SourceManager &SM,
                                       const LangOptions &LangOpts) {
  assert(Loc.isMacroID() && "Only reasonable to call this on macros");

  // Find the location of the immediate macro expansion.
  while (true) {
    FileID FID = SM.getFileID(Loc);
    const SrcMgr::SLocEntry *E = &SM.getSLocEntry(FID);
    const SrcMgr::ExpansionInfo &Expansion = E->getExpansion();
    Loc = Expansion.getExpansionLocStart();
    if (!Expansion.isMacroArgExpansion())
      break;

    // For macro arguments we need to check that the argument did not come
    // from an inner macro, e.g: "MAC1( MAC2(foo) )"

    // Loc points to the argument id of the macro definition, move to the
    // macro expansion.
    Loc = SM.getImmediateExpansionRange(Loc).getBegin();
    SourceLocation SpellLoc = Expansion.getSpellingLoc();
    if (SpellLoc.isFileID())
      break; // No inner macro.

    // If spelling location resides in the same FileID as macro expansion
    // location, it means there is no inner macro.
    FileID MacroFID = SM.getFileID(Loc);
    if (SM.isInFileID(SpellLoc, MacroFID))
      break;

    // Argument came from inner macro.
1029 Loc = SpellLoc; 1030 } 1031 1032 // Find the spelling location of the start of the non-argument expansion 1033 // range. This is where the macro name was spelled in order to begin 1034 // expanding this macro. 1035 Loc = SM.getSpellingLoc(Loc); 1036 1037 // Dig out the buffer where the macro name was spelled and the extents of the 1038 // name so that we can render it into the expansion note. 1039 std::pair<FileID, unsigned> ExpansionInfo = SM.getDecomposedLoc(Loc); 1040 unsigned MacroTokenLength = Lexer::MeasureTokenLength(Loc, SM, LangOpts); 1041 StringRef ExpansionBuffer = SM.getBufferData(ExpansionInfo.first); 1042 return ExpansionBuffer.substr(ExpansionInfo.second, MacroTokenLength); 1043 } 1044 1045 StringRef Lexer::getImmediateMacroNameForDiagnostics( 1046 SourceLocation Loc, const SourceManager &SM, const LangOptions &LangOpts) { 1047 assert(Loc.isMacroID() && "Only reasonable to call this on macros"); 1048 // Walk past macro argument expansions. 1049 while (SM.isMacroArgExpansion(Loc)) 1050 Loc = SM.getImmediateExpansionRange(Loc).getBegin(); 1051 1052 // If the macro's spelling has no FileID, then it's actually a token paste 1053 // or stringization (or similar) and not a macro at all. 1054 if (!SM.getFileEntryForID(SM.getFileID(SM.getSpellingLoc(Loc)))) 1055 return {}; 1056 1057 // Find the spelling location of the start of the non-argument expansion 1058 // range. This is where the macro name was spelled in order to begin 1059 // expanding this macro. 1060 Loc = SM.getSpellingLoc(SM.getImmediateExpansionRange(Loc).getBegin()); 1061 1062 // Dig out the buffer where the macro name was spelled and the extents of the 1063 // name so that we can render it into the expansion note. 
1064 std::pair<FileID, unsigned> ExpansionInfo = SM.getDecomposedLoc(Loc); 1065 unsigned MacroTokenLength = Lexer::MeasureTokenLength(Loc, SM, LangOpts); 1066 StringRef ExpansionBuffer = SM.getBufferData(ExpansionInfo.first); 1067 return ExpansionBuffer.substr(ExpansionInfo.second, MacroTokenLength); 1068 } 1069 1070 bool Lexer::isAsciiIdentifierContinueChar(char c, const LangOptions &LangOpts) { 1071 return isAsciiIdentifierContinue(c, LangOpts.DollarIdents); 1072 } 1073 1074 bool Lexer::isNewLineEscaped(const char *BufferStart, const char *Str) { 1075 assert(isVerticalWhitespace(Str[0])); 1076 if (Str - 1 < BufferStart) 1077 return false; 1078 1079 if ((Str[0] == '\n' && Str[-1] == '\r') || 1080 (Str[0] == '\r' && Str[-1] == '\n')) { 1081 if (Str - 2 < BufferStart) 1082 return false; 1083 --Str; 1084 } 1085 --Str; 1086 1087 // Rewind to first non-space character: 1088 while (Str > BufferStart && isHorizontalWhitespace(*Str)) 1089 --Str; 1090 1091 return *Str == '\\'; 1092 } 1093 1094 StringRef Lexer::getIndentationForLine(SourceLocation Loc, 1095 const SourceManager &SM) { 1096 if (Loc.isInvalid() || Loc.isMacroID()) 1097 return {}; 1098 std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc); 1099 if (LocInfo.first.isInvalid()) 1100 return {}; 1101 bool Invalid = false; 1102 StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid); 1103 if (Invalid) 1104 return {}; 1105 const char *Line = findBeginningOfLine(Buffer, LocInfo.second); 1106 if (!Line) 1107 return {}; 1108 StringRef Rest = Buffer.substr(Line - Buffer.data()); 1109 size_t NumWhitespaceChars = Rest.find_first_not_of(" \t"); 1110 return NumWhitespaceChars == StringRef::npos 1111 ? "" 1112 : Rest.take_front(NumWhitespaceChars); 1113 } 1114 1115 //===----------------------------------------------------------------------===// 1116 // Diagnostics forwarding code. 
1117 //===----------------------------------------------------------------------===// 1118 1119 /// GetMappedTokenLoc - If lexing out of a 'mapped buffer', where we pretend the 1120 /// lexer buffer was all expanded at a single point, perform the mapping. 1121 /// This is currently only used for _Pragma implementation, so it is the slow 1122 /// path of the hot getSourceLocation method. Do not allow it to be inlined. 1123 static LLVM_ATTRIBUTE_NOINLINE SourceLocation GetMappedTokenLoc( 1124 Preprocessor &PP, SourceLocation FileLoc, unsigned CharNo, unsigned TokLen); 1125 static SourceLocation GetMappedTokenLoc(Preprocessor &PP, 1126 SourceLocation FileLoc, 1127 unsigned CharNo, unsigned TokLen) { 1128 assert(FileLoc.isMacroID() && "Must be a macro expansion"); 1129 1130 // Otherwise, we're lexing "mapped tokens". This is used for things like 1131 // _Pragma handling. Combine the expansion location of FileLoc with the 1132 // spelling location. 1133 SourceManager &SM = PP.getSourceManager(); 1134 1135 // Create a new SLoc which is expanded from Expansion(FileLoc) but whose 1136 // characters come from spelling(FileLoc)+Offset. 1137 SourceLocation SpellingLoc = SM.getSpellingLoc(FileLoc); 1138 SpellingLoc = SpellingLoc.getLocWithOffset(CharNo); 1139 1140 // Figure out the expansion loc range, which is the range covered by the 1141 // original _Pragma(...) sequence. 1142 CharSourceRange II = SM.getImmediateExpansionRange(FileLoc); 1143 1144 return SM.createExpansionLoc(SpellingLoc, II.getBegin(), II.getEnd(), TokLen); 1145 } 1146 1147 /// getSourceLocation - Return a source location identifier for the specified 1148 /// offset in the current file. 
SourceLocation Lexer::getSourceLocation(const char *Loc,
                                        unsigned TokLen) const {
  assert(Loc >= BufferStart && Loc <= BufferEnd &&
         "Location out of range for this buffer!");

  // In the normal case, we're just lexing from a simple file buffer, return
  // the file id from FileLoc with the offset specified.
  unsigned CharNo = Loc-BufferStart;
  if (FileLoc.isFileID())
    return FileLoc.getLocWithOffset(CharNo);

  // Otherwise, this is the _Pragma lexer case, which pretends that all of the
  // tokens are lexed from where the _Pragma was defined.
  assert(PP && "This doesn't work on raw lexers");
  return GetMappedTokenLoc(*PP, FileLoc, CharNo, TokLen);
}

/// Diag - Forwarding function for diagnostics. This translates a source
/// position in the current buffer into a SourceLocation object for rendering.
DiagnosticBuilder Lexer::Diag(const char *Loc, unsigned DiagID) const {
  return PP->Diag(getSourceLocation(Loc), DiagID);
}

//===----------------------------------------------------------------------===//
// Trigraph and Escaped Newline Handling Code.
//===----------------------------------------------------------------------===//

/// GetTrigraphCharForLetter - Given a character that occurs after a ?? pair,
/// return the decoded trigraph letter it corresponds to, or '\0' if nothing.
static char GetTrigraphCharForLetter(char Letter) {
  switch (Letter) {
  default:   return 0;
  case '=':  return '#';
  case ')':  return ']';
  case '(':  return '[';
  case '!':  return '|';
  case '\'': return '^';
  case '>':  return '}';
  case '/':  return '\\';
  case '<':  return '{';
  case '-':  return '~';
  }
}

/// DecodeTrigraphChar - If the specified character is a legal trigraph when
/// prefixed with ??, emit a trigraph warning. If trigraphs are enabled,
/// return the result character. Finally, emit a warning about trigraph use
/// whether trigraphs are enabled or not.
static char DecodeTrigraphChar(const char *CP, Lexer *L, bool Trigraphs) {
  char Res = GetTrigraphCharForLetter(*CP);
  // Not a trigraph, or no Lexer to diagnose through: return as-is.
  if (!Res || !L) return Res;

  if (!Trigraphs) {
    // Trigraphs disabled: warn that the sequence is ignored and lex the '?'.
    if (!L->isLexingRawMode())
      L->Diag(CP-2, diag::trigraph_ignored);
    return 0;
  }

  if (!L->isLexingRawMode())
    L->Diag(CP-2, diag::trigraph_converted) << StringRef(&Res, 1);
  return Res;
}

/// getEscapedNewLineSize - Return the size of the specified escaped newline,
/// or 0 if it is not an escaped newline. P[-1] is known to be a "\" or a
/// trigraph equivalent on entry to this function.
unsigned Lexer::getEscapedNewLineSize(const char *Ptr) {
  unsigned Size = 0;
  while (isWhitespace(Ptr[Size])) {
    ++Size;

    if (Ptr[Size-1] != '\n' && Ptr[Size-1] != '\r')
      continue;

    // If this is a \r\n or \n\r, skip the other half.
    if ((Ptr[Size] == '\r' || Ptr[Size] == '\n') &&
        Ptr[Size-1] != Ptr[Size])
      ++Size;

    return Size;
  }

  // Not an escaped newline, must be a \t or something else.
  return 0;
}

/// SkipEscapedNewLines - If P points to an escaped newline (or a series of
/// them), skip over them and return the first non-escaped-newline found,
/// otherwise return P.
const char *Lexer::SkipEscapedNewLines(const char *P) {
  while (true) {
    const char *AfterEscape;
    if (*P == '\\') {
      AfterEscape = P+1;
    } else if (*P == '?') {
      // If not a trigraph for escape, bail out.
      if (P[1] != '?' || P[2] != '/')
        return P;
      // FIXME: Take LangOpts into account; the language might not
      // support trigraphs.
      AfterEscape = P+3;
    } else {
      return P;
    }

    unsigned NewLineSize = Lexer::getEscapedNewLineSize(AfterEscape);
    if (NewLineSize == 0) return P;
    P = AfterEscape+NewLineSize;
  }
}

/// Return the raw token that starts immediately after the token at `Loc`
/// (skipping whitespace/comments), or None if the buffer cannot be loaded or
/// `Loc` is not at the end of a macro expansion.
Optional<Token> Lexer::findNextToken(SourceLocation Loc,
                                     const SourceManager &SM,
                                     const LangOptions &LangOpts) {
  if (Loc.isMacroID()) {
    if (!Lexer::isAtEndOfMacroExpansion(Loc, SM, LangOpts, &Loc))
      return None;
  }
  Loc = Lexer::getLocForEndOfToken(Loc, 0, SM, LangOpts);

  // Break down the source location.
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);

  // Try to load the file buffer.
  bool InvalidTemp = false;
  StringRef File = SM.getBufferData(LocInfo.first, &InvalidTemp);
  if (InvalidTemp)
    return None;

  const char *TokenBegin = File.data() + LocInfo.second;

  // Lex from the start of the given location.
  Lexer lexer(SM.getLocForStartOfFile(LocInfo.first), LangOpts, File.begin(),
              TokenBegin, File.end());
  // Find the token.
  Token Tok;
  lexer.LexFromRawLexer(Tok);
  return Tok;
}

/// Checks that the given token is the first token that occurs after the
/// given location (this excludes comments and whitespace). Returns the location
/// immediately after the specified token. If the token is not found or the
/// location is inside a macro, the returned source location will be invalid.
SourceLocation Lexer::findLocationAfterToken(
    SourceLocation Loc, tok::TokenKind TKind, const SourceManager &SM,
    const LangOptions &LangOpts, bool SkipTrailingWhitespaceAndNewLine) {
  Optional<Token> Tok = findNextToken(Loc, SM, LangOpts);
  if (!Tok || Tok->isNot(TKind))
    return {};
  SourceLocation TokenLoc = Tok->getLocation();

  // Calculate how much whitespace needs to be skipped if any.
  unsigned NumWhitespaceChars = 0;
  if (SkipTrailingWhitespaceAndNewLine) {
    const char *TokenEnd = SM.getCharacterData(TokenLoc) + Tok->getLength();
    unsigned char C = *TokenEnd;
    while (isHorizontalWhitespace(C)) {
      C = *(++TokenEnd);
      NumWhitespaceChars++;
    }

    // Skip \r, \n, \r\n, or \n\r
    if (C == '\n' || C == '\r') {
      char PrevC = C;
      C = *(++TokenEnd);
      NumWhitespaceChars++;
      if ((C == '\n' || C == '\r') && C != PrevC)
        NumWhitespaceChars++;
    }
  }

  return TokenLoc.getLocWithOffset(Tok->getLength() + NumWhitespaceChars);
}

/// getCharAndSizeSlow - Peek a single 'character' from the specified buffer,
/// get its size, and return it. This is tricky in several cases:
///   1. If currently at the start of a trigraph, we warn about the trigraph,
///      then either return the trigraph (skipping 3 chars) or the '?',
///      depending on whether trigraphs are enabled or not.
///   2. If this is an escaped newline (potentially with whitespace between
///      the backslash and newline), implicitly skip the newline and return
///      the char after it.
///
/// This handles the slow/uncommon case of the getCharAndSize method. Here we
/// know that we can accumulate into Size, and that we have already incremented
/// Ptr by Size bytes.
///
/// NOTE: When this method is updated, getCharAndSizeSlowNoWarn (below) should
/// be updated to match.
char Lexer::getCharAndSizeSlow(const char *Ptr, unsigned &Size,
                               Token *Tok) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
Slash:
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters between the slash and
    // newline.
    if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
      // Remember that this token needs to be cleaned.
      if (Tok) Tok->setFlag(Token::NeedsCleaning);

      // Warn if there was whitespace between the backslash and newline.
      if (Ptr[0] != '\n' && Ptr[0] != '\r' && Tok && !isLexingRawMode())
        Diag(Ptr, diag::backslash_newline_space);

      // Found backslash<whitespace><newline>. Parse the char after it.
      Size += EscapedNewLineSize;
      Ptr += EscapedNewLineSize;

      // Use slow version to accumulate a correct size field.
      return getCharAndSizeSlow(Ptr, Size, Tok);
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), emit
    // a trigraph warning. If so, and if trigraphs are enabled, return it.
    if (char C = DecodeTrigraphChar(Ptr + 2, Tok ? this : nullptr,
                                    LangOpts.Trigraphs)) {
      // Remember that this token needs to be cleaned.
      if (Tok) Tok->setFlag(Token::NeedsCleaning);

      Ptr += 3;
      Size += 3;
      // ??/ decodes to backslash, which may itself start an escaped newline.
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}

/// getCharAndSizeSlowNoWarn - Handle the slow/uncommon case of the
/// getCharAndSizeNoWarn method. Here we know that we can accumulate into Size,
/// and that we have already incremented Ptr by Size bytes.
///
/// NOTE: When this method is updated, getCharAndSizeSlow (above) should
/// be updated to match.
char Lexer::getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
                                     const LangOptions &LangOpts) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
Slash:
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters followed by a newline.
    if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
      // Found backslash<whitespace><newline>. Parse the char after it.
      Size += EscapedNewLineSize;
      Ptr += EscapedNewLineSize;

      // Use slow version to accumulate a correct size field.
      return getCharAndSizeSlowNoWarn(Ptr, Size, LangOpts);
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (LangOpts.Trigraphs && Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), return
    // it.
    if (char C = GetTrigraphCharForLetter(Ptr[2])) {
      Ptr += 3;
      Size += 3;
      // ??/ decodes to backslash, which may itself start an escaped newline.
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}

//===----------------------------------------------------------------------===//
// Helper methods for lexing.
//===----------------------------------------------------------------------===//

/// Routine that indiscriminately sets the offset into the source file.
void Lexer::SetByteOffset(unsigned Offset, bool StartOfLine) {
  BufferPtr = BufferStart + Offset;
  // Clamp to the end of the buffer so we never lex past it.
  if (BufferPtr > BufferEnd)
    BufferPtr = BufferEnd;
  // FIXME: What exactly does the StartOfLine bit mean? There are two
  // possible meanings for the "start" of the line: the first token on the
  // unexpanded line, or the first token on the expanded line.
  IsAtStartOfLine = StartOfLine;
  IsAtPhysicalStartOfLine = StartOfLine;
}

/// Return true if the codepoint is Unicode whitespace (per the
/// UnicodeWhitespaceCharRanges table).
static bool isUnicodeWhitespace(uint32_t Codepoint) {
  static const llvm::sys::UnicodeCharSet UnicodeWhitespaceChars(
      UnicodeWhitespaceCharRanges);
  return UnicodeWhitespaceChars.contains(Codepoint);
}

/// Return true if the codepoint may appear in a non-leading position of an
/// identifier under the active language mode.
static bool isAllowedIDChar(uint32_t C, const LangOptions &LangOpts) {
  if (LangOpts.AsmPreprocessor) {
    return false;
  } else if (LangOpts.DollarIdents && '$' == C) {
    return true;
  } else if (LangOpts.CPlusPlus) {
    // A non-leading codepoint must have the XID_Continue property.
    // XIDContinueRanges doesn't contain characters also in XIDStartRanges,
    // so we need to check both tables.
    // '_' doesn't have the XID_Continue property but is allowed in C++.
    static const llvm::sys::UnicodeCharSet XIDStartChars(XIDStartRanges);
    static const llvm::sys::UnicodeCharSet XIDContinueChars(XIDContinueRanges);
    return C == '_' || XIDStartChars.contains(C) ||
           XIDContinueChars.contains(C);
  } else if (LangOpts.C11) {
    static const llvm::sys::UnicodeCharSet C11AllowedIDChars(
        C11AllowedIDCharRanges);
    return C11AllowedIDChars.contains(C);
  } else {
    static const llvm::sys::UnicodeCharSet C99AllowedIDChars(
        C99AllowedIDCharRanges);
    return C99AllowedIDChars.contains(C);
  }
}

/// Return true if the codepoint may start an identifier under the active
/// language mode.
static bool isAllowedInitiallyIDChar(uint32_t C, const LangOptions &LangOpts) {
  if (LangOpts.AsmPreprocessor) {
    return false;
  }
  if (LangOpts.CPlusPlus) {
    static const llvm::sys::UnicodeCharSet XIDStartChars(XIDStartRanges);
    // '_' doesn't have the XID_Start property but is allowed in C++.
    return C == '_' || XIDStartChars.contains(C);
  }
  if (!isAllowedIDChar(C, LangOpts))
    return false;
  // C rejects some continue characters in the initial position.
  if (LangOpts.C11) {
    static const llvm::sys::UnicodeCharSet C11DisallowedInitialIDChars(
        C11DisallowedInitialIDCharRanges);
    return !C11DisallowedInitialIDChars.contains(C);
  }
  static const llvm::sys::UnicodeCharSet C99DisallowedInitialIDChars(
      C99DisallowedInitialIDCharRanges);
  return !C99DisallowedInitialIDChars.contains(C);
}

/// Build a CharSourceRange from two raw buffer pointers of the given Lexer.
static inline CharSourceRange makeCharRange(Lexer &L, const char *Begin,
                                            const char *End) {
  return CharSourceRange::getCharRange(L.getSourceLocation(Begin),
                                       L.getSourceLocation(End));
}

/// Warn when an extended identifier character is accepted but would not be
/// valid (here, or initially) under C99 rules.
static void maybeDiagnoseIDCharCompat(DiagnosticsEngine &Diags, uint32_t C,
                                      CharSourceRange Range, bool IsFirst) {
  // Check C99 compatibility.
  if (!Diags.isIgnored(diag::warn_c99_compat_unicode_id, Range.getBegin())) {
    enum {
      CannotAppearInIdentifier = 0,
      CannotStartIdentifier
    };

    static const llvm::sys::UnicodeCharSet C99AllowedIDChars(
        C99AllowedIDCharRanges);
    static const llvm::sys::UnicodeCharSet C99DisallowedInitialIDChars(
        C99DisallowedInitialIDCharRanges);
    if (!C99AllowedIDChars.contains(C)) {
      Diags.Report(Range.getBegin(), diag::warn_c99_compat_unicode_id)
        << Range
        << CannotAppearInIdentifier;
    } else if (IsFirst && C99DisallowedInitialIDChars.contains(C)) {
      Diags.Report(Range.getBegin(), diag::warn_c99_compat_unicode_id)
        << Range
        << CannotStartIdentifier;
    }
  }
}

/// After encountering UTF-8 character C and interpreting it as an identifier
/// character, check whether it's a homoglyph for a common non-identifier
/// source character that is unlikely to be an intentional identifier
/// character and warn if so.
static void maybeDiagnoseUTF8Homoglyph(DiagnosticsEngine &Diags, uint32_t C,
                                       CharSourceRange Range) {
  // FIXME: Handle Unicode quotation marks (smart quotes, fullwidth quotes).
  struct HomoglyphPair {
    uint32_t Character;
    char LooksLike;
    bool operator<(HomoglyphPair R) const { return Character < R.Character; }
  };
  // Sorted by codepoint; looked up below with std::lower_bound.
  static constexpr HomoglyphPair SortedHomoglyphs[] = {
    {U'\u00ad', 0},   // SOFT HYPHEN
    {U'\u01c3', '!'}, // LATIN LETTER RETROFLEX CLICK
    {U'\u037e', ';'}, // GREEK QUESTION MARK
    {U'\u200b', 0},   // ZERO WIDTH SPACE
    {U'\u200c', 0},   // ZERO WIDTH NON-JOINER
    {U'\u200d', 0},   // ZERO WIDTH JOINER
    {U'\u2060', 0},   // WORD JOINER
    {U'\u2061', 0},   // FUNCTION APPLICATION
    {U'\u2062', 0},   // INVISIBLE TIMES
    {U'\u2063', 0},   // INVISIBLE SEPARATOR
    {U'\u2064', 0},   // INVISIBLE PLUS
    {U'\u2212', '-'}, // MINUS SIGN
    {U'\u2215', '/'}, // DIVISION SLASH
    {U'\u2216', '\\'}, // SET MINUS
    {U'\u2217', '*'}, // ASTERISK OPERATOR
    {U'\u2223', '|'}, // DIVIDES
    {U'\u2227', '^'}, // LOGICAL AND
    {U'\u2236', ':'}, // RATIO
    {U'\u223c', '~'}, // TILDE OPERATOR
    {U'\ua789', ':'}, // MODIFIER LETTER COLON
    {U'\ufeff', 0},   // ZERO WIDTH NO-BREAK SPACE
    {U'\uff01', '!'}, // FULLWIDTH EXCLAMATION MARK
    {U'\uff03', '#'}, // FULLWIDTH NUMBER SIGN
    {U'\uff04', '$'}, // FULLWIDTH DOLLAR SIGN
    {U'\uff05', '%'}, // FULLWIDTH PERCENT SIGN
    {U'\uff06', '&'}, // FULLWIDTH AMPERSAND
    {U'\uff08', '('}, // FULLWIDTH LEFT PARENTHESIS
    {U'\uff09', ')'}, // FULLWIDTH RIGHT PARENTHESIS
    {U'\uff0a', '*'}, // FULLWIDTH ASTERISK
    {U'\uff0b', '+'}, // FULLWIDTH PLUS SIGN
    {U'\uff0c', ','}, // FULLWIDTH COMMA
    {U'\uff0d', '-'}, // FULLWIDTH HYPHEN-MINUS
    {U'\uff0e', '.'}, // FULLWIDTH FULL STOP
    {U'\uff0f', '/'}, // FULLWIDTH SOLIDUS
    {U'\uff1a', ':'}, // FULLWIDTH COLON
    {U'\uff1b', ';'}, // FULLWIDTH SEMICOLON
    {U'\uff1c', '<'}, // FULLWIDTH LESS-THAN SIGN
    {U'\uff1d', '='}, // FULLWIDTH EQUALS SIGN
    {U'\uff1e', '>'}, // FULLWIDTH GREATER-THAN SIGN
    {U'\uff1f', '?'}, // FULLWIDTH QUESTION MARK
    {U'\uff20', '@'}, // FULLWIDTH COMMERCIAL AT
    {U'\uff3b', '['}, // FULLWIDTH LEFT SQUARE BRACKET
    {U'\uff3c', '\\'}, // FULLWIDTH REVERSE SOLIDUS
    {U'\uff3d', ']'}, // FULLWIDTH RIGHT SQUARE BRACKET
    {U'\uff3e', '^'}, // FULLWIDTH CIRCUMFLEX ACCENT
    {U'\uff5b', '{'}, // FULLWIDTH LEFT CURLY BRACKET
    {U'\uff5c', '|'}, // FULLWIDTH VERTICAL LINE
    {U'\uff5d', '}'}, // FULLWIDTH RIGHT CURLY BRACKET
    {U'\uff5e', '~'}, // FULLWIDTH TILDE
    {0, 0}
  };
  auto Homoglyph =
      std::lower_bound(std::begin(SortedHomoglyphs),
                       std::end(SortedHomoglyphs) - 1, HomoglyphPair{C, '\0'});
  if (Homoglyph->Character == C) {
    llvm::SmallString<5> CharBuf;
    {
      llvm::raw_svector_ostream CharOS(CharBuf);
      llvm::write_hex(CharOS, C, llvm::HexPrintStyle::Upper, 4);
    }
    if (Homoglyph->LooksLike) {
      const char LooksLikeStr[] = {Homoglyph->LooksLike, 0};
      Diags.Report(Range.getBegin(), diag::warn_utf8_symbol_homoglyph)
          << Range << CharBuf << LooksLikeStr;
    } else {
      Diags.Report(Range.getBegin(), diag::warn_utf8_symbol_zero_width)
          << Range << CharBuf;
    }
  }
}

/// Diagnose a non-ASCII codepoint that is not valid at this position of an
/// identifier, attaching a removal fix-it.
static void diagnoseInvalidUnicodeCodepointInIdentifier(
    DiagnosticsEngine &Diags, const LangOptions &LangOpts, uint32_t CodePoint,
    CharSourceRange Range, bool IsFirst) {
  if (isASCII(CodePoint))
    return;

  bool IsIDStart = isAllowedInitiallyIDChar(CodePoint, LangOpts);
  bool IsIDContinue = IsIDStart || isAllowedIDChar(CodePoint, LangOpts);

  // Valid at this position: nothing to report.
  if ((IsFirst && IsIDStart) || (!IsFirst && IsIDContinue))
    return;

  bool InvalidOnlyAtStart = IsFirst && !IsIDStart && IsIDContinue;

  llvm::SmallString<5> CharBuf;
  llvm::raw_svector_ostream CharOS(CharBuf);
  llvm::write_hex(CharOS, CodePoint, llvm::HexPrintStyle::Upper, 4);

  if (!IsFirst || InvalidOnlyAtStart) {
    Diags.Report(Range.getBegin(), diag::err_character_not_allowed_identifier)
        << Range << CharBuf << int(InvalidOnlyAtStart)
        << FixItHint::CreateRemoval(Range);
  } else {
    Diags.Report(Range.getBegin(), diag::err_character_not_allowed)
        << Range << CharBuf << FixItHint::CreateRemoval(Range);
  }
}

/// Try to consume a universal character name (\uXXXX or \UXXXXXXXX) as part
/// of an identifier body. On success, advances CurPtr past the UCN, sets
/// Token::HasUCN on Result, and returns true.
bool Lexer::tryConsumeIdentifierUCN(const char *&CurPtr, unsigned Size,
                                    Token &Result) {
  const char *UCNPtr = CurPtr + Size;
  uint32_t CodePoint = tryReadUCN(UCNPtr, CurPtr, /*Token=*/nullptr);
  if (CodePoint == 0) {
    return false;
  }

  if (!isAllowedIDChar(CodePoint, LangOpts)) {
    if (isASCII(CodePoint) || isUnicodeWhitespace(CodePoint))
      return false;
    if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
        !PP->isPreprocessedOutput())
      diagnoseInvalidUnicodeCodepointInIdentifier(
          PP->getDiagnostics(), LangOpts, CodePoint,
          makeCharRange(*this, CurPtr, UCNPtr),
          /*IsFirst=*/false);

    // We got a unicode codepoint that is neither a space nor a
    // valid identifier part.
    // Carry on as if the codepoint was valid for recovery purposes.
  } else if (!isLexingRawMode())
    maybeDiagnoseIDCharCompat(PP->getDiagnostics(), CodePoint,
                              makeCharRange(*this, CurPtr, UCNPtr),
                              /*IsFirst=*/false);

  Result.setFlag(Token::HasUCN);
  // Fast path for exactly-\uXXXX / \UXXXXXXXX spellings; otherwise advance
  // char-by-char so trigraphs/escaped newlines inside the UCN are handled.
  if ((UCNPtr - CurPtr == 6 && CurPtr[1] == 'u') ||
      (UCNPtr - CurPtr == 10 && CurPtr[1] == 'U'))
    CurPtr = UCNPtr;
  else
    while (CurPtr != UCNPtr)
      (void)getAndAdvanceChar(CurPtr, Result);
  return true;
}

/// Try to consume a raw UTF-8 encoded codepoint as part of an identifier
/// body. On success, advances CurPtr past the sequence and returns true.
bool Lexer::tryConsumeIdentifierUTF8Char(const char *&CurPtr) {
  const char *UnicodePtr = CurPtr;
  llvm::UTF32 CodePoint;
  llvm::ConversionResult Result =
      llvm::convertUTF8Sequence((const llvm::UTF8 **)&UnicodePtr,
                                (const llvm::UTF8 *)BufferEnd,
                                &CodePoint,
                                llvm::strictConversion);
  if (Result != llvm::conversionOK)
    return false;

  if (!isAllowedIDChar(static_cast<uint32_t>(CodePoint), LangOpts)) {
    if (isASCII(CodePoint) || isUnicodeWhitespace(CodePoint))
      return false;

    if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
        !PP->isPreprocessedOutput())
      diagnoseInvalidUnicodeCodepointInIdentifier(
          PP->getDiagnostics(), LangOpts, CodePoint,
          makeCharRange(*this, CurPtr, UnicodePtr), /*IsFirst=*/false);
    // We got a unicode codepoint that is neither a space nor a
    // valid identifier part. Carry on as if the codepoint was
    // valid for recovery purposes.
  } else if (!isLexingRawMode()) {
    maybeDiagnoseIDCharCompat(PP->getDiagnostics(), CodePoint,
                              makeCharRange(*this, CurPtr, UnicodePtr),
                              /*IsFirst=*/false);
    maybeDiagnoseUTF8Homoglyph(PP->getDiagnostics(), CodePoint,
                               makeCharRange(*this, CurPtr, UnicodePtr));
  }

  CurPtr = UnicodePtr;
  return true;
}

/// Lex a token that begins with the non-ASCII codepoint C (already consumed;
/// CurPtr points past it). Produces either an identifier (continuing with
/// LexIdentifierContinue), nothing (invalid codepoint is dropped), or a
/// tok::unknown token.
bool Lexer::LexUnicodeIdentifierStart(Token &Result, uint32_t C,
                                      const char *CurPtr) {
  if (isAllowedInitiallyIDChar(C, LangOpts)) {
    if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
        !PP->isPreprocessedOutput()) {
      maybeDiagnoseIDCharCompat(PP->getDiagnostics(), C,
                                makeCharRange(*this, BufferPtr, CurPtr),
                                /*IsFirst=*/true);
      maybeDiagnoseUTF8Homoglyph(PP->getDiagnostics(), C,
                                 makeCharRange(*this, BufferPtr, CurPtr));
    }

    MIOpt.ReadToken();
    return LexIdentifierContinue(Result, CurPtr);
  }

  // NOTE: the !isAllowedInitiallyIDChar() test below is always true here
  // (the branch above already failed); it is kept for symmetry with the
  // conditions being described.
  if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
      !PP->isPreprocessedOutput() && !isASCII(*BufferPtr) &&
      !isAllowedInitiallyIDChar(C, LangOpts) && !isUnicodeWhitespace(C)) {
    // Non-ASCII characters tend to creep into source code unintentionally.
    // Instead of letting the parser complain about the unknown token,
    // just drop the character.
    // Note that we can /only/ do this when the non-ASCII character is actually
    // spelled as Unicode, not written as a UCN. The standard requires that
    // we not throw away any possible preprocessor tokens, but there's a
    // loophole in the mapping of Unicode characters to basic character set
    // characters that allows us to map these particular characters to, say,
    // whitespace.
    diagnoseInvalidUnicodeCodepointInIdentifier(
        PP->getDiagnostics(), LangOpts, C,
        makeCharRange(*this, BufferPtr, CurPtr), /*IsFirst=*/true);
    BufferPtr = CurPtr;
    return false;
  }

  // Otherwise, we have an explicit UCN or a character that's unlikely to show
  // up by accident.
  MIOpt.ReadToken();
  FormTokenWithChars(Result, CurPtr, tok::unknown);
  return true;
}

/// Lex the remainder of an identifier whose first character has already been
/// consumed, then form the raw_identifier token and (outside raw mode) hand
/// it to the preprocessor.
bool Lexer::LexIdentifierContinue(Token &Result, const char *CurPtr) {
  // Match [_A-Za-z0-9]*, we have already matched an identifier start.
  while (true) {
    unsigned char C = *CurPtr;
    // Fast path.
    if (isAsciiIdentifierContinue(C)) {
      ++CurPtr;
      continue;
    }

    unsigned Size;
    // Slow path: handle trigraph, unicode codepoints, UCNs.
    C = getCharAndSize(CurPtr, Size);
    if (isAsciiIdentifierContinue(C)) {
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      continue;
    }
    if (C == '$') {
      // If we hit a $ and they are not supported in identifiers, we are done.
      if (!LangOpts.DollarIdents)
        break;
      // Otherwise, emit a diagnostic and continue.
      if (!isLexingRawMode())
        Diag(CurPtr, diag::ext_dollar_in_identifier);
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      continue;
    }
    if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result))
      continue;
    if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr))
      continue;
    // Neither an expected Unicode codepoint nor a UCN.
    break;
  }

  const char *IdStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, tok::raw_identifier);
  Result.setRawIdentifierData(IdStart);

  // If we are in raw mode, return this identifier raw. There is no need to
  // look up identifier information or attempt to macro expand it.
  if (LexingRawMode)
    return true;

  // Fill in Result.IdentifierInfo and update the token kind,
  // looking up the identifier in the identifier table.
  IdentifierInfo *II = PP->LookUpIdentifierInfo(Result);
  // Note that we have to call PP->LookUpIdentifierInfo() even for code
  // completion, it writes IdentifierInfo into Result, and callers rely on it.

  // If the completion point is at the end of an identifier, we want to treat
  // the identifier as incomplete even if it resolves to a macro or a keyword.
  // This allows e.g. 'class^' to complete to 'classifier'.
  if (isCodeCompletionPoint(CurPtr)) {
    // Return the code-completion token.
    Result.setKind(tok::code_completion);
    // Skip the code-completion char and all immediate identifier characters.
    // This ensures we get consistent behavior when completing at any point in
    // an identifier (i.e. at the start, in the middle, at the end). Note that
    // only simple cases (i.e. [a-zA-Z0-9_]) are supported to keep the code
    // simpler.
    assert(*CurPtr == 0 && "Completion character must be 0");
    ++CurPtr;
    // Note that code completion token is not added as a separate character
    // when the completion point is at the end of the buffer. Therefore, we need
    // to check if the buffer has ended.
    if (CurPtr < BufferEnd) {
      while (isAsciiIdentifierContinue(*CurPtr))
        ++CurPtr;
    }
    BufferPtr = CurPtr;
    return true;
  }

  // Finally, now that we know we have an identifier, pass this off to the
  // preprocessor, which may macro expand it or something.
  if (II->isHandleIdentifierCase())
    return PP->HandleIdentifier(Result);

  return true;
}

/// isHexaLiteral - Return true if Start points to a hex constant.
/// in microsoft mode (where this is supposed to be several different tokens).
bool Lexer::isHexaLiteral(const char *Start, const LangOptions &LangOpts) {
  unsigned Size;
  // Use getCharAndSizeNoWarn so trigraphs/escaped newlines are decoded
  // without emitting diagnostics (this may be called speculatively).
  char C1 = Lexer::getCharAndSizeNoWarn(Start, Size, LangOpts);
  if (C1 != '0')
    return false;
  char C2 = Lexer::getCharAndSizeNoWarn(Start + Size, Size, LangOpts);
  return (C2 == 'x' || C2 == 'X');
}

/// LexNumericConstant - Lex the remainder of a integer or floating point
/// constant. From[-1] is the first character lexed. Return the end of the
/// constant.
bool Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
  unsigned Size;
  char C = getCharAndSize(CurPtr, Size);
  char PrevCh = 0;
  // Consume the maximal run of pp-number body characters, remembering the
  // previous one so we can recognize exponent/hexfloat suffixes below.
  while (isPreprocessingNumberBody(C)) {
    CurPtr = ConsumeChar(CurPtr, Size, Result);
    PrevCh = C;
    C = getCharAndSize(CurPtr, Size);
  }

  // If we fell out, check for a sign, due to 1e+12. If we have one, continue.
  if ((C == '-' || C == '+') && (PrevCh == 'E' || PrevCh == 'e')) {
    // If we are in Microsoft mode, don't continue if the constant is hex.
    // For example, MSVC will accept the following as 3 tokens: 0x1234567e+1
    if (!LangOpts.MicrosoftExt || !isHexaLiteral(BufferPtr, LangOpts))
      return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
  }

  // If we have a hex FP constant, continue.
  if ((C == '-' || C == '+') && (PrevCh == 'P' || PrevCh == 'p')) {
    // Outside C99 and C++17, we accept hexadecimal floating point numbers as a
    // not-quite-conforming extension. Only do so if this looks like it's
    // actually meant to be a hexfloat, and not if it has a ud-suffix.
    bool IsHexFloat = true;
    if (!LangOpts.C99) {
      if (!isHexaLiteral(BufferPtr, LangOpts))
        IsHexFloat = false;
      else if (!LangOpts.CPlusPlus17 &&
               std::find(BufferPtr, CurPtr, '_') != CurPtr)
        IsHexFloat = false;
    }
    if (IsHexFloat)
      return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
  }

  // If we have a digit separator, continue.
  if (C == '\'' && (LangOpts.CPlusPlus14 || LangOpts.C2x)) {
    unsigned NextSize;
    // A ' is only a digit separator if it is followed by another identifier
    // character; otherwise it starts a character literal / ud-suffix.
    char Next = getCharAndSizeNoWarn(CurPtr + Size, NextSize, LangOpts);
    if (isAsciiIdentifierContinue(Next)) {
      if (!isLexingRawMode())
        Diag(CurPtr, LangOpts.CPlusPlus
                         ? diag::warn_cxx11_compat_digit_separator
                         : diag::warn_c2x_compat_digit_separator);
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      CurPtr = ConsumeChar(CurPtr, NextSize, Result);
      return LexNumericConstant(Result, CurPtr);
    }
  }

  // If we have a UCN or UTF-8 character (perhaps in a ud-suffix), continue.
  if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result))
    return LexNumericConstant(Result, CurPtr);
  if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr))
    return LexNumericConstant(Result, CurPtr);

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, tok::numeric_constant);
  Result.setLiteralData(TokStart);
  return true;
}

/// LexUDSuffix - Lex the ud-suffix production for user-defined literal suffixes
/// in C++11, or warn on a ud-suffix in C++98.
const char *Lexer::LexUDSuffix(Token &Result, const char *CurPtr,
                               bool IsStringLiteral) {
  assert(LangOpts.CPlusPlus);

  // Maximally munch an identifier.
  unsigned Size;
  char C = getCharAndSize(CurPtr, Size);
  // Consumed tracks whether we already ate a UCN/UTF-8 suffix-start character
  // below, so we don't consume the first character twice.
  bool Consumed = false;

  if (!isAsciiIdentifierStart(C)) {
    if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result))
      Consumed = true;
    else if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr))
      Consumed = true;
    else
      return CurPtr;  // No ud-suffix here.
  }

  if (!LangOpts.CPlusPlus11) {
    // Pre-C++11: warn that this will become a ud-suffix, but don't lex it.
    if (!isLexingRawMode())
      Diag(CurPtr,
           C == '_' ? diag::warn_cxx11_compat_user_defined_literal
                    : diag::warn_cxx11_compat_reserved_user_defined_literal)
          << FixItHint::CreateInsertion(getSourceLocation(CurPtr), " ");
    return CurPtr;
  }

  // C++11 [lex.ext]p10, [usrlit.suffix]p1: A program containing a ud-suffix
  // that does not start with an underscore is ill-formed. As a conforming
  // extension, we treat all such suffixes as if they had whitespace before
  // them. We assume a suffix beginning with a UCN or UTF-8 character is more
  // likely to be a ud-suffix than a macro, however, and accept that.
  if (!Consumed) {
    bool IsUDSuffix = false;
    if (C == '_')
      IsUDSuffix = true;
    else if (IsStringLiteral && LangOpts.CPlusPlus14) {
      // In C++1y, we need to look ahead a few characters to see if this is a
      // valid suffix for a string literal or a numeric literal (this could be
      // the 'operator""if' defining a numeric literal operator).
      const unsigned MaxStandardSuffixLength = 3;
      char Buffer[MaxStandardSuffixLength] = { C };
      // NB: this 'Consumed' (byte count) intentionally shadows the outer bool.
      unsigned Consumed = Size;
      unsigned Chars = 1;
      while (true) {
        unsigned NextSize;
        char Next = getCharAndSizeNoWarn(CurPtr + Consumed, NextSize, LangOpts);
        if (!isAsciiIdentifierContinue(Next)) {
          // End of suffix. Check whether this is on the allowed list.
          const StringRef CompleteSuffix(Buffer, Chars);
          IsUDSuffix =
              StringLiteralParser::isValidUDSuffix(LangOpts, CompleteSuffix);
          break;
        }

        if (Chars == MaxStandardSuffixLength)
          // Too long: can't be a standard suffix.
          break;

        Buffer[Chars++] = Next;
        Consumed += NextSize;
      }
    }

    if (!IsUDSuffix) {
      if (!isLexingRawMode())
        Diag(CurPtr, LangOpts.MSVCCompat
                         ? diag::ext_ms_reserved_user_defined_literal
                         : diag::ext_reserved_user_defined_literal)
            << FixItHint::CreateInsertion(getSourceLocation(CurPtr), " ");
      return CurPtr;
    }

    CurPtr = ConsumeChar(CurPtr, Size, Result);
  }

  Result.setFlag(Token::HasUDSuffix);
  // Consume the remainder of the suffix (identifier-continue chars, UCNs,
  // and UTF-8 codepoints).
  while (true) {
    C = getCharAndSize(CurPtr, Size);
    if (isAsciiIdentifierContinue(C)) {
      CurPtr = ConsumeChar(CurPtr, Size, Result);
    } else if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result)) {
    } else if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr)) {
    } else
      break;
  }

  return CurPtr;
}

/// LexStringLiteral - Lex the remainder of a string literal, after having lexed
/// either " or L" or u8" or u" or U".
bool Lexer::LexStringLiteral(Token &Result, const char *CurPtr,
                             tok::TokenKind Kind) {
  const char *AfterQuote = CurPtr;
  // Does this string contain the \0 character?
  const char *NulCharacter = nullptr;

  if (!isLexingRawMode() &&
      (Kind == tok::utf8_string_literal ||
       Kind == tok::utf16_string_literal ||
       Kind == tok::utf32_string_literal))
    Diag(BufferPtr, LangOpts.CPlusPlus ? diag::warn_cxx98_compat_unicode_literal
                                       : diag::warn_c99_compat_unicode_literal);

  char C = getAndAdvanceChar(CurPtr, Result);
  while (C != '"') {
    // Skip escaped characters. Escaped newlines will already be processed by
    // getAndAdvanceChar.
    if (C == '\\')
      C = getAndAdvanceChar(CurPtr, Result);

    if (C == '\n' || C == '\r' ||             // Newline.
        (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
      if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
        Diag(BufferPtr, diag::ext_unterminated_char_or_string) << 1;
      // Unterminated: form an 'unknown' token ending before the newline/EOF.
      FormTokenWithChars(Result, CurPtr-1, tok::unknown);
      return true;
    }

    if (C == 0) {
      if (isCodeCompletionPoint(CurPtr-1)) {
        if (ParsingFilename)
          codeCompleteIncludedFile(AfterQuote, CurPtr - 1, /*IsAngled=*/false);
        else
          PP->CodeCompleteNaturalLanguage();
        FormTokenWithChars(Result, CurPtr - 1, tok::unknown);
        cutOffLexing();
        return true;
      }

      // Remember the embedded nul; we warn about it after the literal ends.
      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // If we are in C++11, lex the optional ud-suffix.
  if (LangOpts.CPlusPlus)
    CurPtr = LexUDSuffix(Result, CurPtr, true);

  // If a nul character existed in the string, warn about it.
  if (NulCharacter && !isLexingRawMode())
    Diag(NulCharacter, diag::null_in_char_or_string) << 1;

  // Update the location of the token as well as the BufferPtr instance var.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, Kind);
  Result.setLiteralData(TokStart);
  return true;
}

/// LexRawStringLiteral - Lex the remainder of a raw string literal, after
/// having lexed R", LR", u8R", uR", or UR".
bool Lexer::LexRawStringLiteral(Token &Result, const char *CurPtr,
                                tok::TokenKind Kind) {
  // This function doesn't use getAndAdvanceChar because C++0x [lex.pptoken]p3:
  //  Between the initial and final double quote characters of the raw string,
  //  any transformations performed in phases 1 and 2 (trigraphs,
  //  universal-character-names, and line splicing) are reverted.

  if (!isLexingRawMode())
    Diag(BufferPtr, diag::warn_cxx98_compat_raw_string_literal);

  unsigned PrefixLen = 0;

  // [lex.string]: the d-char-sequence may be at most 16 characters long.
  while (PrefixLen != 16 && isRawStringDelimBody(CurPtr[PrefixLen]))
    ++PrefixLen;

  // If the last character was not a '(', then we didn't lex a valid delimiter.
  if (CurPtr[PrefixLen] != '(') {
    if (!isLexingRawMode()) {
      const char *PrefixEnd = &CurPtr[PrefixLen];
      if (PrefixLen == 16) {
        Diag(PrefixEnd, diag::err_raw_delim_too_long);
      } else {
        Diag(PrefixEnd, diag::err_invalid_char_raw_delim)
          << StringRef(PrefixEnd, 1);
      }
    }

    // Search for the next '"' in hopes of salvaging the lexer. Unfortunately,
    // it's possible the '"' was intended to be part of the raw string, but
    // there's not much we can do about that.
    while (true) {
      char C = *CurPtr++;

      if (C == '"')
        break;
      if (C == 0 && CurPtr-1 == BufferEnd) {
        --CurPtr;
        break;
      }
    }

    FormTokenWithChars(Result, CurPtr, tok::unknown);
    return true;
  }

  // Save prefix and move CurPtr past it
  const char *Prefix = CurPtr;
  CurPtr += PrefixLen + 1; // skip over prefix and '('

  while (true) {
    char C = *CurPtr++;

    if (C == ')') {
      // Check for prefix match and closing quote.
      if (strncmp(CurPtr, Prefix, PrefixLen) == 0 && CurPtr[PrefixLen] == '"') {
        CurPtr += PrefixLen + 1; // skip over prefix and '"'
        break;
      }
    } else if (C == 0 && CurPtr-1 == BufferEnd) { // End of file.
      if (!isLexingRawMode())
        Diag(BufferPtr, diag::err_unterminated_raw_string)
          << StringRef(Prefix, PrefixLen);
      FormTokenWithChars(Result, CurPtr-1, tok::unknown);
      return true;
    }
  }

  // If we are in C++11, lex the optional ud-suffix.
  if (LangOpts.CPlusPlus)
    CurPtr = LexUDSuffix(Result, CurPtr, true);

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, Kind);
  Result.setLiteralData(TokStart);
  return true;
}

/// LexAngledStringLiteral - Lex the remainder of an angled string literal,
/// after having lexed the '<' character. This is used for #include filenames.
bool Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
  // Does this string contain the \0 character?
  const char *NulCharacter = nullptr;
  const char *AfterLessPos = CurPtr;
  char C = getAndAdvanceChar(CurPtr, Result);
  while (C != '>') {
    // Skip escaped characters. Escaped newlines will already be processed by
    // getAndAdvanceChar.
    if (C == '\\')
      C = getAndAdvanceChar(CurPtr, Result);

    if (isVerticalWhitespace(C) ||               // Newline.
        (C == 0 && (CurPtr - 1 == BufferEnd))) { // End of file.
      // If the filename is unterminated, then it must just be a lone <
      // character.  Return this as such.
      FormTokenWithChars(Result, AfterLessPos, tok::less);
      return true;
    }

    if (C == 0) {
      if (isCodeCompletionPoint(CurPtr - 1)) {
        codeCompleteIncludedFile(AfterLessPos, CurPtr - 1, /*IsAngled=*/true);
        cutOffLexing();
        FormTokenWithChars(Result, CurPtr - 1, tok::unknown);
        return true;
      }
      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // If a nul character existed in the string, warn about it.
  if (NulCharacter && !isLexingRawMode())
    Diag(NulCharacter, diag::null_in_char_or_string) << 1;

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, tok::header_name);
  Result.setLiteralData(TokStart);
  return true;
}

/// Set up the preprocessor's code-completion state for completing an
/// #include filename. PathStart points at the first character after the
/// opening quote/angle bracket; CompletionPoint is where the completion
/// was requested.
void Lexer::codeCompleteIncludedFile(const char *PathStart,
                                     const char *CompletionPoint,
                                     bool IsAngled) {
  // Completion only applies to the filename, after the last slash.
  StringRef PartialPath(PathStart, CompletionPoint - PathStart);
  llvm::StringRef SlashChars = LangOpts.MSVCCompat ? "/\\" : "/";
  auto Slash = PartialPath.find_last_of(SlashChars);
  StringRef Dir =
      (Slash == StringRef::npos) ? "" : PartialPath.take_front(Slash);
  const char *StartOfFilename =
      (Slash == StringRef::npos) ? PathStart : PathStart + Slash + 1;
  // Code completion filter range is the filename only, up to completion point.
  PP->setCodeCompletionIdentifierInfo(&PP->getIdentifierTable().get(
      StringRef(StartOfFilename, CompletionPoint - StartOfFilename)));
  // We should replace the characters up to the closing quote or closest slash,
  // if any.
  while (CompletionPoint < BufferEnd) {
    char Next = *(CompletionPoint + 1);
    if (Next == 0 || Next == '\r' || Next == '\n')
      break;
    ++CompletionPoint;
    if (Next == (IsAngled ? '>' : '"'))
      break;
    if (llvm::is_contained(SlashChars, Next))
      break;
  }

  PP->setCodeCompletionTokenRange(
      FileLoc.getLocWithOffset(StartOfFilename - BufferStart),
      FileLoc.getLocWithOffset(CompletionPoint - BufferStart));
  PP->CodeCompleteIncludedFile(Dir, IsAngled);
}

/// LexCharConstant - Lex the remainder of a character constant, after having
/// lexed either ' or L' or u8' or u' or U'.
bool Lexer::LexCharConstant(Token &Result, const char *CurPtr,
                            tok::TokenKind Kind) {
  // Does this character contain the \0 character?
  const char *NulCharacter = nullptr;

  if (!isLexingRawMode()) {
    if (Kind == tok::utf16_char_constant || Kind == tok::utf32_char_constant)
      Diag(BufferPtr, LangOpts.CPlusPlus
                          ? diag::warn_cxx98_compat_unicode_literal
                          : diag::warn_c99_compat_unicode_literal);
    else if (Kind == tok::utf8_char_constant)
      Diag(BufferPtr, diag::warn_cxx14_compat_u8_character_literal);
  }

  char C = getAndAdvanceChar(CurPtr, Result);
  if (C == '\'') {
    // '' is not a valid character constant; recover as tok::unknown.
    if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
      Diag(BufferPtr, diag::ext_empty_character);
    FormTokenWithChars(Result, CurPtr, tok::unknown);
    return true;
  }

  while (C != '\'') {
    // Skip escaped characters.
    if (C == '\\')
      C = getAndAdvanceChar(CurPtr, Result);

    if (C == '\n' || C == '\r' ||             // Newline.
        (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
      if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
        Diag(BufferPtr, diag::ext_unterminated_char_or_string) << 0;
      FormTokenWithChars(Result, CurPtr-1, tok::unknown);
      return true;
    }

    if (C == 0) {
      if (isCodeCompletionPoint(CurPtr-1)) {
        PP->CodeCompleteNaturalLanguage();
        FormTokenWithChars(Result, CurPtr-1, tok::unknown);
        cutOffLexing();
        return true;
      }

      // Remember the embedded nul; we warn about it after the literal ends.
      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // If we are in C++11, lex the optional ud-suffix.
  if (LangOpts.CPlusPlus)
    CurPtr = LexUDSuffix(Result, CurPtr, false);

  // If a nul character existed in the character, warn about it.
  if (NulCharacter && !isLexingRawMode())
    Diag(NulCharacter, diag::null_in_char_or_string) << 0;

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, Kind);
  Result.setLiteralData(TokStart);
  return true;
}

/// SkipWhitespace - Efficiently skip over a series of whitespace characters.
/// Update BufferPtr to point to the next non-whitespace character and return.
///
/// This method forms a token and returns true if KeepWhitespaceMode is enabled.
bool Lexer::SkipWhitespace(Token &Result, const char *CurPtr,
                           bool &TokAtPhysicalStartOfLine) {
  // Whitespace - Skip it, then return the token after the whitespace.
  bool SawNewline = isVerticalWhitespace(CurPtr[-1]);

  unsigned char Char = *CurPtr;

  // lastNewLine tracks the final newline in this whitespace run; together
  // with NewLinePtr (the first one) it delimits an empty-line range for the
  // EmptylineHandler below.
  const char *lastNewLine = nullptr;
  auto setLastNewLine = [&](const char *Ptr) {
    lastNewLine = Ptr;
    if (!NewLinePtr)
      NewLinePtr = Ptr;
  };
  if (SawNewline)
    setLastNewLine(CurPtr - 1);

  // Skip consecutive spaces efficiently.
  while (true) {
    // Skip horizontal whitespace very aggressively.
    while (isHorizontalWhitespace(Char))
      Char = *++CurPtr;

    // Otherwise if we have something other than whitespace, we're done.
    if (!isVerticalWhitespace(Char))
      break;

    if (ParsingPreprocessorDirective) {
      // End of preprocessor directive line, let LexTokenInternal handle this.
      BufferPtr = CurPtr;
      return false;
    }

    // OK, but handle newline.
    if (*CurPtr == '\n')
      setLastNewLine(CurPtr);
    SawNewline = true;
    Char = *++CurPtr;
  }

  // If the client wants us to return whitespace, return it now.
  if (isKeepWhitespaceMode()) {
    FormTokenWithChars(Result, CurPtr, tok::unknown);
    if (SawNewline) {
      IsAtStartOfLine = true;
      IsAtPhysicalStartOfLine = true;
    }
    // FIXME: The next token will not have LeadingSpace set.
    return true;
  }

  // If this isn't immediately after a newline, there is leading space.
  char PrevChar = CurPtr[-1];
  bool HasLeadingSpace = !isVerticalWhitespace(PrevChar);

  Result.setFlagValue(Token::LeadingSpace, HasLeadingSpace);
  if (SawNewline) {
    Result.setFlag(Token::StartOfLine);
    TokAtPhysicalStartOfLine = true;

    // More than one newline in the run means there was at least one empty
    // line; notify the handler with the range between the two newlines.
    if (NewLinePtr && lastNewLine && NewLinePtr != lastNewLine && PP) {
      if (auto *Handler = PP->getEmptylineHandler())
        Handler->HandleEmptyline(SourceRange(getSourceLocation(NewLinePtr + 1),
                                             getSourceLocation(lastNewLine)));
    }
  }

  BufferPtr = CurPtr;
  return false;
}

/// We have just read the // characters from input.  Skip until we find the
/// newline character that terminates the comment.  Then update BufferPtr and
/// return.
///
/// If we're in KeepCommentMode or any CommentHandler has inserted
/// some tokens, this will store the first token and return true.
bool Lexer::SkipLineComment(Token &Result, const char *CurPtr,
                            bool &TokAtPhysicalStartOfLine) {
  // If Line comments aren't explicitly enabled for this language, emit an
  // extension warning.
  if (!LineComment) {
    if (!isLexingRawMode()) // There's no PP in raw mode, so can't emit diags.
      Diag(BufferPtr, diag::ext_line_comment);

    // Mark them enabled so we only emit one warning for this translation
    // unit.
    LineComment = true;
  }

  // Scan over the body of the comment.  The common case, when scanning, is that
  // the comment contains normal ascii characters with nothing interesting in
  // them.  As such, optimize for this case with the inner loop.
  //
  // This loop terminates with CurPtr pointing at the newline (or end of buffer)
  // character that ends the line comment.
  char C;
  while (true) {
    C = *CurPtr;
    // Skip over characters in the fast loop.
    while (C != 0 &&                // Potentially EOF.
           C != '\n' && C != '\r')  // Newline or DOS-style newline.
      C = *++CurPtr;

    const char *NextLine = CurPtr;
    if (C != 0) {
      // We found a newline, see if it's escaped.
      const char *EscapePtr = CurPtr-1;
      bool HasSpace = false;
      while (isHorizontalWhitespace(*EscapePtr)) { // Skip whitespace.
        --EscapePtr;
        HasSpace = true;
      }

      if (*EscapePtr == '\\')
        // Escaped newline.
        CurPtr = EscapePtr;
      else if (EscapePtr[0] == '/' && EscapePtr[-1] == '?' &&
               EscapePtr[-2] == '?' && LangOpts.Trigraphs)
        // Trigraph-escaped newline.
        CurPtr = EscapePtr-2;
      else
        break; // This is a newline, we're done.

      // If there was space between the backslash and newline, warn about it.
      if (HasSpace && !isLexingRawMode())
        Diag(EscapePtr, diag::backslash_newline_space);
    }

    // Otherwise, this is a hard case.  Fall back on getAndAdvanceChar to
    // properly decode the character.  Read it in raw mode to avoid emitting
    // diagnostics about things like trigraphs.  If we see an escaped newline,
    // we'll handle it below.
    const char *OldPtr = CurPtr;
    bool OldRawMode = isLexingRawMode();
    LexingRawMode = true;
    C = getAndAdvanceChar(CurPtr, Result);
    LexingRawMode = OldRawMode;

    // If we only read only one character, then no special handling is needed.
    // We're done and can skip forward to the newline.
    if (C != 0 && CurPtr == OldPtr+1) {
      CurPtr = NextLine;
      break;
    }

    // If we read multiple characters, and one of those characters was a \r or
    // \n, then we had an escaped newline within the comment.  Emit diagnostic
    // unless the next line is also a // comment.
    if (CurPtr != OldPtr + 1 && C != '/' &&
        (CurPtr == BufferEnd + 1 || CurPtr[0] != '/')) {
      for (; OldPtr != CurPtr; ++OldPtr)
        if (OldPtr[0] == '\n' || OldPtr[0] == '\r') {
          // Okay, we found a // comment that ends in a newline, if the next
          // line is also a // comment, but has spaces, don't emit a diagnostic.
          if (isWhitespace(C)) {
            const char *ForwardPtr = CurPtr;
            while (isWhitespace(*ForwardPtr))  // Skip whitespace.
              ++ForwardPtr;
            if (ForwardPtr[0] == '/' && ForwardPtr[1] == '/')
              break;
          }

          if (!isLexingRawMode())
            Diag(OldPtr-1, diag::ext_multi_line_line_comment);
          break;
        }
    }

    if (C == '\r' || C == '\n' || CurPtr == BufferEnd + 1) {
      --CurPtr;
      break;
    }

    if (C == '\0' && isCodeCompletionPoint(CurPtr-1)) {
      PP->CodeCompleteNaturalLanguage();
      cutOffLexing();
      return false;
    }
  }

  // Found but did not consume the newline.  Notify comment handlers about the
  // comment unless we're in a #if 0 block.
  if (PP && !isLexingRawMode() &&
      PP->HandleComment(Result, SourceRange(getSourceLocation(BufferPtr),
                                            getSourceLocation(CurPtr)))) {
    BufferPtr = CurPtr;
    return true; // A token has to be returned.
  }

  // If we are returning comments as tokens, return this comment as a token.
  if (inKeepCommentMode())
    return SaveLineComment(Result, CurPtr);

  // If we are inside a preprocessor directive and we see the end of line,
  // return immediately, so that the lexer can return this as an EOD token.
  if (ParsingPreprocessorDirective || CurPtr == BufferEnd) {
    BufferPtr = CurPtr;
    return false;
  }

  // Otherwise, eat the \n character.  We don't care if this is a \n\r or
  // \r\n sequence.  This is an efficiency hack (because we know the \n can't
  // contribute to another token), it isn't needed for correctness.  Note that
  // this is ok even in KeepWhitespaceMode, because we would have returned the
  // comment above in that mode.
  NewLinePtr = CurPtr++;

  // The next returned token is at the start of the line.
  Result.setFlag(Token::StartOfLine);
  TokAtPhysicalStartOfLine = true;
  // No leading whitespace seen so far.
  Result.clearFlag(Token::LeadingSpace);
  BufferPtr = CurPtr;
  return false;
}

/// If in save-comment mode, package up this Line comment in an appropriate
/// way and return it.
bool Lexer::SaveLineComment(Token &Result, const char *CurPtr) {
  // If we're not in a preprocessor directive, just return the // comment
  // directly.
  FormTokenWithChars(Result, CurPtr, tok::comment);

  if (!ParsingPreprocessorDirective || LexingRawMode)
    return true;

  // If this Line-style comment is in a macro definition, transmogrify it into
  // a C-style block comment.
  bool Invalid = false;
  std::string Spelling = PP->getSpelling(Result, &Invalid);
  if (Invalid)
    return true;

  assert(Spelling[0] == '/' && Spelling[1] == '/' && "Not line comment?");
  Spelling[1] = '*';   // Change prefix to "/*".
  Spelling += "*/";    // add suffix.

  Result.setKind(tok::comment);
  PP->CreateString(Spelling, Result,
                   Result.getLocation(), Result.getLocation());
  return true;
}

/// isBlockCommentEndOfEscapedNewLine - Return true if the specified newline
/// character (either \\n or \\r) is part of an escaped newline sequence.  Issue
/// a diagnostic if so.  We know that the newline is inside of a block comment.
static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr, Lexer *L,
                                                  bool Trigraphs) {
  assert(CurPtr[0] == '\n' || CurPtr[0] == '\r');

  // Position of the first trigraph in the ending sequence.
  const char *TrigraphPos = nullptr;
  // Position of the first whitespace after a '\' in the ending sequence.
  const char *SpacePos = nullptr;

  // Walk backwards from the newline through (possibly multiple) escaped
  // newlines until we find the '*' that would form '*/' after line splicing.
  while (true) {
    // Back up off the newline.
    --CurPtr;

    // If this is a two-character newline sequence, skip the other character.
    if (CurPtr[0] == '\n' || CurPtr[0] == '\r') {
      // \n\n or \r\r -> not escaped newline.
      if (CurPtr[0] == CurPtr[1])
        return false;
      // \n\r or \r\n -> skip the newline.
      --CurPtr;
    }

    // If we have horizontal whitespace, skip over it.  We allow whitespace
    // between the slash and newline.
    while (isHorizontalWhitespace(*CurPtr) || *CurPtr == 0) {
      SpacePos = CurPtr;
      --CurPtr;
    }

    // If we have a slash, this is an escaped newline.
    if (*CurPtr == '\\') {
      --CurPtr;
    } else if (CurPtr[0] == '/' && CurPtr[-1] == '?' && CurPtr[-2] == '?') {
      // This is a trigraph encoding of a slash.
      TrigraphPos = CurPtr - 2;
      CurPtr -= 3;
    } else {
      return false;
    }

    // If the character preceding the escaped newline is a '*', then after line
    // splicing we have a '*/' ending the comment.
    if (*CurPtr == '*')
      break;

    if (*CurPtr != '\n' && *CurPtr != '\r')
      return false;
  }

  if (TrigraphPos) {
    // If no trigraphs are enabled, warn that we ignored this trigraph and
    // ignore this * character.
    if (!Trigraphs) {
      if (!L->isLexingRawMode())
        L->Diag(TrigraphPos, diag::trigraph_ignored_block_comment);
      return false;
    }
    if (!L->isLexingRawMode())
      L->Diag(TrigraphPos, diag::trigraph_ends_block_comment);
  }

  // Warn about having an escaped newline between the */ characters.
  if (!L->isLexingRawMode())
    L->Diag(CurPtr + 1, diag::escaped_newline_block_comment_end);

  // If there was space between the backslash and newline, warn about it.
  if (SpacePos && !L->isLexingRawMode())
    L->Diag(SpacePos, diag::backslash_newline_space);

  return true;
}

#ifdef __SSE2__
#include <emmintrin.h>
#elif __ALTIVEC__
#include <altivec.h>
#undef bool
#endif

/// We have just read from input the / and * characters that started a comment.
/// Read until we find the * and / characters that terminate the comment.
/// Note that we don't bother decoding trigraphs or escaped newlines in block
/// comments, because they cannot cause the comment to end.  The only thing
/// that can happen is the comment could end with an escaped newline between
/// the terminating * and /.
///
/// If we're in KeepCommentMode or any CommentHandler has inserted
/// some tokens, this will store the first token and return true.
bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr,
                             bool &TokAtPhysicalStartOfLine) {
  // Scan one character past where we should, looking for a '/' character.  Once
  // we find it, check to see if it was preceded by a *.  This common
  // optimization helps people who like to put a lot of * characters in their
  // comments.

  // The first character we get with newlines and trigraphs skipped to handle
  // the degenerate /*/ case below correctly if the * has an escaped newline
  // after it.
  unsigned CharSize;
  unsigned char C = getCharAndSize(CurPtr, CharSize);
  CurPtr += CharSize;
  if (C == 0 && CurPtr == BufferEnd+1) {
    if (!isLexingRawMode())
      Diag(BufferPtr, diag::err_unterminated_block_comment);
    --CurPtr;

    // KeepWhitespaceMode should return this broken comment as a token.  Since
    // it isn't a well formed comment, just return it as an 'unknown' token.
2655 if (isKeepWhitespaceMode()) { 2656 FormTokenWithChars(Result, CurPtr, tok::unknown); 2657 return true; 2658 } 2659 2660 BufferPtr = CurPtr; 2661 return false; 2662 } 2663 2664 // Check to see if the first character after the '/*' is another /. If so, 2665 // then this slash does not end the block comment, it is part of it. 2666 if (C == '/') 2667 C = *CurPtr++; 2668 2669 while (true) { 2670 // Skip over all non-interesting characters until we find end of buffer or a 2671 // (probably ending) '/' character. 2672 if (CurPtr + 24 < BufferEnd && 2673 // If there is a code-completion point avoid the fast scan because it 2674 // doesn't check for '\0'. 2675 !(PP && PP->getCodeCompletionFileLoc() == FileLoc)) { 2676 // While not aligned to a 16-byte boundary. 2677 while (C != '/' && ((intptr_t)CurPtr & 0x0F) != 0) 2678 C = *CurPtr++; 2679 2680 if (C == '/') goto FoundSlash; 2681 2682 #ifdef __SSE2__ 2683 __m128i Slashes = _mm_set1_epi8('/'); 2684 while (CurPtr+16 <= BufferEnd) { 2685 int cmp = _mm_movemask_epi8(_mm_cmpeq_epi8(*(const __m128i*)CurPtr, 2686 Slashes)); 2687 if (cmp != 0) { 2688 // Adjust the pointer to point directly after the first slash. It's 2689 // not necessary to set C here, it will be overwritten at the end of 2690 // the outer loop. 2691 CurPtr += llvm::countTrailingZeros<unsigned>(cmp) + 1; 2692 goto FoundSlash; 2693 } 2694 CurPtr += 16; 2695 } 2696 #elif __ALTIVEC__ 2697 __vector unsigned char Slashes = { 2698 '/', '/', '/', '/', '/', '/', '/', '/', 2699 '/', '/', '/', '/', '/', '/', '/', '/' 2700 }; 2701 while (CurPtr + 16 <= BufferEnd && 2702 !vec_any_eq(*(const __vector unsigned char *)CurPtr, Slashes)) 2703 CurPtr += 16; 2704 #else 2705 // Scan for '/' quickly. Many block comments are very large. 
2706 while (CurPtr[0] != '/' && 2707 CurPtr[1] != '/' && 2708 CurPtr[2] != '/' && 2709 CurPtr[3] != '/' && 2710 CurPtr+4 < BufferEnd) { 2711 CurPtr += 4; 2712 } 2713 #endif 2714 2715 // It has to be one of the bytes scanned, increment to it and read one. 2716 C = *CurPtr++; 2717 } 2718 2719 // Loop to scan the remainder. 2720 while (C != '/' && C != '\0') 2721 C = *CurPtr++; 2722 2723 if (C == '/') { 2724 FoundSlash: 2725 if (CurPtr[-2] == '*') // We found the final */. We're done! 2726 break; 2727 2728 if ((CurPtr[-2] == '\n' || CurPtr[-2] == '\r')) { 2729 if (isEndOfBlockCommentWithEscapedNewLine(CurPtr - 2, this, 2730 LangOpts.Trigraphs)) { 2731 // We found the final */, though it had an escaped newline between the 2732 // * and /. We're done! 2733 break; 2734 } 2735 } 2736 if (CurPtr[0] == '*' && CurPtr[1] != '/') { 2737 // If this is a /* inside of the comment, emit a warning. Don't do this 2738 // if this is a /*/, which will end the comment. This misses cases with 2739 // embedded escaped newlines, but oh well. 2740 if (!isLexingRawMode()) 2741 Diag(CurPtr-1, diag::warn_nested_block_comment); 2742 } 2743 } else if (C == 0 && CurPtr == BufferEnd+1) { 2744 if (!isLexingRawMode()) 2745 Diag(BufferPtr, diag::err_unterminated_block_comment); 2746 // Note: the user probably forgot a */. We could continue immediately 2747 // after the /*, but this would involve lexing a lot of what really is the 2748 // comment, which surely would confuse the parser. 2749 --CurPtr; 2750 2751 // KeepWhitespaceMode should return this broken comment as a token. Since 2752 // it isn't a well formed comment, just return it as an 'unknown' token. 
2753 if (isKeepWhitespaceMode()) { 2754 FormTokenWithChars(Result, CurPtr, tok::unknown); 2755 return true; 2756 } 2757 2758 BufferPtr = CurPtr; 2759 return false; 2760 } else if (C == '\0' && isCodeCompletionPoint(CurPtr-1)) { 2761 PP->CodeCompleteNaturalLanguage(); 2762 cutOffLexing(); 2763 return false; 2764 } 2765 2766 C = *CurPtr++; 2767 } 2768 2769 // Notify comment handlers about the comment unless we're in a #if 0 block. 2770 if (PP && !isLexingRawMode() && 2771 PP->HandleComment(Result, SourceRange(getSourceLocation(BufferPtr), 2772 getSourceLocation(CurPtr)))) { 2773 BufferPtr = CurPtr; 2774 return true; // A token has to be returned. 2775 } 2776 2777 // If we are returning comments as tokens, return this comment as a token. 2778 if (inKeepCommentMode()) { 2779 FormTokenWithChars(Result, CurPtr, tok::comment); 2780 return true; 2781 } 2782 2783 // It is common for the tokens immediately after a /**/ comment to be 2784 // whitespace. Instead of going through the big switch, handle it 2785 // efficiently now. This is safe even in KeepWhitespaceMode because we would 2786 // have already returned above with the comment as a token. 2787 if (isHorizontalWhitespace(*CurPtr)) { 2788 SkipWhitespace(Result, CurPtr+1, TokAtPhysicalStartOfLine); 2789 return false; 2790 } 2791 2792 // Otherwise, just return so that the next character will be lexed as a token. 2793 BufferPtr = CurPtr; 2794 Result.setFlag(Token::LeadingSpace); 2795 return false; 2796 } 2797 2798 //===----------------------------------------------------------------------===// 2799 // Primary Lexing Entry Points 2800 //===----------------------------------------------------------------------===// 2801 2802 /// ReadToEndOfLine - Read the rest of the current preprocessor line as an 2803 /// uninterpreted string. This switches the lexer out of directive mode. 
void Lexer::ReadToEndOfLine(SmallVectorImpl<char> *Result) {
  assert(ParsingPreprocessorDirective && ParsingFilename == false &&
         "Must be in a preprocessing directive!");
  // \p Result may be null; the line is then consumed but not recorded.
  Token Tmp;
  Tmp.startToken();

  // CurPtr - Cache BufferPtr in an automatic variable.
  const char *CurPtr = BufferPtr;
  while (true) {
    char Char = getAndAdvanceChar(CurPtr, Tmp);
    switch (Char) {
    default:
      if (Result)
        Result->push_back(Char);
      break;
    case 0:  // Null.
      // Found end of file?
      if (CurPtr-1 != BufferEnd) {
        if (isCodeCompletionPoint(CurPtr-1)) {
          PP->CodeCompleteNaturalLanguage();
          cutOffLexing();
          return;
        }

        // Nope, normal character, continue.
        if (Result)
          Result->push_back(Char);
        break;
      }
      // FALL THROUGH.
      LLVM_FALLTHROUGH;
    case '\r':
    case '\n':
      // Okay, we found the end of the line. First, back up past the \0, \r, \n.
      assert(CurPtr[-1] == Char && "Trigraphs for newline?");
      BufferPtr = CurPtr-1;

      // Next, lex the character, which should handle the EOD transition.
      Lex(Tmp);
      if (Tmp.is(tok::code_completion)) {
        if (PP)
          PP->CodeCompleteNaturalLanguage();
        Lex(Tmp);
      }
      assert(Tmp.is(tok::eod) && "Unexpected token!");

      // Finally, we're done;
      return;
    }
  }
}

/// LexEndOfFile - CurPtr points to the end of this file.  Handle this
/// condition, reporting diagnostics and handling other edge cases as required.
/// This returns true if Result contains a token, false if PP.Lex should be
/// called again.
bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
  // If we hit the end of the file while parsing a preprocessor directive,
  // end the preprocessor directive first.  The next token returned will
  // then be the end of file.
  if (ParsingPreprocessorDirective) {
    // Done parsing the "line".
    ParsingPreprocessorDirective = false;
    // Update the location of token as well as BufferPtr.
    FormTokenWithChars(Result, CurPtr, tok::eod);

    // Restore comment saving mode, in case it was disabled for directive.
    if (PP)
      resetExtendedTokenMode();
    return true;  // Have a token.
  }

  // If we are in raw mode, return this event as an EOF token.  Let the caller
  // that put us in raw mode handle the event.
  if (isLexingRawMode()) {
    Result.startToken();
    BufferPtr = BufferEnd;
    FormTokenWithChars(Result, BufferEnd, tok::eof);
    return true;
  }

  if (PP->isRecordingPreamble() && PP->isInPrimaryFile()) {
    PP->setRecordedPreambleConditionalStack(ConditionalStack);
    // If the preamble cuts off the end of a header guard, consider it guarded.
    // The guard is valid for the preamble content itself, and for tools the
    // most useful answer is "yes, this file has a header guard".
    if (!ConditionalStack.empty())
      MIOpt.ExitTopLevelConditional();
    ConditionalStack.clear();
  }

  // Issue diagnostics for unterminated #if and missing newline.

  // If we are in a #if directive, emit an error.
  while (!ConditionalStack.empty()) {
    // Suppress the error in the file containing the code-completion point;
    // incomplete conditionals are expected there.
    if (PP->getCodeCompletionFileLoc() != FileLoc)
      PP->Diag(ConditionalStack.back().IfLoc,
               diag::err_pp_unterminated_conditional);
    ConditionalStack.pop_back();
  }

  SourceLocation EndLoc = getSourceLocation(BufferEnd);
  // C99 5.1.1.2p2: If the file is non-empty and didn't end in a newline, issue
  // a pedwarn.
  if (CurPtr != BufferStart && (CurPtr[-1] != '\n' && CurPtr[-1] != '\r')) {
    DiagnosticsEngine &Diags = PP->getDiagnostics();
    unsigned DiagID;

    if (LangOpts.CPlusPlus11) {
      // C++11 [lex.phases] 2.2 p2
      // Prefer the C++98 pedantic compatibility warning over the generic,
      // non-extension, user-requested "missing newline at EOF" warning.
      if (!Diags.isIgnored(diag::warn_cxx98_compat_no_newline_eof, EndLoc)) {
        DiagID = diag::warn_cxx98_compat_no_newline_eof;
      } else {
        DiagID = diag::warn_no_newline_eof;
      }
    } else {
      DiagID = diag::ext_no_newline_eof;
    }

    Diag(BufferEnd, DiagID)
        << FixItHint::CreateInsertion(EndLoc, "\n");
  }

  BufferPtr = CurPtr;

  // Finally, let the preprocessor handle this.
  return PP->HandleEndOfFile(Result, EndLoc, isPragmaLexer());
}

/// isNextPPTokenLParen - Return 1 if the next unexpanded token lexed from
/// the specified lexer will return a tok::l_paren token, 0 if it is something
/// else and 2 if there are no more tokens in the buffer controlled by the
/// lexer.
unsigned Lexer::isNextPPTokenLParen() {
  assert(!LexingRawMode && "How can we expand a macro from a skipping buffer?");

  // Switch to 'skipping' mode.  This will ensure that we can lex a token
  // without emitting diagnostics, disables macro expansion, and will cause EOF
  // to return an EOF token instead of popping the include stack.
  LexingRawMode = true;

  // Save state that can be changed while lexing so that we can restore it.
  const char *TmpBufferPtr = BufferPtr;
  bool inPPDirectiveMode = ParsingPreprocessorDirective;
  bool atStartOfLine = IsAtStartOfLine;
  bool atPhysicalStartOfLine = IsAtPhysicalStartOfLine;
  bool leadingSpace = HasLeadingSpace;

  // Peek at the next token, then roll everything back.
  Token Tok;
  Lex(Tok);

  // Restore state that may have changed.
  BufferPtr = TmpBufferPtr;
  ParsingPreprocessorDirective = inPPDirectiveMode;
  HasLeadingSpace = leadingSpace;
  IsAtStartOfLine = atStartOfLine;
  IsAtPhysicalStartOfLine = atPhysicalStartOfLine;

  // Restore the lexer back to non-skipping mode.
  LexingRawMode = false;

  if (Tok.is(tok::eof))
    return 2;
  return Tok.is(tok::l_paren);
}

/// Find the end of a version control conflict marker.  Returns a pointer to
/// the terminator sequence ('<<<<\n' for Perforce markers, '>>>>>>>'
/// otherwise) where it occurs at the start of a line, or null if the buffer
/// contains no such terminator.
static const char *FindConflictEnd(const char *CurPtr, const char *BufferEnd,
                                   ConflictMarkerKind CMK) {
  const char *Terminator = CMK == CMK_Perforce ? "<<<<\n" : ">>>>>>>";
  size_t TermLen = CMK == CMK_Perforce ? 5 : 7;
  // Skip past the marker at CurPtr itself so we don't match it.
  auto RestOfBuffer = StringRef(CurPtr, BufferEnd - CurPtr).substr(TermLen);
  size_t Pos = RestOfBuffer.find(Terminator);
  while (Pos != StringRef::npos) {
    // Must occur at start of line.  Pos == 0 means the preceding character is
    // not visible in RestOfBuffer, so it doesn't count either.
    if (Pos == 0 ||
        (RestOfBuffer[Pos - 1] != '\r' && RestOfBuffer[Pos - 1] != '\n')) {
      RestOfBuffer = RestOfBuffer.substr(Pos+TermLen);
      Pos = RestOfBuffer.find(Terminator);
      continue;
    }
    return RestOfBuffer.data()+Pos;
  }
  return nullptr;
}

/// IsStartOfConflictMarker - If the specified pointer is the start of a version
/// control conflict marker like '<<<<<<<', recognize it as such, emit an error
/// and recover nicely.  This returns true if it is a conflict marker and false
/// if not.
bool Lexer::IsStartOfConflictMarker(const char *CurPtr) {
  // Only a conflict marker if it starts at the beginning of a line.
  if (CurPtr != BufferStart &&
      CurPtr[-1] != '\n' && CurPtr[-1] != '\r')
    return false;

  // Check to see if we have <<<<<<< or >>>>.
  if (!StringRef(CurPtr, BufferEnd - CurPtr).startswith("<<<<<<<") &&
      !StringRef(CurPtr, BufferEnd - CurPtr).startswith(">>>> "))
    return false;

  // If we have a situation where we don't care about conflict markers, ignore
  // it.
  if (CurrentConflictMarkerState || isLexingRawMode())
    return false;

  // '<' introduces a normal (e.g. git/svn) marker; '>' a Perforce one.
  ConflictMarkerKind Kind = *CurPtr == '<' ? CMK_Normal : CMK_Perforce;

  // Check to see if there is an ending marker somewhere in the buffer at the
  // start of a line to terminate this conflict marker.
  if (FindConflictEnd(CurPtr, BufferEnd, Kind)) {
    // We found a match.  We are really in a conflict marker.
    // Diagnose this, and ignore to the end of line.
    Diag(CurPtr, diag::err_conflict_marker);
    CurrentConflictMarkerState = Kind;

    // Skip ahead to the end of line.  We know this exists because the
    // end-of-conflict marker starts with \r or \n.
    while (*CurPtr != '\r' && *CurPtr != '\n') {
      assert(CurPtr != BufferEnd && "Didn't find end of line");
      ++CurPtr;
    }
    BufferPtr = CurPtr;
    return true;
  }

  // No end of conflict marker found.
  return false;
}

/// HandleEndOfConflictMarker - If this is a '====' or '||||' or '>>>>', or if
/// it is '<<<<' and the conflict marker started with a '>>>>' marker, then it
/// is the end of a conflict marker.  Handle it by ignoring up until the end of
/// the line.  This returns true if it is a conflict marker and false if not.
bool Lexer::HandleEndOfConflictMarker(const char *CurPtr) {
  // Only a conflict marker if it starts at the beginning of a line.
  if (CurPtr != BufferStart &&
      CurPtr[-1] != '\n' && CurPtr[-1] != '\r')
    return false;

  // If we have a situation where we don't care about conflict markers, ignore
  // it.
  if (!CurrentConflictMarkerState || isLexingRawMode())
    return false;

  // Check to see if we have the marker (4 characters in a row).
  for (unsigned i = 1; i != 4; ++i)
    if (CurPtr[i] != CurPtr[0])
      return false;

  // If we do have it, search for the end of the conflict marker.  This could
  // fail if it got skipped with a '#if 0' or something.  Note that CurPtr might
  // be the end of conflict marker.
  if (const char *End = FindConflictEnd(CurPtr, BufferEnd,
                                        CurrentConflictMarkerState)) {
    CurPtr = End;

    // Skip ahead to the end of line.
    while (CurPtr != BufferEnd && *CurPtr != '\r' && *CurPtr != '\n')
      ++CurPtr;

    BufferPtr = CurPtr;

    // No longer in the conflict marker.
    CurrentConflictMarkerState = CMK_None;
    return true;
  }

  return false;
}

/// Scan for the "#>" that terminates an editor placeholder.  Returns a pointer
/// one past the '>', or null if no terminator occurs before the buffer end.
static const char *findPlaceholderEnd(const char *CurPtr,
                                      const char *BufferEnd) {
  if (CurPtr == BufferEnd)
    return nullptr;
  BufferEnd -= 1; // Scan until the second last character.
  for (; CurPtr != BufferEnd; ++CurPtr) {
    if (CurPtr[0] == '#' && CurPtr[1] == '>')
      return CurPtr + 2;
  }
  return nullptr;
}

/// Try to lex an editor placeholder of the form '<#...#>'.  On success, forms
/// a raw_identifier token covering the whole placeholder (flagged
/// IsEditorPlaceholder) and returns true; otherwise returns false and lexing
/// proceeds normally.
bool Lexer::lexEditorPlaceholder(Token &Result, const char *CurPtr) {
  assert(CurPtr[-1] == '<' && CurPtr[0] == '#' && "Not a placeholder!");
  if (!PP || !PP->getPreprocessorOpts().LexEditorPlaceholders || LexingRawMode)
    return false;
  const char *End = findPlaceholderEnd(CurPtr + 1, BufferEnd);
  if (!End)
    return false;
  const char *Start = CurPtr - 1;
  if (!LangOpts.AllowEditorPlaceholders)
    Diag(Start, diag::err_placeholder_in_source);
  Result.startToken();
  FormTokenWithChars(Result, End, tok::raw_identifier);
  Result.setRawIdentifierData(Start);
  PP->LookUpIdentifierInfo(Result);
  Result.setFlag(Token::IsEditorPlaceholder);
  BufferPtr = End;
  return true;
}

/// Return true if \p CurPtr is exactly at the location for which code
/// completion was requested.
bool Lexer::isCodeCompletionPoint(const char *CurPtr) const {
  if (PP && PP->isCodeCompletionEnabled()) {
    SourceLocation Loc = FileLoc.getLocWithOffset(CurPtr-BufferStart);
    return Loc == PP->getCodeCompletionLoc();
  }

  return false;
}

/// Try to read a universal character name (e.g. '\u00E9') whose 'u'/'U'
/// introducer is at \p StartPtr; \p SlashLoc is the preceding backslash.
/// Returns the decoded code point and advances \p StartPtr past the escape on
/// success; returns 0 on failure (most diagnostics are suppressed when
/// \p Result is null or the lexer is in raw mode).
uint32_t Lexer::tryReadUCN(const char *&StartPtr, const char *SlashLoc,
                           Token *Result) {
  unsigned CharSize;
  char Kind = getCharAndSize(StartPtr, CharSize);
  bool Delimited = false;          // Saw a '{' after \u or \U.
  bool FoundEndDelimiter = false;  // Saw the matching '}'.
  unsigned Count = 0;              // Number of hex digits consumed.
  bool Diagnose = Result && !isLexingRawMode();

  // \u takes exactly 4 hex digits, \U exactly 8 (when not delimited).
  unsigned NumHexDigits;
  if (Kind == 'u')
    NumHexDigits = 4;
  else if (Kind == 'U')
    NumHexDigits = 8;
  else
    return 0;

  if (!LangOpts.CPlusPlus && !LangOpts.C99) {
    if (Diagnose)
      Diag(SlashLoc, diag::warn_ucn_not_valid_in_c89);
    return 0;
  }

  const char *CurPtr = StartPtr + CharSize;
  const char *KindLoc = &CurPtr[-1];

  uint32_t CodePoint = 0;
  while (Count != NumHexDigits || Delimited) {
    char C = getCharAndSize(CurPtr, CharSize);
    if (!Delimited && C == '{') {
      Delimited = true;
      CurPtr += CharSize;
      continue;
    }

    if (Delimited && C == '}') {
      CurPtr += CharSize;
      FoundEndDelimiter = true;
      break;
    }

    unsigned Value = llvm::hexDigitValue(C);
    if (Value == -1U) {
      // Non-hex-digit: for the fixed-length form just stop; for the delimited
      // form this means the escape is malformed.
      if (!Delimited)
        break;
      if (Diagnose)
        Diag(BufferPtr, diag::warn_delimited_ucn_incomplete)
            << StringRef(&C, 1);
      return 0;
    }

    // About to shift in 4 more bits; any of the top 4 bits being set means
    // the value would overflow 32 bits.
    if (CodePoint & 0xF000'0000) {
      if (Diagnose)
        Diag(KindLoc, diag::err_escape_too_large) << 0;
      return 0;
    }

    CodePoint <<= 4;
    CodePoint |= Value;
    CurPtr += CharSize;
    Count++;
  }

  if (Count == 0) {
    if (Diagnose)
      Diag(StartPtr, FoundEndDelimiter ? diag::warn_delimited_ucn_empty
                                       : diag::warn_ucn_escape_no_digits)
          << StringRef(KindLoc, 1);
    return 0;
  }

  if (!Delimited && Count != NumHexDigits) {
    if (Diagnose) {
      Diag(BufferPtr, diag::warn_ucn_escape_incomplete);
      // If the user wrote \U1234, suggest a fixit to \u.
      if (Count == 4 && NumHexDigits == 8) {
        CharSourceRange URange = makeCharRange(*this, KindLoc, KindLoc + 1);
        Diag(KindLoc, diag::note_ucn_four_not_eight)
            << FixItHint::CreateReplacement(URange, "u");
      }
    }
    return 0;
  }

  // The delimited form \u{...} is an extension; diagnose it as such.
  if (Delimited && PP) {
    Diag(BufferPtr, diag::ext_delimited_escape_sequence);
  }

  if (Result) {
    Result->setFlag(Token::HasUCN);
    // If the raw length matches the expected '\uXXXX' (plus '{}' when
    // delimited) size, nothing in the escape needed cleaning; otherwise
    // (likely escaped newlines or trigraphs inside it) re-walk it with
    // getAndAdvanceChar so \p Result is updated accordingly.
    if (CurPtr - StartPtr == (ptrdiff_t)(Count + 2 + (Delimited ? 2 : 0)))
      StartPtr = CurPtr;
    else
      while (StartPtr != CurPtr)
        (void)getAndAdvanceChar(StartPtr, *Result);
  } else {
    StartPtr = CurPtr;
  }

  // Don't apply C family restrictions to UCNs in assembly mode
  if (LangOpts.AsmPreprocessor)
    return CodePoint;

  // C99 6.4.3p2: A universal character name shall not specify a character whose
  //   short identifier is less than 00A0 other than 0024 ($), 0040 (@), or
  //   0060 (`), nor one in the range D800 through DFFF inclusive.)
  // C++11 [lex.charset]p2: If the hexadecimal value for a
  //   universal-character-name corresponds to a surrogate code point (in the
  //   range 0xD800-0xDFFF, inclusive), the program is ill-formed. Additionally,
  //   if the hexadecimal value for a universal-character-name outside the
  //   c-char-sequence, s-char-sequence, or r-char-sequence of a character or
  //   string literal corresponds to a control character (in either of the
  //   ranges 0x00-0x1F or 0x7F-0x9F, both inclusive) or to a character in the
  //   basic source character set, the program is ill-formed.
  if (CodePoint < 0xA0) {
    if (CodePoint == 0x24 || CodePoint == 0x40 || CodePoint == 0x60)
      return CodePoint;

    // We don't use isLexingRawMode() here because we need to warn about bad
    // UCNs even when skipping preprocessing tokens in a #if block.
    if (Result && PP) {
      if (CodePoint < 0x20 || CodePoint >= 0x7F)
        Diag(BufferPtr, diag::err_ucn_control_character);
      else {
        char C = static_cast<char>(CodePoint);
        Diag(BufferPtr, diag::err_ucn_escape_basic_scs) << StringRef(&C, 1);
      }
    }

    return 0;
  } else if (CodePoint >= 0xD800 && CodePoint <= 0xDFFF) {
    // C++03 allows UCNs representing surrogate characters. C99 and C++11 don't.
    // We don't use isLexingRawMode() here because we need to diagnose bad
    // UCNs even when skipping preprocessing tokens in a #if block.
    if (Result && PP) {
      if (LangOpts.CPlusPlus && !LangOpts.CPlusPlus11)
        Diag(BufferPtr, diag::warn_ucn_escape_surrogate);
      else
        Diag(BufferPtr, diag::err_ucn_escape_invalid);
    }
    return 0;
  }

  return CodePoint;
}

/// If \p C is a Unicode whitespace code point, diagnose it (as an extension)
/// and mark \p Result as having leading space.  Returns true if \p C was
/// Unicode whitespace.
bool Lexer::CheckUnicodeWhitespace(Token &Result, uint32_t C,
                                   const char *CurPtr) {
  if (!isLexingRawMode() && !PP->isPreprocessedOutput() &&
      isUnicodeWhitespace(C)) {
    Diag(BufferPtr, diag::ext_unicode_whitespace)
        << makeCharRange(*this, BufferPtr, CurPtr);

    Result.setFlag(Token::LeadingSpace);
    return true;
  }
  return false;
}

/// Copy the start-of-line / leading-space / leading-empty-macro flags from
/// \p Result back into the lexer's pending-token state.
void Lexer::PropagateLineStartLeadingSpaceInfo(Token &Result) {
  IsAtStartOfLine = Result.isAtStartOfLine();
  HasLeadingSpace = Result.hasLeadingSpace();
  HasLeadingEmptyMacro = Result.hasLeadingEmptyMacro();
  // Note that this doesn't affect IsAtPhysicalStartOfLine.
}

/// Lex the next token: transfer the lexer's pending whitespace flags onto
/// \p Result, then delegate to LexTokenInternal.  Returns whether a token was
/// produced.
bool Lexer::Lex(Token &Result) {
  // Start a new token.
  Result.startToken();

  // Set up misc whitespace flags for LexTokenInternal.
  if (IsAtStartOfLine) {
    Result.setFlag(Token::StartOfLine);
    IsAtStartOfLine = false;
  }

  if (HasLeadingSpace) {
    Result.setFlag(Token::LeadingSpace);
    HasLeadingSpace = false;
  }

  if (HasLeadingEmptyMacro) {
    Result.setFlag(Token::LeadingEmptyMacro);
    HasLeadingEmptyMacro = false;
  }

  bool atPhysicalStartOfLine = IsAtPhysicalStartOfLine;
  IsAtPhysicalStartOfLine = false;
  bool isRawLex = isLexingRawMode();
  (void) isRawLex;
  bool returnedToken = LexTokenInternal(Result, atPhysicalStartOfLine);
  // (After the LexTokenInternal call, the lexer might be destroyed.)
  assert((returnedToken || !isRawLex) && "Raw lex must succeed");
  return returnedToken;
}

/// LexTokenInternal - This implements a simple C family lexer.  It is an
/// extremely performance critical piece of code.  This assumes that the buffer
/// has a null character at the end of the file.  This returns a preprocessing
/// token, not a normal token, as such, it is an internal interface.  It assumes
/// that the Flags of result have been cleared before calling this.
bool Lexer::LexTokenInternal(Token &Result, bool TokAtPhysicalStartOfLine) {
LexNextToken:
  // New token, can't need cleaning yet.
  Result.clearFlag(Token::NeedsCleaning);
  Result.setIdentifierInfo(nullptr);

  // CurPtr - Cache BufferPtr in an automatic variable.
  const char *CurPtr = BufferPtr;

  // Small amounts of horizontal whitespace is very common between tokens.
  if (isHorizontalWhitespace(*CurPtr)) {
    do {
      ++CurPtr;
    } while (isHorizontalWhitespace(*CurPtr));

    // If we are keeping whitespace and other tokens, just return what we just
    // skipped.  The next lexer invocation will return the token after the
    // whitespace.
3336 if (isKeepWhitespaceMode()) { 3337 FormTokenWithChars(Result, CurPtr, tok::unknown); 3338 // FIXME: The next token will not have LeadingSpace set. 3339 return true; 3340 } 3341 3342 BufferPtr = CurPtr; 3343 Result.setFlag(Token::LeadingSpace); 3344 } 3345 3346 unsigned SizeTmp, SizeTmp2; // Temporaries for use in cases below. 3347 3348 // Read a character, advancing over it. 3349 char Char = getAndAdvanceChar(CurPtr, Result); 3350 tok::TokenKind Kind; 3351 3352 if (!isVerticalWhitespace(Char)) 3353 NewLinePtr = nullptr; 3354 3355 switch (Char) { 3356 case 0: // Null. 3357 // Found end of file? 3358 if (CurPtr-1 == BufferEnd) 3359 return LexEndOfFile(Result, CurPtr-1); 3360 3361 // Check if we are performing code completion. 3362 if (isCodeCompletionPoint(CurPtr-1)) { 3363 // Return the code-completion token. 3364 Result.startToken(); 3365 FormTokenWithChars(Result, CurPtr, tok::code_completion); 3366 return true; 3367 } 3368 3369 if (!isLexingRawMode()) 3370 Diag(CurPtr-1, diag::null_in_file); 3371 Result.setFlag(Token::LeadingSpace); 3372 if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine)) 3373 return true; // KeepWhitespaceMode 3374 3375 // We know the lexer hasn't changed, so just try again with this lexer. 3376 // (We manually eliminate the tail call to avoid recursion.) 3377 goto LexNextToken; 3378 3379 case 26: // DOS & CP/M EOF: "^Z". 3380 // If we're in Microsoft extensions mode, treat this as end of file. 3381 if (LangOpts.MicrosoftExt) { 3382 if (!isLexingRawMode()) 3383 Diag(CurPtr-1, diag::ext_ctrl_z_eof_microsoft); 3384 return LexEndOfFile(Result, CurPtr-1); 3385 } 3386 3387 // If Microsoft extensions are disabled, this is just random garbage. 
3388 Kind = tok::unknown; 3389 break; 3390 3391 case '\r': 3392 if (CurPtr[0] == '\n') 3393 (void)getAndAdvanceChar(CurPtr, Result); 3394 LLVM_FALLTHROUGH; 3395 case '\n': 3396 // If we are inside a preprocessor directive and we see the end of line, 3397 // we know we are done with the directive, so return an EOD token. 3398 if (ParsingPreprocessorDirective) { 3399 // Done parsing the "line". 3400 ParsingPreprocessorDirective = false; 3401 3402 // Restore comment saving mode, in case it was disabled for directive. 3403 if (PP) 3404 resetExtendedTokenMode(); 3405 3406 // Since we consumed a newline, we are back at the start of a line. 3407 IsAtStartOfLine = true; 3408 IsAtPhysicalStartOfLine = true; 3409 NewLinePtr = CurPtr - 1; 3410 3411 Kind = tok::eod; 3412 break; 3413 } 3414 3415 // No leading whitespace seen so far. 3416 Result.clearFlag(Token::LeadingSpace); 3417 3418 if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine)) 3419 return true; // KeepWhitespaceMode 3420 3421 // We only saw whitespace, so just try again with this lexer. 3422 // (We manually eliminate the tail call to avoid recursion.) 3423 goto LexNextToken; 3424 case ' ': 3425 case '\t': 3426 case '\f': 3427 case '\v': 3428 SkipHorizontalWhitespace: 3429 Result.setFlag(Token::LeadingSpace); 3430 if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine)) 3431 return true; // KeepWhitespaceMode 3432 3433 SkipIgnoredUnits: 3434 CurPtr = BufferPtr; 3435 3436 // If the next token is obviously a // or /* */ comment, skip it efficiently 3437 // too (without going through the big switch stmt). 3438 if (CurPtr[0] == '/' && CurPtr[1] == '/' && !inKeepCommentMode() && 3439 LineComment && (LangOpts.CPlusPlus || !LangOpts.TraditionalCPP)) { 3440 if (SkipLineComment(Result, CurPtr+2, TokAtPhysicalStartOfLine)) 3441 return true; // There is a token to return. 
3442 goto SkipIgnoredUnits; 3443 } else if (CurPtr[0] == '/' && CurPtr[1] == '*' && !inKeepCommentMode()) { 3444 if (SkipBlockComment(Result, CurPtr+2, TokAtPhysicalStartOfLine)) 3445 return true; // There is a token to return. 3446 goto SkipIgnoredUnits; 3447 } else if (isHorizontalWhitespace(*CurPtr)) { 3448 goto SkipHorizontalWhitespace; 3449 } 3450 // We only saw whitespace, so just try again with this lexer. 3451 // (We manually eliminate the tail call to avoid recursion.) 3452 goto LexNextToken; 3453 3454 // C99 6.4.4.1: Integer Constants. 3455 // C99 6.4.4.2: Floating Constants. 3456 case '0': case '1': case '2': case '3': case '4': 3457 case '5': case '6': case '7': case '8': case '9': 3458 // Notify MIOpt that we read a non-whitespace/non-comment token. 3459 MIOpt.ReadToken(); 3460 return LexNumericConstant(Result, CurPtr); 3461 3462 case 'u': // Identifier (uber) or C11/C++11 UTF-8 or UTF-16 string literal 3463 // Notify MIOpt that we read a non-whitespace/non-comment token. 3464 MIOpt.ReadToken(); 3465 3466 if (LangOpts.CPlusPlus11 || LangOpts.C11) { 3467 Char = getCharAndSize(CurPtr, SizeTmp); 3468 3469 // UTF-16 string literal 3470 if (Char == '"') 3471 return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result), 3472 tok::utf16_string_literal); 3473 3474 // UTF-16 character constant 3475 if (Char == '\'') 3476 return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result), 3477 tok::utf16_char_constant); 3478 3479 // UTF-16 raw string literal 3480 if (Char == 'R' && LangOpts.CPlusPlus11 && 3481 getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"') 3482 return LexRawStringLiteral(Result, 3483 ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 3484 SizeTmp2, Result), 3485 tok::utf16_string_literal); 3486 3487 if (Char == '8') { 3488 char Char2 = getCharAndSize(CurPtr + SizeTmp, SizeTmp2); 3489 3490 // UTF-8 string literal 3491 if (Char2 == '"') 3492 return LexStringLiteral(Result, 3493 ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 3494 
SizeTmp2, Result), 3495 tok::utf8_string_literal); 3496 if (Char2 == '\'' && LangOpts.CPlusPlus17) 3497 return LexCharConstant( 3498 Result, ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 3499 SizeTmp2, Result), 3500 tok::utf8_char_constant); 3501 3502 if (Char2 == 'R' && LangOpts.CPlusPlus11) { 3503 unsigned SizeTmp3; 3504 char Char3 = getCharAndSize(CurPtr + SizeTmp + SizeTmp2, SizeTmp3); 3505 // UTF-8 raw string literal 3506 if (Char3 == '"') { 3507 return LexRawStringLiteral(Result, 3508 ConsumeChar(ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 3509 SizeTmp2, Result), 3510 SizeTmp3, Result), 3511 tok::utf8_string_literal); 3512 } 3513 } 3514 } 3515 } 3516 3517 // treat u like the start of an identifier. 3518 return LexIdentifierContinue(Result, CurPtr); 3519 3520 case 'U': // Identifier (Uber) or C11/C++11 UTF-32 string literal 3521 // Notify MIOpt that we read a non-whitespace/non-comment token. 3522 MIOpt.ReadToken(); 3523 3524 if (LangOpts.CPlusPlus11 || LangOpts.C11) { 3525 Char = getCharAndSize(CurPtr, SizeTmp); 3526 3527 // UTF-32 string literal 3528 if (Char == '"') 3529 return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result), 3530 tok::utf32_string_literal); 3531 3532 // UTF-32 character constant 3533 if (Char == '\'') 3534 return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result), 3535 tok::utf32_char_constant); 3536 3537 // UTF-32 raw string literal 3538 if (Char == 'R' && LangOpts.CPlusPlus11 && 3539 getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"') 3540 return LexRawStringLiteral(Result, 3541 ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 3542 SizeTmp2, Result), 3543 tok::utf32_string_literal); 3544 } 3545 3546 // treat U like the start of an identifier. 3547 return LexIdentifierContinue(Result, CurPtr); 3548 3549 case 'R': // Identifier or C++0x raw string literal 3550 // Notify MIOpt that we read a non-whitespace/non-comment token. 
3551 MIOpt.ReadToken(); 3552 3553 if (LangOpts.CPlusPlus11) { 3554 Char = getCharAndSize(CurPtr, SizeTmp); 3555 3556 if (Char == '"') 3557 return LexRawStringLiteral(Result, 3558 ConsumeChar(CurPtr, SizeTmp, Result), 3559 tok::string_literal); 3560 } 3561 3562 // treat R like the start of an identifier. 3563 return LexIdentifierContinue(Result, CurPtr); 3564 3565 case 'L': // Identifier (Loony) or wide literal (L'x' or L"xyz"). 3566 // Notify MIOpt that we read a non-whitespace/non-comment token. 3567 MIOpt.ReadToken(); 3568 Char = getCharAndSize(CurPtr, SizeTmp); 3569 3570 // Wide string literal. 3571 if (Char == '"') 3572 return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result), 3573 tok::wide_string_literal); 3574 3575 // Wide raw string literal. 3576 if (LangOpts.CPlusPlus11 && Char == 'R' && 3577 getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"') 3578 return LexRawStringLiteral(Result, 3579 ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 3580 SizeTmp2, Result), 3581 tok::wide_string_literal); 3582 3583 // Wide character constant. 3584 if (Char == '\'') 3585 return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result), 3586 tok::wide_char_constant); 3587 // FALL THROUGH, treating L like the start of an identifier. 3588 LLVM_FALLTHROUGH; 3589 3590 // C99 6.4.2: Identifiers. 3591 case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': 3592 case 'H': case 'I': case 'J': case 'K': /*'L'*/case 'M': case 'N': 3593 case 'O': case 'P': case 'Q': /*'R'*/case 'S': case 'T': /*'U'*/ 3594 case 'V': case 'W': case 'X': case 'Y': case 'Z': 3595 case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': 3596 case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': 3597 case 'o': case 'p': case 'q': case 'r': case 's': case 't': /*'u'*/ 3598 case 'v': case 'w': case 'x': case 'y': case 'z': 3599 case '_': 3600 // Notify MIOpt that we read a non-whitespace/non-comment token. 
3601 MIOpt.ReadToken(); 3602 return LexIdentifierContinue(Result, CurPtr); 3603 3604 case '$': // $ in identifiers. 3605 if (LangOpts.DollarIdents) { 3606 if (!isLexingRawMode()) 3607 Diag(CurPtr-1, diag::ext_dollar_in_identifier); 3608 // Notify MIOpt that we read a non-whitespace/non-comment token. 3609 MIOpt.ReadToken(); 3610 return LexIdentifierContinue(Result, CurPtr); 3611 } 3612 3613 Kind = tok::unknown; 3614 break; 3615 3616 // C99 6.4.4: Character Constants. 3617 case '\'': 3618 // Notify MIOpt that we read a non-whitespace/non-comment token. 3619 MIOpt.ReadToken(); 3620 return LexCharConstant(Result, CurPtr, tok::char_constant); 3621 3622 // C99 6.4.5: String Literals. 3623 case '"': 3624 // Notify MIOpt that we read a non-whitespace/non-comment token. 3625 MIOpt.ReadToken(); 3626 return LexStringLiteral(Result, CurPtr, 3627 ParsingFilename ? tok::header_name 3628 : tok::string_literal); 3629 3630 // C99 6.4.6: Punctuators. 3631 case '?': 3632 Kind = tok::question; 3633 break; 3634 case '[': 3635 Kind = tok::l_square; 3636 break; 3637 case ']': 3638 Kind = tok::r_square; 3639 break; 3640 case '(': 3641 Kind = tok::l_paren; 3642 break; 3643 case ')': 3644 Kind = tok::r_paren; 3645 break; 3646 case '{': 3647 Kind = tok::l_brace; 3648 break; 3649 case '}': 3650 Kind = tok::r_brace; 3651 break; 3652 case '.': 3653 Char = getCharAndSize(CurPtr, SizeTmp); 3654 if (Char >= '0' && Char <= '9') { 3655 // Notify MIOpt that we read a non-whitespace/non-comment token. 3656 MIOpt.ReadToken(); 3657 3658 return LexNumericConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result)); 3659 } else if (LangOpts.CPlusPlus && Char == '*') { 3660 Kind = tok::periodstar; 3661 CurPtr += SizeTmp; 3662 } else if (Char == '.' 
&& 3663 getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '.') { 3664 Kind = tok::ellipsis; 3665 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 3666 SizeTmp2, Result); 3667 } else { 3668 Kind = tok::period; 3669 } 3670 break; 3671 case '&': 3672 Char = getCharAndSize(CurPtr, SizeTmp); 3673 if (Char == '&') { 3674 Kind = tok::ampamp; 3675 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3676 } else if (Char == '=') { 3677 Kind = tok::ampequal; 3678 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3679 } else { 3680 Kind = tok::amp; 3681 } 3682 break; 3683 case '*': 3684 if (getCharAndSize(CurPtr, SizeTmp) == '=') { 3685 Kind = tok::starequal; 3686 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3687 } else { 3688 Kind = tok::star; 3689 } 3690 break; 3691 case '+': 3692 Char = getCharAndSize(CurPtr, SizeTmp); 3693 if (Char == '+') { 3694 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3695 Kind = tok::plusplus; 3696 } else if (Char == '=') { 3697 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3698 Kind = tok::plusequal; 3699 } else { 3700 Kind = tok::plus; 3701 } 3702 break; 3703 case '-': 3704 Char = getCharAndSize(CurPtr, SizeTmp); 3705 if (Char == '-') { // -- 3706 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3707 Kind = tok::minusminus; 3708 } else if (Char == '>' && LangOpts.CPlusPlus && 3709 getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '*') { // C++ ->* 3710 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 3711 SizeTmp2, Result); 3712 Kind = tok::arrowstar; 3713 } else if (Char == '>') { // -> 3714 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3715 Kind = tok::arrow; 3716 } else if (Char == '=') { // -= 3717 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3718 Kind = tok::minusequal; 3719 } else { 3720 Kind = tok::minus; 3721 } 3722 break; 3723 case '~': 3724 Kind = tok::tilde; 3725 break; 3726 case '!': 3727 if (getCharAndSize(CurPtr, SizeTmp) == '=') { 3728 Kind = tok::exclaimequal; 3729 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3730 } else { 
3731 Kind = tok::exclaim; 3732 } 3733 break; 3734 case '/': 3735 // 6.4.9: Comments 3736 Char = getCharAndSize(CurPtr, SizeTmp); 3737 if (Char == '/') { // Line comment. 3738 // Even if Line comments are disabled (e.g. in C89 mode), we generally 3739 // want to lex this as a comment. There is one problem with this though, 3740 // that in one particular corner case, this can change the behavior of the 3741 // resultant program. For example, In "foo //**/ bar", C89 would lex 3742 // this as "foo / bar" and languages with Line comments would lex it as 3743 // "foo". Check to see if the character after the second slash is a '*'. 3744 // If so, we will lex that as a "/" instead of the start of a comment. 3745 // However, we never do this if we are just preprocessing. 3746 bool TreatAsComment = 3747 LineComment && (LangOpts.CPlusPlus || !LangOpts.TraditionalCPP); 3748 if (!TreatAsComment) 3749 if (!(PP && PP->isPreprocessedOutput())) 3750 TreatAsComment = getCharAndSize(CurPtr+SizeTmp, SizeTmp2) != '*'; 3751 3752 if (TreatAsComment) { 3753 if (SkipLineComment(Result, ConsumeChar(CurPtr, SizeTmp, Result), 3754 TokAtPhysicalStartOfLine)) 3755 return true; // There is a token to return. 3756 3757 // It is common for the tokens immediately after a // comment to be 3758 // whitespace (indentation for the next line). Instead of going through 3759 // the big switch, handle it efficiently now. 3760 goto SkipIgnoredUnits; 3761 } 3762 } 3763 3764 if (Char == '*') { // /**/ comment. 3765 if (SkipBlockComment(Result, ConsumeChar(CurPtr, SizeTmp, Result), 3766 TokAtPhysicalStartOfLine)) 3767 return true; // There is a token to return. 3768 3769 // We only saw whitespace, so just try again with this lexer. 3770 // (We manually eliminate the tail call to avoid recursion.) 
3771 goto LexNextToken; 3772 } 3773 3774 if (Char == '=') { 3775 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3776 Kind = tok::slashequal; 3777 } else { 3778 Kind = tok::slash; 3779 } 3780 break; 3781 case '%': 3782 Char = getCharAndSize(CurPtr, SizeTmp); 3783 if (Char == '=') { 3784 Kind = tok::percentequal; 3785 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3786 } else if (LangOpts.Digraphs && Char == '>') { 3787 Kind = tok::r_brace; // '%>' -> '}' 3788 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3789 } else if (LangOpts.Digraphs && Char == ':') { 3790 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3791 Char = getCharAndSize(CurPtr, SizeTmp); 3792 if (Char == '%' && getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == ':') { 3793 Kind = tok::hashhash; // '%:%:' -> '##' 3794 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 3795 SizeTmp2, Result); 3796 } else if (Char == '@' && LangOpts.MicrosoftExt) {// %:@ -> #@ -> Charize 3797 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3798 if (!isLexingRawMode()) 3799 Diag(BufferPtr, diag::ext_charize_microsoft); 3800 Kind = tok::hashat; 3801 } else { // '%:' -> '#' 3802 // We parsed a # character. If this occurs at the start of the line, 3803 // it's actually the start of a preprocessing directive. Callback to 3804 // the preprocessor to handle it. 3805 // TODO: -fpreprocessed mode?? 
3806 if (TokAtPhysicalStartOfLine && !LexingRawMode && !Is_PragmaLexer) 3807 goto HandleDirective; 3808 3809 Kind = tok::hash; 3810 } 3811 } else { 3812 Kind = tok::percent; 3813 } 3814 break; 3815 case '<': 3816 Char = getCharAndSize(CurPtr, SizeTmp); 3817 if (ParsingFilename) { 3818 return LexAngledStringLiteral(Result, CurPtr); 3819 } else if (Char == '<') { 3820 char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2); 3821 if (After == '=') { 3822 Kind = tok::lesslessequal; 3823 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 3824 SizeTmp2, Result); 3825 } else if (After == '<' && IsStartOfConflictMarker(CurPtr-1)) { 3826 // If this is actually a '<<<<<<<' version control conflict marker, 3827 // recognize it as such and recover nicely. 3828 goto LexNextToken; 3829 } else if (After == '<' && HandleEndOfConflictMarker(CurPtr-1)) { 3830 // If this is '<<<<' and we're in a Perforce-style conflict marker, 3831 // ignore it. 3832 goto LexNextToken; 3833 } else if (LangOpts.CUDA && After == '<') { 3834 Kind = tok::lesslessless; 3835 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 3836 SizeTmp2, Result); 3837 } else { 3838 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3839 Kind = tok::lessless; 3840 } 3841 } else if (Char == '=') { 3842 char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2); 3843 if (After == '>') { 3844 if (LangOpts.CPlusPlus20) { 3845 if (!isLexingRawMode()) 3846 Diag(BufferPtr, diag::warn_cxx17_compat_spaceship); 3847 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 3848 SizeTmp2, Result); 3849 Kind = tok::spaceship; 3850 break; 3851 } 3852 // Suggest adding a space between the '<=' and the '>' to avoid a 3853 // change in semantics if this turns up in C++ <=17 mode. 
3854 if (LangOpts.CPlusPlus && !isLexingRawMode()) { 3855 Diag(BufferPtr, diag::warn_cxx20_compat_spaceship) 3856 << FixItHint::CreateInsertion( 3857 getSourceLocation(CurPtr + SizeTmp, SizeTmp2), " "); 3858 } 3859 } 3860 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3861 Kind = tok::lessequal; 3862 } else if (LangOpts.Digraphs && Char == ':') { // '<:' -> '[' 3863 if (LangOpts.CPlusPlus11 && 3864 getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == ':') { 3865 // C++0x [lex.pptoken]p3: 3866 // Otherwise, if the next three characters are <:: and the subsequent 3867 // character is neither : nor >, the < is treated as a preprocessor 3868 // token by itself and not as the first character of the alternative 3869 // token <:. 3870 unsigned SizeTmp3; 3871 char After = getCharAndSize(CurPtr + SizeTmp + SizeTmp2, SizeTmp3); 3872 if (After != ':' && After != '>') { 3873 Kind = tok::less; 3874 if (!isLexingRawMode()) 3875 Diag(BufferPtr, diag::warn_cxx98_compat_less_colon_colon); 3876 break; 3877 } 3878 } 3879 3880 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3881 Kind = tok::l_square; 3882 } else if (LangOpts.Digraphs && Char == '%') { // '<%' -> '{' 3883 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3884 Kind = tok::l_brace; 3885 } else if (Char == '#' && /*Not a trigraph*/ SizeTmp == 1 && 3886 lexEditorPlaceholder(Result, CurPtr)) { 3887 return true; 3888 } else { 3889 Kind = tok::less; 3890 } 3891 break; 3892 case '>': 3893 Char = getCharAndSize(CurPtr, SizeTmp); 3894 if (Char == '=') { 3895 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3896 Kind = tok::greaterequal; 3897 } else if (Char == '>') { 3898 char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2); 3899 if (After == '=') { 3900 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 3901 SizeTmp2, Result); 3902 Kind = tok::greatergreaterequal; 3903 } else if (After == '>' && IsStartOfConflictMarker(CurPtr-1)) { 3904 // If this is actually a '>>>>' conflict marker, recognize it as such 3905 // and recover 
nicely. 3906 goto LexNextToken; 3907 } else if (After == '>' && HandleEndOfConflictMarker(CurPtr-1)) { 3908 // If this is '>>>>>>>' and we're in a conflict marker, ignore it. 3909 goto LexNextToken; 3910 } else if (LangOpts.CUDA && After == '>') { 3911 Kind = tok::greatergreatergreater; 3912 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 3913 SizeTmp2, Result); 3914 } else { 3915 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3916 Kind = tok::greatergreater; 3917 } 3918 } else { 3919 Kind = tok::greater; 3920 } 3921 break; 3922 case '^': 3923 Char = getCharAndSize(CurPtr, SizeTmp); 3924 if (Char == '=') { 3925 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3926 Kind = tok::caretequal; 3927 } else if (LangOpts.OpenCL && Char == '^') { 3928 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3929 Kind = tok::caretcaret; 3930 } else { 3931 Kind = tok::caret; 3932 } 3933 break; 3934 case '|': 3935 Char = getCharAndSize(CurPtr, SizeTmp); 3936 if (Char == '=') { 3937 Kind = tok::pipeequal; 3938 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3939 } else if (Char == '|') { 3940 // If this is '|||||||' and we're in a conflict marker, ignore it. 
3941 if (CurPtr[1] == '|' && HandleEndOfConflictMarker(CurPtr-1)) 3942 goto LexNextToken; 3943 Kind = tok::pipepipe; 3944 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3945 } else { 3946 Kind = tok::pipe; 3947 } 3948 break; 3949 case ':': 3950 Char = getCharAndSize(CurPtr, SizeTmp); 3951 if (LangOpts.Digraphs && Char == '>') { 3952 Kind = tok::r_square; // ':>' -> ']' 3953 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3954 } else if ((LangOpts.CPlusPlus || 3955 LangOpts.DoubleSquareBracketAttributes) && 3956 Char == ':') { 3957 Kind = tok::coloncolon; 3958 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3959 } else { 3960 Kind = tok::colon; 3961 } 3962 break; 3963 case ';': 3964 Kind = tok::semi; 3965 break; 3966 case '=': 3967 Char = getCharAndSize(CurPtr, SizeTmp); 3968 if (Char == '=') { 3969 // If this is '====' and we're in a conflict marker, ignore it. 3970 if (CurPtr[1] == '=' && HandleEndOfConflictMarker(CurPtr-1)) 3971 goto LexNextToken; 3972 3973 Kind = tok::equalequal; 3974 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3975 } else { 3976 Kind = tok::equal; 3977 } 3978 break; 3979 case ',': 3980 Kind = tok::comma; 3981 break; 3982 case '#': 3983 Char = getCharAndSize(CurPtr, SizeTmp); 3984 if (Char == '#') { 3985 Kind = tok::hashhash; 3986 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3987 } else if (Char == '@' && LangOpts.MicrosoftExt) { // #@ -> Charize 3988 Kind = tok::hashat; 3989 if (!isLexingRawMode()) 3990 Diag(BufferPtr, diag::ext_charize_microsoft); 3991 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 3992 } else { 3993 // We parsed a # character. If this occurs at the start of the line, 3994 // it's actually the start of a preprocessing directive. Callback to 3995 // the preprocessor to handle it. 3996 // TODO: -fpreprocessed mode?? 3997 if (TokAtPhysicalStartOfLine && !LexingRawMode && !Is_PragmaLexer) 3998 goto HandleDirective; 3999 4000 Kind = tok::hash; 4001 } 4002 break; 4003 4004 case '@': 4005 // Objective C support. 
4006 if (CurPtr[-1] == '@' && LangOpts.ObjC) 4007 Kind = tok::at; 4008 else 4009 Kind = tok::unknown; 4010 break; 4011 4012 // UCNs (C99 6.4.3, C++11 [lex.charset]p2) 4013 case '\\': 4014 if (!LangOpts.AsmPreprocessor) { 4015 if (uint32_t CodePoint = tryReadUCN(CurPtr, BufferPtr, &Result)) { 4016 if (CheckUnicodeWhitespace(Result, CodePoint, CurPtr)) { 4017 if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine)) 4018 return true; // KeepWhitespaceMode 4019 4020 // We only saw whitespace, so just try again with this lexer. 4021 // (We manually eliminate the tail call to avoid recursion.) 4022 goto LexNextToken; 4023 } 4024 4025 return LexUnicodeIdentifierStart(Result, CodePoint, CurPtr); 4026 } 4027 } 4028 4029 Kind = tok::unknown; 4030 break; 4031 4032 default: { 4033 if (isASCII(Char)) { 4034 Kind = tok::unknown; 4035 break; 4036 } 4037 4038 llvm::UTF32 CodePoint; 4039 4040 // We can't just reset CurPtr to BufferPtr because BufferPtr may point to 4041 // an escaped newline. 4042 --CurPtr; 4043 llvm::ConversionResult Status = 4044 llvm::convertUTF8Sequence((const llvm::UTF8 **)&CurPtr, 4045 (const llvm::UTF8 *)BufferEnd, 4046 &CodePoint, 4047 llvm::strictConversion); 4048 if (Status == llvm::conversionOK) { 4049 if (CheckUnicodeWhitespace(Result, CodePoint, CurPtr)) { 4050 if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine)) 4051 return true; // KeepWhitespaceMode 4052 4053 // We only saw whitespace, so just try again with this lexer. 4054 // (We manually eliminate the tail call to avoid recursion.) 4055 goto LexNextToken; 4056 } 4057 return LexUnicodeIdentifierStart(Result, CodePoint, CurPtr); 4058 } 4059 4060 if (isLexingRawMode() || ParsingPreprocessorDirective || 4061 PP->isPreprocessedOutput()) { 4062 ++CurPtr; 4063 Kind = tok::unknown; 4064 break; 4065 } 4066 4067 // Non-ASCII characters tend to creep into source code unintentionally. 
4068 // Instead of letting the parser complain about the unknown token, 4069 // just diagnose the invalid UTF-8, then drop the character. 4070 Diag(CurPtr, diag::err_invalid_utf8); 4071 4072 BufferPtr = CurPtr+1; 4073 // We're pretending the character didn't exist, so just try again with 4074 // this lexer. 4075 // (We manually eliminate the tail call to avoid recursion.) 4076 goto LexNextToken; 4077 } 4078 } 4079 4080 // Notify MIOpt that we read a non-whitespace/non-comment token. 4081 MIOpt.ReadToken(); 4082 4083 // Update the location of token as well as BufferPtr. 4084 FormTokenWithChars(Result, CurPtr, Kind); 4085 return true; 4086 4087 HandleDirective: 4088 // We parsed a # character and it's the start of a preprocessing directive. 4089 4090 FormTokenWithChars(Result, CurPtr, tok::hash); 4091 PP->HandleDirective(Result); 4092 4093 if (PP->hadModuleLoaderFatalFailure()) { 4094 // With a fatal failure in the module loader, we abort parsing. 4095 assert(Result.is(tok::eof) && "Preprocessor did not set tok:eof"); 4096 return true; 4097 } 4098 4099 // We parsed the directive; lex a token with the new state. 4100 return false; 4101 } 4102