//===- TGLexer.cpp - Lexer for TableGen -----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implement the Lexer for TableGen.
//
//===----------------------------------------------------------------------===//

#include "TGLexer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Config/config.h" // for strtoull()/strtoll() define
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/TableGen/Error.h"
#include <algorithm>
#include <cctype>
#include <cerrno>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>

using namespace llvm;

namespace {
// A list of supported preprocessing directives with their
// internal token kinds and names.
struct {
  tgtok::TokKind Kind;
  const char *Word;
} PreprocessorDirs[] = {
  { tgtok::Ifdef, "ifdef" },
  { tgtok::Ifndef, "ifndef" },
  { tgtok::Else, "else" },
  { tgtok::Endif, "endif" },
  { tgtok::Define, "define" }
};
} // end anonymous namespace

TGLexer::TGLexer(SourceMgr &SM, ArrayRef<std::string> Macros) : SrcMgr(SM) {
  CurBuffer = SrcMgr.getMainFileID();
  CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
  CurPtr = CurBuf.begin();
  TokStart = nullptr;

  // Pretend that we enter the "top-level" include file.
  PrepIncludeStack.push_back(
      std::make_unique<std::vector<PreprocessorControlDesc>>());

  // Put all macros defined in the command line into the DefinedMacros set.
  for (const std::string &MacroName : Macros)
    DefinedMacros.insert(MacroName);
}

SMLoc TGLexer::getLoc() const {
  return SMLoc::getFromPointer(TokStart);
}

/// ReturnError - Set the error to the specified string at the specified
/// location. This is defined to always return tgtok::Error.
tgtok::TokKind TGLexer::ReturnError(SMLoc Loc, const Twine &Msg) {
  PrintError(Loc, Msg);
  return tgtok::Error;
}

tgtok::TokKind TGLexer::ReturnError(const char *Loc, const Twine &Msg) {
  return ReturnError(SMLoc::getFromPointer(Loc), Msg);
}

bool TGLexer::processEOF() {
  SMLoc ParentIncludeLoc = SrcMgr.getParentIncludeLoc(CurBuffer);
  if (ParentIncludeLoc != SMLoc()) {
    // If prepExitInclude() detects a problem with the preprocessing
    // control stack, it will return false. Pretend that we reached
    // the final EOF and stop lexing more tokens by returning false
    // to LexToken().
    if (!prepExitInclude(false))
      return false;

    CurBuffer = SrcMgr.FindBufferContainingLoc(ParentIncludeLoc);
    CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
    CurPtr = ParentIncludeLoc.getPointer();
    // Make sure TokStart points into the parent file's buffer.
    // LexToken() assigns to it before calling getNextChar(),
    // so it is pointing into the included file now.
    TokStart = CurPtr;
    return true;
  }

  // Pretend that we exit the "top-level" include file.
  // Note that in case of an error (e.g. control stack imbalance)
  // the routine will issue a fatal error.
  prepExitInclude(true);
  return false;
}

int TGLexer::getNextChar() {
  char CurChar = *CurPtr++;
  switch (CurChar) {
  default:
    return (unsigned char)CurChar;

  case 0: {
    // A NUL character in the stream is either the end of the current buffer or
    // a spurious NUL in the file. Disambiguate that here.
    if (CurPtr - 1 == CurBuf.end()) {
      --CurPtr; // Arrange for another call to return EOF again.
      return EOF;
    }
    PrintError(getLoc(),
               "NUL character is invalid in source; treated as space");
    return ' ';
  }

  case '\n':
  case '\r':
    // Handle the newline character by ignoring it and incrementing the line
    // count. However, be careful about 'dos style' files with \n\r in them.
    // Only treat a \n\r or \r\n as a single line.
    if ((*CurPtr == '\n' || (*CurPtr == '\r')) &&
        *CurPtr != CurChar)
      ++CurPtr; // Eat the two char newline sequence.
    return '\n';
  }
}

int TGLexer::peekNextChar(int Index) const {
  return *(CurPtr + Index);
}

tgtok::TokKind TGLexer::LexToken(bool FileOrLineStart) {
  TokStart = CurPtr;
  // This always consumes at least one character.
  int CurChar = getNextChar();

  switch (CurChar) {
  default:
    // Handle letters: [a-zA-Z_]
    if (isalpha(CurChar) || CurChar == '_')
      return LexIdentifier();

    // Unknown character, emit an error.
    return ReturnError(TokStart, "Unexpected character");
  case EOF:
    // Lex next token, if we just left an include file.
    // Note that leaving an include file means that the next
    // symbol is located at the end of the 'include "..."'
    // construct, so LexToken() is called with default
    // false parameter.
    if (processEOF())
      return LexToken();

    // Return EOF denoting the end of lexing.
    return tgtok::Eof;

  case ':': return tgtok::colon;
  case ';': return tgtok::semi;
  case ',': return tgtok::comma;
  case '<': return tgtok::less;
  case '>': return tgtok::greater;
  case ']': return tgtok::r_square;
  case '{': return tgtok::l_brace;
  case '}': return tgtok::r_brace;
  case '(': return tgtok::l_paren;
  case ')': return tgtok::r_paren;
  case '=': return tgtok::equal;
  case '?': return tgtok::question;
  case '#':
    if (FileOrLineStart) {
      tgtok::TokKind Kind = prepIsDirective();
      if (Kind != tgtok::Error)
        return lexPreprocessor(Kind);
    }

    return tgtok::paste;

  // The period is a separate case so we can recognize the "..."
  // range punctuator.
  case '.':
    if (peekNextChar(0) == '.') {
      ++CurPtr; // Eat second dot.
      if (peekNextChar(0) == '.') {
        ++CurPtr; // Eat third dot.
        return tgtok::dotdotdot;
      }
      return ReturnError(TokStart, "Invalid '..' punctuation");
    }
    return tgtok::dot;

  case '\r':
    PrintFatalError("getNextChar() must never return '\r'");
    return tgtok::Error;

  case ' ':
  case '\t':
    // Ignore whitespace.
    return LexToken(FileOrLineStart);
  case '\n':
    // Ignore whitespace, and identify the new line.
    return LexToken(true);
  case '/':
    // If this is the start of a // comment, skip until the end of the line or
    // the end of the buffer.
    if (*CurPtr == '/')
      SkipBCPLComment();
    else if (*CurPtr == '*') {
      if (SkipCComment())
        return tgtok::Error;
    } else // Otherwise, this is an error.
      return ReturnError(TokStart, "Unexpected character");
    return LexToken(FileOrLineStart);
  case '-': case '+':
  case '0': case '1': case '2': case '3': case '4': case '5': case '6':
  case '7': case '8': case '9': {
    int NextChar = 0;
    if (isdigit(CurChar)) {
      // Allow identifiers to start with a number if it is followed by
      // an identifier. This can happen with paste operations like
      // foo#8i.
      int i = 0;
      do {
        NextChar = peekNextChar(i++);
      } while (isdigit(NextChar));

      if (NextChar == 'x' || NextChar == 'b') {
        // If this is [0-9]b[01] or [0-9]x[0-9A-fa-f] this is most
        // likely a number.
        int NextNextChar = peekNextChar(i);
        switch (NextNextChar) {
        default:
          break;
        case '0': case '1':
          if (NextChar == 'b')
            return LexNumber();
          LLVM_FALLTHROUGH;
        case '2': case '3': case '4': case '5':
        case '6': case '7': case '8': case '9':
        case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
        case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
          if (NextChar == 'x')
            return LexNumber();
          break;
        }
      }
    }

    if (isalpha(NextChar) || NextChar == '_')
      return LexIdentifier();

    return LexNumber();
  }
  case '"': return LexString();
  case '$': return LexVarName();
  case '[': return LexBracket();
  case '!': return LexExclaim();
  }
}

/// LexString - Lex "[^"]*"
tgtok::TokKind TGLexer::LexString() {
  const char *StrStart = CurPtr;

  CurStrVal = "";

  while (*CurPtr != '"') {
    // If we hit the end of the buffer, report an error.
    if (*CurPtr == 0 && CurPtr == CurBuf.end())
      return ReturnError(StrStart, "End of file in string literal");

    if (*CurPtr == '\n' || *CurPtr == '\r')
      return ReturnError(StrStart, "End of line in string literal");

    if (*CurPtr != '\\') {
      CurStrVal += *CurPtr++;
      continue;
    }

    ++CurPtr;

    switch (*CurPtr) {
    case '\\': case '\'': case '"':
      // These turn into their literal character.
      CurStrVal += *CurPtr++;
      break;
    case 't':
      CurStrVal += '\t';
      ++CurPtr;
      break;
    case 'n':
      CurStrVal += '\n';
      ++CurPtr;
      break;

    case '\n':
    case '\r':
      return ReturnError(CurPtr, "escaped newlines not supported in tblgen");

    // If we hit the end of the buffer, report an error.
    case '\0':
      if (CurPtr == CurBuf.end())
        return ReturnError(StrStart, "End of file in string literal");
      LLVM_FALLTHROUGH;
    default:
      return ReturnError(CurPtr, "invalid escape in string literal");
    }
  }

  ++CurPtr;
  return tgtok::StrVal;
}

tgtok::TokKind TGLexer::LexVarName() {
  if (!isalpha(CurPtr[0]) && CurPtr[0] != '_')
    return ReturnError(TokStart, "Invalid variable name");

  // Otherwise, we're ok, consume the rest of the characters.
  const char *VarNameStart = CurPtr++;

  while (isalpha(*CurPtr) || isdigit(*CurPtr) || *CurPtr == '_')
    ++CurPtr;

  CurStrVal.assign(VarNameStart, CurPtr);
  return tgtok::VarName;
}

tgtok::TokKind TGLexer::LexIdentifier() {
  // The first letter is [a-zA-Z_].
  const char *IdentStart = TokStart;

  // Match the rest of the identifier regex: [0-9a-zA-Z_]*
  while (isalpha(*CurPtr) || isdigit(*CurPtr) || *CurPtr == '_')
    ++CurPtr;

  // Check to see if this identifier is a reserved keyword.
  StringRef Str(IdentStart, CurPtr-IdentStart);

  tgtok::TokKind Kind = StringSwitch<tgtok::TokKind>(Str)
    .Case("int", tgtok::Int)
    .Case("bit", tgtok::Bit)
    .Case("bits", tgtok::Bits)
    .Case("string", tgtok::String)
    .Case("list", tgtok::List)
    .Case("code", tgtok::Code)
    .Case("dag", tgtok::Dag)
    .Case("class", tgtok::Class)
    .Case("def", tgtok::Def)
    .Case("true", tgtok::TrueVal)
    .Case("false", tgtok::FalseVal)
    .Case("foreach", tgtok::Foreach)
    .Case("defm", tgtok::Defm)
    .Case("defset", tgtok::Defset)
    .Case("multiclass", tgtok::MultiClass)
    .Case("field", tgtok::Field)
    .Case("let", tgtok::Let)
    .Case("in", tgtok::In)
    .Case("defvar", tgtok::Defvar)
    .Case("include", tgtok::Include)
    .Case("if", tgtok::If)
    .Case("then", tgtok::Then)
    .Case("else", tgtok::ElseKW)
    .Case("assert", tgtok::Assert)
    .Default(tgtok::Id);

  // A couple of tokens require special processing.
  switch (Kind) {
  case tgtok::Include:
    if (LexInclude()) return tgtok::Error;
    return Lex();
  case tgtok::Id:
    CurStrVal.assign(Str.begin(), Str.end());
    break;
  default:
    break;
  }

  return Kind;
}

/// LexInclude - We just read the "include" token. Get the string token that
/// comes next and enter the include.
bool TGLexer::LexInclude() {
  // The token after the include must be a string.
  tgtok::TokKind Tok = LexToken();
  if (Tok == tgtok::Error) return true;
  if (Tok != tgtok::StrVal) {
    PrintError(getLoc(), "Expected filename after include");
    return true;
  }

  // Get the string.
  std::string Filename = CurStrVal;
  std::string IncludedFile;

  CurBuffer = SrcMgr.AddIncludeFile(Filename, SMLoc::getFromPointer(CurPtr),
                                    IncludedFile);
  if (!CurBuffer) {
    PrintError(getLoc(), "Could not find include file '" + Filename + "'");
    return true;
  }

  Dependencies.insert(IncludedFile);
  // Save the line number and lex buffer of the includer.
  CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
  CurPtr = CurBuf.begin();

  PrepIncludeStack.push_back(
      std::make_unique<std::vector<PreprocessorControlDesc>>());
  return false;
}

/// SkipBCPLComment - Skip over the comment by finding the next CR or LF.
/// Or we may end up at the end of the buffer.
void TGLexer::SkipBCPLComment() {
  ++CurPtr; // skip the second slash.
  auto EOLPos = CurBuf.find_first_of("\r\n", CurPtr - CurBuf.data());
  CurPtr = (EOLPos == StringRef::npos) ? CurBuf.end() : CurBuf.data() + EOLPos;
}

/// SkipCComment - This skips C-style /**/ comments. The only difference from C
/// is that we allow nesting.
bool TGLexer::SkipCComment() {
  ++CurPtr; // skip the star.
  unsigned CommentDepth = 1;

  while (true) {
    int CurChar = getNextChar();
    switch (CurChar) {
    case EOF:
      PrintError(TokStart, "Unterminated comment!");
      return true;
    case '*':
      // End of the comment?
      if (CurPtr[0] != '/') break;

      ++CurPtr; // End the */.
      if (--CommentDepth == 0)
        return false;
      break;
    case '/':
      // Start of a nested comment?
      if (CurPtr[0] != '*') break;
      ++CurPtr;
      ++CommentDepth;
      break;
    }
  }
}

/// LexNumber - Lex:
///    [-+]?[0-9]+
///    0x[0-9a-fA-F]+
///    0b[01]+
tgtok::TokKind TGLexer::LexNumber() {
  if (CurPtr[-1] == '0') {
    if (CurPtr[0] == 'x') {
      ++CurPtr;
      const char *NumStart = CurPtr;
      while (isxdigit(CurPtr[0]))
        ++CurPtr;

      // Requires at least one hex digit.
      if (CurPtr == NumStart)
        return ReturnError(TokStart, "Invalid hexadecimal number");

      errno = 0;
      CurIntVal = strtoll(NumStart, nullptr, 16);
      if (errno == EINVAL)
        return ReturnError(TokStart, "Invalid hexadecimal number");
      if (errno == ERANGE) {
        errno = 0;
        CurIntVal = (int64_t)strtoull(NumStart, nullptr, 16);
        if (errno == EINVAL)
          return ReturnError(TokStart, "Invalid hexadecimal number");
        if (errno == ERANGE)
          return ReturnError(TokStart, "Hexadecimal number out of range");
      }
      return tgtok::IntVal;
    } else if (CurPtr[0] == 'b') {
      ++CurPtr;
      const char *NumStart = CurPtr;
      while (CurPtr[0] == '0' || CurPtr[0] == '1')
        ++CurPtr;

      // Requires at least one binary digit.
      if (CurPtr == NumStart)
        return ReturnError(CurPtr-2, "Invalid binary number");
      CurIntVal = strtoll(NumStart, nullptr, 2);
      return tgtok::BinaryIntVal;
    }
  }

  // Check for a sign without a digit.
  if (!isdigit(CurPtr[0])) {
    if (CurPtr[-1] == '-')
      return tgtok::minus;
    else if (CurPtr[-1] == '+')
      return tgtok::plus;
  }

  while (isdigit(CurPtr[0]))
    ++CurPtr;
  CurIntVal = strtoll(TokStart, nullptr, 10);
  return tgtok::IntVal;
}

/// LexBracket - We just read '['. If this is a code block, return it,
/// otherwise return the bracket. Match: '[' and '[{ ( [^}]+ | }[^]] )* }]'
tgtok::TokKind TGLexer::LexBracket() {
  if (CurPtr[0] != '{')
    return tgtok::l_square;
  ++CurPtr;
  const char *CodeStart = CurPtr;
  while (true) {
    int Char = getNextChar();
    if (Char == EOF) break;

    if (Char != '}') continue;

    Char = getNextChar();
    if (Char == EOF) break;
    if (Char == ']') {
      CurStrVal.assign(CodeStart, CurPtr-2);
      return tgtok::CodeFragment;
    }
  }

  return ReturnError(CodeStart - 2, "Unterminated code block");
}

/// LexExclaim - Lex '!' and '![a-zA-Z]+'.
tgtok::TokKind TGLexer::LexExclaim() {
  if (!isalpha(*CurPtr))
    return ReturnError(CurPtr - 1, "Invalid \"!operator\"");

  const char *Start = CurPtr++;
  while (isalpha(*CurPtr))
    ++CurPtr;

  // Check to see which operator this is.
  tgtok::TokKind Kind =
      StringSwitch<tgtok::TokKind>(StringRef(Start, CurPtr - Start))
          .Case("eq", tgtok::XEq)
          .Case("ne", tgtok::XNe)
          .Case("le", tgtok::XLe)
          .Case("lt", tgtok::XLt)
          .Case("ge", tgtok::XGe)
          .Case("gt", tgtok::XGt)
          .Case("if", tgtok::XIf)
          .Case("cond", tgtok::XCond)
          .Case("isa", tgtok::XIsA)
          .Case("head", tgtok::XHead)
          .Case("tail", tgtok::XTail)
          .Case("size", tgtok::XSize)
          .Case("con", tgtok::XConcat)
          .Case("dag", tgtok::XDag)
          .Case("add", tgtok::XADD)
          .Case("sub", tgtok::XSUB)
          .Case("mul", tgtok::XMUL)
          .Case("not", tgtok::XNOT)
          .Case("and", tgtok::XAND)
          .Case("or", tgtok::XOR)
          .Case("xor", tgtok::XXOR)
          .Case("shl", tgtok::XSHL)
          .Case("sra", tgtok::XSRA)
          .Case("srl", tgtok::XSRL)
          .Case("cast", tgtok::XCast)
          .Case("empty", tgtok::XEmpty)
          .Case("subst", tgtok::XSubst)
          .Case("foldl", tgtok::XFoldl)
          .Case("foreach", tgtok::XForEach)
          .Case("filter", tgtok::XFilter)
          .Case("listconcat", tgtok::XListConcat)
          .Case("listsplat", tgtok::XListSplat)
          .Case("strconcat", tgtok::XStrConcat)
          .Case("interleave", tgtok::XInterleave)
          .Case("substr", tgtok::XSubstr)
          .Case("find", tgtok::XFind)
          .Cases("setdagop", "setop", tgtok::XSetDagOp) // !setop is deprecated.
          .Cases("getdagop", "getop", tgtok::XGetDagOp) // !getop is deprecated.
          .Default(tgtok::Error);

  return Kind != tgtok::Error ? Kind : ReturnError(Start-1, "Unknown operator");
}

bool TGLexer::prepExitInclude(bool IncludeStackMustBeEmpty) {
  // Report an error, if preprocessor control stack for the current
  // file is not empty.
  if (!PrepIncludeStack.back()->empty()) {
    prepReportPreprocessorStackError();

    return false;
  }

  // Pop the preprocessing controls from the include stack.
  if (PrepIncludeStack.empty()) {
    PrintFatalError("Preprocessor include stack is empty");
  }

  PrepIncludeStack.pop_back();

  if (IncludeStackMustBeEmpty) {
    if (!PrepIncludeStack.empty())
      PrintFatalError("Preprocessor include stack is not empty");
  } else {
    if (PrepIncludeStack.empty())
      PrintFatalError("Preprocessor include stack is empty");
  }

  return true;
}

tgtok::TokKind TGLexer::prepIsDirective() const {
  for (const auto &PD : PreprocessorDirs) {
    int NextChar = *CurPtr;
    bool Match = true;
    unsigned I = 0;
    for (; I < strlen(PD.Word); ++I) {
      if (NextChar != PD.Word[I]) {
        Match = false;
        break;
      }

      NextChar = peekNextChar(I + 1);
    }

    // Check for whitespace after the directive. If there is no whitespace,
    // then we do not recognize it as a preprocessing directive.
    if (Match) {
      tgtok::TokKind Kind = PD.Kind;

      // New line and EOF may follow only #else/#endif. It will be reported
      // as an error for #ifdef/#define after the call to prepLexMacroName().
      if (NextChar == ' ' || NextChar == '\t' || NextChar == EOF ||
          NextChar == '\n' ||
          // It looks like TableGen does not support '\r' as the actual
          // carriage return, e.g. getNextChar() treats a single '\r'
          // as '\n'. So we do the same here.
          NextChar == '\r')
        return Kind;

      // Allow comments after some directives, e.g.:
      //     #else// OR #else/**/
      //     #endif// OR #endif/**/
      //
      // Note that we do allow comments after #ifdef/#define here, e.g.
      //     #ifdef/**/ AND #ifdef//
      //     #define/**/ AND #define//
      //
      // These cases will be reported as incorrect after calling
      // prepLexMacroName(). We could have supported C-style comments
      // after #ifdef/#define, but this would complicate the code
      // for little benefit.
      if (NextChar == '/') {
        NextChar = peekNextChar(I + 1);

        if (NextChar == '*' || NextChar == '/')
          return Kind;

        // Pretend that we do not recognize the directive.
      }
    }
  }

  return tgtok::Error;
}

bool TGLexer::prepEatPreprocessorDirective(tgtok::TokKind Kind) {
  TokStart = CurPtr;

  for (const auto &PD : PreprocessorDirs)
    if (PD.Kind == Kind) {
      // Advance CurPtr to the end of the preprocessing word.
      CurPtr += strlen(PD.Word);
      return true;
    }

  PrintFatalError("Unsupported preprocessing token in "
                  "prepEatPreprocessorDirective()");
  return false;
}

tgtok::TokKind TGLexer::lexPreprocessor(
    tgtok::TokKind Kind, bool ReturnNextLiveToken) {

  // We must be looking at a preprocessing directive. Eat it!
  if (!prepEatPreprocessorDirective(Kind))
    PrintFatalError("lexPreprocessor() called for unknown "
                    "preprocessor directive");

  if (Kind == tgtok::Ifdef || Kind == tgtok::Ifndef) {
    StringRef MacroName = prepLexMacroName();
    StringRef IfTokName = Kind == tgtok::Ifdef ? "#ifdef" : "#ifndef";
    if (MacroName.empty())
      return ReturnError(TokStart, "Expected macro name after " + IfTokName);

    bool MacroIsDefined = DefinedMacros.count(MacroName) != 0;

    // Canonicalize ifndef to ifdef equivalent
    if (Kind == tgtok::Ifndef) {
      MacroIsDefined = !MacroIsDefined;
      Kind = tgtok::Ifdef;
    }

    // Regardless of whether we are processing tokens or not,
    // we put the #ifdef control on stack.
    PrepIncludeStack.back()->push_back(
        {Kind, MacroIsDefined, SMLoc::getFromPointer(TokStart)});

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr, "Only comments are supported after " +
                                     IfTokName + " NAME");

    // If we were not processing tokens before this #ifdef,
    // then just return back to the lines skipping code.
    if (!ReturnNextLiveToken)
      return Kind;

    // If we were processing tokens before this #ifdef,
    // and the macro is defined, then just return the next token.
    if (MacroIsDefined)
      return LexToken();

    // We were processing tokens before this #ifdef, and the macro
    // is not defined, so we have to start skipping the lines.
    // If the skipping is successful, it will return the token following
    // either #else or #endif corresponding to this #ifdef.
    if (prepSkipRegion(ReturnNextLiveToken))
      return LexToken();

    return tgtok::Error;
  } else if (Kind == tgtok::Else) {
    // Check if this #else is correct before calling prepSkipDirectiveEnd(),
    // which will move CurPtr away from the beginning of #else.
    if (PrepIncludeStack.back()->empty())
      return ReturnError(TokStart, "#else without #ifdef or #ifndef");

    PreprocessorControlDesc IfdefEntry = PrepIncludeStack.back()->back();

    if (IfdefEntry.Kind != tgtok::Ifdef) {
      PrintError(TokStart, "double #else");
      return ReturnError(IfdefEntry.SrcPos, "Previous #else is here");
    }

    // Replace the corresponding #ifdef's control with its negation
    // on the control stack.
    PrepIncludeStack.back()->pop_back();
    PrepIncludeStack.back()->push_back(
        {Kind, !IfdefEntry.IsDefined, SMLoc::getFromPointer(TokStart)});

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr, "Only comments are supported after #else");

    // If we were processing tokens before this #else,
    // we have to start skipping lines until the matching #endif.
    if (ReturnNextLiveToken) {
      if (prepSkipRegion(ReturnNextLiveToken))
        return LexToken();

      return tgtok::Error;
    }

    // Return to the lines skipping code.
    return Kind;
  } else if (Kind == tgtok::Endif) {
    // Check if this #endif is correct before calling prepSkipDirectiveEnd(),
    // which will move CurPtr away from the beginning of #endif.
    if (PrepIncludeStack.back()->empty())
      return ReturnError(TokStart, "#endif without #ifdef");

    auto &IfdefOrElseEntry = PrepIncludeStack.back()->back();

    if (IfdefOrElseEntry.Kind != tgtok::Ifdef &&
        IfdefOrElseEntry.Kind != tgtok::Else) {
      PrintFatalError("Invalid preprocessor control on the stack");
      return tgtok::Error;
    }

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr, "Only comments are supported after #endif");

    PrepIncludeStack.back()->pop_back();

    // If we were processing tokens before this #endif, then
    // we should continue it.
    if (ReturnNextLiveToken) {
      return LexToken();
    }

    // Return to the lines skipping code.
    return Kind;
  } else if (Kind == tgtok::Define) {
    StringRef MacroName = prepLexMacroName();
    if (MacroName.empty())
      return ReturnError(TokStart, "Expected macro name after #define");

    if (!DefinedMacros.insert(MacroName).second)
      PrintWarning(getLoc(),
                   "Duplicate definition of macro: " + Twine(MacroName));

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr,
                         "Only comments are supported after #define NAME");

    if (!ReturnNextLiveToken) {
      PrintFatalError("#define must be ignored during the lines skipping");
      return tgtok::Error;
    }

    return LexToken();
  }

  PrintFatalError("Preprocessing directive is not supported");
  return tgtok::Error;
}

bool TGLexer::prepSkipRegion(bool MustNeverBeFalse) {
  if (!MustNeverBeFalse)
    PrintFatalError("Invalid recursion.");

  do {
    // Skip all symbols to the line end.
    prepSkipToLineEnd();

    // Find the first non-whitespace symbol in the next line(s).
    if (!prepSkipLineBegin())
      return false;

    // If the first non-blank/comment symbol on the line is '#',
    // it may be a start of preprocessing directive.
    //
    // If it is not '#' just go to the next line.
    if (*CurPtr == '#')
      ++CurPtr;
    else
      continue;

    tgtok::TokKind Kind = prepIsDirective();

    // If we did not find a preprocessing directive or it is #define,
    // then just skip to the next line. We do not have to do anything
    // for #define in the line-skipping mode.
    if (Kind == tgtok::Error || Kind == tgtok::Define)
      continue;

    tgtok::TokKind ProcessedKind = lexPreprocessor(Kind, false);

    // If lexPreprocessor() encountered an error during lexing this
    // preprocessor idiom, then return false to the calling lexPreprocessor().
    // This will force tgtok::Error to be returned to the tokens processing.
    if (ProcessedKind == tgtok::Error)
      return false;

    if (Kind != ProcessedKind)
      PrintFatalError("prepIsDirective() and lexPreprocessor() "
                      "returned different token kinds");

    // If this preprocessing directive enables tokens processing,
    // then return to the lexPreprocessor() and get to the next token.
    // We can move from line-skipping mode to processing tokens only
    // due to #else or #endif.
    if (prepIsProcessingEnabled()) {
      if (Kind != tgtok::Else && Kind != tgtok::Endif) {
        PrintFatalError("Tokens processing was enabled by an unexpected "
                        "preprocessing directive");
        return false;
      }

      return true;
    }
  } while (CurPtr != CurBuf.end());

  // We have reached the end of the file, but never left the lines-skipping
  // mode. This means there is no matching #endif.
  prepReportPreprocessorStackError();
  return false;
}

StringRef TGLexer::prepLexMacroName() {
  // Skip whitespaces between the preprocessing directive and the macro name.
  while (*CurPtr == ' ' || *CurPtr == '\t')
    ++CurPtr;

  TokStart = CurPtr;
  // Macro names start with [a-zA-Z_].
  if (*CurPtr != '_' && !isalpha(*CurPtr))
    return "";

  // Match the rest of the identifier regex: [0-9a-zA-Z_]*
  while (isalpha(*CurPtr) || isdigit(*CurPtr) || *CurPtr == '_')
    ++CurPtr;

  return StringRef(TokStart, CurPtr - TokStart);
}

bool TGLexer::prepSkipLineBegin() {
  while (CurPtr != CurBuf.end()) {
    switch (*CurPtr) {
    case ' ':
    case '\t':
    case '\n':
    case '\r':
      break;

    case '/': {
      int NextChar = peekNextChar(1);
      if (NextChar == '*') {
        // Skip C-style comment.
        // Note that we do not care about skipping the C++-style comments.
        // If the line contains "//", it may not contain any processable
        // preprocessing directive. Just return CurPtr pointing to
        // the first '/' in this case. We also do not care about
        // incorrect symbols after the first '/' - we are in lines-skipping
        // mode, so incorrect code is allowed to some extent.

        // Set TokStart to the beginning of the comment to enable proper
        // diagnostic printing in case of error in SkipCComment().
        TokStart = CurPtr;

        // CurPtr must point to '*' before call to SkipCComment().
        ++CurPtr;
        if (SkipCComment())
          return false;
      } else {
        // CurPtr points to the non-whitespace '/'.
        return true;
      }

      // We must not increment CurPtr after the comment was lexed.
      continue;
    }

    default:
      return true;
    }

    ++CurPtr;
  }

  // We have reached the end of the file. Return to the lines skipping
  // code, and allow it to handle the EOF as needed.
  return true;
}

bool TGLexer::prepSkipDirectiveEnd() {
  while (CurPtr != CurBuf.end()) {
    switch (*CurPtr) {
    case ' ':
    case '\t':
      break;

    case '\n':
    case '\r':
      return true;

    case '/': {
      int NextChar = peekNextChar(1);
      if (NextChar == '/') {
        // Skip C++-style comment.
        // We may just return true now, but let's skip to the line/buffer end
        // to simplify the method specification.
        ++CurPtr;
        SkipBCPLComment();
      } else if (NextChar == '*') {
        // When we are skipping C-style comment at the end of a preprocessing
        // directive, we can skip several lines. If any meaningful TD token
        // follows the end of the C-style comment on the same line, it will
        // be considered as an invalid usage of TD token.
        // For example, we want to forbid usages like this one:
        //     #define MACRO class Class {}
        // But with C-style comments we also disallow the following:
        //     #define MACRO /* This macro is used
        //                      to ... */ class Class {}
        // One can argue that this should be allowed, but it does not seem
        // to be worth of the complication. Moreover, this matches
        // the C preprocessor behavior.

        // Set TokStart to the beginning of the comment to enable proper
        // diagnostic printer in case of error in SkipCComment().
        TokStart = CurPtr;
        ++CurPtr;
        if (SkipCComment())
          return false;
      } else {
        TokStart = CurPtr;
        PrintError(CurPtr, "Unexpected character");
        return false;
      }

      // We must not increment CurPtr after the comment was lexed.
      continue;
    }

    default:
      // Do not allow any non-whitespaces after the directive.
      TokStart = CurPtr;
      return false;
    }

    ++CurPtr;
  }

  return true;
}

void TGLexer::prepSkipToLineEnd() {
  while (*CurPtr != '\n' && *CurPtr != '\r' && CurPtr != CurBuf.end())
    ++CurPtr;
}

bool TGLexer::prepIsProcessingEnabled() {
  for (const PreprocessorControlDesc &I :
       llvm::reverse(*PrepIncludeStack.back()))
    if (!I.IsDefined)
      return false;

  return true;
}

void TGLexer::prepReportPreprocessorStackError() {
  if (PrepIncludeStack.back()->empty())
    PrintFatalError("prepReportPreprocessorStackError() called with "
                    "empty control stack");

  auto &PrepControl = PrepIncludeStack.back()->back();
  PrintError(CurBuf.end(), "Reached EOF without matching #endif");
  PrintError(PrepControl.SrcPos, "The latest preprocessor control is here");

  TokStart = CurPtr;
}