//===- TGLexer.cpp - Lexer for TableGen -----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implement the Lexer for TableGen.
//
//===----------------------------------------------------------------------===//

#include "TGLexer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Config/config.h" // for strtoull()/strtoll() define
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/TableGen/Error.h"
#include <algorithm>
#include <cctype>
#include <cerrno>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>

using namespace llvm;

namespace {
// A list of supported preprocessing directives with their
// internal token kinds and names.
struct {
  tgtok::TokKind Kind;
  const char *Word;
} PreprocessorDirs[] = {
  { tgtok::Ifdef, "ifdef" },
  { tgtok::Ifndef, "ifndef" },
  { tgtok::Else, "else" },
  { tgtok::Endif, "endif" },
  { tgtok::Define, "define" }
};
} // end anonymous namespace

/// Construct a lexer over the main buffer of \p SM. Each entry of \p Macros
/// is recorded as a macro that is already defined (as if by '#define'), so
/// '#ifdef NAME' will be true for every NAME passed here.
TGLexer::TGLexer(SourceMgr &SM, ArrayRef<std::string> Macros) : SrcMgr(SM) {
  CurBuffer = SrcMgr.getMainFileID();
  CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
  CurPtr = CurBuf.begin();
  TokStart = nullptr;

  // Pretend that we enter the "top-level" include file.
  PrepIncludeStack.push_back(
      std::make_unique<std::vector<PreprocessorControlDesc>>());

  // Put all macros defined in the command line into the DefinedMacros set.
  std::for_each(Macros.begin(), Macros.end(),
                [this](const std::string &MacroName) {
                  DefinedMacros.insert(MacroName);
                });
}

/// getLoc - Return the source location of the start of the current token.
SMLoc TGLexer::getLoc() const {
  return SMLoc::getFromPointer(TokStart);
}

/// ReturnError - Set the error to the specified string at the specified
/// location. This is defined to always return tgtok::Error.
tgtok::TokKind TGLexer::ReturnError(SMLoc Loc, const Twine &Msg) {
  PrintError(Loc, Msg);
  return tgtok::Error;
}

/// ReturnError - Convenience overload taking a raw buffer pointer instead of
/// an SMLoc. Always returns tgtok::Error.
tgtok::TokKind TGLexer::ReturnError(const char *Loc, const Twine &Msg) {
  return ReturnError(SMLoc::getFromPointer(Loc), Msg);
}

/// processEOF - Called when the lexer reaches the end of the current buffer.
/// If this buffer was entered via an 'include', switch back to the parent
/// buffer (resuming right after the include construct) and return true so
/// lexing can continue there. If this is the top-level buffer, verify the
/// preprocessing control stack and return false to signal the final EOF.
bool TGLexer::processEOF() {
  SMLoc ParentIncludeLoc = SrcMgr.getParentIncludeLoc(CurBuffer);
  if (ParentIncludeLoc != SMLoc()) {
    // If prepExitInclude() detects a problem with the preprocessing
    // control stack, it will return false. Pretend that we reached
    // the final EOF and stop lexing more tokens by returning false
    // to LexToken().
    if (!prepExitInclude(false))
      return false;

    CurBuffer = SrcMgr.FindBufferContainingLoc(ParentIncludeLoc);
    CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
    CurPtr = ParentIncludeLoc.getPointer();
    // Make sure TokStart points into the parent file's buffer.
    // LexToken() assigns to it before calling getNextChar(),
    // so it is pointing into the included file now.
    TokStart = CurPtr;
    return true;
  }

  // Pretend that we exit the "top-level" include file.
  // Note that in case of an error (e.g. control stack imbalance)
  // the routine will issue a fatal error.
  prepExitInclude(true);
  return false;
}

/// getNextChar - Read and consume the next character from the buffer.
/// Newline sequences ('\n', '\r', '\r\n', '\n\r') are all normalized to a
/// single '\n'. A nul character that is not at the buffer end is treated as
/// whitespace; the nul at the buffer end produces EOF (without consuming it,
/// so repeated calls keep returning EOF).
int TGLexer::getNextChar() {
  char CurChar = *CurPtr++;
  switch (CurChar) {
  default:
    // Return the character as an unsigned char to avoid sign extension of
    // high-bit bytes into negative ints (which would collide with EOF).
    return (unsigned char)CurChar;
  case 0: {
    // A nul character in the stream is either the end of the current buffer or
    // a random nul in the file. Disambiguate that here.
    if (CurPtr-1 != CurBuf.end())
      return 0;  // Just whitespace.

    // Otherwise, return end of file.
    --CurPtr;    // Another call to lex will return EOF again.
    return EOF;
  }
  case '\n':
  case '\r':
    // Handle the newline character by ignoring it and incrementing the line
    // count. However, be careful about 'dos style' files with \n\r in them.
    // Only treat a \n\r or \r\n as a single line.
    if ((*CurPtr == '\n' || (*CurPtr == '\r')) &&
        *CurPtr != CurChar)
      ++CurPtr;  // Eat the two char newline sequence.
    return '\n';
  }
}

/// peekNextChar - Return the character \p Index positions ahead of CurPtr
/// without consuming anything.
/// NOTE(review): this may read at (or past) the buffer-terminating nul when
/// peeking near the end; it appears to rely on the buffer being
/// nul-terminated — confirm against MemoryBuffer's guarantees.
int TGLexer::peekNextChar(int Index) const {
  return *(CurPtr + Index);
}

/// LexToken - Read and return the next token from the buffer, consuming all
/// leading whitespace and comments. \p FileOrLineStart is true when the lexer
/// is positioned at the start of a line (or file), which is the only place a
/// '#' may begin a preprocessing directive rather than the paste operator.
tgtok::TokKind TGLexer::LexToken(bool FileOrLineStart) {
  TokStart = CurPtr;
  // This always consumes at least one character.
  int CurChar = getNextChar();

  switch (CurChar) {
  default:
    // Handle letters: [a-zA-Z_]
    if (isalpha(CurChar) || CurChar == '_')
      return LexIdentifier();

    // Unknown character, emit an error.
    return ReturnError(TokStart, "Unexpected character");
  case EOF:
    // Lex next token, if we just left an include file.
    // Note that leaving an include file means that the next
    // symbol is located at the end of the 'include "..."'
    // construct, so LexToken() is called with default
    // false parameter.
    if (processEOF())
      return LexToken();

    // Return EOF denoting the end of lexing.
    return tgtok::Eof;

  case ':': return tgtok::colon;
  case ';': return tgtok::semi;
  case ',': return tgtok::comma;
  case '<': return tgtok::less;
  case '>': return tgtok::greater;
  case ']': return tgtok::r_square;
  case '{': return tgtok::l_brace;
  case '}': return tgtok::r_brace;
  case '(': return tgtok::l_paren;
  case ')': return tgtok::r_paren;
  case '=': return tgtok::equal;
  case '?': return tgtok::question;
  case '#':
    // A '#' at the start of a line may introduce a preprocessing directive;
    // anywhere else it is the paste operator.
    if (FileOrLineStart) {
      tgtok::TokKind Kind = prepIsDirective();
      if (Kind != tgtok::Error)
        return lexPreprocessor(Kind);
    }

    return tgtok::paste;

  // The period is a separate case so we can recognize the "..."
  // range punctuator.
  case '.':
    if (peekNextChar(0) == '.') {
      ++CurPtr; // Eat second dot.
      if (peekNextChar(0) == '.') {
        ++CurPtr; // Eat third dot.
        return tgtok::dotdotdot;
      }
      return ReturnError(TokStart, "Invalid '..' punctuation");
    }
    return tgtok::dot;

  case '\r':
    // getNextChar() normalizes all newline sequences to '\n', so a raw '\r'
    // here indicates an internal invariant violation.
    PrintFatalError("getNextChar() must never return '\r'");
    return tgtok::Error;

  case 0:
  case ' ':
  case '\t':
    // Ignore whitespace.
    return LexToken(FileOrLineStart);
  case '\n':
    // Ignore whitespace, and identify the new line.
    return LexToken(true);
  case '/':
    // If this is the start of a // comment, skip until the end of the line or
    // the end of the buffer.
    if (*CurPtr == '/')
      SkipBCPLComment();
    else if (*CurPtr == '*') {
      if (SkipCComment())
        return tgtok::Error;
    } else // Otherwise, this is an error.
      return ReturnError(TokStart, "Unexpected character");
    return LexToken(FileOrLineStart);
  case '-': case '+':
  case '0': case '1': case '2': case '3': case '4': case '5': case '6':
  case '7': case '8': case '9': {
    int NextChar = 0;
    if (isdigit(CurChar)) {
      // Allow identifiers to start with a number if it is followed by
      // an identifier. This can happen with paste operations like
      // foo#8i.
      int i = 0;
      do {
        NextChar = peekNextChar(i++);
      } while (isdigit(NextChar));

      if (NextChar == 'x' || NextChar == 'b') {
        // If this is [0-9]b[01] or [0-9]x[0-9A-fa-f] this is most
        // likely a number.
        int NextNextChar = peekNextChar(i);
        switch (NextNextChar) {
        default:
          break;
        case '0': case '1':
          if (NextChar == 'b')
            return LexNumber();
          LLVM_FALLTHROUGH;
        case '2': case '3': case '4': case '5':
        case '6': case '7': case '8': case '9':
        case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
        case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
          if (NextChar == 'x')
            return LexNumber();
          break;
        }
      }
    }

    if (isalpha(NextChar) || NextChar == '_')
      return LexIdentifier();

    return LexNumber();
  }
  case '"': return LexString();
  case '$': return LexVarName();
  case '[': return LexBracket();
  case '!': return LexExclaim();
  }
}

/// LexString - Lex "[^"]*"
/// The opening quote has already been consumed. Supported escapes are
/// \\, \', \", \t and \n; any other escape, a raw newline, or EOF inside
/// the literal is an error. The decoded value is stored in CurStrVal.
tgtok::TokKind TGLexer::LexString() {
  const char *StrStart = CurPtr;

  CurStrVal = "";

  while (*CurPtr != '"') {
    // If we hit the end of the buffer, report an error.
    if (*CurPtr == 0 && CurPtr == CurBuf.end())
      return ReturnError(StrStart, "End of file in string literal");

    if (*CurPtr == '\n' || *CurPtr == '\r')
      return ReturnError(StrStart, "End of line in string literal");

    if (*CurPtr != '\\') {
      CurStrVal += *CurPtr++;
      continue;
    }

    ++CurPtr;

    switch (*CurPtr) {
    case '\\': case '\'': case '"':
      // These turn into their literal character.
      CurStrVal += *CurPtr++;
      break;
    case 't':
      CurStrVal += '\t';
      ++CurPtr;
      break;
    case 'n':
      CurStrVal += '\n';
      ++CurPtr;
      break;

    case '\n':
    case '\r':
      return ReturnError(CurPtr, "escaped newlines not supported in tblgen");

    // If we hit the end of the buffer, report an error.
    case '\0':
      if (CurPtr == CurBuf.end())
        return ReturnError(StrStart, "End of file in string literal");
      LLVM_FALLTHROUGH;
    default:
      return ReturnError(CurPtr, "invalid escape in string literal");
    }
  }

  ++CurPtr;
  return tgtok::StrVal;
}

/// LexVarName - Lex the name following a '$' (which has been consumed):
/// [a-zA-Z_][0-9a-zA-Z_]*. The name (without the '$') is stored in CurStrVal.
tgtok::TokKind TGLexer::LexVarName() {
  if (!isalpha(CurPtr[0]) && CurPtr[0] != '_')
    return ReturnError(TokStart, "Invalid variable name");

  // Otherwise, we're ok, consume the rest of the characters.
  const char *VarNameStart = CurPtr++;

  while (isalpha(*CurPtr) || isdigit(*CurPtr) || *CurPtr == '_')
    ++CurPtr;

  CurStrVal.assign(VarNameStart, CurPtr);
  return tgtok::VarName;
}

/// LexIdentifier - Lex an identifier whose first character has already been
/// consumed, then map reserved keywords to their token kinds. An 'include'
/// keyword is handled immediately by entering the included file. For a plain
/// identifier the text is stored in CurStrVal and tgtok::Id is returned.
tgtok::TokKind TGLexer::LexIdentifier() {
  // The first letter is [a-zA-Z_].
  const char *IdentStart = TokStart;

  // Match the rest of the identifier regex: [0-9a-zA-Z_]*
  while (isalpha(*CurPtr) || isdigit(*CurPtr) || *CurPtr == '_')
    ++CurPtr;

  // Check to see if this identifier is a reserved keyword.
  StringRef Str(IdentStart, CurPtr-IdentStart);

  tgtok::TokKind Kind = StringSwitch<tgtok::TokKind>(Str)
    .Case("int", tgtok::Int)
    .Case("bit", tgtok::Bit)
    .Case("bits", tgtok::Bits)
    .Case("string", tgtok::String)
    .Case("list", tgtok::List)
    .Case("code", tgtok::Code)
    .Case("dag", tgtok::Dag)
    .Case("class", tgtok::Class)
    .Case("def", tgtok::Def)
    .Case("true", tgtok::TrueVal)
    .Case("false", tgtok::FalseVal)
    .Case("foreach", tgtok::Foreach)
    .Case("defm", tgtok::Defm)
    .Case("defset", tgtok::Defset)
    .Case("multiclass", tgtok::MultiClass)
    .Case("field", tgtok::Field)
    .Case("let", tgtok::Let)
    .Case("in", tgtok::In)
    .Case("defvar", tgtok::Defvar)
    .Case("include", tgtok::Include)
    .Case("if", tgtok::If)
    .Case("then", tgtok::Then)
    .Case("else", tgtok::ElseKW)
    .Default(tgtok::Id);

  // A couple of tokens require special processing.
  switch (Kind) {
  case tgtok::Include:
    if (LexInclude()) return tgtok::Error;
    return Lex();
  case tgtok::Id:
    CurStrVal.assign(Str.begin(), Str.end());
    break;
  default:
    break;
  }

  return Kind;
}

/// LexInclude - We just read the "include" token. Get the string token that
/// comes next and enter the include.
bool TGLexer::LexInclude() {
  // The token after the include must be a string.
  tgtok::TokKind Tok = LexToken();
  if (Tok == tgtok::Error) return true;
  if (Tok != tgtok::StrVal) {
    PrintError(getLoc(), "Expected filename after include");
    return true;
  }

  // Get the string.
  std::string Filename = CurStrVal;
  std::string IncludedFile;

  CurBuffer = SrcMgr.AddIncludeFile(Filename, SMLoc::getFromPointer(CurPtr),
                                    IncludedFile);
  if (!CurBuffer) {
    PrintError(getLoc(), "Could not find include file '" + Filename + "'");
    return true;
  }

  Dependencies.insert(IncludedFile);
  // Save the line number and lex buffer of the includer.
  CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
  CurPtr = CurBuf.begin();

  // Each included file gets its own preprocessing control stack so that
  // #ifdef/#endif pairs must balance within a single file.
  PrepIncludeStack.push_back(
      std::make_unique<std::vector<PreprocessorControlDesc>>());
  return false;
}

/// SkipBCPLComment - Skip a '//' comment. The first '/' has already been
/// consumed; CurPtr is left at the terminating newline (or buffer end).
void TGLexer::SkipBCPLComment() {
  ++CurPtr;  // skip the second slash.
  while (true) {
    switch (*CurPtr) {
    case '\n':
    case '\r':
      return;  // Newline is end of comment.
    case 0:
      // If this is the end of the buffer, end the comment.
      if (CurPtr == CurBuf.end())
        return;
      break;
    }
    // Otherwise, skip the character.
    ++CurPtr;
  }
}

/// SkipCComment - This skips C-style /**/ comments. The only difference from C
/// is that we allow nesting. Returns true on error (unterminated comment).
bool TGLexer::SkipCComment() {
  ++CurPtr;  // skip the star.
  unsigned CommentDepth = 1;

  while (true) {
    int CurChar = getNextChar();
    switch (CurChar) {
    case EOF:
      PrintError(TokStart, "Unterminated comment!");
      return true;
    case '*':
      // End of the comment?
      if (CurPtr[0] != '/') break;

      ++CurPtr;   // End the */.
      if (--CommentDepth == 0)
        return false;
      break;
    case '/':
      // Start of a nested comment?
      if (CurPtr[0] != '*') break;
      ++CurPtr;
      ++CommentDepth;
      break;
    }
  }
}

/// LexNumber - Lex:
///    [-+]?[0-9]+
///    0x[0-9a-fA-F]+
///    0b[01]+
/// The first character was already consumed (CurPtr[-1]). Decimal and hex
/// values go to CurIntVal as tgtok::IntVal; binary literals are returned as
/// tgtok::BinaryIntVal to preserve their width. A bare '-' or '+' that is not
/// followed by a digit is returned as the corresponding operator token.
tgtok::TokKind TGLexer::LexNumber() {
  if (CurPtr[-1] == '0') {
    if (CurPtr[0] == 'x') {
      ++CurPtr;
      const char *NumStart = CurPtr;
      while (isxdigit(CurPtr[0]))
        ++CurPtr;

      // Requires at least one hex digit.
      if (CurPtr == NumStart)
        return ReturnError(TokStart, "Invalid hexadecimal number");

      errno = 0;
      CurIntVal = strtoll(NumStart, nullptr, 16);
      if (errno == EINVAL)
        return ReturnError(TokStart, "Invalid hexadecimal number");
      if (errno == ERANGE) {
        // The value overflows int64_t; retry as unsigned so values up to
        // UINT64_MAX still round-trip through the signed storage.
        errno = 0;
        CurIntVal = (int64_t)strtoull(NumStart, nullptr, 16);
        if (errno == EINVAL)
          return ReturnError(TokStart, "Invalid hexadecimal number");
        if (errno == ERANGE)
          return ReturnError(TokStart, "Hexadecimal number out of range");
      }
      return tgtok::IntVal;
    } else if (CurPtr[0] == 'b') {
      ++CurPtr;
      const char *NumStart = CurPtr;
      while (CurPtr[0] == '0' || CurPtr[0] == '1')
        ++CurPtr;

      // Requires at least one binary digit.
      if (CurPtr == NumStart)
        return ReturnError(CurPtr-2, "Invalid binary number");
      CurIntVal = strtoll(NumStart, nullptr, 2);
      return tgtok::BinaryIntVal;
    }
  }

  // Check for a sign without a digit.
  if (!isdigit(CurPtr[0])) {
    if (CurPtr[-1] == '-')
      return tgtok::minus;
    else if (CurPtr[-1] == '+')
      return tgtok::plus;
  }

  while (isdigit(CurPtr[0]))
    ++CurPtr;
  CurIntVal = strtoll(TokStart, nullptr, 10);
  return tgtok::IntVal;
}

/// LexBracket - We just read '['. If this is a code block, return it,
/// otherwise return the bracket. Match: '[' and '[{ ( [^}]+ | }[^]] )* }]'
tgtok::TokKind TGLexer::LexBracket() {
  if (CurPtr[0] != '{')
    return tgtok::l_square;
  ++CurPtr;
  const char *CodeStart = CurPtr;
  while (true) {
    int Char = getNextChar();
    if (Char == EOF) break;

    if (Char != '}') continue;

    Char = getNextChar();
    if (Char == EOF) break;
    if (Char == ']') {
      // Store the code between '[{' and '}]' (CurPtr is past the ']').
      CurStrVal.assign(CodeStart, CurPtr-2);
      return tgtok::CodeFragment;
    }
  }

  return ReturnError(CodeStart-2, "Unterminated Code Block");
}

/// LexExclaim - Lex '!' and '![a-zA-Z]+'.
tgtok::TokKind TGLexer::LexExclaim() {
  if (!isalpha(*CurPtr))
    return ReturnError(CurPtr - 1, "Invalid \"!operator\"");

  const char *Start = CurPtr++;
  while (isalpha(*CurPtr))
    ++CurPtr;

  // Check to see which operator this is.
  tgtok::TokKind Kind =
      StringSwitch<tgtok::TokKind>(StringRef(Start, CurPtr - Start))
          .Case("eq", tgtok::XEq)
          .Case("ne", tgtok::XNe)
          .Case("le", tgtok::XLe)
          .Case("lt", tgtok::XLt)
          .Case("ge", tgtok::XGe)
          .Case("gt", tgtok::XGt)
          .Case("if", tgtok::XIf)
          .Case("cond", tgtok::XCond)
          .Case("isa", tgtok::XIsA)
          .Case("head", tgtok::XHead)
          .Case("tail", tgtok::XTail)
          .Case("size", tgtok::XSize)
          .Case("con", tgtok::XConcat)
          .Case("dag", tgtok::XDag)
          .Case("add", tgtok::XADD)
          .Case("sub", tgtok::XSUB)
          .Case("mul", tgtok::XMUL)
          .Case("not", tgtok::XNOT)
          .Case("and", tgtok::XAND)
          .Case("or", tgtok::XOR)
          .Case("xor", tgtok::XXOR)
          .Case("shl", tgtok::XSHL)
          .Case("sra", tgtok::XSRA)
          .Case("srl", tgtok::XSRL)
          .Case("cast", tgtok::XCast)
          .Case("empty", tgtok::XEmpty)
          .Case("subst", tgtok::XSubst)
          .Case("foldl", tgtok::XFoldl)
          .Case("foreach", tgtok::XForEach)
          .Case("listconcat", tgtok::XListConcat)
          .Case("listsplat", tgtok::XListSplat)
          .Case("strconcat", tgtok::XStrConcat)
          .Case("interleave", tgtok::XInterleave)
          .Cases("setdagop", "setop", tgtok::XSetDagOp) // !setop is deprecated.
          .Cases("getdagop", "getop", tgtok::XGetDagOp) // !getop is deprecated.
          .Default(tgtok::Error);

  return Kind != tgtok::Error ? Kind : ReturnError(Start-1, "Unknown operator");
}

/// prepExitInclude - Pop the preprocessing control stack entry for the file
/// being exited, verifying that all #ifdef regions opened in that file were
/// closed. \p IncludeStackMustBeEmpty is true only when exiting the top-level
/// file. Returns false if the per-file control stack was not empty (an error
/// has been reported); issues a fatal error on include-stack imbalance.
bool TGLexer::prepExitInclude(bool IncludeStackMustBeEmpty) {
  // Report an error, if preprocessor control stack for the current
  // file is not empty.
  if (!PrepIncludeStack.back()->empty()) {
    prepReportPreprocessorStackError();

    return false;
  }

  // Pop the preprocessing controls from the include stack.
  if (PrepIncludeStack.empty()) {
    PrintFatalError("Preprocessor include stack is empty");
  }

  PrepIncludeStack.pop_back();

  if (IncludeStackMustBeEmpty) {
    if (!PrepIncludeStack.empty())
      PrintFatalError("Preprocessor include stack is not empty");
  } else {
    if (PrepIncludeStack.empty())
      PrintFatalError("Preprocessor include stack is empty");
  }

  return true;
}

/// prepIsDirective - CurPtr points right after a '#'. Determine whether a
/// supported preprocessing directive word follows, and return its token kind,
/// or tgtok::Error if none matches. The directive word must be followed by
/// whitespace, newline, EOF, or the start of a comment; CurPtr is not
/// advanced.
tgtok::TokKind TGLexer::prepIsDirective() const {
  for (unsigned ID = 0; ID < llvm::array_lengthof(PreprocessorDirs); ++ID) {
    int NextChar = *CurPtr;
    bool Match = true;
    unsigned I = 0;
    for (; I < strlen(PreprocessorDirs[ID].Word); ++I) {
      if (NextChar != PreprocessorDirs[ID].Word[I]) {
        Match = false;
        break;
      }

      NextChar = peekNextChar(I + 1);
    }

    // Check for whitespace after the directive. If there is no whitespace,
    // then we do not recognize it as a preprocessing directive.
    if (Match) {
      tgtok::TokKind Kind = PreprocessorDirs[ID].Kind;

      // New line and EOF may follow only #else/#endif. It will be reported
      // as an error for #ifdef/#define after the call to prepLexMacroName().
      if (NextChar == ' ' || NextChar == '\t' || NextChar == EOF ||
          NextChar == '\n' ||
          // It looks like TableGen does not support '\r' as the actual
          // carriage return, e.g. getNextChar() treats a single '\r'
          // as '\n'. So we do the same here.
          NextChar == '\r')
        return Kind;

      // Allow comments after some directives, e.g.:
      //     #else// OR #else/**/
      //     #endif// OR #endif/**/
      //
      // Note that we do allow comments after #ifdef/#define here, e.g.
      //     #ifdef/**/ AND #ifdef//
      //     #define/**/ AND #define//
      //
      // These cases will be reported as incorrect after calling
      // prepLexMacroName(). We could have supported C-style comments
      // after #ifdef/#define, but this would complicate the code
      // for little benefit.
      if (NextChar == '/') {
        NextChar = peekNextChar(I + 1);

        if (NextChar == '*' || NextChar == '/')
          return Kind;

        // Pretend that we do not recognize the directive.
      }
    }
  }

  return tgtok::Error;
}

/// prepEatPreprocessorDirective - Advance CurPtr past the directive word
/// corresponding to \p Kind (which prepIsDirective() already recognized).
/// Issues a fatal error if \p Kind is not a known directive.
bool TGLexer::prepEatPreprocessorDirective(tgtok::TokKind Kind) {
  TokStart = CurPtr;

  for (unsigned ID = 0; ID < llvm::array_lengthof(PreprocessorDirs); ++ID)
    if (PreprocessorDirs[ID].Kind == Kind) {
      // Advance CurPtr to the end of the preprocessing word.
      CurPtr += strlen(PreprocessorDirs[ID].Word);
      return true;
    }

  PrintFatalError("Unsupported preprocessing token in "
                  "prepEatPreprocessorDirective()");
  return false;
}

/// lexPreprocessor - Handle one preprocessing directive of kind \p Kind,
/// updating the control stack (PrepIncludeStack) and DefinedMacros as needed.
/// \p ReturnNextLiveToken is true when we are lexing tokens normally: then the
/// next live token is lexed and returned (possibly after skipping a disabled
/// region). When false (called from prepSkipRegion's line-skipping mode), the
/// directive's own kind is returned instead.
tgtok::TokKind TGLexer::lexPreprocessor(
    tgtok::TokKind Kind, bool ReturnNextLiveToken) {

  // We must be looking at a preprocessing directive. Eat it!
  if (!prepEatPreprocessorDirective(Kind))
    PrintFatalError("lexPreprocessor() called for unknown "
                    "preprocessor directive");

  if (Kind == tgtok::Ifdef || Kind == tgtok::Ifndef) {
    StringRef MacroName = prepLexMacroName();
    StringRef IfTokName = Kind == tgtok::Ifdef ? "#ifdef" : "#ifndef";
    if (MacroName.empty())
      return ReturnError(TokStart, "Expected macro name after " + IfTokName);

    bool MacroIsDefined = DefinedMacros.count(MacroName) != 0;

    // Canonicalize ifndef to ifdef equivalent
    if (Kind == tgtok::Ifndef) {
      MacroIsDefined = !MacroIsDefined;
      Kind = tgtok::Ifdef;
    }

    // Regardless of whether we are processing tokens or not,
    // we put the #ifdef control on stack.
    PrepIncludeStack.back()->push_back(
        {Kind, MacroIsDefined, SMLoc::getFromPointer(TokStart)});

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr, "Only comments are supported after " +
                         IfTokName + " NAME");

    // If we were not processing tokens before this #ifdef,
    // then just return back to the lines skipping code.
    if (!ReturnNextLiveToken)
      return Kind;

    // If we were processing tokens before this #ifdef,
    // and the macro is defined, then just return the next token.
    if (MacroIsDefined)
      return LexToken();

    // We were processing tokens before this #ifdef, and the macro
    // is not defined, so we have to start skipping the lines.
    // If the skipping is successful, it will return the token following
    // either #else or #endif corresponding to this #ifdef.
    if (prepSkipRegion(ReturnNextLiveToken))
      return LexToken();

    return tgtok::Error;
  } else if (Kind == tgtok::Else) {
    // Check if this #else is correct before calling prepSkipDirectiveEnd(),
    // which will move CurPtr away from the beginning of #else.
    if (PrepIncludeStack.back()->empty())
      return ReturnError(TokStart, "#else without #ifdef or #ifndef");

    PreprocessorControlDesc IfdefEntry = PrepIncludeStack.back()->back();

    if (IfdefEntry.Kind != tgtok::Ifdef) {
      PrintError(TokStart, "double #else");
      return ReturnError(IfdefEntry.SrcPos, "Previous #else is here");
    }

    // Replace the corresponding #ifdef's control with its negation
    // on the control stack.
    PrepIncludeStack.back()->pop_back();
    PrepIncludeStack.back()->push_back(
        {Kind, !IfdefEntry.IsDefined, SMLoc::getFromPointer(TokStart)});

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr, "Only comments are supported after #else");

    // If we were processing tokens before this #else,
    // we have to start skipping lines until the matching #endif.
    if (ReturnNextLiveToken) {
      if (prepSkipRegion(ReturnNextLiveToken))
        return LexToken();

      return tgtok::Error;
    }

    // Return to the lines skipping code.
    return Kind;
  } else if (Kind == tgtok::Endif) {
    // Check if this #endif is correct before calling prepSkipDirectiveEnd(),
    // which will move CurPtr away from the beginning of #endif.
    if (PrepIncludeStack.back()->empty())
      return ReturnError(TokStart, "#endif without #ifdef");

    auto &IfdefOrElseEntry = PrepIncludeStack.back()->back();

    if (IfdefOrElseEntry.Kind != tgtok::Ifdef &&
        IfdefOrElseEntry.Kind != tgtok::Else) {
      PrintFatalError("Invalid preprocessor control on the stack");
      return tgtok::Error;
    }

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr, "Only comments are supported after #endif");

    PrepIncludeStack.back()->pop_back();

    // If we were processing tokens before this #endif, then
    // we should continue it.
    if (ReturnNextLiveToken) {
      return LexToken();
    }

    // Return to the lines skipping code.
    return Kind;
  } else if (Kind == tgtok::Define) {
    StringRef MacroName = prepLexMacroName();
    if (MacroName.empty())
      return ReturnError(TokStart, "Expected macro name after #define");

    if (!DefinedMacros.insert(MacroName).second)
      PrintWarning(getLoc(),
                   "Duplicate definition of macro: " + Twine(MacroName));

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr,
                         "Only comments are supported after #define NAME");

    if (!ReturnNextLiveToken) {
      PrintFatalError("#define must be ignored during the lines skipping");
      return tgtok::Error;
    }

    return LexToken();
  }

  PrintFatalError("Preprocessing directive is not supported");
  return tgtok::Error;
}

/// prepSkipRegion - Skip lines of a disabled #ifdef/#else region until the
/// matching #else or #endif re-enables token processing. Nested directives
/// encountered while skipping are processed (with ReturnNextLiveToken=false)
/// so the control stack stays balanced. Returns true when processing is
/// re-enabled; false on error or if EOF is reached without a matching #endif.
/// \p MustNeverBeFalse documents that this is only called from live-token
/// processing; a false value is a fatal internal error.
bool TGLexer::prepSkipRegion(bool MustNeverBeFalse) {
  if (!MustNeverBeFalse)
    PrintFatalError("Invalid recursion.");

  do {
    // Skip all symbols to the line end.
    prepSkipToLineEnd();

    // Find the first non-whitespace symbol in the next line(s).
    if (!prepSkipLineBegin())
      return false;

    // If the first non-blank/comment symbol on the line is '#',
    // it may be a start of preprocessing directive.
    //
    // If it is not '#' just go to the next line.
    if (*CurPtr == '#')
      ++CurPtr;
    else
      continue;

    tgtok::TokKind Kind = prepIsDirective();

    // If we did not find a preprocessing directive or it is #define,
    // then just skip to the next line. We do not have to do anything
    // for #define in the line-skipping mode.
    if (Kind == tgtok::Error || Kind == tgtok::Define)
      continue;

    tgtok::TokKind ProcessedKind = lexPreprocessor(Kind, false);

    // If lexPreprocessor() encountered an error during lexing this
    // preprocessor idiom, then return false to the calling lexPreprocessor().
    // This will force tgtok::Error to be returned to the tokens processing.
    if (ProcessedKind == tgtok::Error)
      return false;

    if (Kind != ProcessedKind)
      PrintFatalError("prepIsDirective() and lexPreprocessor() "
                      "returned different token kinds");

    // If this preprocessing directive enables tokens processing,
    // then return to the lexPreprocessor() and get to the next token.
    // We can move from line-skipping mode to processing tokens only
    // due to #else or #endif.
    if (prepIsProcessingEnabled()) {
      if (Kind != tgtok::Else && Kind != tgtok::Endif) {
        PrintFatalError("Tokens processing was enabled by an unexpected "
                        "preprocessing directive");
        return false;
      }

      return true;
    }
  } while (CurPtr != CurBuf.end());

  // We have reached the end of the file, but never left the lines-skipping
  // mode. This means there is no matching #endif.
  prepReportPreprocessorStackError();
  return false;
}

/// prepLexMacroName - Skip horizontal whitespace, then lex a macro name
/// matching [a-zA-Z_][0-9a-zA-Z_]*. Returns an empty StringRef if no valid
/// name starts at the current position. TokStart is left at the name start.
StringRef TGLexer::prepLexMacroName() {
  // Skip whitespaces between the preprocessing directive and the macro name.
  while (*CurPtr == ' ' || *CurPtr == '\t')
    ++CurPtr;

  TokStart = CurPtr;
  // Macro names start with [a-zA-Z_].
  if (*CurPtr != '_' && !isalpha(*CurPtr))
    return "";

  // Match the rest of the identifier regex: [0-9a-zA-Z_]*
  while (isalpha(*CurPtr) || isdigit(*CurPtr) || *CurPtr == '_')
    ++CurPtr;

  return StringRef(TokStart, CurPtr - TokStart);
}

/// prepSkipLineBegin - In lines-skipping mode, advance CurPtr to the first
/// non-whitespace, non-C-comment character (possibly crossing several lines).
/// Returns false only if an unterminated C-style comment was found; reaching
/// the buffer end returns true so the caller handles EOF.
bool TGLexer::prepSkipLineBegin() {
  while (CurPtr != CurBuf.end()) {
    switch (*CurPtr) {
    case ' ':
    case '\t':
    case '\n':
    case '\r':
      break;

    case '/': {
      int NextChar = peekNextChar(1);
      if (NextChar == '*') {
        // Skip C-style comment.
        // Note that we do not care about skipping the C++-style comments.
        // If the line contains "//", it may not contain any processable
        // preprocessing directive. Just return CurPtr pointing to
        // the first '/' in this case. We also do not care about
        // incorrect symbols after the first '/' - we are in lines-skipping
        // mode, so incorrect code is allowed to some extent.

        // Set TokStart to the beginning of the comment to enable proper
        // diagnostic printing in case of error in SkipCComment().
        TokStart = CurPtr;

        // CurPtr must point to '*' before call to SkipCComment().
        ++CurPtr;
        if (SkipCComment())
          return false;
      } else {
        // CurPtr points to the non-whitespace '/'.
        return true;
      }

      // We must not increment CurPtr after the comment was lexed.
      continue;
    }

    default:
      return true;
    }

    ++CurPtr;
  }

  // We have reached the end of the file. Return to the lines skipping
  // code, and allow it to handle the EOF as needed.
  return true;
}

/// prepSkipDirectiveEnd - After a directive (and its macro name, if any) has
/// been lexed, verify that only whitespace and comments remain before the end
/// of the line. Returns false (with an error reported for unexpected
/// characters) if anything else follows; CurPtr is left at the newline or
/// buffer end on success.
bool TGLexer::prepSkipDirectiveEnd() {
  while (CurPtr != CurBuf.end()) {
    switch (*CurPtr) {
    case ' ':
    case '\t':
      break;

    case '\n':
    case '\r':
      return true;

    case '/': {
      int NextChar = peekNextChar(1);
      if (NextChar == '/') {
        // Skip C++-style comment.
        // We may just return true now, but let's skip to the line/buffer end
        // to simplify the method specification.
        ++CurPtr;
        SkipBCPLComment();
      } else if (NextChar == '*') {
        // When we are skipping C-style comment at the end of a preprocessing
        // directive, we can skip several lines. If any meaningful TD token
        // follows the end of the C-style comment on the same line, it will
        // be considered as an invalid usage of TD token.
        // For example, we want to forbid usages like this one:
        //     #define MACRO class Class {}
        // But with C-style comments we also disallow the following:
        //     #define MACRO /* This macro is used
        //                      to ... */ class Class {}
        // One can argue that this should be allowed, but it does not seem
        // to be worth of the complication. Moreover, this matches
        // the C preprocessor behavior.

        // Set TokStart to the beginning of the comment to enable proper
        // diagnostic printer in case of error in SkipCComment().
        TokStart = CurPtr;
        ++CurPtr;
        if (SkipCComment())
          return false;
      } else {
        TokStart = CurPtr;
        PrintError(CurPtr, "Unexpected character");
        return false;
      }

      // We must not increment CurPtr after the comment was lexed.
      continue;
    }

    default:
      // Do not allow any non-whitespaces after the directive.
      TokStart = CurPtr;
      return false;
    }

    ++CurPtr;
  }

  return true;
}

/// prepSkipToLineEnd - Advance CurPtr to the next newline character (or the
/// buffer end), without consuming the newline itself.
void TGLexer::prepSkipToLineEnd() {
  while (*CurPtr != '\n' && *CurPtr != '\r' && CurPtr != CurBuf.end())
    ++CurPtr;
}

/// prepIsProcessingEnabled - Return true if every #ifdef/#else region
/// currently open in this file is satisfied (i.e. tokens should be processed
/// rather than skipped).
bool TGLexer::prepIsProcessingEnabled() {
  for (auto I = PrepIncludeStack.back()->rbegin(),
            E = PrepIncludeStack.back()->rend();
       I != E; ++I) {
    if (!I->IsDefined)
      return false;
  }

  return true;
}

/// prepReportPreprocessorStackError - Report that EOF was reached while at
/// least one #ifdef/#else region in the current file is still open, pointing
/// at the most recent unmatched control. Fatal if the control stack is empty
/// (caller misuse).
void TGLexer::prepReportPreprocessorStackError() {
  if (PrepIncludeStack.back()->empty())
    PrintFatalError("prepReportPreprocessorStackError() called with "
                    "empty control stack");

  auto &PrepControl = PrepIncludeStack.back()->back();
  PrintError(CurBuf.end(), "Reached EOF without matching #endif");
  PrintError(PrepControl.SrcPos, "The latest preprocessor control is here");

  TokStart = CurPtr;
}