//===- TGLexer.cpp - Lexer for TableGen -----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implement the Lexer for TableGen.
//
//===----------------------------------------------------------------------===//

#include "TGLexer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Config/config.h" // for strtoull()/strtoll() define
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/TableGen/Error.h"
#include <algorithm>
#include <cctype>
#include <cerrno>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>

using namespace llvm;

namespace {
// A list of supported preprocessing directives with their
// internal token kinds and names.
struct {
  tgtok::TokKind Kind;
  const char *Word;
} PreprocessorDirs[] = {
  { tgtok::Ifdef, "ifdef" },
  { tgtok::Ifndef, "ifndef" },
  { tgtok::Else, "else" },
  { tgtok::Endif, "endif" },
  { tgtok::Define, "define" }
};
} // end anonymous namespace

TGLexer::TGLexer(SourceMgr &SM, ArrayRef<std::string> Macros) : SrcMgr(SM) {
  CurBuffer = SrcMgr.getMainFileID();
  CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
  CurPtr = CurBuf.begin();
  TokStart = nullptr;

  // Pretend that we enter the "top-level" include file.
  PrepIncludeStack.push_back(
      std::make_unique<std::vector<PreprocessorControlDesc>>());

  // Put all macros defined on the command line into the DefinedMacros set.
  std::for_each(Macros.begin(), Macros.end(),
                [this](const std::string &MacroName) {
                  DefinedMacros.insert(MacroName);
                });
}

SMLoc TGLexer::getLoc() const {
  return SMLoc::getFromPointer(TokStart);
}

/// ReturnError - Set the error to the specified string at the specified
/// location. This is defined to always return tgtok::Error.
tgtok::TokKind TGLexer::ReturnError(SMLoc Loc, const Twine &Msg) {
  PrintError(Loc, Msg);
  return tgtok::Error;
}

tgtok::TokKind TGLexer::ReturnError(const char *Loc, const Twine &Msg) {
  return ReturnError(SMLoc::getFromPointer(Loc), Msg);
}

bool TGLexer::processEOF() {
  SMLoc ParentIncludeLoc = SrcMgr.getParentIncludeLoc(CurBuffer);
  if (ParentIncludeLoc != SMLoc()) {
    // If prepExitInclude() detects a problem with the preprocessing
    // control stack, it will return false. Pretend that we reached
    // the final EOF and stop lexing more tokens by returning false
    // to LexToken().
    if (!prepExitInclude(false))
      return false;

    CurBuffer = SrcMgr.FindBufferContainingLoc(ParentIncludeLoc);
    CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
    CurPtr = ParentIncludeLoc.getPointer();
    // Make sure TokStart points into the parent file's buffer.
    // LexToken() assigns to it before calling getNextChar(),
    // so it is pointing into the included file now.
    TokStart = CurPtr;
    return true;
  }

  // Pretend that we exit the "top-level" include file.
  // Note that in case of an error (e.g. control stack imbalance)
  // the routine will issue a fatal error.
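  // A typical example of such an imbalance: an #ifdef in the top-level file
  // that is never closed by a matching #endif before the end of the buffer.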
  prepExitInclude(true);
  return false;
}

int TGLexer::getNextChar() {
  char CurChar = *CurPtr++;
  switch (CurChar) {
  default:
    return (unsigned char)CurChar;
  case 0: {
    // A nul character in the stream is either the end of the current buffer or
    // a random nul in the file. Disambiguate that here.
    if (CurPtr-1 != CurBuf.end())
      return 0; // Just whitespace.

    // Otherwise, return end of file.
    --CurPtr; // Another call to lex will return EOF again.
    return EOF;
  }
  case '\n':
  case '\r':
    // Handle the newline character by ignoring it and incrementing the line
    // count. However, be careful about 'dos style' files with \n\r in them.
    // Only treat a \n\r or \r\n as a single line.
    if ((*CurPtr == '\n' || (*CurPtr == '\r')) &&
        *CurPtr != CurChar)
      ++CurPtr; // Eat the two char newline sequence.
    return '\n';
  }
}

int TGLexer::peekNextChar(int Index) const {
  return *(CurPtr + Index);
}

tgtok::TokKind TGLexer::LexToken(bool FileOrLineStart) {
  TokStart = CurPtr;
  // This always consumes at least one character.
  int CurChar = getNextChar();

  switch (CurChar) {
  default:
    // Handle letters: [a-zA-Z_]
    if (isalpha(CurChar) || CurChar == '_')
      return LexIdentifier();

    // Unknown character, emit an error.
    return ReturnError(TokStart, "Unexpected character");
  case EOF:
    // Lex next token, if we just left an include file.
    // Note that leaving an include file means that the next
    // symbol is located at the end of the 'include "..."'
    // construct, so LexToken() is called with default
    // false parameter.
    if (processEOF())
      return LexToken();

    // Return EOF denoting the end of lexing.
    return tgtok::Eof;

  case ':': return tgtok::colon;
  case ';': return tgtok::semi;
  case ',': return tgtok::comma;
  case '<': return tgtok::less;
  case '>': return tgtok::greater;
  case ']': return tgtok::r_square;
  case '{': return tgtok::l_brace;
  case '}': return tgtok::r_brace;
  case '(': return tgtok::l_paren;
  case ')': return tgtok::r_paren;
  case '=': return tgtok::equal;
  case '?': return tgtok::question;
  case '#':
    if (FileOrLineStart) {
      tgtok::TokKind Kind = prepIsDirective();
      if (Kind != tgtok::Error)
        return lexPreprocessor(Kind);
    }

    return tgtok::paste;

  // The period is a separate case so we can recognize the "..."
  // range punctuator.
  case '.':
    if (peekNextChar(0) == '.') {
      ++CurPtr; // Eat second dot.
      if (peekNextChar(0) == '.') {
        ++CurPtr; // Eat third dot.
        return tgtok::dotdotdot;
      }
      return ReturnError(TokStart, "Invalid '..' punctuation");
    }
    return tgtok::dot;

  case '\r':
    PrintFatalError("getNextChar() must never return '\r'");
    return tgtok::Error;

  case 0:
  case ' ':
  case '\t':
    // Ignore whitespace.
    return LexToken(FileOrLineStart);
  case '\n':
    // Ignore whitespace, and identify the new line.
    return LexToken(true);
  case '/':
    // If this is the start of a // comment, skip until the end of the line or
    // the end of the buffer.
    if (*CurPtr == '/')
      SkipBCPLComment();
    else if (*CurPtr == '*') {
      if (SkipCComment())
        return tgtok::Error;
    } else // Otherwise, this is an error.
      return ReturnError(TokStart, "Unexpected character");
    return LexToken(FileOrLineStart);
  case '-': case '+':
  case '0': case '1': case '2': case '3': case '4': case '5': case '6':
  case '7': case '8': case '9': {
    int NextChar = 0;
    if (isdigit(CurChar)) {
      // Allow identifiers to start with a number if it is followed by
      // an identifier. This can happen with paste operations like
      // foo#8i.
      int i = 0;
      do {
        NextChar = peekNextChar(i++);
      } while (isdigit(NextChar));

      if (NextChar == 'x' || NextChar == 'b') {
        // If this is [0-9]b[01] or [0-9]x[0-9a-fA-F] this is most
        // likely a number.
        int NextNextChar = peekNextChar(i);
        switch (NextNextChar) {
        default:
          break;
        case '0': case '1':
          if (NextChar == 'b')
            return LexNumber();
          LLVM_FALLTHROUGH;
        case '2': case '3': case '4': case '5':
        case '6': case '7': case '8': case '9':
        case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
        case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
          if (NextChar == 'x')
            return LexNumber();
          break;
        }
      }
    }

    if (isalpha(NextChar) || NextChar == '_')
      return LexIdentifier();

    return LexNumber();
  }
  case '"': return LexString();
  case '$': return LexVarName();
  case '[': return LexBracket();
  case '!': return LexExclaim();
  }
}

/// LexString - Lex "[^"]*"
tgtok::TokKind TGLexer::LexString() {
  const char *StrStart = CurPtr;

  CurStrVal = "";

  while (*CurPtr != '"') {
    // If we hit the end of the buffer, report an error.
    if (*CurPtr == 0 && CurPtr == CurBuf.end())
      return ReturnError(StrStart, "End of file in string literal");

    if (*CurPtr == '\n' || *CurPtr == '\r')
      return ReturnError(StrStart, "End of line in string literal");

    if (*CurPtr != '\\') {
      CurStrVal += *CurPtr++;
      continue;
    }

    ++CurPtr;

    switch (*CurPtr) {
    case '\\': case '\'': case '"':
      // These turn into their literal character.
      CurStrVal += *CurPtr++;
      break;
    case 't':
      CurStrVal += '\t';
      ++CurPtr;
      break;
    case 'n':
      CurStrVal += '\n';
      ++CurPtr;
      break;

    case '\n':
    case '\r':
      return ReturnError(CurPtr, "escaped newlines not supported in tblgen");

    // If we hit the end of the buffer, report an error.
    case '\0':
      if (CurPtr == CurBuf.end())
        return ReturnError(StrStart, "End of file in string literal");
      LLVM_FALLTHROUGH;
    default:
      return ReturnError(CurPtr, "invalid escape in string literal");
    }
  }

  ++CurPtr;
  return tgtok::StrVal;
}

tgtok::TokKind TGLexer::LexVarName() {
  if (!isalpha(CurPtr[0]) && CurPtr[0] != '_')
    return ReturnError(TokStart, "Invalid variable name");

  // Otherwise, we're ok, consume the rest of the characters.
  const char *VarNameStart = CurPtr++;

  while (isalpha(*CurPtr) || isdigit(*CurPtr) || *CurPtr == '_')
    ++CurPtr;

  CurStrVal.assign(VarNameStart, CurPtr);
  return tgtok::VarName;
}

tgtok::TokKind TGLexer::LexIdentifier() {
  // The first letter is [a-zA-Z_].
  const char *IdentStart = TokStart;

  // Match the rest of the identifier regex: [0-9a-zA-Z_]*
  while (isalpha(*CurPtr) || isdigit(*CurPtr) || *CurPtr == '_')
    ++CurPtr;

  // Check to see if this identifier is a reserved keyword.
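  // For example, "class", "def" and "int" map to keyword tokens in the
  // StringSwitch below, while identifiers such as "MyClass" or "opc2" fall
  // through to tgtok::Id (illustrative names, not tied to any .td file).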
  StringRef Str(IdentStart, CurPtr-IdentStart);

  tgtok::TokKind Kind = StringSwitch<tgtok::TokKind>(Str)
                            .Case("int", tgtok::Int)
                            .Case("bit", tgtok::Bit)
                            .Case("bits", tgtok::Bits)
                            .Case("string", tgtok::String)
                            .Case("list", tgtok::List)
                            .Case("code", tgtok::Code)
                            .Case("dag", tgtok::Dag)
                            .Case("class", tgtok::Class)
                            .Case("def", tgtok::Def)
                            .Case("true", tgtok::TrueVal)
                            .Case("false", tgtok::FalseVal)
                            .Case("foreach", tgtok::Foreach)
                            .Case("defm", tgtok::Defm)
                            .Case("defset", tgtok::Defset)
                            .Case("multiclass", tgtok::MultiClass)
                            .Case("field", tgtok::Field)
                            .Case("let", tgtok::Let)
                            .Case("in", tgtok::In)
                            .Case("defvar", tgtok::Defvar)
                            .Case("include", tgtok::Include)
                            .Case("if", tgtok::If)
                            .Case("then", tgtok::Then)
                            .Case("else", tgtok::ElseKW)
                            .Default(tgtok::Id);

  // A couple of tokens require special processing.
  switch (Kind) {
  case tgtok::Include:
    if (LexInclude()) return tgtok::Error;
    return Lex();
  case tgtok::Id:
    CurStrVal.assign(Str.begin(), Str.end());
    break;
  default:
    break;
  }

  return Kind;
}

/// LexInclude - We just read the "include" token. Get the string token that
/// comes next and enter the include.
bool TGLexer::LexInclude() {
  // The token after the include must be a string.
  tgtok::TokKind Tok = LexToken();
  if (Tok == tgtok::Error) return true;
  if (Tok != tgtok::StrVal) {
    PrintError(getLoc(), "Expected filename after include");
    return true;
  }

  // Get the string.
  std::string Filename = CurStrVal;
  std::string IncludedFile;

  CurBuffer = SrcMgr.AddIncludeFile(Filename, SMLoc::getFromPointer(CurPtr),
                                    IncludedFile);
  if (!CurBuffer) {
    PrintError(getLoc(), "Could not find include file '" + Filename + "'");
    return true;
  }

  Dependencies.insert(IncludedFile);
  // Switch to the included file's buffer and start lexing from its beginning.
  CurBuf = SrcMgr.getMemoryBuffer(CurBuffer)->getBuffer();
  CurPtr = CurBuf.begin();

  PrepIncludeStack.push_back(
      std::make_unique<std::vector<PreprocessorControlDesc>>());
  return false;
}

void TGLexer::SkipBCPLComment() {
  ++CurPtr; // skip the second slash.
  while (true) {
    switch (*CurPtr) {
    case '\n':
    case '\r':
      return; // Newline is end of comment.
    case 0:
      // If this is the end of the buffer, end the comment.
      if (CurPtr == CurBuf.end())
        return;
      break;
    }
    // Otherwise, skip the character.
    ++CurPtr;
  }
}

/// SkipCComment - This skips C-style /**/ comments. The only difference from C
/// is that we allow nesting.
bool TGLexer::SkipCComment() {
  ++CurPtr; // skip the star.
  unsigned CommentDepth = 1;

  while (true) {
    int CurChar = getNextChar();
    switch (CurChar) {
    case EOF:
      PrintError(TokStart, "Unterminated comment!");
      return true;
    case '*':
      // End of the comment?
      if (CurPtr[0] != '/') break;

      ++CurPtr; // End the */.
      if (--CommentDepth == 0)
        return false;
      break;
    case '/':
      // Start of a nested comment?
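      // For example, in "/* outer /* inner */ still outer */" the second "/*"
      // raises CommentDepth to 2, so the first "*/" does not end the comment
      // (an illustrative sketch of the nesting rule, not from a real .td file).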
      if (CurPtr[0] != '*') break;
      ++CurPtr;
      ++CommentDepth;
      break;
    }
  }
}

/// LexNumber - Lex:
///    [-+]?[0-9]+
///    0x[0-9a-fA-F]+
///    0b[01]+
tgtok::TokKind TGLexer::LexNumber() {
  if (CurPtr[-1] == '0') {
    if (CurPtr[0] == 'x') {
      ++CurPtr;
      const char *NumStart = CurPtr;
      while (isxdigit(CurPtr[0]))
        ++CurPtr;

      // Requires at least one hex digit.
      if (CurPtr == NumStart)
        return ReturnError(TokStart, "Invalid hexadecimal number");

      errno = 0;
      CurIntVal = strtoll(NumStart, nullptr, 16);
      if (errno == EINVAL)
        return ReturnError(TokStart, "Invalid hexadecimal number");
      if (errno == ERANGE) {
        errno = 0;
        CurIntVal = (int64_t)strtoull(NumStart, nullptr, 16);
        if (errno == EINVAL)
          return ReturnError(TokStart, "Invalid hexadecimal number");
        if (errno == ERANGE)
          return ReturnError(TokStart, "Hexadecimal number out of range");
      }
      return tgtok::IntVal;
    } else if (CurPtr[0] == 'b') {
      ++CurPtr;
      const char *NumStart = CurPtr;
      while (CurPtr[0] == '0' || CurPtr[0] == '1')
        ++CurPtr;

      // Requires at least one binary digit.
      if (CurPtr == NumStart)
        return ReturnError(CurPtr-2, "Invalid binary number");
      CurIntVal = strtoll(NumStart, nullptr, 2);
      return tgtok::BinaryIntVal;
    }
  }

  // Check for a sign without a digit.
  if (!isdigit(CurPtr[0])) {
    if (CurPtr[-1] == '-')
      return tgtok::minus;
    else if (CurPtr[-1] == '+')
      return tgtok::plus;
  }

  while (isdigit(CurPtr[0]))
    ++CurPtr;
  CurIntVal = strtoll(TokStart, nullptr, 10);
  return tgtok::IntVal;
}

/// LexBracket - We just read '['. If this is a code block, return it,
/// otherwise return the bracket. Match: '[' and '[{ ( [^}]+ | }[^]] )* }]'
tgtok::TokKind TGLexer::LexBracket() {
  if (CurPtr[0] != '{')
    return tgtok::l_square;
  ++CurPtr;
  const char *CodeStart = CurPtr;
  while (true) {
    int Char = getNextChar();
    if (Char == EOF) break;

    if (Char != '}') continue;

    Char = getNextChar();
    if (Char == EOF) break;
    if (Char == ']') {
      CurStrVal.assign(CodeStart, CurPtr-2);
      return tgtok::CodeFragment;
    }
  }

  return ReturnError(CodeStart - 2, "Unterminated code block");
}

/// LexExclaim - Lex '!' and '![a-zA-Z]+'.
tgtok::TokKind TGLexer::LexExclaim() {
  if (!isalpha(*CurPtr))
    return ReturnError(CurPtr - 1, "Invalid \"!operator\"");

  const char *Start = CurPtr++;
  while (isalpha(*CurPtr))
    ++CurPtr;

  // Check to see which operator this is.
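  // Illustrative inputs that reach this point: "!eq", "!add", "!strconcat",
  // "!foreach". Start..CurPtr covers the word after '!', and anything not
  // listed below (e.g. "!bogus") lexes as an "Unknown operator" error.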
  tgtok::TokKind Kind =
      StringSwitch<tgtok::TokKind>(StringRef(Start, CurPtr - Start))
          .Case("eq", tgtok::XEq)
          .Case("ne", tgtok::XNe)
          .Case("le", tgtok::XLe)
          .Case("lt", tgtok::XLt)
          .Case("ge", tgtok::XGe)
          .Case("gt", tgtok::XGt)
          .Case("if", tgtok::XIf)
          .Case("cond", tgtok::XCond)
          .Case("isa", tgtok::XIsA)
          .Case("head", tgtok::XHead)
          .Case("tail", tgtok::XTail)
          .Case("size", tgtok::XSize)
          .Case("con", tgtok::XConcat)
          .Case("dag", tgtok::XDag)
          .Case("add", tgtok::XADD)
          .Case("sub", tgtok::XSUB)
          .Case("mul", tgtok::XMUL)
          .Case("not", tgtok::XNOT)
          .Case("and", tgtok::XAND)
          .Case("or", tgtok::XOR)
          .Case("xor", tgtok::XXOR)
          .Case("shl", tgtok::XSHL)
          .Case("sra", tgtok::XSRA)
          .Case("srl", tgtok::XSRL)
          .Case("cast", tgtok::XCast)
          .Case("empty", tgtok::XEmpty)
          .Case("subst", tgtok::XSubst)
          .Case("foldl", tgtok::XFoldl)
          .Case("foreach", tgtok::XForEach)
          .Case("filter", tgtok::XFilter)
          .Case("listconcat", tgtok::XListConcat)
          .Case("listsplat", tgtok::XListSplat)
          .Case("strconcat", tgtok::XStrConcat)
          .Case("interleave", tgtok::XInterleave)
          .Cases("setdagop", "setop", tgtok::XSetDagOp) // !setop is deprecated.
          .Cases("getdagop", "getop", tgtok::XGetDagOp) // !getop is deprecated.
          .Default(tgtok::Error);

  return Kind != tgtok::Error ? Kind : ReturnError(Start-1, "Unknown operator");
}

bool TGLexer::prepExitInclude(bool IncludeStackMustBeEmpty) {
  // Report an error if the preprocessor control stack for the current
  // file is not empty.
  if (!PrepIncludeStack.back()->empty()) {
    prepReportPreprocessorStackError();

    return false;
  }

  // Pop the preprocessing controls from the include stack.
  if (PrepIncludeStack.empty()) {
    PrintFatalError("Preprocessor include stack is empty");
  }

  PrepIncludeStack.pop_back();

  if (IncludeStackMustBeEmpty) {
    if (!PrepIncludeStack.empty())
      PrintFatalError("Preprocessor include stack is not empty");
  } else {
    if (PrepIncludeStack.empty())
      PrintFatalError("Preprocessor include stack is empty");
  }

  return true;
}

tgtok::TokKind TGLexer::prepIsDirective() const {
  for (unsigned ID = 0; ID < llvm::array_lengthof(PreprocessorDirs); ++ID) {
    int NextChar = *CurPtr;
    bool Match = true;
    unsigned I = 0;
    for (; I < strlen(PreprocessorDirs[ID].Word); ++I) {
      if (NextChar != PreprocessorDirs[ID].Word[I]) {
        Match = false;
        break;
      }

      NextChar = peekNextChar(I + 1);
    }

    // Check for whitespace after the directive. If there is no whitespace,
    // then we do not recognize it as a preprocessing directive.
    if (Match) {
      tgtok::TokKind Kind = PreprocessorDirs[ID].Kind;

      // New line and EOF may follow only #else/#endif. It will be reported
      // as an error for #ifdef/#define after the call to prepLexMacroName().
      if (NextChar == ' ' || NextChar == '\t' || NextChar == EOF ||
          NextChar == '\n' ||
          // It looks like TableGen does not support '\r' as the actual
          // carriage return, e.g. getNextChar() treats a single '\r'
          // as '\n'. So we do the same here.
          NextChar == '\r')
        return Kind;

      // Allow comments after some directives, e.g.:
      //     #else// OR #else/**/
      //     #endif// OR #endif/**/
      //
      // Note that we do allow comments after #ifdef/#define here, e.g.
      //     #ifdef/**/ AND #ifdef//
      //     #define/**/ AND #define//
      //
      // These cases will be reported as incorrect after calling
      // prepLexMacroName(). We could have supported C-style comments
      // after #ifdef/#define, but this would complicate the code
      // for little benefit.
      if (NextChar == '/') {
        NextChar = peekNextChar(I + 1);

        if (NextChar == '*' || NextChar == '/')
          return Kind;

        // Pretend that we do not recognize the directive.
      }
    }
  }

  return tgtok::Error;
}

bool TGLexer::prepEatPreprocessorDirective(tgtok::TokKind Kind) {
  TokStart = CurPtr;

  for (unsigned ID = 0; ID < llvm::array_lengthof(PreprocessorDirs); ++ID)
    if (PreprocessorDirs[ID].Kind == Kind) {
      // Advance CurPtr to the end of the preprocessing word.
      CurPtr += strlen(PreprocessorDirs[ID].Word);
      return true;
    }

  PrintFatalError("Unsupported preprocessing token in "
                  "prepEatPreprocessorDirective()");
  return false;
}

tgtok::TokKind TGLexer::lexPreprocessor(
    tgtok::TokKind Kind, bool ReturnNextLiveToken) {

  // We must be looking at a preprocessing directive. Eat it!
  if (!prepEatPreprocessorDirective(Kind))
    PrintFatalError("lexPreprocessor() called for unknown "
                    "preprocessor directive");

  if (Kind == tgtok::Ifdef || Kind == tgtok::Ifndef) {
    StringRef MacroName = prepLexMacroName();
    StringRef IfTokName = Kind == tgtok::Ifdef ? "#ifdef" : "#ifndef";
    if (MacroName.empty())
      return ReturnError(TokStart, "Expected macro name after " + IfTokName);

    bool MacroIsDefined = DefinedMacros.count(MacroName) != 0;

    // Canonicalize ifndef to ifdef equivalent
    if (Kind == tgtok::Ifndef) {
      MacroIsDefined = !MacroIsDefined;
      Kind = tgtok::Ifdef;
    }

    // Regardless of whether we are processing tokens or not,
    // we put the #ifdef control on the stack.
    PrepIncludeStack.back()->push_back(
        {Kind, MacroIsDefined, SMLoc::getFromPointer(TokStart)});

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr, "Only comments are supported after " +
                                     IfTokName + " NAME");

    // If we were not processing tokens before this #ifdef,
    // then just return back to the lines skipping code.
    if (!ReturnNextLiveToken)
      return Kind;

    // If we were processing tokens before this #ifdef,
    // and the macro is defined, then just return the next token.
    if (MacroIsDefined)
      return LexToken();

    // We were processing tokens before this #ifdef, and the macro
    // is not defined, so we have to start skipping the lines.
    // If the skipping is successful, it will return the token following
    // either #else or #endif corresponding to this #ifdef.
    if (prepSkipRegion(ReturnNextLiveToken))
      return LexToken();

    return tgtok::Error;
  } else if (Kind == tgtok::Else) {
    // Check if this #else is correct before calling prepSkipDirectiveEnd(),
    // which will move CurPtr away from the beginning of #else.
    if (PrepIncludeStack.back()->empty())
      return ReturnError(TokStart, "#else without #ifdef or #ifndef");

    PreprocessorControlDesc IfdefEntry = PrepIncludeStack.back()->back();

    if (IfdefEntry.Kind != tgtok::Ifdef) {
      PrintError(TokStart, "double #else");
      return ReturnError(IfdefEntry.SrcPos, "Previous #else is here");
    }

    // Replace the corresponding #ifdef's control with its negation
    // on the control stack.
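    // For example, if the #ifdef on top of the stack was pushed with
    // IsDefined == false (its macro was not defined), the entry becomes
    // {Else, IsDefined == true}, so the #else branch is the live one.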
    PrepIncludeStack.back()->pop_back();
    PrepIncludeStack.back()->push_back(
        {Kind, !IfdefEntry.IsDefined, SMLoc::getFromPointer(TokStart)});

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr, "Only comments are supported after #else");

    // If we were processing tokens before this #else,
    // we have to start skipping lines until the matching #endif.
    if (ReturnNextLiveToken) {
      if (prepSkipRegion(ReturnNextLiveToken))
        return LexToken();

      return tgtok::Error;
    }

    // Return to the lines skipping code.
    return Kind;
  } else if (Kind == tgtok::Endif) {
    // Check if this #endif is correct before calling prepSkipDirectiveEnd(),
    // which will move CurPtr away from the beginning of #endif.
    if (PrepIncludeStack.back()->empty())
      return ReturnError(TokStart, "#endif without #ifdef");

    auto &IfdefOrElseEntry = PrepIncludeStack.back()->back();

    if (IfdefOrElseEntry.Kind != tgtok::Ifdef &&
        IfdefOrElseEntry.Kind != tgtok::Else) {
      PrintFatalError("Invalid preprocessor control on the stack");
      return tgtok::Error;
    }

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr, "Only comments are supported after #endif");

    PrepIncludeStack.back()->pop_back();

    // If we were processing tokens before this #endif, then
    // we should continue doing so.
    if (ReturnNextLiveToken) {
      return LexToken();
    }

    // Return to the lines skipping code.
    return Kind;
  } else if (Kind == tgtok::Define) {
    StringRef MacroName = prepLexMacroName();
    if (MacroName.empty())
      return ReturnError(TokStart, "Expected macro name after #define");

    if (!DefinedMacros.insert(MacroName).second)
      PrintWarning(getLoc(),
                   "Duplicate definition of macro: " + Twine(MacroName));

    if (!prepSkipDirectiveEnd())
      return ReturnError(CurPtr,
                         "Only comments are supported after #define NAME");

    if (!ReturnNextLiveToken) {
      PrintFatalError("#define must be ignored during the lines skipping");
      return tgtok::Error;
    }

    return LexToken();
  }

  PrintFatalError("Preprocessing directive is not supported");
  return tgtok::Error;
}

bool TGLexer::prepSkipRegion(bool MustNeverBeFalse) {
  if (!MustNeverBeFalse)
    PrintFatalError("Invalid recursion.");

  do {
    // Skip all symbols to the line end.
    prepSkipToLineEnd();

    // Find the first non-whitespace symbol in the next line(s).
    if (!prepSkipLineBegin())
      return false;

    // If the first non-blank/comment symbol on the line is '#',
    // it may be a start of preprocessing directive.
    //
    // If it is not '#' just go to the next line.
    if (*CurPtr == '#')
      ++CurPtr;
    else
      continue;

    tgtok::TokKind Kind = prepIsDirective();

    // If we did not find a preprocessing directive or it is #define,
    // then just skip to the next line. We do not have to do anything
    // for #define in the line-skipping mode.
    if (Kind == tgtok::Error || Kind == tgtok::Define)
      continue;

    tgtok::TokKind ProcessedKind = lexPreprocessor(Kind, false);

    // If lexPreprocessor() encountered an error during lexing this
    // preprocessor directive, then return false to the calling
    // lexPreprocessor(). This will force tgtok::Error to be returned
    // to the tokens processing.
    if (ProcessedKind == tgtok::Error)
      return false;

    if (Kind != ProcessedKind)
      PrintFatalError("prepIsDirective() and lexPreprocessor() "
                      "returned different token kinds");

    // If this preprocessing directive enables tokens processing,
    // then return to the lexPreprocessor() and get to the next token.
    // We can move from line-skipping mode to processing tokens only
    // due to #else or #endif.
    if (prepIsProcessingEnabled()) {
      if (Kind != tgtok::Else && Kind != tgtok::Endif) {
        PrintFatalError("Tokens processing was enabled by an unexpected "
                        "preprocessing directive");
        return false;
      }

      return true;
    }
  } while (CurPtr != CurBuf.end());

  // We have reached the end of the file, but never left the lines-skipping
  // mode. This means there is no matching #endif.
  prepReportPreprocessorStackError();
  return false;
}

StringRef TGLexer::prepLexMacroName() {
  // Skip whitespace between the preprocessing directive and the macro name.
  while (*CurPtr == ' ' || *CurPtr == '\t')
    ++CurPtr;

  TokStart = CurPtr;
  // Macro names start with [a-zA-Z_].
  if (*CurPtr != '_' && !isalpha(*CurPtr))
    return "";

  // Match the rest of the identifier regex: [0-9a-zA-Z_]*
  while (isalpha(*CurPtr) || isdigit(*CurPtr) || *CurPtr == '_')
    ++CurPtr;

  return StringRef(TokStart, CurPtr - TokStart);
}

bool TGLexer::prepSkipLineBegin() {
  while (CurPtr != CurBuf.end()) {
    switch (*CurPtr) {
    case ' ':
    case '\t':
    case '\n':
    case '\r':
      break;

    case '/': {
      int NextChar = peekNextChar(1);
      if (NextChar == '*') {
        // Skip C-style comment.
        // Note that we do not care about skipping the C++-style comments.
        // If the line contains "//", it may not contain any processable
        // preprocessing directive. Just return CurPtr pointing to
        // the first '/' in this case. We also do not care about
        // incorrect symbols after the first '/' - we are in lines-skipping
        // mode, so incorrect code is allowed to some extent.

        // Set TokStart to the beginning of the comment to enable proper
        // diagnostic printing in case of error in SkipCComment().
        TokStart = CurPtr;

        // CurPtr must point to '*' before call to SkipCComment().
        ++CurPtr;
        if (SkipCComment())
          return false;
      } else {
        // CurPtr points to the non-whitespace '/'.
        return true;
      }

      // We must not increment CurPtr after the comment was lexed.
      continue;
    }

    default:
      return true;
    }

    ++CurPtr;
  }

  // We have reached the end of the file. Return to the lines skipping
  // code, and allow it to handle the EOF as needed.
  return true;
}

bool TGLexer::prepSkipDirectiveEnd() {
  while (CurPtr != CurBuf.end()) {
    switch (*CurPtr) {
    case ' ':
    case '\t':
      break;

    case '\n':
    case '\r':
      return true;

    case '/': {
      int NextChar = peekNextChar(1);
      if (NextChar == '/') {
        // Skip C++-style comment.
        // We may just return true now, but let's skip to the line/buffer end
        // to simplify the method specification.
        ++CurPtr;
        SkipBCPLComment();
      } else if (NextChar == '*') {
        // When we are skipping a C-style comment at the end of a preprocessing
        // directive, we can skip several lines. If any meaningful TD token
        // follows the end of the C-style comment on the same line, it will
        // be considered as an invalid usage of a TD token.
        // For example, we want to forbid usages like this one:
        //     #define MACRO class Class {}
        // But with C-style comments we also disallow the following:
        //     #define MACRO /* This macro is used
        //                      to ... */ class Class {}
        // One can argue that this should be allowed, but it does not seem
        // to be worth the complication. Moreover, this matches
        // the C preprocessor behavior.

        // Set TokStart to the beginning of the comment to enable proper
        // diagnostic printing in case of error in SkipCComment().
        TokStart = CurPtr;
        ++CurPtr;
        if (SkipCComment())
          return false;
      } else {
        TokStart = CurPtr;
        PrintError(CurPtr, "Unexpected character");
        return false;
      }

      // We must not increment CurPtr after the comment was lexed.
      continue;
    }

    default:
      // Do not allow any non-whitespace characters after the directive.
      TokStart = CurPtr;
      return false;
    }

    ++CurPtr;
  }

  return true;
}

void TGLexer::prepSkipToLineEnd() {
  while (*CurPtr != '\n' && *CurPtr != '\r' && CurPtr != CurBuf.end())
    ++CurPtr;
}

bool TGLexer::prepIsProcessingEnabled() {
  for (auto I = PrepIncludeStack.back()->rbegin(),
            E = PrepIncludeStack.back()->rend();
       I != E; ++I) {
    if (!I->IsDefined)
      return false;
  }

  return true;
}

void TGLexer::prepReportPreprocessorStackError() {
  if (PrepIncludeStack.back()->empty())
    PrintFatalError("prepReportPreprocessorStackError() called with "
                    "empty control stack");

  auto &PrepControl = PrepIncludeStack.back()->back();
  PrintError(CurBuf.end(), "Reached EOF without matching #endif");
  PrintError(PrepControl.SrcPos, "The latest preprocessor control is here");

  TokStart = CurPtr;
}