1 //===--- PPCaching.cpp - Handle caching lexed tokens ----------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file implements pieces of the Preprocessor interface that manage the 11 // caching of lexed tokens. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "clang/Lex/Preprocessor.h" 16 using namespace clang; 17 18 // EnableBacktrackAtThisPos - From the point that this method is called, and 19 // until CommitBacktrackedTokens() or Backtrack() is called, the Preprocessor 20 // keeps track of the lexed tokens so that a subsequent Backtrack() call will 21 // make the Preprocessor re-lex the same tokens. 22 // 23 // Nested backtracks are allowed, meaning that EnableBacktrackAtThisPos can 24 // be called multiple times and CommitBacktrackedTokens/Backtrack calls will 25 // be combined with the EnableBacktrackAtThisPos calls in reverse order. 26 void Preprocessor::EnableBacktrackAtThisPos() { 27 BacktrackPositions.push_back(CachedLexPos); 28 EnterCachingLexMode(); 29 } 30 31 // Disable the last EnableBacktrackAtThisPos call. 
void Preprocessor::CommitBacktrackedTokens() {
  assert(!BacktrackPositions.empty()
         && "EnableBacktrackAtThisPos was not called!");
  // Dropping the innermost backtrack position makes the tokens lexed since the
  // matching EnableBacktrackAtThisPos() call permanent: Backtrack() can no
  // longer rewind to that point.  The tokens themselves stay in CachedTokens
  // and are cleaned up lazily by CachingLex().
  BacktrackPositions.pop_back();
}

// Returns the range of cached tokens recorded since the innermost
// EnableBacktrackAtThisPos() call, as [Begin, End) indices into CachedTokens.
Preprocessor::CachedTokensRange Preprocessor::LastCachedTokenRange() {
  assert(isBacktrackEnabled());
  auto PrevCachedLexPos = BacktrackPositions.back();
  return CachedTokensRange{PrevCachedLexPos, CachedLexPos};
}

// Erase the given range of cached tokens.  If the current lexing position is
// at the start of the range (i.e. we have backtracked and are about to
// re-consume these tokens), the erasure is deferred via
// CachedTokenRangeToErase until CachingLex() has replayed the last token of
// the range; otherwise the tokens are erased immediately.
void Preprocessor::EraseCachedTokens(CachedTokensRange TokenRange) {
  assert(TokenRange.Begin <= TokenRange.End);
  if (CachedLexPos == TokenRange.Begin && TokenRange.Begin != TokenRange.End) {
    // We have backtracked to the start of the token range as we want to consume
    // them again. Erase the tokens only after consuming them.
    assert(!CachedTokenRangeToErase);
    CachedTokenRangeToErase = TokenRange;
    return;
  }
  // The cached tokens were committed, so they should be erased now.
  assert(TokenRange.End == CachedLexPos);
  CachedTokens.erase(CachedTokens.begin() + TokenRange.Begin,
                     CachedTokens.begin() + TokenRange.End);
  CachedLexPos = TokenRange.Begin;
  ExitCachingLexMode();
}

// Make Preprocessor re-lex the tokens that were lexed since
// EnableBacktrackAtThisPos() was previously called.
void Preprocessor::Backtrack() {
  assert(!BacktrackPositions.empty()
         && "EnableBacktrackAtThisPos was not called!");
  // Rewind the replay position to the innermost recorded backtrack point; the
  // tokens cached since then will be handed out again by CachingLex().
  CachedLexPos = BacktrackPositions.back();
  BacktrackPositions.pop_back();
  // After rewinding, make sure CurLexerKind matches the source we will
  // actually lex from next.
  recomputeCurLexerKind();
}

// Lex a token while in caching mode: replay the next cached token if one is
// pending; otherwise lex a fresh token from the underlying source and, when a
// backtrack scope is active, append it to the cache.
void Preprocessor::CachingLex(Token &Result) {
  if (!InCachingLexMode())
    return;

  if (CachedLexPos < CachedTokens.size()) {
    // Replay the next cached token.
    Result = CachedTokens[CachedLexPos++];
    // Erase some of the cached tokens after they are consumed when
    // asked to do so (see EraseCachedTokens()).
    if (CachedTokenRangeToErase &&
        CachedTokenRangeToErase->End == CachedLexPos) {
      EraseCachedTokens(*CachedTokenRangeToErase);
      CachedTokenRangeToErase = None;
    }
    return;
  }

  // No cached token is pending; leave caching mode so that Lex() dispatches to
  // the real lexer.
  ExitCachingLexMode();
  Lex(Result);

  if (isBacktrackEnabled()) {
    // Cache the lexed token so a later Backtrack() can replay it.
    EnterCachingLexMode();
    CachedTokens.push_back(Result);
    ++CachedLexPos;
    return;
  }

  if (CachedLexPos < CachedTokens.size()) {
    // The Lex() call above may have repopulated the cache; if so, keep
    // draining it in caching mode.
    EnterCachingLexMode();
  } else {
    // All cached tokens were consumed.
    CachedTokens.clear();
    CachedLexPos = 0;
  }
}

// Switch token retrieval over to the caching lexer.  Idempotent: entering
// while already in caching mode is a no-op.
void Preprocessor::EnterCachingLexMode() {
  if (InCachingLexMode()) {
    assert(CurLexerKind == CLK_CachingLexer && "Unexpected lexer kind");
    return;
  }

  // Push a new entry onto the include/macro lexer stack and route Lex()
  // through the caching lexer.
  PushIncludeMacroStack();
  CurLexerKind = CLK_CachingLexer;
}


// Lex ahead and cache enough tokens so that the N-th token past the current
// position is available, and return it.  Must only be called when the
// requested token is beyond what is already cached (see the assert).
const Token &Preprocessor::PeekAhead(unsigned N) {
  assert(CachedLexPos + N > CachedTokens.size() && "Confused caching.");
  // Temporarily lex from the real source to fill the cache to the requested
  // depth, then resume caching mode.
  ExitCachingLexMode();
  for (size_t C = CachedLexPos + N - CachedTokens.size(); C > 0; --C) {
    CachedTokens.push_back(Token());
    Lex(CachedTokens.back());
  }
  EnterCachingLexMode();
  return CachedTokens.back();
}

// Replace the run of cached tokens that Tok annotates with the single
// annotation token Tok itself.  Tok's location range (getLocation() ..
// getAnnotationEndLoc()) identifies the first and last cached token it
// subsumes; CachedLexPos is left pointing just past the annotation token.
void Preprocessor::AnnotatePreviousCachedTokens(const Token &Tok) {
  assert(Tok.isAnnotation() && "Expected annotation token");
  assert(CachedLexPos != 0 && "Expected to have some cached tokens");
  assert(CachedTokens[CachedLexPos-1].getLastLoc() == Tok.getAnnotationEndLoc()
         && "The annotation should be until the most recent cached token");

  // Start from the end of the cached tokens list and look for the token
  // that is the beginning of the annotation token.
  for (CachedTokensTy::size_type i = CachedLexPos; i != 0; --i) {
    CachedTokensTy::iterator AnnotBegin = CachedTokens.begin() + i-1;
    if (AnnotBegin->getLocation() == Tok.getLocation()) {
      assert((BacktrackPositions.empty() || BacktrackPositions.back() <= i) &&
             "The backtrack pos points inside the annotated tokens!");
      // Replace the cached tokens with the single annotation token.
      if (i < CachedLexPos)
        CachedTokens.erase(AnnotBegin + 1, CachedTokens.begin() + CachedLexPos);
      *AnnotBegin = Tok;
      CachedLexPos = i;
      return;
    }
  }
}

// Returns true if Tok appears to be the same token as the most recently
// cached one: same token kind and a zero relative offset between the two
// locations within the same source-location address space.
bool Preprocessor::IsPreviousCachedToken(const Token &Tok) const {
  // There's currently no cached token...
  if (!CachedLexPos)
    return false;

  const Token LastCachedTok = CachedTokens[CachedLexPos - 1];
  if (LastCachedTok.getKind() != Tok.getKind())
    return false;

  // The locations must live in the same SLoc address space and be at relative
  // offset 0, i.e. exactly the same location.
  int RelOffset = 0;
  if ((!getSourceManager().isInSameSLocAddrSpace(
          Tok.getLocation(), getLastCachedTokenLocation(), &RelOffset)) ||
      RelOffset)
    return false;

  return true;
}

// Replace the most recently consumed cached token (the one just before
// CachedLexPos) with the given token sequence, leaving CachedLexPos
// positioned just past the last replacement token.
void Preprocessor::ReplacePreviousCachedToken(ArrayRef<Token> NewToks) {
  assert(CachedLexPos != 0 && "Expected to have some cached tokens");
  // Insert the new tokens in front of the token being replaced, then erase the
  // replaced token, which now sits immediately after them.
  CachedTokens.insert(CachedTokens.begin() + CachedLexPos - 1, NewToks.begin(),
                      NewToks.end());
  CachedTokens.erase(CachedTokens.begin() + CachedLexPos - 1 + NewToks.size());
  CachedLexPos += NewToks.size() - 1;
}