//===--- SourceCode.cpp - Source code manipulation routines -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides functions that simplify extraction of source code.
//
//===----------------------------------------------------------------------===//
#include "clang/Tooling/Transformer/SourceCode.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Comment.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
#include "llvm/Support/Errc.h"
#include <set>

using namespace clang;

using llvm::errc;
using llvm::StringError;

StringRef clang::tooling::getText(CharSourceRange Range,
                                  const ASTContext &Context) {
  return Lexer::getSourceText(Range, Context.getSourceManager(),
                              Context.getLangOpts());
}

CharSourceRange clang::tooling::maybeExtendRange(CharSourceRange Range,
                                                 tok::TokenKind Next,
                                                 ASTContext &Context) {
  Optional<Token> Tok = Lexer::findNextToken(
      Range.getEnd(), Context.getSourceManager(), Context.getLangOpts());
  if (!Tok || !Tok->is(Next))
    return Range;
  return CharSourceRange::getTokenRange(Range.getBegin(), Tok->getLocation());
}

llvm::Error clang::tooling::validateEditRange(const CharSourceRange &Range,
                                              const SourceManager &SM) {
  if (Range.isInvalid())
    return llvm::make_error<StringError>(errc::invalid_argument,
                                         "Invalid range");

  if (Range.getBegin().isMacroID() || Range.getEnd().isMacroID())
    return llvm::make_error<StringError>(
        errc::invalid_argument, "Range starts or ends in a macro expansion");

  if (SM.isInSystemHeader(Range.getBegin()) ||
      SM.isInSystemHeader(Range.getEnd()))
    return llvm::make_error<StringError>(errc::invalid_argument,
                                         "Range is in system header");

  std::pair<FileID, unsigned> BeginInfo = SM.getDecomposedLoc(Range.getBegin());
  std::pair<FileID, unsigned> EndInfo = SM.getDecomposedLoc(Range.getEnd());
  if (BeginInfo.first != EndInfo.first)
    return llvm::make_error<StringError>(
        errc::invalid_argument, "Range begins and ends in different files");

  if (BeginInfo.second > EndInfo.second)
    return llvm::make_error<StringError>(
        errc::invalid_argument, "Range's begin is past its end");

  return llvm::Error::success();
}

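// getRangeForEdit below combines Lexer::makeFileCharRange() with
// validateEditRange() to map a range over the AST onto a contiguous, editable
// range in a single (non-system) file, returning None when no such range
// exists. As an illustrative sketch only (the expression `E`, the surrounding
// `Context`, and the helper `applyEdit` are assumed names, not defined in
// this file), a caller might use it like so:
//
//   CharSourceRange Tokens =
//       CharSourceRange::getTokenRange(E->getSourceRange());
//   if (Optional<CharSourceRange> Editable = clang::tooling::getRangeForEdit(
//           Tokens, Context.getSourceManager(), Context.getLangOpts()))
//     applyEdit(*Editable);
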
llvm::Optional<CharSourceRange>
clang::tooling::getRangeForEdit(const CharSourceRange &EditRange,
                                const SourceManager &SM,
                                const LangOptions &LangOpts) {
  // FIXME: makeFileCharRange() has the disadvantage of stripping off "identity"
  // macros. For example, if we're looking to rewrite the int literal 3 to 6,
  // and we have the following definition:
  //   #define DO_NOTHING(x) x
  // then
  //   foo(DO_NOTHING(3))
  // will be rewritten to
  //   foo(6)
  // rather than the arguably better
  //   foo(DO_NOTHING(6))
  // Decide whether the current behavior is desirable and modify if not.
  CharSourceRange Range = Lexer::makeFileCharRange(EditRange, SM, LangOpts);
  bool IsInvalid = llvm::errorToBool(validateEditRange(Range, SM));
  if (IsInvalid)
    return llvm::None;
  return Range;
}

static bool startsWithNewline(const SourceManager &SM, const Token &Tok) {
  return isVerticalWhitespace(SM.getCharacterData(Tok.getLocation())[0]);
}

static bool contains(const std::set<tok::TokenKind> &Terminators,
                     const Token &Tok) {
  return Terminators.count(Tok.getKind()) > 0;
}

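// A small, informal example of what getEntityEndLoc below computes (the
// snippet is assumed, not taken from any particular caller): if the entity's
// last token is the `3` in
//   int x = 3;  // trailing note
// and `Terminators` contains tok::semi, the returned location lies just past
// the newline, so the semicolon, the trailing comment, and the line break are
// all covered by the entity's extent.
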
// Returns the exclusive, *file* end location of the entity whose last token is
// at location 'EntityLast'. That is, it returns the location one past the last
// relevant character.
//
// Associated tokens include comments, horizontal whitespace and 'Terminators'
// -- optional tokens, which, if any are found, will be included; if
// 'Terminators' is empty, we will not include any extra tokens beyond comments
// and horizontal whitespace.
static SourceLocation
getEntityEndLoc(const SourceManager &SM, SourceLocation EntityLast,
                const std::set<tok::TokenKind> &Terminators,
                const LangOptions &LangOpts) {
  assert(EntityLast.isValid() && "Invalid end location found.");

  // We remember the last location of a non-horizontal-whitespace token we have
  // lexed; this is the location up to which we will want to delete.
  // FIXME: Support using the spelling loc here for cases where we want to
  // analyze the macro text.

  CharSourceRange ExpansionRange = SM.getExpansionRange(EntityLast);
  // FIXME: Should check isTokenRange(), for the (rare) case that
  // `ExpansionRange` is a character range.
  std::unique_ptr<Lexer> Lexer = [&]() {
    bool Invalid = false;
    auto FileOffset = SM.getDecomposedLoc(ExpansionRange.getEnd());
    llvm::StringRef File = SM.getBufferData(FileOffset.first, &Invalid);
    assert(!Invalid && "Cannot get file/offset");
    return std::make_unique<clang::Lexer>(
        SM.getLocForStartOfFile(FileOffset.first), LangOpts, File.begin(),
        File.data() + FileOffset.second, File.end());
  }();

  // Tell Lexer to return whitespace as pseudo-tokens (kind is tok::unknown).
  Lexer->SetKeepWhitespaceMode(true);

  // Generally, the code we want to include looks like this ([] are optional).
  // If Terminators is empty:
  //   [ <comment> ] [ <newline> ]
  // Otherwise:
  //   ... <terminator> [ <comment> ] [ <newline> ]

  Token Tok;
  bool Terminated = false;

  // First, lex to the current token (which is the last token of the range that
  // is definitely associated with the decl). Then, we process the first token
  // separately from the rest based on conditions that hold specifically for
  // that first token.
  //
  // We do not search for a terminator if none is required or we've already
  // encountered it. Otherwise, if the original `EntityLast` location was in a
  // macro expansion, we don't have visibility into the text, so we assume we've
  // already terminated. However, we note this assumption with
  // `TerminatedByMacro`, because we'll want to handle it somewhat differently
  // for the terminators semicolon and comma. These terminators can be safely
  // associated with the entity when they appear after the macro -- extra
  // semicolons have no effect on the program and a well-formed program won't
  // have multiple commas in a row, so we're guaranteed that there is only one.
  //
  // FIXME: This handling of macros is more conservative than necessary. When
  // the end of the expansion coincides with the end of the node, we can still
  // safely analyze the code. But, it is more complicated, because we need to
  // start by lexing the spelling loc for the first token and then switch to the
  // expansion loc.
  bool TerminatedByMacro = false;
  Lexer->LexFromRawLexer(Tok);
  if (Terminators.empty() || contains(Terminators, Tok))
    Terminated = true;
  else if (EntityLast.isMacroID()) {
    Terminated = true;
    TerminatedByMacro = true;
  }

  // We save the most recent candidate for the exclusive end location.
  SourceLocation End = Tok.getEndLoc();

  while (!Terminated) {
    // Lex the next token we want to possibly expand the range with.
    Lexer->LexFromRawLexer(Tok);

    switch (Tok.getKind()) {
    case tok::eof:
    // Unexpected separators.
    case tok::l_brace:
    case tok::r_brace:
    case tok::comma:
      return End;
    // Whitespace pseudo-tokens.
    case tok::unknown:
      if (startsWithNewline(SM, Tok))
        // Include at least until the end of the line.
        End = Tok.getEndLoc();
      break;
    default:
      if (contains(Terminators, Tok))
        Terminated = true;
      End = Tok.getEndLoc();
      break;
    }
  }

  do {
    // Lex the next token we want to possibly expand the range with.
    Lexer->LexFromRawLexer(Tok);

    switch (Tok.getKind()) {
    case tok::unknown:
      if (startsWithNewline(SM, Tok))
        // We're done, but include this newline.
        return Tok.getEndLoc();
      break;
    case tok::comment:
      // Include any comments we find on the way.
      End = Tok.getEndLoc();
      break;
    case tok::semi:
    case tok::comma:
      if (TerminatedByMacro && contains(Terminators, Tok)) {
        End = Tok.getEndLoc();
        // We've found a real terminator.
        TerminatedByMacro = false;
        break;
      }
      // Found an unrelated token; stop and don't include it.
      return End;
    default:
      // Found an unrelated token; stop and don't include it.
      return End;
    }
  } while (true);
}

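// A short illustration of why more than one terminator kind may be expected
// for a single declaration (the snippet is assumed): in
//   int a, b;
// the VarDecl for `a` is followed by a comma while the VarDecl for `b` is
// followed by the semicolon, so getTerminators below returns both tok::comma
// and tok::semi for variable declarations.
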
// Returns the expected terminator tokens for the given declaration.
//
// If we do not know the correct terminator token, returns an empty set.
//
// There are cases where we have more than one possible terminator (for example,
// we find either a comma or a semicolon after a VarDecl).
static std::set<tok::TokenKind> getTerminators(const Decl &D) {
  if (llvm::isa<RecordDecl>(D) || llvm::isa<UsingDecl>(D))
    return {tok::semi};

  if (llvm::isa<FunctionDecl>(D) || llvm::isa<LinkageSpecDecl>(D))
    return {tok::r_brace, tok::semi};

  if (llvm::isa<VarDecl>(D) || llvm::isa<FieldDecl>(D))
    return {tok::comma, tok::semi};

  return {};
}

// Starting from `Loc`, skips whitespace up to, and including, a single
// newline. Returns the (exclusive) end of any skipped whitespace (that is, the
// location immediately after the whitespace).
static SourceLocation skipWhitespaceAndNewline(const SourceManager &SM,
                                               SourceLocation Loc,
                                               const LangOptions &LangOpts) {
  const char *LocChars = SM.getCharacterData(Loc);
  int i = 0;
  while (isHorizontalWhitespace(LocChars[i]))
    ++i;
  if (isVerticalWhitespace(LocChars[i]))
    ++i;
  return Loc.getLocWithOffset(i);
}

// Is `Loc` separated from any following decl by something meaningful (e.g. an
// empty line, a comment), ignoring horizontal whitespace? Since this is a
// heuristic, we return false when in doubt. `Loc` cannot be the first location
// in the file.
static bool atOrBeforeSeparation(const SourceManager &SM, SourceLocation Loc,
                                 const LangOptions &LangOpts) {
  // If the preceding character is a newline, we'll check for an empty line as a
  // separator. However, we can't identify an empty line using tokens, so we
  // analyse the characters. If we try to use tokens, we'll just end up with a
  // whitespace token, whose characters we'd have to analyse anyhow.
  bool Invalid = false;
  const char *LocChars =
      SM.getCharacterData(Loc.getLocWithOffset(-1), &Invalid);
  assert(!Invalid &&
         "Loc must be a valid character and not the first of the source file.");
  if (isVerticalWhitespace(LocChars[0])) {
    for (int i = 1; isWhitespace(LocChars[i]); ++i)
      if (isVerticalWhitespace(LocChars[i]))
        return true;
  }
  // We didn't find an empty line, so lex the next token, skipping past any
  // whitespace we just scanned.
  Token Tok;
  bool Failed = Lexer::getRawToken(Loc, Tok, SM, LangOpts,
                                   /*IgnoreWhiteSpace=*/true);
  if (Failed)
    // Any text that confuses the lexer seems fair to consider a separation.
    return true;

  switch (Tok.getKind()) {
  case tok::comment:
  case tok::l_brace:
  case tok::r_brace:
  case tok::eof:
    return true;
  default:
    return false;
  }
}

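// A rough sketch of what getAssociatedRange below produces (the declaration
// and its surroundings are assumed, and attachment of the comment depends on
// the usual doc-comment rules): for
//   /// Returns a friendly greeting.
//   [[nodiscard]] int greet();
// followed by a blank line, the resulting file range covers the leading doc
// comment (included because the decl is separated from whatever follows), the
// attribute together with its enclosing `[[`, the declaration itself, the
// trailing semicolon, and the newline that ends the line.
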
CharSourceRange tooling::getAssociatedRange(const Decl &Decl,
                                            ASTContext &Context) {
  const SourceManager &SM = Context.getSourceManager();
  const LangOptions &LangOpts = Context.getLangOpts();
  CharSourceRange Range = CharSourceRange::getTokenRange(Decl.getSourceRange());

  // First, expand to the start of the template<> declaration if necessary.
  if (const auto *Record = llvm::dyn_cast<CXXRecordDecl>(&Decl)) {
    if (const auto *T = Record->getDescribedClassTemplate())
      if (SM.isBeforeInTranslationUnit(T->getBeginLoc(), Range.getBegin()))
        Range.setBegin(T->getBeginLoc());
  } else if (const auto *F = llvm::dyn_cast<FunctionDecl>(&Decl)) {
    if (const auto *T = F->getDescribedFunctionTemplate())
      if (SM.isBeforeInTranslationUnit(T->getBeginLoc(), Range.getBegin()))
        Range.setBegin(T->getBeginLoc());
  }

  // Next, expand the end location past trailing comments to include a potential
  // newline at the end of the decl's line.
  Range.setEnd(
      getEntityEndLoc(SM, Decl.getEndLoc(), getTerminators(Decl), LangOpts));
  Range.setTokenRange(false);

  // Expand to include preceding associated comments. We ignore any comments
  // that are not preceding the decl, since we've already skipped trailing
  // comments with getEntityEndLoc.
  if (const RawComment *Comment =
          Decl.getASTContext().getRawCommentForDeclNoCache(&Decl))
    // Only include a preceding comment if:
    // * it is *not* separate from the declaration (not including any newline
    //   that immediately follows the comment),
    // * the decl *is* separate from any following entity (so, there are no
    //   other entities the comment could refer to), and
    // * it is not an IfThisThenThat lint check.
    if (SM.isBeforeInTranslationUnit(Comment->getBeginLoc(),
                                     Range.getBegin()) &&
        !atOrBeforeSeparation(
            SM, skipWhitespaceAndNewline(SM, Comment->getEndLoc(), LangOpts),
            LangOpts) &&
        atOrBeforeSeparation(SM, Range.getEnd(), LangOpts)) {
      const StringRef CommentText = Comment->getRawText(SM);
      if (!CommentText.contains("LINT.IfChange") &&
          !CommentText.contains("LINT.ThenChange"))
        Range.setBegin(Comment->getBeginLoc());
    }
  // Add leading attributes.
  for (auto *Attr : Decl.attrs()) {
    if (Attr->getLocation().isInvalid() ||
        !SM.isBeforeInTranslationUnit(Attr->getLocation(), Range.getBegin()))
      continue;
    Range.setBegin(Attr->getLocation());

    // Extend to the left '[[' or '__attribute__((' if we saw the attribute,
    // unless it is not a valid location.
    bool Invalid;
    StringRef Source =
        SM.getBufferData(SM.getFileID(Range.getBegin()), &Invalid);
    if (Invalid)
      continue;
    llvm::StringRef BeforeAttr =
        Source.substr(0, SM.getFileOffset(Range.getBegin()));
    llvm::StringRef BeforeAttrStripped = BeforeAttr.rtrim();

    for (llvm::StringRef Prefix : {"[[", "__attribute__(("}) {
      // Handle whitespace between attribute prefix and attribute value.
      if (BeforeAttrStripped.endswith(Prefix)) {
        // Move start to start position of prefix, which is
        // length(BeforeAttr) - length(BeforeAttrStripped) + length(Prefix)
        // positions to the left.
        Range.setBegin(Range.getBegin().getLocWithOffset(static_cast<int>(
            -BeforeAttr.size() + BeforeAttrStripped.size() - Prefix.size())));
        break;
        // If we didn't see '[[' or '__attribute' it's probably coming from a
        // macro expansion which is already handled by makeFileCharRange(),
        // below.
      }
    }
  }

  // Range.getEnd() is already fully un-expanded by getEntityEndLoc. But,
  // Range.getBegin() may be inside an expansion.
  return Lexer::makeFileCharRange(Range, SM, LangOpts);
}