Coverage Report

Created: 2020-09-15 12:33

/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/Tooling/Transformer/SourceCode.cpp

//===--- SourceCode.cpp - Source code manipulation routines -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This file provides functions that simplify extraction of source code.
//
//===----------------------------------------------------------------------===//
#include "clang/Tooling/Transformer/SourceCode.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Comment.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
#include "llvm/Support/Errc.h"
#include "llvm/Support/Error.h"
#include <set>

using namespace clang;

using llvm::errc;
using llvm::StringError;

StringRef clang::tooling::getText(CharSourceRange Range,
                                  const ASTContext &Context) {
  return Lexer::getSourceText(Range, Context.getSourceManager(),
                              Context.getLangOpts());
}
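
As a usage sketch (an editorial illustration, not part of SourceCode.cpp): given a statement obtained elsewhere, e.g. in an AST matcher callback, its spelled text can be pulled out by first wrapping the node's source range in a token range. The helper name is hypothetical; it relies only on headers the file already includes.

// Hypothetical helper: returns the text spelled for `S`, e.g. "x + 1".
static llvm::StringRef getStmtText(const clang::Stmt &S,
                                   const clang::ASTContext &Context) {
  clang::CharSourceRange Range =
      clang::CharSourceRange::getTokenRange(S.getSourceRange());
  return clang::tooling::getText(Range, Context);
}

The returned StringRef points into the source buffer owned by the SourceManager, so it remains valid only as long as the ASTContext does.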

CharSourceRange clang::tooling::maybeExtendRange(CharSourceRange Range,
                                                 tok::TokenKind Next,
                                                 ASTContext &Context) {
  CharSourceRange R = Lexer::getAsCharRange(Range, Context.getSourceManager(),
                                            Context.getLangOpts());
  if (R.isInvalid())
    return Range;
  Token Tok;
  bool Err =
      Lexer::getRawToken(R.getEnd(), Tok, Context.getSourceManager(),
                         Context.getLangOpts(), /*IgnoreWhiteSpace=*/true);
  if (Err || !Tok.is(Next))
    return Range;
  return CharSourceRange::getTokenRange(Range.getBegin(), Tok.getLocation());
}
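
maybeExtendRange can then widen such a range over an expected next token. A sketch, again with a hypothetical helper name and only the headers already included above: cover a trailing ';' when one immediately follows the statement; otherwise the range comes back unchanged.

// Hypothetical helper: the range of `S`, extended over a trailing ';' when the
// token immediately following the statement is a semicolon.
static clang::CharSourceRange getRangeWithSemi(const clang::Stmt &S,
                                               clang::ASTContext &Context) {
  clang::CharSourceRange Range =
      clang::CharSourceRange::getTokenRange(S.getSourceRange());
  return clang::tooling::maybeExtendRange(Range, clang::tok::semi, Context);
}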

llvm::Error clang::tooling::validateEditRange(const CharSourceRange &Range,
                                              const SourceManager &SM) {
  if (Range.isInvalid())
    return llvm::make_error<StringError>(errc::invalid_argument,
                                         "Invalid range");

  if (Range.getBegin().isMacroID() || Range.getEnd().isMacroID())
    return llvm::make_error<StringError>(
        errc::invalid_argument, "Range starts or ends in a macro expansion");

  if (SM.isInSystemHeader(Range.getBegin()) ||
      SM.isInSystemHeader(Range.getEnd()))
    return llvm::make_error<StringError>(errc::invalid_argument,
                                         "Range is in system header");

  std::pair<FileID, unsigned> BeginInfo = SM.getDecomposedLoc(Range.getBegin());
  std::pair<FileID, unsigned> EndInfo = SM.getDecomposedLoc(Range.getEnd());
  if (BeginInfo.first != EndInfo.first)
    return llvm::make_error<StringError>(
        errc::invalid_argument, "Range begins and ends in different files");

  if (BeginInfo.second > EndInfo.second)
    return llvm::make_error<StringError>(
        errc::invalid_argument, "Range's begin is past its end");

  return llvm::Error::success();
}
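
validateEditRange returns an llvm::Error, so a caller can either discard the reason (as getRangeForEdit below does via llvm::errorToBool) or report it. A sketch of the latter; the helper is hypothetical and additionally assumes llvm/Support/raw_ostream.h for llvm::errs().

// Hypothetical helper: true if `Range` can be edited; otherwise prints why not.
static bool isEditable(const clang::CharSourceRange &Range,
                       const clang::SourceManager &SM) {
  if (llvm::Error Err = clang::tooling::validateEditRange(Range, SM)) {
    llvm::errs() << "cannot edit range: " << llvm::toString(std::move(Err))
                 << "\n";
    return false;
  }
  return true;
}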

llvm::Optional<CharSourceRange>
clang::tooling::getRangeForEdit(const CharSourceRange &EditRange,
                                const SourceManager &SM,
                                const LangOptions &LangOpts) {
  // FIXME: makeFileCharRange() has the disadvantage of stripping off "identity"
  // macros. For example, if we're looking to rewrite the int literal 3 to 6,
  // and we have the following definition:
  //    #define DO_NOTHING(x) x
  // then
  //    foo(DO_NOTHING(3))
  // will be rewritten to
  //    foo(6)
  // rather than the arguably better
  //    foo(DO_NOTHING(6))
  // Decide whether the current behavior is desirable and modify if not.
  CharSourceRange Range = Lexer::makeFileCharRange(EditRange, SM, LangOpts);
  bool IsInvalid = llvm::errorToBool(validateEditRange(Range, SM));
  if (IsInvalid)
    return llvm::None;
  return Range;
}
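
A sketch of how getRangeForEdit might feed an actual edit: clang::tooling::Replacement (from clang/Tooling/Core/Replacement.h) accepts a CharSourceRange directly, so a caller can translate the edit range and bail out when the text cannot be rewritten. The helper name and the extra include are assumptions, not part of this file.

// Hypothetical helper: a replacement of the text under `EditRange` with
// `NewText`, or None if the range cannot be edited (for example, because it
// lands in a macro body or a system header).
static llvm::Optional<clang::tooling::Replacement>
makeReplacement(const clang::CharSourceRange &EditRange,
                llvm::StringRef NewText, const clang::SourceManager &SM,
                const clang::LangOptions &LangOpts) {
  llvm::Optional<clang::CharSourceRange> Range =
      clang::tooling::getRangeForEdit(EditRange, SM, LangOpts);
  if (!Range)
    return llvm::None;
  return clang::tooling::Replacement(SM, *Range, NewText, LangOpts);
}

Per the FIXME above, an EditRange covering the `3` in `foo(DO_NOTHING(3))` maps to the file range of just `3`, so the rewritten call reads `foo(6)` rather than `foo(DO_NOTHING(6))`.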

static bool startsWithNewline(const SourceManager &SM, const Token &Tok) {
  return isVerticalWhitespace(SM.getCharacterData(Tok.getLocation())[0]);
}

static bool contains(const std::set<tok::TokenKind> &Terminators,
                     const Token &Tok) {
  return Terminators.count(Tok.getKind()) > 0;
}

// Returns the exclusive, *file* end location of the entity whose last token is
// at location 'EntityLast'. That is, it returns the location one past the last
// relevant character.
//
// Associated tokens include comments, horizontal whitespace and 'Terminators'
// -- optional tokens, which, if any are found, will be included; if
// 'Terminators' is empty, we will not include any extra tokens beyond comments
// and horizontal whitespace.
static SourceLocation
getEntityEndLoc(const SourceManager &SM, SourceLocation EntityLast,
                const std::set<tok::TokenKind> &Terminators,
                const LangOptions &LangOpts) {
  assert(EntityLast.isValid() && "Invalid end location found.");

  // We remember the last location of a non-horizontal-whitespace token we have
  // lexed; this is the location up to which we will want to delete.
  // FIXME: Support using the spelling loc here for cases where we want to
  // analyze the macro text.

  CharSourceRange ExpansionRange = SM.getExpansionRange(EntityLast);
  // FIXME: Should check isTokenRange(), for the (rare) case that
  // `ExpansionRange` is a character range.
  std::unique_ptr<Lexer> Lexer = [&]() {
    bool Invalid = false;
    auto FileOffset = SM.getDecomposedLoc(ExpansionRange.getEnd());
    llvm::StringRef File = SM.getBufferData(FileOffset.first, &Invalid);
    assert(!Invalid && "Cannot get file/offset");
    return std::make_unique<clang::Lexer>(
        SM.getLocForStartOfFile(FileOffset.first), LangOpts, File.begin(),
        File.data() + FileOffset.second, File.end());
  }();

  // Tell Lexer to return whitespace as pseudo-tokens (kind is tok::unknown).
  Lexer->SetKeepWhitespaceMode(true);

  // Generally, the code we want to include looks like this ([] are optional),
  // If Terminators is empty:
  //   [ <comment> ] [ <newline> ]
  // Otherwise:
  //   ... <terminator> [ <comment> ] [ <newline> ]

  Token Tok;
  bool Terminated = false;

  // First, lex to the current token (which is the last token of the range that
  // is definitely associated with the decl). Then, we process the first token
  // separately from the rest based on conditions that hold specifically for
  // that first token.
  //
  // We do not search for a terminator if none is required or we've already
  // encountered it. Otherwise, if the original `EntityLast` location was in a
  // macro expansion, we don't have visibility into the text, so we assume we've
  // already terminated. However, we note this assumption with
  // `TerminatedByMacro`, because we'll want to handle it somewhat differently
  // for the terminators semicolon and comma. These terminators can be safely
  // associated with the entity when they appear after the macro -- extra
  // semicolons have no effect on the program and a well-formed program won't
  // have multiple commas in a row, so we're guaranteed that there is only one.
  //
  // FIXME: This handling of macros is more conservative than necessary. When
  // the end of the expansion coincides with the end of the node, we can still
  // safely analyze the code. But, it is more complicated, because we need to
  // start by lexing the spelling loc for the first token and then switch to the
  // expansion loc.
  bool TerminatedByMacro = false;
  Lexer->LexFromRawLexer(Tok);
  if (Terminators.empty() || contains(Terminators, Tok))
    Terminated = true;
  else if (EntityLast.isMacroID()) {
    Terminated = true;
    TerminatedByMacro = true;
  }

  // We save the most recent candidate for the exclusive end location.
  SourceLocation End = Tok.getEndLoc();

  while (!Terminated) {
    // Lex the next token we want to possibly expand the range with.
    Lexer->LexFromRawLexer(Tok);

    switch (Tok.getKind()) {
    case tok::eof:
    // Unexpected separators.
    case tok::l_brace:
    case tok::r_brace:
    case tok::comma:
      return End;
    // Whitespace pseudo-tokens.
    case tok::unknown:
      if (startsWithNewline(SM, Tok))
        // Include at least until the end of the line.
        End = Tok.getEndLoc();
      break;
    default:
      if (contains(Terminators, Tok))
        Terminated = true;
      End = Tok.getEndLoc();
      break;
    }
  }

  do {
    // Lex the next token we want to possibly expand the range with.
    Lexer->LexFromRawLexer(Tok);

    switch (Tok.getKind()) {
    case tok::unknown:
      if (startsWithNewline(SM, Tok))
        // We're done, but include this newline.
        return Tok.getEndLoc();
      break;
    case tok::comment:
      // Include any comments we find on the way.
      End = Tok.getEndLoc();
      break;
    case tok::semi:
    case tok::comma:
      if (TerminatedByMacro && contains(Terminators, Tok)) {
        End = Tok.getEndLoc();
        // We've found a real terminator.
        TerminatedByMacro = false;
        break;
      }
      // Found an unrelated token; stop and don't include it.
      return End;
    default:
      // Found an unrelated token; stop and don't include it.
      return End;
    }
  } while (true);
}
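
To make the traversal concrete, here is a small worked example of the behaviour I would expect (illustrative only), assuming the entity is a VarDecl so that Terminators is {tok::comma, tok::semi}, as getTerminators below returns:

// Source text, with the last entity token `3` marked:
//
//   int x = 3;  // trailing comment
//           ^EntityLast
//
// The raw lexer starts at `3`, finds the `;` terminator, keeps lexing through
// the horizontal whitespace and the trailing comment (both retained because of
// SetKeepWhitespaceMode), and stops at the whitespace pseudo-token that starts
// with the newline. The returned exclusive end location is past that newline,
// so "3;  // trailing comment" and the line break fall inside the entity's
// extent.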

// Returns the expected terminator tokens for the given declaration.
//
// If we do not know the correct terminator token, returns an empty set.
//
// There are cases where we have more than one possible terminator (for example,
// we find either a comma or a semicolon after a VarDecl).
static std::set<tok::TokenKind> getTerminators(const Decl &D) {
  if (llvm::isa<RecordDecl>(D) || llvm::isa<UsingDecl>(D))
    return {tok::semi};

  if (llvm::isa<FunctionDecl>(D) || llvm::isa<LinkageSpecDecl>(D))
    return {tok::r_brace, tok::semi};

  if (llvm::isa<VarDecl>(D) || llvm::isa<FieldDecl>(D))
    return {tok::comma, tok::semi};

  return {};
}

// Starting from `Loc`, skips whitespace up to, and including, a single
// newline. Returns the (exclusive) end of any skipped whitespace (that is, the
// location immediately after the whitespace).
static SourceLocation skipWhitespaceAndNewline(const SourceManager &SM,
                                               SourceLocation Loc,
                                               const LangOptions &LangOpts) {
  const char *LocChars = SM.getCharacterData(Loc);
  int i = 0;
  while (isHorizontalWhitespace(LocChars[i]))
    ++i;
  if (isVerticalWhitespace(LocChars[i]))
    ++i;
  return Loc.getLocWithOffset(i);
}

// Is `Loc` separated from any following decl by something meaningful (e.g. an
// empty line, a comment), ignoring horizontal whitespace?  Since this is a
// heuristic, we return false when in doubt.  `Loc` cannot be the first location
// in the file.
static bool atOrBeforeSeparation(const SourceManager &SM, SourceLocation Loc,
                                 const LangOptions &LangOpts) {
  // If the preceding character is a newline, we'll check for an empty line as a
  // separator. However, we can't identify an empty line using tokens, so we
  // analyse the characters. If we try to use tokens, we'll just end up with a
  // whitespace token, whose characters we'd have to analyse anyhow.
  bool Invalid = false;
  const char *LocChars =
      SM.getCharacterData(Loc.getLocWithOffset(-1), &Invalid);
  assert(!Invalid &&
         "Loc must be a valid character and not the first of the source file.");
  if (isVerticalWhitespace(LocChars[0])) {
    for (int i = 1; isWhitespace(LocChars[i]); ++i)
      if (isVerticalWhitespace(LocChars[i]))
        return true;
  }
  // We didn't find an empty line, so lex the next token, skipping past any
  // whitespace we just scanned.
  Token Tok;
  bool Failed = Lexer::getRawToken(Loc, Tok, SM, LangOpts,
                                   /*IgnoreWhiteSpace=*/true);
  if (Failed)
    // Any text that confuses the lexer seems fair to consider a separation.
    return true;

  switch (Tok.getKind()) {
  case tok::comment:
  case tok::l_brace:
  case tok::r_brace:
  case tok::eof:
    return true;
  default:
    return false;
  }
}

CharSourceRange tooling::getAssociatedRange(const Decl &Decl,
                                            ASTContext &Context) {
  const SourceManager &SM = Context.getSourceManager();
  const LangOptions &LangOpts = Context.getLangOpts();
  CharSourceRange Range = CharSourceRange::getTokenRange(Decl.getSourceRange());

  // First, expand to the start of the template<> declaration if necessary.
  if (const auto *Record = llvm::dyn_cast<CXXRecordDecl>(&Decl)) {
    if (const auto *T = Record->getDescribedClassTemplate())
      if (SM.isBeforeInTranslationUnit(T->getBeginLoc(), Range.getBegin()))
        Range.setBegin(T->getBeginLoc());
  } else if (const auto *F = llvm::dyn_cast<FunctionDecl>(&Decl)) {
    if (const auto *T = F->getDescribedFunctionTemplate())
      if (SM.isBeforeInTranslationUnit(T->getBeginLoc(), Range.getBegin()))
        Range.setBegin(T->getBeginLoc());
  }

  // Next, expand the end location past trailing comments to include a potential
  // newline at the end of the decl's line.
  Range.setEnd(
      getEntityEndLoc(SM, Decl.getEndLoc(), getTerminators(Decl), LangOpts));
  Range.setTokenRange(false);

  // Expand to include preceding associated comments. We ignore any comments
  // that are not preceding the decl, since we've already skipped trailing
  // comments with getEntityEndLoc.
  if (const RawComment *Comment =
          Decl.getASTContext().getRawCommentForDeclNoCache(&Decl))
    // Only include a preceding comment if:
    // * it is *not* separate from the declaration (not including any newline
    //   that immediately follows the comment),
    // * the decl *is* separate from any following entity (so, there are no
    //   other entities the comment could refer to), and
    // * it is not an IfThisThenThat lint check.
    if (SM.isBeforeInTranslationUnit(Comment->getBeginLoc(),
                                     Range.getBegin()) &&
        !atOrBeforeSeparation(
            SM, skipWhitespaceAndNewline(SM, Comment->getEndLoc(), LangOpts),
            LangOpts) &&
        atOrBeforeSeparation(SM, Range.getEnd(), LangOpts)) {
      const StringRef CommentText = Comment->getRawText(SM);
      if (!CommentText.contains("LINT.IfChange") &&
          !CommentText.contains("LINT.ThenChange"))
        Range.setBegin(Comment->getBeginLoc());
    }
  // Add leading attributes.
  for (auto *Attr : Decl.attrs()) {
    if (Attr->getLocation().isInvalid() ||
        !SM.isBeforeInTranslationUnit(Attr->getLocation(), Range.getBegin()))
      continue;
    Range.setBegin(Attr->getLocation());

    // Extend to the left '[[' or '__attribute__((' if we saw the attribute,
    // unless it is not a valid location.
    bool Invalid;
    StringRef Source =
        SM.getBufferData(SM.getFileID(Range.getBegin()), &Invalid);
    if (Invalid)
      continue;
    llvm::StringRef BeforeAttr =
        Source.substr(0, SM.getFileOffset(Range.getBegin()));
    llvm::StringRef BeforeAttrStripped = BeforeAttr.rtrim();

    for (llvm::StringRef Prefix : {"[[", "__attribute__(("}) {
      // Handle whitespace between attribute prefix and attribute value.
      if (BeforeAttrStripped.endswith(Prefix)) {
        // Move start to start position of prefix, which is
        // length(BeforeAttr) - length(BeforeAttrStripped) + length(Prefix)
        // positions to the left.
        Range.setBegin(Range.getBegin().getLocWithOffset(static_cast<int>(
            -BeforeAttr.size() + BeforeAttrStripped.size() - Prefix.size())));
        break;
        // If we didn't see '[[' or '__attribute' it's probably coming from a
        // macro expansion which is already handled by makeFileCharRange(),
        // below.
      }
    }
  }

  // Range.getEnd() is already fully un-expanded by getEntityEndLoc. But,
  // Range.getBegin() may be inside an expansion.
  return Lexer::makeFileCharRange(Range, SM, LangOpts);
}
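
Finally, a usage sketch for getAssociatedRange (hypothetical helpers; the second assumes the Replacement.h include from the earlier sketch): the returned range is a file character range, so it can feed getText directly or back a Replacement that deletes a declaration together with its leading comment, terminator, and trailing newline.

// Hypothetical helper: the full text associated with `D` -- any leading doc
// comment, the declaration itself, its terminator, and the trailing newline.
static llvm::StringRef getDeclText(const clang::Decl &D,
                                   clang::ASTContext &Context) {
  clang::CharSourceRange Range = clang::tooling::getAssociatedRange(D, Context);
  return clang::tooling::getText(Range, Context);
}

// Hypothetical helper: a replacement that removes `D` and everything
// associated with it.
static clang::tooling::Replacement removeDecl(const clang::Decl &D,
                                              clang::ASTContext &Context) {
  clang::CharSourceRange Range = clang::tooling::getAssociatedRange(D, Context);
  return clang::tooling::Replacement(Context.getSourceManager(), Range, "",
                                     Context.getLangOpts());
}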