/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/Lex/PPCaching.cpp
Line | Count | Source |
1 | | //===--- PPCaching.cpp - Handle caching lexed tokens ----------------------===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // This file implements pieces of the Preprocessor interface that manage the |
10 | | // caching of lexed tokens. |
11 | | // |
12 | | //===----------------------------------------------------------------------===// |
13 | | |
14 | | #include "clang/Lex/Preprocessor.h" |
15 | | using namespace clang; |
16 | | |
17 | | // EnableBacktrackAtThisPos - From the point that this method is called, and |
18 | | // until CommitBacktrackedTokens() or Backtrack() is called, the Preprocessor |
19 | | // keeps track of the lexed tokens so that a subsequent Backtrack() call will |
20 | | // make the Preprocessor re-lex the same tokens. |
21 | | // |
22 | | // Nested backtracks are allowed, meaning that EnableBacktrackAtThisPos can |
23 | | // be called multiple times and CommitBacktrackedTokens/Backtrack calls will |
24 | | // be combined with the EnableBacktrackAtThisPos calls in reverse order. |
25 | 15.3M | void Preprocessor::EnableBacktrackAtThisPos() { |
26 | 15.3M | assert(LexLevel == 0 && "cannot use lookahead while lexing"); |
27 | 15.3M | BacktrackPositions.push_back(CachedLexPos); |
28 | 15.3M | EnterCachingLexMode(); |
29 | 15.3M | } |
30 | | |
31 | | // Disable the last EnableBacktrackAtThisPos call. |
32 | 52.5k | void Preprocessor::CommitBacktrackedTokens() { |
33 | 52.5k | assert(!BacktrackPositions.empty() |
34 | 52.5k | && "EnableBacktrackAtThisPos was not called!"); |
35 | 52.5k | BacktrackPositions.pop_back(); |
36 | 52.5k | } |
37 | | |
38 | | // Make Preprocessor re-lex the tokens that were lexed since |
39 | | // EnableBacktrackAtThisPos() was previously called. |
40 | 15.2M | void Preprocessor::Backtrack() { |
41 | 15.2M | assert(!BacktrackPositions.empty() |
42 | 15.2M | && "EnableBacktrackAtThisPos was not called!"); |
43 | 15.2M | CachedLexPos = BacktrackPositions.back(); |
44 | 15.2M | BacktrackPositions.pop_back(); |
45 | 15.2M | recomputeCurLexerKind(); |
46 | 15.2M | } |
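The three functions above are used together by clients such as the parser. A minimal usage sketch, assuming a caller outside any nested lex action (the consumeIfLParen helper is made up for illustration and is not part of this file):

    static bool consumeIfLParen(clang::Preprocessor &PP) {
      PP.EnableBacktrackAtThisPos();    // start recording lexed tokens
      clang::Token Tok;
      PP.Lex(Tok);                      // Tok is cached while a backtrack position is active
      if (Tok.is(clang::tok::l_paren)) {
        PP.CommitBacktrackedTokens();   // accept the consumed token and stop tracking
        return true;
      }
      PP.Backtrack();                   // rewind; the same token will be re-lexed from the cache
      return false;
    }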
47 | | |
48 | 316M | void Preprocessor::CachingLex(Token &Result) { |
49 | 316M | if (!InCachingLexMode()) |
50 | 0 | return; |
51 | | |
52 | | // The assert in EnterCachingLexMode should prevent this from happening. |
53 | 316M | assert(LexLevel == 1 && |
54 | 316M | "should not use token caching within the preprocessor"); |
55 | | |
56 | 316M | if (CachedLexPos < CachedTokens.size()) { |
57 | 183M | Result = CachedTokens[CachedLexPos++]; |
58 | 183M | Result.setFlag(Token::IsReinjected); |
59 | 183M | return; |
60 | 183M | } |
61 | | |
62 | 133M | ExitCachingLexMode(); |
63 | 133M | Lex(Result); |
64 | | |
65 | 133M | if (isBacktrackEnabled()) { |
66 | | // Cache the lexed token. |
67 | 14.1M | EnterCachingLexModeUnchecked(); |
68 | 14.1M | CachedTokens.push_back(Result); |
69 | 14.1M | ++CachedLexPos; |
70 | 14.1M | return; |
71 | 14.1M | } |
72 | | |
73 | 118M | if (CachedLexPos < CachedTokens.size()) { |
74 | 0 | EnterCachingLexModeUnchecked(); |
75 | 118M | } else { |
76 | | // All cached tokens were consumed. |
77 | 118M | CachedTokens.clear(); |
78 | 118M | CachedLexPos = 0; |
79 | 118M | } |
80 | 118M | } |
81 | | |
82 | 182M | void Preprocessor::EnterCachingLexMode() { |
83 | | // The caching layer sits on top of all the other lexers, so it's incorrect |
84 | | // to cache tokens while inside a nested lex action. The cached tokens would |
85 | | // be retained after returning to the enclosing lex action and, at best, |
86 | | // would appear at the wrong position in the token stream. |
87 | 182M | assert(LexLevel == 0 && |
88 | 182M | "entered caching lex mode while lexing something else"); |
89 | | |
90 | 182M | if (InCachingLexMode()) { |
91 | 20.3M | assert(CurLexerKind == CLK_CachingLexer && "Unexpected lexer kind"); |
92 | 20.3M | return; |
93 | 20.3M | } |
94 | | |
95 | 161M | EnterCachingLexModeUnchecked(); |
96 | 161M | } |
97 | | |
98 | 175M | void Preprocessor::EnterCachingLexModeUnchecked() { |
99 | 175M | assert(CurLexerKind != CLK_CachingLexer && "already in caching lex mode"); |
100 | 175M | PushIncludeMacroStack(); |
101 | 175M | CurLexerKind = CLK_CachingLexer; |
102 | 175M | } |
103 | | |
104 | | |
105 | 161M | const Token &Preprocessor::PeekAhead(unsigned N) { |
106 | 161M | assert(CachedLexPos + N > CachedTokens.size() && "Confused caching."); |
107 | 161M | ExitCachingLexMode(); |
108 | 322M | for (size_t C = CachedLexPos + N - CachedTokens.size(); C > 0; --C) {
109 | 161M | CachedTokens.push_back(Token()); |
110 | 161M | Lex(CachedTokens.back()); |
111 | 161M | } |
112 | 161M | EnterCachingLexMode(); |
113 | 161M | return CachedTokens.back(); |
114 | 161M | } |
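PeekAhead is normally reached through Preprocessor::LookAhead(N) (declared in Preprocessor.h), which returns the token N positions past the last consumed one and falls back to PeekAhead when that token is not yet cached. A small sketch of the caller side, with PP standing in for some Preprocessor reference:

    // Peek at the next token without consuming it; it stays cached for the
    // real Lex() call that follows.
    const clang::Token &Next = PP.LookAhead(0);
    if (Next.is(clang::tok::coloncolon)) {
      // ... handle a qualified name speculatively ...
    }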
115 | | |
116 | 11.7M | void Preprocessor::AnnotatePreviousCachedTokens(const Token &Tok) { |
117 | 11.7M | assert(Tok.isAnnotation() && "Expected annotation token"); |
118 | 11.7M | assert(CachedLexPos != 0 && "Expected to have some cached tokens"); |
119 | 11.7M | assert(CachedTokens[CachedLexPos-1].getLastLoc() == Tok.getAnnotationEndLoc() |
120 | 11.7M | && "The annotation should be until the most recent cached token"); |
121 | | |
122 | | // Start from the end of the cached tokens list and look for the token |
123 | | // that is the beginning of the annotation token. |
124 | 12.2M | for (CachedTokensTy::size_type i = CachedLexPos; i != 0; --i) {
125 | 12.2M | CachedTokensTy::iterator AnnotBegin = CachedTokens.begin() + i-1; |
126 | 12.2M | if (AnnotBegin->getLocation() == Tok.getLocation()) { |
127 | 11.7M | assert((BacktrackPositions.empty() || BacktrackPositions.back() <= i) && |
128 | 11.7M | "The backtrack pos points inside the annotated tokens!"); |
129 | | // Replace the cached tokens with the single annotation token. |
130 | 11.7M | if (i < CachedLexPos) |
131 | 168k | CachedTokens.erase(AnnotBegin + 1, CachedTokens.begin() + CachedLexPos); |
132 | 11.7M | *AnnotBegin = Tok; |
133 | 11.7M | CachedLexPos = i; |
134 | 11.7M | return; |
135 | 11.7M | } |
136 | 12.2M | } |
137 | 11.7M | } |
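A sketch of the caller side, assuming the parser has decided to fold a run of cached tokens into one annotation token (StartLoc, EndLoc, and OpaqueTypeData are hypothetical placeholders; per the assert above, EndLoc must equal the last location of the most recently cached token):

    clang::Token Annot;
    Annot.startToken();
    Annot.setKind(clang::tok::annot_typename);
    Annot.setLocation(StartLoc);             // location of the first token being folded
    Annot.setAnnotationEndLoc(EndLoc);       // last location of the newest cached token
    Annot.setAnnotationValue(OpaqueTypeData);
    PP.AnnotatePreviousCachedTokens(Annot);  // the cached run collapses to this single token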
138 | | |
139 | 31.2k | bool Preprocessor::IsPreviousCachedToken(const Token &Tok) const { |
140 | | // There's currently no cached token... |
141 | 31.2k | if (!CachedLexPos) |
142 | 8.40k | return false; |
143 | | |
144 | 22.8k | const Token LastCachedTok = CachedTokens[CachedLexPos - 1]; |
145 | 22.8k | if (LastCachedTok.getKind() != Tok.getKind()) |
146 | 0 | return false; |
147 | | |
148 | 22.8k | SourceLocation::IntTy RelOffset = 0; |
149 | 22.8k | if ((!getSourceManager().isInSameSLocAddrSpace( |
150 | 22.8k | Tok.getLocation(), getLastCachedTokenLocation(), &RelOffset)) || |
151 | 22.8k | RelOffset) |
152 | 0 | return false; |
153 | | |
154 | 22.8k | return true; |
155 | 22.8k | } |
156 | | |
157 | 22.8k | void Preprocessor::ReplacePreviousCachedToken(ArrayRef<Token> NewToks) { |
158 | 22.8k | assert(CachedLexPos != 0 && "Expected to have some cached tokens"); |
159 | 22.8k | CachedTokens.insert(CachedTokens.begin() + CachedLexPos - 1, NewToks.begin(), |
160 | 22.8k | NewToks.end()); |
161 | 22.8k | CachedTokens.erase(CachedTokens.begin() + CachedLexPos - 1 + NewToks.size()); |
162 | 22.8k | CachedLexPos += NewToks.size() - 1; |
163 | 22.8k | } |
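A hedged sketch of how the last two functions combine, using the splitting of a '>>' token into two '>' tokens as a plausible example (GreaterGreaterTok and Loc are hypothetical placeholders):

    clang::Token Gt1, Gt2;
    Gt1.startToken();
    Gt1.setKind(clang::tok::greater);
    Gt1.setLocation(Loc);
    Gt2.startToken();
    Gt2.setKind(clang::tok::greater);
    Gt2.setLocation(Loc.getLocWithOffset(1));
    clang::Token NewToks[] = {Gt1, Gt2};
    if (PP.IsPreviousCachedToken(GreaterGreaterTok))  // is '>>' the newest cached token?
      PP.ReplacePreviousCachedToken(NewToks);         // splice the two '>' tokens in its place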