/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/Lex/TokenLexer.cpp
Line | Count | Source |
1 | | //===- TokenLexer.cpp - Lex from a token stream ---------------------------===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // This file implements the TokenLexer interface. |
10 | | // |
11 | | //===----------------------------------------------------------------------===// |
12 | | |
13 | | #include "clang/Lex/TokenLexer.h" |
14 | | #include "clang/Basic/Diagnostic.h" |
15 | | #include "clang/Basic/IdentifierTable.h" |
16 | | #include "clang/Basic/LangOptions.h" |
17 | | #include "clang/Basic/SourceLocation.h" |
18 | | #include "clang/Basic/SourceManager.h" |
19 | | #include "clang/Basic/TokenKinds.h" |
20 | | #include "clang/Lex/LexDiagnostic.h" |
21 | | #include "clang/Lex/Lexer.h" |
22 | | #include "clang/Lex/MacroArgs.h" |
23 | | #include "clang/Lex/MacroInfo.h" |
24 | | #include "clang/Lex/Preprocessor.h" |
25 | | #include "clang/Lex/Token.h" |
26 | | #include "clang/Lex/VariadicMacroSupport.h" |
27 | | #include "llvm/ADT/ArrayRef.h" |
28 | | #include "llvm/ADT/STLExtras.h" |
29 | | #include "llvm/ADT/SmallString.h" |
30 | | #include "llvm/ADT/SmallVector.h" |
31 | | #include "llvm/ADT/iterator_range.h" |
32 | | #include <cassert> |
33 | | #include <cstring> |
34 | | #include <optional> |
35 | | |
36 | | using namespace clang; |
37 | | |
38 | | /// Create a TokenLexer for the specified macro with the specified actual |
39 | | /// arguments. Note that this ctor takes ownership of the ActualArgs pointer. |
40 | | void TokenLexer::Init(Token &Tok, SourceLocation ELEnd, MacroInfo *MI, |
41 | 69.6M | MacroArgs *Actuals) { |
42 | | // If the client is reusing a TokenLexer, make sure to free any memory |
43 | | // associated with it. |
44 | 69.6M | destroy(); |
45 | | |
46 | 69.6M | Macro = MI; |
47 | 69.6M | ActualArgs = Actuals; |
48 | 69.6M | CurTokenIdx = 0; |
49 | | |
50 | 69.6M | ExpandLocStart = Tok.getLocation(); |
51 | 69.6M | ExpandLocEnd = ELEnd; |
52 | 69.6M | AtStartOfLine = Tok.isAtStartOfLine(); |
53 | 69.6M | HasLeadingSpace = Tok.hasLeadingSpace(); |
54 | 69.6M | NextTokGetsSpace = false; |
55 | 69.6M | Tokens = &*Macro->tokens_begin(); |
56 | 69.6M | OwnsTokens = false; |
57 | 69.6M | DisableMacroExpansion = false; |
58 | 69.6M | IsReinject = false; |
59 | 69.6M | NumTokens = Macro->tokens_end()-Macro->tokens_begin(); |
60 | 69.6M | MacroExpansionStart = SourceLocation(); |
61 | | |
62 | 69.6M | SourceManager &SM = PP.getSourceManager(); |
63 | 69.6M | MacroStartSLocOffset = SM.getNextLocalOffset(); |
64 | | |
65 | 69.6M | if (NumTokens > 0) { |
66 | 69.6M | assert(Tokens[0].getLocation().isValid()); |
67 | 69.6M | assert((Tokens[0].getLocation().isFileID() || Tokens[0].is(tok::comment)) && |
68 | 69.6M | "Macro defined in macro?"); |
69 | 69.6M | assert(ExpandLocStart.isValid()); |
70 | | |
71 | | // Reserve a source location entry chunk for the length of the macro |
72 | | // definition. Tokens that get lexed directly from the definition will |
73 | | // have their locations pointing inside this chunk. This is to avoid |
74 | | // creating separate source location entries for each token. |
75 | 69.6M | MacroDefStart = SM.getExpansionLoc(Tokens[0].getLocation()); |
76 | 69.6M | MacroDefLength = Macro->getDefinitionLength(SM); |
77 | 69.6M | MacroExpansionStart = SM.createExpansionLoc(MacroDefStart, |
78 | 69.6M | ExpandLocStart, |
79 | 69.6M | ExpandLocEnd, |
80 | 69.6M | MacroDefLength); |
81 | 69.6M | } |
82 | | |
83 | | // If this is a function-like macro, expand the arguments and change |
84 | | // Tokens to point to the expanded tokens. |
85 | 69.6M | if (Macro->isFunctionLike() && Macro->getNumParams())
86 | 28.6M | ExpandFunctionArguments(); |
87 | | |
88 | | // Mark the macro as currently disabled, so that it is not recursively |
89 | | // expanded. The macro must be disabled only after argument pre-expansion of |
90 | | // function-like macro arguments occurs. |
91 | 69.6M | Macro->DisableMacro(); |
92 | 69.6M | } |
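// [Editorial illustration, not part of TokenLexer.cpp] The DisableMacro()
// call above is what prevents runaway recursion. For example, given
//   #define F(x) F(x + 1)
// expanding F(0) produces the tokens F(0 + 1); the inner F is not expanded
// again during rescanning because F stays disabled while its own expansion
// is active.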
93 | | |
94 | | /// Create a TokenLexer for the specified token stream. This does not |
95 | | /// take ownership of the specified token vector. |
96 | | void TokenLexer::Init(const Token *TokArray, unsigned NumToks, |
97 | | bool disableMacroExpansion, bool ownsTokens, |
98 | 7.95M | bool isReinject) { |
99 | 7.95M | assert(!isReinject || disableMacroExpansion); |
100 | | // If the client is reusing a TokenLexer, make sure to free any memory |
101 | | // associated with it. |
102 | 7.95M | destroy(); |
103 | | |
104 | 7.95M | Macro = nullptr; |
105 | 7.95M | ActualArgs = nullptr; |
106 | 7.95M | Tokens = TokArray; |
107 | 7.95M | OwnsTokens = ownsTokens; |
108 | 7.95M | DisableMacroExpansion = disableMacroExpansion; |
109 | 7.95M | IsReinject = isReinject; |
110 | 7.95M | NumTokens = NumToks; |
111 | 7.95M | CurTokenIdx = 0; |
112 | 7.95M | ExpandLocStart = ExpandLocEnd = SourceLocation(); |
113 | 7.95M | AtStartOfLine = false; |
114 | 7.95M | HasLeadingSpace = false; |
115 | 7.95M | NextTokGetsSpace = false; |
116 | 7.95M | MacroExpansionStart = SourceLocation(); |
117 | | |
118 | | // Set HasLeadingSpace/AtStartOfLine so that the first token will be |
119 | | // returned unmodified. |
120 | 7.95M | if (NumToks != 0) { |
121 | 7.95M | AtStartOfLine = TokArray[0].isAtStartOfLine(); |
122 | 7.95M | HasLeadingSpace = TokArray[0].hasLeadingSpace(); |
123 | 7.95M | } |
124 | 7.95M | } |
125 | | |
126 | 78.1M | void TokenLexer::destroy() { |
127 | | // If this was a function-like macro that actually uses its arguments, delete |
128 | | // the expanded tokens. |
129 | 78.1M | if (OwnsTokens) { |
130 | 412k | delete [] Tokens; |
131 | 412k | Tokens = nullptr; |
132 | 412k | OwnsTokens = false; |
133 | 412k | } |
134 | | |
135 | | // TokenLexer owns its formal arguments. |
136 | 78.1M | if (ActualArgs) ActualArgs->destroy(PP);
137 | 78.1M | } |
138 | | |
139 | | bool TokenLexer::MaybeRemoveCommaBeforeVaArgs( |
140 | | SmallVectorImpl<Token> &ResultToks, bool HasPasteOperator, MacroInfo *Macro, |
141 | 167k | unsigned MacroArgNo, Preprocessor &PP) { |
142 | | // Is the macro argument __VA_ARGS__? |
143 | 167k | if (!Macro->isVariadic() || MacroArgNo != Macro->getNumParams()-1)
144 | 143k | return false; |
145 | | |
146 | | // In Microsoft-compatibility mode, a comma is removed in the expansion |
147 | | // of " ... , __VA_ARGS__ " if __VA_ARGS__ is empty. This extension is |
148 | | // not supported by gcc. |
149 | 24.0k | if (!HasPasteOperator && !PP.getLangOpts().MSVCCompat)
150 | 13.4k | return false; |
151 | | |
152 | | // GCC removes the comma in the expansion of " ... , ## __VA_ARGS__ " if |
153 | | // __VA_ARGS__ is empty, but not in strict C99 mode where there are no |
154 | | // named arguments, where it remains. In all other modes, including C99 |
155 | | // with GNU extensions, it is removed regardless of named arguments. |
156 | | // Microsoft also appears to support this extension, unofficially. |
157 | 10.5k | if (PP.getLangOpts().C99 && !PP.getLangOpts().GNUMode
158 | 10.5k | && Macro->getNumParams() < 2)
159 | 3 | return false; |
160 | | |
161 | | // Is a comma available to be removed? |
162 | 10.5k | if (ResultToks.empty() || !ResultToks.back().is(tok::comma)) |
163 | 441 | return false; |
164 | | |
165 | | // Issue an extension diagnostic for the paste operator. |
166 | 10.1k | if (HasPasteOperator) |
167 | 10.1k | PP.Diag(ResultToks.back().getLocation(), diag::ext_paste_comma); |
168 | | |
169 | | // Remove the comma. |
170 | 10.1k | ResultToks.pop_back(); |
171 | | |
172 | 10.1k | if (!ResultToks.empty()) { |
173 | | // If the comma was right after another paste (e.g. "X##,##__VA_ARGS__"), |
174 | | // then removal of the comma should produce a placemarker token (in C99 |
175 | | // terms) which we model by popping off the previous ##, giving us a plain |
176 | | // "X" when __VA_ARGS__ is empty. |
177 | 10.1k | if (ResultToks.back().is(tok::hashhash)) |
178 | 1 | ResultToks.pop_back(); |
179 | | |
180 | | // Remember that this comma was elided. |
181 | 10.1k | ResultToks.back().setFlag(Token::CommaAfterElided); |
182 | 10.1k | } |
183 | | |
184 | | // Never add a space, even if the comma, ##, or arg had a space. |
185 | 10.1k | NextTokGetsSpace = false; |
186 | 10.1k | return true; |
187 | 10.5k | } |
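// [Editorial illustration, not part of TokenLexer.cpp] Example of the comma
// elision handled above: with
//   #define LOG(fmt, ...) printf(fmt, ##__VA_ARGS__)
// the call LOG("hi") leaves __VA_ARGS__ empty, so the comma before the ##
// is removed (a GNU extension; MSVC-compatibility mode removes it even
// without the ##), and the expansion is printf("hi") rather than
// printf("hi", ).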
188 | | |
189 | | void TokenLexer::stringifyVAOPTContents( |
190 | | SmallVectorImpl<Token> &ResultToks, const VAOptExpansionContext &VCtx, |
191 | 43 | const SourceLocation VAOPTClosingParenLoc) { |
192 | 43 | const int NumToksPriorToVAOpt = VCtx.getNumberOfTokensPriorToVAOpt(); |
193 | 43 | const unsigned int NumVAOptTokens = ResultToks.size() - NumToksPriorToVAOpt; |
194 | 43 | Token *const VAOPTTokens = |
195 | 43 | NumVAOptTokens ? &ResultToks[NumToksPriorToVAOpt] : nullptr;
196 | | |
197 | 43 | SmallVector<Token, 64> ConcatenatedVAOPTResultToks; |
198 | | // FIXME: Should we keep track within VCtx that we did or did not
199 | | // encounter pasting, and only then perform this loop.
200 | | |
201 | | // Perform token pasting (concatenation) prior to stringization. |
202 | 166 | for (unsigned int CurTokenIdx = 0; CurTokenIdx != NumVAOptTokens; |
203 | 123 | ++CurTokenIdx) { |
204 | 123 | if (VAOPTTokens[CurTokenIdx].is(tok::hashhash)) { |
205 | 18 | assert(CurTokenIdx != 0 && |
206 | 18 | "Can not have __VAOPT__ contents begin with a ##"); |
207 | 18 | Token &LHS = VAOPTTokens[CurTokenIdx - 1]; |
208 | 18 | pasteTokens(LHS, llvm::ArrayRef(VAOPTTokens, NumVAOptTokens), |
209 | 18 | CurTokenIdx); |
210 | | // Replace the token prior to the first ## in this iteration. |
211 | 18 | ConcatenatedVAOPTResultToks.back() = LHS; |
212 | 18 | if (CurTokenIdx == NumVAOptTokens) |
213 | 0 | break; |
214 | 18 | } |
215 | 123 | ConcatenatedVAOPTResultToks.push_back(VAOPTTokens[CurTokenIdx]); |
216 | 123 | } |
217 | | |
218 | 43 | ConcatenatedVAOPTResultToks.push_back(VCtx.getEOFTok()); |
219 | | // Get the SourceLocation that represents the start location within |
220 | | // the macro definition that marks where this string is substituted |
221 | | // into: i.e. the __VA_OPT__ and the ')' within the spelling of the |
222 | | // macro definition, and use it to indicate that the stringified token |
223 | | // was generated from that location. |
224 | 43 | const SourceLocation ExpansionLocStartWithinMacro = |
225 | 43 | getExpansionLocForMacroDefLoc(VCtx.getVAOptLoc()); |
226 | 43 | const SourceLocation ExpansionLocEndWithinMacro = |
227 | 43 | getExpansionLocForMacroDefLoc(VAOPTClosingParenLoc); |
228 | | |
229 | 43 | Token StringifiedVAOPT = MacroArgs::StringifyArgument( |
230 | 43 | &ConcatenatedVAOPTResultToks[0], PP, VCtx.hasCharifyBefore() /*Charify*/, |
231 | 43 | ExpansionLocStartWithinMacro, ExpansionLocEndWithinMacro); |
232 | | |
233 | 43 | if (VCtx.getLeadingSpaceForStringifiedToken()) |
234 | 30 | StringifiedVAOPT.setFlag(Token::LeadingSpace); |
235 | | |
236 | 43 | StringifiedVAOPT.setFlag(Token::StringifiedInMacro); |
237 | | // Resize (shrink) the token stream to just capture this stringified token. |
238 | 43 | ResultToks.resize(NumToksPriorToVAOpt + 1); |
239 | 43 | ResultToks.back() = StringifiedVAOPT; |
240 | 43 | } |
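// [Editorial illustration, not part of TokenLexer.cpp] Example of the
// paste-then-stringify order implemented above: with
//   #define S(...) #__VA_OPT__(x ## __VA_ARGS__)
// S(1) pastes inside the __VA_OPT__ contents first and then stringifies,
// yielding "x1", while S() yields the empty string literal "".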
241 | | |
242 | | /// Expand the arguments of a function-like macro so that we can quickly |
243 | | /// return preexpanded tokens from Tokens. |
244 | 28.6M | void TokenLexer::ExpandFunctionArguments() { |
245 | 28.6M | SmallVector<Token, 128> ResultToks; |
246 | | |
247 | | // Loop through 'Tokens', expanding them into ResultToks. Keep |
248 | | // track of whether we change anything. If not, no need to keep them. If so, |
249 | | // we install the newly expanded sequence as the new 'Tokens' list. |
250 | 28.6M | bool MadeChange = false; |
251 | | |
252 | 28.6M | std::optional<bool> CalledWithVariadicArguments; |
253 | | |
254 | 28.6M | VAOptExpansionContext VCtx(PP); |
255 | | |
256 | 260M | for (unsigned I = 0, E = NumTokens; I != E; ++I) {
257 | 231M | const Token &CurTok = Tokens[I]; |
258 | | // We don't want a space for the next token after a paste |
259 | | // operator. In valid code, the token will get smooshed onto the |
260 | | // preceding one anyway. In assembler-with-cpp mode, invalid |
261 | | // pastes are allowed through: in this case, we do not want the |
262 | | // extra whitespace to be added. For example, we want ". ## foo" |
263 | | // -> ".foo" not ". foo". |
264 | 231M | if (I != 0 && !Tokens[I-1].is(tok::hashhash) && CurTok.hasLeadingSpace())
265 | 20.4M | NextTokGetsSpace = true; |
266 | | |
267 | 231M | if (VCtx.isVAOptToken(CurTok)) { |
268 | 130 | MadeChange = true; |
269 | 130 | assert(Tokens[I + 1].is(tok::l_paren) && |
270 | 130 | "__VA_OPT__ must be followed by '('"); |
271 | | |
272 | 130 | ++I; // Skip the l_paren |
273 | 130 | VCtx.sawVAOptFollowedByOpeningParens(CurTok.getLocation(), |
274 | 130 | ResultToks.size()); |
275 | | |
276 | 130 | continue; |
277 | 130 | } |
278 | | |
279 | | // We have entered into the __VA_OPT__ context, so handle tokens |
280 | | // appropriately. |
281 | 231M | if (VCtx.isInVAOpt()) { |
282 | | // If we are about to process a token that is either an argument to |
283 | | // __VA_OPT__ or its closing rparen, then: |
284 | | // 1) If the token is the closing rparen that exits us out of __VA_OPT__, |
285 | | // perform any necessary stringification or placemarker processing, |
286 | | // and/or skip to the next token. |
287 | | // 2) else if macro was invoked without variadic arguments skip this |
288 | | // token. |
289 | | // 3) else (macro was invoked with variadic arguments) process the token |
290 | | // normally. |
291 | | |
292 | 644 | if (Tokens[I].is(tok::l_paren)) |
293 | 6 | VCtx.sawOpeningParen(Tokens[I].getLocation()); |
294 | | // Continue skipping tokens within __VA_OPT__ if the macro was not |
295 | | // called with variadic arguments, else let the rest of the loop handle |
296 | | // this token. Note sawClosingParen() returns true only if the r_paren matches |
297 | | // the closing r_paren of the __VA_OPT__. |
298 | 644 | if (!Tokens[I].is(tok::r_paren) || !VCtx.sawClosingParen()) {
299 | | // Lazily expand __VA_ARGS__ when we see the first __VA_OPT__. |
300 | 514 | if (!CalledWithVariadicArguments) { |
301 | 98 | CalledWithVariadicArguments = |
302 | 98 | ActualArgs->invokedWithVariadicArgument(Macro, PP); |
303 | 98 | } |
304 | 514 | if (!*CalledWithVariadicArguments) { |
305 | | // Skip this token. |
306 | 249 | continue; |
307 | 249 | } |
308 | | // ... else the macro was called with variadic arguments, and we do not |
309 | | // have a closing rparen - so process this token normally. |
310 | 514 | } else { |
311 | | // Current token is the closing r_paren which marks the end of the |
312 | | // __VA_OPT__ invocation, so handle any place-marker pasting (if |
313 | | // empty) by removing hashhash either before (if exists) or after. And |
314 | | // also stringify the entire contents if VAOPT was preceded by a hash, |
315 | | // but do so only after any token concatenation that needs to occur |
316 | | // within the contents of VAOPT. |
317 | | |
318 | 130 | if (VCtx.hasStringifyOrCharifyBefore()) { |
319 | | // Replace all the tokens just added from within VAOPT into a single |
320 | | // stringified token. This requires token-pasting to eagerly occur |
321 | | // within these tokens. If either the contents of VAOPT were empty |
322 | | // or the macro wasn't called with any variadic arguments, the result |
323 | | // is a token that represents an empty string. |
324 | 43 | stringifyVAOPTContents(ResultToks, VCtx, |
325 | 43 | /*ClosingParenLoc*/ Tokens[I].getLocation()); |
326 | | |
327 | 87 | } else if (/*No tokens within VAOPT*/ |
328 | 87 | ResultToks.size() == VCtx.getNumberOfTokensPriorToVAOpt()) { |
329 | | // Treat VAOPT as a placemarker token. Eat either the '##' before the |
330 | | // RHS/VAOPT (if one exists, suggesting that the LHS (if any) to that |
331 | | // hashhash was not a placemarker) or the '##' |
332 | | // after VAOPT, but not both. |
333 | | |
334 | 41 | if (ResultToks.size() && ResultToks.back().is(tok::hashhash)) {
335 | 12 | ResultToks.pop_back(); |
336 | 29 | } else if ((I + 1 != E) && Tokens[I + 1].is(tok::hashhash)) {
337 | 6 | ++I; // Skip the following hashhash. |
338 | 6 | } |
339 | 46 | } else { |
340 | | // If there's a ## before the __VA_OPT__, we might have discovered |
341 | | // that the __VA_OPT__ begins with a placeholder. We delay action on |
342 | | // that to now to avoid messing up our stashed count of tokens before |
343 | | // __VA_OPT__. |
344 | 46 | if (VCtx.beginsWithPlaceholder()) { |
345 | 7 | assert(VCtx.getNumberOfTokensPriorToVAOpt() > 0 && |
346 | 7 | ResultToks.size() >= VCtx.getNumberOfTokensPriorToVAOpt() && |
347 | 7 | ResultToks[VCtx.getNumberOfTokensPriorToVAOpt() - 1].is( |
348 | 7 | tok::hashhash) && |
349 | 7 | "no token paste before __VA_OPT__"); |
350 | 7 | ResultToks.erase(ResultToks.begin() + |
351 | 7 | VCtx.getNumberOfTokensPriorToVAOpt() - 1); |
352 | 7 | } |
353 | | // If the expansion of __VA_OPT__ ends with a placeholder, eat any |
354 | | // following '##' token. |
355 | 46 | if (VCtx.endsWithPlaceholder() && I + 1 != E &&
356 | 46 | Tokens[I + 1].is(tok::hashhash)) {
357 | 7 | ++I; |
358 | 7 | } |
359 | 46 | } |
360 | 130 | VCtx.reset(); |
361 | | // We processed __VA_OPT__'s closing paren (and the exit out of |
362 | | // __VA_OPT__), so skip to the next token. |
363 | 130 | continue; |
364 | 130 | } |
365 | 644 | } |
366 | | |
367 | | // If we found the stringify operator, get the argument stringified. The |
368 | | // preprocessor already verified that the following token is a macro |
369 | | // parameter or __VA_OPT__ when the #define was lexed. |
370 | | |
371 | 231M | if (CurTok.isOneOf(tok::hash, tok::hashat)) { |
372 | 1.86M | int ArgNo = Macro->getParameterNum(Tokens[I+1].getIdentifierInfo()); |
373 | 1.86M | assert((ArgNo != -1 || VCtx.isVAOptToken(Tokens[I + 1])) && |
374 | 1.86M | "Token following # is not an argument or __VA_OPT__!"); |
375 | | |
376 | 1.86M | if (ArgNo == -1) { |
377 | | // Handle the __VA_OPT__ case. |
378 | 43 | VCtx.sawHashOrHashAtBefore(NextTokGetsSpace, |
379 | 43 | CurTok.is(tok::hashat)); |
380 | 43 | continue; |
381 | 43 | } |
382 | | // Else handle the simple argument case. |
383 | 1.86M | SourceLocation ExpansionLocStart = |
384 | 1.86M | getExpansionLocForMacroDefLoc(CurTok.getLocation()); |
385 | 1.86M | SourceLocation ExpansionLocEnd = |
386 | 1.86M | getExpansionLocForMacroDefLoc(Tokens[I+1].getLocation()); |
387 | | |
388 | 1.86M | bool Charify = CurTok.is(tok::hashat); |
389 | 1.86M | const Token *UnexpArg = ActualArgs->getUnexpArgument(ArgNo); |
390 | 1.86M | Token Res = MacroArgs::StringifyArgument( |
391 | 1.86M | UnexpArg, PP, Charify, ExpansionLocStart, ExpansionLocEnd); |
392 | 1.86M | Res.setFlag(Token::StringifiedInMacro); |
393 | | |
394 | | // The stringified/charified string leading space flag gets set to match |
395 | | // the #/#@ operator. |
396 | 1.86M | if (NextTokGetsSpace) |
397 | 41.4k | Res.setFlag(Token::LeadingSpace); |
398 | | |
399 | 1.86M | ResultToks.push_back(Res); |
400 | 1.86M | MadeChange = true; |
401 | 1.86M | ++I; // Skip arg name. |
402 | 1.86M | NextTokGetsSpace = false; |
403 | 1.86M | continue; |
404 | 1.86M | } |
405 | | |
406 | | // Find out if there is a paste (##) operator before or after the token. |
407 | 229M | bool NonEmptyPasteBefore = |
408 | 229M | !ResultToks.empty() && ResultToks.back().is(tok::hashhash);
409 | 229M | bool PasteBefore = I != 0 && Tokens[I-1].is(tok::hashhash);
410 | 229M | bool PasteAfter = I+1 != E && Tokens[I+1].is(tok::hashhash);
411 | 229M | bool RParenAfter = I+1 != E && Tokens[I+1].is(tok::r_paren);
412 | | |
413 | 229M | assert((!NonEmptyPasteBefore || PasteBefore || VCtx.isInVAOpt()) && |
414 | 229M | "unexpected ## in ResultToks"); |
415 | | |
416 | | // Otherwise, if this is not an argument token, just add the token to the |
417 | | // output buffer. |
418 | 229M | IdentifierInfo *II = CurTok.getIdentifierInfo(); |
419 | 229M | int ArgNo = II ? Macro->getParameterNum(II) : -1;
420 | 229M | if (ArgNo == -1) { |
421 | | // This isn't an argument, just add it. |
422 | 187M | ResultToks.push_back(CurTok); |
423 | | |
424 | 187M | if (NextTokGetsSpace) { |
425 | 17.8M | ResultToks.back().setFlag(Token::LeadingSpace); |
426 | 17.8M | NextTokGetsSpace = false; |
427 | 170M | } else if (PasteBefore && !NonEmptyPasteBefore)
428 | 28 | ResultToks.back().clearFlag(Token::LeadingSpace); |
429 | | |
430 | 187M | continue; |
431 | 187M | } |
432 | | |
433 | | // An argument is expanded somehow, the result is different than the |
434 | | // input. |
435 | 41.5M | MadeChange = true; |
436 | | |
437 | | // Otherwise, this is a use of the argument. |
438 | | |
439 | | // In Microsoft mode, remove the comma before __VA_ARGS__ to ensure there |
440 | | // are no trailing commas if __VA_ARGS__ is empty. |
441 | 41.5M | if (!PasteBefore && ActualArgs->isVarargsElidedUse() &&
442 | 41.5M | MaybeRemoveCommaBeforeVaArgs(ResultToks, |
443 | 157k | /*HasPasteOperator=*/false, |
444 | 157k | Macro, ArgNo, PP)) |
445 | 3 | continue; |
446 | | |
447 | | // If it is not the LHS/RHS of a ## operator, we must pre-expand the |
448 | | // argument and substitute the expanded tokens into the result. This is |
449 | | // C99 6.10.3.1p1. |
450 | 41.5M | if (!PasteBefore && !PasteAfter) {
451 | 31.4M | const Token *ResultArgToks; |
452 | | |
453 | | // Only preexpand the argument if it could possibly need it. This |
454 | | // avoids some work in common cases. |
455 | 31.4M | const Token *ArgTok = ActualArgs->getUnexpArgument(ArgNo); |
456 | 31.4M | if (ActualArgs->ArgNeedsPreexpansion(ArgTok, PP)) |
457 | 6.17M | ResultArgToks = &ActualArgs->getPreExpArgument(ArgNo, PP)[0]; |
458 | 25.3M | else |
459 | 25.3M | ResultArgToks = ArgTok; // Use non-preexpanded tokens. |
460 | | |
461 | | // If the arg token expanded into anything, append it. |
462 | 31.4M | if (ResultArgToks->isNot(tok::eof)) { |
463 | 30.8M | size_t FirstResult = ResultToks.size(); |
464 | 30.8M | unsigned NumToks = MacroArgs::getArgLength(ResultArgToks); |
465 | 30.8M | ResultToks.append(ResultArgToks, ResultArgToks+NumToks); |
466 | | |
467 | | // In Microsoft-compatibility mode, we follow MSVC's preprocessing |
468 | | // behavior by not considering single commas from nested macro |
469 | | // expansions as argument separators. Set a flag on the token so we can |
470 | | // test for this later when the macro expansion is processed. |
471 | 30.8M | if (PP.getLangOpts().MSVCCompat && NumToks == 1 &&
472 | 30.8M | ResultToks.back().is(tok::comma))
473 | 6 | ResultToks.back().setFlag(Token::IgnoredComma); |
474 | | |
475 | | // If the '##' came from expanding an argument, turn it into 'unknown' |
476 | | // to avoid pasting. |
477 | 30.8M | for (Token &Tok : llvm::drop_begin(ResultToks, FirstResult)) |
478 | 93.5M | if (Tok.is(tok::hashhash)) |
479 | 2 | Tok.setKind(tok::unknown); |
480 | | |
481 | 30.8M | if(ExpandLocStart.isValid()) { |
482 | 30.8M | updateLocForMacroArgTokens(CurTok.getLocation(), |
483 | 30.8M | ResultToks.begin()+FirstResult, |
484 | 30.8M | ResultToks.end()); |
485 | 30.8M | } |
486 | | |
487 | | // If any tokens were substituted from the argument, the whitespace |
488 | | // before the first token should match the whitespace of the arg |
489 | | // identifier. |
490 | 30.8M | ResultToks[FirstResult].setFlagValue(Token::LeadingSpace, |
491 | 30.8M | NextTokGetsSpace); |
492 | 30.8M | ResultToks[FirstResult].setFlagValue(Token::StartOfLine, false); |
493 | 30.8M | NextTokGetsSpace = false; |
494 | 30.8M | } else { |
495 | | // We're creating a placeholder token. Usually this doesn't matter, |
496 | | // but it can affect paste behavior when at the start or end of a |
497 | | // __VA_OPT__. |
498 | 639k | if (NonEmptyPasteBefore) { |
499 | | // We're imagining a placeholder token is inserted here. If this is |
500 | | // the first token in a __VA_OPT__ after a ##, delete the ##. |
501 | 3 | assert(VCtx.isInVAOpt() && "should only happen inside a __VA_OPT__"); |
502 | 3 | VCtx.hasPlaceholderAfterHashhashAtStart(); |
503 | 639k | } else if (RParenAfter) |
504 | 375k | VCtx.hasPlaceholderBeforeRParen(); |
505 | 639k | } |
506 | 31.4M | continue; |
507 | 31.4M | } |
508 | | |
509 | | // Okay, we have a token that is either the LHS or RHS of a paste (##) |
510 | | // argument. It gets substituted as its non-pre-expanded tokens. |
511 | 10.0M | const Token *ArgToks = ActualArgs->getUnexpArgument(ArgNo); |
512 | 10.0M | unsigned NumToks = MacroArgs::getArgLength(ArgToks); |
513 | 10.0M | if (NumToks) { // Not an empty argument? |
514 | 10.0M | bool VaArgsPseudoPaste = false; |
515 | | // If this is the GNU ", ## __VA_ARGS__" extension, and we just learned |
516 | | // that __VA_ARGS__ expands to multiple tokens, avoid a pasting error when |
517 | | // the expander tries to paste ',' with the first token of the __VA_ARGS__ |
518 | | // expansion. |
519 | 10.0M | if (NonEmptyPasteBefore && ResultToks.size() >= 2 &&
520 | 10.0M | ResultToks[ResultToks.size()-2].is(tok::comma) &&
521 | 10.0M | (unsigned)ArgNo == Macro->getNumParams()-1 &&
522 | 10.0M | Macro->isVariadic()) {
523 | 45.5k | VaArgsPseudoPaste = true; |
524 | | // Remove the paste operator, report use of the extension. |
525 | 45.5k | PP.Diag(ResultToks.pop_back_val().getLocation(), diag::ext_paste_comma); |
526 | 45.5k | } |
527 | | |
528 | 10.0M | ResultToks.append(ArgToks, ArgToks+NumToks); |
529 | | |
530 | | // If the '##' came from expanding an argument, turn it into 'unknown' |
531 | | // to avoid pasting. |
532 | 10.0M | for (Token &Tok : llvm::make_range(ResultToks.end() - NumToks, |
533 | 23.1M | ResultToks.end())) { |
534 | 23.1M | if (Tok.is(tok::hashhash)) |
535 | 0 | Tok.setKind(tok::unknown); |
536 | 23.1M | } |
537 | | |
538 | 10.0M | if (ExpandLocStart.isValid()) { |
539 | 10.0M | updateLocForMacroArgTokens(CurTok.getLocation(), |
540 | 10.0M | ResultToks.end()-NumToks, ResultToks.end()); |
541 | 10.0M | } |
542 | | |
543 | | // Transfer the leading whitespace information from the token |
544 | | // (the macro argument) onto the first token of the |
545 | | // expansion. Note that we don't do this for the GNU |
546 | | // pseudo-paste extension ", ## __VA_ARGS__". |
547 | 10.0M | if (!VaArgsPseudoPaste) { |
548 | 9.96M | ResultToks[ResultToks.size() - NumToks].setFlagValue(Token::StartOfLine, |
549 | 9.96M | false); |
550 | 9.96M | ResultToks[ResultToks.size() - NumToks].setFlagValue( |
551 | 9.96M | Token::LeadingSpace, NextTokGetsSpace); |
552 | 9.96M | } |
553 | | |
554 | 10.0M | NextTokGetsSpace = false; |
555 | 10.0M | continue; |
556 | 10.0M | } |
557 | | |
558 | | // If an empty argument is on the LHS or RHS of a paste, the standard (C99 |
559 | | // 6.10.3.3p2,3) calls for a bunch of placemarker stuff to occur. We |
560 | | // implement this by eating ## operators when a LHS or RHS expands to |
561 | | // empty. |
562 | 46.5k | if (PasteAfter) { |
563 | | // Discard the argument token and skip (don't copy to the expansion |
564 | | // buffer) the paste operator after it. |
565 | 8.37k | ++I; |
566 | 8.37k | continue; |
567 | 8.37k | } |
568 | | |
569 | 38.2k | if (RParenAfter && !NonEmptyPasteBefore)
570 | 5 | VCtx.hasPlaceholderBeforeRParen(); |
571 | | |
572 | | // If this is on the RHS of a paste operator, we've already copied the |
573 | | // paste operator to the ResultToks list, unless the LHS was empty too. |
574 | | // Remove it. |
575 | 38.2k | assert(PasteBefore); |
576 | 38.2k | if (NonEmptyPasteBefore) { |
577 | 38.1k | assert(ResultToks.back().is(tok::hashhash)); |
578 | | // Do not remove the paste operator if it is the one before __VA_OPT__ |
579 | | // (and we are still processing tokens within VA_OPT). We handle the case |
580 | | // of removing the paste operator if __VA_OPT__ reduces to the notional |
581 | | // placemarker above when we encounter the closing paren of VA_OPT. |
582 | 38.1k | if (!VCtx.isInVAOpt() || |
583 | 38.1k | ResultToks.size() > VCtx.getNumberOfTokensPriorToVAOpt())
584 | 38.1k | ResultToks.pop_back(); |
585 | 4 | else |
586 | 4 | VCtx.hasPlaceholderAfterHashhashAtStart(); |
587 | 38.1k | } |
588 | | |
589 | | // If this is the __VA_ARGS__ token, and if the argument wasn't provided, |
590 | | // and if the macro had at least one real argument, and if the token before |
591 | | // the ## was a comma, remove the comma. This is a GCC extension which is |
592 | | // disabled when using -std=c99. |
593 | 38.2k | if (ActualArgs->isVarargsElidedUse()) |
594 | 10.5k | MaybeRemoveCommaBeforeVaArgs(ResultToks, |
595 | 10.5k | /*HasPasteOperator=*/true, |
596 | 10.5k | Macro, ArgNo, PP); |
597 | 38.2k | } |
598 | | |
599 | | // If anything changed, install this as the new Tokens list. |
600 | 28.6M | if (MadeChange) { |
601 | 28.5M | assert(!OwnsTokens && "This would leak if we already own the token list"); |
602 | | // This is deleted in the dtor. |
603 | 28.5M | NumTokens = ResultToks.size(); |
604 | | // The tokens will be added to Preprocessor's cache and will be removed |
605 | | // when this TokenLexer finishes lexing them. |
606 | 28.5M | Tokens = PP.cacheMacroExpandedTokens(this, ResultToks); |
607 | | |
608 | | // The preprocessor cache of macro-expanded tokens owns these tokens, not us.
609 | 28.5M | OwnsTokens = false; |
610 | 28.5M | } |
611 | 28.6M | } |
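// [Editorial illustration, not part of TokenLexer.cpp] The C99 6.10.3.1p1
// rule implemented above in practice: with
//   #define E 42
//   #define ID(x) x
//   #define STR(x) #x
// ID(E) pre-expands its argument and produces 42, whereas STR(E) uses the
// unexpanded argument (it is an operand of #) and produces "E".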
612 | | |
613 | | /// Checks if two tokens form wide string literal. |
614 | | static bool isWideStringLiteralFromMacro(const Token &FirstTok, |
615 | 1.16M | const Token &SecondTok) { |
616 | 1.16M | return FirstTok.is(tok::identifier) && |
617 | 1.16M | FirstTok.getIdentifierInfo()->isStr("L") && SecondTok.isLiteral() &&
618 | 1.16M | SecondTok.stringifiedInMacro();
619 | 1.16M | } |
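// [Editorial illustration, not part of TokenLexer.cpp] Under
// -fms-compatibility, with
//   #define WIDE(x) L#x
// WIDE(abc) glues the identifier 'L' onto the stringified argument to form
// the wide string literal L"abc", which is what this predicate detects.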
620 | | |
621 | | /// Lex - Lex and return a token from this macro stream. |
622 | 796M | bool TokenLexer::Lex(Token &Tok) { |
623 | | // Lexing off the end of the macro, pop this macro off the expansion stack. |
624 | 796M | if (isAtEnd()) { |
625 | | // If this is a macro (not a token stream), mark the macro enabled now |
626 | | // that it is no longer being expanded. |
627 | 71.4M | if (Macro) Macro->EnableMacro();
628 | | |
629 | 71.4M | Tok.startToken(); |
630 | 71.4M | Tok.setFlagValue(Token::StartOfLine , AtStartOfLine); |
631 | 71.4M | Tok.setFlagValue(Token::LeadingSpace, HasLeadingSpace || NextTokGetsSpace);
632 | 71.4M | if (CurTokenIdx == 0) |
633 | 262k | Tok.setFlag(Token::LeadingEmptyMacro); |
634 | 71.4M | return PP.HandleEndOfTokenLexer(Tok); |
635 | 71.4M | } |
636 | | |
637 | 724M | SourceManager &SM = PP.getSourceManager(); |
638 | | |
639 | | // If this is the first token of the expanded result, we inherit spacing |
640 | | // properties later. |
641 | 724M | bool isFirstToken = CurTokenIdx == 0; |
642 | | |
643 | | // Get the next token to return. |
644 | 724M | Tok = Tokens[CurTokenIdx++]; |
645 | 724M | if (IsReinject) |
646 | 21.5M | Tok.setFlag(Token::IsReinjected); |
647 | | |
648 | 724M | bool TokenIsFromPaste = false; |
649 | | |
650 | | // If this token is followed by a token paste (##) operator, paste the tokens! |
651 | | // Note that ## is a normal token when not expanding a macro. |
652 | 724M | if (!isAtEnd() && Macro &&
653 | 724M | (Tokens[CurTokenIdx].is(tok::hashhash) ||
654 | | // Special processing of L#x macros in -fms-compatibility mode. |
655 | | // Microsoft compiler is able to form a wide string literal from |
656 | | // 'L#macro_arg' construct in a function-like macro. |
657 | 617M | (PP.getLangOpts().MSVCCompat &&
658 | 610M | isWideStringLiteralFromMacro(Tok, Tokens[CurTokenIdx])))) {
659 | | // When handling the microsoft /##/ extension, the final token is |
660 | | // returned by pasteTokens, not the pasted token. |
661 | 7.67M | if (pasteTokens(Tok)) |
662 | 4 | return true; |
663 | | |
664 | 7.67M | TokenIsFromPaste = true; |
665 | 7.67M | } |
666 | | |
667 | | // The token's current location indicate where the token was lexed from. We |
668 | | // need this information to compute the spelling of the token, but any |
669 | | // diagnostics for the expanded token should appear as if they came from |
670 | | // ExpansionLoc. Pull this information together into a new SourceLocation |
671 | | // that captures all of this. |
672 | 724M | if (ExpandLocStart.isValid() && // Don't do this for token streams. |
673 | | // Check that the token's location was not already set properly. |
674 | 724M | SM.isBeforeInSLocAddrSpace(Tok.getLocation(), MacroStartSLocOffset)) {
675 | 568M | SourceLocation instLoc; |
676 | 568M | if (Tok.is(tok::comment)) { |
677 | 21 | instLoc = SM.createExpansionLoc(Tok.getLocation(), |
678 | 21 | ExpandLocStart, |
679 | 21 | ExpandLocEnd, |
680 | 21 | Tok.getLength()); |
681 | 568M | } else { |
682 | 568M | instLoc = getExpansionLocForMacroDefLoc(Tok.getLocation()); |
683 | 568M | } |
684 | | |
685 | 568M | Tok.setLocation(instLoc); |
686 | 568M | } |
687 | | |
688 | | // If this is the first token, set the lexical properties of the token to |
689 | | // match the lexical properties of the macro identifier. |
690 | 724M | if (isFirstToken) { |
691 | 77.3M | Tok.setFlagValue(Token::StartOfLine , AtStartOfLine); |
692 | 77.3M | Tok.setFlagValue(Token::LeadingSpace, HasLeadingSpace); |
693 | 647M | } else { |
694 | | // If this is not the first token, we may still need to pass through |
695 | | // leading whitespace if we've expanded a macro. |
696 | 647M | if (AtStartOfLine) Tok.setFlag(Token::StartOfLine);
697 | 647M | if (HasLeadingSpace) Tok.setFlag(Token::LeadingSpace);
698 | 647M | } |
699 | 724M | AtStartOfLine = false; |
700 | 724M | HasLeadingSpace = false; |
701 | | |
702 | | // Handle recursive expansion! |
703 | 724M | if (!Tok.isAnnotation() && Tok.getIdentifierInfo() != nullptr) {
704 | | // Change the kind of this identifier to the appropriate token kind, e.g. |
705 | | // turning "for" into a keyword. |
706 | 287M | IdentifierInfo *II = Tok.getIdentifierInfo(); |
707 | 287M | Tok.setKind(II->getTokenID()); |
708 | | |
709 | | // If this identifier was poisoned and from a paste, emit an error. This |
710 | | // won't be handled by Preprocessor::HandleIdentifier because this is coming |
711 | | // from a macro expansion. |
712 | 287M | if (II->isPoisoned() && TokenIsFromPaste) {
713 | 2 | PP.HandlePoisonedIdentifier(Tok); |
714 | 2 | } |
715 | | |
716 | 287M | if (!DisableMacroExpansion && II->isHandleIdentifierCase())
717 | 59.7M | return PP.HandleIdentifier(Tok); |
718 | 287M | } |
719 | | |
720 | | // Otherwise, return a normal token. |
721 | 664M | return true; |
722 | 724M | } |
723 | | |
724 | 7.67M | bool TokenLexer::pasteTokens(Token &Tok) { |
725 | 7.67M | return pasteTokens(Tok, llvm::ArrayRef(Tokens, NumTokens), CurTokenIdx); |
726 | 7.67M | } |
727 | | |
728 | | /// LHSTok is the LHS of a ## operator, and CurTokenIdx is the ## |
729 | | /// operator. Read the ## and RHS, and paste the LHS/RHS together. If there |
730 | | /// are more ## after it, chomp them iteratively. Return the result as LHSTok. |
731 | | /// If this returns true, the caller should immediately return the token. |
732 | | bool TokenLexer::pasteTokens(Token &LHSTok, ArrayRef<Token> TokenStream, |
733 | 7.67M | unsigned int &CurIdx) { |
734 | 7.67M | assert(CurIdx > 0 && "## can not be the first token within tokens"); |
735 | 7.67M | assert((TokenStream[CurIdx].is(tok::hashhash) || |
736 | 7.67M | (PP.getLangOpts().MSVCCompat && |
737 | 7.67M | isWideStringLiteralFromMacro(LHSTok, TokenStream[CurIdx]))) && |
738 | 7.67M | "Token at this Index must be ## or part of the MSVC 'L " |
739 | 7.67M | "#macro-arg' pasting pair"); |
740 | | |
741 | | // MSVC: If previous token was pasted, this must be a recovery from an invalid |
742 | | // paste operation. Ignore spaces before this token to mimic MSVC output. |
743 | | // Required for generating valid UUID strings in some MS headers. |
744 | 7.67M | if (PP.getLangOpts().MicrosoftExt && (CurIdx >= 2) &&
745 | 7.67M | TokenStream[CurIdx - 2].is(tok::hashhash))
746 | 8 | LHSTok.clearFlag(Token::LeadingSpace); |
747 | | |
748 | 7.67M | SmallString<128> Buffer; |
749 | 7.67M | const char *ResultTokStrPtr = nullptr; |
750 | 7.67M | SourceLocation StartLoc = LHSTok.getLocation(); |
751 | 7.67M | SourceLocation PasteOpLoc; |
752 | | |
753 | 17.2M | auto IsAtEnd = [&TokenStream, &CurIdx] { |
754 | 17.2M | return TokenStream.size() == CurIdx; |
755 | 17.2M | }; |
756 | | |
757 | 8.64M | do { |
758 | | // Consume the ## operator if any. |
759 | 8.64M | PasteOpLoc = TokenStream[CurIdx].getLocation(); |
760 | 8.64M | if (TokenStream[CurIdx].is(tok::hashhash)) |
761 | 8.64M | ++CurIdx; |
762 | 8.64M | assert(!IsAtEnd() && "No token on the RHS of a paste operator!"); |
763 | | |
764 | | // Get the RHS token. |
765 | 8.64M | const Token &RHS = TokenStream[CurIdx]; |
766 | | |
767 | | // Allocate space for the result token. This is guaranteed to be enough for |
768 | | // the two tokens. |
769 | 8.64M | Buffer.resize(LHSTok.getLength() + RHS.getLength()); |
770 | | |
771 | | // Get the spelling of the LHS token in Buffer. |
772 | 8.64M | const char *BufPtr = &Buffer[0]; |
773 | 8.64M | bool Invalid = false; |
774 | 8.64M | unsigned LHSLen = PP.getSpelling(LHSTok, BufPtr, &Invalid); |
775 | 8.64M | if (BufPtr != &Buffer[0]) // Really, we want the chars in Buffer! |
776 | 8.64M | memcpy(&Buffer[0], BufPtr, LHSLen); |
777 | 8.64M | if (Invalid) |
778 | 0 | return true; |
779 | | |
780 | 8.64M | BufPtr = Buffer.data() + LHSLen; |
781 | 8.64M | unsigned RHSLen = PP.getSpelling(RHS, BufPtr, &Invalid); |
782 | 8.64M | if (Invalid) |
783 | 0 | return true; |
784 | 8.64M | if (RHSLen && BufPtr != &Buffer[LHSLen])
785 | | // Really, we want the chars in Buffer! |
786 | 8.64M | memcpy(&Buffer[LHSLen], BufPtr, RHSLen); |
787 | | |
788 | | // Trim excess space. |
789 | 8.64M | Buffer.resize(LHSLen+RHSLen); |
790 | | |
791 | | // Plop the pasted result (including the trailing newline and null) into a |
792 | | // scratch buffer where we can lex it. |
793 | 8.64M | Token ResultTokTmp; |
794 | 8.64M | ResultTokTmp.startToken(); |
795 | | |
796 | | // Claim that the tmp token is a string_literal so that we can get the |
797 | | // character pointer back from CreateString in getLiteralData(). |
798 | 8.64M | ResultTokTmp.setKind(tok::string_literal); |
799 | 8.64M | PP.CreateString(Buffer, ResultTokTmp); |
800 | 8.64M | SourceLocation ResultTokLoc = ResultTokTmp.getLocation(); |
801 | 8.64M | ResultTokStrPtr = ResultTokTmp.getLiteralData(); |
802 | | |
803 | | // Lex the resultant pasted token into Result. |
804 | 8.64M | Token Result; |
805 | | |
806 | 8.64M | if (LHSTok.isAnyIdentifier() && RHS.isAnyIdentifier()) {
807 | | // Common paste case: identifier+identifier = identifier. Avoid creating |
808 | | // a lexer and other overhead. |
809 | 6.83M | PP.IncrementPasteCounter(true); |
810 | 6.83M | Result.startToken(); |
811 | 6.83M | Result.setKind(tok::raw_identifier); |
812 | 6.83M | Result.setRawIdentifierData(ResultTokStrPtr); |
813 | 6.83M | Result.setLocation(ResultTokLoc); |
814 | 6.83M | Result.setLength(LHSLen+RHSLen); |
815 | 6.83M | } else { |
816 | 1.80M | PP.IncrementPasteCounter(false); |
817 | | |
818 | 1.80M | assert(ResultTokLoc.isFileID() && |
819 | 1.80M | "Should be a raw location into scratch buffer"); |
820 | 1.80M | SourceManager &SourceMgr = PP.getSourceManager(); |
821 | 1.80M | FileID LocFileID = SourceMgr.getFileID(ResultTokLoc); |
822 | | |
823 | 1.80M | bool Invalid = false; |
824 | 1.80M | const char *ScratchBufStart |
825 | 1.80M | = SourceMgr.getBufferData(LocFileID, &Invalid).data(); |
826 | 1.80M | if (Invalid) |
827 | 0 | return false; |
828 | | |
829 | | // Make a lexer to lex this string from. Lex just this one token. |
830 | | // Make a lexer object so that we lex and expand the paste result. |
831 | 1.80M | Lexer TL(SourceMgr.getLocForStartOfFile(LocFileID), |
832 | 1.80M | PP.getLangOpts(), ScratchBufStart, |
833 | 1.80M | ResultTokStrPtr, ResultTokStrPtr+LHSLen+RHSLen); |
834 | | |
835 | | // Lex a token in raw mode. This way it won't look up identifiers |
836 | | // automatically, lexing off the end will return an eof token, and |
837 | | // warnings are disabled. This returns true if the result token is the |
838 | | // entire buffer. |
839 | 1.80M | bool isInvalid = !TL.LexFromRawLexer(Result); |
840 | | |
841 | | // If we got an EOF token, we didn't form even ONE token. For example, we |
842 | | // did "/ ## /" to get "//". |
843 | 1.80M | isInvalid |= Result.is(tok::eof); |
844 | | |
845 | | // If pasting the two tokens didn't form a full new token, this is an |
846 | | // error. This occurs with "x ## +" and other stuff. Return with LHSTok |
847 | | // unmodified and with RHS as the next token to lex. |
848 | 1.80M | if (isInvalid) { |
849 | | // Explicitly convert the token location to have proper expansion |
850 | | // information so that the user knows where it came from. |
851 | 30 | SourceManager &SM = PP.getSourceManager(); |
852 | 30 | SourceLocation Loc = |
853 | 30 | SM.createExpansionLoc(PasteOpLoc, ExpandLocStart, ExpandLocEnd, 2); |
854 | | |
855 | | // Test for the Microsoft extension of /##/ turning into // here on the |
856 | | // error path. |
857 | 30 | if (PP.getLangOpts().MicrosoftExt && LHSTok.is(tok::slash) &&
858 | 30 | RHS.is(tok::slash)) {
859 | 4 | HandleMicrosoftCommentPaste(LHSTok, Loc); |
860 | 4 | return true; |
861 | 4 | } |
862 | | |
863 | | // Do not emit the error when preprocessing assembler code. |
864 | 26 | if (!PP.getLangOpts().AsmPreprocessor) { |
865 | | // If we're in microsoft extensions mode, downgrade this from a hard |
866 | | // error to an extension that defaults to an error. This allows |
867 | | // disabling it. |
868 | 17 | PP.Diag(Loc, PP.getLangOpts().MicrosoftExt ? diag::ext_pp_bad_paste_ms
869 | 17 | : diag::err_pp_bad_paste)
870 | 17 | << Buffer; |
871 | 17 | } |
872 | | |
873 | | // An error has occurred so exit loop. |
874 | 26 | break; |
875 | 30 | } |
876 | | |
877 | | // Turn ## into 'unknown' to avoid # ## # from looking like a paste |
878 | | // operator. |
879 | 1.80M | if (Result.is(tok::hashhash)) |
880 | 2 | Result.setKind(tok::unknown); |
881 | 1.80M | } |
882 | | |
883 | | // Transfer properties of the LHS over the Result. |
884 | 8.64M | Result.setFlagValue(Token::StartOfLine , LHSTok.isAtStartOfLine()); |
885 | 8.64M | Result.setFlagValue(Token::LeadingSpace, LHSTok.hasLeadingSpace()); |
886 | | |
887 | | // Finally, replace LHS with the result, consume the RHS, and iterate. |
888 | 8.64M | ++CurIdx; |
889 | 8.64M | LHSTok = Result; |
890 | 8.64M | } while (!IsAtEnd() && TokenStream[CurIdx].is(tok::hashhash));
891 | | |
892 | 7.67M | SourceLocation EndLoc = TokenStream[CurIdx - 1].getLocation(); |
893 | | |
894 | | // The token's current location indicate where the token was lexed from. We |
895 | | // need this information to compute the spelling of the token, but any |
896 | | // diagnostics for the expanded token should appear as if the token was |
897 | | // expanded from the full ## expression. Pull this information together into |
898 | | // a new SourceLocation that captures all of this. |
899 | 7.67M | SourceManager &SM = PP.getSourceManager(); |
900 | 7.67M | if (StartLoc.isFileID()) |
901 | 5.84M | StartLoc = getExpansionLocForMacroDefLoc(StartLoc); |
902 | 7.67M | if (EndLoc.isFileID()) |
903 | 46.5k | EndLoc = getExpansionLocForMacroDefLoc(EndLoc); |
904 | 7.67M | FileID MacroFID = SM.getFileID(MacroExpansionStart); |
905 | 9.50M | while (SM.getFileID(StartLoc) != MacroFID) |
906 | 1.82M | StartLoc = SM.getImmediateExpansionRange(StartLoc).getBegin(); |
907 | 15.2M | while (SM.getFileID(EndLoc) != MacroFID) |
908 | 7.62M | EndLoc = SM.getImmediateExpansionRange(EndLoc).getEnd(); |
909 | | |
910 | 7.67M | LHSTok.setLocation(SM.createExpansionLoc(LHSTok.getLocation(), StartLoc, EndLoc, |
911 | 7.67M | LHSTok.getLength())); |
912 | | |
913 | | // Now that we got the result token, it will be subject to expansion. Since |
914 | | // token pasting re-lexes the result token in raw mode, identifier information |
915 | | // isn't looked up. As such, if the result is an identifier, look up id info. |
916 | 7.67M | if (LHSTok.is(tok::raw_identifier)) { |
917 | | // Look up the identifier info for the token. We disabled identifier lookup |
918 | | // by saying we're skipping contents, so we need to do this manually. |
919 | 7.66M | PP.LookUpIdentifierInfo(LHSTok); |
920 | 7.66M | } |
921 | 7.67M | return false; |
922 | 7.67M | } |
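// [Editorial illustration, not part of TokenLexer.cpp] Example of the two
// paths above: with
//   #define CAT(a, b) a ## b
// CAT(foo, bar) re-lexes the pasted spelling "foobar" as one identifier,
// while CAT(x, +) does not form a single valid token and is diagnosed via
// err_pp_bad_paste (downgraded to ext_pp_bad_paste_ms under -fms-extensions).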
923 | | |
924 | | /// isNextTokenLParen - If the next token lexed will pop this macro off the |
925 | | /// expansion stack, return 2. If the next unexpanded token is a '(', return |
926 | | /// 1, otherwise return 0. |
927 | 30.2M | unsigned TokenLexer::isNextTokenLParen() const { |
928 | | // Out of tokens? |
929 | 30.2M | if (isAtEnd()) |
930 | 2.36M | return 2; |
931 | 27.8M | return Tokens[CurTokenIdx].is(tok::l_paren); |
932 | 30.2M | } |
933 | | |
934 | | /// isParsingPreprocessorDirective - Return true if we are in the middle of a |
935 | | /// preprocessor directive. |
936 | 616k | bool TokenLexer::isParsingPreprocessorDirective() const { |
937 | 616k | return Tokens[NumTokens-1].is(tok::eod) && !isAtEnd();
938 | 616k | } |
939 | | |
940 | | /// HandleMicrosoftCommentPaste - In microsoft compatibility mode, /##/ pastes |
941 | | /// together to form a comment that comments out everything in the current |
942 | | /// macro, other active macros, and anything left on the current physical |
943 | | /// source line of the expanded buffer. Handle this by returning the |
944 | | /// first token on the next line. |
945 | 4 | void TokenLexer::HandleMicrosoftCommentPaste(Token &Tok, SourceLocation OpLoc) { |
946 | 4 | PP.Diag(OpLoc, diag::ext_comment_paste_microsoft); |
947 | | |
948 | | // We 'comment out' the rest of this macro by just ignoring the rest of the |
949 | | // tokens that have not been lexed yet, if any. |
950 | | |
951 | | // Since this must be a macro, mark the macro enabled now that it is no longer |
952 | | // being expanded. |
953 | 4 | assert(Macro && "Token streams can't paste comments"); |
954 | 4 | Macro->EnableMacro(); |
955 | | |
956 | 4 | PP.HandleMicrosoftCommentPaste(Tok); |
957 | 4 | } |
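// [Editorial illustration, not part of TokenLexer.cpp] Example of the /##/
// extension handled here: with
//   #define NOP(x) /##/ x
// NOP(anything) pastes the two slashes into '//' under -fms-extensions,
// commenting out the remainder of the physical source line.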
958 | | |
959 | | /// If \arg loc is a file ID and points inside the current macro |
960 | | /// definition, returns the appropriate source location pointing at the |
961 | | /// macro expansion source location entry, otherwise it returns an invalid |
962 | | /// SourceLocation. |
963 | | SourceLocation |
964 | 618M | TokenLexer::getExpansionLocForMacroDefLoc(SourceLocation loc) const { |
965 | 618M | assert(ExpandLocStart.isValid() && MacroExpansionStart.isValid() && |
966 | 618M | "Not appropriate for token streams"); |
967 | 618M | assert(loc.isValid() && loc.isFileID()); |
968 | | |
969 | 618M | SourceManager &SM = PP.getSourceManager(); |
970 | 618M | assert(SM.isInSLocAddrSpace(loc, MacroDefStart, MacroDefLength) && |
971 | 618M | "Expected loc to come from the macro definition"); |
972 | | |
973 | 618M | SourceLocation::UIntTy relativeOffset = 0; |
974 | 618M | SM.isInSLocAddrSpace(loc, MacroDefStart, MacroDefLength, &relativeOffset); |
975 | 618M | return MacroExpansionStart.getLocWithOffset(relativeOffset); |
976 | 618M | } |
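// [Editorial illustration, not part of TokenLexer.cpp] Because Init()
// reserved one expansion SLocEntry covering the whole definition, a token
// spelled N characters into the macro body simply maps to
// MacroExpansionStart.getLocWithOffset(N); no per-token SLocEntry is needed.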
977 | | |
978 | | /// Finds the tokens that are consecutive (from the same FileID) |
979 | | /// creates a single SLocEntry, and assigns SourceLocations to each token that |
980 | | /// point to that SLocEntry. e.g for |
981 | | /// assert(foo == bar); |
982 | | /// There will be a single SLocEntry for the "foo == bar" chunk and locations |
983 | | /// for the 'foo', '==', 'bar' tokens will point inside that chunk. |
984 | | /// |
985 | | /// \arg begin_tokens will be updated to a position past all the found |
986 | | /// consecutive tokens. |
987 | | static void updateConsecutiveMacroArgTokens(SourceManager &SM, |
988 | | SourceLocation ExpandLoc, |
989 | | Token *&begin_tokens, |
990 | 19.0M | Token * end_tokens) { |
991 | 19.0M | assert(begin_tokens + 1 < end_tokens); |
992 | 19.0M | SourceLocation BeginLoc = begin_tokens->getLocation(); |
993 | 19.0M | llvm::MutableArrayRef<Token> All(begin_tokens, end_tokens); |
994 | 19.0M | llvm::MutableArrayRef<Token> Partition; |
995 | | |
996 | 86.3M | auto NearLast = [&, Last = BeginLoc](SourceLocation Loc) mutable { |
997 | | // The maximum distance between two consecutive tokens in a partition. |
998 | | // This is an important trick to avoid using too much SourceLocation address |
999 | | // space! |
1000 | 86.3M | static constexpr SourceLocation::IntTy MaxDistance = 50; |
1001 | 86.3M | auto Distance = Loc.getRawEncoding() - Last.getRawEncoding(); |
1002 | 86.3M | Last = Loc; |
1003 | 86.3M | return Distance <= MaxDistance; |
1004 | 86.3M | }; |
1005 | | |
1006 | | // Partition the tokens by their FileID. |
1007 | | // This is a hot function, and calling getFileID can be expensive, the |
1008 | | // implementation is optimized by reducing the number of getFileID. |
1009 | 19.0M | if (BeginLoc.isFileID()) { |
1010 | | // Consecutive tokens not written in macros must be from the same file. |
1011 | | // (Neither #include nor eof can occur inside a macro argument.) |
1012 | 37.7M | Partition = All.take_while([&](const Token &T) { |
1013 | 37.7M | return T.getLocation().isFileID() && NearLast(T.getLocation());
1014 | 37.7M | }); |
1015 | 15.1M | } else { |
1016 | | // Call getFileID once to calculate the bounds, and use the cheaper |
1017 | | // sourcelocation-against-bounds comparison. |
1018 | 15.1M | FileID BeginFID = SM.getFileID(BeginLoc); |
1019 | 15.1M | SourceLocation Limit = |
1020 | 15.1M | SM.getComposedLoc(BeginFID, SM.getFileIDSize(BeginFID)); |
1021 | 57.1M | Partition = All.take_while([&](const Token &T) { |
1022 | | // NOTE: the Limit is included! The lexer recovery only ever inserts a |
1023 | | // single token past the end of the FileID, specifically the ) when a |
1024 | | // macro-arg containing a comma should be guarded by parentheses. |
1025 | | // |
1026 | | // It is safe to include the Limit here because SourceManager allocates |
1027 | | // FileSize + 1 for each SLocEntry. |
1028 | | // |
1029 | | // See https://github.com/llvm/llvm-project/issues/60722. |
1030 | 57.1M | return T.getLocation() >= BeginLoc && T.getLocation() <= Limit
1031 | 57.1M | && NearLast(T.getLocation());
1032 | 57.1M | }); |
1033 | 15.1M | } |
1034 | 19.0M | assert(!Partition.empty()); |
1035 | | |
1036 | | // For the consecutive tokens, find the length of the SLocEntry to contain |
1037 | | // all of them. |
1038 | 19.0M | SourceLocation::UIntTy FullLength = |
1039 | 19.0M | Partition.back().getEndLoc().getRawEncoding() - |
1040 | 19.0M | Partition.front().getLocation().getRawEncoding(); |
1041 | | // Create a macro expansion SLocEntry that will "contain" all of the tokens. |
1042 | 19.0M | SourceLocation Expansion = |
1043 | 19.0M | SM.createMacroArgExpansionLoc(BeginLoc, ExpandLoc, FullLength); |
1044 | | |
1045 | | #ifdef EXPENSIVE_CHECKS |
1046 | | assert(llvm::all_of(Partition.drop_front(), |
1047 | | [&SM, ID = SM.getFileID(Partition.front().getLocation())]( |
1048 | | const Token &T) { |
1049 | | return ID == SM.getFileID(T.getLocation()); |
1050 | | }) && |
1051 | | "Must have the same FIleID!"); |
1052 | | #endif |
1053 | | // Change the location of the tokens from the spelling location to the new |
1054 | | // expanded location. |
1055 | 86.2M | for (Token& T : Partition) { |
1056 | 86.2M | SourceLocation::IntTy RelativeOffset = |
1057 | 86.2M | T.getLocation().getRawEncoding() - BeginLoc.getRawEncoding(); |
1058 | 86.2M | T.setLocation(Expansion.getLocWithOffset(RelativeOffset)); |
1059 | 86.2M | } |
1060 | 19.0M | begin_tokens = &Partition.back() + 1; |
1061 | 19.0M | } |
1062 | | |
1063 | | /// Creates SLocEntries and updates the locations of macro argument |
1064 | | /// tokens to their new expanded locations. |
1065 | | /// |
1066 | | /// \param ArgIdSpellLoc the location of the macro argument id inside the macro |
1067 | | /// definition. |
1068 | | void TokenLexer::updateLocForMacroArgTokens(SourceLocation ArgIdSpellLoc, |
1069 | | Token *begin_tokens, |
1070 | 40.8M | Token *end_tokens) { |
1071 | 40.8M | SourceManager &SM = PP.getSourceManager(); |
1072 | | |
1073 | 40.8M | SourceLocation ExpandLoc = |
1074 | 40.8M | getExpansionLocForMacroDefLoc(ArgIdSpellLoc); |
1075 | | |
1076 | 59.9M | while (begin_tokens < end_tokens) { |
1077 | | // If there's only one token just create a SLocEntry for it. |
1078 | 49.5M | if (end_tokens - begin_tokens == 1) { |
1079 | 30.4M | Token &Tok = *begin_tokens; |
1080 | 30.4M | Tok.setLocation(SM.createMacroArgExpansionLoc(Tok.getLocation(), |
1081 | 30.4M | ExpandLoc, |
1082 | 30.4M | Tok.getLength())); |
1083 | 30.4M | return; |
1084 | 30.4M | } |
1085 | | |
1086 | 19.0M | updateConsecutiveMacroArgTokens(SM, ExpandLoc, begin_tokens, end_tokens); |
1087 | 19.0M | } |
1088 | 40.8M | } |
1089 | | |
1090 | 36.0M | void TokenLexer::PropagateLineStartLeadingSpaceInfo(Token &Result) { |
1091 | 36.0M | AtStartOfLine = Result.isAtStartOfLine(); |
1092 | 36.0M | HasLeadingSpace = Result.hasLeadingSpace(); |
1093 | 36.0M | } |