xref: /freebsd-src/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp (revision fcaf7f8644a9988098ac6be2165bce3ea4786e91)
1 //===--- FormatTokenLexer.cpp - Lex FormatTokens -------------*- C++ ----*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file implements FormatTokenLexer, which tokenizes a source file
11 /// into a FormatToken stream suitable for ClangFormat.
12 ///
13 //===----------------------------------------------------------------------===//
14 
15 #include "FormatTokenLexer.h"
16 #include "FormatToken.h"
17 #include "clang/Basic/SourceLocation.h"
18 #include "clang/Basic/SourceManager.h"
19 #include "clang/Format/Format.h"
20 #include "llvm/Support/Regex.h"
21 
22 namespace clang {
23 namespace format {
24 
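// Sets up a raw Lexer over the file in whitespace-keeping mode and builds the
// macro lookup table from the macro lists configured in the style options.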
25 FormatTokenLexer::FormatTokenLexer(
26     const SourceManager &SourceMgr, FileID ID, unsigned Column,
27     const FormatStyle &Style, encoding::Encoding Encoding,
28     llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator,
29     IdentifierTable &IdentTable)
30     : FormatTok(nullptr), IsFirstToken(true), StateStack({LexerState::NORMAL}),
31       Column(Column), TrailingWhitespace(0),
32       LangOpts(getFormattingLangOpts(Style)), SourceMgr(SourceMgr), ID(ID),
33       Style(Style), IdentTable(IdentTable), Keywords(IdentTable),
34       Encoding(Encoding), Allocator(Allocator), FirstInLineIndex(0),
35       FormattingDisabled(false), MacroBlockBeginRegex(Style.MacroBlockBegin),
36       MacroBlockEndRegex(Style.MacroBlockEnd) {
37   Lex.reset(new Lexer(ID, SourceMgr.getBufferOrFake(ID), SourceMgr, LangOpts));
38   Lex->SetKeepWhitespaceMode(true);
39 
40   for (const std::string &ForEachMacro : Style.ForEachMacros) {
41     auto Identifier = &IdentTable.get(ForEachMacro);
42     Macros.insert({Identifier, TT_ForEachMacro});
43   }
44   for (const std::string &IfMacro : Style.IfMacros) {
45     auto Identifier = &IdentTable.get(IfMacro);
46     Macros.insert({Identifier, TT_IfMacro});
47   }
48   for (const std::string &AttributeMacro : Style.AttributeMacros) {
49     auto Identifier = &IdentTable.get(AttributeMacro);
50     Macros.insert({Identifier, TT_AttributeMacro});
51   }
52   for (const std::string &StatementMacro : Style.StatementMacros) {
53     auto Identifier = &IdentTable.get(StatementMacro);
54     Macros.insert({Identifier, TT_StatementMacro});
55   }
56   for (const std::string &TypenameMacro : Style.TypenameMacros) {
57     auto Identifier = &IdentTable.get(TypenameMacro);
58     Macros.insert({Identifier, TT_TypenameMacro});
59   }
60   for (const std::string &NamespaceMacro : Style.NamespaceMacros) {
61     auto Identifier = &IdentTable.get(NamespaceMacro);
62     Macros.insert({Identifier, TT_NamespaceMacro});
63   }
64   for (const std::string &WhitespaceSensitiveMacro :
65        Style.WhitespaceSensitiveMacros) {
66     auto Identifier = &IdentTable.get(WhitespaceSensitiveMacro);
67     Macros.insert({Identifier, TT_UntouchableMacroFunc});
68   }
69   for (const std::string &StatementAttributeLikeMacro :
70        Style.StatementAttributeLikeMacros) {
71     auto Identifier = &IdentTable.get(StatementAttributeLikeMacro);
72     Macros.insert({Identifier, TT_StatementAttributeLikeMacro});
73   }
74 }
75 
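// Lexes the whole file into a FormatToken stream, running the
// language-specific merging and re-lexing passes after each raw token.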
76 ArrayRef<FormatToken *> FormatTokenLexer::lex() {
77   assert(Tokens.empty());
78   assert(FirstInLineIndex == 0);
79   do {
80     Tokens.push_back(getNextToken());
81     if (Style.isJavaScript()) {
82       tryParseJSRegexLiteral();
83       handleTemplateStrings();
84     }
85     if (Style.Language == FormatStyle::LK_TextProto)
86       tryParsePythonComment();
87     tryMergePreviousTokens();
88     if (Style.isCSharp()) {
89       // This needs to come after tokens have been merged so that C#
90       // string literals are correctly identified.
91       handleCSharpVerbatimAndInterpolatedStrings();
92     }
93     if (Tokens.back()->NewlinesBefore > 0 || Tokens.back()->IsMultiline)
94       FirstInLineIndex = Tokens.size() - 1;
95   } while (Tokens.back()->isNot(tok::eof));
96   return Tokens;
97 }
98 
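// Tries to combine the most recently lexed tokens into a single token where
// the raw lexer splits a language-specific operator or construct, e.g. '??'
// in C#/JavaScript or '>>>=' in Java.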
99 void FormatTokenLexer::tryMergePreviousTokens() {
100   if (tryMerge_TMacro())
101     return;
102   if (tryMergeConflictMarkers())
103     return;
104   if (tryMergeLessLess())
105     return;
106   if (tryMergeForEach())
107     return;
108   if (Style.isCpp() && tryTransformTryUsageForC())
109     return;
110 
111   if (Style.isJavaScript() || Style.isCSharp()) {
112     static const tok::TokenKind NullishCoalescingOperator[] = {tok::question,
113                                                                tok::question};
114     static const tok::TokenKind NullPropagatingOperator[] = {tok::question,
115                                                              tok::period};
116     static const tok::TokenKind FatArrow[] = {tok::equal, tok::greater};
117 
118     if (tryMergeTokens(FatArrow, TT_FatArrow))
119       return;
120     if (tryMergeTokens(NullishCoalescingOperator, TT_NullCoalescingOperator)) {
121       // Treat like the "||" operator (as opposed to the ternary ?).
122       Tokens.back()->Tok.setKind(tok::pipepipe);
123       return;
124     }
125     if (tryMergeTokens(NullPropagatingOperator, TT_NullPropagatingOperator)) {
126       // Treat like a regular "." access.
127       Tokens.back()->Tok.setKind(tok::period);
128       return;
129     }
130     if (tryMergeNullishCoalescingEqual())
131       return;
132   }
133 
134   if (Style.isCSharp()) {
135     static const tok::TokenKind CSharpNullConditionalLSquare[] = {
136         tok::question, tok::l_square};
137 
138     if (tryMergeCSharpKeywordVariables())
139       return;
140     if (tryMergeCSharpStringLiteral())
141       return;
142     if (tryTransformCSharpForEach())
143       return;
144     if (tryMergeTokens(CSharpNullConditionalLSquare,
145                        TT_CSharpNullConditionalLSquare)) {
146       // Treat like a regular "[" operator.
147       Tokens.back()->Tok.setKind(tok::l_square);
148       return;
149     }
150   }
151 
152   if (tryMergeNSStringLiteral())
153     return;
154 
155   if (Style.isJavaScript()) {
156     static const tok::TokenKind JSIdentity[] = {tok::equalequal, tok::equal};
157     static const tok::TokenKind JSNotIdentity[] = {tok::exclaimequal,
158                                                    tok::equal};
159     static const tok::TokenKind JSShiftEqual[] = {tok::greater, tok::greater,
160                                                   tok::greaterequal};
161     static const tok::TokenKind JSExponentiation[] = {tok::star, tok::star};
162     static const tok::TokenKind JSExponentiationEqual[] = {tok::star,
163                                                            tok::starequal};
164     static const tok::TokenKind JSPipePipeEqual[] = {tok::pipepipe, tok::equal};
165     static const tok::TokenKind JSAndAndEqual[] = {tok::ampamp, tok::equal};
166 
167     // FIXME: Investigate what token type gives the correct operator priority.
168     if (tryMergeTokens(JSIdentity, TT_BinaryOperator))
169       return;
170     if (tryMergeTokens(JSNotIdentity, TT_BinaryOperator))
171       return;
172     if (tryMergeTokens(JSShiftEqual, TT_BinaryOperator))
173       return;
174     if (tryMergeTokens(JSExponentiation, TT_JsExponentiation))
175       return;
176     if (tryMergeTokens(JSExponentiationEqual, TT_JsExponentiationEqual)) {
177       Tokens.back()->Tok.setKind(tok::starequal);
178       return;
179     }
180     if (tryMergeTokens(JSAndAndEqual, TT_JsAndAndEqual) ||
181         tryMergeTokens(JSPipePipeEqual, TT_JsPipePipeEqual)) {
182       // Treat like the "=" assignment operator.
183       Tokens.back()->Tok.setKind(tok::equal);
184       return;
185     }
186     if (tryMergeJSPrivateIdentifier())
187       return;
188   }
189 
190   if (Style.Language == FormatStyle::LK_Java) {
191     static const tok::TokenKind JavaRightLogicalShiftAssign[] = {
192         tok::greater, tok::greater, tok::greaterequal};
193     if (tryMergeTokens(JavaRightLogicalShiftAssign, TT_BinaryOperator))
194       return;
195   }
196 }
197 
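// Merges an Objective-C '@' with the following string literal into a single
// TT_ObjCStringLiteral token, e.g. @"foo".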
198 bool FormatTokenLexer::tryMergeNSStringLiteral() {
199   if (Tokens.size() < 2)
200     return false;
201   auto &At = *(Tokens.end() - 2);
202   auto &String = *(Tokens.end() - 1);
203   if (!At->is(tok::at) || !String->is(tok::string_literal))
204     return false;
205   At->Tok.setKind(tok::string_literal);
206   At->TokenText = StringRef(At->TokenText.begin(),
207                             String->TokenText.end() - At->TokenText.begin());
208   At->ColumnWidth += String->ColumnWidth;
209   At->setType(TT_ObjCStringLiteral);
210   Tokens.erase(Tokens.end() - 1);
211   return true;
212 }
213 
214 bool FormatTokenLexer::tryMergeJSPrivateIdentifier() {
215   // Merges #identifier into a single identifier with the text #identifier
216   // but the token tok::identifier.
217   if (Tokens.size() < 2)
218     return false;
219   auto &Hash = *(Tokens.end() - 2);
220   auto &Identifier = *(Tokens.end() - 1);
221   if (!Hash->is(tok::hash) || !Identifier->is(tok::identifier))
222     return false;
223   Hash->Tok.setKind(tok::identifier);
224   Hash->TokenText =
225       StringRef(Hash->TokenText.begin(),
226                 Identifier->TokenText.end() - Hash->TokenText.begin());
227   Hash->ColumnWidth += Identifier->ColumnWidth;
228   Hash->setType(TT_JsPrivateIdentifier);
229   Tokens.erase(Tokens.end() - 1);
230   return true;
231 }
232 
233 // Search for verbatim or interpolated string literals @"ABC" or
234 // $"aaaaa{abc}aaaaa" i and mark the token as TT_CSharpStringLiteral, and to
235 // prevent splitting of @, $ and ".
236 // Merging of multiline verbatim strings with embedded '"' is handled in
237 // handleCSharpVerbatimAndInterpolatedStrings with lower-level lexing.
238 bool FormatTokenLexer::tryMergeCSharpStringLiteral() {
239   if (Tokens.size() < 2)
240     return false;
241 
242   // Interpolated strings could contain { } with " characters inside.
243   // $"{x ?? "null"}"
244   // should not be split into $"{x ?? ", null, "}" but should be treated as a
245   // single string-literal.
246   //
247   // We opt not to try and format expressions inside {} within a C#
248   // interpolated string. Formatting expressions within an interpolated string
249   // would require similar work as that done for JavaScript template strings
250   // in `handleTemplateStrings()`.
251   auto &CSharpInterpolatedString = *(Tokens.end() - 2);
252   if (CSharpInterpolatedString->getType() == TT_CSharpStringLiteral &&
253       (CSharpInterpolatedString->TokenText.startswith(R"($")") ||
254        CSharpInterpolatedString->TokenText.startswith(R"($@")"))) {
255     int UnmatchedOpeningBraceCount = 0;
256 
257     auto TokenTextSize = CSharpInterpolatedString->TokenText.size();
258     for (size_t Index = 0; Index < TokenTextSize; ++Index) {
259       char C = CSharpInterpolatedString->TokenText[Index];
260       if (C == '{') {
261         // "{{"  inside an interpolated string is an escaped '{' so skip it.
262         if (Index + 1 < TokenTextSize &&
263             CSharpInterpolatedString->TokenText[Index + 1] == '{') {
264           ++Index;
265           continue;
266         }
267         ++UnmatchedOpeningBraceCount;
268       } else if (C == '}') {
269         // "}}"  inside an interpolated string is an escaped '}' so skip it.
270         if (Index + 1 < TokenTextSize &&
271             CSharpInterpolatedString->TokenText[Index + 1] == '}') {
272           ++Index;
273           continue;
274         }
275         --UnmatchedOpeningBraceCount;
276       }
277     }
278 
279     if (UnmatchedOpeningBraceCount > 0) {
280       auto &NextToken = *(Tokens.end() - 1);
281       CSharpInterpolatedString->TokenText =
282           StringRef(CSharpInterpolatedString->TokenText.begin(),
283                     NextToken->TokenText.end() -
284                         CSharpInterpolatedString->TokenText.begin());
285       CSharpInterpolatedString->ColumnWidth += NextToken->ColumnWidth;
286       Tokens.erase(Tokens.end() - 1);
287       return true;
288     }
289   }
290 
291   // Look for @"aaaaaa" or $"aaaaaa".
292   auto &String = *(Tokens.end() - 1);
293   if (!String->is(tok::string_literal))
294     return false;
295 
296   auto &At = *(Tokens.end() - 2);
297   if (!(At->is(tok::at) || At->TokenText == "$"))
298     return false;
299 
300   if (Tokens.size() > 2 && At->is(tok::at)) {
301     auto &Dollar = *(Tokens.end() - 3);
302     if (Dollar->TokenText == "$") {
303       // This looks like $@"aaaaa" so we need to combine all 3 tokens.
304       Dollar->Tok.setKind(tok::string_literal);
305       Dollar->TokenText =
306           StringRef(Dollar->TokenText.begin(),
307                     String->TokenText.end() - Dollar->TokenText.begin());
308       Dollar->ColumnWidth += (At->ColumnWidth + String->ColumnWidth);
309       Dollar->setType(TT_CSharpStringLiteral);
310       Tokens.erase(Tokens.end() - 2);
311       Tokens.erase(Tokens.end() - 1);
312       return true;
313     }
314   }
315 
316   // Convert back into just a string_literal.
317   At->Tok.setKind(tok::string_literal);
318   At->TokenText = StringRef(At->TokenText.begin(),
319                             String->TokenText.end() - At->TokenText.begin());
320   At->ColumnWidth += String->ColumnWidth;
321   At->setType(TT_CSharpStringLiteral);
322   Tokens.erase(Tokens.end() - 1);
323   return true;
324 }
325 
326 // Valid C# attribute targets:
327 // https://docs.microsoft.com/en-us/dotnet/csharp/programming-guide/concepts/attributes/#attribute-targets
328 const llvm::StringSet<> FormatTokenLexer::CSharpAttributeTargets = {
329     "assembly", "module",   "field",  "event", "method",
330     "param",    "property", "return", "type",
331 };
332 
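// Merges a '??' (TT_NullCoalescingOperator) followed by '=' into a single
// '??=' token, as clang has no token kind for '??='.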
333 bool FormatTokenLexer::tryMergeNullishCoalescingEqual() {
334   if (Tokens.size() < 2)
335     return false;
336   auto &NullishCoalescing = *(Tokens.end() - 2);
337   auto &Equal = *(Tokens.end() - 1);
338   if (NullishCoalescing->getType() != TT_NullCoalescingOperator ||
339       !Equal->is(tok::equal)) {
340     return false;
341   }
342   NullishCoalescing->Tok.setKind(tok::equal); // no '??=' in clang tokens.
343   NullishCoalescing->TokenText =
344       StringRef(NullishCoalescing->TokenText.begin(),
345                 Equal->TokenText.end() - NullishCoalescing->TokenText.begin());
346   NullishCoalescing->ColumnWidth += Equal->ColumnWidth;
347   NullishCoalescing->setType(TT_NullCoalescingEqual);
348   Tokens.erase(Tokens.end() - 1);
349   return true;
350 }
351 
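// In C#, merges '@' with a following keyword into a single identifier token,
// e.g. '@class' used as a variable name.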
352 bool FormatTokenLexer::tryMergeCSharpKeywordVariables() {
353   if (Tokens.size() < 2)
354     return false;
355   auto &At = *(Tokens.end() - 2);
356   auto &Keyword = *(Tokens.end() - 1);
357   if (!At->is(tok::at))
358     return false;
359   if (!Keywords.isCSharpKeyword(*Keyword))
360     return false;
361 
362   At->Tok.setKind(tok::identifier);
363   At->TokenText = StringRef(At->TokenText.begin(),
364                             Keyword->TokenText.end() - At->TokenText.begin());
365   At->ColumnWidth += Keyword->ColumnWidth;
366   At->setType(Keyword->getType());
367   Tokens.erase(Tokens.end() - 1);
368   return true;
369 }
370 
371 // In C#, transform the identifier 'foreach' into kw_foreach.
372 bool FormatTokenLexer::tryTransformCSharpForEach() {
373   if (Tokens.size() < 1)
374     return false;
375   auto &Identifier = *(Tokens.end() - 1);
376   if (!Identifier->is(tok::identifier))
377     return false;
378   if (Identifier->TokenText != "foreach")
379     return false;
380 
381   Identifier->setType(TT_ForEachMacro);
382   Identifier->Tok.setKind(tok::kw_for);
383   return true;
384 }
385 
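// Merges 'for' followed by the identifier 'each' into a single for-each-macro
// token, e.g. for C++/CLI's 'for each (... in ...)' loops.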
386 bool FormatTokenLexer::tryMergeForEach() {
387   if (Tokens.size() < 2)
388     return false;
389   auto &For = *(Tokens.end() - 2);
390   auto &Each = *(Tokens.end() - 1);
391   if (!For->is(tok::kw_for))
392     return false;
393   if (!Each->is(tok::identifier))
394     return false;
395   if (Each->TokenText != "each")
396     return false;
397 
398   For->setType(TT_ForEachMacro);
399   For->Tok.setKind(tok::kw_for);
400 
401   For->TokenText = StringRef(For->TokenText.begin(),
402                              Each->TokenText.end() - For->TokenText.begin());
403   For->ColumnWidth += Each->ColumnWidth;
404   Tokens.erase(Tokens.end() - 1);
405   return true;
406 }
407 
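// 'try' is lexed as a keyword, but in C it can be an ordinary name. If the
// token does not look like the start of a C++ try block (or Objective-C's
// @try), demote it back to an identifier.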
408 bool FormatTokenLexer::tryTransformTryUsageForC() {
409   if (Tokens.size() < 2)
410     return false;
411   auto &Try = *(Tokens.end() - 2);
412   if (!Try->is(tok::kw_try))
413     return false;
414   auto &Next = *(Tokens.end() - 1);
415   if (Next->isOneOf(tok::l_brace, tok::colon, tok::hash, tok::comment))
416     return false;
417 
418   if (Tokens.size() > 2) {
419     auto &At = *(Tokens.end() - 3);
420     if (At->is(tok::at))
421       return false;
422   }
423 
424   Try->Tok.setKind(tok::identifier);
425   return true;
426 }
427 
428 bool FormatTokenLexer::tryMergeLessLess() {
429   // Merge X,less,less,Y into X,lessless,Y unless X or Y is less.
430   if (Tokens.size() < 3)
431     return false;
432 
433   auto First = Tokens.end() - 3;
434   if (First[0]->isNot(tok::less) || First[1]->isNot(tok::less))
435     return false;
436 
437   // Only merge if there currently is no whitespace between the two "<".
438   if (First[1]->hasWhitespaceBefore())
439     return false;
440 
441   auto X = Tokens.size() > 3 ? First[-1] : nullptr;
442   auto Y = First[2];
443   if ((X && X->is(tok::less)) || Y->is(tok::less))
444     return false;
445 
446   // Do not remove the whitespace between the two "<", e.g. in "operator< <>".
447   if (X && X->is(tok::kw_operator) && Y->is(tok::greater))
448     return false;
449 
450   First[0]->Tok.setKind(tok::lessless);
451   First[0]->TokenText = "<<";
452   First[0]->ColumnWidth += 1;
453   Tokens.erase(Tokens.end() - 2);
454   return true;
455 }
456 
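// Merges a run of trailing tokens matching Kinds, with no whitespace between
// them, into the first of those tokens and assigns it NewType.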
457 bool FormatTokenLexer::tryMergeTokens(ArrayRef<tok::TokenKind> Kinds,
458                                       TokenType NewType) {
459   if (Tokens.size() < Kinds.size())
460     return false;
461 
462   SmallVectorImpl<FormatToken *>::const_iterator First =
463       Tokens.end() - Kinds.size();
464   if (!First[0]->is(Kinds[0]))
465     return false;
466   unsigned AddLength = 0;
467   for (unsigned i = 1; i < Kinds.size(); ++i) {
468     if (!First[i]->is(Kinds[i]) || First[i]->hasWhitespaceBefore())
469       return false;
470     AddLength += First[i]->TokenText.size();
471   }
472   Tokens.resize(Tokens.size() - Kinds.size() + 1);
473   First[0]->TokenText = StringRef(First[0]->TokenText.data(),
474                                   First[0]->TokenText.size() + AddLength);
475   First[0]->ColumnWidth += AddLength;
476   First[0]->setType(NewType);
477   return true;
478 }
479 
480 // Returns \c true if \p Tok can only be followed by an operand in JavaScript.
481 bool FormatTokenLexer::precedesOperand(FormatToken *Tok) {
482   // NB: This is not entirely correct, as an r_paren can introduce an operand
483   // location in e.g. `if (foo) /bar/.exec(...);`. That is a rare enough
484   // corner case to not matter in practice, though.
485   return Tok->isOneOf(tok::period, tok::l_paren, tok::comma, tok::l_brace,
486                       tok::r_brace, tok::l_square, tok::semi, tok::exclaim,
487                       tok::colon, tok::question, tok::tilde) ||
488          Tok->isOneOf(tok::kw_return, tok::kw_do, tok::kw_case, tok::kw_throw,
489                       tok::kw_else, tok::kw_new, tok::kw_delete, tok::kw_void,
490                       tok::kw_typeof, Keywords.kw_instanceof, Keywords.kw_in) ||
491          Tok->isBinaryOperator();
492 }
493 
494 bool FormatTokenLexer::canPrecedeRegexLiteral(FormatToken *Prev) {
495   if (!Prev)
496     return true;
497 
498   // Regex literals can only follow after prefix unary operators, not after
499   // postfix unary operators. If the '++' is followed by a non-operand
500   // introducing token, the slash here is the operand and not the start of a
501   // regex.
502   // `!` is a unary prefix operator, but also a postfix operator that casts
503   // away nullability, so the same check applies.
504   if (Prev->isOneOf(tok::plusplus, tok::minusminus, tok::exclaim))
505     return Tokens.size() < 3 || precedesOperand(Tokens[Tokens.size() - 3]);
506 
507   // The previous token must introduce an operand location where regex
508   // literals can occur.
509   if (!precedesOperand(Prev))
510     return false;
511 
512   return true;
513 }
514 
515 // Tries to parse a JavaScript regex literal starting at the current token,
516 // if it begins with a slash and is in a location where JavaScript allows
517 // regex literals. Changes the current token to a regex literal and updates
518 // its text if successful.
519 void FormatTokenLexer::tryParseJSRegexLiteral() {
520   FormatToken *RegexToken = Tokens.back();
521   if (!RegexToken->isOneOf(tok::slash, tok::slashequal))
522     return;
523 
524   FormatToken *Prev = nullptr;
525   for (FormatToken *FT : llvm::drop_begin(llvm::reverse(Tokens))) {
526     // NB: Because previous pointers are not initialized yet, this cannot use
527     // Token.getPreviousNonComment.
528     if (FT->isNot(tok::comment)) {
529       Prev = FT;
530       break;
531     }
532   }
533 
534   if (!canPrecedeRegexLiteral(Prev))
535     return;
536 
537   // 'Manually' lex ahead in the current file buffer.
538   const char *Offset = Lex->getBufferLocation();
539   const char *RegexBegin = Offset - RegexToken->TokenText.size();
540   StringRef Buffer = Lex->getBuffer();
541   bool InCharacterClass = false;
542   bool HaveClosingSlash = false;
543   for (; !HaveClosingSlash && Offset != Buffer.end(); ++Offset) {
544     // Regular expressions are terminated with a '/', which can only be
545     // escaped using '\' or a character class between '[' and ']'.
546     // See http://www.ecma-international.org/ecma-262/5.1/#sec-7.8.5.
547     switch (*Offset) {
548     case '\\':
549       // Skip the escaped character.
550       ++Offset;
551       break;
552     case '[':
553       InCharacterClass = true;
554       break;
555     case ']':
556       InCharacterClass = false;
557       break;
558     case '/':
559       if (!InCharacterClass)
560         HaveClosingSlash = true;
561       break;
562     }
563   }
564 
565   RegexToken->setType(TT_RegexLiteral);
566   // Treat regex literals like other string_literals.
567   RegexToken->Tok.setKind(tok::string_literal);
568   RegexToken->TokenText = StringRef(RegexBegin, Offset - RegexBegin);
569   RegexToken->ColumnWidth = RegexToken->TokenText.size();
570 
571   resetLexer(SourceMgr.getFileOffset(Lex->getSourceLocation(Offset)));
572 }
573 
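// Re-lexes a C# verbatim (@"...") or verbatim interpolated ($@"...") string
// up to its terminating quote so that embedded '""' escapes and newlines end
// up in a single, possibly multiline, string token.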
574 void FormatTokenLexer::handleCSharpVerbatimAndInterpolatedStrings() {
575   FormatToken *CSharpStringLiteral = Tokens.back();
576 
577   if (CSharpStringLiteral->getType() != TT_CSharpStringLiteral)
578     return;
579 
580   // Deal with multiline strings.
581   if (!(CSharpStringLiteral->TokenText.startswith(R"(@")") ||
582         CSharpStringLiteral->TokenText.startswith(R"($@")"))) {
583     return;
584   }
585 
586   const char *StrBegin =
587       Lex->getBufferLocation() - CSharpStringLiteral->TokenText.size();
588   const char *Offset = StrBegin;
589   if (CSharpStringLiteral->TokenText.startswith(R"(@")"))
590     Offset += 2;
591   else // CSharpStringLiteral->TokenText.startswith(R"($@")")
592     Offset += 3;
593 
594   // Look for a terminating '"' in the current file buffer.
595   // Make no effort to format code within an interpolated or verbatim string.
596   for (; Offset != Lex->getBuffer().end(); ++Offset) {
597     if (Offset[0] == '"') {
598       // "" within a verbatim string is an escaped double quote: skip it.
599       if (Offset + 1 < Lex->getBuffer().end() && Offset[1] == '"')
600         ++Offset;
601       else
602         break;
603     }
604   }
605 
606   // Make no attempt to format code properly if a verbatim string is
607   // unterminated.
608   if (Offset == Lex->getBuffer().end())
609     return;
610 
611   StringRef LiteralText(StrBegin, Offset - StrBegin + 1);
612   CSharpStringLiteral->TokenText = LiteralText;
613 
614   // Adjust width for potentially multiline string literals.
615   size_t FirstBreak = LiteralText.find('\n');
616   StringRef FirstLineText = FirstBreak == StringRef::npos
617                                 ? LiteralText
618                                 : LiteralText.substr(0, FirstBreak);
619   CSharpStringLiteral->ColumnWidth = encoding::columnWidthWithTabs(
620       FirstLineText, CSharpStringLiteral->OriginalColumn, Style.TabWidth,
621       Encoding);
622   size_t LastBreak = LiteralText.rfind('\n');
623   if (LastBreak != StringRef::npos) {
624     CSharpStringLiteral->IsMultiline = true;
625     unsigned StartColumn = 0;
626     CSharpStringLiteral->LastLineColumnWidth =
627         encoding::columnWidthWithTabs(LiteralText.substr(LastBreak + 1),
628                                       StartColumn, Style.TabWidth, Encoding);
629   }
630 
631   SourceLocation loc = Offset < Lex->getBuffer().end()
632                            ? Lex->getSourceLocation(Offset + 1)
633                            : SourceMgr.getLocForEndOfFile(ID);
634   resetLexer(SourceMgr.getFileOffset(loc));
635 }
636 
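// Handles JavaScript template strings: lexes ahead from a backtick to the
// closing backtick or to a '${' interpolation, tracking nesting via the
// lexer state stack.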
637 void FormatTokenLexer::handleTemplateStrings() {
638   FormatToken *BacktickToken = Tokens.back();
639 
640   if (BacktickToken->is(tok::l_brace)) {
641     StateStack.push(LexerState::NORMAL);
642     return;
643   }
644   if (BacktickToken->is(tok::r_brace)) {
645     if (StateStack.size() == 1)
646       return;
647     StateStack.pop();
648     if (StateStack.top() != LexerState::TEMPLATE_STRING)
649       return;
650     // If back in TEMPLATE_STRING, fall through and continue parsing the template string.
651   } else if (BacktickToken->is(tok::unknown) &&
652              BacktickToken->TokenText == "`") {
653     StateStack.push(LexerState::TEMPLATE_STRING);
654   } else {
655     return; // Not actually a template
656   }
657 
658   // 'Manually' lex ahead in the current file buffer.
659   const char *Offset = Lex->getBufferLocation();
660   const char *TmplBegin = Offset - BacktickToken->TokenText.size(); // at "`"
661   for (; Offset != Lex->getBuffer().end(); ++Offset) {
662     if (Offset[0] == '`') {
663       StateStack.pop();
664       break;
665     }
666     if (Offset[0] == '\\') {
667       ++Offset; // Skip the escaped character.
668     } else if (Offset + 1 < Lex->getBuffer().end() && Offset[0] == '$' &&
669                Offset[1] == '{') {
670       // '${' introduces an expression interpolation in the template string.
671       StateStack.push(LexerState::NORMAL);
672       ++Offset;
673       break;
674     }
675   }
676 
677   StringRef LiteralText(TmplBegin, Offset - TmplBegin + 1);
678   BacktickToken->setType(TT_TemplateString);
679   BacktickToken->Tok.setKind(tok::string_literal);
680   BacktickToken->TokenText = LiteralText;
681 
682   // Adjust width for potentially multiline string literals.
683   size_t FirstBreak = LiteralText.find('\n');
684   StringRef FirstLineText = FirstBreak == StringRef::npos
685                                 ? LiteralText
686                                 : LiteralText.substr(0, FirstBreak);
687   BacktickToken->ColumnWidth = encoding::columnWidthWithTabs(
688       FirstLineText, BacktickToken->OriginalColumn, Style.TabWidth, Encoding);
689   size_t LastBreak = LiteralText.rfind('\n');
690   if (LastBreak != StringRef::npos) {
691     BacktickToken->IsMultiline = true;
692     unsigned StartColumn = 0; // The template tail spans the entire line.
693     BacktickToken->LastLineColumnWidth =
694         encoding::columnWidthWithTabs(LiteralText.substr(LastBreak + 1),
695                                       StartColumn, Style.TabWidth, Encoding);
696   }
697 
698   SourceLocation loc = Offset < Lex->getBuffer().end()
699                            ? Lex->getSourceLocation(Offset + 1)
700                            : SourceMgr.getLocForEndOfFile(ID);
701   resetLexer(SourceMgr.getFileOffset(loc));
702 }
703 
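// In text protos, turns '#' and the rest of the line into a Python-style
// line comment token.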
704 void FormatTokenLexer::tryParsePythonComment() {
705   FormatToken *HashToken = Tokens.back();
706   if (!HashToken->isOneOf(tok::hash, tok::hashhash))
707     return;
708   // Turn the remainder of this line into a comment.
709   const char *CommentBegin =
710       Lex->getBufferLocation() - HashToken->TokenText.size(); // at "#"
711   size_t From = CommentBegin - Lex->getBuffer().begin();
712   size_t To = Lex->getBuffer().find_first_of('\n', From);
713   if (To == StringRef::npos)
714     To = Lex->getBuffer().size();
715   size_t Len = To - From;
716   HashToken->setType(TT_LineComment);
717   HashToken->Tok.setKind(tok::comment);
718   HashToken->TokenText = Lex->getBuffer().substr(From, Len);
719   SourceLocation Loc = To < Lex->getBuffer().size()
720                            ? Lex->getSourceLocation(CommentBegin + Len)
721                            : SourceMgr.getLocForEndOfFile(ID);
722   resetLexer(SourceMgr.getFileOffset(Loc));
723 }
724 
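// Merges a _T("...") macro invocation into a single string-literal token,
// e.g. _T("Hello") becomes one token with the text '_T("Hello")'.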
725 bool FormatTokenLexer::tryMerge_TMacro() {
726   if (Tokens.size() < 4)
727     return false;
728   FormatToken *Last = Tokens.back();
729   if (!Last->is(tok::r_paren))
730     return false;
731 
732   FormatToken *String = Tokens[Tokens.size() - 2];
733   if (!String->is(tok::string_literal) || String->IsMultiline)
734     return false;
735 
736   if (!Tokens[Tokens.size() - 3]->is(tok::l_paren))
737     return false;
738 
739   FormatToken *Macro = Tokens[Tokens.size() - 4];
740   if (Macro->TokenText != "_T")
741     return false;
742 
743   const char *Start = Macro->TokenText.data();
744   const char *End = Last->TokenText.data() + Last->TokenText.size();
745   String->TokenText = StringRef(Start, End - Start);
746   String->IsFirst = Macro->IsFirst;
747   String->LastNewlineOffset = Macro->LastNewlineOffset;
748   String->WhitespaceRange = Macro->WhitespaceRange;
749   String->OriginalColumn = Macro->OriginalColumn;
750   String->ColumnWidth = encoding::columnWidthWithTabs(
751       String->TokenText, String->OriginalColumn, Style.TabWidth, Encoding);
752   String->NewlinesBefore = Macro->NewlinesBefore;
753   String->HasUnescapedNewline = Macro->HasUnescapedNewline;
754 
755   Tokens.pop_back();
756   Tokens.pop_back();
757   Tokens.pop_back();
758   Tokens.back() = String;
759   if (FirstInLineIndex >= Tokens.size())
760     FirstInLineIndex = Tokens.size() - 1;
761   return true;
762 }
763 
764 bool FormatTokenLexer::tryMergeConflictMarkers() {
765   if (Tokens.back()->NewlinesBefore == 0 && Tokens.back()->isNot(tok::eof))
766     return false;
767 
768   // Conflict lines look like:
769   // <marker> <text from the vcs>
770   // For example:
771   // >>>>>>> /file/in/file/system at revision 1234
772   //
773   // We merge all tokens in a line that starts with a conflict marker
774   // into a single token with a special token type that the unwrapped line
775   // parser will use to correctly rebuild the underlying code.
776 
777   FileID ID;
778   // Get the position of the first token in the line.
779   unsigned FirstInLineOffset;
780   std::tie(ID, FirstInLineOffset) = SourceMgr.getDecomposedLoc(
781       Tokens[FirstInLineIndex]->getStartOfNonWhitespace());
782   StringRef Buffer = SourceMgr.getBufferOrFake(ID).getBuffer();
783   // Calculate the offset of the start of the current line.
784   auto LineOffset = Buffer.rfind('\n', FirstInLineOffset);
785   if (LineOffset == StringRef::npos)
786     LineOffset = 0;
787   else
788     ++LineOffset;
789 
790   auto FirstSpace = Buffer.find_first_of(" \n", LineOffset);
791   StringRef LineStart;
792   if (FirstSpace == StringRef::npos)
793     LineStart = Buffer.substr(LineOffset);
794   else
795     LineStart = Buffer.substr(LineOffset, FirstSpace - LineOffset);
796 
797   TokenType Type = TT_Unknown;
798   if (LineStart == "<<<<<<<" || LineStart == ">>>>") {
799     Type = TT_ConflictStart;
800   } else if (LineStart == "|||||||" || LineStart == "=======" ||
801              LineStart == "====") {
802     Type = TT_ConflictAlternative;
803   } else if (LineStart == ">>>>>>>" || LineStart == "<<<<") {
804     Type = TT_ConflictEnd;
805   }
806 
807   if (Type != TT_Unknown) {
808     FormatToken *Next = Tokens.back();
809 
810     Tokens.resize(FirstInLineIndex + 1);
811     // We do not need to build a complete token here, as we will skip it
812     // during parsing anyway (as we must not touch whitespace around conflict
813     // markers).
814     Tokens.back()->setType(Type);
815     Tokens.back()->Tok.setKind(tok::kw___unknown_anytype);
816 
817     Tokens.push_back(Next);
818     return true;
819   }
820 
821   return false;
822 }
823 
824 FormatToken *FormatTokenLexer::getStashedToken() {
825   // Create a synthesized second '>' or '<' token.
826   Token Tok = FormatTok->Tok;
827   StringRef TokenText = FormatTok->TokenText;
828 
829   unsigned OriginalColumn = FormatTok->OriginalColumn;
830   FormatTok = new (Allocator.Allocate()) FormatToken;
831   FormatTok->Tok = Tok;
832   SourceLocation TokLocation =
833       FormatTok->Tok.getLocation().getLocWithOffset(Tok.getLength() - 1);
834   FormatTok->Tok.setLocation(TokLocation);
835   FormatTok->WhitespaceRange = SourceRange(TokLocation, TokLocation);
836   FormatTok->TokenText = TokenText;
837   FormatTok->ColumnWidth = 1;
838   FormatTok->OriginalColumn = OriginalColumn + 1;
839 
840   return FormatTok;
841 }
842 
843 /// Truncate the current token to the new length and make the lexer continue
844 /// from the end of the truncated token. Used for other languages that have
845 /// different token boundaries, like JavaScript in which a comment ends at a
846 /// line break regardless of whether the line break follows a backslash. Also
847 /// used to set the lexer to the end of whitespace if the lexer regards
848 /// whitespace and an unrecognized symbol as one token.
849 void FormatTokenLexer::truncateToken(size_t NewLen) {
850   assert(NewLen <= FormatTok->TokenText.size());
851   resetLexer(SourceMgr.getFileOffset(Lex->getSourceLocation(
852       Lex->getBufferLocation() - FormatTok->TokenText.size() + NewLen)));
853   FormatTok->TokenText = FormatTok->TokenText.substr(0, NewLen);
854   FormatTok->ColumnWidth = encoding::columnWidthWithTabs(
855       FormatTok->TokenText, FormatTok->OriginalColumn, Style.TabWidth,
856       Encoding);
857   FormatTok->Tok.setLength(NewLen);
858 }
859 
860 /// Count the length of leading whitespace in a token.
861 static size_t countLeadingWhitespace(StringRef Text) {
862   // Basically counting the length matched by this regex.
863   // "^([\n\r\f\v \t]|(\\\\|\\?\\?/)[\n\r])+"
864   // Directly using the regex turned out to be slow. With the regex
865   // version formatting all files in this directory took about 1.25
866   // seconds. This version took about 0.5 seconds.
867   const unsigned char *const Begin = Text.bytes_begin();
868   const unsigned char *const End = Text.bytes_end();
869   const unsigned char *Cur = Begin;
870   while (Cur < End) {
871     if (isspace(Cur[0])) {
872       ++Cur;
873     } else if (Cur[0] == '\\' && (Cur[1] == '\n' || Cur[1] == '\r')) {
874       // A '\' followed by a newline always escapes the newline, regardless
875       // of whether there is another '\' before it.
876       // The source has a null byte at the end. So the end of the entire input
877       // isn't reached yet. Also the lexer doesn't break apart an escaped
878       // newline.
879       assert(End - Cur >= 2);
880       Cur += 2;
881     } else if (Cur[0] == '?' && Cur[1] == '?' && Cur[2] == '/' &&
882                (Cur[3] == '\n' || Cur[3] == '\r')) {
883       // Newlines can also be escaped by a '?' '?' '/' trigraph. By the way, the
884       // characters are quoted individually in this comment because if we write
885       // them together some compilers warn that we have a trigraph in the code.
886       assert(End - Cur >= 4);
887       Cur += 4;
888     } else {
889       break;
890     }
891   }
892   return Cur - Begin;
893 }
894 
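// Returns the next FormatToken: consumes and records leading whitespace,
// updates column information, and applies language-specific adjustments
// (stashed '>'/'<' halves, Verilog backtick handling, configured macro types).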
895 FormatToken *FormatTokenLexer::getNextToken() {
896   if (StateStack.top() == LexerState::TOKEN_STASHED) {
897     StateStack.pop();
898     return getStashedToken();
899   }
900 
901   FormatTok = new (Allocator.Allocate()) FormatToken;
902   readRawToken(*FormatTok);
903   SourceLocation WhitespaceStart =
904       FormatTok->Tok.getLocation().getLocWithOffset(-TrailingWhitespace);
905   FormatTok->IsFirst = IsFirstToken;
906   IsFirstToken = false;
907 
908   // Consume and record whitespace until we find a significant token.
909   // Some tok::unknown tokens are not just whitespace, e.g. whitespace
910   // followed by a symbol such as backtick. Those symbols may be
911   // significant in other languages.
912   unsigned WhitespaceLength = TrailingWhitespace;
913   while (FormatTok->isNot(tok::eof)) {
914     auto LeadingWhitespace = countLeadingWhitespace(FormatTok->TokenText);
915     if (LeadingWhitespace == 0)
916       break;
917     if (LeadingWhitespace < FormatTok->TokenText.size())
918       truncateToken(LeadingWhitespace);
919     StringRef Text = FormatTok->TokenText;
920     bool InEscape = false;
921     for (int i = 0, e = Text.size(); i != e; ++i) {
922       switch (Text[i]) {
923       case '\r':
924         // If this is a CRLF sequence, break here and the LF will be handled on
925         // the next loop iteration. Otherwise, this is a single Mac CR, treat it
926         // the same as a single LF.
927         if (i + 1 < e && Text[i + 1] == '\n')
928           break;
929         LLVM_FALLTHROUGH;
930       case '\n':
931         ++FormatTok->NewlinesBefore;
932         if (!InEscape)
933           FormatTok->HasUnescapedNewline = true;
934         else
935           InEscape = false;
936         FormatTok->LastNewlineOffset = WhitespaceLength + i + 1;
937         Column = 0;
938         break;
939       case '\f':
940       case '\v':
941         Column = 0;
942         break;
943       case ' ':
944         ++Column;
945         break;
946       case '\t':
947         Column +=
948             Style.TabWidth - (Style.TabWidth ? Column % Style.TabWidth : 0);
949         break;
950       case '\\':
951       case '?':
952       case '/':
953         // The text was entirely whitespace when this loop was entered. Thus
954         // this has to be an escape sequence.
955         assert(Text.substr(i, 2) == "\\\r" || Text.substr(i, 2) == "\\\n" ||
956                Text.substr(i, 4) == "\?\?/\r" ||
957                Text.substr(i, 4) == "\?\?/\n" ||
958                (i >= 1 && (Text.substr(i - 1, 4) == "\?\?/\r" ||
959                            Text.substr(i - 1, 4) == "\?\?/\n")) ||
960                (i >= 2 && (Text.substr(i - 2, 4) == "\?\?/\r" ||
961                            Text.substr(i - 2, 4) == "\?\?/\n")));
962         InEscape = true;
963         break;
964       default:
965         // This shouldn't happen.
966         assert(false);
967         break;
968       }
969     }
970     WhitespaceLength += Text.size();
971     readRawToken(*FormatTok);
972   }
973 
974   if (FormatTok->is(tok::unknown))
975     FormatTok->setType(TT_ImplicitStringLiteral);
976 
977   // JavaScript and Java do not allow escaping the end of a line with a
978   // backslash. Backslashes are syntax errors in plain source, but can occur in
979   // comments. When a single line comment ends with a \, it'll cause the next
980   // line of code to be lexed as a comment, breaking formatting. The code below
981   // finds comments that contain a backslash followed by a line break, truncates
982   // the comment token at the backslash, and resets the lexer to restart behind
983   // the backslash.
984   if ((Style.isJavaScript() || Style.Language == FormatStyle::LK_Java) &&
985       FormatTok->is(tok::comment) && FormatTok->TokenText.startswith("//")) {
986     size_t BackslashPos = FormatTok->TokenText.find('\\');
987     while (BackslashPos != StringRef::npos) {
988       if (BackslashPos + 1 < FormatTok->TokenText.size() &&
989           FormatTok->TokenText[BackslashPos + 1] == '\n') {
990         truncateToken(BackslashPos + 1);
991         break;
992       }
993       BackslashPos = FormatTok->TokenText.find('\\', BackslashPos + 1);
994     }
995   }
996 
997   if (Style.isVerilog()) {
998     // Verilog uses the backtick instead of the hash for preprocessor stuff.
999     // And it uses the hash for delays and parameter lists. In order to continue
1000     // using `tok::hash` in other places, the backtick gets marked as the hash
1001     // here.  And in order to tell the backtick and hash apart for
1002     // Verilog-specific stuff, the hash becomes an identifier.
1003     if (FormatTok->isOneOf(tok::hash, tok::hashhash)) {
1004       FormatTok->Tok.setKind(tok::raw_identifier);
1005     } else if (FormatTok->is(tok::raw_identifier)) {
1006       if (FormatTok->TokenText == "`") {
1007         FormatTok->Tok.setIdentifierInfo(nullptr);
1008         FormatTok->Tok.setKind(tok::hash);
1009       } else if (FormatTok->TokenText == "``") {
1010         FormatTok->Tok.setIdentifierInfo(nullptr);
1011         FormatTok->Tok.setKind(tok::hashhash);
1012       }
1013     }
1014   }
1015 
1016   FormatTok->WhitespaceRange = SourceRange(
1017       WhitespaceStart, WhitespaceStart.getLocWithOffset(WhitespaceLength));
1018 
1019   FormatTok->OriginalColumn = Column;
1020 
1021   TrailingWhitespace = 0;
1022   if (FormatTok->is(tok::comment)) {
1023     // FIXME: Add the trimmed whitespace to Column.
1024     StringRef UntrimmedText = FormatTok->TokenText;
1025     FormatTok->TokenText = FormatTok->TokenText.rtrim(" \t\v\f");
1026     TrailingWhitespace = UntrimmedText.size() - FormatTok->TokenText.size();
1027   } else if (FormatTok->is(tok::raw_identifier)) {
1028     IdentifierInfo &Info = IdentTable.get(FormatTok->TokenText);
1029     FormatTok->Tok.setIdentifierInfo(&Info);
1030     FormatTok->Tok.setKind(Info.getTokenID());
1031     if (Style.Language == FormatStyle::LK_Java &&
1032         FormatTok->isOneOf(tok::kw_struct, tok::kw_union, tok::kw_delete,
1033                            tok::kw_operator)) {
1034       FormatTok->Tok.setKind(tok::identifier);
1035       FormatTok->Tok.setIdentifierInfo(nullptr);
1036     } else if (Style.isJavaScript() &&
1037                FormatTok->isOneOf(tok::kw_struct, tok::kw_union,
1038                                   tok::kw_operator)) {
1039       FormatTok->Tok.setKind(tok::identifier);
1040       FormatTok->Tok.setIdentifierInfo(nullptr);
1041     }
1042   } else if (FormatTok->is(tok::greatergreater)) {
1043     FormatTok->Tok.setKind(tok::greater);
1044     FormatTok->TokenText = FormatTok->TokenText.substr(0, 1);
1045     ++Column;
1046     StateStack.push(LexerState::TOKEN_STASHED);
1047   } else if (FormatTok->is(tok::lessless)) {
1048     FormatTok->Tok.setKind(tok::less);
1049     FormatTok->TokenText = FormatTok->TokenText.substr(0, 1);
1050     ++Column;
1051     StateStack.push(LexerState::TOKEN_STASHED);
1052   }
1053 
1054   // Now FormatTok is the next non-whitespace token.
1055 
1056   StringRef Text = FormatTok->TokenText;
1057   size_t FirstNewlinePos = Text.find('\n');
1058   if (FirstNewlinePos == StringRef::npos) {
1059     // FIXME: ColumnWidth actually depends on the start column, we need to
1060     // take this into account when the token is moved.
1061     FormatTok->ColumnWidth =
1062         encoding::columnWidthWithTabs(Text, Column, Style.TabWidth, Encoding);
1063     Column += FormatTok->ColumnWidth;
1064   } else {
1065     FormatTok->IsMultiline = true;
1066     // FIXME: ColumnWidth actually depends on the start column, we need to
1067     // take this into account when the token is moved.
1068     FormatTok->ColumnWidth = encoding::columnWidthWithTabs(
1069         Text.substr(0, FirstNewlinePos), Column, Style.TabWidth, Encoding);
1070 
1071     // The last line of the token always starts in column 0.
1072     // Thus, the length can be precomputed even in the presence of tabs.
1073     FormatTok->LastLineColumnWidth = encoding::columnWidthWithTabs(
1074         Text.substr(Text.find_last_of('\n') + 1), 0, Style.TabWidth, Encoding);
1075     Column = FormatTok->LastLineColumnWidth;
1076   }
1077 
1078   if (Style.isCpp()) {
1079     auto it = Macros.find(FormatTok->Tok.getIdentifierInfo());
1080     if (!(Tokens.size() > 0 && Tokens.back()->Tok.getIdentifierInfo() &&
1081           Tokens.back()->Tok.getIdentifierInfo()->getPPKeywordID() ==
1082               tok::pp_define) &&
1083         it != Macros.end()) {
1084       FormatTok->setType(it->second);
1085       if (it->second == TT_IfMacro) {
1086         // The lexer token currently has type tok::kw_unknown. However, for this
1087         // substitution to be treated correctly in the TokenAnnotator, faking
1088         // the tok value seems to be needed. Not sure if there's a more elegant
1089         // way.
1090         FormatTok->Tok.setKind(tok::kw_if);
1091       }
1092     } else if (FormatTok->is(tok::identifier)) {
1093       if (MacroBlockBeginRegex.match(Text))
1094         FormatTok->setType(TT_MacroBlockBegin);
1095       else if (MacroBlockEndRegex.match(Text))
1096         FormatTok->setType(TT_MacroBlockEnd);
1097     }
1098   }
1099 
1100   return FormatTok;
1101 }
1102 
1103 bool FormatTokenLexer::readRawTokenVerilogSpecific(Token &Tok) {
1104   // In Verilog the quote is not a character literal.
1105   //
1106   // Make the backtick and double backtick identifiers so that we can match
1107   // against them more easily.
1108   //
1109   // In Verilog an escaped identifier starts with backslash and ends with
1110   // whitespace. Unless that whitespace is an escaped newline. A backslash can
1111   // also begin an escaped newline outside of an escaped identifier. We check
1112   // for that outside of the Regex since we can't use negative lookahead
1113   // assertions. Simply changing the '*' to '+' breaks stuff as the escaped
1114   // identifier may have a length of 0 according to Section A.9.3.
1115   // FIXME: If there is an escaped newline in the middle of an escaped
1116   // identifier, allow for pasting the two lines together. But escaped
1117   // identifiers usually occur only in generated code anyway.
1118   static const llvm::Regex VerilogToken(R"re(^('|``?|\\(\\)re"
1119                                         "(\r?\n|\r)|[^[:space:]])*)");
1120 
1121   SmallVector<StringRef, 4> Matches;
1122   const char *Start = Lex->getBufferLocation();
1123   if (!VerilogToken.match(StringRef(Start, Lex->getBuffer().end() - Start),
1124                           &Matches)) {
1125     return false;
1126   }
1127   // There is a null byte at the end of the buffer, so we don't have to check
1128   // Start[1] is within the buffer.
1129   if (Start[0] == '\\' && (Start[1] == '\r' || Start[1] == '\n'))
1130     return false;
1131   size_t Len = Matches[0].size();
1132 
1133   // The kind has to be an identifier so we can match it against those defined
1134   // in Keywords. The kind has to be set before the length because the setLength
1135   // function checks that the kind is not an annotation.
1136   Tok.setKind(tok::raw_identifier);
1137   Tok.setLength(Len);
1138   Tok.setLocation(Lex->getSourceLocation(Start, Len));
1139   Tok.setRawIdentifierData(Start);
1140   Lex->seek(Lex->getCurrentBufferOffset() + Len, /*IsAtStartofline=*/false);
1141   return true;
1142 }
1143 
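// Reads one raw token from the underlying lexer (or the Verilog-specific
// matcher) and normalizes a few token kinds, e.g. unterminated string
// literals and the clang-format on/off comment markers.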
1144 void FormatTokenLexer::readRawToken(FormatToken &Tok) {
1145   // For Verilog, first see if there is a special token, and fall back to the
1146   // normal lexer if there isn't one.
1147   if (!Style.isVerilog() || !readRawTokenVerilogSpecific(Tok.Tok))
1148     Lex->LexFromRawLexer(Tok.Tok);
1149   Tok.TokenText = StringRef(SourceMgr.getCharacterData(Tok.Tok.getLocation()),
1150                             Tok.Tok.getLength());
1151   // For formatting, treat unterminated string literals like normal string
1152   // literals.
1153   if (Tok.is(tok::unknown)) {
1154     if (!Tok.TokenText.empty() && Tok.TokenText[0] == '"') {
1155       Tok.Tok.setKind(tok::string_literal);
1156       Tok.IsUnterminatedLiteral = true;
1157     } else if (Style.isJavaScript() && Tok.TokenText == "''") {
1158       Tok.Tok.setKind(tok::string_literal);
1159     }
1160   }
1161 
1162   if ((Style.isJavaScript() || Style.Language == FormatStyle::LK_Proto ||
1163        Style.Language == FormatStyle::LK_TextProto) &&
1164       Tok.is(tok::char_constant)) {
1165     Tok.Tok.setKind(tok::string_literal);
1166   }
1167 
1168   if (Tok.is(tok::comment) && (Tok.TokenText == "// clang-format on" ||
1169                                Tok.TokenText == "/* clang-format on */")) {
1170     FormattingDisabled = false;
1171   }
1172 
1173   Tok.Finalized = FormattingDisabled;
1174 
1175   if (Tok.is(tok::comment) && (Tok.TokenText == "// clang-format off" ||
1176                                Tok.TokenText == "/* clang-format off */")) {
1177     FormattingDisabled = true;
1178   }
1179 }
1180 
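// Restarts the raw lexer at the given offset into the file, used after
// manually lexing past constructs such as regex literals or template strings.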
1181 void FormatTokenLexer::resetLexer(unsigned Offset) {
1182   StringRef Buffer = SourceMgr.getBufferData(ID);
1183   LangOpts = getFormattingLangOpts(Style);
1184   Lex.reset(new Lexer(SourceMgr.getLocForStartOfFile(ID), LangOpts,
1185                       Buffer.begin(), Buffer.begin() + Offset, Buffer.end()));
1186   Lex->SetKeepWhitespaceMode(true);
1187   TrailingWhitespace = 0;
1188 }
1189 
1190 } // namespace format
1191 } // namespace clang
1192