//===- Lexer.cpp - C Language Family Lexer --------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Lexer and Token interfaces.
//
//===----------------------------------------------------------------------===//

#include "clang/Lex/Lexer.h"
#include "UnicodeCharSets.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TokenKinds.h"
#include "clang/Lex/LexDiagnostic.h"
#include "clang/Lex/LiteralSupport.h"
#include "clang/Lex/MultipleIncludeOpt.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Lex/Token.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemoryBufferRef.h"
#include "llvm/Support/NativeFormatting.h"
#include "llvm/Support/Unicode.h"
#include "llvm/Support/UnicodeCharRanges.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <optional>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;

//===----------------------------------------------------------------------===//
// Token Class Implementation
//===----------------------------------------------------------------------===//

/// isObjCAtKeyword - Return true if we have an ObjC keyword identifier.
bool Token::isObjCAtKeyword(tok::ObjCKeywordKind objcKey) const {
  if (isAnnotation())
    return false;
  if (IdentifierInfo *II = getIdentifierInfo())
    return II->getObjCKeywordID() == objcKey;
  return false;
}

/// getObjCKeywordID - Return the ObjC keyword kind.
tok::ObjCKeywordKind Token::getObjCKeywordID() const {
  if (isAnnotation())
    return tok::objc_not_keyword;
  IdentifierInfo *specId = getIdentifierInfo();
  return specId ? specId->getObjCKeywordID() : tok::objc_not_keyword;
}

//===----------------------------------------------------------------------===//
// Lexer Class Implementation
//===----------------------------------------------------------------------===//

void Lexer::anchor() {}

void Lexer::InitLexer(const char *BufStart, const char *BufPtr,
                      const char *BufEnd) {
  BufferStart = BufStart;
  BufferPtr = BufPtr;
  BufferEnd = BufEnd;

  assert(BufEnd[0] == 0 &&
         "We assume that the input buffer has a null character at the end"
         " to simplify lexing!");

  // Check whether we have a BOM in the beginning of the buffer. If so, act
  // accordingly. Right now we support only UTF-8 with and without a BOM, so
  // just skip the UTF-8 BOM if it's present.
  if (BufferStart == BufferPtr) {
    // Determine the size of the BOM.
    StringRef Buf(BufferStart, BufferEnd - BufferStart);
    size_t BOMLength = llvm::StringSwitch<size_t>(Buf)
                           .StartsWith("\xEF\xBB\xBF", 3) // UTF-8 BOM
                           .Default(0);

    // Skip the BOM.
    BufferPtr += BOMLength;
  }

  Is_PragmaLexer = false;
  CurrentConflictMarkerState = CMK_None;

  // Start of the file is a start of line.
  IsAtStartOfLine = true;
  IsAtPhysicalStartOfLine = true;

  HasLeadingSpace = false;
  HasLeadingEmptyMacro = false;

  // We are not after parsing a #.
  ParsingPreprocessorDirective = false;

  // We are not after parsing #include.
  ParsingFilename = false;

  // We are not in raw mode. Raw mode disables diagnostics and interpretation
  // of tokens (e.g. identifiers, thus disabling macro expansion). It is used
  // to quickly lex the tokens of the buffer, e.g. when handling a "#if 0" block
  // or otherwise skipping over tokens.
  LexingRawMode = false;

  // Default to not keeping comments.
  ExtendedTokenMode = 0;

  NewLinePtr = nullptr;
}

/// Lexer constructor - Create a new lexer object for the specified buffer
/// with the specified preprocessor managing the lexing process. This lexer
/// assumes that the associated file buffer and Preprocessor objects will
/// outlive it, so it doesn't take ownership of either of them.
Lexer::Lexer(FileID FID, const llvm::MemoryBufferRef &InputFile,
             Preprocessor &PP, bool IsFirstIncludeOfFile)
    : PreprocessorLexer(&PP, FID),
      FileLoc(PP.getSourceManager().getLocForStartOfFile(FID)),
      LangOpts(PP.getLangOpts()), LineComment(LangOpts.LineComment),
      IsFirstTimeLexingFile(IsFirstIncludeOfFile) {
  InitLexer(InputFile.getBufferStart(), InputFile.getBufferStart(),
            InputFile.getBufferEnd());

  resetExtendedTokenMode();
}

/// Lexer constructor - Create a new raw lexer object. This object is only
/// suitable for calls to 'LexFromRawLexer'. This lexer assumes that the text
/// range will outlive it, so it doesn't take ownership of it.
Lexer::Lexer(SourceLocation fileloc, const LangOptions &langOpts,
             const char *BufStart, const char *BufPtr, const char *BufEnd,
             bool IsFirstIncludeOfFile)
    : FileLoc(fileloc), LangOpts(langOpts), LineComment(LangOpts.LineComment),
      IsFirstTimeLexingFile(IsFirstIncludeOfFile) {
  InitLexer(BufStart, BufPtr, BufEnd);

  // We *are* in raw mode.
  LexingRawMode = true;
}

/// Lexer constructor - Create a new raw lexer object. This object is only
/// suitable for calls to 'LexFromRawLexer'. This lexer assumes that the text
/// range will outlive it, so it doesn't take ownership of it.
Lexer::Lexer(FileID FID, const llvm::MemoryBufferRef &FromFile,
             const SourceManager &SM, const LangOptions &langOpts,
             bool IsFirstIncludeOfFile)
    : Lexer(SM.getLocForStartOfFile(FID), langOpts, FromFile.getBufferStart(),
            FromFile.getBufferStart(), FromFile.getBufferEnd(),
            IsFirstIncludeOfFile) {}

void Lexer::resetExtendedTokenMode() {
  assert(PP && "Cannot reset token mode without a preprocessor");
  if (LangOpts.TraditionalCPP)
    SetKeepWhitespaceMode(true);
  else
    SetCommentRetentionState(PP->getCommentRetentionState());
}

/// Create_PragmaLexer: Lexer constructor - Create a new lexer object for
/// _Pragma expansion. This has a variety of magic semantics that this method
/// sets up. It returns a new'd Lexer that must be delete'd when done.
///
/// On entrance to this routine, TokStartLoc is a macro location which has a
/// spelling loc that indicates the bytes to be lexed for the token and an
/// expansion location that indicates where all lexed tokens should be
/// "expanded from".
///
/// TODO: It would really be nice to make _Pragma just be a wrapper around a
/// normal lexer that remaps tokens as they fly by. This would require making
/// Preprocessor::Lex virtual. Given that, we could just dump in a magic lexer
/// interface that could handle this stuff. This would pull GetMappedTokenLoc
/// out of the critical path of the lexer!
///
Lexer *Lexer::Create_PragmaLexer(SourceLocation SpellingLoc,
                                 SourceLocation ExpansionLocStart,
                                 SourceLocation ExpansionLocEnd,
                                 unsigned TokLen, Preprocessor &PP) {
  SourceManager &SM = PP.getSourceManager();

  // Create the lexer as if we were going to lex the file normally.
  FileID SpellingFID = SM.getFileID(SpellingLoc);
  llvm::MemoryBufferRef InputFile = SM.getBufferOrFake(SpellingFID);
  Lexer *L = new Lexer(SpellingFID, InputFile, PP);

  // Now that the lexer is created, change the start/end locations so that we
  // just lex the subsection of the file that we want. This is lexing from a
  // scratch buffer.
  const char *StrData = SM.getCharacterData(SpellingLoc);

  L->BufferPtr = StrData;
  L->BufferEnd = StrData + TokLen;
  assert(L->BufferEnd[0] == 0 && "Buffer is not nul terminated!");

  // Set the SourceLocation with the remapping information. This ensures that
  // GetMappedTokenLoc will remap the tokens as they are lexed.
  L->FileLoc = SM.createExpansionLoc(SM.getLocForStartOfFile(SpellingFID),
                                     ExpansionLocStart,
                                     ExpansionLocEnd, TokLen);

  // Ensure that the lexer thinks it is inside a directive, so that end \n will
  // return an EOD token.
  L->ParsingPreprocessorDirective = true;

  // This lexer really is for _Pragma.
  L->Is_PragmaLexer = true;
  return L;
}

void Lexer::seek(unsigned Offset, bool IsAtStartOfLine) {
  this->IsAtPhysicalStartOfLine = IsAtStartOfLine;
  this->IsAtStartOfLine = IsAtStartOfLine;
  assert((BufferStart + Offset) <= BufferEnd);
  BufferPtr = BufferStart + Offset;
}

template <typename T> static void StringifyImpl(T &Str, char Quote) {
  typename T::size_type i = 0, e = Str.size();
  while (i < e) {
    if (Str[i] == '\\' || Str[i] == Quote) {
      Str.insert(Str.begin() + i, '\\');
      i += 2;
      ++e;
    } else if (Str[i] == '\n' || Str[i] == '\r') {
      // Replace '\r\n' and '\n\r' with '\\' followed by 'n'.
      if ((i < e - 1) && (Str[i + 1] == '\n' || Str[i + 1] == '\r') &&
          Str[i] != Str[i + 1]) {
        Str[i] = '\\';
        Str[i + 1] = 'n';
      } else {
        // Replace '\n' and '\r' with '\\' followed by 'n'.
        Str[i] = '\\';
        Str.insert(Str.begin() + i + 1, 'n');
        ++e;
      }
      i += 2;
    } else
      ++i;
  }
}

std::string Lexer::Stringify(StringRef Str, bool Charify) {
  std::string Result = std::string(Str);
  char Quote = Charify ? '\'' : '"';
  StringifyImpl(Result, Quote);
  return Result;
}

void Lexer::Stringify(SmallVectorImpl<char> &Str) { StringifyImpl(Str, '"'); }

//===----------------------------------------------------------------------===//
// Token Spelling
//===----------------------------------------------------------------------===//

/// Slow case of getSpelling.
/// Extract the characters comprising the
/// spelling of this token from the provided input buffer.
static size_t getSpellingSlow(const Token &Tok, const char *BufPtr,
                              const LangOptions &LangOpts, char *Spelling) {
  assert(Tok.needsCleaning() && "getSpellingSlow called on simple token");

  size_t Length = 0;
  const char *BufEnd = BufPtr + Tok.getLength();

  if (tok::isStringLiteral(Tok.getKind())) {
    // Munch the encoding-prefix and opening double-quote.
    while (BufPtr < BufEnd) {
      unsigned Size;
      Spelling[Length++] = Lexer::getCharAndSizeNoWarn(BufPtr, Size, LangOpts);
      BufPtr += Size;

      if (Spelling[Length - 1] == '"')
        break;
    }

    // Raw string literals need special handling; trigraph expansion and line
    // splicing do not occur within their d-char-sequence nor within their
    // r-char-sequence.
    if (Length >= 2 &&
        Spelling[Length - 2] == 'R' && Spelling[Length - 1] == '"') {
      // Search backwards from the end of the token to find the matching
      // closing quote.
      const char *RawEnd = BufEnd;
      do --RawEnd; while (*RawEnd != '"');
      size_t RawLength = RawEnd - BufPtr + 1;

      // Everything between the quotes is included verbatim in the spelling.
      memcpy(Spelling + Length, BufPtr, RawLength);
      Length += RawLength;
      BufPtr += RawLength;

      // The rest of the token is lexed normally.
    }
  }

  while (BufPtr < BufEnd) {
    unsigned Size;
    Spelling[Length++] = Lexer::getCharAndSizeNoWarn(BufPtr, Size, LangOpts);
    BufPtr += Size;
  }

  assert(Length < Tok.getLength() &&
         "NeedsCleaning flag set on token that didn't need cleaning!");
  return Length;
}

/// getSpelling() - Return the 'spelling' of this token. The spelling of a
/// token is the characters used to represent the token in the source file
/// after trigraph expansion and escaped-newline folding. In particular, this
/// wants to get the true, uncanonicalized, spelling of things like digraphs,
/// UCNs, etc.
StringRef Lexer::getSpelling(SourceLocation loc,
                             SmallVectorImpl<char> &buffer,
                             const SourceManager &SM,
                             const LangOptions &options,
                             bool *invalid) {
  // Break down the source location.
  std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(loc);

  // Try to load the file buffer.
  bool invalidTemp = false;
  StringRef file = SM.getBufferData(locInfo.first, &invalidTemp);
  if (invalidTemp) {
    if (invalid) *invalid = true;
    return {};
  }

  const char *tokenBegin = file.data() + locInfo.second;

  // Lex from the start of the given location.
  Lexer lexer(SM.getLocForStartOfFile(locInfo.first), options,
              file.begin(), tokenBegin, file.end());
  Token token;
  lexer.LexFromRawLexer(token);

  unsigned length = token.getLength();

  // Common case: no need for cleaning.
  if (!token.needsCleaning())
    return StringRef(tokenBegin, length);

  // Hard case, we need to relex the characters into the string.
  buffer.resize(length);
  buffer.resize(getSpellingSlow(token, tokenBegin, options, buffer.data()));
  return StringRef(buffer.data(), buffer.size());
}

/// getSpelling() - Return the 'spelling' of this token. The spelling of a
/// token is the characters used to represent the token in the source file
/// after trigraph expansion and escaped-newline folding.
/// In particular, this
/// wants to get the true, uncanonicalized, spelling of things like digraphs,
/// UCNs, etc.
std::string Lexer::getSpelling(const Token &Tok, const SourceManager &SourceMgr,
                               const LangOptions &LangOpts, bool *Invalid) {
  assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");

  bool CharDataInvalid = false;
  const char *TokStart = SourceMgr.getCharacterData(Tok.getLocation(),
                                                    &CharDataInvalid);
  if (Invalid)
    *Invalid = CharDataInvalid;
  if (CharDataInvalid)
    return {};

  // If this token contains nothing interesting, return it directly.
  if (!Tok.needsCleaning())
    return std::string(TokStart, TokStart + Tok.getLength());

  std::string Result;
  Result.resize(Tok.getLength());
  Result.resize(getSpellingSlow(Tok, TokStart, LangOpts, &*Result.begin()));
  return Result;
}

/// getSpelling - This method is used to get the spelling of a token into a
/// preallocated buffer, instead of as an std::string. The caller is required
/// to allocate enough space for the token, which is guaranteed to be at least
/// Tok.getLength() bytes long. The actual length of the token is returned.
///
/// Note that this method may do two possible things: it may either fill in
/// the buffer specified with characters, or it may *change the input pointer*
/// to point to a constant buffer with the data already in it (avoiding a
/// copy). The caller is not allowed to modify the returned buffer pointer
/// if an internal buffer is returned.
unsigned Lexer::getSpelling(const Token &Tok, const char *&Buffer,
                            const SourceManager &SourceMgr,
                            const LangOptions &LangOpts, bool *Invalid) {
  assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");

  const char *TokStart = nullptr;
  // NOTE: this has to be checked *before* testing for an IdentifierInfo.
  if (Tok.is(tok::raw_identifier))
    TokStart = Tok.getRawIdentifier().data();
  else if (!Tok.hasUCN()) {
    if (const IdentifierInfo *II = Tok.getIdentifierInfo()) {
      // Just return the string from the identifier table, which is very quick.
      Buffer = II->getNameStart();
      return II->getLength();
    }
  }

  // NOTE: this can be checked even after testing for an IdentifierInfo.
  if (Tok.isLiteral())
    TokStart = Tok.getLiteralData();

  if (!TokStart) {
    // Compute the start of the token in the input lexer buffer.
    bool CharDataInvalid = false;
    TokStart = SourceMgr.getCharacterData(Tok.getLocation(), &CharDataInvalid);
    if (Invalid)
      *Invalid = CharDataInvalid;
    if (CharDataInvalid) {
      Buffer = "";
      return 0;
    }
  }

  // If this token contains nothing interesting, return it directly.
  if (!Tok.needsCleaning()) {
    Buffer = TokStart;
    return Tok.getLength();
  }

  // Otherwise, hard case, relex the characters into the string.
  return getSpellingSlow(Tok, TokStart, LangOpts, const_cast<char*>(Buffer));
}

/// MeasureTokenLength - Relex the token at the specified location and return
/// its length in bytes in the input file. If the token needs cleaning (e.g.
/// includes a trigraph or an escaped newline) then this count includes bytes
/// that are part of that.
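///
/// For example (hypothetical usage), to build a character range covering the
/// token at Loc:
/// \code
///   unsigned Len = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
///   CharSourceRange R =
///       CharSourceRange::getCharRange(Loc, Loc.getLocWithOffset(Len));
/// \endcode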
unsigned Lexer::MeasureTokenLength(SourceLocation Loc,
                                   const SourceManager &SM,
                                   const LangOptions &LangOpts) {
  Token TheTok;
  if (getRawToken(Loc, TheTok, SM, LangOpts))
    return 0;
  return TheTok.getLength();
}

/// Relex the token at the specified location.
/// \returns true if there was a failure, false on success.
bool Lexer::getRawToken(SourceLocation Loc, Token &Result,
                        const SourceManager &SM,
                        const LangOptions &LangOpts,
                        bool IgnoreWhiteSpace) {
  // TODO: this could be special cased for common tokens like identifiers, ')',
  // etc to make this faster, if it mattered. Just look at StrData[0] to handle
  // all obviously single-char tokens. This could use
  // Lexer::isObviouslySimpleCharacter for example to handle identifiers or
  // something.

  // If this comes from a macro expansion, we really do want the macro name, not
  // the token this macro expanded to.
  Loc = SM.getExpansionLoc(Loc);
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
  bool Invalid = false;
  StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
  if (Invalid)
    return true;

  const char *StrData = Buffer.data() + LocInfo.second;

  if (!IgnoreWhiteSpace && isWhitespace(StrData[0]))
    return true;

  // Create a lexer starting at the beginning of this token.
  Lexer TheLexer(SM.getLocForStartOfFile(LocInfo.first), LangOpts,
                 Buffer.begin(), StrData, Buffer.end());
  TheLexer.SetCommentRetentionState(true);
  TheLexer.LexFromRawLexer(Result);
  return false;
}

/// Returns the pointer that points to the beginning of the line that contains
/// the given offset, or null if the offset is invalid.
static const char *findBeginningOfLine(StringRef Buffer, unsigned Offset) {
  const char *BufStart = Buffer.data();
  if (Offset >= Buffer.size())
    return nullptr;

  const char *LexStart = BufStart + Offset;
  for (; LexStart != BufStart; --LexStart) {
    if (isVerticalWhitespace(LexStart[0]) &&
        !Lexer::isNewLineEscaped(BufStart, LexStart)) {
      // LexStart should point at first character of logical line.
      ++LexStart;
      break;
    }
  }
  return LexStart;
}

static SourceLocation getBeginningOfFileToken(SourceLocation Loc,
                                              const SourceManager &SM,
                                              const LangOptions &LangOpts) {
  assert(Loc.isFileID());
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
  if (LocInfo.first.isInvalid())
    return Loc;

  bool Invalid = false;
  StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
  if (Invalid)
    return Loc;

  // Back up from the current location until we hit the beginning of a line
  // (or the buffer). We'll relex from that point.
  const char *StrData = Buffer.data() + LocInfo.second;
  const char *LexStart = findBeginningOfLine(Buffer, LocInfo.second);
  if (!LexStart || LexStart == StrData)
    return Loc;

  // Create a lexer starting at the beginning of this token.
  SourceLocation LexerStartLoc = Loc.getLocWithOffset(-LocInfo.second);
  Lexer TheLexer(LexerStartLoc, LangOpts, Buffer.data(), LexStart,
                 Buffer.end());
  TheLexer.SetCommentRetentionState(true);

  // Lex tokens until we find the token that contains the source location.
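  // Note: getBufferLocation() returns the lexer's current buffer pointer,
  // which after LexFromRawLexer() points one past the token just lexed, so
  // comparing it against StrData tells us when we have reached or passed the
  // queried offset.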
  Token TheTok;
  do {
    TheLexer.LexFromRawLexer(TheTok);

    if (TheLexer.getBufferLocation() > StrData) {
      // Lexing this token has taken the lexer past the source location we're
      // looking for. If the current token encompasses our source location,
      // return the beginning of that token.
      if (TheLexer.getBufferLocation() - TheTok.getLength() <= StrData)
        return TheTok.getLocation();

      // We ended up skipping over the source location entirely, which means
      // that it points into whitespace. We're done here.
      break;
    }
  } while (TheTok.getKind() != tok::eof);

  // We've passed our source location; just return the original source location.
  return Loc;
}

SourceLocation Lexer::GetBeginningOfToken(SourceLocation Loc,
                                          const SourceManager &SM,
                                          const LangOptions &LangOpts) {
  if (Loc.isFileID())
    return getBeginningOfFileToken(Loc, SM, LangOpts);

  if (!SM.isMacroArgExpansion(Loc))
    return Loc;

  SourceLocation FileLoc = SM.getSpellingLoc(Loc);
  SourceLocation BeginFileLoc = getBeginningOfFileToken(FileLoc, SM, LangOpts);
  std::pair<FileID, unsigned> FileLocInfo = SM.getDecomposedLoc(FileLoc);
  std::pair<FileID, unsigned> BeginFileLocInfo =
      SM.getDecomposedLoc(BeginFileLoc);
  assert(FileLocInfo.first == BeginFileLocInfo.first &&
         FileLocInfo.second >= BeginFileLocInfo.second);
  return Loc.getLocWithOffset(BeginFileLocInfo.second - FileLocInfo.second);
}

namespace {

enum PreambleDirectiveKind {
  PDK_Skipped,
  PDK_Unknown
};

} // namespace

PreambleBounds Lexer::ComputePreamble(StringRef Buffer,
                                      const LangOptions &LangOpts,
                                      unsigned MaxLines) {
  // Create a lexer starting at the beginning of the file. Note that we use a
  // "fake" file source location at offset 1 so that the lexer will track our
  // position within the file.
  const SourceLocation::UIntTy StartOffset = 1;
  SourceLocation FileLoc = SourceLocation::getFromRawEncoding(StartOffset);
  Lexer TheLexer(FileLoc, LangOpts, Buffer.begin(), Buffer.begin(),
                 Buffer.end());
  TheLexer.SetCommentRetentionState(true);

  bool InPreprocessorDirective = false;
  Token TheTok;
  SourceLocation ActiveCommentLoc;

  unsigned MaxLineOffset = 0;
  if (MaxLines) {
    const char *CurPtr = Buffer.begin();
    unsigned CurLine = 0;
    while (CurPtr != Buffer.end()) {
      char ch = *CurPtr++;
      if (ch == '\n') {
        ++CurLine;
        if (CurLine == MaxLines)
          break;
      }
    }
    if (CurPtr != Buffer.end())
      MaxLineOffset = CurPtr - Buffer.begin();
  }

  do {
    TheLexer.LexFromRawLexer(TheTok);

    if (InPreprocessorDirective) {
      // If we've hit the end of the file, we're done.
      if (TheTok.getKind() == tok::eof) {
        break;
      }

      // If we haven't hit the end of the preprocessor directive, skip this
      // token.
      if (!TheTok.isAtStartOfLine())
        continue;

      // We've passed the end of the preprocessor directive, and will look
      // at this token again below.
      InPreprocessorDirective = false;
    }

    // Keep track of the # of lines in the preamble.
    if (TheTok.isAtStartOfLine()) {
      unsigned TokOffset = TheTok.getLocation().getRawEncoding() - StartOffset;

      // If we were asked to limit the number of lines in the preamble,
      // and we're about to exceed that limit, we're done.
      if (MaxLineOffset && TokOffset >= MaxLineOffset)
        break;
    }

    // Comments are okay; skip over them.
    if (TheTok.getKind() == tok::comment) {
      if (ActiveCommentLoc.isInvalid())
        ActiveCommentLoc = TheTok.getLocation();
      continue;
    }

    if (TheTok.isAtStartOfLine() && TheTok.getKind() == tok::hash) {
      // This is the start of a preprocessor directive.
      Token HashTok = TheTok;
      InPreprocessorDirective = true;
      ActiveCommentLoc = SourceLocation();

      // Figure out which directive this is. Since we're lexing raw tokens,
      // we don't have an identifier table available. Instead, just look at
      // the raw identifier to recognize and categorize preprocessor directives.
      TheLexer.LexFromRawLexer(TheTok);
      if (TheTok.getKind() == tok::raw_identifier && !TheTok.needsCleaning()) {
        StringRef Keyword = TheTok.getRawIdentifier();
        PreambleDirectiveKind PDK
          = llvm::StringSwitch<PreambleDirectiveKind>(Keyword)
              .Case("include", PDK_Skipped)
              .Case("__include_macros", PDK_Skipped)
              .Case("define", PDK_Skipped)
              .Case("undef", PDK_Skipped)
              .Case("line", PDK_Skipped)
              .Case("error", PDK_Skipped)
              .Case("pragma", PDK_Skipped)
              .Case("import", PDK_Skipped)
              .Case("include_next", PDK_Skipped)
              .Case("warning", PDK_Skipped)
              .Case("ident", PDK_Skipped)
              .Case("sccs", PDK_Skipped)
              .Case("assert", PDK_Skipped)
              .Case("unassert", PDK_Skipped)
              .Case("if", PDK_Skipped)
              .Case("ifdef", PDK_Skipped)
              .Case("ifndef", PDK_Skipped)
              .Case("elif", PDK_Skipped)
              .Case("elifdef", PDK_Skipped)
              .Case("elifndef", PDK_Skipped)
              .Case("else", PDK_Skipped)
              .Case("endif", PDK_Skipped)
              .Default(PDK_Unknown);

        switch (PDK) {
        case PDK_Skipped:
          continue;

        case PDK_Unknown:
          // We don't know what this directive is; stop at the '#'.
          break;
        }
      }

      // We only end up here if we didn't recognize the preprocessor
      // directive or it was one that can't occur in the preamble at this
      // point. Roll back the current token to the location of the '#'.
      TheTok = HashTok;
    } else if (TheTok.isAtStartOfLine() &&
               TheTok.getKind() == tok::raw_identifier &&
               TheTok.getRawIdentifier() == "module" &&
               LangOpts.CPlusPlusModules) {
      // The initial global module fragment introducer "module;" is part of
      // the preamble, which runs up to the module declaration "module foo;".
      Token ModuleTok = TheTok;
      do {
        TheLexer.LexFromRawLexer(TheTok);
      } while (TheTok.getKind() == tok::comment);
      if (TheTok.getKind() != tok::semi) {
        // Not global module fragment, roll back.
        TheTok = ModuleTok;
        break;
      }
      continue;
    }

    // We hit a token that we don't recognize as being in the
    // "preprocessing only" part of the file, so we're no longer in
    // the preamble.
    break;
  } while (true);

  SourceLocation End;
  if (ActiveCommentLoc.isValid())
    End = ActiveCommentLoc; // don't truncate a decl comment.
  else
    End = TheTok.getLocation();

  return PreambleBounds(End.getRawEncoding() - FileLoc.getRawEncoding(),
                        TheTok.isAtStartOfLine());
}

unsigned Lexer::getTokenPrefixLength(SourceLocation TokStart, unsigned CharNo,
                                     const SourceManager &SM,
                                     const LangOptions &LangOpts) {
  // Figure out how many physical characters away the specified expansion
  // character is. This needs to take into consideration newlines and
  // trigraphs.
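  // For example (hypothetical input): in the token "ab\<newline>cd", the
  // expansion character at CharNo 2 is 'c', but its physical offset from the
  // token start is 4, because the escaped newline occupies two bytes.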
  bool Invalid = false;
  const char *TokPtr = SM.getCharacterData(TokStart, &Invalid);

  // If they request the first char of the token, we're trivially done.
  if (Invalid || (CharNo == 0 && Lexer::isObviouslySimpleCharacter(*TokPtr)))
    return 0;

  unsigned PhysOffset = 0;

  // The usual case is that tokens don't contain anything interesting. Skip
  // over the uninteresting characters. If a token only consists of simple
  // chars, this method is extremely fast.
  while (Lexer::isObviouslySimpleCharacter(*TokPtr)) {
    if (CharNo == 0)
      return PhysOffset;
    ++TokPtr;
    --CharNo;
    ++PhysOffset;
  }

  // If we have a character that may be a trigraph or escaped newline, use a
  // lexer to parse it correctly.
  for (; CharNo; --CharNo) {
    unsigned Size;
    Lexer::getCharAndSizeNoWarn(TokPtr, Size, LangOpts);
    TokPtr += Size;
    PhysOffset += Size;
  }

  // Final detail: if we end up on an escaped newline, we want to return the
  // location of the actual byte of the token. For example foo\<newline>bar
  // advanced by 3 should return the location of b, not of \\. One compounding
  // detail of this is that the escape may be made by a trigraph.
  if (!Lexer::isObviouslySimpleCharacter(*TokPtr))
    PhysOffset += Lexer::SkipEscapedNewLines(TokPtr) - TokPtr;

  return PhysOffset;
}

/// Computes the source location just past the end of the
/// token at this source location.
///
/// This routine can be used to produce a source location that
/// points just past the end of the token referenced by \p Loc, and
/// is generally used when a diagnostic needs to point just after a
/// token where it expected something different than it received. If
/// the returned source location would not be meaningful (e.g., if
/// it points into a macro), this routine returns an invalid
/// source location.
///
/// \param Offset an offset from the end of the token, where the source
/// location should refer to. The default offset (0) produces a source
/// location pointing just past the end of the token; an offset of 1 produces
/// a source location pointing to the last character in the token, etc.
SourceLocation Lexer::getLocForEndOfToken(SourceLocation Loc, unsigned Offset,
                                          const SourceManager &SM,
                                          const LangOptions &LangOpts) {
  if (Loc.isInvalid())
    return {};

  if (Loc.isMacroID()) {
    if (Offset > 0 || !isAtEndOfMacroExpansion(Loc, SM, LangOpts, &Loc))
      return {}; // Points inside the macro expansion.
  }

  unsigned Len = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
  if (Len > Offset)
    Len = Len - Offset;
  else
    return Loc;

  return Loc.getLocWithOffset(Len);
}

/// Returns true if the given MacroID location points at the first
/// token of the macro expansion.
bool Lexer::isAtStartOfMacroExpansion(SourceLocation loc,
                                      const SourceManager &SM,
                                      const LangOptions &LangOpts,
                                      SourceLocation *MacroBegin) {
  assert(loc.isValid() && loc.isMacroID() && "Expected a valid macro loc");

  SourceLocation expansionLoc;
  if (!SM.isAtStartOfImmediateMacroExpansion(loc, &expansionLoc))
    return false;

  if (expansionLoc.isFileID()) {
    // No other macro expansions, this is the first.
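    // expansionLoc is a file location here, i.e. the point where this
    // expansion was actually written in the source.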
    if (MacroBegin)
      *MacroBegin = expansionLoc;
    return true;
  }

  return isAtStartOfMacroExpansion(expansionLoc, SM, LangOpts, MacroBegin);
}

/// Returns true if the given MacroID location points at the last
/// token of the macro expansion.
bool Lexer::isAtEndOfMacroExpansion(SourceLocation loc,
                                    const SourceManager &SM,
                                    const LangOptions &LangOpts,
                                    SourceLocation *MacroEnd) {
  assert(loc.isValid() && loc.isMacroID() && "Expected a valid macro loc");

  SourceLocation spellLoc = SM.getSpellingLoc(loc);
  unsigned tokLen = MeasureTokenLength(spellLoc, SM, LangOpts);
  if (tokLen == 0)
    return false;

  SourceLocation afterLoc = loc.getLocWithOffset(tokLen);
  SourceLocation expansionLoc;
  if (!SM.isAtEndOfImmediateMacroExpansion(afterLoc, &expansionLoc))
    return false;

  if (expansionLoc.isFileID()) {
    // No other macro expansions.
    if (MacroEnd)
      *MacroEnd = expansionLoc;
    return true;
  }

  return isAtEndOfMacroExpansion(expansionLoc, SM, LangOpts, MacroEnd);
}

static CharSourceRange makeRangeFromFileLocs(CharSourceRange Range,
                                             const SourceManager &SM,
                                             const LangOptions &LangOpts) {
  SourceLocation Begin = Range.getBegin();
  SourceLocation End = Range.getEnd();
  assert(Begin.isFileID() && End.isFileID());
  if (Range.isTokenRange()) {
    End = Lexer::getLocForEndOfToken(End, 0, SM, LangOpts);
    if (End.isInvalid())
      return {};
  }

  // Break down the source locations.
  FileID FID;
  unsigned BeginOffs;
  std::tie(FID, BeginOffs) = SM.getDecomposedLoc(Begin);
  if (FID.isInvalid())
    return {};

  unsigned EndOffs;
  if (!SM.isInFileID(End, FID, &EndOffs) ||
      BeginOffs > EndOffs)
    return {};

  return CharSourceRange::getCharRange(Begin, End);
}

// Assumes that `Loc` is in an expansion.
static bool isInExpansionTokenRange(const SourceLocation Loc,
                                    const SourceManager &SM) {
  return SM.getSLocEntry(SM.getFileID(Loc))
      .getExpansion()
      .isExpansionTokenRange();
}

CharSourceRange Lexer::makeFileCharRange(CharSourceRange Range,
                                         const SourceManager &SM,
                                         const LangOptions &LangOpts) {
  SourceLocation Begin = Range.getBegin();
  SourceLocation End = Range.getEnd();
  if (Begin.isInvalid() || End.isInvalid())
    return {};

  if (Begin.isFileID() && End.isFileID())
    return makeRangeFromFileLocs(Range, SM, LangOpts);

  if (Begin.isMacroID() && End.isFileID()) {
    if (!isAtStartOfMacroExpansion(Begin, SM, LangOpts, &Begin))
      return {};
    Range.setBegin(Begin);
    return makeRangeFromFileLocs(Range, SM, LangOpts);
  }

  if (Begin.isFileID() && End.isMacroID()) {
    if (Range.isTokenRange()) {
      if (!isAtEndOfMacroExpansion(End, SM, LangOpts, &End))
        return {};
      // Use the *original* end, not the expanded one in `End`.
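      // At this point Range.getEnd() still holds the original macro location,
      // while End has already been rewritten to the expansion's file location.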
      Range.setTokenRange(isInExpansionTokenRange(Range.getEnd(), SM));
    } else if (!isAtStartOfMacroExpansion(End, SM, LangOpts, &End))
      return {};
    Range.setEnd(End);
    return makeRangeFromFileLocs(Range, SM, LangOpts);
  }

  assert(Begin.isMacroID() && End.isMacroID());
  SourceLocation MacroBegin, MacroEnd;
  if (isAtStartOfMacroExpansion(Begin, SM, LangOpts, &MacroBegin) &&
      ((Range.isTokenRange() && isAtEndOfMacroExpansion(End, SM, LangOpts,
                                                        &MacroEnd)) ||
       (Range.isCharRange() && isAtStartOfMacroExpansion(End, SM, LangOpts,
                                                         &MacroEnd)))) {
    Range.setBegin(MacroBegin);
    Range.setEnd(MacroEnd);
    // Use the *original* `End`, not the expanded one in `MacroEnd`.
    if (Range.isTokenRange())
      Range.setTokenRange(isInExpansionTokenRange(End, SM));
    return makeRangeFromFileLocs(Range, SM, LangOpts);
  }

  bool Invalid = false;
  const SrcMgr::SLocEntry &BeginEntry = SM.getSLocEntry(SM.getFileID(Begin),
                                                        &Invalid);
  if (Invalid)
    return {};

  if (BeginEntry.getExpansion().isMacroArgExpansion()) {
    const SrcMgr::SLocEntry &EndEntry = SM.getSLocEntry(SM.getFileID(End),
                                                        &Invalid);
    if (Invalid)
      return {};

    if (EndEntry.getExpansion().isMacroArgExpansion() &&
        BeginEntry.getExpansion().getExpansionLocStart() ==
            EndEntry.getExpansion().getExpansionLocStart()) {
      Range.setBegin(SM.getImmediateSpellingLoc(Begin));
      Range.setEnd(SM.getImmediateSpellingLoc(End));
      return makeFileCharRange(Range, SM, LangOpts);
    }
  }

  return {};
}

StringRef Lexer::getSourceText(CharSourceRange Range,
                               const SourceManager &SM,
                               const LangOptions &LangOpts,
                               bool *Invalid) {
  Range = makeFileCharRange(Range, SM, LangOpts);
  if (Range.isInvalid()) {
    if (Invalid) *Invalid = true;
    return {};
  }

  // Break down the source location.
  std::pair<FileID, unsigned> beginInfo = SM.getDecomposedLoc(Range.getBegin());
  if (beginInfo.first.isInvalid()) {
    if (Invalid) *Invalid = true;
    return {};
  }

  unsigned EndOffs;
  if (!SM.isInFileID(Range.getEnd(), beginInfo.first, &EndOffs) ||
      beginInfo.second > EndOffs) {
    if (Invalid) *Invalid = true;
    return {};
  }

  // Try to load the file buffer.
  bool invalidTemp = false;
  StringRef file = SM.getBufferData(beginInfo.first, &invalidTemp);
  if (invalidTemp) {
    if (Invalid) *Invalid = true;
    return {};
  }

  if (Invalid) *Invalid = false;
  return file.substr(beginInfo.second, EndOffs - beginInfo.second);
}

StringRef Lexer::getImmediateMacroName(SourceLocation Loc,
                                       const SourceManager &SM,
                                       const LangOptions &LangOpts) {
  assert(Loc.isMacroID() && "Only reasonable to call this on macros");

  // Find the location of the immediate macro expansion.
  while (true) {
    FileID FID = SM.getFileID(Loc);
    const SrcMgr::SLocEntry *E = &SM.getSLocEntry(FID);
    const SrcMgr::ExpansionInfo &Expansion = E->getExpansion();
    Loc = Expansion.getExpansionLocStart();
    if (!Expansion.isMacroArgExpansion())
      break;

    // For macro arguments we need to check that the argument did not come
    // from an inner macro, e.g: "MAC1( MAC2(foo) )"

    // Loc points to the argument id of the macro definition, move to the
    // macro expansion.
    Loc = SM.getImmediateExpansionRange(Loc).getBegin();
    SourceLocation SpellLoc = Expansion.getSpellingLoc();
    if (SpellLoc.isFileID())
      break; // No inner macro.

    // If spelling location resides in the same FileID as macro expansion
    // location, it means there is no inner macro.
    FileID MacroFID = SM.getFileID(Loc);
    if (SM.isInFileID(SpellLoc, MacroFID))
      break;

    // Argument came from inner macro.
    Loc = SpellLoc;
  }

  // Find the spelling location of the start of the non-argument expansion
  // range. This is where the macro name was spelled in order to begin
  // expanding this macro.
  Loc = SM.getSpellingLoc(Loc);

  // Dig out the buffer where the macro name was spelled and the extents of the
  // name so that we can render it into the expansion note.
  std::pair<FileID, unsigned> ExpansionInfo = SM.getDecomposedLoc(Loc);
  unsigned MacroTokenLength = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
  StringRef ExpansionBuffer = SM.getBufferData(ExpansionInfo.first);
  return ExpansionBuffer.substr(ExpansionInfo.second, MacroTokenLength);
}

StringRef Lexer::getImmediateMacroNameForDiagnostics(
    SourceLocation Loc, const SourceManager &SM, const LangOptions &LangOpts) {
  assert(Loc.isMacroID() && "Only reasonable to call this on macros");
  // Walk past macro argument expansions.
  while (SM.isMacroArgExpansion(Loc))
    Loc = SM.getImmediateExpansionRange(Loc).getBegin();

  // If the macro's spelling isn't FileID or from scratch space, then it's
  // actually a token paste or stringization (or similar) and not a macro at
  // all.
  SourceLocation SpellLoc = SM.getSpellingLoc(Loc);
  if (!SpellLoc.isFileID() || SM.isWrittenInScratchSpace(SpellLoc))
    return {};

  // Find the spelling location of the start of the non-argument expansion
  // range. This is where the macro name was spelled in order to begin
  // expanding this macro.
  Loc = SM.getSpellingLoc(SM.getImmediateExpansionRange(Loc).getBegin());

  // Dig out the buffer where the macro name was spelled and the extents of the
  // name so that we can render it into the expansion note.
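  // Loc now points at the first character of the macro name in a real file
  // buffer, so measuring the token there recovers the name's text.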
  std::pair<FileID, unsigned> ExpansionInfo = SM.getDecomposedLoc(Loc);
  unsigned MacroTokenLength = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
  StringRef ExpansionBuffer = SM.getBufferData(ExpansionInfo.first);
  return ExpansionBuffer.substr(ExpansionInfo.second, MacroTokenLength);
}

bool Lexer::isAsciiIdentifierContinueChar(char c, const LangOptions &LangOpts) {
  return isAsciiIdentifierContinue(c, LangOpts.DollarIdents);
}

bool Lexer::isNewLineEscaped(const char *BufferStart, const char *Str) {
  assert(isVerticalWhitespace(Str[0]));
  if (Str - 1 < BufferStart)
    return false;

  if ((Str[0] == '\n' && Str[-1] == '\r') ||
      (Str[0] == '\r' && Str[-1] == '\n')) {
    if (Str - 2 < BufferStart)
      return false;
    --Str;
  }
  --Str;

  // Rewind to first non-space character:
  while (Str > BufferStart && isHorizontalWhitespace(*Str))
    --Str;

  return *Str == '\\';
}

StringRef Lexer::getIndentationForLine(SourceLocation Loc,
                                       const SourceManager &SM) {
  if (Loc.isInvalid() || Loc.isMacroID())
    return {};
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
  if (LocInfo.first.isInvalid())
    return {};
  bool Invalid = false;
  StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
  if (Invalid)
    return {};
  const char *Line = findBeginningOfLine(Buffer, LocInfo.second);
  if (!Line)
    return {};
  StringRef Rest = Buffer.substr(Line - Buffer.data());
  size_t NumWhitespaceChars = Rest.find_first_not_of(" \t");
  return NumWhitespaceChars == StringRef::npos
             ? ""
             : Rest.take_front(NumWhitespaceChars);
}

//===----------------------------------------------------------------------===//
// Diagnostics forwarding code.
//===----------------------------------------------------------------------===//

/// GetMappedTokenLoc - If lexing out of a 'mapped buffer', where we pretend the
/// lexer buffer was all expanded at a single point, perform the mapping.
/// This is currently only used for _Pragma implementation, so it is the slow
/// path of the hot getSourceLocation method. Do not allow it to be inlined.
static LLVM_ATTRIBUTE_NOINLINE SourceLocation GetMappedTokenLoc(
    Preprocessor &PP, SourceLocation FileLoc, unsigned CharNo, unsigned TokLen);
static SourceLocation GetMappedTokenLoc(Preprocessor &PP,
                                        SourceLocation FileLoc,
                                        unsigned CharNo, unsigned TokLen) {
  assert(FileLoc.isMacroID() && "Must be a macro expansion");

  // Otherwise, we're lexing "mapped tokens". This is used for things like
  // _Pragma handling. Combine the expansion location of FileLoc with the
  // spelling location.
  SourceManager &SM = PP.getSourceManager();

  // Create a new SLoc which is expanded from Expansion(FileLoc) but whose
  // characters come from spelling(FileLoc)+Offset.
  SourceLocation SpellingLoc = SM.getSpellingLoc(FileLoc);
  SpellingLoc = SpellingLoc.getLocWithOffset(CharNo);

  // Figure out the expansion loc range, which is the range covered by the
  // original _Pragma(...) sequence.
  CharSourceRange II = SM.getImmediateExpansionRange(FileLoc);

  return SM.createExpansionLoc(SpellingLoc, II.getBegin(), II.getEnd(), TokLen);
}

/// getSourceLocation - Return a source location identifier for the specified
/// offset in the current file.
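///
/// For example (hypothetical usage inside the lexer): getSourceLocation(CurPtr)
/// maps the raw buffer pointer CurPtr back to a SourceLocation, taking the
/// _Pragma remapping (GetMappedTokenLoc above) into account when FileLoc is a
/// macro location.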
SourceLocation Lexer::getSourceLocation(const char *Loc,
                                        unsigned TokLen) const {
  assert(Loc >= BufferStart && Loc <= BufferEnd &&
         "Location out of range for this buffer!");

  // In the normal case, we're just lexing from a simple file buffer, return
  // the file id from FileLoc with the offset specified.
  unsigned CharNo = Loc - BufferStart;
  if (FileLoc.isFileID())
    return FileLoc.getLocWithOffset(CharNo);

  // Otherwise, this is the _Pragma lexer case, which pretends that all of the
  // tokens are lexed from where the _Pragma was defined.
  assert(PP && "This doesn't work on raw lexers");
  return GetMappedTokenLoc(*PP, FileLoc, CharNo, TokLen);
}

/// Diag - Forwarding function for diagnostics. This translates a source
/// position in the current buffer into a SourceLocation object for rendering.
DiagnosticBuilder Lexer::Diag(const char *Loc, unsigned DiagID) const {
  return PP->Diag(getSourceLocation(Loc), DiagID);
}

//===----------------------------------------------------------------------===//
// Trigraph and Escaped Newline Handling Code.
//===----------------------------------------------------------------------===//

/// GetTrigraphCharForLetter - Given a character that occurs after a ?? pair,
/// return the decoded trigraph letter it corresponds to, or '\0' if nothing.
static char GetTrigraphCharForLetter(char Letter) {
  switch (Letter) {
  default:   return 0;
  case '=':  return '#';
  case ')':  return ']';
  case '(':  return '[';
  case '!':  return '|';
  case '\'': return '^';
  case '>':  return '}';
  case '/':  return '\\';
  case '<':  return '{';
  case '-':  return '~';
  }
}

/// DecodeTrigraphChar - If the specified character is a legal trigraph when
/// prefixed with ??, emit a trigraph warning. If trigraphs are enabled,
/// return the result character. Finally, emit a warning about trigraph use
/// whether trigraphs are enabled or not.
static char DecodeTrigraphChar(const char *CP, Lexer *L, bool Trigraphs) {
  char Res = GetTrigraphCharForLetter(*CP);
  if (!Res)
    return Res;

  if (!Trigraphs) {
    if (L && !L->isLexingRawMode())
      L->Diag(CP - 2, diag::trigraph_ignored);
    return 0;
  }

  if (L && !L->isLexingRawMode())
    L->Diag(CP - 2, diag::trigraph_converted) << StringRef(&Res, 1);
  return Res;
}

/// getEscapedNewLineSize - Return the size of the specified escaped newline,
/// or 0 if it is not an escaped newline. P[-1] is known to be a "\" or a
/// trigraph equivalent on entry to this function.
unsigned Lexer::getEscapedNewLineSize(const char *Ptr) {
  unsigned Size = 0;
  while (isWhitespace(Ptr[Size])) {
    ++Size;

    if (Ptr[Size - 1] != '\n' && Ptr[Size - 1] != '\r')
      continue;

    // If this is a \r\n or \n\r, skip the other half.
    if ((Ptr[Size] == '\r' || Ptr[Size] == '\n') &&
        Ptr[Size - 1] != Ptr[Size])
      ++Size;

    return Size;
  }

  // Not an escaped newline, must be a \t or something else.
  return 0;
}

/// SkipEscapedNewLines - If P points to an escaped newline (or a series of
/// them), skip over them and return the first non-escaped-newline found,
/// otherwise return P.
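///
/// For example (hypothetical input): given a buffer containing two
/// backslash-newline pairs followed by 'x', this returns a pointer to the 'x';
/// if P does not point at an escape sequence, it is returned unchanged.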
const char *Lexer::SkipEscapedNewLines(const char *P) {
  while (true) {
    const char *AfterEscape;
    if (*P == '\\') {
      AfterEscape = P + 1;
    } else if (*P == '?') {
      // If not a trigraph for escape, bail out.
      if (P[1] != '?' || P[2] != '/')
        return P;
      // FIXME: Take LangOpts into account; the language might not
      // support trigraphs.
      AfterEscape = P + 3;
    } else {
      return P;
    }

    unsigned NewLineSize = Lexer::getEscapedNewLineSize(AfterEscape);
    if (NewLineSize == 0) return P;
    P = AfterEscape + NewLineSize;
  }
}

std::optional<Token> Lexer::findNextToken(SourceLocation Loc,
                                          const SourceManager &SM,
                                          const LangOptions &LangOpts) {
  if (Loc.isMacroID()) {
    if (!Lexer::isAtEndOfMacroExpansion(Loc, SM, LangOpts, &Loc))
      return std::nullopt;
  }
  Loc = Lexer::getLocForEndOfToken(Loc, 0, SM, LangOpts);

  // Break down the source location.
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);

  // Try to load the file buffer.
  bool InvalidTemp = false;
  StringRef File = SM.getBufferData(LocInfo.first, &InvalidTemp);
  if (InvalidTemp)
    return std::nullopt;

  const char *TokenBegin = File.data() + LocInfo.second;

  // Lex from the start of the given location.
  Lexer lexer(SM.getLocForStartOfFile(LocInfo.first), LangOpts, File.begin(),
              TokenBegin, File.end());
  // Find the token.
  Token Tok;
  lexer.LexFromRawLexer(Tok);
  return Tok;
}

/// Checks that the given token is the first token that occurs after the
/// given location (this excludes comments and whitespace). Returns the location
/// immediately after the specified token. If the token is not found or the
/// location is inside a macro, the returned source location will be invalid.
SourceLocation Lexer::findLocationAfterToken(
    SourceLocation Loc, tok::TokenKind TKind, const SourceManager &SM,
    const LangOptions &LangOpts, bool SkipTrailingWhitespaceAndNewLine) {
  std::optional<Token> Tok = findNextToken(Loc, SM, LangOpts);
  if (!Tok || Tok->isNot(TKind))
    return {};
  SourceLocation TokenLoc = Tok->getLocation();

  // Calculate how much whitespace needs to be skipped if any.
  unsigned NumWhitespaceChars = 0;
  if (SkipTrailingWhitespaceAndNewLine) {
    const char *TokenEnd = SM.getCharacterData(TokenLoc) + Tok->getLength();
    unsigned char C = *TokenEnd;
    while (isHorizontalWhitespace(C)) {
      C = *(++TokenEnd);
      NumWhitespaceChars++;
    }

    // Skip \r, \n, \r\n, or \n\r
    if (C == '\n' || C == '\r') {
      char PrevC = C;
      C = *(++TokenEnd);
      NumWhitespaceChars++;
      if ((C == '\n' || C == '\r') && C != PrevC)
        NumWhitespaceChars++;
    }
  }

  return TokenLoc.getLocWithOffset(Tok->getLength() + NumWhitespaceChars);
}

/// getCharAndSizeSlow - Peek a single 'character' from the specified buffer,
/// get its size, and return it. This is tricky in several cases:
///   1. If currently at the start of a trigraph, we warn about the trigraph,
///      then either return the trigraph (skipping 3 chars) or the '?',
///      depending on whether trigraphs are enabled or not.
///   2. If this is an escaped newline (potentially with whitespace between
///      the backslash and newline), implicitly skip the newline and return
///      the char after it.
///
/// This handles the slow/uncommon case of the getCharAndSize method. Here we
/// know that we can accumulate into Size, and that we have already incremented
/// Ptr by Size bytes.
///
/// NOTE: When this method is updated, getCharAndSizeSlowNoWarn (below) should
/// be updated to match.
char Lexer::getCharAndSizeSlow(const char *Ptr, unsigned &Size,
                               Token *Tok) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
  Slash:
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters between the slash and
    // newline.
    if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
      // Remember that this token needs to be cleaned.
      if (Tok) Tok->setFlag(Token::NeedsCleaning);

      // Warn if there was whitespace between the backslash and newline.
      if (Ptr[0] != '\n' && Ptr[0] != '\r' && Tok && !isLexingRawMode())
        Diag(Ptr, diag::backslash_newline_space);

      // Found backslash<whitespace><newline>. Parse the char after it.
      Size += EscapedNewLineSize;
      Ptr += EscapedNewLineSize;

      // Use slow version to accumulate a correct size field.
      return getCharAndSizeSlow(Ptr, Size, Tok);
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), emit
    // a trigraph warning. If so, and if trigraphs are enabled, return it.
    if (char C = DecodeTrigraphChar(Ptr + 2, Tok ? this : nullptr,
                                    LangOpts.Trigraphs)) {
      // Remember that this token needs to be cleaned.
      if (Tok) Tok->setFlag(Token::NeedsCleaning);

      Ptr += 3;
      Size += 3;
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}

/// getCharAndSizeSlowNoWarn - Handle the slow/uncommon case of the
/// getCharAndSizeNoWarn method. Here we know that we can accumulate into Size,
/// and that we have already incremented Ptr by Size bytes.
///
/// NOTE: When this method is updated, getCharAndSizeSlow (above) should
/// be updated to match.
char Lexer::getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
                                     const LangOptions &LangOpts) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
  Slash:
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters followed by a newline.
    if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
      // Found backslash<whitespace><newline>. Parse the char after it.
      Size += EscapedNewLineSize;
      Ptr += EscapedNewLineSize;

      // Use slow version to accumulate a correct size field.
      return getCharAndSizeSlowNoWarn(Ptr, Size, LangOpts);
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (LangOpts.Trigraphs && Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), return
    // it.
    if (char C = GetTrigraphCharForLetter(Ptr[2])) {
      Ptr += 3;
      Size += 3;
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}

//===----------------------------------------------------------------------===//
// Helper methods for lexing.
//===----------------------------------------------------------------------===//

/// Routine that indiscriminately sets the offset into the source file.
void Lexer::SetByteOffset(unsigned Offset, bool StartOfLine) {
  BufferPtr = BufferStart + Offset;
  if (BufferPtr > BufferEnd)
    BufferPtr = BufferEnd;
  // FIXME: What exactly does the StartOfLine bit mean? There are two
  // possible meanings for the "start" of the line: the first token on the
  // unexpanded line, or the first token on the expanded line.
  IsAtStartOfLine = StartOfLine;
  IsAtPhysicalStartOfLine = StartOfLine;
}

static bool isUnicodeWhitespace(uint32_t Codepoint) {
  static const llvm::sys::UnicodeCharSet UnicodeWhitespaceChars(
      UnicodeWhitespaceCharRanges);
  return UnicodeWhitespaceChars.contains(Codepoint);
}

static llvm::SmallString<5> codepointAsHexString(uint32_t C) {
  llvm::SmallString<5> CharBuf;
  llvm::raw_svector_ostream CharOS(CharBuf);
  llvm::write_hex(CharOS, C, llvm::HexPrintStyle::Upper, 4);
  return CharBuf;
}

// To mitigate https://github.com/llvm/llvm-project/issues/54732,
// we allow "Mathematical Notation Characters" in identifiers.
// This is a proposed profile that extends XID_Start/XID_Continue
// with mathematical symbols and superscript and subscript digits
// found in some production software.
// https://www.unicode.org/L2/L2022/22230-math-profile.pdf
static bool isMathematicalExtensionID(uint32_t C, const LangOptions &LangOpts,
                                      bool IsStart, bool &IsExtension) {
  static const llvm::sys::UnicodeCharSet MathStartChars(
      MathematicalNotationProfileIDStartRanges);
  static const llvm::sys::UnicodeCharSet MathContinueChars(
      MathematicalNotationProfileIDContinueRanges);
  if (MathStartChars.contains(C) ||
      (!IsStart && MathContinueChars.contains(C))) {
    IsExtension = true;
    return true;
  }
  return false;
}

static bool isAllowedIDChar(uint32_t C, const LangOptions &LangOpts,
                            bool &IsExtension) {
  if (LangOpts.AsmPreprocessor) {
    return false;
  } else if (LangOpts.DollarIdents && '$' == C) {
    return true;
  } else if (LangOpts.CPlusPlus || LangOpts.C23) {
    // A non-leading codepoint must have the XID_Continue property.
    // XIDContinueRanges doesn't contain characters also in XIDStartRanges,
    // so we need to check both tables.
    // '_' doesn't have the XID_Continue property but is allowed in C and C++.
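    // For example, U+00E9 (LATIN SMALL LETTER E WITH ACUTE) is in XID_Start,
    // while combining marks such as U+0301 appear only in XID_Continue.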
    static const llvm::sys::UnicodeCharSet XIDStartChars(XIDStartRanges);
    static const llvm::sys::UnicodeCharSet XIDContinueChars(XIDContinueRanges);
    if (C == '_' || XIDStartChars.contains(C) || XIDContinueChars.contains(C))
      return true;
    return isMathematicalExtensionID(C, LangOpts, /*IsStart=*/false,
                                     IsExtension);
  } else if (LangOpts.C11) {
    static const llvm::sys::UnicodeCharSet C11AllowedIDChars(
        C11AllowedIDCharRanges);
    return C11AllowedIDChars.contains(C);
  } else {
    static const llvm::sys::UnicodeCharSet C99AllowedIDChars(
        C99AllowedIDCharRanges);
    return C99AllowedIDChars.contains(C);
  }
}

static bool isAllowedInitiallyIDChar(uint32_t C, const LangOptions &LangOpts,
                                     bool &IsExtension) {
  assert(C > 0x7F && "isAllowedInitiallyIDChar called with an ASCII codepoint");
  IsExtension = false;
  if (LangOpts.AsmPreprocessor) {
    return false;
  }
  if (LangOpts.CPlusPlus || LangOpts.C23) {
    static const llvm::sys::UnicodeCharSet XIDStartChars(XIDStartRanges);
    if (XIDStartChars.contains(C))
      return true;
    return isMathematicalExtensionID(C, LangOpts, /*IsStart=*/true,
                                     IsExtension);
  }
  if (!isAllowedIDChar(C, LangOpts, IsExtension))
    return false;
  if (LangOpts.C11) {
    static const llvm::sys::UnicodeCharSet C11DisallowedInitialIDChars(
        C11DisallowedInitialIDCharRanges);
    return !C11DisallowedInitialIDChars.contains(C);
  }
  static const llvm::sys::UnicodeCharSet C99DisallowedInitialIDChars(
      C99DisallowedInitialIDCharRanges);
  return !C99DisallowedInitialIDChars.contains(C);
}

static void diagnoseExtensionInIdentifier(DiagnosticsEngine &Diags, uint32_t C,
                                          CharSourceRange Range) {

  static const llvm::sys::UnicodeCharSet MathStartChars(
      MathematicalNotationProfileIDStartRanges);
  static const llvm::sys::UnicodeCharSet MathContinueChars(
      MathematicalNotationProfileIDContinueRanges);

  (void)MathStartChars;
  (void)MathContinueChars;
  assert((MathStartChars.contains(C) || MathContinueChars.contains(C)) &&
         "Unexpected mathematical notation codepoint");
  Diags.Report(Range.getBegin(), diag::ext_mathematical_notation)
      << codepointAsHexString(C) << Range;
}

static inline CharSourceRange makeCharRange(Lexer &L, const char *Begin,
                                            const char *End) {
  return CharSourceRange::getCharRange(L.getSourceLocation(Begin),
                                       L.getSourceLocation(End));
}

static void maybeDiagnoseIDCharCompat(DiagnosticsEngine &Diags, uint32_t C,
                                      CharSourceRange Range, bool IsFirst) {
  // Check C99 compatibility.
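  // Only do the (relatively expensive) character-set lookups when the
  // compatibility warning is enabled at this location.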
  if (!Diags.isIgnored(diag::warn_c99_compat_unicode_id, Range.getBegin())) {
    enum {
      CannotAppearInIdentifier = 0,
      CannotStartIdentifier
    };

    static const llvm::sys::UnicodeCharSet C99AllowedIDChars(
        C99AllowedIDCharRanges);
    static const llvm::sys::UnicodeCharSet C99DisallowedInitialIDChars(
        C99DisallowedInitialIDCharRanges);
    if (!C99AllowedIDChars.contains(C)) {
      Diags.Report(Range.getBegin(), diag::warn_c99_compat_unicode_id)
          << Range
          << CannotAppearInIdentifier;
    } else if (IsFirst && C99DisallowedInitialIDChars.contains(C)) {
      Diags.Report(Range.getBegin(), diag::warn_c99_compat_unicode_id)
          << Range
          << CannotStartIdentifier;
    }
  }
}

/// After encountering UTF-8 character C and interpreting it as an identifier
/// character, check whether it's a homoglyph for a common non-identifier
/// source character that is unlikely to be an intentional identifier
/// character and warn if so.
static void maybeDiagnoseUTF8Homoglyph(DiagnosticsEngine &Diags, uint32_t C,
                                       CharSourceRange Range) {
  // FIXME: Handle Unicode quotation marks (smart quotes, fullwidth quotes).
  struct HomoglyphPair {
    uint32_t Character;
    char LooksLike;
    bool operator<(HomoglyphPair R) const { return Character < R.Character; }
  };
  static constexpr HomoglyphPair SortedHomoglyphs[] = {
      {U'\u00ad', 0},    // SOFT HYPHEN
      {U'\u01c3', '!'},  // LATIN LETTER RETROFLEX CLICK
      {U'\u037e', ';'},  // GREEK QUESTION MARK
      {U'\u200b', 0},    // ZERO WIDTH SPACE
      {U'\u200c', 0},    // ZERO WIDTH NON-JOINER
      {U'\u200d', 0},    // ZERO WIDTH JOINER
      {U'\u2060', 0},    // WORD JOINER
      {U'\u2061', 0},    // FUNCTION APPLICATION
      {U'\u2062', 0},    // INVISIBLE TIMES
      {U'\u2063', 0},    // INVISIBLE SEPARATOR
      {U'\u2064', 0},    // INVISIBLE PLUS
      {U'\u2212', '-'},  // MINUS SIGN
      {U'\u2215', '/'},  // DIVISION SLASH
      {U'\u2216', '\\'}, // SET MINUS
      {U'\u2217', '*'},  // ASTERISK OPERATOR
      {U'\u2223', '|'},  // DIVIDES
      {U'\u2227', '^'},  // LOGICAL AND
      {U'\u2236', ':'},  // RATIO
      {U'\u223c', '~'},  // TILDE OPERATOR
      {U'\ua789', ':'},  // MODIFIER LETTER COLON
      {U'\ufeff', 0},    // ZERO WIDTH NO-BREAK SPACE
      {U'\uff01', '!'},  // FULLWIDTH EXCLAMATION MARK
      {U'\uff03', '#'},  // FULLWIDTH NUMBER SIGN
      {U'\uff04', '$'},  // FULLWIDTH DOLLAR SIGN
      {U'\uff05', '%'},  // FULLWIDTH PERCENT SIGN
      {U'\uff06', '&'},  // FULLWIDTH AMPERSAND
      {U'\uff08', '('},  // FULLWIDTH LEFT PARENTHESIS
      {U'\uff09', ')'},  // FULLWIDTH RIGHT PARENTHESIS
      {U'\uff0a', '*'},  // FULLWIDTH ASTERISK
      {U'\uff0b', '+'},  // FULLWIDTH PLUS SIGN
      {U'\uff0c', ','},  // FULLWIDTH COMMA
      {U'\uff0d', '-'},  // FULLWIDTH HYPHEN-MINUS
      {U'\uff0e', '.'},  // FULLWIDTH FULL STOP
      {U'\uff0f', '/'},  // FULLWIDTH SOLIDUS
      {U'\uff1a', ':'},  // FULLWIDTH COLON
      {U'\uff1b', ';'},  // FULLWIDTH SEMICOLON
      {U'\uff1c', '<'},  // FULLWIDTH LESS-THAN SIGN
      {U'\uff1d', '='},  // FULLWIDTH EQUALS SIGN
      {U'\uff1e', '>'},  // FULLWIDTH GREATER-THAN SIGN
      {U'\uff1f', '?'},  // FULLWIDTH QUESTION MARK
      {U'\uff20', '@'},  // FULLWIDTH COMMERCIAL AT
      {U'\uff3b', '['},  // FULLWIDTH LEFT SQUARE BRACKET
      {U'\uff3c', '\\'}, // FULLWIDTH REVERSE SOLIDUS
      {U'\uff3d', ']'},  // FULLWIDTH RIGHT SQUARE BRACKET
      {U'\uff3e', '^'},  // FULLWIDTH CIRCUMFLEX ACCENT
      {U'\uff5b', '{'},  // FULLWIDTH LEFT CURLY BRACKET
      {U'\uff5c', '|'},  // FULLWIDTH VERTICAL LINE
      {U'\uff5d', '}'},  // FULLWIDTH RIGHT CURLY BRACKET
      {U'\uff5e', '~'},  // FULLWIDTH TILDE
      {0, 0}
  };
  auto Homoglyph =
      std::lower_bound(std::begin(SortedHomoglyphs),
                       std::end(SortedHomoglyphs) - 1, HomoglyphPair{C, '\0'});
  if (Homoglyph->Character == C) {
    if (Homoglyph->LooksLike) {
      const char LooksLikeStr[] = {Homoglyph->LooksLike, 0};
      Diags.Report(Range.getBegin(), diag::warn_utf8_symbol_homoglyph)
          << Range << codepointAsHexString(C) << LooksLikeStr;
    } else {
      Diags.Report(Range.getBegin(), diag::warn_utf8_symbol_zero_width)
          << Range << codepointAsHexString(C);
    }
  }
}

static void diagnoseInvalidUnicodeCodepointInIdentifier(
    DiagnosticsEngine &Diags, const LangOptions &LangOpts, uint32_t CodePoint,
    CharSourceRange Range, bool IsFirst) {
  if (isASCII(CodePoint))
    return;

  bool IsExtension;
  bool IsIDStart = isAllowedInitiallyIDChar(CodePoint, LangOpts, IsExtension);
  bool IsIDContinue =
      IsIDStart || isAllowedIDChar(CodePoint, LangOpts, IsExtension);

  if ((IsFirst && IsIDStart) || (!IsFirst && IsIDContinue))
    return;

  bool InvalidOnlyAtStart = IsFirst && !IsIDStart && IsIDContinue;

  if (!IsFirst || InvalidOnlyAtStart) {
    Diags.Report(Range.getBegin(), diag::err_character_not_allowed_identifier)
        << Range << codepointAsHexString(CodePoint) << int(InvalidOnlyAtStart)
        << FixItHint::CreateRemoval(Range);
  } else {
    Diags.Report(Range.getBegin(), diag::err_character_not_allowed)
        << Range << codepointAsHexString(CodePoint)
        << FixItHint::CreateRemoval(Range);
  }
}

bool Lexer::tryConsumeIdentifierUCN(const char *&CurPtr, unsigned Size,
                                    Token &Result) {
  const char *UCNPtr = CurPtr + Size;
  uint32_t CodePoint = tryReadUCN(UCNPtr, CurPtr, /*Token=*/nullptr);
  if (CodePoint == 0) {
    return false;
  }
  bool IsExtension = false;
  if (!isAllowedIDChar(CodePoint, LangOpts, IsExtension)) {
    if (isASCII(CodePoint) || isUnicodeWhitespace(CodePoint))
      return false;
    if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
        !PP->isPreprocessedOutput())
      diagnoseInvalidUnicodeCodepointInIdentifier(
          PP->getDiagnostics(), LangOpts, CodePoint,
          makeCharRange(*this, CurPtr, UCNPtr),
          /*IsFirst=*/false);

    // We got a unicode codepoint that is neither a space nor a valid
    // identifier part. Carry on as if the codepoint was valid for
    // recovery purposes.
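    // (Illustrative: a UCN such as \U0001F600, an emoji, reaches this
    // recovery path: it is neither whitespace nor a valid identifier
    // character, but we keep lexing it as part of the identifier.)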
  } else if (!isLexingRawMode()) {
    if (IsExtension)
      diagnoseExtensionInIdentifier(PP->getDiagnostics(), CodePoint,
                                    makeCharRange(*this, CurPtr, UCNPtr));

    maybeDiagnoseIDCharCompat(PP->getDiagnostics(), CodePoint,
                              makeCharRange(*this, CurPtr, UCNPtr),
                              /*IsFirst=*/false);
  }

  Result.setFlag(Token::HasUCN);
  if ((UCNPtr - CurPtr == 6 && CurPtr[1] == 'u') ||
      (UCNPtr - CurPtr == 10 && CurPtr[1] == 'U'))
    CurPtr = UCNPtr;
  else
    while (CurPtr != UCNPtr)
      (void)getAndAdvanceChar(CurPtr, Result);
  return true;
}

bool Lexer::tryConsumeIdentifierUTF8Char(const char *&CurPtr) {
  const char *UnicodePtr = CurPtr;
  llvm::UTF32 CodePoint;
  llvm::ConversionResult Result =
      llvm::convertUTF8Sequence((const llvm::UTF8 **)&UnicodePtr,
                                (const llvm::UTF8 *)BufferEnd,
                                &CodePoint,
                                llvm::strictConversion);
  if (Result != llvm::conversionOK)
    return false;

  bool IsExtension = false;
  if (!isAllowedIDChar(static_cast<uint32_t>(CodePoint), LangOpts,
                       IsExtension)) {
    if (isASCII(CodePoint) || isUnicodeWhitespace(CodePoint))
      return false;

    if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
        !PP->isPreprocessedOutput())
      diagnoseInvalidUnicodeCodepointInIdentifier(
          PP->getDiagnostics(), LangOpts, CodePoint,
          makeCharRange(*this, CurPtr, UnicodePtr), /*IsFirst=*/false);
    // We got a unicode codepoint that is neither a space nor a valid
    // identifier part. Carry on as if the codepoint was valid for
    // recovery purposes.
  } else if (!isLexingRawMode()) {
    if (IsExtension)
      diagnoseExtensionInIdentifier(PP->getDiagnostics(), CodePoint,
                                    makeCharRange(*this, CurPtr, UnicodePtr));
    maybeDiagnoseIDCharCompat(PP->getDiagnostics(), CodePoint,
                              makeCharRange(*this, CurPtr, UnicodePtr),
                              /*IsFirst=*/false);
    maybeDiagnoseUTF8Homoglyph(PP->getDiagnostics(), CodePoint,
                               makeCharRange(*this, CurPtr, UnicodePtr));
  }

  CurPtr = UnicodePtr;
  return true;
}

bool Lexer::LexUnicodeIdentifierStart(Token &Result, uint32_t C,
                                      const char *CurPtr) {
  bool IsExtension = false;
  if (isAllowedInitiallyIDChar(C, LangOpts, IsExtension)) {
    if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
        !PP->isPreprocessedOutput()) {
      if (IsExtension)
        diagnoseExtensionInIdentifier(PP->getDiagnostics(), C,
                                      makeCharRange(*this, BufferPtr, CurPtr));
      maybeDiagnoseIDCharCompat(PP->getDiagnostics(), C,
                                makeCharRange(*this, BufferPtr, CurPtr),
                                /*IsFirst=*/true);
      maybeDiagnoseUTF8Homoglyph(PP->getDiagnostics(), C,
                                 makeCharRange(*this, BufferPtr, CurPtr));
    }

    MIOpt.ReadToken();
    return LexIdentifierContinue(Result, CurPtr);
  }

  if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
      !PP->isPreprocessedOutput() && !isASCII(*BufferPtr) &&
      !isUnicodeWhitespace(C)) {
    // Non-ASCII characters tend to creep into source code unintentionally.
    // Instead of letting the parser complain about the unknown token,
    // just drop the character.
    // Note that we can /only/ do this when the non-ASCII character is actually
    // spelled as Unicode, not written as a UCN. The standard requires that
    // we not throw away any possible preprocessor tokens, but there's a
    // loophole in the mapping of Unicode characters to basic character set
    // characters that allows us to map these particular characters to, say,
    // whitespace.
    diagnoseInvalidUnicodeCodepointInIdentifier(
        PP->getDiagnostics(), LangOpts, C,
        makeCharRange(*this, BufferPtr, CurPtr), /*IsFirst=*/true);
    BufferPtr = CurPtr;
    return false;
  }

  // Otherwise, we have an explicit UCN or a character that's unlikely to show
  // up by accident.
  MIOpt.ReadToken();
  FormTokenWithChars(Result, CurPtr, tok::unknown);
  return true;
}

bool Lexer::LexIdentifierContinue(Token &Result, const char *CurPtr) {
  // Match [_A-Za-z0-9]*, we have already matched an identifier start.
  while (true) {
    unsigned char C = *CurPtr;
    // Fast path.
    if (isAsciiIdentifierContinue(C)) {
      ++CurPtr;
      continue;
    }

    unsigned Size;
    // Slow path: handle trigraphs, unicode codepoints, and UCNs.
    C = getCharAndSize(CurPtr, Size);
    if (isAsciiIdentifierContinue(C)) {
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      continue;
    }
    if (C == '$') {
      // If we hit a $ and they are not supported in identifiers, we are done.
      if (!LangOpts.DollarIdents)
        break;
      // Otherwise, emit a diagnostic and continue.
      if (!isLexingRawMode())
        Diag(CurPtr, diag::ext_dollar_in_identifier);
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      continue;
    }
    if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result))
      continue;
    if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr))
      continue;
    // Neither an expected Unicode codepoint nor a UCN.
    break;
  }

  const char *IdStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, tok::raw_identifier);
  Result.setRawIdentifierData(IdStart);

  // If we are in raw mode, return this identifier raw. There is no need to
  // look up identifier information or attempt to macro expand it.
  if (LexingRawMode)
    return true;

  // Fill in Result.IdentifierInfo and update the token kind,
  // looking up the identifier in the identifier table.
  IdentifierInfo *II = PP->LookUpIdentifierInfo(Result);
  // Note that we have to call PP->LookUpIdentifierInfo() even for code
  // completion, because it writes IdentifierInfo into Result, and callers
  // rely on it.

  // If the completion point is at the end of an identifier, we want to treat
  // the identifier as incomplete even if it resolves to a macro or a keyword.
  // This allows e.g. 'class^' to complete to 'classifier'.
  if (isCodeCompletionPoint(CurPtr)) {
    // Return the code-completion token.
    Result.setKind(tok::code_completion);
    // Skip the code-completion char and all immediate identifier characters.
    // This ensures we get consistent behavior when completing at any point in
    // an identifier (i.e. at the start, in the middle, at the end). Note that
    // only simple cases (i.e. [a-zA-Z0-9_]) are supported to keep the code
    // simpler.
    assert(*CurPtr == 0 && "Completion character must be 0");
    ++CurPtr;
    // Note that the code completion token is not added as a separate
    // character when the completion point is at the end of the buffer.
    // Therefore, we need to check if the buffer has ended.
    if (CurPtr < BufferEnd) {
      while (isAsciiIdentifierContinue(*CurPtr))
        ++CurPtr;
    }
    BufferPtr = CurPtr;
    return true;
  }

  // Finally, now that we know we have an identifier, pass this off to the
  // preprocessor, which may macro expand it or something.
  if (II->isHandleIdentifierCase())
    return PP->HandleIdentifier(Result);

  return true;
}

/// isHexaLiteral - Return true if Start points to a hex constant, in
/// Microsoft mode (where this is supposed to be several different tokens).
bool Lexer::isHexaLiteral(const char *Start, const LangOptions &LangOpts) {
  unsigned Size;
  char C1 = Lexer::getCharAndSizeNoWarn(Start, Size, LangOpts);
  if (C1 != '0')
    return false;
  char C2 = Lexer::getCharAndSizeNoWarn(Start + Size, Size, LangOpts);
  return (C2 == 'x' || C2 == 'X');
}

/// LexNumericConstant - Lex the remainder of an integer or floating point
/// constant. From[-1] is the first character lexed. Return the end of the
/// constant.
bool Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
  unsigned Size;
  char C = getCharAndSize(CurPtr, Size);
  char PrevCh = 0;
  while (isPreprocessingNumberBody(C)) {
    CurPtr = ConsumeChar(CurPtr, Size, Result);
    PrevCh = C;
    C = getCharAndSize(CurPtr, Size);
  }

  // If we fell out, check for a sign, due to 1e+12. If we have one, continue.
  if ((C == '-' || C == '+') && (PrevCh == 'E' || PrevCh == 'e')) {
    // If we are in Microsoft mode, don't continue if the constant is hex.
    // For example, MSVC will accept the following as 3 tokens: 0x1234567e+1
    if (!LangOpts.MicrosoftExt || !isHexaLiteral(BufferPtr, LangOpts))
      return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
  }

  // If we have a hex FP constant, continue.
  if ((C == '-' || C == '+') && (PrevCh == 'P' || PrevCh == 'p')) {
    // Outside C99 and C++17, we accept hexadecimal floating point numbers as a
    // not-quite-conforming extension. Only do so if this looks like it's
    // actually meant to be a hexfloat, and not if it has a ud-suffix.
    bool IsHexFloat = true;
    if (!LangOpts.C99) {
      if (!isHexaLiteral(BufferPtr, LangOpts))
        IsHexFloat = false;
      else if (!LangOpts.CPlusPlus17 &&
               std::find(BufferPtr, CurPtr, '_') != CurPtr)
        IsHexFloat = false;
    }
    if (IsHexFloat)
      return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
  }

  // If we have a digit separator, continue.
  if (C == '\'' && (LangOpts.CPlusPlus14 || LangOpts.C23)) {
    unsigned NextSize;
    char Next = getCharAndSizeNoWarn(CurPtr + Size, NextSize, LangOpts);
    if (isAsciiIdentifierContinue(Next)) {
      if (!isLexingRawMode())
        Diag(CurPtr, LangOpts.CPlusPlus
                         ? diag::warn_cxx11_compat_digit_separator
                         : diag::warn_c23_compat_digit_separator);
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      CurPtr = ConsumeChar(CurPtr, NextSize, Result);
      return LexNumericConstant(Result, CurPtr);
    }
  }

  // If we have a UCN or UTF-8 character (perhaps in a ud-suffix), continue.
  if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result))
    return LexNumericConstant(Result, CurPtr);
  if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr))
    return LexNumericConstant(Result, CurPtr);

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, tok::numeric_constant);
  Result.setLiteralData(TokStart);
  return true;
}

/// LexUDSuffix - Lex the ud-suffix production for user-defined literal suffixes
/// in C++11, or warn on a ud-suffix in C++98.
const char *Lexer::LexUDSuffix(Token &Result, const char *CurPtr,
                               bool IsStringLiteral) {
  assert(LangOpts.CPlusPlus);

  // Maximally munch an identifier.
  unsigned Size;
  char C = getCharAndSize(CurPtr, Size);
  bool Consumed = false;

  if (!isAsciiIdentifierStart(C)) {
    if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result))
      Consumed = true;
    else if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr))
      Consumed = true;
    else
      return CurPtr;
  }

  if (!LangOpts.CPlusPlus11) {
    if (!isLexingRawMode())
      Diag(CurPtr,
           C == '_' ? diag::warn_cxx11_compat_user_defined_literal
                    : diag::warn_cxx11_compat_reserved_user_defined_literal)
          << FixItHint::CreateInsertion(getSourceLocation(CurPtr), " ");
    return CurPtr;
  }

  // C++11 [lex.ext]p10, [usrlit.suffix]p1: A program containing a ud-suffix
  // that does not start with an underscore is ill-formed. As a conforming
  // extension, we treat all such suffixes as if they had whitespace before
  // them. We assume a suffix beginning with a UCN or UTF-8 character is more
  // likely to be a ud-suffix than a macro, however, and accept that.
  if (!Consumed) {
    bool IsUDSuffix = false;
    if (C == '_')
      IsUDSuffix = true;
    else if (IsStringLiteral && LangOpts.CPlusPlus14) {
      // In C++14, we need to look ahead a few characters to see if this is a
      // valid suffix for a string literal or a numeric literal (this could be
      // the 'operator""if' defining a numeric literal operator).
      const unsigned MaxStandardSuffixLength = 3;
      char Buffer[MaxStandardSuffixLength] = { C };
      unsigned Consumed = Size;
      unsigned Chars = 1;
      while (true) {
        unsigned NextSize;
        char Next = getCharAndSizeNoWarn(CurPtr + Consumed, NextSize, LangOpts);
        if (!isAsciiIdentifierContinue(Next)) {
          // End of suffix. Check whether this is on the allowed list.
          const StringRef CompleteSuffix(Buffer, Chars);
          IsUDSuffix =
              StringLiteralParser::isValidUDSuffix(LangOpts, CompleteSuffix);
          break;
        }

        if (Chars == MaxStandardSuffixLength)
          // Too long: can't be a standard suffix.
          break;

        Buffer[Chars++] = Next;
        Consumed += NextSize;
      }
    }

    if (!IsUDSuffix) {
      if (!isLexingRawMode())
        Diag(CurPtr, LangOpts.MSVCCompat
                         ? diag::ext_ms_reserved_user_defined_literal
                         : diag::ext_reserved_user_defined_literal)
            << FixItHint::CreateInsertion(getSourceLocation(CurPtr), " ");
      return CurPtr;
    }

    CurPtr = ConsumeChar(CurPtr, Size, Result);
  }

  Result.setFlag(Token::HasUDSuffix);
  while (true) {
    C = getCharAndSize(CurPtr, Size);
    if (isAsciiIdentifierContinue(C)) {
      CurPtr = ConsumeChar(CurPtr, Size, Result);
    } else if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result)) {
    } else if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr)) {
    } else
      break;
  }

  return CurPtr;
}

/// LexStringLiteral - Lex the remainder of a string literal, after having lexed
/// either " or L" or u8" or u" or U".
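/// On entry, BufferPtr points at the start of the token and CurPtr at the
/// character just past the opening quote (a sketch of the calling convention,
/// inferred from the callers rather than stated in the original comment).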
bool Lexer::LexStringLiteral(Token &Result, const char *CurPtr,
                             tok::TokenKind Kind) {
  const char *AfterQuote = CurPtr;
  // Does this string contain the \0 character?
  const char *NulCharacter = nullptr;

  if (!isLexingRawMode() &&
      (Kind == tok::utf8_string_literal ||
       Kind == tok::utf16_string_literal ||
       Kind == tok::utf32_string_literal))
    Diag(BufferPtr, LangOpts.CPlusPlus ? diag::warn_cxx98_compat_unicode_literal
                                       : diag::warn_c99_compat_unicode_literal);

  char C = getAndAdvanceChar(CurPtr, Result);
  while (C != '"') {
    // Skip escaped characters. Escaped newlines will already be processed by
    // getAndAdvanceChar.
    if (C == '\\')
      C = getAndAdvanceChar(CurPtr, Result);

    if (C == '\n' || C == '\r' ||             // Newline.
        (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
      if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
        Diag(BufferPtr, diag::ext_unterminated_char_or_string) << 1;
      FormTokenWithChars(Result, CurPtr-1, tok::unknown);
      return true;
    }

    if (C == 0) {
      if (isCodeCompletionPoint(CurPtr-1)) {
        if (ParsingFilename)
          codeCompleteIncludedFile(AfterQuote, CurPtr - 1, /*IsAngled=*/false);
        else
          PP->CodeCompleteNaturalLanguage();
        FormTokenWithChars(Result, CurPtr - 1, tok::unknown);
        cutOffLexing();
        return true;
      }

      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // If we are in C++11, lex the optional ud-suffix.
  if (LangOpts.CPlusPlus)
    CurPtr = LexUDSuffix(Result, CurPtr, true);

  // If a nul character existed in the string, warn about it.
  if (NulCharacter && !isLexingRawMode())
    Diag(NulCharacter, diag::null_in_char_or_string) << 1;

  // Update the location of the token as well as the BufferPtr instance var.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, Kind);
  Result.setLiteralData(TokStart);
  return true;
}

/// LexRawStringLiteral - Lex the remainder of a raw string literal, after
/// having lexed R", LR", u8R", uR", or UR".
bool Lexer::LexRawStringLiteral(Token &Result, const char *CurPtr,
                                tok::TokenKind Kind) {
  // This function doesn't use getAndAdvanceChar because C++11 [lex.pptoken]p3:
  // Between the initial and final double quote characters of the raw string,
  // any transformations performed in phases 1 and 2 (trigraphs,
  // universal-character-names, and line splicing) are reverted.

  if (!isLexingRawMode())
    Diag(BufferPtr, diag::warn_cxx98_compat_raw_string_literal);

  unsigned PrefixLen = 0;

  while (PrefixLen != 16 && isRawStringDelimBody(CurPtr[PrefixLen]))
    ++PrefixLen;

  // If the character after the prefix was not a '(', then we didn't lex a
  // valid delimiter.
  if (CurPtr[PrefixLen] != '(') {
    if (!isLexingRawMode()) {
      const char *PrefixEnd = &CurPtr[PrefixLen];
      if (PrefixLen == 16) {
        Diag(PrefixEnd, diag::err_raw_delim_too_long);
      } else {
        Diag(PrefixEnd, diag::err_invalid_char_raw_delim)
            << StringRef(PrefixEnd, 1);
      }
    }

    // Search for the next '"' in hopes of salvaging the lexer. Unfortunately,
    // it's possible the '"' was intended to be part of the raw string, but
    // there's not much we can do about that.
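    // (Illustrative: for R"foo bar(x)foo bar" the space ends delimiter
    // scanning, so CurPtr[PrefixLen] is ' ' rather than '(', and we resync
    // at the next '"' below.)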
    while (true) {
      char C = *CurPtr++;

      if (C == '"')
        break;
      if (C == 0 && CurPtr-1 == BufferEnd) {
        --CurPtr;
        break;
      }
    }

    FormTokenWithChars(Result, CurPtr, tok::unknown);
    return true;
  }

  // Save the prefix and move CurPtr past it.
  const char *Prefix = CurPtr;
  CurPtr += PrefixLen + 1; // skip over prefix and '('

  while (true) {
    char C = *CurPtr++;

    if (C == ')') {
      // Check for prefix match and closing quote.
      if (strncmp(CurPtr, Prefix, PrefixLen) == 0 && CurPtr[PrefixLen] == '"') {
        CurPtr += PrefixLen + 1; // skip over prefix and '"'
        break;
      }
    } else if (C == 0 && CurPtr-1 == BufferEnd) { // End of file.
      if (!isLexingRawMode())
        Diag(BufferPtr, diag::err_unterminated_raw_string)
            << StringRef(Prefix, PrefixLen);
      FormTokenWithChars(Result, CurPtr-1, tok::unknown);
      return true;
    }
  }

  // If we are in C++11, lex the optional ud-suffix.
  if (LangOpts.CPlusPlus)
    CurPtr = LexUDSuffix(Result, CurPtr, true);

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, Kind);
  Result.setLiteralData(TokStart);
  return true;
}

/// LexAngledStringLiteral - Lex the remainder of an angled string literal,
/// after having lexed the '<' character. This is used for #include filenames.
bool Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
  // Does this string contain the \0 character?
  const char *NulCharacter = nullptr;
  const char *AfterLessPos = CurPtr;
  char C = getAndAdvanceChar(CurPtr, Result);
  while (C != '>') {
    // Skip escaped characters. Escaped newlines will already be processed by
    // getAndAdvanceChar.
    if (C == '\\')
      C = getAndAdvanceChar(CurPtr, Result);

    if (isVerticalWhitespace(C) ||               // Newline.
        (C == 0 && (CurPtr - 1 == BufferEnd))) { // End of file.
      // If the filename is unterminated, then it must just be a lone <
      // character. Return this as such.
      FormTokenWithChars(Result, AfterLessPos, tok::less);
      return true;
    }

    if (C == 0) {
      if (isCodeCompletionPoint(CurPtr - 1)) {
        codeCompleteIncludedFile(AfterLessPos, CurPtr - 1, /*IsAngled=*/true);
        cutOffLexing();
        FormTokenWithChars(Result, CurPtr - 1, tok::unknown);
        return true;
      }
      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // If a nul character existed in the string, warn about it.
  if (NulCharacter && !isLexingRawMode())
    Diag(NulCharacter, diag::null_in_char_or_string) << 1;

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, tok::header_name);
  Result.setLiteralData(TokStart);
  return true;
}

void Lexer::codeCompleteIncludedFile(const char *PathStart,
                                     const char *CompletionPoint,
                                     bool IsAngled) {
  // Completion only applies to the filename, after the last slash.
  StringRef PartialPath(PathStart, CompletionPoint - PathStart);
  llvm::StringRef SlashChars = LangOpts.MSVCCompat ? "/\\" : "/";
  auto Slash = PartialPath.find_last_of(SlashChars);
  StringRef Dir =
      (Slash == StringRef::npos) ? "" : PartialPath.take_front(Slash);
  const char *StartOfFilename =
      (Slash == StringRef::npos) ? PathStart : PathStart + Slash + 1;
  // Code completion filter range is the filename only, up to completion point.
  PP->setCodeCompletionIdentifierInfo(&PP->getIdentifierTable().get(
      StringRef(StartOfFilename, CompletionPoint - StartOfFilename)));
  // We should replace the characters up to the closing quote or closest slash,
  // if any.
  while (CompletionPoint < BufferEnd) {
    char Next = *(CompletionPoint + 1);
    if (Next == 0 || Next == '\r' || Next == '\n')
      break;
    ++CompletionPoint;
    if (Next == (IsAngled ? '>' : '"'))
      break;
    if (SlashChars.contains(Next))
      break;
  }

  PP->setCodeCompletionTokenRange(
      FileLoc.getLocWithOffset(StartOfFilename - BufferStart),
      FileLoc.getLocWithOffset(CompletionPoint - BufferStart));
  PP->CodeCompleteIncludedFile(Dir, IsAngled);
}

/// LexCharConstant - Lex the remainder of a character constant, after having
/// lexed either ' or L' or u8' or u' or U'.
bool Lexer::LexCharConstant(Token &Result, const char *CurPtr,
                            tok::TokenKind Kind) {
  // Does this character constant contain the \0 character?
  const char *NulCharacter = nullptr;

  if (!isLexingRawMode()) {
    if (Kind == tok::utf16_char_constant || Kind == tok::utf32_char_constant)
      Diag(BufferPtr, LangOpts.CPlusPlus
                          ? diag::warn_cxx98_compat_unicode_literal
                          : diag::warn_c99_compat_unicode_literal);
    else if (Kind == tok::utf8_char_constant)
      Diag(BufferPtr, diag::warn_cxx14_compat_u8_character_literal);
  }

  char C = getAndAdvanceChar(CurPtr, Result);
  if (C == '\'') {
    if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
      Diag(BufferPtr, diag::ext_empty_character);
    FormTokenWithChars(Result, CurPtr, tok::unknown);
    return true;
  }

  while (C != '\'') {
    // Skip escaped characters.
    if (C == '\\')
      C = getAndAdvanceChar(CurPtr, Result);

    if (C == '\n' || C == '\r' ||             // Newline.
        (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
      if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
        Diag(BufferPtr, diag::ext_unterminated_char_or_string) << 0;
      FormTokenWithChars(Result, CurPtr-1, tok::unknown);
      return true;
    }

    if (C == 0) {
      if (isCodeCompletionPoint(CurPtr-1)) {
        PP->CodeCompleteNaturalLanguage();
        FormTokenWithChars(Result, CurPtr-1, tok::unknown);
        cutOffLexing();
        return true;
      }

      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  // If we are in C++11, lex the optional ud-suffix.
  if (LangOpts.CPlusPlus)
    CurPtr = LexUDSuffix(Result, CurPtr, false);

  // If a nul character existed in the character constant, warn about it.
  if (NulCharacter && !isLexingRawMode())
    Diag(NulCharacter, diag::null_in_char_or_string) << 0;

  // Update the location of token as well as BufferPtr.
  const char *TokStart = BufferPtr;
  FormTokenWithChars(Result, CurPtr, Kind);
  Result.setLiteralData(TokStart);
  return true;
}

/// SkipWhitespace - Efficiently skip over a series of whitespace characters.
/// Update BufferPtr to point to the next non-whitespace character and return.
///
/// This method forms a token and returns true if KeepWhitespaceMode is
/// enabled.
bool Lexer::SkipWhitespace(Token &Result, const char *CurPtr,
                           bool &TokAtPhysicalStartOfLine) {
  // Whitespace - Skip it, then return the token after the whitespace.
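  // (Behavior sketch, not in the original comments: on return, BufferPtr sits
  // on the first non-whitespace character; in KeepWhitespaceMode the skipped
  // run is instead packaged as a tok::unknown token and returned.)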
  bool SawNewline = isVerticalWhitespace(CurPtr[-1]);

  unsigned char Char = *CurPtr;

  const char *lastNewLine = nullptr;
  auto setLastNewLine = [&](const char *Ptr) {
    lastNewLine = Ptr;
    if (!NewLinePtr)
      NewLinePtr = Ptr;
  };
  if (SawNewline)
    setLastNewLine(CurPtr - 1);

  // Skip consecutive spaces efficiently.
  while (true) {
    // Skip horizontal whitespace very aggressively.
    while (isHorizontalWhitespace(Char))
      Char = *++CurPtr;

    // Otherwise if we have something other than whitespace, we're done.
    if (!isVerticalWhitespace(Char))
      break;

    if (ParsingPreprocessorDirective) {
      // End of preprocessor directive line, let LexTokenInternal handle this.
      BufferPtr = CurPtr;
      return false;
    }

    // OK, but handle newline.
    if (*CurPtr == '\n')
      setLastNewLine(CurPtr);
    SawNewline = true;
    Char = *++CurPtr;
  }

  // If the client wants us to return whitespace, return it now.
  if (isKeepWhitespaceMode()) {
    FormTokenWithChars(Result, CurPtr, tok::unknown);
    if (SawNewline) {
      IsAtStartOfLine = true;
      IsAtPhysicalStartOfLine = true;
    }
    // FIXME: The next token will not have LeadingSpace set.
    return true;
  }

  // If this isn't immediately after a newline, there is leading space.
  char PrevChar = CurPtr[-1];
  bool HasLeadingSpace = !isVerticalWhitespace(PrevChar);

  Result.setFlagValue(Token::LeadingSpace, HasLeadingSpace);
  if (SawNewline) {
    Result.setFlag(Token::StartOfLine);
    TokAtPhysicalStartOfLine = true;

    if (NewLinePtr && lastNewLine && NewLinePtr != lastNewLine && PP) {
      if (auto *Handler = PP->getEmptylineHandler())
        Handler->HandleEmptyline(SourceRange(getSourceLocation(NewLinePtr + 1),
                                             getSourceLocation(lastNewLine)));
    }
  }

  BufferPtr = CurPtr;
  return false;
}

/// We have just read the // characters from input. Skip until we find the
/// newline character that terminates the comment. Then update BufferPtr and
/// return.
///
/// If we're in KeepCommentMode or any CommentHandler has inserted
/// some tokens, this will store the first token and return true.
bool Lexer::SkipLineComment(Token &Result, const char *CurPtr,
                            bool &TokAtPhysicalStartOfLine) {
  // If line comments aren't explicitly enabled for this language, emit an
  // extension warning.
  if (!LineComment) {
    if (!isLexingRawMode()) // There's no PP in raw mode, so we can't emit
                            // diagnostics.
      Diag(BufferPtr, diag::ext_line_comment);

    // Mark them enabled so we only emit one warning for this translation
    // unit.
    LineComment = true;
  }

  // Scan over the body of the comment. The common case, when scanning, is that
  // the comment contains normal ascii characters with nothing interesting in
  // them. As such, optimize for this case with the inner loop.
  //
  // This loop terminates with CurPtr pointing at the newline (or end of buffer)
  // character that ends the line comment.

  // C++23 [lex.phases] p1
  // Diagnose invalid UTF-8 if the corresponding warning is enabled, emitting a
  // diagnostic only once per entire ill-formed subsequence to avoid
  // emitting too many diagnostics (see http://unicode.org/review/pr-121.html).
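  // (Illustrative examples of such sequences: an isolated continuation byte
  // like 0x80, or an overlong encoding such as 0xC0 0xAF; each ill-formed
  // subsequence is diagnosed once and scanning then resynchronizes.)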
  bool UnicodeDecodingAlreadyDiagnosed = false;

  char C;
  while (true) {
    C = *CurPtr;
    // Skip over characters in the fast loop.
    while (isASCII(C) && C != 0 &&    // Potentially EOF.
           C != '\n' && C != '\r') {  // Newline or DOS-style newline.
      C = *++CurPtr;
      UnicodeDecodingAlreadyDiagnosed = false;
    }

    if (!isASCII(C)) {
      unsigned Length = llvm::getUTF8SequenceSize(
          (const llvm::UTF8 *)CurPtr, (const llvm::UTF8 *)BufferEnd);
      if (Length == 0) {
        if (!UnicodeDecodingAlreadyDiagnosed && !isLexingRawMode())
          Diag(CurPtr, diag::warn_invalid_utf8_in_comment);
        UnicodeDecodingAlreadyDiagnosed = true;
        ++CurPtr;
      } else {
        UnicodeDecodingAlreadyDiagnosed = false;
        CurPtr += Length;
      }
      continue;
    }

    const char *NextLine = CurPtr;
    if (C != 0) {
      // We found a newline, see if it's escaped.
      const char *EscapePtr = CurPtr-1;
      bool HasSpace = false;
      while (isHorizontalWhitespace(*EscapePtr)) { // Skip whitespace.
        --EscapePtr;
        HasSpace = true;
      }

      if (*EscapePtr == '\\')
        // Escaped newline.
        CurPtr = EscapePtr;
      else if (EscapePtr[0] == '/' && EscapePtr[-1] == '?' &&
               EscapePtr[-2] == '?' && LangOpts.Trigraphs)
        // Trigraph-escaped newline.
        CurPtr = EscapePtr-2;
      else
        break; // This is a newline, we're done.

      // If there was space between the backslash and newline, warn about it.
      if (HasSpace && !isLexingRawMode())
        Diag(EscapePtr, diag::backslash_newline_space);
    }

    // Otherwise, this is a hard case. Fall back on getAndAdvanceChar to
    // properly decode the character. Read it in raw mode to avoid emitting
    // diagnostics about things like trigraphs. If we see an escaped newline,
    // we'll handle it below.
    const char *OldPtr = CurPtr;
    bool OldRawMode = isLexingRawMode();
    LexingRawMode = true;
    C = getAndAdvanceChar(CurPtr, Result);
    LexingRawMode = OldRawMode;

    // If we only read one character, then no special handling is needed.
    // We're done and can skip forward to the newline.
    if (C != 0 && CurPtr == OldPtr+1) {
      CurPtr = NextLine;
      break;
    }

    // If we read multiple characters, and one of those characters was a \r or
    // \n, then we had an escaped newline within the comment. Emit a diagnostic
    // unless the next line is also a // comment.
    if (CurPtr != OldPtr + 1 && C != '/' &&
        (CurPtr == BufferEnd + 1 || CurPtr[0] != '/')) {
      for (; OldPtr != CurPtr; ++OldPtr)
        if (OldPtr[0] == '\n' || OldPtr[0] == '\r') {
          // Okay, we found a // comment that ends in a newline, if the next
          // line is also a // comment, but has spaces, don't emit a diagnostic.
          if (isWhitespace(C)) {
            const char *ForwardPtr = CurPtr;
            while (isWhitespace(*ForwardPtr)) // Skip whitespace.
              ++ForwardPtr;
            if (ForwardPtr[0] == '/' && ForwardPtr[1] == '/')
              break;
          }

          if (!isLexingRawMode())
            Diag(OldPtr-1, diag::ext_multi_line_line_comment);
          break;
        }
    }

    if (C == '\r' || C == '\n' || CurPtr == BufferEnd + 1) {
      --CurPtr;
      break;
    }

    if (C == '\0' && isCodeCompletionPoint(CurPtr-1)) {
      PP->CodeCompleteNaturalLanguage();
      cutOffLexing();
      return false;
    }
  }

  // Found but did not consume the newline. Notify comment handlers about the
  // comment unless we're in a #if 0 block.
  if (PP && !isLexingRawMode() &&
      PP->HandleComment(Result, SourceRange(getSourceLocation(BufferPtr),
                                            getSourceLocation(CurPtr)))) {
    BufferPtr = CurPtr;
    return true; // A token has to be returned.
  }

  // If we are returning comments as tokens, return this comment as a token.
  if (inKeepCommentMode())
    return SaveLineComment(Result, CurPtr);

  // If we are inside a preprocessor directive and we see the end of line,
  // return immediately, so that the lexer can return this as an EOD token.
  if (ParsingPreprocessorDirective || CurPtr == BufferEnd) {
    BufferPtr = CurPtr;
    return false;
  }

  // Otherwise, eat the \n character. We don't care if this is a \n\r or
  // \r\n sequence. This is an efficiency hack (because we know the \n can't
  // contribute to another token); it isn't needed for correctness. Note that
  // this is ok even in KeepWhitespaceMode, because we would have returned the
  // comment above in that mode.
  NewLinePtr = CurPtr++;

  // The next returned token is at the start of the line.
  Result.setFlag(Token::StartOfLine);
  TokAtPhysicalStartOfLine = true;
  // No leading whitespace seen so far.
  Result.clearFlag(Token::LeadingSpace);
  BufferPtr = CurPtr;
  return false;
}

/// If in save-comment mode, package up this line comment in an appropriate
/// way and return it.
bool Lexer::SaveLineComment(Token &Result, const char *CurPtr) {
  // If we're not in a preprocessor directive, just return the // comment
  // directly.
  FormTokenWithChars(Result, CurPtr, tok::comment);

  if (!ParsingPreprocessorDirective || LexingRawMode)
    return true;

  // If this line-style comment is in a macro definition, transmogrify it into
  // a C-style block comment.
  bool Invalid = false;
  std::string Spelling = PP->getSpelling(Result, &Invalid);
  if (Invalid)
    return true;

  assert(Spelling[0] == '/' && Spelling[1] == '/' && "Not line comment?");
  Spelling[1] = '*';   // Change prefix to "/*".
  Spelling += "*/";    // add suffix.

  Result.setKind(tok::comment);
  PP->CreateString(Spelling, Result,
                   Result.getLocation(), Result.getLocation());
  return true;
}

/// isEndOfBlockCommentWithEscapedNewLine - Return true if the specified
/// newline character (either \\n or \\r) is part of an escaped newline
/// sequence. Issue a diagnostic if so. We know that the newline is inside of a
/// block comment.
static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr, Lexer *L,
                                                  bool Trigraphs) {
  assert(CurPtr[0] == '\n' || CurPtr[0] == '\r');

  // Position of the first trigraph in the ending sequence.
  const char *TrigraphPos = nullptr;
  // Position of the first whitespace after a '\' in the ending sequence.
  const char *SpacePos = nullptr;

  while (true) {
    // Back up off the newline.
    --CurPtr;

    // If this is a two-character newline sequence, skip the other character.
    if (CurPtr[0] == '\n' || CurPtr[0] == '\r') {
      // \n\n or \r\r -> not escaped newline.
      if (CurPtr[0] == CurPtr[1])
        return false;
      // \n\r or \r\n -> skip the newline.
      --CurPtr;
    }

    // If we have horizontal whitespace, skip over it. We allow whitespace
    // between the slash and newline.
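    // (Illustrative: a comment ending in '*', then a backslash, trailing
    // whitespace, a newline, and '/' still splices to "*/"; SpacePos records
    // the whitespace for the backslash_newline_space warning below.)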
    while (isHorizontalWhitespace(*CurPtr) || *CurPtr == 0) {
      SpacePos = CurPtr;
      --CurPtr;
    }

    // If we have a slash, this is an escaped newline.
    if (*CurPtr == '\\') {
      --CurPtr;
    } else if (CurPtr[0] == '/' && CurPtr[-1] == '?' && CurPtr[-2] == '?') {
      // This is a trigraph encoding of a slash.
      TrigraphPos = CurPtr - 2;
      CurPtr -= 3;
    } else {
      return false;
    }

    // If the character preceding the escaped newline is a '*', then after line
    // splicing we have a '*/' ending the comment.
    if (*CurPtr == '*')
      break;

    if (*CurPtr != '\n' && *CurPtr != '\r')
      return false;
  }

  if (TrigraphPos) {
    // If trigraphs are not enabled, warn that we ignored this trigraph and
    // ignore this * character.
    if (!Trigraphs) {
      if (!L->isLexingRawMode())
        L->Diag(TrigraphPos, diag::trigraph_ignored_block_comment);
      return false;
    }
    if (!L->isLexingRawMode())
      L->Diag(TrigraphPos, diag::trigraph_ends_block_comment);
  }

  // Warn about having an escaped newline between the */ characters.
  if (!L->isLexingRawMode())
    L->Diag(CurPtr + 1, diag::escaped_newline_block_comment_end);

  // If there was space between the backslash and newline, warn about it.
  if (SpacePos && !L->isLexingRawMode())
    L->Diag(SpacePos, diag::backslash_newline_space);

  return true;
}

#ifdef __SSE2__
#include <emmintrin.h>
#elif __ALTIVEC__
#include <altivec.h>
#undef bool
#endif

/// We have just read from input the / and * characters that started a comment.
/// Read until we find the * and / characters that terminate the comment.
/// Note that we don't bother decoding trigraphs or escaped newlines in block
/// comments, because they cannot cause the comment to end. The only thing
/// that can happen is the comment could end with an escaped newline between
/// the terminating * and /.
///
/// If we're in KeepCommentMode or any CommentHandler has inserted
/// some tokens, this will store the first token and return true.
bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr,
                             bool &TokAtPhysicalStartOfLine) {
  // Scan one character past where we should, looking for a '/' character. Once
  // we find it, check to see if it was preceded by a *. This common
  // optimization helps people who like to put a lot of * characters in their
  // comments.

  // The first character we get with newlines and trigraphs skipped to handle
  // the degenerate /*/ case below correctly if the * has an escaped newline
  // after it.
  unsigned CharSize;
  unsigned char C = getCharAndSize(CurPtr, CharSize);
  CurPtr += CharSize;
  if (C == 0 && CurPtr == BufferEnd+1) {
    if (!isLexingRawMode())
      Diag(BufferPtr, diag::err_unterminated_block_comment);
    --CurPtr;

    // KeepWhitespaceMode should return this broken comment as a token. Since
    // it isn't a well formed comment, just return it as an 'unknown' token.
    if (isKeepWhitespaceMode()) {
      FormTokenWithChars(Result, CurPtr, tok::unknown);
      return true;
    }

    BufferPtr = CurPtr;
    return false;
  }

  // Check to see if the first character after the '/*' is another /. If so,
  // then this slash does not end the block comment, it is part of it.
  if (C == '/')
    C = *CurPtr++;

  // C++23 [lex.phases] p1
  // Diagnose invalid UTF-8 if the corresponding warning is enabled, emitting a
  // diagnostic only once per entire ill-formed subsequence to avoid
  // emitting too many diagnostics (see http://unicode.org/review/pr-121.html).
  bool UnicodeDecodingAlreadyDiagnosed = false;

  while (true) {
    // Skip over all non-interesting characters until we find end of buffer or a
    // (probably ending) '/' character.
    if (CurPtr + 24 < BufferEnd &&
        // If there is a code-completion point avoid the fast scan because it
        // doesn't check for '\0'.
        !(PP && PP->getCodeCompletionFileLoc() == FileLoc)) {
      // While not aligned to a 16-byte boundary.
      while (C != '/' && (intptr_t)CurPtr % 16 != 0) {
        if (!isASCII(C))
          goto MultiByteUTF8;
        C = *CurPtr++;
      }
      if (C == '/') goto FoundSlash;

#ifdef __SSE2__
      __m128i Slashes = _mm_set1_epi8('/');
      while (CurPtr + 16 < BufferEnd) {
        int Mask = _mm_movemask_epi8(*(const __m128i *)CurPtr);
        if (LLVM_UNLIKELY(Mask != 0)) {
          goto MultiByteUTF8;
        }
        // look for slashes
        int cmp = _mm_movemask_epi8(_mm_cmpeq_epi8(*(const __m128i*)CurPtr,
                                                   Slashes));
        if (cmp != 0) {
          // Adjust the pointer to point directly after the first slash. It's
          // not necessary to set C here, it will be overwritten at the end of
          // the outer loop.
          CurPtr += llvm::countr_zero<unsigned>(cmp) + 1;
          goto FoundSlash;
        }
        CurPtr += 16;
      }
#elif __ALTIVEC__
      __vector unsigned char LongUTF = {0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
                                        0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
                                        0x80, 0x80, 0x80, 0x80};
      __vector unsigned char Slashes = {
        '/', '/', '/', '/', '/', '/', '/', '/',
        '/', '/', '/', '/', '/', '/', '/', '/'
      };
      while (CurPtr + 16 < BufferEnd) {
        if (LLVM_UNLIKELY(
                vec_any_ge(*(const __vector unsigned char *)CurPtr, LongUTF)))
          goto MultiByteUTF8;
        if (vec_any_eq(*(const __vector unsigned char *)CurPtr, Slashes)) {
          break;
        }
        CurPtr += 16;
      }

#else
      while (CurPtr + 16 < BufferEnd) {
        bool HasNonASCII = false;
        for (unsigned I = 0; I < 16; ++I)
          HasNonASCII |= !isASCII(CurPtr[I]);

        if (LLVM_UNLIKELY(HasNonASCII))
          goto MultiByteUTF8;

        bool HasSlash = false;
        for (unsigned I = 0; I < 16; ++I)
          HasSlash |= CurPtr[I] == '/';
        if (HasSlash)
          break;
        CurPtr += 16;
      }
#endif

      // It has to be one of the bytes scanned, increment to it and read one.
      C = *CurPtr++;
    }

    // Loop to scan the remainder, warning on invalid UTF-8
    // if the corresponding warning is enabled, emitting a diagnostic only once
    // per sequence that cannot be decoded.
    while (C != '/' && C != '\0') {
      if (isASCII(C)) {
        UnicodeDecodingAlreadyDiagnosed = false;
        C = *CurPtr++;
        continue;
      }
    MultiByteUTF8:
      // CurPtr is 1 code unit past C, so to decode
      // the codepoint, we need to read from the previous position.
      unsigned Length = llvm::getUTF8SequenceSize(
          (const llvm::UTF8 *)CurPtr - 1, (const llvm::UTF8 *)BufferEnd);
      if (Length == 0) {
        if (!UnicodeDecodingAlreadyDiagnosed && !isLexingRawMode())
          Diag(CurPtr - 1, diag::warn_invalid_utf8_in_comment);
        UnicodeDecodingAlreadyDiagnosed = true;
      } else {
        UnicodeDecodingAlreadyDiagnosed = false;
        CurPtr += Length - 1;
      }
      C = *CurPtr++;
    }

    if (C == '/') {
  FoundSlash:
      if (CurPtr[-2] == '*') // We found the final */. We're done!
        break;

      if ((CurPtr[-2] == '\n' || CurPtr[-2] == '\r')) {
        if (isEndOfBlockCommentWithEscapedNewLine(CurPtr - 2, this,
                                                  LangOpts.Trigraphs)) {
          // We found the final */, though it had an escaped newline between the
          // * and /. We're done!
          break;
        }
      }
      if (CurPtr[0] == '*' && CurPtr[1] != '/') {
        // If this is a /* inside of the comment, emit a warning. Don't do this
        // if this is a /*/, which will end the comment. This misses cases with
        // embedded escaped newlines, but oh well.
        if (!isLexingRawMode())
          Diag(CurPtr-1, diag::warn_nested_block_comment);
      }
    } else if (C == 0 && CurPtr == BufferEnd+1) {
      if (!isLexingRawMode())
        Diag(BufferPtr, diag::err_unterminated_block_comment);
      // Note: the user probably forgot a */. We could continue immediately
      // after the /*, but this would involve lexing a lot of what really is the
      // comment, which surely would confuse the parser.
      --CurPtr;

      // KeepWhitespaceMode should return this broken comment as a token. Since
      // it isn't a well formed comment, just return it as an 'unknown' token.
      if (isKeepWhitespaceMode()) {
        FormTokenWithChars(Result, CurPtr, tok::unknown);
        return true;
      }

      BufferPtr = CurPtr;
      return false;
    } else if (C == '\0' && isCodeCompletionPoint(CurPtr-1)) {
      PP->CodeCompleteNaturalLanguage();
      cutOffLexing();
      return false;
    }

    C = *CurPtr++;
  }

  // Notify comment handlers about the comment unless we're in a #if 0 block.
  if (PP && !isLexingRawMode() &&
      PP->HandleComment(Result, SourceRange(getSourceLocation(BufferPtr),
                                            getSourceLocation(CurPtr)))) {
    BufferPtr = CurPtr;
    return true; // A token has to be returned.
  }

  // If we are returning comments as tokens, return this comment as a token.
  if (inKeepCommentMode()) {
    FormTokenWithChars(Result, CurPtr, tok::comment);
    return true;
  }

  // It is common for the tokens immediately after a /**/ comment to be
  // whitespace. Instead of going through the big switch, handle it
  // efficiently now. This is safe even in KeepWhitespaceMode because we would
  // have already returned above with the comment as a token.
  if (isHorizontalWhitespace(*CurPtr)) {
    SkipWhitespace(Result, CurPtr+1, TokAtPhysicalStartOfLine);
    return false;
  }

  // Otherwise, just return so that the next character will be lexed as a token.
  BufferPtr = CurPtr;
  Result.setFlag(Token::LeadingSpace);
  return false;
}

//===----------------------------------------------------------------------===//
// Primary Lexing Entry Points
//===----------------------------------------------------------------------===//

/// ReadToEndOfLine - Read the rest of the current preprocessor line as an
/// uninterpreted string. This switches the lexer out of directive mode.
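/// (For example — usage inferred from callers, not stated here — this is how
/// the text of a #error or #warning directive is collected verbatim.)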
void Lexer::ReadToEndOfLine(SmallVectorImpl<char> *Result) {
  assert(ParsingPreprocessorDirective && ParsingFilename == false &&
         "Must be in a preprocessing directive!");
  Token Tmp;
  Tmp.startToken();

  // CurPtr - Cache BufferPtr in an automatic variable.
  const char *CurPtr = BufferPtr;
  while (true) {
    char Char = getAndAdvanceChar(CurPtr, Tmp);
    switch (Char) {
    default:
      if (Result)
        Result->push_back(Char);
      break;
    case 0: // Null.
      // Found end of file?
      if (CurPtr-1 != BufferEnd) {
        if (isCodeCompletionPoint(CurPtr-1)) {
          PP->CodeCompleteNaturalLanguage();
          cutOffLexing();
          return;
        }

        // Nope, normal character, continue.
        if (Result)
          Result->push_back(Char);
        break;
      }
      // FALL THROUGH.
      [[fallthrough]];
    case '\r':
    case '\n':
      // Okay, we found the end of the line. First, back up past the \0, \r, \n.
      assert(CurPtr[-1] == Char && "Trigraphs for newline?");
      BufferPtr = CurPtr-1;

      // Next, lex the character, which should handle the EOD transition.
      Lex(Tmp);
      if (Tmp.is(tok::code_completion)) {
        if (PP)
          PP->CodeCompleteNaturalLanguage();
        Lex(Tmp);
      }
      assert(Tmp.is(tok::eod) && "Unexpected token!");

      // Finally, we're done.
      return;
    }
  }
}

/// LexEndOfFile - CurPtr points to the end of this file. Handle this
/// condition, reporting diagnostics and handling other edge cases as required.
/// This returns true if Result contains a token, false if PP.Lex should be
/// called again.
bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
  // If we hit the end of the file while parsing a preprocessor directive,
  // end the preprocessor directive first. The next token returned will
  // then be the end of file.
  if (ParsingPreprocessorDirective) {
    // Done parsing the "line".
    ParsingPreprocessorDirective = false;
    // Update the location of token as well as BufferPtr.
    FormTokenWithChars(Result, CurPtr, tok::eod);

    // Restore comment saving mode, in case it was disabled for directive.
    if (PP)
      resetExtendedTokenMode();
    return true; // Have a token.
  }

  // If we are in raw mode, return this event as an EOF token. Let the caller
  // that put us in raw mode handle the event.
  if (isLexingRawMode()) {
    Result.startToken();
    BufferPtr = BufferEnd;
    FormTokenWithChars(Result, BufferEnd, tok::eof);
    return true;
  }

  if (PP->isRecordingPreamble() && PP->isInPrimaryFile()) {
    PP->setRecordedPreambleConditionalStack(ConditionalStack);
    // If the preamble cuts off the end of a header guard, consider it guarded.
    // The guard is valid for the preamble content itself, and for tools the
    // most useful answer is "yes, this file has a header guard".
    if (!ConditionalStack.empty())
      MIOpt.ExitTopLevelConditional();
    ConditionalStack.clear();
  }

  // Issue diagnostics for unterminated #if and missing newline.

  // If we are in a #if directive, emit an error.
  while (!ConditionalStack.empty()) {
    if (PP->getCodeCompletionFileLoc() != FileLoc)
      PP->Diag(ConditionalStack.back().IfLoc,
               diag::err_pp_unterminated_conditional);
    ConditionalStack.pop_back();
  }

  // C99 5.1.1.2p2: If the file is non-empty and didn't end in a newline, issue
  // a pedwarn.
  if (CurPtr != BufferStart && (CurPtr[-1] != '\n' && CurPtr[-1] != '\r')) {
    DiagnosticsEngine &Diags = PP->getDiagnostics();
    SourceLocation EndLoc = getSourceLocation(BufferEnd);
    unsigned DiagID;

    if (LangOpts.CPlusPlus11) {
      // C++11 [lex.phases] 2.2 p2
      // Prefer the C++98 pedantic compatibility warning over the generic,
      // non-extension, user-requested "missing newline at EOF" warning.
      if (!Diags.isIgnored(diag::warn_cxx98_compat_no_newline_eof, EndLoc)) {
        DiagID = diag::warn_cxx98_compat_no_newline_eof;
      } else {
        DiagID = diag::warn_no_newline_eof;
      }
    } else {
      DiagID = diag::ext_no_newline_eof;
    }

    Diag(BufferEnd, DiagID)
        << FixItHint::CreateInsertion(EndLoc, "\n");
  }

  BufferPtr = CurPtr;

  // Finally, let the preprocessor handle this.
  return PP->HandleEndOfFile(Result, isPragmaLexer());
}

/// isNextPPTokenLParen - Return 1 if the next unexpanded token lexed from
/// the specified lexer will return a tok::l_paren token, 0 if it is something
/// else, and 2 if there are no more tokens in the buffer controlled by the
/// lexer.
unsigned Lexer::isNextPPTokenLParen() {
  assert(!LexingRawMode && "How can we expand a macro from a skipping buffer?");

  if (isDependencyDirectivesLexer()) {
    if (NextDepDirectiveTokenIndex == DepDirectives.front().Tokens.size())
      return 2;
    return DepDirectives.front().Tokens[NextDepDirectiveTokenIndex].is(
        tok::l_paren);
  }

  // Switch to 'skipping' mode. This will ensure that we can lex a token
  // without emitting diagnostics, disables macro expansion, and will cause EOF
  // to return an EOF token instead of popping the include stack.
  LexingRawMode = true;

  // Save state that can be changed while lexing so that we can restore it.
  const char *TmpBufferPtr = BufferPtr;
  bool inPPDirectiveMode = ParsingPreprocessorDirective;
  bool atStartOfLine = IsAtStartOfLine;
  bool atPhysicalStartOfLine = IsAtPhysicalStartOfLine;
  bool leadingSpace = HasLeadingSpace;

  Token Tok;
  Lex(Tok);

  // Restore state that may have changed.
  BufferPtr = TmpBufferPtr;
  ParsingPreprocessorDirective = inPPDirectiveMode;
  HasLeadingSpace = leadingSpace;
  IsAtStartOfLine = atStartOfLine;
  IsAtPhysicalStartOfLine = atPhysicalStartOfLine;

  // Restore the lexer back to non-skipping mode.
  LexingRawMode = false;

  if (Tok.is(tok::eof))
    return 2;
  return Tok.is(tok::l_paren);
}

/// Find the end of a version control conflict marker.
static const char *FindConflictEnd(const char *CurPtr, const char *BufferEnd,
                                   ConflictMarkerKind CMK) {
  const char *Terminator = CMK == CMK_Perforce ? "<<<<\n" : ">>>>>>>";
  size_t TermLen = CMK == CMK_Perforce ? 5 : 7;
  auto RestOfBuffer = StringRef(CurPtr, BufferEnd - CurPtr).substr(TermLen);
  size_t Pos = RestOfBuffer.find(Terminator);
  while (Pos != StringRef::npos) {
    // Must occur at start of line.
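    // (Illustrative: a ">>>>>>>" appearing mid-line, say inside a string,
    // does not terminate the marker; only a match directly after a '\r' or
    // '\n' counts, which the check below enforces.)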
3139 if (Pos == 0 || 3140 (RestOfBuffer[Pos - 1] != '\r' && RestOfBuffer[Pos - 1] != '\n')) { 3141 RestOfBuffer = RestOfBuffer.substr(Pos+TermLen); 3142 Pos = RestOfBuffer.find(Terminator); 3143 continue; 3144 } 3145 return RestOfBuffer.data()+Pos; 3146 } 3147 return nullptr; 3148 } 3149 3150 /// IsStartOfConflictMarker - If the specified pointer is the start of a version 3151 /// control conflict marker like '<<<<<<<', recognize it as such, emit an error 3152 /// and recover nicely. This returns true if it is a conflict marker and false 3153 /// if not. 3154 bool Lexer::IsStartOfConflictMarker(const char *CurPtr) { 3155 // Only a conflict marker if it starts at the beginning of a line. 3156 if (CurPtr != BufferStart && 3157 CurPtr[-1] != '\n' && CurPtr[-1] != '\r') 3158 return false; 3159 3160 // Check to see if we have <<<<<<< or >>>>. 3161 if (!StringRef(CurPtr, BufferEnd - CurPtr).startswith("<<<<<<<") && 3162 !StringRef(CurPtr, BufferEnd - CurPtr).startswith(">>>> ")) 3163 return false; 3164 3165 // If we have a situation where we don't care about conflict markers, ignore 3166 // it. 3167 if (CurrentConflictMarkerState || isLexingRawMode()) 3168 return false; 3169 3170 ConflictMarkerKind Kind = *CurPtr == '<' ? CMK_Normal : CMK_Perforce; 3171 3172 // Check to see if there is an ending marker somewhere in the buffer at the 3173 // start of a line to terminate this conflict marker. 3174 if (FindConflictEnd(CurPtr, BufferEnd, Kind)) { 3175 // We found a match. We are really in a conflict marker. 3176 // Diagnose this, and ignore to the end of line. 3177 Diag(CurPtr, diag::err_conflict_marker); 3178 CurrentConflictMarkerState = Kind; 3179 3180 // Skip ahead to the end of line. We know this exists because the 3181 // end-of-conflict marker starts with \r or \n. 3182 while (*CurPtr != '\r' && *CurPtr != '\n') { 3183 assert(CurPtr != BufferEnd && "Didn't find end of line"); 3184 ++CurPtr; 3185 } 3186 BufferPtr = CurPtr; 3187 return true; 3188 } 3189 3190 // No end of conflict marker found. 3191 return false; 3192 } 3193 3194 /// HandleEndOfConflictMarker - If this is a '====' or '||||' or '>>>>', or if 3195 /// it is '<<<<' and the conflict marker started with a '>>>>' marker, then it 3196 /// is the end of a conflict marker. Handle it by ignoring up until the end of 3197 /// the line. This returns true if it is a conflict marker and false if not. 3198 bool Lexer::HandleEndOfConflictMarker(const char *CurPtr) { 3199 // Only a conflict marker if it starts at the beginning of a line. 3200 if (CurPtr != BufferStart && 3201 CurPtr[-1] != '\n' && CurPtr[-1] != '\r') 3202 return false; 3203 3204 // If we have a situation where we don't care about conflict markers, ignore 3205 // it. 3206 if (!CurrentConflictMarkerState || isLexingRawMode()) 3207 return false; 3208 3209 // Check to see if we have the marker (4 characters in a row). 3210 for (unsigned i = 1; i != 4; ++i) 3211 if (CurPtr[i] != CurPtr[0]) 3212 return false; 3213 3214 // If we do have it, search for the end of the conflict marker. This could 3215 // fail if it got skipped with a '#if 0' or something. Note that CurPtr might 3216 // be the end of conflict marker. 3217 if (const char *End = FindConflictEnd(CurPtr, BufferEnd, 3218 CurrentConflictMarkerState)) { 3219 CurPtr = End; 3220 3221 // Skip ahead to the end of line. 3222 while (CurPtr != BufferEnd && *CurPtr != '\r' && *CurPtr != '\n') 3223 ++CurPtr; 3224 3225 BufferPtr = CurPtr; 3226 3227 // No longer in the conflict marker. 
    CurrentConflictMarkerState = CMK_None;
    return true;
  }

  return false;
}

static const char *findPlaceholderEnd(const char *CurPtr,
                                      const char *BufferEnd) {
  if (CurPtr == BufferEnd)
    return nullptr;
  BufferEnd -= 1; // Scan until the second-to-last character.
  for (; CurPtr != BufferEnd; ++CurPtr) {
    if (CurPtr[0] == '#' && CurPtr[1] == '>')
      return CurPtr + 2;
  }
  return nullptr;
}

bool Lexer::lexEditorPlaceholder(Token &Result, const char *CurPtr) {
  assert(CurPtr[-1] == '<' && CurPtr[0] == '#' && "Not a placeholder!");
  if (!PP || !PP->getPreprocessorOpts().LexEditorPlaceholders || LexingRawMode)
    return false;
  const char *End = findPlaceholderEnd(CurPtr + 1, BufferEnd);
  if (!End)
    return false;
  const char *Start = CurPtr - 1;
  if (!LangOpts.AllowEditorPlaceholders)
    Diag(Start, diag::err_placeholder_in_source);
  Result.startToken();
  FormTokenWithChars(Result, End, tok::raw_identifier);
  Result.setRawIdentifierData(Start);
  PP->LookUpIdentifierInfo(Result);
  Result.setFlag(Token::IsEditorPlaceholder);
  BufferPtr = End;
  return true;
}

bool Lexer::isCodeCompletionPoint(const char *CurPtr) const {
  if (PP && PP->isCodeCompletionEnabled()) {
    SourceLocation Loc = FileLoc.getLocWithOffset(CurPtr-BufferStart);
    return Loc == PP->getCodeCompletionLoc();
  }

  return false;
}

std::optional<uint32_t> Lexer::tryReadNumericUCN(const char *&StartPtr,
                                                 const char *SlashLoc,
                                                 Token *Result) {
  unsigned CharSize;
  char Kind = getCharAndSize(StartPtr, CharSize);
  assert((Kind == 'u' || Kind == 'U') && "expected a UCN");

  // Initialized to 0 so the variable cannot be read uninitialized; the assert
  // above guarantees one of the two branches below sets it.
  unsigned NumHexDigits = 0;
  if (Kind == 'u')
    NumHexDigits = 4;
  else if (Kind == 'U')
    NumHexDigits = 8;

  bool Delimited = false;
  bool FoundEndDelimiter = false;
  unsigned Count = 0;
  bool Diagnose = Result && !isLexingRawMode();

  if (!LangOpts.CPlusPlus && !LangOpts.C99) {
    if (Diagnose)
      Diag(SlashLoc, diag::warn_ucn_not_valid_in_c89);
    return std::nullopt;
  }

  const char *CurPtr = StartPtr + CharSize;
  const char *KindLoc = &CurPtr[-1];

  uint32_t CodePoint = 0;
  while (Count != NumHexDigits || Delimited) {
    char C = getCharAndSize(CurPtr, CharSize);
    if (!Delimited && Count == 0 && C == '{') {
      Delimited = true;
      CurPtr += CharSize;
      continue;
    }

    if (Delimited && C == '}') {
      CurPtr += CharSize;
      FoundEndDelimiter = true;
      break;
    }

    unsigned Value = llvm::hexDigitValue(C);
    if (Value == -1U) {
      if (!Delimited)
        break;
      if (Diagnose)
        Diag(SlashLoc, diag::warn_delimited_ucn_incomplete)
            << StringRef(KindLoc, 1);
      return std::nullopt;
    }

    if (CodePoint & 0xF000'0000) {
      if (Diagnose)
        Diag(KindLoc, diag::err_escape_too_large) << 0;
      return std::nullopt;
    }

    CodePoint <<= 4;
    CodePoint |= Value;
    CurPtr += CharSize;
    Count++;
  }

  if (Count == 0) {
    if (Diagnose)
      Diag(SlashLoc, FoundEndDelimiter ?
diag::warn_delimited_ucn_empty 3342 : diag::warn_ucn_escape_no_digits) 3343 << StringRef(KindLoc, 1); 3344 return std::nullopt; 3345 } 3346 3347 if (Delimited && Kind == 'U') { 3348 if (Diagnose) 3349 Diag(SlashLoc, diag::err_hex_escape_no_digits) << StringRef(KindLoc, 1); 3350 return std::nullopt; 3351 } 3352 3353 if (!Delimited && Count != NumHexDigits) { 3354 if (Diagnose) { 3355 Diag(SlashLoc, diag::warn_ucn_escape_incomplete); 3356 // If the user wrote \U1234, suggest a fixit to \u. 3357 if (Count == 4 && NumHexDigits == 8) { 3358 CharSourceRange URange = makeCharRange(*this, KindLoc, KindLoc + 1); 3359 Diag(KindLoc, diag::note_ucn_four_not_eight) 3360 << FixItHint::CreateReplacement(URange, "u"); 3361 } 3362 } 3363 return std::nullopt; 3364 } 3365 3366 if (Delimited && PP) { 3367 Diag(SlashLoc, PP->getLangOpts().CPlusPlus23 3368 ? diag::warn_cxx23_delimited_escape_sequence 3369 : diag::ext_delimited_escape_sequence) 3370 << /*delimited*/ 0 << (PP->getLangOpts().CPlusPlus ? 1 : 0); 3371 } 3372 3373 if (Result) { 3374 Result->setFlag(Token::HasUCN); 3375 // If the UCN contains either a trigraph or a line splicing, 3376 // we need to call getAndAdvanceChar again to set the appropriate flags 3377 // on Result. 3378 if (CurPtr - StartPtr == (ptrdiff_t)(Count + 1 + (Delimited ? 2 : 0))) 3379 StartPtr = CurPtr; 3380 else 3381 while (StartPtr != CurPtr) 3382 (void)getAndAdvanceChar(StartPtr, *Result); 3383 } else { 3384 StartPtr = CurPtr; 3385 } 3386 return CodePoint; 3387 } 3388 3389 std::optional<uint32_t> Lexer::tryReadNamedUCN(const char *&StartPtr, 3390 const char *SlashLoc, 3391 Token *Result) { 3392 unsigned CharSize; 3393 bool Diagnose = Result && !isLexingRawMode(); 3394 3395 char C = getCharAndSize(StartPtr, CharSize); 3396 assert(C == 'N' && "expected \\N{...}"); 3397 3398 const char *CurPtr = StartPtr + CharSize; 3399 const char *KindLoc = &CurPtr[-1]; 3400 3401 C = getCharAndSize(CurPtr, CharSize); 3402 if (C != '{') { 3403 if (Diagnose) 3404 Diag(SlashLoc, diag::warn_ucn_escape_incomplete); 3405 return std::nullopt; 3406 } 3407 CurPtr += CharSize; 3408 const char *StartName = CurPtr; 3409 bool FoundEndDelimiter = false; 3410 llvm::SmallVector<char, 30> Buffer; 3411 while (C) { 3412 C = getCharAndSize(CurPtr, CharSize); 3413 CurPtr += CharSize; 3414 if (C == '}') { 3415 FoundEndDelimiter = true; 3416 break; 3417 } 3418 3419 if (isVerticalWhitespace(C)) 3420 break; 3421 Buffer.push_back(C); 3422 } 3423 3424 if (!FoundEndDelimiter || Buffer.empty()) { 3425 if (Diagnose) 3426 Diag(SlashLoc, FoundEndDelimiter ? 
diag::warn_delimited_ucn_empty
                      : diag::warn_delimited_ucn_incomplete)
          << StringRef(KindLoc, 1);
    return std::nullopt;
  }

  StringRef Name(Buffer.data(), Buffer.size());
  std::optional<char32_t> Match =
      llvm::sys::unicode::nameToCodepointStrict(Name);
  std::optional<llvm::sys::unicode::LooseMatchingResult> LooseMatch;
  if (!Match) {
    LooseMatch = llvm::sys::unicode::nameToCodepointLooseMatching(Name);
    if (Diagnose) {
      Diag(StartName, diag::err_invalid_ucn_name)
          << StringRef(Buffer.data(), Buffer.size())
          << makeCharRange(*this, StartName, CurPtr - CharSize);
      if (LooseMatch) {
        Diag(StartName, diag::note_invalid_ucn_name_loose_matching)
            << FixItHint::CreateReplacement(
                   makeCharRange(*this, StartName, CurPtr - CharSize),
                   LooseMatch->Name);
      }
    }
    // We do not offer misspelled character name suggestions here, as the set
    // of what would be a valid suggestion depends on context, and we should
    // not make invalid suggestions.
  }

  if (Diagnose && Match)
    Diag(SlashLoc, PP->getLangOpts().CPlusPlus23
                       ? diag::warn_cxx23_delimited_escape_sequence
                       : diag::ext_delimited_escape_sequence)
        << /*named*/ 1 << (PP->getLangOpts().CPlusPlus ? 1 : 0);

  // If no diagnostic has been emitted yet, likely because we are doing
  // tentative lexing, we do not want to recover here; that way the token will
  // not be incorrectly considered valid. This function will be called again
  // and a diagnostic emitted then.
  if (LooseMatch && Diagnose)
    Match = LooseMatch->CodePoint;

  if (Result) {
    Result->setFlag(Token::HasUCN);
    // If the UCN contains either a trigraph or a line splicing,
    // we need to call getAndAdvanceChar again to set the appropriate flags
    // on Result.
    if (CurPtr - StartPtr == (ptrdiff_t)(Buffer.size() + 3))
      StartPtr = CurPtr;
    else
      while (StartPtr != CurPtr)
        (void)getAndAdvanceChar(StartPtr, *Result);
  } else {
    StartPtr = CurPtr;
  }
  return Match ? std::optional<uint32_t>(*Match) : std::nullopt;
}

uint32_t Lexer::tryReadUCN(const char *&StartPtr, const char *SlashLoc,
                           Token *Result) {

  unsigned CharSize;
  std::optional<uint32_t> CodePointOpt;
  char Kind = getCharAndSize(StartPtr, CharSize);
  if (Kind == 'u' || Kind == 'U')
    CodePointOpt = tryReadNumericUCN(StartPtr, SlashLoc, Result);
  else if (Kind == 'N')
    CodePointOpt = tryReadNamedUCN(StartPtr, SlashLoc, Result);

  if (!CodePointOpt)
    return 0;

  uint32_t CodePoint = *CodePointOpt;

  // Don't apply C family restrictions to UCNs in assembly mode.
  if (LangOpts.AsmPreprocessor)
    return CodePoint;

  // C23 6.4.3p2: A universal character name shall not designate a code point
  // where the hexadecimal value is:
  // - in the range D800 through DFFF inclusive; or
  // - greater than 10FFFF.
  // A universal-character-name outside the c-char-sequence of a character
  // constant, or the s-char-sequence of a string-literal shall not designate
  // a control character or a character in the basic character set.

  // C++11 [lex.charset]p2: If the hexadecimal value for a
  // universal-character-name corresponds to a surrogate code point (in the
  // range 0xD800-0xDFFF, inclusive), the program is ill-formed.
Additionally, 3514 // if the hexadecimal value for a universal-character-name outside the 3515 // c-char-sequence, s-char-sequence, or r-char-sequence of a character or 3516 // string literal corresponds to a control character (in either of the 3517 // ranges 0x00-0x1F or 0x7F-0x9F, both inclusive) or to a character in the 3518 // basic source character set, the program is ill-formed. 3519 if (CodePoint < 0xA0) { 3520 // We don't use isLexingRawMode() here because we need to warn about bad 3521 // UCNs even when skipping preprocessing tokens in a #if block. 3522 if (Result && PP) { 3523 if (CodePoint < 0x20 || CodePoint >= 0x7F) 3524 Diag(BufferPtr, diag::err_ucn_control_character); 3525 else { 3526 char C = static_cast<char>(CodePoint); 3527 Diag(BufferPtr, diag::err_ucn_escape_basic_scs) << StringRef(&C, 1); 3528 } 3529 } 3530 3531 return 0; 3532 } else if (CodePoint >= 0xD800 && CodePoint <= 0xDFFF) { 3533 // C++03 allows UCNs representing surrogate characters. C99 and C++11 don't. 3534 // We don't use isLexingRawMode() here because we need to diagnose bad 3535 // UCNs even when skipping preprocessing tokens in a #if block. 3536 if (Result && PP) { 3537 if (LangOpts.CPlusPlus && !LangOpts.CPlusPlus11) 3538 Diag(BufferPtr, diag::warn_ucn_escape_surrogate); 3539 else 3540 Diag(BufferPtr, diag::err_ucn_escape_invalid); 3541 } 3542 return 0; 3543 } 3544 3545 return CodePoint; 3546 } 3547 3548 bool Lexer::CheckUnicodeWhitespace(Token &Result, uint32_t C, 3549 const char *CurPtr) { 3550 if (!isLexingRawMode() && !PP->isPreprocessedOutput() && 3551 isUnicodeWhitespace(C)) { 3552 Diag(BufferPtr, diag::ext_unicode_whitespace) 3553 << makeCharRange(*this, BufferPtr, CurPtr); 3554 3555 Result.setFlag(Token::LeadingSpace); 3556 return true; 3557 } 3558 return false; 3559 } 3560 3561 void Lexer::PropagateLineStartLeadingSpaceInfo(Token &Result) { 3562 IsAtStartOfLine = Result.isAtStartOfLine(); 3563 HasLeadingSpace = Result.hasLeadingSpace(); 3564 HasLeadingEmptyMacro = Result.hasLeadingEmptyMacro(); 3565 // Note that this doesn't affect IsAtPhysicalStartOfLine. 3566 } 3567 3568 bool Lexer::Lex(Token &Result) { 3569 assert(!isDependencyDirectivesLexer()); 3570 3571 // Start a new token. 3572 Result.startToken(); 3573 3574 // Set up misc whitespace flags for LexTokenInternal. 3575 if (IsAtStartOfLine) { 3576 Result.setFlag(Token::StartOfLine); 3577 IsAtStartOfLine = false; 3578 } 3579 3580 if (HasLeadingSpace) { 3581 Result.setFlag(Token::LeadingSpace); 3582 HasLeadingSpace = false; 3583 } 3584 3585 if (HasLeadingEmptyMacro) { 3586 Result.setFlag(Token::LeadingEmptyMacro); 3587 HasLeadingEmptyMacro = false; 3588 } 3589 3590 bool atPhysicalStartOfLine = IsAtPhysicalStartOfLine; 3591 IsAtPhysicalStartOfLine = false; 3592 bool isRawLex = isLexingRawMode(); 3593 (void) isRawLex; 3594 bool returnedToken = LexTokenInternal(Result, atPhysicalStartOfLine); 3595 // (After the LexTokenInternal call, the lexer might be destroyed.) 3596 assert((returnedToken || !isRawLex) && "Raw lex must succeed"); 3597 return returnedToken; 3598 } 3599 3600 /// LexTokenInternal - This implements a simple C family lexer. It is an 3601 /// extremely performance critical piece of code. This assumes that the buffer 3602 /// has a null character at the end of the file. This returns a preprocessing 3603 /// token, not a normal token, as such, it is an internal interface. It assumes 3604 /// that the Flags of result have been cleared before calling this. 
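/// Returns true if Result holds a token for the caller. A false return means
/// a preprocessing directive was handled instead, and the caller should lex
/// again with the updated state.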
bool Lexer::LexTokenInternal(Token &Result, bool TokAtPhysicalStartOfLine) {
LexStart:
  assert(!Result.needsCleaning() && "Result needs cleaning");
  assert(!Result.hasPtrData() && "Result has not been reset");

  // CurPtr - Cache BufferPtr in an automatic variable.
  const char *CurPtr = BufferPtr;

  // Small amounts of horizontal whitespace are very common between tokens.
  if (isHorizontalWhitespace(*CurPtr)) {
    do {
      ++CurPtr;
    } while (isHorizontalWhitespace(*CurPtr));

    // If we are keeping whitespace and other tokens, just return what we just
    // skipped. The next lexer invocation will return the token after the
    // whitespace.
    if (isKeepWhitespaceMode()) {
      FormTokenWithChars(Result, CurPtr, tok::unknown);
      // FIXME: The next token will not have LeadingSpace set.
      return true;
    }

    BufferPtr = CurPtr;
    Result.setFlag(Token::LeadingSpace);
  }

  unsigned SizeTmp, SizeTmp2; // Temporaries for use in cases below.

  // Read a character, advancing over it.
  char Char = getAndAdvanceChar(CurPtr, Result);
  tok::TokenKind Kind;

  if (!isVerticalWhitespace(Char))
    NewLinePtr = nullptr;

  switch (Char) {
  case 0: // Null.
    // Found end of file?
    if (CurPtr-1 == BufferEnd)
      return LexEndOfFile(Result, CurPtr-1);

    // Check if we are performing code completion.
    if (isCodeCompletionPoint(CurPtr-1)) {
      // Return the code-completion token.
      Result.startToken();
      FormTokenWithChars(Result, CurPtr, tok::code_completion);
      return true;
    }

    if (!isLexingRawMode())
      Diag(CurPtr-1, diag::null_in_file);
    Result.setFlag(Token::LeadingSpace);
    if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
      return true; // KeepWhitespaceMode

    // We know the lexer hasn't changed, so just try again with this lexer.
    // (We manually eliminate the tail call to avoid recursion.)
    goto LexNextToken;

  case 26: // DOS & CP/M EOF: "^Z".
    // If we're in Microsoft extensions mode, treat this as end of file.
    if (LangOpts.MicrosoftExt) {
      if (!isLexingRawMode())
        Diag(CurPtr-1, diag::ext_ctrl_z_eof_microsoft);
      return LexEndOfFile(Result, CurPtr-1);
    }

    // If Microsoft extensions are disabled, this is just random garbage.
    Kind = tok::unknown;
    break;

  case '\r':
    if (CurPtr[0] == '\n')
      (void)getAndAdvanceChar(CurPtr, Result);
    [[fallthrough]];
  case '\n':
    // If we are inside a preprocessor directive and we see the end of line,
    // we know we are done with the directive, so return an EOD token.
    if (ParsingPreprocessorDirective) {
      // Done parsing the "line".
      ParsingPreprocessorDirective = false;

      // Restore comment saving mode, in case it was disabled for the directive.
      if (PP)
        resetExtendedTokenMode();

      // Since we consumed a newline, we are back at the start of a line.
      IsAtStartOfLine = true;
      IsAtPhysicalStartOfLine = true;
      NewLinePtr = CurPtr - 1;

      Kind = tok::eod;
      break;
    }

    // No leading whitespace seen so far.
    Result.clearFlag(Token::LeadingSpace);

    if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
      return true; // KeepWhitespaceMode

    // We only saw whitespace, so just try again with this lexer.
    // (We manually eliminate the tail call to avoid recursion.)
3709 goto LexNextToken; 3710 case ' ': 3711 case '\t': 3712 case '\f': 3713 case '\v': 3714 SkipHorizontalWhitespace: 3715 Result.setFlag(Token::LeadingSpace); 3716 if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine)) 3717 return true; // KeepWhitespaceMode 3718 3719 SkipIgnoredUnits: 3720 CurPtr = BufferPtr; 3721 3722 // If the next token is obviously a // or /* */ comment, skip it efficiently 3723 // too (without going through the big switch stmt). 3724 if (CurPtr[0] == '/' && CurPtr[1] == '/' && !inKeepCommentMode() && 3725 LineComment && (LangOpts.CPlusPlus || !LangOpts.TraditionalCPP)) { 3726 if (SkipLineComment(Result, CurPtr+2, TokAtPhysicalStartOfLine)) 3727 return true; // There is a token to return. 3728 goto SkipIgnoredUnits; 3729 } else if (CurPtr[0] == '/' && CurPtr[1] == '*' && !inKeepCommentMode()) { 3730 if (SkipBlockComment(Result, CurPtr+2, TokAtPhysicalStartOfLine)) 3731 return true; // There is a token to return. 3732 goto SkipIgnoredUnits; 3733 } else if (isHorizontalWhitespace(*CurPtr)) { 3734 goto SkipHorizontalWhitespace; 3735 } 3736 // We only saw whitespace, so just try again with this lexer. 3737 // (We manually eliminate the tail call to avoid recursion.) 3738 goto LexNextToken; 3739 3740 // C99 6.4.4.1: Integer Constants. 3741 // C99 6.4.4.2: Floating Constants. 3742 case '0': case '1': case '2': case '3': case '4': 3743 case '5': case '6': case '7': case '8': case '9': 3744 // Notify MIOpt that we read a non-whitespace/non-comment token. 3745 MIOpt.ReadToken(); 3746 return LexNumericConstant(Result, CurPtr); 3747 3748 // Identifier (e.g., uber), or 3749 // UTF-8 (C23/C++17) or UTF-16 (C11/C++11) character literal, or 3750 // UTF-8 or UTF-16 string literal (C11/C++11). 3751 case 'u': 3752 // Notify MIOpt that we read a non-whitespace/non-comment token. 
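    // (MIOpt implements the multiple-include optimization: any real token
    // seen outside the header-guard conditional disqualifies the file from
    // being skipped on re-inclusion.)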
3753 MIOpt.ReadToken(); 3754 3755 if (LangOpts.CPlusPlus11 || LangOpts.C11) { 3756 Char = getCharAndSize(CurPtr, SizeTmp); 3757 3758 // UTF-16 string literal 3759 if (Char == '"') 3760 return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result), 3761 tok::utf16_string_literal); 3762 3763 // UTF-16 character constant 3764 if (Char == '\'') 3765 return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result), 3766 tok::utf16_char_constant); 3767 3768 // UTF-16 raw string literal 3769 if (Char == 'R' && LangOpts.CPlusPlus11 && 3770 getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"') 3771 return LexRawStringLiteral(Result, 3772 ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 3773 SizeTmp2, Result), 3774 tok::utf16_string_literal); 3775 3776 if (Char == '8') { 3777 char Char2 = getCharAndSize(CurPtr + SizeTmp, SizeTmp2); 3778 3779 // UTF-8 string literal 3780 if (Char2 == '"') 3781 return LexStringLiteral(Result, 3782 ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 3783 SizeTmp2, Result), 3784 tok::utf8_string_literal); 3785 if (Char2 == '\'' && (LangOpts.CPlusPlus17 || LangOpts.C23)) 3786 return LexCharConstant( 3787 Result, ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 3788 SizeTmp2, Result), 3789 tok::utf8_char_constant); 3790 3791 if (Char2 == 'R' && LangOpts.CPlusPlus11) { 3792 unsigned SizeTmp3; 3793 char Char3 = getCharAndSize(CurPtr + SizeTmp + SizeTmp2, SizeTmp3); 3794 // UTF-8 raw string literal 3795 if (Char3 == '"') { 3796 return LexRawStringLiteral(Result, 3797 ConsumeChar(ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 3798 SizeTmp2, Result), 3799 SizeTmp3, Result), 3800 tok::utf8_string_literal); 3801 } 3802 } 3803 } 3804 } 3805 3806 // treat u like the start of an identifier. 3807 return LexIdentifierContinue(Result, CurPtr); 3808 3809 case 'U': // Identifier (e.g. Uber) or C11/C++11 UTF-32 string literal 3810 // Notify MIOpt that we read a non-whitespace/non-comment token. 3811 MIOpt.ReadToken(); 3812 3813 if (LangOpts.CPlusPlus11 || LangOpts.C11) { 3814 Char = getCharAndSize(CurPtr, SizeTmp); 3815 3816 // UTF-32 string literal 3817 if (Char == '"') 3818 return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result), 3819 tok::utf32_string_literal); 3820 3821 // UTF-32 character constant 3822 if (Char == '\'') 3823 return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result), 3824 tok::utf32_char_constant); 3825 3826 // UTF-32 raw string literal 3827 if (Char == 'R' && LangOpts.CPlusPlus11 && 3828 getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"') 3829 return LexRawStringLiteral(Result, 3830 ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 3831 SizeTmp2, Result), 3832 tok::utf32_string_literal); 3833 } 3834 3835 // treat U like the start of an identifier. 3836 return LexIdentifierContinue(Result, CurPtr); 3837 3838 case 'R': // Identifier or C++0x raw string literal 3839 // Notify MIOpt that we read a non-whitespace/non-comment token. 3840 MIOpt.ReadToken(); 3841 3842 if (LangOpts.CPlusPlus11) { 3843 Char = getCharAndSize(CurPtr, SizeTmp); 3844 3845 if (Char == '"') 3846 return LexRawStringLiteral(Result, 3847 ConsumeChar(CurPtr, SizeTmp, Result), 3848 tok::string_literal); 3849 } 3850 3851 // treat R like the start of an identifier. 3852 return LexIdentifierContinue(Result, CurPtr); 3853 3854 case 'L': // Identifier (Loony) or wide literal (L'x' or L"xyz"). 3855 // Notify MIOpt that we read a non-whitespace/non-comment token. 3856 MIOpt.ReadToken(); 3857 Char = getCharAndSize(CurPtr, SizeTmp); 3858 3859 // Wide string literal. 
3860 if (Char == '"') 3861 return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result), 3862 tok::wide_string_literal); 3863 3864 // Wide raw string literal. 3865 if (LangOpts.CPlusPlus11 && Char == 'R' && 3866 getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"') 3867 return LexRawStringLiteral(Result, 3868 ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 3869 SizeTmp2, Result), 3870 tok::wide_string_literal); 3871 3872 // Wide character constant. 3873 if (Char == '\'') 3874 return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result), 3875 tok::wide_char_constant); 3876 // FALL THROUGH, treating L like the start of an identifier. 3877 [[fallthrough]]; 3878 3879 // C99 6.4.2: Identifiers. 3880 case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': 3881 case 'H': case 'I': case 'J': case 'K': /*'L'*/case 'M': case 'N': 3882 case 'O': case 'P': case 'Q': /*'R'*/case 'S': case 'T': /*'U'*/ 3883 case 'V': case 'W': case 'X': case 'Y': case 'Z': 3884 case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': 3885 case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': 3886 case 'o': case 'p': case 'q': case 'r': case 's': case 't': /*'u'*/ 3887 case 'v': case 'w': case 'x': case 'y': case 'z': 3888 case '_': 3889 // Notify MIOpt that we read a non-whitespace/non-comment token. 3890 MIOpt.ReadToken(); 3891 return LexIdentifierContinue(Result, CurPtr); 3892 3893 case '$': // $ in identifiers. 3894 if (LangOpts.DollarIdents) { 3895 if (!isLexingRawMode()) 3896 Diag(CurPtr-1, diag::ext_dollar_in_identifier); 3897 // Notify MIOpt that we read a non-whitespace/non-comment token. 3898 MIOpt.ReadToken(); 3899 return LexIdentifierContinue(Result, CurPtr); 3900 } 3901 3902 Kind = tok::unknown; 3903 break; 3904 3905 // C99 6.4.4: Character Constants. 3906 case '\'': 3907 // Notify MIOpt that we read a non-whitespace/non-comment token. 3908 MIOpt.ReadToken(); 3909 return LexCharConstant(Result, CurPtr, tok::char_constant); 3910 3911 // C99 6.4.5: String Literals. 3912 case '"': 3913 // Notify MIOpt that we read a non-whitespace/non-comment token. 3914 MIOpt.ReadToken(); 3915 return LexStringLiteral(Result, CurPtr, 3916 ParsingFilename ? tok::header_name 3917 : tok::string_literal); 3918 3919 // C99 6.4.6: Punctuators. 3920 case '?': 3921 Kind = tok::question; 3922 break; 3923 case '[': 3924 Kind = tok::l_square; 3925 break; 3926 case ']': 3927 Kind = tok::r_square; 3928 break; 3929 case '(': 3930 Kind = tok::l_paren; 3931 break; 3932 case ')': 3933 Kind = tok::r_paren; 3934 break; 3935 case '{': 3936 Kind = tok::l_brace; 3937 break; 3938 case '}': 3939 Kind = tok::r_brace; 3940 break; 3941 case '.': 3942 Char = getCharAndSize(CurPtr, SizeTmp); 3943 if (Char >= '0' && Char <= '9') { 3944 // Notify MIOpt that we read a non-whitespace/non-comment token. 3945 MIOpt.ReadToken(); 3946 3947 return LexNumericConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result)); 3948 } else if (LangOpts.CPlusPlus && Char == '*') { 3949 Kind = tok::periodstar; 3950 CurPtr += SizeTmp; 3951 } else if (Char == '.' 
&&
               getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '.') {
      Kind = tok::ellipsis;
      CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                           SizeTmp2, Result);
    } else {
      Kind = tok::period;
    }
    break;
  case '&':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '&') {
      Kind = tok::ampamp;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Char == '=') {
      Kind = tok::ampequal;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Kind = tok::amp;
    }
    break;
  case '*':
    if (getCharAndSize(CurPtr, SizeTmp) == '=') {
      Kind = tok::starequal;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Kind = tok::star;
    }
    break;
  case '+':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '+') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::plusplus;
    } else if (Char == '=') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::plusequal;
    } else {
      Kind = tok::plus;
    }
    break;
  case '-':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '-') { // --
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::minusminus;
    } else if (Char == '>' && LangOpts.CPlusPlus &&
               getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '*') { // C++ ->*
      CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                           SizeTmp2, Result);
      Kind = tok::arrowstar;
    } else if (Char == '>') { // ->
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::arrow;
    } else if (Char == '=') { // -=
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Kind = tok::minusequal;
    } else {
      Kind = tok::minus;
    }
    break;
  case '~':
    Kind = tok::tilde;
    break;
  case '!':
    if (getCharAndSize(CurPtr, SizeTmp) == '=') {
      Kind = tok::exclaimequal;
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Kind = tok::exclaim;
    }
    break;
  case '/':
    // C99 6.4.9: Comments.
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '/') { // Line comment.
      // Even if Line comments are disabled (e.g. in C89 mode), we generally
      // want to lex this as a comment. There is one problem with this though,
      // that in one particular corner case, this can change the behavior of
      // the resultant program. For example, in "foo //**/ bar", C89 would lex
      // this as "foo / bar" and languages with Line comments would lex it as
      // "foo". Check to see if the character after the second slash is a '*'.
      // If so, we will lex that as a "/" instead of the start of a comment.
      // However, we never do this if we are just preprocessing.
      bool TreatAsComment =
          LineComment && (LangOpts.CPlusPlus || !LangOpts.TraditionalCPP);
      if (!TreatAsComment)
        if (!(PP && PP->isPreprocessedOutput()))
          TreatAsComment = getCharAndSize(CurPtr+SizeTmp, SizeTmp2) != '*';

      if (TreatAsComment) {
        if (SkipLineComment(Result, ConsumeChar(CurPtr, SizeTmp, Result),
                            TokAtPhysicalStartOfLine))
          return true; // There is a token to return.

        // It is common for the tokens immediately after a // comment to be
        // whitespace (indentation for the next line). Instead of going through
        // the big switch, handle it efficiently now.
        goto SkipIgnoredUnits;
      }
    }

    if (Char == '*') { // /**/ comment.
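      // (SkipBlockComment advances past the comment body; in comment-retention
      // mode it instead forms a comment token, which is returned below.)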
4054 if (SkipBlockComment(Result, ConsumeChar(CurPtr, SizeTmp, Result), 4055 TokAtPhysicalStartOfLine)) 4056 return true; // There is a token to return. 4057 4058 // We only saw whitespace, so just try again with this lexer. 4059 // (We manually eliminate the tail call to avoid recursion.) 4060 goto LexNextToken; 4061 } 4062 4063 if (Char == '=') { 4064 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 4065 Kind = tok::slashequal; 4066 } else { 4067 Kind = tok::slash; 4068 } 4069 break; 4070 case '%': 4071 Char = getCharAndSize(CurPtr, SizeTmp); 4072 if (Char == '=') { 4073 Kind = tok::percentequal; 4074 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 4075 } else if (LangOpts.Digraphs && Char == '>') { 4076 Kind = tok::r_brace; // '%>' -> '}' 4077 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 4078 } else if (LangOpts.Digraphs && Char == ':') { 4079 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 4080 Char = getCharAndSize(CurPtr, SizeTmp); 4081 if (Char == '%' && getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == ':') { 4082 Kind = tok::hashhash; // '%:%:' -> '##' 4083 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 4084 SizeTmp2, Result); 4085 } else if (Char == '@' && LangOpts.MicrosoftExt) {// %:@ -> #@ -> Charize 4086 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 4087 if (!isLexingRawMode()) 4088 Diag(BufferPtr, diag::ext_charize_microsoft); 4089 Kind = tok::hashat; 4090 } else { // '%:' -> '#' 4091 // We parsed a # character. If this occurs at the start of the line, 4092 // it's actually the start of a preprocessing directive. Callback to 4093 // the preprocessor to handle it. 4094 // TODO: -fpreprocessed mode?? 4095 if (TokAtPhysicalStartOfLine && !LexingRawMode && !Is_PragmaLexer) 4096 goto HandleDirective; 4097 4098 Kind = tok::hash; 4099 } 4100 } else { 4101 Kind = tok::percent; 4102 } 4103 break; 4104 case '<': 4105 Char = getCharAndSize(CurPtr, SizeTmp); 4106 if (ParsingFilename) { 4107 return LexAngledStringLiteral(Result, CurPtr); 4108 } else if (Char == '<') { 4109 char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2); 4110 if (After == '=') { 4111 Kind = tok::lesslessequal; 4112 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 4113 SizeTmp2, Result); 4114 } else if (After == '<' && IsStartOfConflictMarker(CurPtr-1)) { 4115 // If this is actually a '<<<<<<<' version control conflict marker, 4116 // recognize it as such and recover nicely. 4117 goto LexNextToken; 4118 } else if (After == '<' && HandleEndOfConflictMarker(CurPtr-1)) { 4119 // If this is '<<<<' and we're in a Perforce-style conflict marker, 4120 // ignore it. 4121 goto LexNextToken; 4122 } else if (LangOpts.CUDA && After == '<') { 4123 Kind = tok::lesslessless; 4124 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 4125 SizeTmp2, Result); 4126 } else { 4127 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 4128 Kind = tok::lessless; 4129 } 4130 } else if (Char == '=') { 4131 char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2); 4132 if (After == '>') { 4133 if (LangOpts.CPlusPlus20) { 4134 if (!isLexingRawMode()) 4135 Diag(BufferPtr, diag::warn_cxx17_compat_spaceship); 4136 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 4137 SizeTmp2, Result); 4138 Kind = tok::spaceship; 4139 break; 4140 } 4141 // Suggest adding a space between the '<=' and the '>' to avoid a 4142 // change in semantics if this turns up in C++ <=17 mode. 
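        // (In C++20 these characters would instead lex as a single '<=>'
        // token; inserting the space preserves the current '<=' '>'
        // tokenization.)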
4143 if (LangOpts.CPlusPlus && !isLexingRawMode()) { 4144 Diag(BufferPtr, diag::warn_cxx20_compat_spaceship) 4145 << FixItHint::CreateInsertion( 4146 getSourceLocation(CurPtr + SizeTmp, SizeTmp2), " "); 4147 } 4148 } 4149 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 4150 Kind = tok::lessequal; 4151 } else if (LangOpts.Digraphs && Char == ':') { // '<:' -> '[' 4152 if (LangOpts.CPlusPlus11 && 4153 getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == ':') { 4154 // C++0x [lex.pptoken]p3: 4155 // Otherwise, if the next three characters are <:: and the subsequent 4156 // character is neither : nor >, the < is treated as a preprocessor 4157 // token by itself and not as the first character of the alternative 4158 // token <:. 4159 unsigned SizeTmp3; 4160 char After = getCharAndSize(CurPtr + SizeTmp + SizeTmp2, SizeTmp3); 4161 if (After != ':' && After != '>') { 4162 Kind = tok::less; 4163 if (!isLexingRawMode()) 4164 Diag(BufferPtr, diag::warn_cxx98_compat_less_colon_colon); 4165 break; 4166 } 4167 } 4168 4169 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 4170 Kind = tok::l_square; 4171 } else if (LangOpts.Digraphs && Char == '%') { // '<%' -> '{' 4172 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 4173 Kind = tok::l_brace; 4174 } else if (Char == '#' && /*Not a trigraph*/ SizeTmp == 1 && 4175 lexEditorPlaceholder(Result, CurPtr)) { 4176 return true; 4177 } else { 4178 Kind = tok::less; 4179 } 4180 break; 4181 case '>': 4182 Char = getCharAndSize(CurPtr, SizeTmp); 4183 if (Char == '=') { 4184 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 4185 Kind = tok::greaterequal; 4186 } else if (Char == '>') { 4187 char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2); 4188 if (After == '=') { 4189 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 4190 SizeTmp2, Result); 4191 Kind = tok::greatergreaterequal; 4192 } else if (After == '>' && IsStartOfConflictMarker(CurPtr-1)) { 4193 // If this is actually a '>>>>' conflict marker, recognize it as such 4194 // and recover nicely. 4195 goto LexNextToken; 4196 } else if (After == '>' && HandleEndOfConflictMarker(CurPtr-1)) { 4197 // If this is '>>>>>>>' and we're in a conflict marker, ignore it. 4198 goto LexNextToken; 4199 } else if (LangOpts.CUDA && After == '>') { 4200 Kind = tok::greatergreatergreater; 4201 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result), 4202 SizeTmp2, Result); 4203 } else { 4204 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 4205 Kind = tok::greatergreater; 4206 } 4207 } else { 4208 Kind = tok::greater; 4209 } 4210 break; 4211 case '^': 4212 Char = getCharAndSize(CurPtr, SizeTmp); 4213 if (Char == '=') { 4214 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 4215 Kind = tok::caretequal; 4216 } else if (LangOpts.OpenCL && Char == '^') { 4217 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 4218 Kind = tok::caretcaret; 4219 } else { 4220 Kind = tok::caret; 4221 } 4222 break; 4223 case '|': 4224 Char = getCharAndSize(CurPtr, SizeTmp); 4225 if (Char == '=') { 4226 Kind = tok::pipeequal; 4227 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 4228 } else if (Char == '|') { 4229 // If this is '|||||||' and we're in a conflict marker, ignore it. 
4230 if (CurPtr[1] == '|' && HandleEndOfConflictMarker(CurPtr-1)) 4231 goto LexNextToken; 4232 Kind = tok::pipepipe; 4233 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 4234 } else { 4235 Kind = tok::pipe; 4236 } 4237 break; 4238 case ':': 4239 Char = getCharAndSize(CurPtr, SizeTmp); 4240 if (LangOpts.Digraphs && Char == '>') { 4241 Kind = tok::r_square; // ':>' -> ']' 4242 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 4243 } else if (Char == ':') { 4244 Kind = tok::coloncolon; 4245 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 4246 } else { 4247 Kind = tok::colon; 4248 } 4249 break; 4250 case ';': 4251 Kind = tok::semi; 4252 break; 4253 case '=': 4254 Char = getCharAndSize(CurPtr, SizeTmp); 4255 if (Char == '=') { 4256 // If this is '====' and we're in a conflict marker, ignore it. 4257 if (CurPtr[1] == '=' && HandleEndOfConflictMarker(CurPtr-1)) 4258 goto LexNextToken; 4259 4260 Kind = tok::equalequal; 4261 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 4262 } else { 4263 Kind = tok::equal; 4264 } 4265 break; 4266 case ',': 4267 Kind = tok::comma; 4268 break; 4269 case '#': 4270 Char = getCharAndSize(CurPtr, SizeTmp); 4271 if (Char == '#') { 4272 Kind = tok::hashhash; 4273 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 4274 } else if (Char == '@' && LangOpts.MicrosoftExt) { // #@ -> Charize 4275 Kind = tok::hashat; 4276 if (!isLexingRawMode()) 4277 Diag(BufferPtr, diag::ext_charize_microsoft); 4278 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result); 4279 } else { 4280 // We parsed a # character. If this occurs at the start of the line, 4281 // it's actually the start of a preprocessing directive. Callback to 4282 // the preprocessor to handle it. 4283 // TODO: -fpreprocessed mode?? 4284 if (TokAtPhysicalStartOfLine && !LexingRawMode && !Is_PragmaLexer) 4285 goto HandleDirective; 4286 4287 Kind = tok::hash; 4288 } 4289 break; 4290 4291 case '@': 4292 // Objective C support. 4293 if (CurPtr[-1] == '@' && LangOpts.ObjC) 4294 Kind = tok::at; 4295 else 4296 Kind = tok::unknown; 4297 break; 4298 4299 // UCNs (C99 6.4.3, C++11 [lex.charset]p2) 4300 case '\\': 4301 if (!LangOpts.AsmPreprocessor) { 4302 if (uint32_t CodePoint = tryReadUCN(CurPtr, BufferPtr, &Result)) { 4303 if (CheckUnicodeWhitespace(Result, CodePoint, CurPtr)) { 4304 if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine)) 4305 return true; // KeepWhitespaceMode 4306 4307 // We only saw whitespace, so just try again with this lexer. 4308 // (We manually eliminate the tail call to avoid recursion.) 4309 goto LexNextToken; 4310 } 4311 4312 return LexUnicodeIdentifierStart(Result, CodePoint, CurPtr); 4313 } 4314 } 4315 4316 Kind = tok::unknown; 4317 break; 4318 4319 default: { 4320 if (isASCII(Char)) { 4321 Kind = tok::unknown; 4322 break; 4323 } 4324 4325 llvm::UTF32 CodePoint; 4326 4327 // We can't just reset CurPtr to BufferPtr because BufferPtr may point to 4328 // an escaped newline. 4329 --CurPtr; 4330 llvm::ConversionResult Status = 4331 llvm::convertUTF8Sequence((const llvm::UTF8 **)&CurPtr, 4332 (const llvm::UTF8 *)BufferEnd, 4333 &CodePoint, 4334 llvm::strictConversion); 4335 if (Status == llvm::conversionOK) { 4336 if (CheckUnicodeWhitespace(Result, CodePoint, CurPtr)) { 4337 if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine)) 4338 return true; // KeepWhitespaceMode 4339 4340 // We only saw whitespace, so just try again with this lexer. 4341 // (We manually eliminate the tail call to avoid recursion.) 
4342 goto LexNextToken; 4343 } 4344 return LexUnicodeIdentifierStart(Result, CodePoint, CurPtr); 4345 } 4346 4347 if (isLexingRawMode() || ParsingPreprocessorDirective || 4348 PP->isPreprocessedOutput()) { 4349 ++CurPtr; 4350 Kind = tok::unknown; 4351 break; 4352 } 4353 4354 // Non-ASCII characters tend to creep into source code unintentionally. 4355 // Instead of letting the parser complain about the unknown token, 4356 // just diagnose the invalid UTF-8, then drop the character. 4357 Diag(CurPtr, diag::err_invalid_utf8); 4358 4359 BufferPtr = CurPtr+1; 4360 // We're pretending the character didn't exist, so just try again with 4361 // this lexer. 4362 // (We manually eliminate the tail call to avoid recursion.) 4363 goto LexNextToken; 4364 } 4365 } 4366 4367 // Notify MIOpt that we read a non-whitespace/non-comment token. 4368 MIOpt.ReadToken(); 4369 4370 // Update the location of token as well as BufferPtr. 4371 FormTokenWithChars(Result, CurPtr, Kind); 4372 return true; 4373 4374 HandleDirective: 4375 // We parsed a # character and it's the start of a preprocessing directive. 4376 4377 FormTokenWithChars(Result, CurPtr, tok::hash); 4378 PP->HandleDirective(Result); 4379 4380 if (PP->hadModuleLoaderFatalFailure()) 4381 // With a fatal failure in the module loader, we abort parsing. 4382 return true; 4383 4384 // We parsed the directive; lex a token with the new state. 4385 return false; 4386 4387 LexNextToken: 4388 Result.clearFlag(Token::NeedsCleaning); 4389 goto LexStart; 4390 } 4391 4392 const char *Lexer::convertDependencyDirectiveToken( 4393 const dependency_directives_scan::Token &DDTok, Token &Result) { 4394 const char *TokPtr = BufferStart + DDTok.Offset; 4395 Result.startToken(); 4396 Result.setLocation(getSourceLocation(TokPtr)); 4397 Result.setKind(DDTok.Kind); 4398 Result.setFlag((Token::TokenFlags)DDTok.Flags); 4399 Result.setLength(DDTok.Length); 4400 BufferPtr = TokPtr + DDTok.Length; 4401 return TokPtr; 4402 } 4403 4404 bool Lexer::LexDependencyDirectiveToken(Token &Result) { 4405 assert(isDependencyDirectivesLexer()); 4406 4407 using namespace dependency_directives_scan; 4408 4409 while (NextDepDirectiveTokenIndex == DepDirectives.front().Tokens.size()) { 4410 if (DepDirectives.front().Kind == pp_eof) 4411 return LexEndOfFile(Result, BufferEnd); 4412 if (DepDirectives.front().Kind == tokens_present_before_eof) 4413 MIOpt.ReadToken(); 4414 NextDepDirectiveTokenIndex = 0; 4415 DepDirectives = DepDirectives.drop_front(); 4416 } 4417 4418 const dependency_directives_scan::Token &DDTok = 4419 DepDirectives.front().Tokens[NextDepDirectiveTokenIndex++]; 4420 if (NextDepDirectiveTokenIndex > 1 || DDTok.Kind != tok::hash) { 4421 // Read something other than a preprocessor directive hash. 4422 MIOpt.ReadToken(); 4423 } 4424 4425 if (ParsingFilename && DDTok.is(tok::less)) { 4426 BufferPtr = BufferStart + DDTok.Offset; 4427 LexAngledStringLiteral(Result, BufferPtr + 1); 4428 if (Result.isNot(tok::header_name)) 4429 return true; 4430 // Advance the index of lexed tokens. 
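    // (LexAngledStringLiteral consumed characters directly from the buffer,
    // so skip every dependency-directive token that now lies behind the
    // updated BufferPtr.)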
4431 while (true) { 4432 const dependency_directives_scan::Token &NextTok = 4433 DepDirectives.front().Tokens[NextDepDirectiveTokenIndex]; 4434 if (BufferStart + NextTok.Offset >= BufferPtr) 4435 break; 4436 ++NextDepDirectiveTokenIndex; 4437 } 4438 return true; 4439 } 4440 4441 const char *TokPtr = convertDependencyDirectiveToken(DDTok, Result); 4442 4443 if (Result.is(tok::hash) && Result.isAtStartOfLine()) { 4444 PP->HandleDirective(Result); 4445 return false; 4446 } 4447 if (Result.is(tok::raw_identifier)) { 4448 Result.setRawIdentifierData(TokPtr); 4449 if (!isLexingRawMode()) { 4450 IdentifierInfo *II = PP->LookUpIdentifierInfo(Result); 4451 if (II->isHandleIdentifierCase()) 4452 return PP->HandleIdentifier(Result); 4453 } 4454 return true; 4455 } 4456 if (Result.isLiteral()) { 4457 Result.setLiteralData(TokPtr); 4458 return true; 4459 } 4460 if (Result.is(tok::colon)) { 4461 // Convert consecutive colons to 'tok::coloncolon'. 4462 if (*BufferPtr == ':') { 4463 assert(DepDirectives.front().Tokens[NextDepDirectiveTokenIndex].is( 4464 tok::colon)); 4465 ++NextDepDirectiveTokenIndex; 4466 Result.setKind(tok::coloncolon); 4467 } 4468 return true; 4469 } 4470 if (Result.is(tok::eod)) 4471 ParsingPreprocessorDirective = false; 4472 4473 return true; 4474 } 4475 4476 bool Lexer::LexDependencyDirectiveTokenWhileSkipping(Token &Result) { 4477 assert(isDependencyDirectivesLexer()); 4478 4479 using namespace dependency_directives_scan; 4480 4481 bool Stop = false; 4482 unsigned NestedIfs = 0; 4483 do { 4484 DepDirectives = DepDirectives.drop_front(); 4485 switch (DepDirectives.front().Kind) { 4486 case pp_none: 4487 llvm_unreachable("unexpected 'pp_none'"); 4488 case pp_include: 4489 case pp___include_macros: 4490 case pp_define: 4491 case pp_undef: 4492 case pp_import: 4493 case pp_pragma_import: 4494 case pp_pragma_once: 4495 case pp_pragma_push_macro: 4496 case pp_pragma_pop_macro: 4497 case pp_pragma_include_alias: 4498 case pp_pragma_system_header: 4499 case pp_include_next: 4500 case decl_at_import: 4501 case cxx_module_decl: 4502 case cxx_import_decl: 4503 case cxx_export_module_decl: 4504 case cxx_export_import_decl: 4505 case tokens_present_before_eof: 4506 break; 4507 case pp_if: 4508 case pp_ifdef: 4509 case pp_ifndef: 4510 ++NestedIfs; 4511 break; 4512 case pp_elif: 4513 case pp_elifdef: 4514 case pp_elifndef: 4515 case pp_else: 4516 if (!NestedIfs) { 4517 Stop = true; 4518 } 4519 break; 4520 case pp_endif: 4521 if (!NestedIfs) { 4522 Stop = true; 4523 } else { 4524 --NestedIfs; 4525 } 4526 break; 4527 case pp_eof: 4528 NextDepDirectiveTokenIndex = 0; 4529 return LexEndOfFile(Result, BufferEnd); 4530 } 4531 } while (!Stop); 4532 4533 const dependency_directives_scan::Token &DDTok = 4534 DepDirectives.front().Tokens.front(); 4535 assert(DDTok.is(tok::hash)); 4536 NextDepDirectiveTokenIndex = 1; 4537 4538 convertDependencyDirectiveToken(DDTok, Result); 4539 return false; 4540 } 4541