blob: df7539bc022b3af3c18378ebd87606b65a4e0d4b [file] [log] [blame]
//===--- Lexer.cpp - C Language Family Lexer ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License.  See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Lexer and LexerToken interfaces.
//
//===----------------------------------------------------------------------===//
//
// TODO: GCC Diagnostics emitted by the lexer:
// PEDWARN: (form feed|vertical tab) in preprocessing directive
//
// Universal characters, unicode, char mapping:
// WARNING: `%.*s' is not in NFKC
// WARNING: `%.*s' is not in NFC
//
// Other:
// ERROR  : attempt to use poisoned "%s"
//
// TODO: Options to support:
// -fexec-charset,-fwide-exec-charset
//
//===----------------------------------------------------------------------===//
28
29#include "clang/Lex/Lexer.h"
30#include "clang/Lex/Preprocessor.h"
31#include "clang/Basic/Diagnostic.h"
32#include "clang/Basic/SourceBuffer.h"
33#include "clang/Basic/SourceLocation.h"
34#include "llvm/Config/alloca.h"
35#include <cassert>
36#include <cctype>
37#include <iostream>
38using namespace llvm;
39using namespace clang;
40
41static void InitCharacterInfo();
42
/// Lexer constructor - Create a new lexer for the specified source buffer,
/// registered under file ID 'fileid', lexing on behalf of preprocessor 'pp'.
/// The buffer must be NUL-terminated (asserted below); the lexer relies on
/// that NUL as an end-of-buffer sentinel instead of bounds-checking every
/// character.
Lexer::Lexer(const SourceBuffer *File, unsigned fileid, Preprocessor &pp)
  : BufferPtr(File->getBufferStart()), BufferStart(BufferPtr),
    BufferEnd(File->getBufferEnd()), InputFile(File), CurFileID(fileid), PP(pp),
    Features(PP.getLangOptions()) {
  // Make sure the character classification tables exist before lexing starts.
  InitCharacterInfo();

  assert(BufferEnd[0] == 0 &&
         "We assume that the input buffer has a null character at the end"
         " to simplify lexing!");

  // Start of the file is a start of line.
  IsAtStartOfLine = true;

  // We are not after parsing a #.
  ParsingPreprocessorDirective = false;

  // We are not after parsing #include.
  ParsingFilename = false;
}
62
63//===----------------------------------------------------------------------===//
64// LexerToken implementation.
65//===----------------------------------------------------------------------===//
66
67/// getSourceLocation - Return a source location identifier for the specified
68/// offset in the current file.
69SourceLocation LexerToken::getSourceLocation() const {
70 if (TheLexer)
71 return TheLexer->getSourceLocation(Start);
72 return SourceLocation();
73}
74
75
76/// dump - Print the token to stderr, used for debugging.
77///
78void LexerToken::dump(bool DumpFlags) const {
79 std::cerr << clang::tok::getTokenName(Kind) << " '";
80
81 if (needsCleaning()) {
82 if (getLexer())
83 std::cerr << getLexer()->getSpelling(*this);
84 else {
85 // FIXME: expansion from macros clears location info. Testcase:
86 // #define TWELVE 1\ <whitespace only>
87 // 2
88 // TWELVE
89 std::cerr << "*unspelled*" << std::string(Start, End);
90 }
91 } else
92 std::cerr << std::string(Start, End);
93 std::cerr << "'";
94
95 if (DumpFlags) {
96 std::cerr << "\t";
97 if (isAtStartOfLine())
98 std::cerr << " [StartOfLine]";
99 if (hasLeadingSpace())
100 std::cerr << " [LeadingSpace]";
101 if (needsCleaning())
102 std::cerr << " [Spelling='" << std::string(Start, End) << "']";
103 }
104}
105
106//===----------------------------------------------------------------------===//
107// Character information.
108//===----------------------------------------------------------------------===//
109
// CharInfo - Classification flags for each of the 256 possible byte values,
// filled in lazily by InitCharacterInfo().  Each entry is a bitwise-OR of the
// CHAR_* flags below; bytes with no special meaning stay 0.
static unsigned char CharInfo[256];

enum {
  CHAR_HORZ_WS = 0x01,  // ' ', '\t', '\f', '\v'.  Note, no '\0'
  CHAR_VERT_WS = 0x02,  // '\r', '\n'
  CHAR_LETTER  = 0x04,  // a-z,A-Z
  CHAR_NUMBER  = 0x08,  // 0-9
  CHAR_UNDER   = 0x10,  // _
  CHAR_PERIOD  = 0x20   // .
};
120
/// InitCharacterInfo - Populate the CharInfo classification table on first
/// use; subsequent calls are no-ops.
/// NOTE(review): the init guard is not thread-safe — fine while lexers are
/// only constructed from a single thread; confirm before going multithreaded.
static void InitCharacterInfo() {
  static bool isInited = false;
  if (isInited) return;
  isInited = true;

  // Initialize the CharInfo table.
  // TODO: statically initialize this.
  CharInfo[(int)' '] = CharInfo[(int)'\t'] =
  CharInfo[(int)'\f'] = CharInfo[(int)'\v'] = CHAR_HORZ_WS;
  CharInfo[(int)'\n'] = CharInfo[(int)'\r'] = CHAR_VERT_WS;

  CharInfo[(int)'_'] = CHAR_UNDER;
  // Letters: mark the lower-case entry and its upper-case counterpart at once.
  for (unsigned i = 'a'; i <= 'z'; ++i)
    CharInfo[i] = CharInfo[i+'A'-'a'] = CHAR_LETTER;
  for (unsigned i = '0'; i <= '9'; ++i)
    CharInfo[i] = CHAR_NUMBER;
}
138
139/// isIdentifierBody - Return true if this is the body character of an
140/// identifier, which is [a-zA-Z0-9_].
141static inline bool isIdentifierBody(unsigned char c) {
142 return CharInfo[c] & (CHAR_LETTER|CHAR_NUMBER|CHAR_UNDER);
143}
144
145/// isHorizontalWhitespace - Return true if this character is horizontal
146/// whitespace: ' ', '\t', '\f', '\v'. Note that this returns false for '\0'.
147static inline bool isHorizontalWhitespace(unsigned char c) {
148 return CharInfo[c] & CHAR_HORZ_WS;
149}
150
151/// isWhitespace - Return true if this character is horizontal or vertical
152/// whitespace: ' ', '\t', '\f', '\v', '\n', '\r'. Note that this returns false
153/// for '\0'.
154static inline bool isWhitespace(unsigned char c) {
155 return CharInfo[c] & (CHAR_HORZ_WS|CHAR_VERT_WS);
156}
157
158/// isNumberBody - Return true if this is the body character of an
159/// preprocessing number, which is [a-zA-Z0-9_.].
160static inline bool isNumberBody(unsigned char c) {
161 return CharInfo[c] & (CHAR_LETTER|CHAR_NUMBER|CHAR_UNDER|CHAR_PERIOD);
162}
163
164//===----------------------------------------------------------------------===//
165// Diagnostics forwarding code.
166//===----------------------------------------------------------------------===//
167
168/// getSourceLocation - Return a source location identifier for the specified
169/// offset in the current file.
170SourceLocation Lexer::getSourceLocation(const char *Loc) const {
171 assert(Loc >= InputFile->getBufferStart() && Loc <= InputFile->getBufferEnd()
172 && "Location out of range for this buffer!");
173 return SourceLocation(CurFileID, Loc-InputFile->getBufferStart());
174}
175
176
/// Diag - Forwarding function for diagnostics.  This translates a source
/// position in the current buffer into a SourceLocation object for rendering.
/// Returns the result of Preprocessor::Diag; callers in this file treat a
/// true return as "stop lexing" (see the `if (Diag(...)) return true;` uses).
bool Lexer::Diag(const char *Loc, unsigned DiagID,
                 const std::string &Msg) const {
  return PP.Diag(getSourceLocation(Loc), DiagID, Msg);
}
183
184//===----------------------------------------------------------------------===//
185// Trigraph and Escaped Newline Handling Code.
186//===----------------------------------------------------------------------===//
187
/// GetTrigraphCharForLetter - Given a character that occurs after a ?? pair,
/// return the decoded trigraph letter it corresponds to, or '\0' if nothing.
static char GetTrigraphCharForLetter(char Letter) {
  // The nine trigraph third-characters and their decoded forms, kept in
  // matching positions.
  static const char TrigraphLetters[] = "=)(!'>/<-";
  static const char TrigraphResults[] = "#][|^}\\{~";
  for (unsigned i = 0; TrigraphLetters[i]; ++i)
    if (TrigraphLetters[i] == Letter)
      return TrigraphResults[i];
  return 0;
}
204
/// DecodeTrigraphChar - If the specified character is a legal trigraph when
/// prefixed with ??, emit a diagnostic about the trigraph (whether trigraphs
/// are enabled or not) and return the decoded character, or 0 if trigraphs
/// are disabled (so the caller treats the '?' as an ordinary character).
/// CP points at the third character of the "??X" sequence; CP-2 is used as
/// the diagnostic location.  Callers pass L == 0 to suppress diagnostics
/// entirely (see getCharAndSizeSlow, which passes `Tok ? this : 0`).
static char DecodeTrigraphChar(const char *CP, Lexer *L) {
  char Res = GetTrigraphCharForLetter(*CP);
  if (Res && L) {
    if (!L->getFeatures().Trigraphs) {
      // Trigraphs disabled: warn that it was ignored and decode to nothing.
      L->Diag(CP-2, diag::trigraph_ignored);
      return 0;
    } else {
      L->Diag(CP-2, diag::trigraph_converted, std::string()+Res);
    }
  }
  return Res;
}
221
/// getCharAndSizeSlow - Peek a single 'character' from the specified buffer,
/// get its size, and return it.  This is tricky in several cases:
///   1. If currently at the start of a trigraph, we warn about the trigraph,
///      then either return the trigraph (skipping 3 chars) or the '?',
///      depending on whether trigraphs are enabled or not.
///   2. If this is an escaped newline (potentially with whitespace between
///      the backslash and newline), implicitly skip the newline and return
///      the char after it.
///   3. If this is a UCN, return it.  FIXME: for C++?
///
/// This handles the slow/uncommon case of the getCharAndSize method.  Here we
/// know that we can accumulate into Size, and that we have already incremented
/// Ptr by Size bytes.
///
/// If Tok is null, no diagnostics are emitted and no token flags are set
/// (DecodeTrigraphChar is called with a null Lexer in that case).
///
/// When this method is updated, getCharAndSizeSlowNoWarn (below) should be
/// updated to match.
///
char Lexer::getCharAndSizeSlow(const char *Ptr, unsigned &Size,
                               LexerToken *Tok) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
Slash:
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters followed by a newline.
    {
      unsigned SizeTmp = 0;
      do {
        ++SizeTmp;
        if (Ptr[SizeTmp-1] == '\n' || Ptr[SizeTmp-1] == '\r') {
          // Remember that this token needs to be cleaned.
          if (Tok) Tok->SetFlag(LexerToken::NeedsCleaning);

          // Warn if there was whitespace between the backslash and newline.
          if (SizeTmp != 1 && Tok)
            Diag(Ptr, diag::backslash_newline_space);

          // If this is a \r\n or \n\r, skip the newlines.
          if ((Ptr[SizeTmp] == '\r' || Ptr[SizeTmp] == '\n') &&
              Ptr[SizeTmp-1] != Ptr[SizeTmp])
            ++SizeTmp;

          // Found backslash<whitespace><newline>.  Parse the char after it.
          Size += SizeTmp;
          Ptr += SizeTmp;
          // Use slow version to accumulate a correct size field; the char
          // after an escaped newline may itself be a trigraph or another
          // escaped newline.
          return getCharAndSizeSlow(Ptr, Size, Tok);
        }
      } while (isWhitespace(Ptr[SizeTmp]));
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), emit
    // a trigraph warning.  If so, and if trigraphs are enabled, return it.
    if (char C = DecodeTrigraphChar(Ptr+2, Tok ? this : 0)) {
      // Remember that this token needs to be cleaned.
      if (Tok) Tok->SetFlag(LexerToken::NeedsCleaning);

      Ptr += 3;
      Size += 3;
      // ??/ decodes to '\\', which may in turn start an escaped newline.
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}
299
/// getCharAndSizeSlowNoWarn - Handle the slow/uncommon case of the
/// getCharAndSizeNoWarn method.  Here we know that we can accumulate into
/// Size, and that we have already incremented Ptr by Size bytes.
///
/// This is a diagnostic-free mirror of Lexer::getCharAndSizeSlow, used when
/// re-lexing characters for spelling (the warnings were already emitted the
/// first time through).  Unlike the warning version, trigraphs are only
/// decoded when Features.Trigraphs is set, since there is no Lexer here to
/// consult.
///
/// When this method is updated, getCharAndSizeSlow (above) should be updated
/// to match.
static char getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
                                     const LangOptions &Features) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
Slash:
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters followed by a newline.
    {
      unsigned SizeTmp = 0;
      do {
        ++SizeTmp;
        if (Ptr[SizeTmp-1] == '\n' || Ptr[SizeTmp-1] == '\r') {

          // If this is a \r\n or \n\r, skip the newlines.
          if ((Ptr[SizeTmp] == '\r' || Ptr[SizeTmp] == '\n') &&
              Ptr[SizeTmp-1] != Ptr[SizeTmp])
            ++SizeTmp;

          // Found backslash<whitespace><newline>.  Parse the char after it.
          Size += SizeTmp;
          Ptr += SizeTmp;

          // Use slow version to accumulate a correct size field.
          return getCharAndSizeSlowNoWarn(Ptr, Size, Features);
        }
      } while (isWhitespace(Ptr[SizeTmp]));
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (Features.Trigraphs && Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), return
    // it.
    if (char C = GetTrigraphCharForLetter(Ptr[2])) {
      Ptr += 3;
      Size += 3;
      // ??/ decodes to '\\', which may in turn start an escaped newline.
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}
358
359/// getCharAndSizeNoWarn - Like the getCharAndSize method, but does not ever
360/// emit a warning.
361static inline char getCharAndSizeNoWarn(const char *Ptr, unsigned &Size,
362 const LangOptions &Features) {
363 // If this is not a trigraph and not a UCN or escaped newline, return
364 // quickly.
365 if (Ptr[0] != '?' && Ptr[0] != '\\') {
366 Size = 1;
367 return *Ptr;
368 }
369
370 Size = 0;
371 return getCharAndSizeSlowNoWarn(Ptr, Size, Features);
372}
373
374
/// getSpelling() - Return the 'spelling' of this token.  The spelling of a
/// token are the characters used to represent the token in the source file
/// after trigraph expansion and escaped-newline folding.  In particular, this
/// wants to get the true, uncanonicalized, spelling of things like digraphs
/// UCNs, etc.
std::string Lexer::getSpelling(const LexerToken &Tok,
                               const LangOptions &Features) {
  assert(Tok.getStart() <= Tok.getEnd() && "Token character range is bogus!");

  // If this token contains nothing interesting, return it directly.
  if (!Tok.needsCleaning())
    return std::string(Tok.getStart(), Tok.getEnd());

  // Otherwise, hard case, relex the characters into the string.
  std::string Result;
  Result.reserve(Tok.getEnd()-Tok.getStart());

  // Fold each trigraph/escaped newline down to the single character it
  // denotes; CharSize tells us how far to advance over the raw text.
  for (const char *Ptr = Tok.getStart(), *End = Tok.getEnd(); Ptr != End; ) {
    unsigned CharSize;
    Result.push_back(getCharAndSizeNoWarn(Ptr, CharSize, Features));
    Ptr += CharSize;
  }
  // Cleaning always shrinks the token; equal lengths mean the flag was wrong.
  assert(Result.size() != unsigned(Tok.getEnd()-Tok.getStart()) &&
         "NeedsCleaning flag set on something that didn't need cleaning!");
  return Result;
}
401
402/// getSpelling - This method is used to get the spelling of a token into a
403/// preallocated buffer, instead of as an std::string. The caller is required
404/// to allocate enough space for the token, which is guaranteed to be at most
405/// Tok.End-Tok.Start bytes long. The actual length of the token is returned.
406unsigned Lexer::getSpelling(const LexerToken &Tok, char *Buffer,
407 const LangOptions &Features) {
408 assert(Tok.getStart() <= Tok.getEnd() && "Token character range is bogus!");
409
410 // If this token contains nothing interesting, return it directly.
411 if (!Tok.needsCleaning()) {
412 unsigned Size = Tok.getEnd()-Tok.getStart();
413 memcpy(Buffer, Tok.getStart(), Size);
414 return Size;
415 }
416 // Otherwise, hard case, relex the characters into the string.
417 std::string Result;
418 Result.reserve(Tok.getEnd()-Tok.getStart());
419
420 char *OutBuf = Buffer;
421 for (const char *Ptr = Tok.getStart(), *End = Tok.getEnd(); Ptr != End; ) {
422 unsigned CharSize;
423 *OutBuf++ = getCharAndSizeNoWarn(Ptr, CharSize, Features);
424 Ptr += CharSize;
425 }
426 assert(OutBuf-Buffer != Tok.getEnd()-Tok.getStart() &&
427 "NeedsCleaning flag set on something that didn't need cleaning!");
428
429 return OutBuf-Buffer;
430}
431
432
433//===----------------------------------------------------------------------===//
434// Helper methods for lexing.
435//===----------------------------------------------------------------------===//
436
/// LexIdentifier - Lex the remainder of an identifier.  On entry CurPtr
/// points just past the first character, which has already been matched as
/// [_A-Za-z$].  Finishes the token, resolves its IdentifierInfo, and hands it
/// to the preprocessor; returns true if a diagnostic aborted lexing.
bool Lexer::LexIdentifier(LexerToken &Result, const char *CurPtr) {
  // Match [_A-Za-z0-9]*, we have already matched [_A-Za-z$]
  unsigned Size;
  unsigned char C = *CurPtr++;
  while (isIdentifierBody(C)) {
    C = *CurPtr++;
  }
  --CurPtr;   // Back up over the skipped character.

  // Fast path, no $,\,? in identifier found.  '\' might be an escaped newline
  // or UCN, and ? might be a trigraph for '\', an escaped newline or UCN.
  // FIXME: universal chars.
  if (C != '\\' && C != '?' && (C != '$' || !Features.DollarIdents)) {
FinishIdentifier:
    // NOTE: this label is also reached by goto from the slow path below.
    Result.SetEnd(BufferPtr = CurPtr);
    Result.SetKind(tok::identifier);

    // Look up this token, see if it is a macro, or if it is a language keyword.
    const char *SpelledTokStart, *SpelledTokEnd;
    if (!Result.needsCleaning()) {
      // No cleaning needed, just use the characters from the lexed buffer.
      SpelledTokStart = Result.getStart();
      SpelledTokEnd = Result.getEnd();
    } else {
      // Cleaning needed, alloca a buffer, clean into it, then use the buffer.
      // The cleaned form is never longer than the raw form, so End-Start
      // bytes suffice.  (This inner 'Size' shadows the outer declaration.)
      char *TmpBuf = (char*)alloca(Result.getEnd()-Result.getStart());
      unsigned Size = getSpelling(Result, TmpBuf);
      SpelledTokStart = TmpBuf;
      SpelledTokEnd = TmpBuf+Size;
    }

    Result.SetIdentifierInfo(PP.getIdentifierInfo(SpelledTokStart,
                                                  SpelledTokEnd));
    return PP.HandleIdentifier(Result);
  }

  // Otherwise, $,\,? in identifier found.  Enter slower path.

  C = getCharAndSize(CurPtr, Size);
  while (1) {
    if (C == '$') {
      // If we hit a $ and they are not supported in identifiers, we are done.
      if (!Features.DollarIdents) goto FinishIdentifier;

      // Otherwise, emit a diagnostic and continue.
      if (Diag(CurPtr, diag::ext_dollar_in_identifier))
        return true;
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      C = getCharAndSize(CurPtr, Size);
      continue;
    } else if (!isIdentifierBody(C)) { // FIXME: universal chars.
      // Found end of identifier.
      goto FinishIdentifier;
    }

    // Otherwise, this character is good, consume it.
    CurPtr = ConsumeChar(CurPtr, Size, Result);

    // Fast-consume any further plain identifier characters before rechecking
    // for '$'/'\'/'?' at the top of the outer loop.
    C = getCharAndSize(CurPtr, Size);
    while (isIdentifierBody(C)) { // FIXME: universal chars.
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      C = getCharAndSize(CurPtr, Size);
    }
  }
}
502
503
/// LexNumericConstant - Lex the remainder of an integer or floating point
/// constant.  From[-1] is the first character lexed.  Return the end of the
/// constant.  Returns true if a diagnostic aborted lexing, false otherwise.
bool Lexer::LexNumericConstant(LexerToken &Result, const char *CurPtr) {
  unsigned Size;
  char C = getCharAndSize(CurPtr, Size);
  char PrevCh = 0;
  while (isNumberBody(C)) { // FIXME: universal chars?
    CurPtr = ConsumeChar(CurPtr, Size, Result);
    PrevCh = C;
    C = getCharAndSize(CurPtr, Size);
  }

  // If we fell out, check for a sign, due to 1e+12.  If we have one, continue.
  // Recursing re-enters the number-body loop for the digits after the sign.
  if ((C == '-' || C == '+') && (PrevCh == 'E' || PrevCh == 'e'))
    return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));

  // If we have a hex FP constant, continue (p/P exponent, e.g. 0x1.8p+3).
  if (Features.HexFloats &&
      (C == '-' || C == '+') && (PrevCh == 'P' || PrevCh == 'p'))
    return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));

  Result.SetKind(tok::numeric_constant);

  // Update the end of token position as well as the BufferPtr instance var.
  Result.SetEnd(BufferPtr = CurPtr);
  return false;
}
532
/// LexStringLiteral - Lex the remainder of a string literal, after having
/// lexed either " or L".  Returns true if a diagnostic aborted lexing.
bool Lexer::LexStringLiteral(LexerToken &Result, const char *CurPtr) {
  const char *NulCharacter = 0; // Does this string contain the \0 character?

  char C = getAndAdvanceChar(CurPtr, Result);
  while (C != '"') {
    // Skip escaped characters.
    if (C == '\\') {
      // Skip the escaped character.
      C = getAndAdvanceChar(CurPtr, Result);
    } else if (C == '\n' || C == '\r' ||             // Newline.
               (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
      // Unterminated string: diagnose, then re-lex from just before the
      // terminator so the newline/EOF is processed normally.
      if (Diag(Result.getStart(), diag::err_unterminated_string))
        return true;
      BufferPtr = CurPtr-1;
      return LexTokenInternal(Result);
    } else if (C == 0) {
      // An embedded NUL that is not end-of-buffer; diagnosed below once the
      // literal terminates.
      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  if (NulCharacter && Diag(NulCharacter, diag::null_in_string))
    return true;

  Result.SetKind(tok::string_literal);

  // Update the end of token position as well as the BufferPtr instance var.
  Result.SetEnd(BufferPtr = CurPtr);
  return false;
}
565
/// LexAngledStringLiteral - Lex the remainder of an angled string literal,
/// after having lexed the '<' character.  This is used for #include filenames.
/// Structurally identical to LexStringLiteral except for the terminator and
/// the resulting token kind.
bool Lexer::LexAngledStringLiteral(LexerToken &Result, const char *CurPtr) {
  const char *NulCharacter = 0; // Does this string contain the \0 character?

  char C = getAndAdvanceChar(CurPtr, Result);
  while (C != '>') {
    // Skip escaped characters.
    if (C == '\\') {
      // Skip the escaped character.
      C = getAndAdvanceChar(CurPtr, Result);
    } else if (C == '\n' || C == '\r' ||             // Newline.
               (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
      // Unterminated filename: diagnose, then re-lex from just before the
      // terminator so the newline/EOF is processed normally.
      if (Diag(Result.getStart(), diag::err_unterminated_string))
        return true;
      BufferPtr = CurPtr-1;
      return LexTokenInternal(Result);
    } else if (C == 0) {
      // Embedded NUL; diagnosed below once the literal terminates.
      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  if (NulCharacter && Diag(NulCharacter, diag::null_in_string))
    return true;

  Result.SetKind(tok::angle_string_literal);

  // Update the end of token position as well as the BufferPtr instance var.
  Result.SetEnd(BufferPtr = CurPtr);
  return false;
}
598
599
/// LexCharConstant - Lex the remainder of a character constant, after having
/// lexed either ' or L'.  Returns true if a diagnostic aborted lexing.
bool Lexer::LexCharConstant(LexerToken &Result, const char *CurPtr) {
  const char *NulCharacter = 0; // Does this character contain the \0 character?

  // Handle the common case of 'x' and '\y' efficiently.
  char C = getAndAdvanceChar(CurPtr, Result);
  if (C == '\'') {
    // '' is invalid: diagnose, then continue lexing after it.
    if (Diag(Result.getStart(), diag::err_empty_character))
      return true;
    BufferPtr = CurPtr;
    return LexTokenInternal(Result);
  } else if (C == '\\') {
    // Skip the escaped character.
    // FIXME: UCN's.
    C = getAndAdvanceChar(CurPtr, Result);
  }

  if (C && C != '\n' && C != '\r' && CurPtr[0] == '\'') {
    // Common case: exactly one (possibly escaped) character then the quote.
    ++CurPtr;
  } else {
    // Fall back on generic code for embedded nulls, newlines, wide chars.
    do {
      // Skip escaped characters.
      if (C == '\\') {
        // Skip the escaped character.
        C = getAndAdvanceChar(CurPtr, Result);
      } else if (C == '\n' || C == '\r' ||             // Newline.
                 (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
        // Unterminated constant: diagnose, then re-lex from just before the
        // terminator so the newline/EOF is processed normally.
        if (Diag(Result.getStart(), diag::err_unterminated_char))
          return true;
        BufferPtr = CurPtr-1;
        return LexTokenInternal(Result);
      } else if (C == 0) {
        // Embedded NUL; diagnosed below once the constant terminates.
        NulCharacter = CurPtr-1;
      }
      C = getAndAdvanceChar(CurPtr, Result);
    } while (C != '\'');
  }

  if (NulCharacter && Diag(NulCharacter, diag::null_in_char))
    return true;

  Result.SetKind(tok::char_constant);

  // Update the end of token position as well as the BufferPtr instance var.
  Result.SetEnd(BufferPtr = CurPtr);
  return false;
}
649
/// SkipWhitespace - Efficiently skip over a series of whitespace characters.
/// Update BufferPtr to point to the next non-whitespace character and return.
/// Sets the StartOfLine/LeadingSpace flags on Result as appropriate, and may
/// chain directly into comment skipping.  Returns true if a diagnostic
/// (from a chained comment skip) aborted lexing.
bool Lexer::SkipWhitespace(LexerToken &Result, const char *CurPtr) {
  // Whitespace - Skip it, then return the token after the whitespace.
  unsigned char Char = *CurPtr;  // Skip consecutive spaces efficiently.
  while (1) {
    // Skip horizontal whitespace very aggressively.
    while (isHorizontalWhitespace(Char))
      Char = *++CurPtr;

    // Otherwise if we see something other than whitespace, we're done.
    if (Char != '\n' && Char != '\r')
      break;

    if (ParsingPreprocessorDirective) {
      // End of preprocessor directive line, let LexTokenInternal handle this.
      BufferPtr = CurPtr;
      return false;
    }

    // ok, but handle newline.
    // The returned token is at the start of the line.
    Result.SetFlag(LexerToken::StartOfLine);
    // No leading whitespace seen so far.
    Result.ClearFlag(LexerToken::LeadingSpace);
    Char = *++CurPtr;
  }

  // If this isn't immediately after a newline, there is leading space.
  char PrevChar = CurPtr[-1];
  if (PrevChar != '\n' && PrevChar != '\r')
    Result.SetFlag(LexerToken::LeadingSpace);

  // If the next token is obviously a // or /* */ comment, skip it efficiently
  // too (without going through the big switch stmt).
  if (Char == '/' && CurPtr[1] == '/') {
    Result.SetStart(CurPtr);
    return SkipBCPLComment(Result, CurPtr+1);
  }
  if (Char == '/' && CurPtr[1] == '*') {
    Result.SetStart(CurPtr);
    return SkipBlockComment(Result, CurPtr+2);
  }
  BufferPtr = CurPtr;
  return false;
}
696
/// SkipBCPLComment - We have just read the // characters from input.  Skip
/// until we find the newline character that terminates the comment.  Then
/// update BufferPtr and return.  Returns true if a diagnostic aborted lexing.
bool Lexer::SkipBCPLComment(LexerToken &Result, const char *CurPtr) {
  // If BCPL comments aren't explicitly enabled for this language, emit an
  // extension warning.
  if (!Features.BCPLComment) {
    if (Diag(Result.getStart(), diag::ext_bcpl_comment))
      return true;

    // Mark them enabled so we only emit one warning for this translation
    // unit.
    Features.BCPLComment = true;
  }

  // Scan over the body of the comment.  The common case, when scanning, is that
  // the comment contains normal ascii characters with nothing interesting in
  // them.  As such, optimize for this case with the inner loop.
  char C;
  do {
    C = *CurPtr;
    // FIXME: just scan for a \n or \r character.  If we find a \n character,
    // scan backwards, checking to see if it's an escaped newline, like we do
    // for block comments.

    // Skip over characters in the fast loop.
    while (C != 0 &&                // Potentially EOF.
           C != '\\' &&             // Potentially escaped newline.
           C != '?' &&              // Potentially trigraph.
           C != '\n' && C != '\r')  // Newline or DOS-style newline.
      C = *++CurPtr;

    // If this is a newline, we're done.
    if (C == '\n' || C == '\r')
      break;  // Found the newline? Break out!

    // Otherwise, this is a hard case.  Fall back on getAndAdvanceChar to
    // properly decode the character.
    const char *OldPtr = CurPtr;
    C = getAndAdvanceChar(CurPtr, Result);

    // If we read multiple characters, and one of those characters was a \r or
    // \n, then we had an escaped newline within the comment.  Emit diagnostic.
    if (CurPtr != OldPtr+1) {
      for (; OldPtr != CurPtr; ++OldPtr)
        if (OldPtr[0] == '\n' || OldPtr[0] == '\r') {
          if (Diag(OldPtr-1, diag::ext_multi_line_bcpl_comment))
            return true;
        }
    }

    // getAndAdvanceChar moved one past the sentinel NUL at end of buffer.
    if (CurPtr == BufferEnd+1) goto FoundEOF;
  } while (C != '\n' && C != '\r');

  // Found and did not consume a newline.

  // If we are inside a preprocessor directive and we see the end of line,
  // return immediately, so that the lexer can return this as an EOM token.
  if (ParsingPreprocessorDirective) {
    BufferPtr = CurPtr;
    return false;
  }

  // Otherwise, eat the \n character.  We don't care if this is a \n\r or
  // \r\n sequence.
  ++CurPtr;

  // The next returned token is at the start of the line.
  Result.SetFlag(LexerToken::StartOfLine);
  // No leading whitespace seen so far.
  Result.ClearFlag(LexerToken::LeadingSpace);

  // It is common for the tokens immediately after a // comment to be
  // whitespace (indentation for the next line).  Instead of going through the
  // big switch, handle it efficiently now.
  if (isWhitespace(*CurPtr)) {
    Result.SetFlag(LexerToken::LeadingSpace);
    return SkipWhitespace(Result, CurPtr+1);
  }

  BufferPtr = CurPtr;
  return false;

FoundEOF:   // If we ran off the end of the buffer, return EOF.
  BufferPtr = CurPtr-1;
  return false;
}
784
/// isBlockCommentEndOfEscapedNewLine - Return true if lexing must abort due
/// to a diagnostic; on a false return, PrevChar is set to the character
/// preceding the escape (so the caller checks PrevChar == '*' to detect an
/// escaped-newline "*/" comment terminator), or 0 if the newline at CurPtr
/// is not an escaped newline at all.  CurPtr points at a '\n' or '\r' that
/// is known to be inside a block comment.
bool Lexer::isBlockCommentEndOfEscapedNewLine(const char *CurPtr,
                                              char &PrevChar) {
  assert(CurPtr[0] == '\n' || CurPtr[0] == '\r');
  PrevChar = 0;

  // Back up off the newline.
  --CurPtr;

  // If this is a two-character newline sequence, skip the other character.
  if (CurPtr[0] == '\n' || CurPtr[0] == '\r') {
    // \n\n or \r\r -> not escaped newline.
    if (CurPtr[0] == CurPtr[1])
      return false;
    // \n\r or \r\n -> skip the newline.
    --CurPtr;
  }

  // If we have horizontal whitespace, skip over it.  We allow whitespace
  // between the slash and newline.
  // NOTE(review): NUL bytes are also skipped here, unlike in the forward
  // scanners — presumably deliberate, but confirm the intent.
  bool HasSpace = false;
  while (isHorizontalWhitespace(*CurPtr) || *CurPtr == 0) {
    --CurPtr;
    HasSpace = true;
  }

  // If we have a slash, we know this is an escaped newline.
  if (*CurPtr == '\\') {
    PrevChar = CurPtr[-1];
    if (PrevChar != '*') return false;
  } else {
    // It isn't a slash, is it the ?? / trigraph?
    if (*CurPtr != '/' || CurPtr[-1] != '?' || CurPtr[-2] != '?')
      return false;
    // This is the trigraph. Emit a stern warning!
    if ((PrevChar = CurPtr[-3]) != '*') return false;
    CurPtr -= 2;

    // If no trigraphs are enabled, warn that we ignored this trigraph and
    // ignore this * character.  PrevChar is reset so the caller does not
    // treat this as a comment terminator; abort only if Diag says to.
    if (!Features.Trigraphs) {
      PrevChar = 0;
      return Diag(CurPtr, diag::trigraph_ignored_block_comment);
    } else {
      if (Diag(CurPtr, diag::trigraph_ends_block_comment))
        return true;
    }
  }

  // Warn about having an escaped newline between the */ characters.
  if (Diag(CurPtr, diag::escaped_newline_block_comment_end))
    return true;

  // If there was space between the backslash and newline, warn about it.
  if (HasSpace &&
      Diag(CurPtr, diag::backslash_newline_space))
    return true;

  return false;
}
847
848/// SkipBlockComment - We have just read the /* characters from input. Read
849/// until we find the */ characters that terminate the comment. Note that we
850/// don't bother decoding trigraphs or escaped newlines in block comments,
851/// because they cannot cause the comment to end. The only thing that can
852/// happen is the comment could end with an escaped newline between the */ end
853/// of comment.
854bool Lexer::SkipBlockComment(LexerToken &Result, const char *CurPtr) {
855 // Scan one character past where we should, looking for a '/' character. Once
856 // we find it, check to see if it was preceeded by a *. This common
857 // optimization helps people who like to put a lot of * characters in their
858 // comments.
859 unsigned char C = *CurPtr++;
860 if (C == 0 && CurPtr == BufferEnd+1) {
861 if (Diag(Result.getStart(), diag::err_unterminated_block_comment))
862 return true;
863 BufferPtr = CurPtr-1;
864 return false;
865 }
866
867 while (1) {
868 // Skip over all non-interesting characters.
869 // TODO: Vectorize this. Note: memchr on Darwin is slower than this loop.
870 while (C != '/' && C != '\0')
871 C = *CurPtr++;
872
873 if (C == '/') {
874 char T;
875 if (CurPtr[-2] == '*') // We found the final */. We're done!
876 break;
877
878 if ((CurPtr[-2] == '\n' || CurPtr[-2] == '\r')) {
879 char Prev;
880 if (isBlockCommentEndOfEscapedNewLine(CurPtr-2, Prev))
881 return true;
882 if (Prev == '*') {
883 // We found the final */, though it had an escaped newline between the
884 // * and /. We're done!
885 break;
886 }
887 }
888 if (CurPtr[0] == '*' && CurPtr[1] != '/') {
889 // If this is a /* inside of the comment, emit a warning. Don't do this
890 // if this is a /*/, which will end the comment. This misses cases with
891 // embedded escaped newlines, but oh well.
892 if (Diag(CurPtr-1, diag::nested_block_comment))
893 return true;
894 }
895 } else if (C == 0 && CurPtr == BufferEnd+1) {
896 if (Diag(Result.getStart(), diag::err_unterminated_block_comment))
897 return true;
898 // Note: the user probably forgot a */. We could continue immediately
899 // after the /*, but this would involve lexing a lot of what really is the
900 // comment, which surely would confuse the parser.
901 BufferPtr = CurPtr-1;
902 return false;
903 }
904 C = *CurPtr++;
905 }
906
907 // It is common for the tokens immediately after a /**/ comment to be
908 // whitespace. Instead of going through the big switch, handle it
909 // efficiently now.
910 if (isHorizontalWhitespace(*CurPtr)) {
911 Result.SetFlag(LexerToken::LeadingSpace);
912 return SkipWhitespace(Result, CurPtr+1);
913 }
914
915 // Otherwise, just return so that the next character will be lexed as a token.
916 BufferPtr = CurPtr;
917 Result.SetFlag(LexerToken::LeadingSpace);
918 return false;
919}
920
921//===----------------------------------------------------------------------===//
922// Primary Lexing Entry Points
923//===----------------------------------------------------------------------===//
924
925/// LexIncludeFilename - After the preprocessor has parsed a #include, lex and
926/// (potentially) macro expand the filename.
927bool Lexer::LexIncludeFilename(LexerToken &Result) {
928 assert(ParsingPreprocessorDirective &&
929 ParsingFilename == false &&
930 "Must be in a preprocessing directive!");
931
932 // We are now parsing a filename!
933 ParsingFilename = true;
934
935 // There should be exactly two tokens here if everything is good: first the
936 // filename, then the EOM.
937 if (Lex(Result)) return true;
938
939 // We should have gotten the filename now.
940 ParsingFilename = false;
941
942 // No filename?
943 if (Result.getKind() == tok::eom)
944 return Diag(Result.getStart(), diag::err_pp_expects_filename);
945
946 // Verify that there is nothing after the filename, other than EOM.
947 LexerToken EndTok;
948 if (Lex(EndTok)) return true;
949
950 if (EndTok.getKind() != tok::eom) {
951 if (Diag(Result.getStart(), diag::err_pp_expects_filename))
952 return true;
953
954 // Lex until the end of the preprocessor directive line.
955 while (EndTok.getKind() != tok::eom) {
956 if (Lex(EndTok)) return true;
957 }
958
959 Result.SetKind(tok::eom);
960 }
961
962 // We're done now.
963 return false;
964}
965
966/// ReadToEndOfLine - Read the rest of the current preprocessor line as an
967/// uninterpreted string. This switches the lexer out of directive mode.
std::string Lexer::ReadToEndOfLine() {
  assert(ParsingPreprocessorDirective && ParsingFilename == false &&
         "Must be in a preprocessing directive!");
  // Accumulates the raw text of the rest of the directive line.
  std::string Result;
  // Scratch token: getAndAdvanceChar records escaped-newline/cleaning info
  // into it, and it receives the EOM token at the end.
  LexerToken Tmp;

  // CurPtr - Cache BufferPtr in an automatic variable.
  const char *CurPtr = BufferPtr;
  Tmp.SetStart(CurPtr);

  while (1) {
    char Char = getAndAdvanceChar(CurPtr, Tmp);
    switch (Char) {
    default:
      Result += Char;
      break;
    case 0:  // Null.
      // Found end of file?
      if (CurPtr-1 != BufferEnd) {
        // Nope, normal character, continue.  An embedded NUL is treated as
        // ordinary line content here.
        Result += Char;
        break;
      }
      // FALL THROUGH.
    case '\r':
    case '\n':
      // Okay, we found the end of the line. First, back up past the \0, \r, \n.
      // getAndAdvanceChar should never have translated anything into a
      // newline, so the raw buffer character must match what we read.
      assert(CurPtr[-1] == Char && "Trigraphs for newline?");
      BufferPtr = CurPtr-1;

      // Next, lex the character, which should handle the EOM transition.
      // This also flips the lexer out of directive mode via the newline path.
      bool Err = Lex(Tmp);
      assert(Tmp.getKind() == tok::eom && "Unexpected token!");
      assert(!Err && "Shouldn't have error exiting macro!");

      // Finally, we're done, return the string we found.
      return Result;
    }
  }
}
1008
1009/// LexEndOfFile - CurPtr points to the end of this file. Handle this
1010/// condition, reporting diagnostics and handling other edge cases as required.
1011bool Lexer::LexEndOfFile(LexerToken &Result, const char *CurPtr) {
1012 // If we hit the end of the file while parsing a preprocessor directive,
1013 // end the preprocessor directive first. The next token returned will
1014 // then be the end of file.
1015 if (ParsingPreprocessorDirective) {
1016 // Done parsing the "line".
1017 ParsingPreprocessorDirective = false;
1018 Result.SetKind(tok::eom);
1019 // Update the end of token position as well as the BufferPtr instance var.
1020 Result.SetEnd(BufferPtr = CurPtr);
1021 return false;
1022 }
1023
1024 // If we are in a #if directive, emit an error.
1025 while (!ConditionalStack.empty()) {
1026 if (Diag(ConditionalStack.back().IfLoc,
1027 diag::err_pp_unterminated_conditional))
1028 return true;
1029 ConditionalStack.pop_back();
1030 }
1031
1032 // If the file was empty or didn't end in a newline, issue a pedwarn.
1033 if (CurPtr[-1] != '\n' && CurPtr[-1] != '\r' &&
1034 Diag(BufferEnd, diag::ext_no_newline_eof))
1035 return true;
1036
1037 BufferPtr = CurPtr;
1038 return PP.HandleEndOfFile(Result);
1039}
1040
1041
1042/// LexTokenInternal - This implements a simple C family lexer. It is an
1043/// extremely performance critical piece of code. This assumes that the buffer
1044/// has a null character at the end of the file. Return true if an error
1045/// occurred and compilation should terminate, false if normal. This returns a
1046/// preprocessing token, not a normal token, as such, it is an internal
1047/// interface. It assumes that the Flags of result have been cleared before
1048/// calling this.
1049bool Lexer::LexTokenInternal(LexerToken &Result) {
1050LexNextToken:
1051 // New token, can't need cleaning yet.
1052 Result.ClearFlag(LexerToken::NeedsCleaning);
1053
1054 // CurPtr - Cache BufferPtr in an automatic variable.
1055 const char *CurPtr = BufferPtr;
1056 Result.SetStart(CurPtr);
1057
1058 unsigned SizeTmp, SizeTmp2; // Temporaries for use in cases below.
1059
1060 // Read a character, advancing over it.
1061 char Char = getAndAdvanceChar(CurPtr, Result);
1062 switch (Char) {
1063 case 0: // Null.
1064 // Found end of file?
1065 if (CurPtr-1 == BufferEnd)
1066 return LexEndOfFile(Result, CurPtr-1); // Retreat back into the file.
1067
1068 if (Diag(CurPtr-1, diag::null_in_file))
1069 return true;
1070 Result.SetFlag(LexerToken::LeadingSpace);
1071 if (SkipWhitespace(Result, CurPtr)) return true;
1072 goto LexNextToken; // GCC isn't tail call eliminating.
1073 case '\n':
1074 case '\r':
1075 // If we are inside a preprocessor directive and we see the end of line,
1076 // we know we are done with the directive, so return an EOM token.
1077 if (ParsingPreprocessorDirective) {
1078 // Done parsing the "line".
1079 ParsingPreprocessorDirective = false;
1080
1081 // Since we consumed a newline, we are back at the start of a line.
1082 IsAtStartOfLine = true;
1083
1084 Result.SetKind(tok::eom);
1085 break;
1086 }
1087 // The returned token is at the start of the line.
1088 Result.SetFlag(LexerToken::StartOfLine);
1089 // No leading whitespace seen so far.
1090 Result.ClearFlag(LexerToken::LeadingSpace);
1091 if (SkipWhitespace(Result, CurPtr)) return true;
1092 goto LexNextToken; // GCC isn't tail call eliminating.
1093 case ' ':
1094 case '\t':
1095 case '\f':
1096 case '\v':
1097 Result.SetFlag(LexerToken::LeadingSpace);
1098 if (SkipWhitespace(Result, CurPtr)) return true;
1099 goto LexNextToken; // GCC isn't tail call eliminating.
1100
1101 case 'L':
1102 Char = getCharAndSize(CurPtr, SizeTmp);
1103
1104 // Wide string literal.
1105 if (Char == '"')
1106 return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result));
1107
1108 // Wide character constant.
1109 if (Char == '\'')
1110 return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result));
1111 // FALL THROUGH, treating L like the start of an identifier.
1112
1113 // C99 6.4.2: Identifiers.
1114 case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G':
1115 case 'H': case 'I': case 'J': case 'K': /*'L'*/case 'M': case 'N':
1116 case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U':
1117 case 'V': case 'W': case 'X': case 'Y': case 'Z':
1118 case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g':
1119 case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n':
1120 case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u':
1121 case 'v': case 'w': case 'x': case 'y': case 'z':
1122 case '_':
1123 return LexIdentifier(Result, CurPtr);
1124
1125 // C99 6.4.4.1: Integer Constants.
1126 // C99 6.4.4.2: Floating Constants.
1127 case '0': case '1': case '2': case '3': case '4':
1128 case '5': case '6': case '7': case '8': case '9':
1129 return LexNumericConstant(Result, CurPtr);
1130
1131 // C99 6.4.4: Character Constants.
1132 case '\'':
1133 return LexCharConstant(Result, CurPtr);
1134
1135 // C99 6.4.5: String Literals.
1136 case '"':
1137 return LexStringLiteral(Result, CurPtr);
1138
1139 // C99 6.4.6: Punctuators.
1140 case '?':
1141 Result.SetKind(tok::question);
1142 break;
1143 case '[':
1144 Result.SetKind(tok::l_square);
1145 break;
1146 case ']':
1147 Result.SetKind(tok::r_square);
1148 break;
1149 case '(':
1150 Result.SetKind(tok::l_paren);
1151 break;
1152 case ')':
1153 Result.SetKind(tok::r_paren);
1154 break;
1155 case '{':
1156 Result.SetKind(tok::l_brace);
1157 break;
1158 case '}':
1159 Result.SetKind(tok::r_brace);
1160 break;
1161 case '.':
1162 Char = getCharAndSize(CurPtr, SizeTmp);
1163 if (Char >= '0' && Char <= '9') {
1164 return LexNumericConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result));
1165 } else if (Features.CPlusPlus && Char == '*') {
1166 Result.SetKind(tok::periodstar);
1167 CurPtr += SizeTmp;
1168 } else if (Char == '.' &&
1169 getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '.') {
1170 Result.SetKind(tok::ellipsis);
1171 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
1172 SizeTmp2, Result);
1173 } else {
1174 Result.SetKind(tok::period);
1175 }
1176 break;
1177 case '&':
1178 Char = getCharAndSize(CurPtr, SizeTmp);
1179 if (Char == '&') {
1180 Result.SetKind(tok::ampamp);
1181 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1182 } else if (Char == '=') {
1183 Result.SetKind(tok::ampequal);
1184 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1185 } else {
1186 Result.SetKind(tok::amp);
1187 }
1188 break;
1189 case '*':
1190 if (getCharAndSize(CurPtr, SizeTmp) == '=') {
1191 Result.SetKind(tok::starequal);
1192 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1193 } else {
1194 Result.SetKind(tok::star);
1195 }
1196 break;
1197 case '+':
1198 Char = getCharAndSize(CurPtr, SizeTmp);
1199 if (Char == '+') {
1200 Result.SetKind(tok::plusplus);
1201 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1202 } else if (Char == '=') {
1203 Result.SetKind(tok::plusequal);
1204 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1205 } else {
1206 Result.SetKind(tok::plus);
1207 }
1208 break;
1209 case '-':
1210 Char = getCharAndSize(CurPtr, SizeTmp);
1211 if (Char == '-') {
1212 Result.SetKind(tok::minusminus);
1213 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1214 } else if (Char == '>' && Features.CPlusPlus &&
1215 getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '*') {
1216 Result.SetKind(tok::arrowstar); // C++ ->*
1217 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
1218 SizeTmp2, Result);
1219 } else if (Char == '>') {
1220 Result.SetKind(tok::arrow);
1221 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1222 } else if (Char == '=') {
1223 Result.SetKind(tok::minusequal);
1224 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1225 } else {
1226 Result.SetKind(tok::minus);
1227 }
1228 break;
1229 case '~':
1230 Result.SetKind(tok::tilde);
1231 break;
1232 case '!':
1233 if (getCharAndSize(CurPtr, SizeTmp) == '=') {
1234 Result.SetKind(tok::exclaimequal);
1235 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1236 } else {
1237 Result.SetKind(tok::exclaim);
1238 }
1239 break;
1240 case '/':
1241 // 6.4.9: Comments
1242 Char = getCharAndSize(CurPtr, SizeTmp);
1243 if (Char == '/') { // BCPL comment.
1244 Result.SetFlag(LexerToken::LeadingSpace);
1245 if (SkipBCPLComment(Result, ConsumeChar(CurPtr, SizeTmp, Result)))
1246 return true;
1247 goto LexNextToken; // GCC isn't tail call eliminating.
1248 } else if (Char == '*') { // /**/ comment.
1249 Result.SetFlag(LexerToken::LeadingSpace);
1250 if (SkipBlockComment(Result, ConsumeChar(CurPtr, SizeTmp, Result)))
1251 return true;
1252 goto LexNextToken; // GCC isn't tail call eliminating.
1253 } else if (Char == '=') {
1254 Result.SetKind(tok::slashequal);
1255 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1256 } else {
1257 Result.SetKind(tok::slash);
1258 }
1259 break;
1260 case '%':
1261 Char = getCharAndSize(CurPtr, SizeTmp);
1262 if (Char == '=') {
1263 Result.SetKind(tok::percentequal);
1264 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1265 } else if (Features.Digraphs && Char == '>') {
1266 Result.SetKind(tok::r_brace); // '%>' -> '}'
1267 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1268 } else if (Features.Digraphs && Char == ':') {
1269 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1270 if (getCharAndSize(CurPtr, SizeTmp) == '%' &&
1271 getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == ':') {
1272 Result.SetKind(tok::hashhash); // '%:%:' -> '##'
1273 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
1274 SizeTmp2, Result);
1275 } else {
1276 Result.SetKind(tok::hash); // '%:' -> '#'
1277
1278 // We parsed a # character. If this occurs at the start of the line,
1279 // it's actually the start of a preprocessing directive. Callback to
1280 // the preprocessor to handle it.
1281 // FIXME: -fpreprocessed mode??
1282 if (Result.isAtStartOfLine() && !PP.isSkipping()) {
1283 BufferPtr = CurPtr;
1284 if (PP.HandleDirective(Result)) return true;
1285
1286 // As an optimization, if the preprocessor didn't switch lexers, tail
1287 // recurse.
1288 if (PP.isCurrentLexer(this)) {
1289 // Start a new token. If this is a #include or something, the PP may
1290 // want us starting at the beginning of the line again. If so, set
1291 // the StartOfLine flag.
1292 if (IsAtStartOfLine) {
1293 Result.SetFlag(LexerToken::StartOfLine);
1294 IsAtStartOfLine = false;
1295 }
1296 goto LexNextToken; // GCC isn't tail call eliminating.
1297 }
1298
1299 return PP.Lex(Result);
1300 }
1301 }
1302 } else {
1303 Result.SetKind(tok::percent);
1304 }
1305 break;
1306 case '<':
1307 Char = getCharAndSize(CurPtr, SizeTmp);
1308 if (ParsingFilename) {
1309 return LexAngledStringLiteral(Result, CurPtr+SizeTmp);
1310 } else if (Char == '<' &&
1311 getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '=') {
1312 Result.SetKind(tok::lesslessequal);
1313 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
1314 SizeTmp2, Result);
1315 } else if (Char == '<') {
1316 Result.SetKind(tok::lessless);
1317 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1318 } else if (Char == '=') {
1319 Result.SetKind(tok::lessequal);
1320 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1321 } else if (Features.Digraphs && Char == ':') {
1322 Result.SetKind(tok::l_square); // '<:' -> '['
1323 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1324 } else if (Features.Digraphs && Char == '>') {
1325 Result.SetKind(tok::l_brace); // '<%' -> '{'
1326 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1327 } else if (Features.CPPMinMax && Char == '?') { // <?
1328 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1329 if (Diag(Result.getStart(), diag::min_max_deprecated))
1330 return true;
1331
1332 if (getCharAndSize(CurPtr, SizeTmp) == '=') { // <?=
1333 Result.SetKind(tok::lessquestionequal);
1334 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1335 } else {
1336 Result.SetKind(tok::lessquestion);
1337 }
1338 } else {
1339 Result.SetKind(tok::less);
1340 }
1341 break;
1342 case '>':
1343 Char = getCharAndSize(CurPtr, SizeTmp);
1344 if (Char == '=') {
1345 Result.SetKind(tok::greaterequal);
1346 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1347 } else if (Char == '>' &&
1348 getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '=') {
1349 Result.SetKind(tok::greatergreaterequal);
1350 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
1351 SizeTmp2, Result);
1352 } else if (Char == '>') {
1353 Result.SetKind(tok::greatergreater);
1354 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1355 } else if (Features.CPPMinMax && Char == '?') {
1356 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1357 if (Diag(Result.getStart(), diag::min_max_deprecated))
1358 return true;
1359
1360 if (getCharAndSize(CurPtr, SizeTmp) == '=') {
1361 Result.SetKind(tok::greaterquestionequal); // >?=
1362 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1363 } else {
1364 Result.SetKind(tok::greaterquestion); // >?
1365 }
1366 } else {
1367 Result.SetKind(tok::greater);
1368 }
1369 break;
1370 case '^':
1371 Char = getCharAndSize(CurPtr, SizeTmp);
1372 if (Char == '=') {
1373 Result.SetKind(tok::caretequal);
1374 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1375 } else {
1376 Result.SetKind(tok::caret);
1377 }
1378 break;
1379 case '|':
1380 Char = getCharAndSize(CurPtr, SizeTmp);
1381 if (Char == '=') {
1382 Result.SetKind(tok::pipeequal);
1383 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1384 } else if (Char == '|') {
1385 Result.SetKind(tok::pipepipe);
1386 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1387 } else {
1388 Result.SetKind(tok::pipe);
1389 }
1390 break;
1391 case ':':
1392 Char = getCharAndSize(CurPtr, SizeTmp);
1393 if (Features.Digraphs && Char == '>') {
1394 Result.SetKind(tok::r_square); // ':>' -> ']'
1395 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1396 } else if (Features.CPlusPlus && Char == ':') {
1397 Result.SetKind(tok::coloncolon);
1398 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1399 } else {
1400 Result.SetKind(tok::colon);
1401 }
1402 break;
1403 case ';':
1404 Result.SetKind(tok::semi);
1405 break;
1406 case '=':
1407 Char = getCharAndSize(CurPtr, SizeTmp);
1408 if (Char == '=') {
1409 Result.SetKind(tok::equalequal);
1410 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1411 } else {
1412 Result.SetKind(tok::equal);
1413 }
1414 break;
1415 case ',':
1416 Result.SetKind(tok::comma);
1417 break;
1418 case '#':
1419 Char = getCharAndSize(CurPtr, SizeTmp);
1420 if (Char == '#') {
1421 Result.SetKind(tok::hashhash);
1422 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1423 } else {
1424 Result.SetKind(tok::hash);
1425 // We parsed a # character. If this occurs at the start of the line,
1426 // it's actually the start of a preprocessing directive. Callback to
1427 // the preprocessor to handle it.
1428 // FIXME: not in preprocessed mode??
1429 if (Result.isAtStartOfLine() && !PP.isSkipping()) {
1430 BufferPtr = CurPtr;
1431 if (PP.HandleDirective(Result)) return true;
1432
1433 // As an optimization, if the preprocessor didn't switch lexers, tail
1434 // recurse.
1435 if (PP.isCurrentLexer(this)) {
1436 // Start a new token. If this is a #include or something, the PP may
1437 // want us starting at the beginning of the line again. If so, set
1438 // the StartOfLine flag.
1439 if (IsAtStartOfLine) {
1440 Result.SetFlag(LexerToken::StartOfLine);
1441 IsAtStartOfLine = false;
1442 }
1443 goto LexNextToken; // GCC isn't tail call eliminating.
1444 }
1445 return PP.Lex(Result);
1446 }
1447 }
1448 break;
1449
1450 case '\\':
1451 // FIXME: handle UCN's.
1452 // FALL THROUGH.
1453 default:
1454 // Objective C support.
1455 if (CurPtr[-1] == '@' && Features.ObjC1) {
1456 Result.SetKind(tok::at);
1457 break;
1458 } else if (CurPtr[-1] == '$' && Features.DollarIdents) {// $ in identifiers.
1459 if (Diag(CurPtr-1, diag::ext_dollar_in_identifier))
1460 return true;
1461 return LexIdentifier(Result, CurPtr);
1462 }
1463
1464 if (!PP.isSkipping() && Diag(CurPtr-1, diag::err_stray_character))
1465 return true;
1466 BufferPtr = CurPtr;
1467 goto LexNextToken; // GCC isn't tail call eliminating.
1468 }
1469
1470 // Update the end of token position as well as the BufferPtr instance var.
1471 Result.SetEnd(BufferPtr = CurPtr);
1472 return false;
1473}