blob: a7a58f0b1fb949d434b6d1bd6ba6d25ef8077249 [file] [log] [blame]
Chris Lattner4b009652007-07-25 00:24:17 +00001//===--- Lexer.cpp - C Language Family Lexer ------------------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file was developed by Chris Lattner and is distributed under
6// the University of Illinois Open Source License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the Lexer and Token interfaces.
11//
12//===----------------------------------------------------------------------===//
13//
14// TODO: GCC Diagnostics emitted by the lexer:
15// PEDWARN: (form feed|vertical tab) in preprocessing directive
16//
17// Universal characters, unicode, char mapping:
18// WARNING: `%.*s' is not in NFKC
19// WARNING: `%.*s' is not in NFC
20//
21// Other:
22// TODO: Options to support:
23// -fexec-charset,-fwide-exec-charset
24//
25//===----------------------------------------------------------------------===//
26
27#include "clang/Lex/Lexer.h"
28#include "clang/Lex/Preprocessor.h"
29#include "clang/Basic/Diagnostic.h"
30#include "clang/Basic/SourceManager.h"
31#include "llvm/Support/Compiler.h"
32#include "llvm/Support/MemoryBuffer.h"
33#include <cctype>
34using namespace clang;
35
36static void InitCharacterInfo();
37
Chris Lattneraa9bdf12007-10-07 08:47:24 +000038//===----------------------------------------------------------------------===//
39// Token Class Implementation
40//===----------------------------------------------------------------------===//
41
42/// isObjCAtKeyword - Return true if we have an ObjC keyword identifier.
43bool Token::isObjCAtKeyword(tok::ObjCKeywordKind objcKey) const {
Chris Lattnercb8e41c2007-10-09 18:02:16 +000044 return is(tok::identifier) &&
45 getIdentifierInfo()->getObjCKeywordID() == objcKey;
Chris Lattneraa9bdf12007-10-07 08:47:24 +000046}
47
48/// getObjCKeywordID - Return the ObjC keyword kind.
49tok::ObjCKeywordKind Token::getObjCKeywordID() const {
50 IdentifierInfo *specId = getIdentifierInfo();
51 return specId ? specId->getObjCKeywordID() : tok::objc_not_keyword;
52}
53
54//===----------------------------------------------------------------------===//
55// Lexer Class Implementation
56//===----------------------------------------------------------------------===//
57
58
Chris Lattner4b009652007-07-25 00:24:17 +000059Lexer::Lexer(SourceLocation fileloc, Preprocessor &pp,
60 const char *BufStart, const char *BufEnd)
61 : FileLoc(fileloc), PP(pp), Features(PP.getLangOptions()) {
62
63 SourceManager &SourceMgr = PP.getSourceManager();
64 unsigned InputFileID = SourceMgr.getPhysicalLoc(FileLoc).getFileID();
65 const llvm::MemoryBuffer *InputFile = SourceMgr.getBuffer(InputFileID);
66
67 Is_PragmaLexer = false;
Chris Lattner4b009652007-07-25 00:24:17 +000068 InitCharacterInfo();
69
70 // BufferStart must always be InputFile->getBufferStart().
71 BufferStart = InputFile->getBufferStart();
72
73 // BufferPtr and BufferEnd can start out somewhere inside the current buffer.
74 // If unspecified, they starts at the start/end of the buffer.
75 BufferPtr = BufStart ? BufStart : BufferStart;
76 BufferEnd = BufEnd ? BufEnd : InputFile->getBufferEnd();
77
78 assert(BufferEnd[0] == 0 &&
79 "We assume that the input buffer has a null character at the end"
80 " to simplify lexing!");
81
82 // Start of the file is a start of line.
83 IsAtStartOfLine = true;
84
85 // We are not after parsing a #.
86 ParsingPreprocessorDirective = false;
87
88 // We are not after parsing #include.
89 ParsingFilename = false;
90
91 // We are not in raw mode. Raw mode disables diagnostics and interpretation
92 // of tokens (e.g. identifiers, thus disabling macro expansion). It is used
93 // to quickly lex the tokens of the buffer, e.g. when handling a "#if 0" block
94 // or otherwise skipping over tokens.
95 LexingRawMode = false;
96
97 // Default to keeping comments if requested.
98 KeepCommentMode = PP.getCommentRetentionState();
99}
100
101/// Stringify - Convert the specified string into a C string, with surrounding
102/// ""'s, and with escaped \ and " characters.
103std::string Lexer::Stringify(const std::string &Str, bool Charify) {
104 std::string Result = Str;
105 char Quote = Charify ? '\'' : '"';
106 for (unsigned i = 0, e = Result.size(); i != e; ++i) {
107 if (Result[i] == '\\' || Result[i] == Quote) {
108 Result.insert(Result.begin()+i, '\\');
109 ++i; ++e;
110 }
111 }
112 return Result;
113}
114
115/// Stringify - Convert the specified string into a C string by escaping '\'
116/// and " characters. This does not add surrounding ""'s to the string.
117void Lexer::Stringify(llvm::SmallVectorImpl<char> &Str) {
118 for (unsigned i = 0, e = Str.size(); i != e; ++i) {
119 if (Str[i] == '\\' || Str[i] == '"') {
120 Str.insert(Str.begin()+i, '\\');
121 ++i; ++e;
122 }
123 }
124}
125
126
127//===----------------------------------------------------------------------===//
128// Character information.
129//===----------------------------------------------------------------------===//
130
// Classification bits for each of the 256 byte values, filled in lazily by
// InitCharacterInfo().
static unsigned char CharInfo[256];

enum {
  CHAR_HORZ_WS  = 0x01,  // ' ', '\t', '\f', '\v'.  Note: not '\0'.
  CHAR_VERT_WS  = 0x02,  // '\r', '\n'.
  CHAR_LETTER   = 0x04,  // a-z, A-Z
  CHAR_NUMBER   = 0x08,  // 0-9
  CHAR_UNDER    = 0x10,  // _
  CHAR_PERIOD   = 0x20   // .
};

// Populate CharInfo exactly once.
// TODO: build this table statically instead of at startup.
static void InitCharacterInfo() {
  static bool Initialized = false;
  if (Initialized) return;
  Initialized = true;

  const char HorzWS[] = {' ', '\t', '\f', '\v'};
  for (unsigned i = 0; i != sizeof(HorzWS); ++i)
    CharInfo[(unsigned char)HorzWS[i]] = CHAR_HORZ_WS;
  CharInfo[(unsigned char)'\n'] = CHAR_VERT_WS;
  CharInfo[(unsigned char)'\r'] = CHAR_VERT_WS;

  CharInfo[(unsigned char)'_'] = CHAR_UNDER;
  CharInfo[(unsigned char)'.'] = CHAR_PERIOD;
  for (unsigned c = 'a'; c <= 'z'; ++c) {
    CharInfo[c] = CHAR_LETTER;               // lower case
    CharInfo[c - 'a' + 'A'] = CHAR_LETTER;   // matching upper case
  }
  for (unsigned c = '0'; c <= '9'; ++c)
    CharInfo[c] = CHAR_NUMBER;
}

/// isIdentifierBody - Return true if this is the body character of an
/// identifier, which is [a-zA-Z0-9_].
static inline bool isIdentifierBody(unsigned char c) {
  return (CharInfo[c] & (CHAR_LETTER|CHAR_NUMBER|CHAR_UNDER)) != 0;
}

/// isHorizontalWhitespace - Return true if this character is horizontal
/// whitespace: ' ', '\t', '\f', '\v'.  Note that this returns false for '\0'.
static inline bool isHorizontalWhitespace(unsigned char c) {
  return (CharInfo[c] & CHAR_HORZ_WS) != 0;
}

/// isWhitespace - Return true if this character is horizontal or vertical
/// whitespace: ' ', '\t', '\f', '\v', '\n', '\r'.  Note that this returns
/// false for '\0'.
static inline bool isWhitespace(unsigned char c) {
  return (CharInfo[c] & (CHAR_HORZ_WS|CHAR_VERT_WS)) != 0;
}

/// isNumberBody - Return true if this is the body character of a
/// preprocessing number, which is [a-zA-Z0-9_.].
static inline bool isNumberBody(unsigned char c) {
  return (CharInfo[c] & (CHAR_LETTER|CHAR_NUMBER|CHAR_UNDER|CHAR_PERIOD)) != 0;
}
185
186
187//===----------------------------------------------------------------------===//
188// Diagnostics forwarding code.
189//===----------------------------------------------------------------------===//
190
191/// GetMappedTokenLoc - If lexing out of a 'mapped buffer', where we pretend the
192/// lexer buffer was all instantiated at a single point, perform the mapping.
193/// This is currently only used for _Pragma implementation, so it is the slow
194/// path of the hot getSourceLocation method. Do not allow it to be inlined.
195static SourceLocation GetMappedTokenLoc(Preprocessor &PP,
196 SourceLocation FileLoc,
197 unsigned CharNo) DISABLE_INLINE;
198static SourceLocation GetMappedTokenLoc(Preprocessor &PP,
199 SourceLocation FileLoc,
200 unsigned CharNo) {
201 // Otherwise, we're lexing "mapped tokens". This is used for things like
202 // _Pragma handling. Combine the instantiation location of FileLoc with the
203 // physical location.
204 SourceManager &SourceMgr = PP.getSourceManager();
205
206 // Create a new SLoc which is expanded from logical(FileLoc) but whose
207 // characters come from phys(FileLoc)+Offset.
208 SourceLocation VirtLoc = SourceMgr.getLogicalLoc(FileLoc);
209 SourceLocation PhysLoc = SourceMgr.getPhysicalLoc(FileLoc);
210 PhysLoc = SourceLocation::getFileLoc(PhysLoc.getFileID(), CharNo);
211 return SourceMgr.getInstantiationLoc(PhysLoc, VirtLoc);
212}
213
214/// getSourceLocation - Return a source location identifier for the specified
215/// offset in the current file.
216SourceLocation Lexer::getSourceLocation(const char *Loc) const {
217 assert(Loc >= BufferStart && Loc <= BufferEnd &&
218 "Location out of range for this buffer!");
219
220 // In the normal case, we're just lexing from a simple file buffer, return
221 // the file id from FileLoc with the offset specified.
222 unsigned CharNo = Loc-BufferStart;
223 if (FileLoc.isFileID())
224 return SourceLocation::getFileLoc(FileLoc.getFileID(), CharNo);
225
226 return GetMappedTokenLoc(PP, FileLoc, CharNo);
227}
228
229/// Diag - Forwarding function for diagnostics. This translate a source
230/// position in the current buffer into a SourceLocation object for rendering.
231void Lexer::Diag(const char *Loc, unsigned DiagID,
232 const std::string &Msg) const {
233 if (LexingRawMode && Diagnostic::isNoteWarningOrExtension(DiagID))
234 return;
235 PP.Diag(getSourceLocation(Loc), DiagID, Msg);
236}
237void Lexer::Diag(SourceLocation Loc, unsigned DiagID,
238 const std::string &Msg) const {
239 if (LexingRawMode && Diagnostic::isNoteWarningOrExtension(DiagID))
240 return;
241 PP.Diag(Loc, DiagID, Msg);
242}
243
244
245//===----------------------------------------------------------------------===//
246// Trigraph and Escaped Newline Handling Code.
247//===----------------------------------------------------------------------===//
248
/// GetTrigraphCharForLetter - Given the character that follows a "??" pair,
/// return the character the trigraph decodes to, or '\0' if it is not a legal
/// trigraph letter.
static char GetTrigraphCharForLetter(char Letter) {
  switch (Letter) {
  case '=':  return '#';
  case ')':  return ']';
  case '(':  return '[';
  case '!':  return '|';
  case '\'': return '^';
  case '>':  return '}';
  case '/':  return '\\';
  case '<':  return '{';
  case '-':  return '~';
  default:   return 0;
  }
}
265
266/// DecodeTrigraphChar - If the specified character is a legal trigraph when
267/// prefixed with ??, emit a trigraph warning. If trigraphs are enabled,
268/// return the result character. Finally, emit a warning about trigraph use
269/// whether trigraphs are enabled or not.
270static char DecodeTrigraphChar(const char *CP, Lexer *L) {
271 char Res = GetTrigraphCharForLetter(*CP);
272 if (Res && L) {
273 if (!L->getFeatures().Trigraphs) {
274 L->Diag(CP-2, diag::trigraph_ignored);
275 return 0;
276 } else {
277 L->Diag(CP-2, diag::trigraph_converted, std::string()+Res);
278 }
279 }
280 return Res;
281}
282
283/// getCharAndSizeSlow - Peek a single 'character' from the specified buffer,
284/// get its size, and return it. This is tricky in several cases:
285/// 1. If currently at the start of a trigraph, we warn about the trigraph,
286/// then either return the trigraph (skipping 3 chars) or the '?',
287/// depending on whether trigraphs are enabled or not.
288/// 2. If this is an escaped newline (potentially with whitespace between
289/// the backslash and newline), implicitly skip the newline and return
290/// the char after it.
291/// 3. If this is a UCN, return it. FIXME: C++ UCN's?
292///
293/// This handles the slow/uncommon case of the getCharAndSize method. Here we
294/// know that we can accumulate into Size, and that we have already incremented
295/// Ptr by Size bytes.
296///
297/// NOTE: When this method is updated, getCharAndSizeSlowNoWarn (below) should
298/// be updated to match.
299///
300char Lexer::getCharAndSizeSlow(const char *Ptr, unsigned &Size,
301 Token *Tok) {
302 // If we have a slash, look for an escaped newline.
303 if (Ptr[0] == '\\') {
304 ++Size;
305 ++Ptr;
306Slash:
307 // Common case, backslash-char where the char is not whitespace.
308 if (!isWhitespace(Ptr[0])) return '\\';
309
310 // See if we have optional whitespace characters followed by a newline.
311 {
312 unsigned SizeTmp = 0;
313 do {
314 ++SizeTmp;
315 if (Ptr[SizeTmp-1] == '\n' || Ptr[SizeTmp-1] == '\r') {
316 // Remember that this token needs to be cleaned.
317 if (Tok) Tok->setFlag(Token::NeedsCleaning);
318
319 // Warn if there was whitespace between the backslash and newline.
320 if (SizeTmp != 1 && Tok)
321 Diag(Ptr, diag::backslash_newline_space);
322
323 // If this is a \r\n or \n\r, skip the newlines.
324 if ((Ptr[SizeTmp] == '\r' || Ptr[SizeTmp] == '\n') &&
325 Ptr[SizeTmp-1] != Ptr[SizeTmp])
326 ++SizeTmp;
327
328 // Found backslash<whitespace><newline>. Parse the char after it.
329 Size += SizeTmp;
330 Ptr += SizeTmp;
331 // Use slow version to accumulate a correct size field.
332 return getCharAndSizeSlow(Ptr, Size, Tok);
333 }
334 } while (isWhitespace(Ptr[SizeTmp]));
335 }
336
337 // Otherwise, this is not an escaped newline, just return the slash.
338 return '\\';
339 }
340
341 // If this is a trigraph, process it.
342 if (Ptr[0] == '?' && Ptr[1] == '?') {
343 // If this is actually a legal trigraph (not something like "??x"), emit
344 // a trigraph warning. If so, and if trigraphs are enabled, return it.
345 if (char C = DecodeTrigraphChar(Ptr+2, Tok ? this : 0)) {
346 // Remember that this token needs to be cleaned.
347 if (Tok) Tok->setFlag(Token::NeedsCleaning);
348
349 Ptr += 3;
350 Size += 3;
351 if (C == '\\') goto Slash;
352 return C;
353 }
354 }
355
356 // If this is neither, return a single character.
357 ++Size;
358 return *Ptr;
359}
360
361
362/// getCharAndSizeSlowNoWarn - Handle the slow/uncommon case of the
363/// getCharAndSizeNoWarn method. Here we know that we can accumulate into Size,
364/// and that we have already incremented Ptr by Size bytes.
365///
366/// NOTE: When this method is updated, getCharAndSizeSlow (above) should
367/// be updated to match.
368char Lexer::getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
369 const LangOptions &Features) {
370 // If we have a slash, look for an escaped newline.
371 if (Ptr[0] == '\\') {
372 ++Size;
373 ++Ptr;
374Slash:
375 // Common case, backslash-char where the char is not whitespace.
376 if (!isWhitespace(Ptr[0])) return '\\';
377
378 // See if we have optional whitespace characters followed by a newline.
379 {
380 unsigned SizeTmp = 0;
381 do {
382 ++SizeTmp;
383 if (Ptr[SizeTmp-1] == '\n' || Ptr[SizeTmp-1] == '\r') {
384
385 // If this is a \r\n or \n\r, skip the newlines.
386 if ((Ptr[SizeTmp] == '\r' || Ptr[SizeTmp] == '\n') &&
387 Ptr[SizeTmp-1] != Ptr[SizeTmp])
388 ++SizeTmp;
389
390 // Found backslash<whitespace><newline>. Parse the char after it.
391 Size += SizeTmp;
392 Ptr += SizeTmp;
393
394 // Use slow version to accumulate a correct size field.
395 return getCharAndSizeSlowNoWarn(Ptr, Size, Features);
396 }
397 } while (isWhitespace(Ptr[SizeTmp]));
398 }
399
400 // Otherwise, this is not an escaped newline, just return the slash.
401 return '\\';
402 }
403
404 // If this is a trigraph, process it.
405 if (Features.Trigraphs && Ptr[0] == '?' && Ptr[1] == '?') {
406 // If this is actually a legal trigraph (not something like "??x"), return
407 // it.
408 if (char C = GetTrigraphCharForLetter(Ptr[2])) {
409 Ptr += 3;
410 Size += 3;
411 if (C == '\\') goto Slash;
412 return C;
413 }
414 }
415
416 // If this is neither, return a single character.
417 ++Size;
418 return *Ptr;
419}
420
421//===----------------------------------------------------------------------===//
422// Helper methods for lexing.
423//===----------------------------------------------------------------------===//
424
425void Lexer::LexIdentifier(Token &Result, const char *CurPtr) {
426 // Match [_A-Za-z0-9]*, we have already matched [_A-Za-z$]
427 unsigned Size;
428 unsigned char C = *CurPtr++;
429 while (isIdentifierBody(C)) {
430 C = *CurPtr++;
431 }
432 --CurPtr; // Back up over the skipped character.
433
434 // Fast path, no $,\,? in identifier found. '\' might be an escaped newline
435 // or UCN, and ? might be a trigraph for '\', an escaped newline or UCN.
436 // FIXME: UCNs.
437 if (C != '\\' && C != '?' && (C != '$' || !Features.DollarIdents)) {
438FinishIdentifier:
439 const char *IdStart = BufferPtr;
440 FormTokenWithChars(Result, CurPtr);
441 Result.setKind(tok::identifier);
442
443 // If we are in raw mode, return this identifier raw. There is no need to
444 // look up identifier information or attempt to macro expand it.
445 if (LexingRawMode) return;
446
447 // Fill in Result.IdentifierInfo, looking up the identifier in the
448 // identifier table.
449 PP.LookUpIdentifierInfo(Result, IdStart);
450
451 // Finally, now that we know we have an identifier, pass this off to the
452 // preprocessor, which may macro expand it or something.
453 return PP.HandleIdentifier(Result);
454 }
455
456 // Otherwise, $,\,? in identifier found. Enter slower path.
457
458 C = getCharAndSize(CurPtr, Size);
459 while (1) {
460 if (C == '$') {
461 // If we hit a $ and they are not supported in identifiers, we are done.
462 if (!Features.DollarIdents) goto FinishIdentifier;
463
464 // Otherwise, emit a diagnostic and continue.
465 Diag(CurPtr, diag::ext_dollar_in_identifier);
466 CurPtr = ConsumeChar(CurPtr, Size, Result);
467 C = getCharAndSize(CurPtr, Size);
468 continue;
469 } else if (!isIdentifierBody(C)) { // FIXME: UCNs.
470 // Found end of identifier.
471 goto FinishIdentifier;
472 }
473
474 // Otherwise, this character is good, consume it.
475 CurPtr = ConsumeChar(CurPtr, Size, Result);
476
477 C = getCharAndSize(CurPtr, Size);
478 while (isIdentifierBody(C)) { // FIXME: UCNs.
479 CurPtr = ConsumeChar(CurPtr, Size, Result);
480 C = getCharAndSize(CurPtr, Size);
481 }
482 }
483}
484
485
486/// LexNumericConstant - Lex the remainer of a integer or floating point
487/// constant. From[-1] is the first character lexed. Return the end of the
488/// constant.
489void Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
490 unsigned Size;
491 char C = getCharAndSize(CurPtr, Size);
492 char PrevCh = 0;
493 while (isNumberBody(C)) { // FIXME: UCNs?
494 CurPtr = ConsumeChar(CurPtr, Size, Result);
495 PrevCh = C;
496 C = getCharAndSize(CurPtr, Size);
497 }
498
499 // If we fell out, check for a sign, due to 1e+12. If we have one, continue.
500 if ((C == '-' || C == '+') && (PrevCh == 'E' || PrevCh == 'e'))
501 return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
502
503 // If we have a hex FP constant, continue.
504 if (Features.HexFloats &&
505 (C == '-' || C == '+') && (PrevCh == 'P' || PrevCh == 'p'))
506 return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
507
508 Result.setKind(tok::numeric_constant);
509
510 // Update the location of token as well as BufferPtr.
511 FormTokenWithChars(Result, CurPtr);
512}
513
514/// LexStringLiteral - Lex the remainder of a string literal, after having lexed
515/// either " or L".
516void Lexer::LexStringLiteral(Token &Result, const char *CurPtr, bool Wide){
517 const char *NulCharacter = 0; // Does this string contain the \0 character?
518
519 char C = getAndAdvanceChar(CurPtr, Result);
520 while (C != '"') {
521 // Skip escaped characters.
522 if (C == '\\') {
523 // Skip the escaped character.
524 C = getAndAdvanceChar(CurPtr, Result);
525 } else if (C == '\n' || C == '\r' || // Newline.
526 (C == 0 && CurPtr-1 == BufferEnd)) { // End of file.
527 if (!LexingRawMode) Diag(BufferPtr, diag::err_unterminated_string);
528 Result.setKind(tok::unknown);
529 FormTokenWithChars(Result, CurPtr-1);
530 return;
531 } else if (C == 0) {
532 NulCharacter = CurPtr-1;
533 }
534 C = getAndAdvanceChar(CurPtr, Result);
535 }
536
537 // If a nul character existed in the string, warn about it.
538 if (NulCharacter) Diag(NulCharacter, diag::null_in_string);
539
540 Result.setKind(Wide ? tok::wide_string_literal : tok::string_literal);
541
542 // Update the location of the token as well as the BufferPtr instance var.
543 FormTokenWithChars(Result, CurPtr);
544}
545
546/// LexAngledStringLiteral - Lex the remainder of an angled string literal,
547/// after having lexed the '<' character. This is used for #include filenames.
548void Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
549 const char *NulCharacter = 0; // Does this string contain the \0 character?
550
551 char C = getAndAdvanceChar(CurPtr, Result);
552 while (C != '>') {
553 // Skip escaped characters.
554 if (C == '\\') {
555 // Skip the escaped character.
556 C = getAndAdvanceChar(CurPtr, Result);
557 } else if (C == '\n' || C == '\r' || // Newline.
558 (C == 0 && CurPtr-1 == BufferEnd)) { // End of file.
559 if (!LexingRawMode) Diag(BufferPtr, diag::err_unterminated_string);
560 Result.setKind(tok::unknown);
561 FormTokenWithChars(Result, CurPtr-1);
562 return;
563 } else if (C == 0) {
564 NulCharacter = CurPtr-1;
565 }
566 C = getAndAdvanceChar(CurPtr, Result);
567 }
568
569 // If a nul character existed in the string, warn about it.
570 if (NulCharacter) Diag(NulCharacter, diag::null_in_string);
571
572 Result.setKind(tok::angle_string_literal);
573
574 // Update the location of token as well as BufferPtr.
575 FormTokenWithChars(Result, CurPtr);
576}
577
578
579/// LexCharConstant - Lex the remainder of a character constant, after having
580/// lexed either ' or L'.
581void Lexer::LexCharConstant(Token &Result, const char *CurPtr) {
582 const char *NulCharacter = 0; // Does this character contain the \0 character?
583
584 // Handle the common case of 'x' and '\y' efficiently.
585 char C = getAndAdvanceChar(CurPtr, Result);
586 if (C == '\'') {
587 if (!LexingRawMode) Diag(BufferPtr, diag::err_empty_character);
588 Result.setKind(tok::unknown);
589 FormTokenWithChars(Result, CurPtr);
590 return;
591 } else if (C == '\\') {
592 // Skip the escaped character.
593 // FIXME: UCN's.
594 C = getAndAdvanceChar(CurPtr, Result);
595 }
596
597 if (C && C != '\n' && C != '\r' && CurPtr[0] == '\'') {
598 ++CurPtr;
599 } else {
600 // Fall back on generic code for embedded nulls, newlines, wide chars.
601 do {
602 // Skip escaped characters.
603 if (C == '\\') {
604 // Skip the escaped character.
605 C = getAndAdvanceChar(CurPtr, Result);
606 } else if (C == '\n' || C == '\r' || // Newline.
607 (C == 0 && CurPtr-1 == BufferEnd)) { // End of file.
608 if (!LexingRawMode) Diag(BufferPtr, diag::err_unterminated_char);
609 Result.setKind(tok::unknown);
610 FormTokenWithChars(Result, CurPtr-1);
611 return;
612 } else if (C == 0) {
613 NulCharacter = CurPtr-1;
614 }
615 C = getAndAdvanceChar(CurPtr, Result);
616 } while (C != '\'');
617 }
618
619 if (NulCharacter) Diag(NulCharacter, diag::null_in_char);
620
621 Result.setKind(tok::char_constant);
622
623 // Update the location of token as well as BufferPtr.
624 FormTokenWithChars(Result, CurPtr);
625}
626
627/// SkipWhitespace - Efficiently skip over a series of whitespace characters.
628/// Update BufferPtr to point to the next non-whitespace character and return.
629void Lexer::SkipWhitespace(Token &Result, const char *CurPtr) {
630 // Whitespace - Skip it, then return the token after the whitespace.
631 unsigned char Char = *CurPtr; // Skip consequtive spaces efficiently.
632 while (1) {
633 // Skip horizontal whitespace very aggressively.
634 while (isHorizontalWhitespace(Char))
635 Char = *++CurPtr;
636
637 // Otherwise if we something other than whitespace, we're done.
638 if (Char != '\n' && Char != '\r')
639 break;
640
641 if (ParsingPreprocessorDirective) {
642 // End of preprocessor directive line, let LexTokenInternal handle this.
643 BufferPtr = CurPtr;
644 return;
645 }
646
647 // ok, but handle newline.
648 // The returned token is at the start of the line.
649 Result.setFlag(Token::StartOfLine);
650 // No leading whitespace seen so far.
651 Result.clearFlag(Token::LeadingSpace);
652 Char = *++CurPtr;
653 }
654
655 // If this isn't immediately after a newline, there is leading space.
656 char PrevChar = CurPtr[-1];
657 if (PrevChar != '\n' && PrevChar != '\r')
658 Result.setFlag(Token::LeadingSpace);
659
660 BufferPtr = CurPtr;
661}
662
663// SkipBCPLComment - We have just read the // characters from input. Skip until
664// we find the newline character thats terminate the comment. Then update
665/// BufferPtr and return.
666bool Lexer::SkipBCPLComment(Token &Result, const char *CurPtr) {
667 // If BCPL comments aren't explicitly enabled for this language, emit an
668 // extension warning.
669 if (!Features.BCPLComment) {
670 Diag(BufferPtr, diag::ext_bcpl_comment);
671
672 // Mark them enabled so we only emit one warning for this translation
673 // unit.
674 Features.BCPLComment = true;
675 }
676
677 // Scan over the body of the comment. The common case, when scanning, is that
678 // the comment contains normal ascii characters with nothing interesting in
679 // them. As such, optimize for this case with the inner loop.
680 char C;
681 do {
682 C = *CurPtr;
683 // FIXME: Speedup BCPL comment lexing. Just scan for a \n or \r character.
684 // If we find a \n character, scan backwards, checking to see if it's an
685 // escaped newline, like we do for block comments.
686
687 // Skip over characters in the fast loop.
688 while (C != 0 && // Potentially EOF.
689 C != '\\' && // Potentially escaped newline.
690 C != '?' && // Potentially trigraph.
691 C != '\n' && C != '\r') // Newline or DOS-style newline.
692 C = *++CurPtr;
693
694 // If this is a newline, we're done.
695 if (C == '\n' || C == '\r')
696 break; // Found the newline? Break out!
697
698 // Otherwise, this is a hard case. Fall back on getAndAdvanceChar to
699 // properly decode the character.
700 const char *OldPtr = CurPtr;
701 C = getAndAdvanceChar(CurPtr, Result);
702
703 // If we read multiple characters, and one of those characters was a \r or
704 // \n, then we had an escaped newline within the comment. Emit diagnostic
705 // unless the next line is also a // comment.
706 if (CurPtr != OldPtr+1 && C != '/' && CurPtr[0] != '/') {
707 for (; OldPtr != CurPtr; ++OldPtr)
708 if (OldPtr[0] == '\n' || OldPtr[0] == '\r') {
709 // Okay, we found a // comment that ends in a newline, if the next
710 // line is also a // comment, but has spaces, don't emit a diagnostic.
711 if (isspace(C)) {
712 const char *ForwardPtr = CurPtr;
713 while (isspace(*ForwardPtr)) // Skip whitespace.
714 ++ForwardPtr;
715 if (ForwardPtr[0] == '/' && ForwardPtr[1] == '/')
716 break;
717 }
718
719 Diag(OldPtr-1, diag::ext_multi_line_bcpl_comment);
720 break;
721 }
722 }
723
724 if (CurPtr == BufferEnd+1) { --CurPtr; break; }
725 } while (C != '\n' && C != '\r');
726
727 // Found but did not consume the newline.
728
729 // If we are returning comments as tokens, return this comment as a token.
730 if (KeepCommentMode)
731 return SaveBCPLComment(Result, CurPtr);
732
733 // If we are inside a preprocessor directive and we see the end of line,
734 // return immediately, so that the lexer can return this as an EOM token.
735 if (ParsingPreprocessorDirective || CurPtr == BufferEnd) {
736 BufferPtr = CurPtr;
737 return true;
738 }
739
740 // Otherwise, eat the \n character. We don't care if this is a \n\r or
741 // \r\n sequence.
742 ++CurPtr;
743
744 // The next returned token is at the start of the line.
745 Result.setFlag(Token::StartOfLine);
746 // No leading whitespace seen so far.
747 Result.clearFlag(Token::LeadingSpace);
748 BufferPtr = CurPtr;
749 return true;
750}
751
752/// SaveBCPLComment - If in save-comment mode, package up this BCPL comment in
753/// an appropriate way and return it.
754bool Lexer::SaveBCPLComment(Token &Result, const char *CurPtr) {
755 Result.setKind(tok::comment);
756 FormTokenWithChars(Result, CurPtr);
757
758 // If this BCPL-style comment is in a macro definition, transmogrify it into
759 // a C-style block comment.
760 if (ParsingPreprocessorDirective) {
761 std::string Spelling = PP.getSpelling(Result);
762 assert(Spelling[0] == '/' && Spelling[1] == '/' && "Not bcpl comment?");
763 Spelling[1] = '*'; // Change prefix to "/*".
764 Spelling += "*/"; // add suffix.
765
766 Result.setLocation(PP.CreateString(&Spelling[0], Spelling.size(),
767 Result.getLocation()));
768 Result.setLength(Spelling.size());
769 }
770 return false;
771}
772
773/// isBlockCommentEndOfEscapedNewLine - Return true if the specified newline
774/// character (either \n or \r) is part of an escaped newline sequence. Issue a
775/// diagnostic if so. We know that the is inside of a block comment.
776static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr,
777 Lexer *L) {
778 assert(CurPtr[0] == '\n' || CurPtr[0] == '\r');
779
780 // Back up off the newline.
781 --CurPtr;
782
783 // If this is a two-character newline sequence, skip the other character.
784 if (CurPtr[0] == '\n' || CurPtr[0] == '\r') {
785 // \n\n or \r\r -> not escaped newline.
786 if (CurPtr[0] == CurPtr[1])
787 return false;
788 // \n\r or \r\n -> skip the newline.
789 --CurPtr;
790 }
791
792 // If we have horizontal whitespace, skip over it. We allow whitespace
793 // between the slash and newline.
794 bool HasSpace = false;
795 while (isHorizontalWhitespace(*CurPtr) || *CurPtr == 0) {
796 --CurPtr;
797 HasSpace = true;
798 }
799
800 // If we have a slash, we know this is an escaped newline.
801 if (*CurPtr == '\\') {
802 if (CurPtr[-1] != '*') return false;
803 } else {
804 // It isn't a slash, is it the ?? / trigraph?
805 if (CurPtr[0] != '/' || CurPtr[-1] != '?' || CurPtr[-2] != '?' ||
806 CurPtr[-3] != '*')
807 return false;
808
809 // This is the trigraph ending the comment. Emit a stern warning!
810 CurPtr -= 2;
811
812 // If no trigraphs are enabled, warn that we ignored this trigraph and
813 // ignore this * character.
814 if (!L->getFeatures().Trigraphs) {
815 L->Diag(CurPtr, diag::trigraph_ignored_block_comment);
816 return false;
817 }
818 L->Diag(CurPtr, diag::trigraph_ends_block_comment);
819 }
820
821 // Warn about having an escaped newline between the */ characters.
822 L->Diag(CurPtr, diag::escaped_newline_block_comment_end);
823
824 // If there was space between the backslash and newline, warn about it.
825 if (HasSpace) L->Diag(CurPtr, diag::backslash_newline_space);
826
827 return true;
828}
829
830#ifdef __SSE2__
831#include <emmintrin.h>
832#elif __ALTIVEC__
833#include <altivec.h>
834#undef bool
835#endif
836
/// SkipBlockComment - We have just read the /* characters from input.  Read
/// until we find the */ characters that terminate the comment.  Note that we
/// don't bother decoding trigraphs or escaped newlines in block comments,
/// because they cannot cause the comment to end.  The only thing that can
/// happen is the comment could end with an escaped newline between the */ end
/// of comment.
///
/// Returns true if the caller should lex the token after the comment (i.e. the
/// comment was skipped or error recovery was performed), false if the comment
/// itself was formed into Result (comment retention mode).
bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr) {
  // Scan one character past where we should, looking for a '/' character.  Once
  // we find it, check to see if it was preceded by a *.  This common
  // optimization helps people who like to put a lot of * characters in their
  // comments.

  // The first character we get with newlines and trigraphs skipped to handle
  // the degenerate /*/ case below correctly if the * has an escaped newline
  // after it.
  unsigned CharSize;
  unsigned char C = getCharAndSize(CurPtr, CharSize);
  CurPtr += CharSize;
  // C is 0 and we ran one past the end-of-buffer sentinel: "/*<EOF>".
  if (C == 0 && CurPtr == BufferEnd+1) {
    Diag(BufferPtr, diag::err_unterminated_block_comment);
    BufferPtr = CurPtr-1;
    return true;
  }

  // Check to see if the first character after the '/*' is another /.  If so,
  // then this slash does not end the block comment, it is part of it.
  if (C == '/')
    C = *CurPtr++;

  while (1) {
    // Skip over all non-interesting characters until we find end of buffer or a
    // (probably ending) '/' character.
    if (CurPtr + 24 < BufferEnd) {
      // While not aligned to a 16-byte boundary.
      while (C != '/' && ((intptr_t)CurPtr & 0x0F) != 0)
        C = *CurPtr++;

      if (C == '/') goto FoundSlash;

#ifdef __SSE2__
      // Compare 16 bytes at a time against a vector of '/' characters;
      // advance while no lane matches.
      __m128i Slashes = _mm_set_epi8('/', '/', '/', '/', '/', '/', '/', '/',
                                     '/', '/', '/', '/', '/', '/', '/', '/');
      while (CurPtr+16 <= BufferEnd &&
             _mm_movemask_epi8(_mm_cmpeq_epi8(*(__m128i*)CurPtr, Slashes)) == 0)
        CurPtr += 16;
#elif __ALTIVEC__
      // AltiVec equivalent of the SSE2 scan above.
      __vector unsigned char Slashes = {
        '/', '/', '/', '/', '/', '/', '/', '/',
        '/', '/', '/', '/', '/', '/', '/', '/'
      };
      while (CurPtr+16 <= BufferEnd &&
             !vec_any_eq(*(vector unsigned char*)CurPtr, Slashes))
        CurPtr += 16;
#else
      // Scan for '/' quickly.  Many block comments are very large.
      while (CurPtr[0] != '/' &&
             CurPtr[1] != '/' &&
             CurPtr[2] != '/' &&
             CurPtr[3] != '/' &&
             CurPtr+4 < BufferEnd) {
        CurPtr += 4;
      }
#endif

      // It has to be one of the bytes scanned, increment to it and read one.
      C = *CurPtr++;
    }

    // Loop to scan the remainder.  C == '\0' here is either a real NUL in the
    // file or the end-of-buffer sentinel; disambiguated below.
    while (C != '/' && C != '\0')
      C = *CurPtr++;

  FoundSlash:
    if (C == '/') {
      if (CurPtr[-2] == '*')  // We found the final */.  We're done!
        break;

      if ((CurPtr[-2] == '\n' || CurPtr[-2] == '\r')) {
        if (isEndOfBlockCommentWithEscapedNewLine(CurPtr-2, this)) {
          // We found the final */, though it had an escaped newline between the
          // * and /.  We're done!
          break;
        }
      }
      if (CurPtr[0] == '*' && CurPtr[1] != '/') {
        // If this is a /* inside of the comment, emit a warning.  Don't do this
        // if this is a /*/, which will end the comment.  This misses cases with
        // embedded escaped newlines, but oh well.
        Diag(CurPtr-1, diag::nested_block_comment);
      }
    } else if (C == 0 && CurPtr == BufferEnd+1) {
      Diag(BufferPtr, diag::err_unterminated_block_comment);
      // Note: the user probably forgot a */.  We could continue immediately
      // after the /*, but this would involve lexing a lot of what really is the
      // comment, which surely would confuse the parser.
      BufferPtr = CurPtr-1;
      return true;
    }
    C = *CurPtr++;
  }

  // If we are returning comments as tokens, return this comment as a token.
  if (KeepCommentMode) {
    Result.setKind(tok::comment);
    FormTokenWithChars(Result, CurPtr);
    return false;
  }

  // It is common for the tokens immediately after a /**/ comment to be
  // whitespace.  Instead of going through the big switch, handle it
  // efficiently now.
  if (isHorizontalWhitespace(*CurPtr)) {
    Result.setFlag(Token::LeadingSpace);
    SkipWhitespace(Result, CurPtr+1);
    return true;
  }

  // Otherwise, just return so that the next character will be lexed as a token.
  BufferPtr = CurPtr;
  Result.setFlag(Token::LeadingSpace);
  return true;
}
959
960//===----------------------------------------------------------------------===//
961// Primary Lexing Entry Points
962//===----------------------------------------------------------------------===//
963
964/// LexIncludeFilename - After the preprocessor has parsed a #include, lex and
965/// (potentially) macro expand the filename.
966void Lexer::LexIncludeFilename(Token &FilenameTok) {
967 assert(ParsingPreprocessorDirective &&
968 ParsingFilename == false &&
969 "Must be in a preprocessing directive!");
970
971 // We are now parsing a filename!
972 ParsingFilename = true;
973
974 // Lex the filename.
975 Lex(FilenameTok);
976
977 // We should have obtained the filename now.
978 ParsingFilename = false;
979
980 // No filename?
Chris Lattnercb8e41c2007-10-09 18:02:16 +0000981 if (FilenameTok.is(tok::eom))
Chris Lattner4b009652007-07-25 00:24:17 +0000982 Diag(FilenameTok.getLocation(), diag::err_pp_expects_filename);
983}
984
985/// ReadToEndOfLine - Read the rest of the current preprocessor line as an
986/// uninterpreted string. This switches the lexer out of directive mode.
987std::string Lexer::ReadToEndOfLine() {
988 assert(ParsingPreprocessorDirective && ParsingFilename == false &&
989 "Must be in a preprocessing directive!");
990 std::string Result;
991 Token Tmp;
992
993 // CurPtr - Cache BufferPtr in an automatic variable.
994 const char *CurPtr = BufferPtr;
995 while (1) {
996 char Char = getAndAdvanceChar(CurPtr, Tmp);
997 switch (Char) {
998 default:
999 Result += Char;
1000 break;
1001 case 0: // Null.
1002 // Found end of file?
1003 if (CurPtr-1 != BufferEnd) {
1004 // Nope, normal character, continue.
1005 Result += Char;
1006 break;
1007 }
1008 // FALL THROUGH.
1009 case '\r':
1010 case '\n':
1011 // Okay, we found the end of the line. First, back up past the \0, \r, \n.
1012 assert(CurPtr[-1] == Char && "Trigraphs for newline?");
1013 BufferPtr = CurPtr-1;
1014
1015 // Next, lex the character, which should handle the EOM transition.
1016 Lex(Tmp);
Chris Lattnercb8e41c2007-10-09 18:02:16 +00001017 assert(Tmp.is(tok::eom) && "Unexpected token!");
Chris Lattner4b009652007-07-25 00:24:17 +00001018
1019 // Finally, we're done, return the string we found.
1020 return Result;
1021 }
1022 }
1023}
1024
1025/// LexEndOfFile - CurPtr points to the end of this file. Handle this
1026/// condition, reporting diagnostics and handling other edge cases as required.
1027/// This returns true if Result contains a token, false if PP.Lex should be
1028/// called again.
1029bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
1030 // If we hit the end of the file while parsing a preprocessor directive,
1031 // end the preprocessor directive first. The next token returned will
1032 // then be the end of file.
1033 if (ParsingPreprocessorDirective) {
1034 // Done parsing the "line".
1035 ParsingPreprocessorDirective = false;
1036 Result.setKind(tok::eom);
1037 // Update the location of token as well as BufferPtr.
1038 FormTokenWithChars(Result, CurPtr);
1039
1040 // Restore comment saving mode, in case it was disabled for directive.
1041 KeepCommentMode = PP.getCommentRetentionState();
1042 return true; // Have a token.
1043 }
1044
1045 // If we are in raw mode, return this event as an EOF token. Let the caller
1046 // that put us in raw mode handle the event.
1047 if (LexingRawMode) {
1048 Result.startToken();
1049 BufferPtr = BufferEnd;
1050 FormTokenWithChars(Result, BufferEnd);
1051 Result.setKind(tok::eof);
1052 return true;
1053 }
1054
1055 // Otherwise, issue diagnostics for unterminated #if and missing newline.
1056
1057 // If we are in a #if directive, emit an error.
1058 while (!ConditionalStack.empty()) {
1059 Diag(ConditionalStack.back().IfLoc, diag::err_pp_unterminated_conditional);
1060 ConditionalStack.pop_back();
1061 }
1062
1063 // If the file was empty or didn't end in a newline, issue a pedwarn.
1064 if (CurPtr[-1] != '\n' && CurPtr[-1] != '\r')
1065 Diag(BufferEnd, diag::ext_no_newline_eof);
1066
1067 BufferPtr = CurPtr;
1068
1069 // Finally, let the preprocessor handle this.
1070 return PP.HandleEndOfFile(Result);
1071}
1072
1073/// isNextPPTokenLParen - Return 1 if the next unexpanded token lexed from
1074/// the specified lexer will return a tok::l_paren token, 0 if it is something
1075/// else and 2 if there are no more tokens in the buffer controlled by the
1076/// lexer.
1077unsigned Lexer::isNextPPTokenLParen() {
1078 assert(!LexingRawMode && "How can we expand a macro from a skipping buffer?");
1079
1080 // Switch to 'skipping' mode. This will ensure that we can lex a token
1081 // without emitting diagnostics, disables macro expansion, and will cause EOF
1082 // to return an EOF token instead of popping the include stack.
1083 LexingRawMode = true;
1084
1085 // Save state that can be changed while lexing so that we can restore it.
1086 const char *TmpBufferPtr = BufferPtr;
1087
1088 Token Tok;
1089 Tok.startToken();
1090 LexTokenInternal(Tok);
1091
1092 // Restore state that may have changed.
1093 BufferPtr = TmpBufferPtr;
1094
1095 // Restore the lexer back to non-skipping mode.
1096 LexingRawMode = false;
1097
Chris Lattnercb8e41c2007-10-09 18:02:16 +00001098 if (Tok.is(tok::eof))
Chris Lattner4b009652007-07-25 00:24:17 +00001099 return 2;
Chris Lattnercb8e41c2007-10-09 18:02:16 +00001100 return Tok.is(tok::l_paren);
Chris Lattner4b009652007-07-25 00:24:17 +00001101}
1102
1103
1104/// LexTokenInternal - This implements a simple C family lexer. It is an
1105/// extremely performance critical piece of code. This assumes that the buffer
1106/// has a null character at the end of the file. Return true if an error
1107/// occurred and compilation should terminate, false if normal. This returns a
1108/// preprocessing token, not a normal token, as such, it is an internal
1109/// interface. It assumes that the Flags of result have been cleared before
1110/// calling this.
1111void Lexer::LexTokenInternal(Token &Result) {
1112LexNextToken:
1113 // New token, can't need cleaning yet.
1114 Result.clearFlag(Token::NeedsCleaning);
1115 Result.setIdentifierInfo(0);
1116
1117 // CurPtr - Cache BufferPtr in an automatic variable.
1118 const char *CurPtr = BufferPtr;
1119
1120 // Small amounts of horizontal whitespace is very common between tokens.
1121 if ((*CurPtr == ' ') || (*CurPtr == '\t')) {
1122 ++CurPtr;
1123 while ((*CurPtr == ' ') || (*CurPtr == '\t'))
1124 ++CurPtr;
1125 BufferPtr = CurPtr;
1126 Result.setFlag(Token::LeadingSpace);
1127 }
1128
1129 unsigned SizeTmp, SizeTmp2; // Temporaries for use in cases below.
1130
1131 // Read a character, advancing over it.
1132 char Char = getAndAdvanceChar(CurPtr, Result);
1133 switch (Char) {
1134 case 0: // Null.
1135 // Found end of file?
1136 if (CurPtr-1 == BufferEnd) {
1137 // Read the PP instance variable into an automatic variable, because
1138 // LexEndOfFile will often delete 'this'.
1139 Preprocessor &PPCache = PP;
1140 if (LexEndOfFile(Result, CurPtr-1)) // Retreat back into the file.
1141 return; // Got a token to return.
1142 return PPCache.Lex(Result);
1143 }
1144
1145 Diag(CurPtr-1, diag::null_in_file);
1146 Result.setFlag(Token::LeadingSpace);
1147 SkipWhitespace(Result, CurPtr);
1148 goto LexNextToken; // GCC isn't tail call eliminating.
1149 case '\n':
1150 case '\r':
1151 // If we are inside a preprocessor directive and we see the end of line,
1152 // we know we are done with the directive, so return an EOM token.
1153 if (ParsingPreprocessorDirective) {
1154 // Done parsing the "line".
1155 ParsingPreprocessorDirective = false;
1156
1157 // Restore comment saving mode, in case it was disabled for directive.
1158 KeepCommentMode = PP.getCommentRetentionState();
1159
1160 // Since we consumed a newline, we are back at the start of a line.
1161 IsAtStartOfLine = true;
1162
1163 Result.setKind(tok::eom);
1164 break;
1165 }
1166 // The returned token is at the start of the line.
1167 Result.setFlag(Token::StartOfLine);
1168 // No leading whitespace seen so far.
1169 Result.clearFlag(Token::LeadingSpace);
1170 SkipWhitespace(Result, CurPtr);
1171 goto LexNextToken; // GCC isn't tail call eliminating.
1172 case ' ':
1173 case '\t':
1174 case '\f':
1175 case '\v':
1176 SkipHorizontalWhitespace:
1177 Result.setFlag(Token::LeadingSpace);
1178 SkipWhitespace(Result, CurPtr);
1179
1180 SkipIgnoredUnits:
1181 CurPtr = BufferPtr;
1182
1183 // If the next token is obviously a // or /* */ comment, skip it efficiently
1184 // too (without going through the big switch stmt).
1185 if (CurPtr[0] == '/' && CurPtr[1] == '/' && !KeepCommentMode) {
1186 SkipBCPLComment(Result, CurPtr+2);
1187 goto SkipIgnoredUnits;
1188 } else if (CurPtr[0] == '/' && CurPtr[1] == '*' && !KeepCommentMode) {
1189 SkipBlockComment(Result, CurPtr+2);
1190 goto SkipIgnoredUnits;
1191 } else if (isHorizontalWhitespace(*CurPtr)) {
1192 goto SkipHorizontalWhitespace;
1193 }
1194 goto LexNextToken; // GCC isn't tail call eliminating.
1195
1196 case 'L':
1197 // Notify MIOpt that we read a non-whitespace/non-comment token.
1198 MIOpt.ReadToken();
1199 Char = getCharAndSize(CurPtr, SizeTmp);
1200
1201 // Wide string literal.
1202 if (Char == '"')
1203 return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
1204 true);
1205
1206 // Wide character constant.
1207 if (Char == '\'')
1208 return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result));
1209 // FALL THROUGH, treating L like the start of an identifier.
1210
1211 // C99 6.4.2: Identifiers.
1212 case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G':
1213 case 'H': case 'I': case 'J': case 'K': /*'L'*/case 'M': case 'N':
1214 case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U':
1215 case 'V': case 'W': case 'X': case 'Y': case 'Z':
1216 case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g':
1217 case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n':
1218 case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u':
1219 case 'v': case 'w': case 'x': case 'y': case 'z':
1220 case '_':
1221 // Notify MIOpt that we read a non-whitespace/non-comment token.
1222 MIOpt.ReadToken();
1223 return LexIdentifier(Result, CurPtr);
1224
1225 // C99 6.4.4.1: Integer Constants.
1226 // C99 6.4.4.2: Floating Constants.
1227 case '0': case '1': case '2': case '3': case '4':
1228 case '5': case '6': case '7': case '8': case '9':
1229 // Notify MIOpt that we read a non-whitespace/non-comment token.
1230 MIOpt.ReadToken();
1231 return LexNumericConstant(Result, CurPtr);
1232
1233 // C99 6.4.4: Character Constants.
1234 case '\'':
1235 // Notify MIOpt that we read a non-whitespace/non-comment token.
1236 MIOpt.ReadToken();
1237 return LexCharConstant(Result, CurPtr);
1238
1239 // C99 6.4.5: String Literals.
1240 case '"':
1241 // Notify MIOpt that we read a non-whitespace/non-comment token.
1242 MIOpt.ReadToken();
1243 return LexStringLiteral(Result, CurPtr, false);
1244
1245 // C99 6.4.6: Punctuators.
1246 case '?':
1247 Result.setKind(tok::question);
1248 break;
1249 case '[':
1250 Result.setKind(tok::l_square);
1251 break;
1252 case ']':
1253 Result.setKind(tok::r_square);
1254 break;
1255 case '(':
1256 Result.setKind(tok::l_paren);
1257 break;
1258 case ')':
1259 Result.setKind(tok::r_paren);
1260 break;
1261 case '{':
1262 Result.setKind(tok::l_brace);
1263 break;
1264 case '}':
1265 Result.setKind(tok::r_brace);
1266 break;
1267 case '.':
1268 Char = getCharAndSize(CurPtr, SizeTmp);
1269 if (Char >= '0' && Char <= '9') {
1270 // Notify MIOpt that we read a non-whitespace/non-comment token.
1271 MIOpt.ReadToken();
1272
1273 return LexNumericConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result));
1274 } else if (Features.CPlusPlus && Char == '*') {
1275 Result.setKind(tok::periodstar);
1276 CurPtr += SizeTmp;
1277 } else if (Char == '.' &&
1278 getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '.') {
1279 Result.setKind(tok::ellipsis);
1280 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
1281 SizeTmp2, Result);
1282 } else {
1283 Result.setKind(tok::period);
1284 }
1285 break;
1286 case '&':
1287 Char = getCharAndSize(CurPtr, SizeTmp);
1288 if (Char == '&') {
1289 Result.setKind(tok::ampamp);
1290 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1291 } else if (Char == '=') {
1292 Result.setKind(tok::ampequal);
1293 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1294 } else {
1295 Result.setKind(tok::amp);
1296 }
1297 break;
1298 case '*':
1299 if (getCharAndSize(CurPtr, SizeTmp) == '=') {
1300 Result.setKind(tok::starequal);
1301 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1302 } else {
1303 Result.setKind(tok::star);
1304 }
1305 break;
1306 case '+':
1307 Char = getCharAndSize(CurPtr, SizeTmp);
1308 if (Char == '+') {
1309 Result.setKind(tok::plusplus);
1310 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1311 } else if (Char == '=') {
1312 Result.setKind(tok::plusequal);
1313 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1314 } else {
1315 Result.setKind(tok::plus);
1316 }
1317 break;
1318 case '-':
1319 Char = getCharAndSize(CurPtr, SizeTmp);
1320 if (Char == '-') {
1321 Result.setKind(tok::minusminus);
1322 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1323 } else if (Char == '>' && Features.CPlusPlus &&
1324 getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '*') {
1325 Result.setKind(tok::arrowstar); // C++ ->*
1326 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
1327 SizeTmp2, Result);
1328 } else if (Char == '>') {
1329 Result.setKind(tok::arrow);
1330 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1331 } else if (Char == '=') {
1332 Result.setKind(tok::minusequal);
1333 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1334 } else {
1335 Result.setKind(tok::minus);
1336 }
1337 break;
1338 case '~':
1339 Result.setKind(tok::tilde);
1340 break;
1341 case '!':
1342 if (getCharAndSize(CurPtr, SizeTmp) == '=') {
1343 Result.setKind(tok::exclaimequal);
1344 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1345 } else {
1346 Result.setKind(tok::exclaim);
1347 }
1348 break;
1349 case '/':
1350 // 6.4.9: Comments
1351 Char = getCharAndSize(CurPtr, SizeTmp);
1352 if (Char == '/') { // BCPL comment.
1353 if (SkipBCPLComment(Result, ConsumeChar(CurPtr, SizeTmp, Result))) {
1354 // It is common for the tokens immediately after a // comment to be
1355 // whitespace (indentation for the next line). Instead of going through
1356 // the big switch, handle it efficiently now.
1357 goto SkipIgnoredUnits;
1358 }
1359 return; // KeepCommentMode
1360 } else if (Char == '*') { // /**/ comment.
1361 if (SkipBlockComment(Result, ConsumeChar(CurPtr, SizeTmp, Result)))
1362 goto LexNextToken; // GCC isn't tail call eliminating.
1363 return; // KeepCommentMode
1364 } else if (Char == '=') {
1365 Result.setKind(tok::slashequal);
1366 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1367 } else {
1368 Result.setKind(tok::slash);
1369 }
1370 break;
1371 case '%':
1372 Char = getCharAndSize(CurPtr, SizeTmp);
1373 if (Char == '=') {
1374 Result.setKind(tok::percentequal);
1375 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1376 } else if (Features.Digraphs && Char == '>') {
1377 Result.setKind(tok::r_brace); // '%>' -> '}'
1378 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1379 } else if (Features.Digraphs && Char == ':') {
1380 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1381 Char = getCharAndSize(CurPtr, SizeTmp);
1382 if (Char == '%' && getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == ':') {
1383 Result.setKind(tok::hashhash); // '%:%:' -> '##'
1384 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
1385 SizeTmp2, Result);
1386 } else if (Char == '@' && Features.Microsoft) { // %:@ -> #@ -> Charize
1387 Result.setKind(tok::hashat);
1388 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1389 Diag(BufferPtr, diag::charize_microsoft_ext);
1390 } else {
1391 Result.setKind(tok::hash); // '%:' -> '#'
1392
1393 // We parsed a # character. If this occurs at the start of the line,
1394 // it's actually the start of a preprocessing directive. Callback to
1395 // the preprocessor to handle it.
1396 // FIXME: -fpreprocessed mode??
1397 if (Result.isAtStartOfLine() && !LexingRawMode) {
1398 BufferPtr = CurPtr;
1399 PP.HandleDirective(Result);
1400
1401 // As an optimization, if the preprocessor didn't switch lexers, tail
1402 // recurse.
1403 if (PP.isCurrentLexer(this)) {
1404 // Start a new token. If this is a #include or something, the PP may
1405 // want us starting at the beginning of the line again. If so, set
1406 // the StartOfLine flag.
1407 if (IsAtStartOfLine) {
1408 Result.setFlag(Token::StartOfLine);
1409 IsAtStartOfLine = false;
1410 }
1411 goto LexNextToken; // GCC isn't tail call eliminating.
1412 }
1413
1414 return PP.Lex(Result);
1415 }
1416 }
1417 } else {
1418 Result.setKind(tok::percent);
1419 }
1420 break;
1421 case '<':
1422 Char = getCharAndSize(CurPtr, SizeTmp);
1423 if (ParsingFilename) {
1424 return LexAngledStringLiteral(Result, CurPtr+SizeTmp);
1425 } else if (Char == '<' &&
1426 getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '=') {
1427 Result.setKind(tok::lesslessequal);
1428 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
1429 SizeTmp2, Result);
1430 } else if (Char == '<') {
1431 Result.setKind(tok::lessless);
1432 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1433 } else if (Char == '=') {
1434 Result.setKind(tok::lessequal);
1435 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1436 } else if (Features.Digraphs && Char == ':') {
1437 Result.setKind(tok::l_square); // '<:' -> '['
1438 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1439 } else if (Features.Digraphs && Char == '>') {
1440 Result.setKind(tok::l_brace); // '<%' -> '{'
1441 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1442 } else {
1443 Result.setKind(tok::less);
1444 }
1445 break;
1446 case '>':
1447 Char = getCharAndSize(CurPtr, SizeTmp);
1448 if (Char == '=') {
1449 Result.setKind(tok::greaterequal);
1450 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1451 } else if (Char == '>' &&
1452 getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '=') {
1453 Result.setKind(tok::greatergreaterequal);
1454 CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
1455 SizeTmp2, Result);
1456 } else if (Char == '>') {
1457 Result.setKind(tok::greatergreater);
1458 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1459 } else {
1460 Result.setKind(tok::greater);
1461 }
1462 break;
1463 case '^':
1464 Char = getCharAndSize(CurPtr, SizeTmp);
1465 if (Char == '=') {
1466 Result.setKind(tok::caretequal);
1467 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1468 } else {
1469 Result.setKind(tok::caret);
1470 }
1471 break;
1472 case '|':
1473 Char = getCharAndSize(CurPtr, SizeTmp);
1474 if (Char == '=') {
1475 Result.setKind(tok::pipeequal);
1476 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1477 } else if (Char == '|') {
1478 Result.setKind(tok::pipepipe);
1479 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1480 } else {
1481 Result.setKind(tok::pipe);
1482 }
1483 break;
1484 case ':':
1485 Char = getCharAndSize(CurPtr, SizeTmp);
1486 if (Features.Digraphs && Char == '>') {
1487 Result.setKind(tok::r_square); // ':>' -> ']'
1488 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1489 } else if (Features.CPlusPlus && Char == ':') {
1490 Result.setKind(tok::coloncolon);
1491 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1492 } else {
1493 Result.setKind(tok::colon);
1494 }
1495 break;
1496 case ';':
1497 Result.setKind(tok::semi);
1498 break;
1499 case '=':
1500 Char = getCharAndSize(CurPtr, SizeTmp);
1501 if (Char == '=') {
1502 Result.setKind(tok::equalequal);
1503 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1504 } else {
1505 Result.setKind(tok::equal);
1506 }
1507 break;
1508 case ',':
1509 Result.setKind(tok::comma);
1510 break;
1511 case '#':
1512 Char = getCharAndSize(CurPtr, SizeTmp);
1513 if (Char == '#') {
1514 Result.setKind(tok::hashhash);
1515 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1516 } else if (Char == '@' && Features.Microsoft) { // #@ -> Charize
1517 Result.setKind(tok::hashat);
1518 Diag(BufferPtr, diag::charize_microsoft_ext);
1519 CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
1520 } else {
1521 Result.setKind(tok::hash);
1522 // We parsed a # character. If this occurs at the start of the line,
1523 // it's actually the start of a preprocessing directive. Callback to
1524 // the preprocessor to handle it.
1525 // FIXME: -fpreprocessed mode??
1526 if (Result.isAtStartOfLine() && !LexingRawMode) {
1527 BufferPtr = CurPtr;
1528 PP.HandleDirective(Result);
1529
1530 // As an optimization, if the preprocessor didn't switch lexers, tail
1531 // recurse.
1532 if (PP.isCurrentLexer(this)) {
1533 // Start a new token. If this is a #include or something, the PP may
1534 // want us starting at the beginning of the line again. If so, set
1535 // the StartOfLine flag.
1536 if (IsAtStartOfLine) {
1537 Result.setFlag(Token::StartOfLine);
1538 IsAtStartOfLine = false;
1539 }
1540 goto LexNextToken; // GCC isn't tail call eliminating.
1541 }
1542 return PP.Lex(Result);
1543 }
1544 }
1545 break;
1546
1547 case '\\':
1548 // FIXME: UCN's.
1549 // FALL THROUGH.
1550 default:
1551 // Objective C support.
1552 if (CurPtr[-1] == '@' && Features.ObjC1) {
1553 Result.setKind(tok::at);
1554 break;
1555 } else if (CurPtr[-1] == '$' && Features.DollarIdents) {// $ in identifiers.
1556 Diag(CurPtr-1, diag::ext_dollar_in_identifier);
1557 // Notify MIOpt that we read a non-whitespace/non-comment token.
1558 MIOpt.ReadToken();
1559 return LexIdentifier(Result, CurPtr);
1560 }
1561
1562 Result.setKind(tok::unknown);
1563 break;
1564 }
1565
1566 // Notify MIOpt that we read a non-whitespace/non-comment token.
1567 MIOpt.ReadToken();
1568
1569 // Update the location of token as well as BufferPtr.
1570 FormTokenWithChars(Result, CurPtr);
1571}