//===--- Lexer.cpp - C Language Family Lexer ------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License.  See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Lexer and LexerToken interfaces.
//
//===----------------------------------------------------------------------===//
//
// TODO: GCC Diagnostics emitted by the lexer:
// PEDWARN: (form feed|vertical tab) in preprocessing directive
//
// Universal characters, unicode, char mapping:
// WARNING: `%.*s' is not in NFKC
// WARNING: `%.*s' is not in NFC
//
// Other:
// ERROR  : attempt to use poisoned \"%s\"
//
// TODO: Options to support:
//   -fexec-charset,-fwide-exec-charset
//
//===----------------------------------------------------------------------===//

#include "clang/Lex/Lexer.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/SourceBuffer.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/Config/alloca.h"
#include <cassert>
#include <cctype>
#include <iostream>
using namespace llvm;
using namespace clang;

static void InitCharacterInfo();

Lexer::Lexer(const SourceBuffer *File, unsigned fileid, Preprocessor &pp)
  : BufferPtr(File->getBufferStart()), BufferStart(BufferPtr),
    BufferEnd(File->getBufferEnd()), InputFile(File), CurFileID(fileid), PP(pp),
    Features(PP.getLangOptions()) {
  InitCharacterInfo();

  assert(BufferEnd[0] == 0 &&
         "We assume that the input buffer has a null character at the end"
         " to simplify lexing!");

  // Start of the file is a start of line.
  IsAtStartOfLine = true;

  // We are not after parsing a #.
  ParsingPreprocessorDirective = false;

  // We are not after parsing #include.
  ParsingFilename = false;
}

//===----------------------------------------------------------------------===//
// LexerToken implementation.
//===----------------------------------------------------------------------===//

/// getSourceLocation - Return a source location identifier for the specified
/// offset in the current file.
SourceLocation LexerToken::getSourceLocation() const {
  if (TheLexer)
    return TheLexer->getSourceLocation(Start);
  return SourceLocation();
}


/// dump - Print the token to stderr, used for debugging.
///
void LexerToken::dump(bool DumpFlags) const {
  std::cerr << clang::tok::getTokenName(Kind) << " '";

  if (needsCleaning()) {
    if (getLexer())
      std::cerr << getLexer()->getSpelling(*this);
    else {
      // FIXME: expansion from macros clears location info.  Testcase:
      //   #define TWELVE 1\ <whitespace only>
      //   2
      //   TWELVE
      std::cerr << "*unspelled*" << std::string(getStart(), getEnd());
    }
  } else
    std::cerr << std::string(getStart(), getEnd());
  std::cerr << "'";

  if (DumpFlags) {
    std::cerr << "\t";
    if (isAtStartOfLine())
      std::cerr << " [StartOfLine]";
    if (hasLeadingSpace())
      std::cerr << " [LeadingSpace]";
    if (needsCleaning())
      std::cerr << " [Spelling='" << std::string(getStart(), getEnd()) << "']";
  }
}

//===----------------------------------------------------------------------===//
// Character information.
//===----------------------------------------------------------------------===//

static unsigned char CharInfo[256];

enum {
  CHAR_HORZ_WS  = 0x01,  // ' ', '\t', '\f', '\v'.  Note, no '\0'
  CHAR_VERT_WS  = 0x02,  // '\r', '\n'
  CHAR_LETTER   = 0x04,  // a-z,A-Z
  CHAR_NUMBER   = 0x08,  // 0-9
  CHAR_UNDER    = 0x10,  // _
  CHAR_PERIOD   = 0x20   // .
};

static void InitCharacterInfo() {
  static bool isInited = false;
  if (isInited) return;
  isInited = true;

  // Initialize the CharInfo table.
  // TODO: statically initialize this.
  CharInfo[(int)' '] = CharInfo[(int)'\t'] =
  CharInfo[(int)'\f'] = CharInfo[(int)'\v'] = CHAR_HORZ_WS;
  CharInfo[(int)'\n'] = CharInfo[(int)'\r'] = CHAR_VERT_WS;

  CharInfo[(int)'_'] = CHAR_UNDER;
  for (unsigned i = 'a'; i <= 'z'; ++i)
    CharInfo[i] = CharInfo[i+'A'-'a'] = CHAR_LETTER;
  for (unsigned i = '0'; i <= '9'; ++i)
    CharInfo[i] = CHAR_NUMBER;
}

/// isIdentifierBody - Return true if this is the body character of an
/// identifier, which is [a-zA-Z0-9_].
static inline bool isIdentifierBody(unsigned char c) {
  return CharInfo[c] & (CHAR_LETTER|CHAR_NUMBER|CHAR_UNDER);
}

/// isHorizontalWhitespace - Return true if this character is horizontal
/// whitespace: ' ', '\t', '\f', '\v'.  Note that this returns false for '\0'.
static inline bool isHorizontalWhitespace(unsigned char c) {
  return CharInfo[c] & CHAR_HORZ_WS;
}

/// isWhitespace - Return true if this character is horizontal or vertical
/// whitespace: ' ', '\t', '\f', '\v', '\n', '\r'.  Note that this returns false
/// for '\0'.
static inline bool isWhitespace(unsigned char c) {
  return CharInfo[c] & (CHAR_HORZ_WS|CHAR_VERT_WS);
}

/// isNumberBody - Return true if this is the body character of a
/// preprocessing number, which is [a-zA-Z0-9_.].
static inline bool isNumberBody(unsigned char c) {
  return CharInfo[c] & (CHAR_LETTER|CHAR_NUMBER|CHAR_UNDER|CHAR_PERIOD);
}
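
// For illustration only (not exercised by the lexer itself): once
// InitCharacterInfo has run, these predicates classify characters as follows:
//   isIdentifierBody('a')  -> true     isIdentifierBody('$')        -> false
//   isNumberBody('.')      -> true     isNumberBody('+')            -> false
//   isWhitespace('\n')     -> true     isHorizontalWhitespace('\n') -> false
// '$' in identifiers and the +/- of exponents are handled by LexIdentifier and
// LexNumericConstant below, not by this table.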

//===----------------------------------------------------------------------===//
// Diagnostics forwarding code.
//===----------------------------------------------------------------------===//

/// getSourceLocation - Return a source location identifier for the specified
/// offset in the current file.
SourceLocation Lexer::getSourceLocation(const char *Loc) const {
  assert(Loc >= InputFile->getBufferStart() && Loc <= InputFile->getBufferEnd()
         && "Location out of range for this buffer!");
  return SourceLocation(CurFileID, Loc-InputFile->getBufferStart());
}


/// Diag - Forwarding function for diagnostics.  This translates a source
/// position in the current buffer into a SourceLocation object for rendering.
void Lexer::Diag(const char *Loc, unsigned DiagID,
                 const std::string &Msg) const {
  PP.Diag(getSourceLocation(Loc), DiagID, Msg);
}

//===----------------------------------------------------------------------===//
// Trigraph and Escaped Newline Handling Code.
//===----------------------------------------------------------------------===//

/// GetTrigraphCharForLetter - Given a character that occurs after a ?? pair,
/// return the decoded trigraph letter it corresponds to, or '\0' if nothing.
static char GetTrigraphCharForLetter(char Letter) {
  switch (Letter) {
  default:   return 0;
  case '=':  return '#';
  case ')':  return ']';
  case '(':  return '[';
  case '!':  return '|';
  case '\'': return '^';
  case '>':  return '}';
  case '/':  return '\\';
  case '<':  return '{';
  case '-':  return '~';
  }
}
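
// For example, the source sequences "??=", "??/" and "??-" decode to '#',
// '\\' and '~' respectively, while something like "??x" is not a trigraph and
// yields '\0', so the caller treats the '?' as an ordinary character.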

/// DecodeTrigraphChar - If the specified character is a legal trigraph when
/// prefixed with ??, emit a warning about the trigraph (whether trigraphs are
/// enabled or not) and, if trigraphs are enabled, return the character it
/// decodes to.  Otherwise return '\0'.
static char DecodeTrigraphChar(const char *CP, Lexer *L) {
  char Res = GetTrigraphCharForLetter(*CP);
  if (Res && L) {
    if (!L->getFeatures().Trigraphs) {
      L->Diag(CP-2, diag::trigraph_ignored);
      return 0;
    } else {
      L->Diag(CP-2, diag::trigraph_converted, std::string()+Res);
    }
  }
  return Res;
}

/// getCharAndSizeSlow - Peek a single 'character' from the specified buffer,
/// get its size, and return it.  This is tricky in several cases:
///   1. If currently at the start of a trigraph, we warn about the trigraph,
///      then either return the trigraph (skipping 3 chars) or the '?',
///      depending on whether trigraphs are enabled or not.
///   2. If this is an escaped newline (potentially with whitespace between
///      the backslash and newline), implicitly skip the newline and return
///      the char after it.
///   3. If this is a UCN, return it.  FIXME: for C++?
///
/// This handles the slow/uncommon case of the getCharAndSize method.  Here we
/// know that we can accumulate into Size, and that we have already incremented
/// Ptr by Size bytes.
///
/// When this method is updated, getCharAndSizeSlowNoWarn (below) should be
/// updated to match.
///
char Lexer::getCharAndSizeSlow(const char *Ptr, unsigned &Size,
                               LexerToken *Tok) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
Slash:
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters followed by a newline.
    {
      unsigned SizeTmp = 0;
      do {
        ++SizeTmp;
        if (Ptr[SizeTmp-1] == '\n' || Ptr[SizeTmp-1] == '\r') {
          // Remember that this token needs to be cleaned.
          if (Tok) Tok->SetFlag(LexerToken::NeedsCleaning);

          // Warn if there was whitespace between the backslash and newline.
          if (SizeTmp != 1 && Tok)
            Diag(Ptr, diag::backslash_newline_space);

          // If this is a \r\n or \n\r, skip the newlines.
          if ((Ptr[SizeTmp] == '\r' || Ptr[SizeTmp] == '\n') &&
              Ptr[SizeTmp-1] != Ptr[SizeTmp])
            ++SizeTmp;

          // Found backslash<whitespace><newline>.  Parse the char after it.
          Size += SizeTmp;
          Ptr  += SizeTmp;
          // Use slow version to accumulate a correct size field.
          return getCharAndSizeSlow(Ptr, Size, Tok);
        }
      } while (isWhitespace(Ptr[SizeTmp]));
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), emit
    // a trigraph warning.  If so, and if trigraphs are enabled, return it.
    if (char C = DecodeTrigraphChar(Ptr+2, Tok ? this : 0)) {
      // Remember that this token needs to be cleaned.
      if (Tok) Tok->SetFlag(LexerToken::NeedsCleaning);

      Ptr += 3;
      Size += 3;
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}
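
// To illustrate the escaped-newline path above: if Ptr points at the '\\' in
// the byte sequence  '\\' '\n' 'y' , the call returns 'y' and adds 3 to Size
// (the backslash, the newline, and the recursive read of 'y').  If horizontal
// whitespace separates the backslash from the newline, the newline is still
// skipped, and a backslash_newline_space diagnostic is emitted when a token is
// being formed (Tok != 0).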

/// getCharAndSizeSlowNoWarn - Handle the slow/uncommon case of the
/// getCharAndSizeNoWarn method.  Here we know that we can accumulate into Size,
/// and that we have already incremented Ptr by Size bytes.
///
/// When this method is updated, getCharAndSizeSlow (above) should be updated to
/// match.
static char getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
                                     const LangOptions &Features) {
  // If we have a slash, look for an escaped newline.
  if (Ptr[0] == '\\') {
    ++Size;
    ++Ptr;
Slash:
    // Common case, backslash-char where the char is not whitespace.
    if (!isWhitespace(Ptr[0])) return '\\';

    // See if we have optional whitespace characters followed by a newline.
    {
      unsigned SizeTmp = 0;
      do {
        ++SizeTmp;
        if (Ptr[SizeTmp-1] == '\n' || Ptr[SizeTmp-1] == '\r') {

          // If this is a \r\n or \n\r, skip the newlines.
          if ((Ptr[SizeTmp] == '\r' || Ptr[SizeTmp] == '\n') &&
              Ptr[SizeTmp-1] != Ptr[SizeTmp])
            ++SizeTmp;

          // Found backslash<whitespace><newline>.  Parse the char after it.
          Size += SizeTmp;
          Ptr  += SizeTmp;

          // Use slow version to accumulate a correct size field.
          return getCharAndSizeSlowNoWarn(Ptr, Size, Features);
        }
      } while (isWhitespace(Ptr[SizeTmp]));
    }

    // Otherwise, this is not an escaped newline, just return the slash.
    return '\\';
  }

  // If this is a trigraph, process it.
  if (Features.Trigraphs && Ptr[0] == '?' && Ptr[1] == '?') {
    // If this is actually a legal trigraph (not something like "??x"), return
    // it.
    if (char C = GetTrigraphCharForLetter(Ptr[2])) {
      Ptr += 3;
      Size += 3;
      if (C == '\\') goto Slash;
      return C;
    }
  }

  // If this is neither, return a single character.
  ++Size;
  return *Ptr;
}

/// getCharAndSizeNoWarn - Like the getCharAndSize method, but does not ever
/// emit a warning.
static inline char getCharAndSizeNoWarn(const char *Ptr, unsigned &Size,
                                        const LangOptions &Features) {
  // If this is not a trigraph and not a UCN or escaped newline, return
  // quickly.
  if (Ptr[0] != '?' && Ptr[0] != '\\') {
    Size = 1;
    return *Ptr;
  }

  Size = 0;
  return getCharAndSizeSlowNoWarn(Ptr, Size, Features);
}


/// getSpelling() - Return the 'spelling' of this token.  The spelling of a
/// token is the characters used to represent the token in the source file
/// after trigraph expansion and escaped-newline folding.  In particular, this
/// wants to get the true, uncanonicalized, spelling of things like digraphs,
/// UCNs, etc.
std::string Lexer::getSpelling(const LexerToken &Tok,
                               const LangOptions &Features) {
  assert(Tok.getStart() <= Tok.getEnd() && "Token character range is bogus!");

  // If this token contains nothing interesting, return it directly.
  if (!Tok.needsCleaning())
    return std::string(Tok.getStart(), Tok.getEnd());

  // Otherwise, hard case, relex the characters into the string.
  std::string Result;
  Result.reserve(Tok.getLength());

  for (const char *Ptr = Tok.getStart(), *End = Tok.getEnd(); Ptr != End; ) {
    unsigned CharSize;
    Result.push_back(getCharAndSizeNoWarn(Ptr, CharSize, Features));
    Ptr += CharSize;
  }
  assert(Result.size() != unsigned(Tok.getLength()) &&
         "NeedsCleaning flag set on something that didn't need cleaning!");
  return Result;
}
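
// For example, an identifier whose source characters are "f", a backslash,
// a newline, then "oo" (an escaped newline in the middle of the token) has
// NeedsCleaning set, and the loop above computes the spelling "foo".
// Similarly, with trigraphs enabled, a '#' written as "??=" spells as "#".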

/// getSpelling - This method is used to get the spelling of a token into a
/// preallocated buffer, instead of as an std::string.  The caller is required
/// to allocate enough space for the token, which is guaranteed to be at most
/// Tok.End-Tok.Start bytes long.  The actual length of the token is returned.
unsigned Lexer::getSpelling(const LexerToken &Tok, char *Buffer,
                            const LangOptions &Features) {
  assert(Tok.getStart() <= Tok.getEnd() && "Token character range is bogus!");

  // If this token contains nothing interesting, return it directly.
  if (!Tok.needsCleaning()) {
    unsigned Size = Tok.getLength();
    memcpy(Buffer, Tok.getStart(), Size);
    return Size;
  }
  // Otherwise, hard case, relex the characters into the buffer.
  char *OutBuf = Buffer;
  for (const char *Ptr = Tok.getStart(), *End = Tok.getEnd(); Ptr != End; ) {
    unsigned CharSize;
    *OutBuf++ = getCharAndSizeNoWarn(Ptr, CharSize, Features);
    Ptr += CharSize;
  }
  assert(unsigned(OutBuf-Buffer) != Tok.getLength() &&
         "NeedsCleaning flag set on something that didn't need cleaning!");

  return OutBuf-Buffer;
}
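
// A typical use of this variant (as LexIdentifier below does) is to clean a
// token into a stack buffer sized by Tok.getLength(), which is always enough:
//   char *TmpBuf = (char*)alloca(Tok.getLength());
//   unsigned Len = getSpelling(Tok, TmpBuf);   // Len <= Tok.getLength()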


//===----------------------------------------------------------------------===//
// Helper methods for lexing.
//===----------------------------------------------------------------------===//

void Lexer::LexIdentifier(LexerToken &Result, const char *CurPtr) {
  // Match [_A-Za-z0-9]*, we have already matched [_A-Za-z$]
  unsigned Size;
  unsigned char C = *CurPtr++;
  while (isIdentifierBody(C)) {
    C = *CurPtr++;
  }
  --CurPtr;   // Back up over the skipped character.

  // Fast path, no $,\,? in identifier found.  '\' might be an escaped newline
  // or UCN, and ? might be a trigraph for '\', an escaped newline or UCN.
  // FIXME: universal chars.
  if (C != '\\' && C != '?' && (C != '$' || !Features.DollarIdents)) {
FinishIdentifier:
    Result.SetEnd(BufferPtr = CurPtr);
    Result.SetKind(tok::identifier);

    // Look up this token, see if it is a macro, or if it is a language keyword.
    const char *SpelledTokStart, *SpelledTokEnd;
    if (!Result.needsCleaning()) {
      // No cleaning needed, just use the characters from the lexed buffer.
      SpelledTokStart = Result.getStart();
      SpelledTokEnd   = Result.getEnd();
    } else {
      // Cleaning needed, alloca a buffer, clean into it, then use the buffer.
      char *TmpBuf = (char*)alloca(Result.getLength());
      unsigned Size = getSpelling(Result, TmpBuf);
      SpelledTokStart = TmpBuf;
      SpelledTokEnd   = TmpBuf+Size;
    }

    Result.SetIdentifierInfo(PP.getIdentifierInfo(SpelledTokStart,
                                                  SpelledTokEnd));
    return PP.HandleIdentifier(Result);
  }

  // Otherwise, $,\,? in identifier found.  Enter slower path.

  C = getCharAndSize(CurPtr, Size);
  while (1) {
    if (C == '$') {
      // If we hit a $ and they are not supported in identifiers, we are done.
      if (!Features.DollarIdents) goto FinishIdentifier;

      // Otherwise, emit a diagnostic and continue.
      Diag(CurPtr, diag::ext_dollar_in_identifier);
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      C = getCharAndSize(CurPtr, Size);
      continue;
    } else if (!isIdentifierBody(C)) { // FIXME: universal chars.
      // Found end of identifier.
      goto FinishIdentifier;
    }

    // Otherwise, this character is good, consume it.
    CurPtr = ConsumeChar(CurPtr, Size, Result);

    C = getCharAndSize(CurPtr, Size);
    while (isIdentifierBody(C)) { // FIXME: universal chars.
      CurPtr = ConsumeChar(CurPtr, Size, Result);
      C = getCharAndSize(CurPtr, Size);
    }
  }
}


/// LexNumericConstant - Lex the remainder of an integer or floating point
/// constant.  CurPtr[-1] is the first character lexed; the end of the constant
/// is recorded in Result and BufferPtr.
void Lexer::LexNumericConstant(LexerToken &Result, const char *CurPtr) {
  unsigned Size;
  char C = getCharAndSize(CurPtr, Size);
  char PrevCh = 0;
  while (isNumberBody(C)) { // FIXME: universal chars?
    CurPtr = ConsumeChar(CurPtr, Size, Result);
    PrevCh = C;
    C = getCharAndSize(CurPtr, Size);
  }

  // If we fell out, check for a sign, due to 1e+12.  If we have one, continue.
  if ((C == '-' || C == '+') && (PrevCh == 'E' || PrevCh == 'e'))
    return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));

  // If we have a hex FP constant, continue.
  if (Features.HexFloats &&
      (C == '-' || C == '+') && (PrevCh == 'P' || PrevCh == 'p'))
    return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));

  Result.SetKind(tok::numeric_constant);

  // Update the end of token position as well as the BufferPtr instance var.
  Result.SetEnd(BufferPtr = CurPtr);
}
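
// Examples of the sign handling above: for "1e+12" the number-body loop stops
// at '+', but because the previous character is 'e' we consume the sign and
// continue.  With hex floats enabled, "0x1.fp+2" likewise continues past '+'
// because the previous character is 'p'.  In "1+2" the '+' follows '1', so
// lexing stops there and '+' is lexed as its own token.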

/// LexStringLiteral - Lex the remainder of a string literal, after having lexed
/// either " or L".
void Lexer::LexStringLiteral(LexerToken &Result, const char *CurPtr) {
  const char *NulCharacter = 0; // Does this string contain the \0 character?

  char C = getAndAdvanceChar(CurPtr, Result);
  while (C != '"') {
    // Skip escaped characters.
    if (C == '\\') {
      // Skip the escaped character.
      C = getAndAdvanceChar(CurPtr, Result);
    } else if (C == '\n' || C == '\r' ||             // Newline.
               (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
      Diag(Result.getStart(), diag::err_unterminated_string);
      BufferPtr = CurPtr-1;
      return LexTokenInternal(Result);
    } else if (C == 0) {
      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  if (NulCharacter) Diag(NulCharacter, diag::null_in_string);

  Result.SetKind(tok::string_literal);

  // Update the end of token position as well as the BufferPtr instance var.
  Result.SetEnd(BufferPtr = CurPtr);
}

/// LexAngledStringLiteral - Lex the remainder of an angled string literal,
/// after having lexed the '<' character.  This is used for #include filenames.
void Lexer::LexAngledStringLiteral(LexerToken &Result, const char *CurPtr) {
  const char *NulCharacter = 0; // Does this string contain the \0 character?

  char C = getAndAdvanceChar(CurPtr, Result);
  while (C != '>') {
    // Skip escaped characters.
    if (C == '\\') {
      // Skip the escaped character.
      C = getAndAdvanceChar(CurPtr, Result);
    } else if (C == '\n' || C == '\r' ||             // Newline.
               (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
      Diag(Result.getStart(), diag::err_unterminated_string);
      BufferPtr = CurPtr-1;
      return LexTokenInternal(Result);
    } else if (C == 0) {
      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
  }

  if (NulCharacter) Diag(NulCharacter, diag::null_in_string);

  Result.SetKind(tok::angle_string_literal);

  // Update the end of token position as well as the BufferPtr instance var.
  Result.SetEnd(BufferPtr = CurPtr);
}


/// LexCharConstant - Lex the remainder of a character constant, after having
/// lexed either ' or L'.
void Lexer::LexCharConstant(LexerToken &Result, const char *CurPtr) {
  const char *NulCharacter = 0; // Does this character constant contain \0?

  // Handle the common case of 'x' and '\y' efficiently.
  char C = getAndAdvanceChar(CurPtr, Result);
  if (C == '\'') {
    Diag(Result.getStart(), diag::err_empty_character);
    BufferPtr = CurPtr;
    return LexTokenInternal(Result);
  } else if (C == '\\') {
    // Skip the escaped character.
    // FIXME: UCN's.
    C = getAndAdvanceChar(CurPtr, Result);
  }

  if (C && C != '\n' && C != '\r' && CurPtr[0] == '\'') {
    ++CurPtr;
  } else {
    // Fall back on generic code for embedded nulls, newlines, wide chars.
    do {
      // Skip escaped characters.
      if (C == '\\') {
        // Skip the escaped character.
        C = getAndAdvanceChar(CurPtr, Result);
      } else if (C == '\n' || C == '\r' ||             // Newline.
                 (C == 0 && CurPtr-1 == BufferEnd)) {  // End of file.
        Diag(Result.getStart(), diag::err_unterminated_char);
        BufferPtr = CurPtr-1;
        return LexTokenInternal(Result);
      } else if (C == 0) {
        NulCharacter = CurPtr-1;
      }
      C = getAndAdvanceChar(CurPtr, Result);
    } while (C != '\'');
  }

  if (NulCharacter) Diag(NulCharacter, diag::null_in_char);

  Result.SetKind(tok::char_constant);

  // Update the end of token position as well as the BufferPtr instance var.
  Result.SetEnd(BufferPtr = CurPtr);
}

/// SkipWhitespace - Efficiently skip over a series of whitespace characters.
/// Update BufferPtr to point to the next non-whitespace character and return.
void Lexer::SkipWhitespace(LexerToken &Result, const char *CurPtr) {
  // Whitespace - Skip it, then return the token after the whitespace.
  unsigned char Char = *CurPtr;  // Skip consecutive spaces efficiently.
  while (1) {
    // Skip horizontal whitespace very aggressively.
    while (isHorizontalWhitespace(Char))
      Char = *++CurPtr;

    // Otherwise, if we have something other than whitespace, we're done.
    if (Char != '\n' && Char != '\r')
      break;

    if (ParsingPreprocessorDirective) {
      // End of preprocessor directive line, let LexTokenInternal handle this.
      BufferPtr = CurPtr;
      return;
    }

    // OK, we saw a newline; handle it.
    // The returned token is at the start of the line.
    Result.SetFlag(LexerToken::StartOfLine);
    // No leading whitespace seen so far.
    Result.ClearFlag(LexerToken::LeadingSpace);
    Char = *++CurPtr;
  }

  // If this isn't immediately after a newline, there is leading space.
  char PrevChar = CurPtr[-1];
  if (PrevChar != '\n' && PrevChar != '\r')
    Result.SetFlag(LexerToken::LeadingSpace);

  // If the next token is obviously a // or /* */ comment, skip it efficiently
  // too (without going through the big switch stmt).
  if (Char == '/' && CurPtr[1] == '/') {
    Result.SetStart(CurPtr);
    return SkipBCPLComment(Result, CurPtr+1);
  }
  if (Char == '/' && CurPtr[1] == '*') {
    Result.SetStart(CurPtr);
    return SkipBlockComment(Result, CurPtr+2);
  }
  BufferPtr = CurPtr;
}

/// SkipBCPLComment - We have just read the // characters from input.  Skip
/// until we find the newline character that terminates the comment.  Then
/// update BufferPtr and return.
void Lexer::SkipBCPLComment(LexerToken &Result, const char *CurPtr) {
  // If BCPL comments aren't explicitly enabled for this language, emit an
  // extension warning.
  if (!Features.BCPLComment) {
    Diag(Result.getStart(), diag::ext_bcpl_comment);

    // Mark them enabled so we only emit one warning for this translation
    // unit.
    Features.BCPLComment = true;
  }

  // Scan over the body of the comment.  The common case, when scanning, is that
  // the comment contains normal ascii characters with nothing interesting in
  // them.  As such, optimize for this case with the inner loop.
  char C;
  do {
    C = *CurPtr;
    // FIXME: just scan for a \n or \r character.  If we find a \n character,
    // scan backwards, checking to see if it's an escaped newline, like we do
    // for block comments.

    // Skip over characters in the fast loop.
    while (C != 0 &&                // Potentially EOF.
           C != '\\' &&             // Potentially escaped newline.
           C != '?' &&              // Potentially trigraph.
           C != '\n' && C != '\r')  // Newline or DOS-style newline.
      C = *++CurPtr;

    // If this is a newline, we're done.
    if (C == '\n' || C == '\r')
      break;  // Found the newline? Break out!

    // Otherwise, this is a hard case.  Fall back on getAndAdvanceChar to
    // properly decode the character.
    const char *OldPtr = CurPtr;
    C = getAndAdvanceChar(CurPtr, Result);

    // If we read multiple characters, and one of those characters was a \r or
    // \n, then we had an escaped newline within the comment.  Emit diagnostic.
    if (CurPtr != OldPtr+1) {
      for (; OldPtr != CurPtr; ++OldPtr)
        if (OldPtr[0] == '\n' || OldPtr[0] == '\r') {
          Diag(OldPtr-1, diag::ext_multi_line_bcpl_comment);
          break;
        }
    }

    if (CurPtr == BufferEnd+1) goto FoundEOF;
  } while (C != '\n' && C != '\r');

  // Found but did not consume the newline.

  // If we are inside a preprocessor directive and we see the end of line,
  // return immediately, so that the lexer can return this as an EOM token.
  if (ParsingPreprocessorDirective) {
    BufferPtr = CurPtr;
    return;
  }

  // Otherwise, eat the \n character.  We don't care if this is a \n\r or
  // \r\n sequence.
  ++CurPtr;

  // The next returned token is at the start of the line.
  Result.SetFlag(LexerToken::StartOfLine);
  // No leading whitespace seen so far.
  Result.ClearFlag(LexerToken::LeadingSpace);

  // It is common for the tokens immediately after a // comment to be
  // whitespace (indentation for the next line).  Instead of going through the
  // big switch, handle it efficiently now.
  if (isWhitespace(*CurPtr)) {
    Result.SetFlag(LexerToken::LeadingSpace);
    return SkipWhitespace(Result, CurPtr+1);
  }

  BufferPtr = CurPtr;
  return;

FoundEOF:   // If we ran off the end of the buffer, return EOF.
  BufferPtr = CurPtr-1;
  return;
}
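
// Note that a "//" comment can be spliced onto the following line by an
// escaped newline (a backslash, optionally followed by whitespace, at the end
// of the comment line).  The getAndAdvanceChar call above decodes that case,
// and the ext_multi_line_bcpl_comment diagnostic warns about it.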

/// isEndOfBlockCommentWithEscapedNewLine - Return true if the specified
/// newline character (either \n or \r) is part of an escaped newline sequence
/// that, together with a preceding '*' and a following '/', terminates a block
/// comment.  Issue a diagnostic if so.  We know that the newline is inside of
/// a block comment.
static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr,
                                                  Lexer *L) {
  assert(CurPtr[0] == '\n' || CurPtr[0] == '\r');

  // Back up off the newline.
  --CurPtr;

  // If this is a two-character newline sequence, skip the other character.
  if (CurPtr[0] == '\n' || CurPtr[0] == '\r') {
    // \n\n or \r\r -> not escaped newline.
    if (CurPtr[0] == CurPtr[1])
      return false;
    // \n\r or \r\n -> skip the newline.
    --CurPtr;
  }

  // If we have horizontal whitespace, skip over it.  We allow whitespace
  // between the backslash and newline.
  bool HasSpace = false;
  while (isHorizontalWhitespace(*CurPtr) || *CurPtr == 0) {
    --CurPtr;
    HasSpace = true;
  }

  // If we have a slash, we know this is an escaped newline.
  if (*CurPtr == '\\') {
    if (CurPtr[-1] != '*') return false;
  } else {
    // It isn't a slash, is it the ?? / trigraph?
    if (CurPtr[0] != '/' || CurPtr[-1] != '?' || CurPtr[-2] != '?' ||
        CurPtr[-3] != '*')
      return false;

    // This is the trigraph ending the comment.  Emit a stern warning!
    CurPtr -= 2;

    // If no trigraphs are enabled, warn that we ignored this trigraph and
    // ignore this * character.
    if (!L->getFeatures().Trigraphs) {
      L->Diag(CurPtr, diag::trigraph_ignored_block_comment);
      return false;
    }
    L->Diag(CurPtr, diag::trigraph_ends_block_comment);
  }

  // Warn about having an escaped newline between the */ characters.
  L->Diag(CurPtr, diag::escaped_newline_block_comment_end);

  // If there was space between the backslash and newline, warn about it.
  if (HasSpace) L->Diag(CurPtr, diag::backslash_newline_space);

  return true;
}
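
// Two examples of comment terminators this function accepts: a '*' followed
// by an escaped newline and then '/', and the same thing with the backslash
// written as the "??/" trigraph.  Both forms produce diagnostics; in the
// trigraph case, if trigraphs are disabled the sequence is not treated as the
// end of the comment at all.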

/// SkipBlockComment - We have just read the /* characters from input.  Read
/// until we find the */ characters that terminate the comment.  Note that we
/// don't bother decoding trigraphs or escaped newlines in block comments,
/// because they cannot cause the comment to end.  The only thing that can
/// happen is the comment could end with an escaped newline between the '*' and
/// '/' that close the comment.
void Lexer::SkipBlockComment(LexerToken &Result, const char *CurPtr) {
  // Scan one character past where we should, looking for a '/' character.  Once
  // we find it, check to see if it was preceded by a *.  This common
  // optimization helps people who like to put a lot of * characters in their
  // comments.
  unsigned char C = *CurPtr++;
  if (C == 0 && CurPtr == BufferEnd+1) {
    Diag(Result.getStart(), diag::err_unterminated_block_comment);
    BufferPtr = CurPtr-1;
    return;
  }

  while (1) {
    // Skip over all non-interesting characters.
    // TODO: Vectorize this.  Note: memchr on Darwin is slower than this loop.
    while (C != '/' && C != '\0')
      C = *CurPtr++;

    if (C == '/') {
      if (CurPtr[-2] == '*')  // We found the final */.  We're done!
        break;

      if ((CurPtr[-2] == '\n' || CurPtr[-2] == '\r')) {
        if (isEndOfBlockCommentWithEscapedNewLine(CurPtr-2, this)) {
          // We found the final */, though it had an escaped newline between the
          // * and /.  We're done!
          break;
        }
      }
      if (CurPtr[0] == '*' && CurPtr[1] != '/') {
        // If this is a /* inside of the comment, emit a warning.  Don't do this
        // if this is a /*/, which will end the comment.  This misses cases with
        // embedded escaped newlines, but oh well.
        Diag(CurPtr-1, diag::nested_block_comment);
      }
    } else if (C == 0 && CurPtr == BufferEnd+1) {
      Diag(Result.getStart(), diag::err_unterminated_block_comment);
      // Note: the user probably forgot a */.  We could continue immediately
      // after the /*, but this would involve lexing a lot of what really is the
      // comment, which surely would confuse the parser.
      BufferPtr = CurPtr-1;
      return;
    }
    C = *CurPtr++;
  }

  // It is common for the tokens immediately after a /**/ comment to be
  // whitespace.  Instead of going through the big switch, handle it
  // efficiently now.
  if (isHorizontalWhitespace(*CurPtr)) {
    Result.SetFlag(LexerToken::LeadingSpace);
    return SkipWhitespace(Result, CurPtr+1);
  }

  // Otherwise, just return so that the next character will be lexed as a token.
  BufferPtr = CurPtr;
  Result.SetFlag(LexerToken::LeadingSpace);
}

//===----------------------------------------------------------------------===//
// Primary Lexing Entry Points
//===----------------------------------------------------------------------===//

/// LexIncludeFilename - After the preprocessor has parsed a #include, lex and
/// (potentially) macro expand the filename.
void Lexer::LexIncludeFilename(LexerToken &Result) {
  assert(ParsingPreprocessorDirective &&
         ParsingFilename == false &&
         "Must be in a preprocessing directive!");

  // We are now parsing a filename!
  ParsingFilename = true;

  // There should be exactly two tokens here if everything is good: first the
  // filename, then the EOM.
  Lex(Result);

  // We should have gotten the filename now.
  ParsingFilename = false;

  // No filename?
  if (Result.getKind() == tok::eom) {
    Diag(Result.getStart(), diag::err_pp_expects_filename);
    return;
  }

  // Verify that there is nothing after the filename, other than EOM.
  LexerToken EndTok;
  Lex(EndTok);

  if (EndTok.getKind() != tok::eom) {
    Diag(Result.getStart(), diag::err_pp_expects_filename);

    // Lex until the end of the preprocessor directive line.
    while (EndTok.getKind() != tok::eom)
      Lex(EndTok);

    Result.SetKind(tok::eom);
  }
}

/// ReadToEndOfLine - Read the rest of the current preprocessor line as an
/// uninterpreted string.  This switches the lexer out of directive mode.
std::string Lexer::ReadToEndOfLine() {
  assert(ParsingPreprocessorDirective && ParsingFilename == false &&
         "Must be in a preprocessing directive!");
  std::string Result;
  LexerToken Tmp;

  // CurPtr - Cache BufferPtr in an automatic variable.
  const char *CurPtr = BufferPtr;
  Tmp.SetStart(CurPtr);

  while (1) {
    char Char = getAndAdvanceChar(CurPtr, Tmp);
    switch (Char) {
    default:
      Result += Char;
      break;
    case 0:  // Null.
      // Found end of file?
      if (CurPtr-1 != BufferEnd) {
        // Nope, normal character, continue.
        Result += Char;
        break;
      }
      // FALL THROUGH.
    case '\r':
    case '\n':
      // Okay, we found the end of the line.  First, back up past the \0, \r, \n.
      assert(CurPtr[-1] == Char && "Trigraphs for newline?");
      BufferPtr = CurPtr-1;

      // Next, lex the character, which should handle the EOM transition.
      Lex(Tmp);
      assert(Tmp.getKind() == tok::eom && "Unexpected token!");

      // Finally, we're done, return the string we found.
      return Result;
    }
  }
}

/// LexEndOfFile - CurPtr points to the end of this file.  Handle this
/// condition, reporting diagnostics and handling other edge cases as required.
void Lexer::LexEndOfFile(LexerToken &Result, const char *CurPtr) {
  // If we hit the end of the file while parsing a preprocessor directive,
  // end the preprocessor directive first.  The next token returned will
  // then be the end of file.
  if (ParsingPreprocessorDirective) {
    // Done parsing the "line".
    ParsingPreprocessorDirective = false;
    Result.SetKind(tok::eom);
    // Update the end of token position as well as the BufferPtr instance var.
    Result.SetEnd(BufferPtr = CurPtr);
    return;
  }

  // If we are in a #if directive, emit an error.
  while (!ConditionalStack.empty()) {
    Diag(ConditionalStack.back().IfLoc, diag::err_pp_unterminated_conditional);
    ConditionalStack.pop_back();
  }

  // If the file was empty or didn't end in a newline, issue a pedwarn.  Check
  // for the empty-file case first so we never read before the buffer start.
  if (CurPtr == BufferStart || (CurPtr[-1] != '\n' && CurPtr[-1] != '\r'))
    Diag(BufferEnd, diag::ext_no_newline_eof);

  BufferPtr = CurPtr;
  PP.HandleEndOfFile(Result);
}


/// LexTokenInternal - This implements a simple C family lexer.  It is an
/// extremely performance critical piece of code.  This assumes that the buffer
/// has a null character at the end of the file.  This returns a preprocessing
/// token, not a normal token; as such, it is an internal interface.  It assumes
/// that the Flags of Result have been cleared before calling this.
void Lexer::LexTokenInternal(LexerToken &Result) {
LexNextToken:
  // New token, can't need cleaning yet.
  Result.ClearFlag(LexerToken::NeedsCleaning);

  // CurPtr - Cache BufferPtr in an automatic variable.
  const char *CurPtr = BufferPtr;
  Result.SetStart(CurPtr);

  unsigned SizeTmp, SizeTmp2;   // Temporaries for use in cases below.

  // Read a character, advancing over it.
  char Char = getAndAdvanceChar(CurPtr, Result);
  switch (Char) {
  case 0:  // Null.
    // Found end of file?
    if (CurPtr-1 == BufferEnd)
      return LexEndOfFile(Result, CurPtr-1);  // Retreat back into the file.

    Diag(CurPtr-1, diag::null_in_file);
    Result.SetFlag(LexerToken::LeadingSpace);
    SkipWhitespace(Result, CurPtr);
    goto LexNextToken;   // GCC isn't tail call eliminating.
  case '\n':
  case '\r':
    // If we are inside a preprocessor directive and we see the end of line,
    // we know we are done with the directive, so return an EOM token.
    if (ParsingPreprocessorDirective) {
      // Done parsing the "line".
      ParsingPreprocessorDirective = false;

      // Since we consumed a newline, we are back at the start of a line.
      IsAtStartOfLine = true;

      Result.SetKind(tok::eom);
      break;
    }
    // The returned token is at the start of the line.
    Result.SetFlag(LexerToken::StartOfLine);
    // No leading whitespace seen so far.
    Result.ClearFlag(LexerToken::LeadingSpace);
    SkipWhitespace(Result, CurPtr);
    goto LexNextToken;   // GCC isn't tail call eliminating.
  case ' ':
  case '\t':
  case '\f':
  case '\v':
    Result.SetFlag(LexerToken::LeadingSpace);
    SkipWhitespace(Result, CurPtr);
    goto LexNextToken;   // GCC isn't tail call eliminating.

  case 'L':
    Char = getCharAndSize(CurPtr, SizeTmp);

    // Wide string literal.
    if (Char == '"')
      return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result));

    // Wide character constant.
    if (Char == '\'')
      return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result));
    // FALL THROUGH, treating L like the start of an identifier.

  // C99 6.4.2: Identifiers.
  case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G':
  case 'H': case 'I': case 'J': case 'K':    /*'L'*/case 'M': case 'N':
  case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U':
  case 'V': case 'W': case 'X': case 'Y': case 'Z':
  case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g':
  case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n':
  case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u':
  case 'v': case 'w': case 'x': case 'y': case 'z':
  case '_':
    return LexIdentifier(Result, CurPtr);

  // C99 6.4.4.1: Integer Constants.
  // C99 6.4.4.2: Floating Constants.
  case '0': case '1': case '2': case '3': case '4':
  case '5': case '6': case '7': case '8': case '9':
    return LexNumericConstant(Result, CurPtr);

  // C99 6.4.4: Character Constants.
  case '\'':
    return LexCharConstant(Result, CurPtr);

  // C99 6.4.5: String Literals.
  case '"':
    return LexStringLiteral(Result, CurPtr);

  // C99 6.4.6: Punctuators.
  case '?':
    Result.SetKind(tok::question);
    break;
  case '[':
    Result.SetKind(tok::l_square);
    break;
  case ']':
    Result.SetKind(tok::r_square);
    break;
  case '(':
    Result.SetKind(tok::l_paren);
    break;
  case ')':
    Result.SetKind(tok::r_paren);
    break;
  case '{':
    Result.SetKind(tok::l_brace);
    break;
  case '}':
    Result.SetKind(tok::r_brace);
    break;
  case '.':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char >= '0' && Char <= '9') {
      return LexNumericConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result));
    } else if (Features.CPlusPlus && Char == '*') {
      Result.SetKind(tok::periodstar);
      CurPtr += SizeTmp;
    } else if (Char == '.' &&
               getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '.') {
      Result.SetKind(tok::ellipsis);
      CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                           SizeTmp2, Result);
    } else {
      Result.SetKind(tok::period);
    }
    break;
  case '&':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '&') {
      Result.SetKind(tok::ampamp);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Char == '=') {
      Result.SetKind(tok::ampequal);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Result.SetKind(tok::amp);
    }
    break;
  case '*':
    if (getCharAndSize(CurPtr, SizeTmp) == '=') {
      Result.SetKind(tok::starequal);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Result.SetKind(tok::star);
    }
    break;
  case '+':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '+') {
      Result.SetKind(tok::plusplus);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Char == '=') {
      Result.SetKind(tok::plusequal);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Result.SetKind(tok::plus);
    }
    break;
  case '-':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '-') {
      Result.SetKind(tok::minusminus);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Char == '>' && Features.CPlusPlus &&
               getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '*') {
      Result.SetKind(tok::arrowstar);  // C++ ->*
      CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                           SizeTmp2, Result);
    } else if (Char == '>') {
      Result.SetKind(tok::arrow);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Char == '=') {
      Result.SetKind(tok::minusequal);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Result.SetKind(tok::minus);
    }
    break;
  case '~':
    Result.SetKind(tok::tilde);
    break;
  case '!':
    if (getCharAndSize(CurPtr, SizeTmp) == '=') {
      Result.SetKind(tok::exclaimequal);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Result.SetKind(tok::exclaim);
    }
    break;
  case '/':
    // 6.4.9: Comments
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '/') {         // BCPL comment.
      Result.SetFlag(LexerToken::LeadingSpace);
      SkipBCPLComment(Result, ConsumeChar(CurPtr, SizeTmp, Result));
      goto LexNextToken;   // GCC isn't tail call eliminating.
    } else if (Char == '*') {  // /**/ comment.
      Result.SetFlag(LexerToken::LeadingSpace);
      SkipBlockComment(Result, ConsumeChar(CurPtr, SizeTmp, Result));
      goto LexNextToken;   // GCC isn't tail call eliminating.
    } else if (Char == '=') {
      Result.SetKind(tok::slashequal);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Result.SetKind(tok::slash);
    }
    break;
  case '%':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '=') {
      Result.SetKind(tok::percentequal);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Features.Digraphs && Char == '>') {
      Result.SetKind(tok::r_brace);    // '%>' -> '}'
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Features.Digraphs && Char == ':') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      if (getCharAndSize(CurPtr, SizeTmp) == '%' &&
          getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == ':') {
        Result.SetKind(tok::hashhash);   // '%:%:' -> '##'
        CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                             SizeTmp2, Result);
      } else {
        Result.SetKind(tok::hash);       // '%:' -> '#'

        // We parsed a # character.  If this occurs at the start of the line,
        // it's actually the start of a preprocessing directive.  Callback to
        // the preprocessor to handle it.
        // FIXME: -fpreprocessed mode??
        if (Result.isAtStartOfLine() && !PP.isSkipping()) {
          BufferPtr = CurPtr;
          PP.HandleDirective(Result);

          // As an optimization, if the preprocessor didn't switch lexers, tail
          // recurse.
          if (PP.isCurrentLexer(this)) {
            // Start a new token.  If this is a #include or something, the PP may
            // want us starting at the beginning of the line again.  If so, set
            // the StartOfLine flag.
            if (IsAtStartOfLine) {
              Result.SetFlag(LexerToken::StartOfLine);
              IsAtStartOfLine = false;
            }
            goto LexNextToken;   // GCC isn't tail call eliminating.
          }

          return PP.Lex(Result);
        }
      }
    } else {
      Result.SetKind(tok::percent);
    }
    break;
  case '<':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (ParsingFilename) {
      return LexAngledStringLiteral(Result, CurPtr+SizeTmp);
    } else if (Char == '<' &&
               getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '=') {
      Result.SetKind(tok::lesslessequal);
      CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                           SizeTmp2, Result);
    } else if (Char == '<') {
      Result.SetKind(tok::lessless);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Char == '=') {
      Result.SetKind(tok::lessequal);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Features.Digraphs && Char == ':') {
      Result.SetKind(tok::l_square);   // '<:' -> '['
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Features.Digraphs && Char == '%') {
      Result.SetKind(tok::l_brace);    // '<%' -> '{'
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Features.CPPMinMax && Char == '?') {     // <?
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Diag(Result.getStart(), diag::min_max_deprecated);

      if (getCharAndSize(CurPtr, SizeTmp) == '=') {     // <?=
        Result.SetKind(tok::lessquestionequal);
        CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      } else {
        Result.SetKind(tok::lessquestion);
      }
    } else {
      Result.SetKind(tok::less);
    }
    break;
  case '>':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '=') {
      Result.SetKind(tok::greaterequal);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Char == '>' &&
               getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '=') {
      Result.SetKind(tok::greatergreaterequal);
      CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
                           SizeTmp2, Result);
    } else if (Char == '>') {
      Result.SetKind(tok::greatergreater);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Features.CPPMinMax && Char == '?') {
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      Diag(Result.getStart(), diag::min_max_deprecated);

      if (getCharAndSize(CurPtr, SizeTmp) == '=') {
        Result.SetKind(tok::greaterquestionequal);    // >?=
        CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
      } else {
        Result.SetKind(tok::greaterquestion);         // >?
      }
    } else {
      Result.SetKind(tok::greater);
    }
    break;
  case '^':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '=') {
      Result.SetKind(tok::caretequal);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Result.SetKind(tok::caret);
    }
    break;
  case '|':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '=') {
      Result.SetKind(tok::pipeequal);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Char == '|') {
      Result.SetKind(tok::pipepipe);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Result.SetKind(tok::pipe);
    }
    break;
  case ':':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Features.Digraphs && Char == '>') {
      Result.SetKind(tok::r_square);   // ':>' -> ']'
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else if (Features.CPlusPlus && Char == ':') {
      Result.SetKind(tok::coloncolon);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Result.SetKind(tok::colon);
    }
    break;
  case ';':
    Result.SetKind(tok::semi);
    break;
  case '=':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '=') {
      Result.SetKind(tok::equalequal);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Result.SetKind(tok::equal);
    }
    break;
  case ',':
    Result.SetKind(tok::comma);
    break;
  case '#':
    Char = getCharAndSize(CurPtr, SizeTmp);
    if (Char == '#') {
      Result.SetKind(tok::hashhash);
      CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
    } else {
      Result.SetKind(tok::hash);
      // We parsed a # character.  If this occurs at the start of the line,
      // it's actually the start of a preprocessing directive.  Callback to
      // the preprocessor to handle it.
      // FIXME: not in preprocessed mode??
      if (Result.isAtStartOfLine() && !PP.isSkipping()) {
        BufferPtr = CurPtr;
        PP.HandleDirective(Result);

        // As an optimization, if the preprocessor didn't switch lexers, tail
        // recurse.
        if (PP.isCurrentLexer(this)) {
          // Start a new token.  If this is a #include or something, the PP may
          // want us starting at the beginning of the line again.  If so, set
          // the StartOfLine flag.
          if (IsAtStartOfLine) {
            Result.SetFlag(LexerToken::StartOfLine);
            IsAtStartOfLine = false;
          }
          goto LexNextToken;   // GCC isn't tail call eliminating.
        }
        return PP.Lex(Result);
      }
    }
    break;

  case '\\':
    // FIXME: handle UCN's.
    // FALL THROUGH.
  default:
    // Objective C support.
    if (CurPtr[-1] == '@' && Features.ObjC1) {
      Result.SetKind(tok::at);
      break;
    } else if (CurPtr[-1] == '$' && Features.DollarIdents) { // $ in identifiers.
      Diag(CurPtr-1, diag::ext_dollar_in_identifier);
      return LexIdentifier(Result, CurPtr);
    }

    if (!PP.isSkipping()) Diag(CurPtr-1, diag::err_stray_character);
    BufferPtr = CurPtr;
    goto LexNextToken;   // GCC isn't tail call eliminating.
  }

  // Update the end of token position as well as the BufferPtr instance var.
  Result.SetEnd(BufferPtr = CurPtr);
}