blob: 93017937d532cde6aabb7454e5ab05a79e578c4a [file] [log] [blame]
Reid Spencer5f016e22007-07-11 17:01:13 +00001//===--- Preprocess.cpp - C Language Family Preprocessor Implementation ---===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file was developed by Chris Lattner and is distributed under
6// the University of Illinois Open Source License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the Preprocessor interface.
11//
12//===----------------------------------------------------------------------===//
13//
14// Options to support:
15// -H - Print the name of each header file used.
16// -d[MDNI] - Dump various things.
17// -fworking-directory - #line's with preprocessor's working dir.
18// -fpreprocessed
19// -dependency-file,-M,-MM,-MF,-MG,-MP,-MT,-MQ,-MD,-MMD
20// -W*
21// -w
22//
23// Messages to emit:
24// "Multiple include guards may be useful for:\n"
25//
26//===----------------------------------------------------------------------===//
27
28#include "clang/Lex/Preprocessor.h"
29#include "clang/Lex/HeaderSearch.h"
30#include "clang/Lex/MacroInfo.h"
31#include "clang/Lex/PPCallbacks.h"
32#include "clang/Lex/Pragma.h"
33#include "clang/Lex/ScratchBuffer.h"
34#include "clang/Basic/Diagnostic.h"
35#include "clang/Basic/FileManager.h"
36#include "clang/Basic/SourceManager.h"
37#include "clang/Basic/TargetInfo.h"
38#include "llvm/ADT/SmallVector.h"
Chris Lattner97ba77c2007-07-16 06:48:38 +000039#include "llvm/Support/MemoryBuffer.h"
Reid Spencer5f016e22007-07-11 17:01:13 +000040#include <iostream>
Reid Spencer5f016e22007-07-11 17:01:13 +000041using namespace clang;
42
43//===----------------------------------------------------------------------===//
44
45Preprocessor::Preprocessor(Diagnostic &diags, const LangOptions &opts,
46 TargetInfo &target, SourceManager &SM,
47 HeaderSearch &Headers)
48 : Diags(diags), Features(opts), Target(target), FileMgr(Headers.getFileMgr()),
49 SourceMgr(SM), HeaderInfo(Headers), Identifiers(opts),
50 CurLexer(0), CurDirLookup(0), CurMacroExpander(0), Callbacks(0) {
51 ScratchBuf = new ScratchBuffer(SourceMgr);
Chris Lattner9594acf2007-07-15 00:25:26 +000052
Reid Spencer5f016e22007-07-11 17:01:13 +000053 // Clear stats.
54 NumDirectives = NumDefined = NumUndefined = NumPragma = 0;
55 NumIf = NumElse = NumEndif = 0;
56 NumEnteredSourceFiles = 0;
57 NumMacroExpanded = NumFnMacroExpanded = NumBuiltinMacroExpanded = 0;
58 NumFastMacroExpanded = NumTokenPaste = NumFastTokenPaste = 0;
59 MaxIncludeStackDepth = 0;
60 NumSkipped = 0;
61
62 // Default to discarding comments.
63 KeepComments = false;
64 KeepMacroComments = false;
65
66 // Macro expansion is enabled.
67 DisableMacroExpansion = false;
68 InMacroArgs = false;
Chris Lattner9594acf2007-07-15 00:25:26 +000069 NumCachedMacroExpanders = 0;
Reid Spencer5f016e22007-07-11 17:01:13 +000070
71 // "Poison" __VA_ARGS__, which can only appear in the expansion of a macro.
72 // This gets unpoisoned where it is allowed.
73 (Ident__VA_ARGS__ = getIdentifierInfo("__VA_ARGS__"))->setIsPoisoned();
74
75 // Initialize the pragma handlers.
76 PragmaHandlers = new PragmaNamespace(0);
77 RegisterBuiltinPragmas();
78
79 // Initialize builtin macros like __LINE__ and friends.
80 RegisterBuiltinMacros();
81}
82
83Preprocessor::~Preprocessor() {
84 // Free any active lexers.
85 delete CurLexer;
86
87 while (!IncludeMacroStack.empty()) {
88 delete IncludeMacroStack.back().TheLexer;
89 delete IncludeMacroStack.back().TheMacroExpander;
90 IncludeMacroStack.pop_back();
91 }
92
Chris Lattner9594acf2007-07-15 00:25:26 +000093 // Free any cached macro expanders.
94 for (unsigned i = 0, e = NumCachedMacroExpanders; i != e; ++i)
95 delete MacroExpanderCache[i];
96
Reid Spencer5f016e22007-07-11 17:01:13 +000097 // Release pragma information.
98 delete PragmaHandlers;
99
100 // Delete the scratch buffer info.
101 delete ScratchBuf;
102}
103
104PPCallbacks::~PPCallbacks() {
105}
106
107/// Diag - Forwarding function for diagnostics. This emits a diagnostic at
108/// the specified LexerToken's location, translating the token's start
109/// position in the current buffer into a SourcePosition object for rendering.
110void Preprocessor::Diag(SourceLocation Loc, unsigned DiagID) {
111 Diags.Report(Loc, DiagID);
112}
113
114void Preprocessor::Diag(SourceLocation Loc, unsigned DiagID,
115 const std::string &Msg) {
116 Diags.Report(Loc, DiagID, &Msg, 1);
117}
118
119void Preprocessor::DumpToken(const LexerToken &Tok, bool DumpFlags) const {
120 std::cerr << tok::getTokenName(Tok.getKind()) << " '"
121 << getSpelling(Tok) << "'";
122
123 if (!DumpFlags) return;
124 std::cerr << "\t";
125 if (Tok.isAtStartOfLine())
126 std::cerr << " [StartOfLine]";
127 if (Tok.hasLeadingSpace())
128 std::cerr << " [LeadingSpace]";
129 if (Tok.isExpandDisabled())
130 std::cerr << " [ExpandDisabled]";
131 if (Tok.needsCleaning()) {
132 const char *Start = SourceMgr.getCharacterData(Tok.getLocation());
133 std::cerr << " [UnClean='" << std::string(Start, Start+Tok.getLength())
134 << "']";
135 }
136}
137
138void Preprocessor::DumpMacro(const MacroInfo &MI) const {
139 std::cerr << "MACRO: ";
140 for (unsigned i = 0, e = MI.getNumTokens(); i != e; ++i) {
141 DumpToken(MI.getReplacementToken(i));
142 std::cerr << " ";
143 }
144 std::cerr << "\n";
145}
146
147void Preprocessor::PrintStats() {
148 std::cerr << "\n*** Preprocessor Stats:\n";
149 std::cerr << NumDirectives << " directives found:\n";
150 std::cerr << " " << NumDefined << " #define.\n";
151 std::cerr << " " << NumUndefined << " #undef.\n";
152 std::cerr << " #include/#include_next/#import:\n";
153 std::cerr << " " << NumEnteredSourceFiles << " source files entered.\n";
154 std::cerr << " " << MaxIncludeStackDepth << " max include stack depth\n";
155 std::cerr << " " << NumIf << " #if/#ifndef/#ifdef.\n";
156 std::cerr << " " << NumElse << " #else/#elif.\n";
157 std::cerr << " " << NumEndif << " #endif.\n";
158 std::cerr << " " << NumPragma << " #pragma.\n";
159 std::cerr << NumSkipped << " #if/#ifndef#ifdef regions skipped\n";
160
161 std::cerr << NumMacroExpanded << "/" << NumFnMacroExpanded << "/"
162 << NumBuiltinMacroExpanded << " obj/fn/builtin macros expanded, "
163 << NumFastMacroExpanded << " on the fast path.\n";
164 std::cerr << (NumFastTokenPaste+NumTokenPaste)
165 << " token paste (##) operations performed, "
166 << NumFastTokenPaste << " on the fast path.\n";
167}
168
169//===----------------------------------------------------------------------===//
170// Token Spelling
171//===----------------------------------------------------------------------===//
172
173
174/// getSpelling() - Return the 'spelling' of this token. The spelling of a
175/// token are the characters used to represent the token in the source file
176/// after trigraph expansion and escaped-newline folding. In particular, this
177/// wants to get the true, uncanonicalized, spelling of things like digraphs
178/// UCNs, etc.
179std::string Preprocessor::getSpelling(const LexerToken &Tok) const {
180 assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");
181
182 // If this token contains nothing interesting, return it directly.
183 const char *TokStart = SourceMgr.getCharacterData(Tok.getLocation());
184 if (!Tok.needsCleaning())
185 return std::string(TokStart, TokStart+Tok.getLength());
186
187 std::string Result;
188 Result.reserve(Tok.getLength());
189
190 // Otherwise, hard case, relex the characters into the string.
191 for (const char *Ptr = TokStart, *End = TokStart+Tok.getLength();
192 Ptr != End; ) {
193 unsigned CharSize;
194 Result.push_back(Lexer::getCharAndSizeNoWarn(Ptr, CharSize, Features));
195 Ptr += CharSize;
196 }
197 assert(Result.size() != unsigned(Tok.getLength()) &&
198 "NeedsCleaning flag set on something that didn't need cleaning!");
199 return Result;
200}
201
202/// getSpelling - This method is used to get the spelling of a token into a
203/// preallocated buffer, instead of as an std::string. The caller is required
204/// to allocate enough space for the token, which is guaranteed to be at least
205/// Tok.getLength() bytes long. The actual length of the token is returned.
206///
207/// Note that this method may do two possible things: it may either fill in
208/// the buffer specified with characters, or it may *change the input pointer*
209/// to point to a constant buffer with the data already in it (avoiding a
210/// copy). The caller is not allowed to modify the returned buffer pointer
211/// if an internal buffer is returned.
212unsigned Preprocessor::getSpelling(const LexerToken &Tok,
213 const char *&Buffer) const {
214 assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");
215
216 // If this token is an identifier, just return the string from the identifier
217 // table, which is very quick.
218 if (const IdentifierInfo *II = Tok.getIdentifierInfo()) {
219 Buffer = II->getName();
220 return Tok.getLength();
221 }
222
223 // Otherwise, compute the start of the token in the input lexer buffer.
224 const char *TokStart = SourceMgr.getCharacterData(Tok.getLocation());
225
226 // If this token contains nothing interesting, return it directly.
227 if (!Tok.needsCleaning()) {
228 Buffer = TokStart;
229 return Tok.getLength();
230 }
231 // Otherwise, hard case, relex the characters into the string.
232 char *OutBuf = const_cast<char*>(Buffer);
233 for (const char *Ptr = TokStart, *End = TokStart+Tok.getLength();
234 Ptr != End; ) {
235 unsigned CharSize;
236 *OutBuf++ = Lexer::getCharAndSizeNoWarn(Ptr, CharSize, Features);
237 Ptr += CharSize;
238 }
239 assert(unsigned(OutBuf-Buffer) != Tok.getLength() &&
240 "NeedsCleaning flag set on something that didn't need cleaning!");
241
242 return OutBuf-Buffer;
243}
244
245
246/// CreateString - Plop the specified string into a scratch buffer and return a
247/// location for it. If specified, the source location provides a source
248/// location for the token.
249SourceLocation Preprocessor::
250CreateString(const char *Buf, unsigned Len, SourceLocation SLoc) {
251 if (SLoc.isValid())
252 return ScratchBuf->getToken(Buf, Len, SLoc);
253 return ScratchBuf->getToken(Buf, Len);
254}
255
256
Chris Lattner97ba77c2007-07-16 06:48:38 +0000257/// AdvanceToTokenCharacter - Given a location that specifies the start of a
258/// token, return a new location that specifies a character within the token.
259SourceLocation Preprocessor::AdvanceToTokenCharacter(SourceLocation TokStart,
260 unsigned CharNo) {
261 // If they request the first char of the token, we're trivially done.
262 if (CharNo == 0) return TokStart;
263
264 // Figure out how many physical characters away the specified logical
265 // character is. This needs to take into consideration newlines and
266 // trigraphs.
267 const char *TokStartPtr = SourceMgr.getCharacterData(TokStart);
268 const char *TokPtr = TokStartPtr;
269
270 // The usual case is that tokens don't contain anything interesting. Skip
271 // over the uninteresting characters. If a token only consists of simple
272 // chars, this method is extremely fast.
273 while (CharNo && Lexer::isObviouslySimpleCharacter(*TokPtr))
274 ++TokPtr, --CharNo;
275
276 // If we have a character that may be a trigraph or escaped newline, create a
277 // lexer to parse it correctly.
278 unsigned FileID = TokStart.getFileID();
279 const llvm::MemoryBuffer *SrcBuf = SourceMgr.getBuffer(FileID);
280 if (CharNo != 0) {
281 // Create a lexer starting at this token position.
282 Lexer TheLexer(SrcBuf, FileID, *this, TokPtr);
283 LexerToken Tok;
284 // Skip over characters the remaining characters.
285 for (; CharNo; --CharNo)
286 TheLexer.getAndAdvanceChar(TokPtr, Tok);
287 }
288 return SourceLocation(FileID, TokPtr-SrcBuf->getBufferStart());
289}
290
291
292
Reid Spencer5f016e22007-07-11 17:01:13 +0000293//===----------------------------------------------------------------------===//
294// Source File Location Methods.
295//===----------------------------------------------------------------------===//
296
297/// LookupFile - Given a "foo" or <foo> reference, look up the indicated file,
298/// return null on failure. isAngled indicates whether the file reference is
299/// for system #include's or not (i.e. using <> instead of "").
300const FileEntry *Preprocessor::LookupFile(const char *FilenameStart,
301 const char *FilenameEnd,
302 bool isAngled,
303 const DirectoryLookup *FromDir,
304 const DirectoryLookup *&CurDir) {
305 // If the header lookup mechanism may be relative to the current file, pass in
306 // info about where the current file is.
307 const FileEntry *CurFileEnt = 0;
308 if (!FromDir) {
309 unsigned TheFileID = getCurrentFileLexer()->getCurFileID();
310 CurFileEnt = SourceMgr.getFileEntryForFileID(TheFileID);
311 }
312
313 // Do a standard file entry lookup.
314 CurDir = CurDirLookup;
315 const FileEntry *FE =
316 HeaderInfo.LookupFile(FilenameStart, FilenameEnd,
317 isAngled, FromDir, CurDir, CurFileEnt);
318 if (FE) return FE;
319
320 // Otherwise, see if this is a subframework header. If so, this is relative
321 // to one of the headers on the #include stack. Walk the list of the current
322 // headers on the #include stack and pass them to HeaderInfo.
323 if (CurLexer && !CurLexer->Is_PragmaLexer) {
324 CurFileEnt = SourceMgr.getFileEntryForFileID(CurLexer->getCurFileID());
325 if ((FE = HeaderInfo.LookupSubframeworkHeader(FilenameStart, FilenameEnd,
326 CurFileEnt)))
327 return FE;
328 }
329
330 for (unsigned i = 0, e = IncludeMacroStack.size(); i != e; ++i) {
331 IncludeStackInfo &ISEntry = IncludeMacroStack[e-i-1];
332 if (ISEntry.TheLexer && !ISEntry.TheLexer->Is_PragmaLexer) {
333 CurFileEnt =
334 SourceMgr.getFileEntryForFileID(ISEntry.TheLexer->getCurFileID());
335 if ((FE = HeaderInfo.LookupSubframeworkHeader(FilenameStart, FilenameEnd,
336 CurFileEnt)))
337 return FE;
338 }
339 }
340
341 // Otherwise, we really couldn't find the file.
342 return 0;
343}
344
345/// isInPrimaryFile - Return true if we're in the top-level file, not in a
346/// #include.
347bool Preprocessor::isInPrimaryFile() const {
348 if (CurLexer && !CurLexer->Is_PragmaLexer)
349 return CurLexer->isMainFile();
350
351 // If there are any stacked lexers, we're in a #include.
352 for (unsigned i = 0, e = IncludeMacroStack.size(); i != e; ++i)
353 if (IncludeMacroStack[i].TheLexer &&
354 !IncludeMacroStack[i].TheLexer->Is_PragmaLexer)
355 return IncludeMacroStack[i].TheLexer->isMainFile();
356 return false;
357}
358
359/// getCurrentLexer - Return the current file lexer being lexed from. Note
360/// that this ignores any potentially active macro expansions and _Pragma
361/// expansions going on at the time.
362Lexer *Preprocessor::getCurrentFileLexer() const {
363 if (CurLexer && !CurLexer->Is_PragmaLexer) return CurLexer;
364
365 // Look for a stacked lexer.
366 for (unsigned i = IncludeMacroStack.size(); i != 0; --i) {
367 Lexer *L = IncludeMacroStack[i-1].TheLexer;
368 if (L && !L->Is_PragmaLexer) // Ignore macro & _Pragma expansions.
369 return L;
370 }
371 return 0;
372}
373
374
375/// EnterSourceFile - Add a source file to the top of the include stack and
376/// start lexing tokens from it instead of the current buffer. Return true
377/// on failure.
378void Preprocessor::EnterSourceFile(unsigned FileID,
379 const DirectoryLookup *CurDir,
380 bool isMainFile) {
381 assert(CurMacroExpander == 0 && "Cannot #include a file inside a macro!");
382 ++NumEnteredSourceFiles;
383
384 if (MaxIncludeStackDepth < IncludeMacroStack.size())
385 MaxIncludeStackDepth = IncludeMacroStack.size();
386
387 const llvm::MemoryBuffer *Buffer = SourceMgr.getBuffer(FileID);
388 Lexer *TheLexer = new Lexer(Buffer, FileID, *this);
389 if (isMainFile) TheLexer->setIsMainFile();
390 EnterSourceFileWithLexer(TheLexer, CurDir);
391}
392
393/// EnterSourceFile - Add a source file to the top of the include stack and
394/// start lexing tokens from it instead of the current buffer.
395void Preprocessor::EnterSourceFileWithLexer(Lexer *TheLexer,
396 const DirectoryLookup *CurDir) {
397
398 // Add the current lexer to the include stack.
399 if (CurLexer || CurMacroExpander)
400 IncludeMacroStack.push_back(IncludeStackInfo(CurLexer, CurDirLookup,
401 CurMacroExpander));
402
403 CurLexer = TheLexer;
404 CurDirLookup = CurDir;
405 CurMacroExpander = 0;
406
407 // Notify the client, if desired, that we are in a new source file.
408 if (Callbacks && !CurLexer->Is_PragmaLexer) {
409 DirectoryLookup::DirType FileType = DirectoryLookup::NormalHeaderDir;
410
411 // Get the file entry for the current file.
412 if (const FileEntry *FE =
413 SourceMgr.getFileEntryForFileID(CurLexer->getCurFileID()))
414 FileType = HeaderInfo.getFileDirFlavor(FE);
415
416 Callbacks->FileChanged(SourceLocation(CurLexer->getCurFileID(), 0),
417 PPCallbacks::EnterFile, FileType);
418 }
419}
420
421
422
423/// EnterMacro - Add a Macro to the top of the include stack and start lexing
424/// tokens from it instead of the current buffer.
425void Preprocessor::EnterMacro(LexerToken &Tok, MacroArgs *Args) {
426 IncludeMacroStack.push_back(IncludeStackInfo(CurLexer, CurDirLookup,
427 CurMacroExpander));
428 CurLexer = 0;
429 CurDirLookup = 0;
430
Chris Lattner9594acf2007-07-15 00:25:26 +0000431 if (NumCachedMacroExpanders == 0) {
432 CurMacroExpander = new MacroExpander(Tok, Args, *this);
433 } else {
434 CurMacroExpander = MacroExpanderCache[--NumCachedMacroExpanders];
435 CurMacroExpander->Init(Tok, Args);
436 }
Reid Spencer5f016e22007-07-11 17:01:13 +0000437}
438
439/// EnterTokenStream - Add a "macro" context to the top of the include stack,
440/// which will cause the lexer to start returning the specified tokens. Note
441/// that these tokens will be re-macro-expanded when/if expansion is enabled.
442/// This method assumes that the specified stream of tokens has a permanent
443/// owner somewhere, so they do not need to be copied.
444void Preprocessor::EnterTokenStream(const LexerToken *Toks, unsigned NumToks) {
445 // Save our current state.
446 IncludeMacroStack.push_back(IncludeStackInfo(CurLexer, CurDirLookup,
447 CurMacroExpander));
448 CurLexer = 0;
449 CurDirLookup = 0;
450
451 // Create a macro expander to expand from the specified token stream.
Chris Lattner9594acf2007-07-15 00:25:26 +0000452 if (NumCachedMacroExpanders == 0) {
453 CurMacroExpander = new MacroExpander(Toks, NumToks, *this);
454 } else {
455 CurMacroExpander = MacroExpanderCache[--NumCachedMacroExpanders];
456 CurMacroExpander->Init(Toks, NumToks);
457 }
Reid Spencer5f016e22007-07-11 17:01:13 +0000458}
459
460/// RemoveTopOfLexerStack - Pop the current lexer/macro exp off the top of the
461/// lexer stack. This should only be used in situations where the current
462/// state of the top-of-stack lexer is known.
463void Preprocessor::RemoveTopOfLexerStack() {
464 assert(!IncludeMacroStack.empty() && "Ran out of stack entries to load");
Chris Lattner9594acf2007-07-15 00:25:26 +0000465
466 if (CurMacroExpander) {
467 // Delete or cache the now-dead macro expander.
468 if (NumCachedMacroExpanders == MacroExpanderCacheSize)
469 delete CurMacroExpander;
470 else
471 MacroExpanderCache[NumCachedMacroExpanders++] = CurMacroExpander;
472 } else {
473 delete CurLexer;
474 }
Reid Spencer5f016e22007-07-11 17:01:13 +0000475 CurLexer = IncludeMacroStack.back().TheLexer;
476 CurDirLookup = IncludeMacroStack.back().TheDirLookup;
477 CurMacroExpander = IncludeMacroStack.back().TheMacroExpander;
478 IncludeMacroStack.pop_back();
479}
480
481//===----------------------------------------------------------------------===//
482// Macro Expansion Handling.
483//===----------------------------------------------------------------------===//
484
485/// RegisterBuiltinMacro - Register the specified identifier in the identifier
486/// table and mark it as a builtin macro to be expanded.
487IdentifierInfo *Preprocessor::RegisterBuiltinMacro(const char *Name) {
488 // Get the identifier.
489 IdentifierInfo *Id = getIdentifierInfo(Name);
490
491 // Mark it as being a macro that is builtin.
492 MacroInfo *MI = new MacroInfo(SourceLocation());
493 MI->setIsBuiltinMacro();
494 Id->setMacroInfo(MI);
495 return Id;
496}
497
498
499/// RegisterBuiltinMacros - Register builtin macros, such as __LINE__ with the
500/// identifier table.
501void Preprocessor::RegisterBuiltinMacros() {
502 Ident__LINE__ = RegisterBuiltinMacro("__LINE__");
503 Ident__FILE__ = RegisterBuiltinMacro("__FILE__");
504 Ident__DATE__ = RegisterBuiltinMacro("__DATE__");
505 Ident__TIME__ = RegisterBuiltinMacro("__TIME__");
506 Ident_Pragma = RegisterBuiltinMacro("_Pragma");
507
508 // GCC Extensions.
509 Ident__BASE_FILE__ = RegisterBuiltinMacro("__BASE_FILE__");
510 Ident__INCLUDE_LEVEL__ = RegisterBuiltinMacro("__INCLUDE_LEVEL__");
511 Ident__TIMESTAMP__ = RegisterBuiltinMacro("__TIMESTAMP__");
512}
513
514/// isTrivialSingleTokenExpansion - Return true if MI, which has a single token
515/// in its expansion, currently expands to that token literally.
516static bool isTrivialSingleTokenExpansion(const MacroInfo *MI,
517 const IdentifierInfo *MacroIdent) {
518 IdentifierInfo *II = MI->getReplacementToken(0).getIdentifierInfo();
519
520 // If the token isn't an identifier, it's always literally expanded.
521 if (II == 0) return true;
522
523 // If the identifier is a macro, and if that macro is enabled, it may be
524 // expanded so it's not a trivial expansion.
525 if (II->getMacroInfo() && II->getMacroInfo()->isEnabled() &&
526 // Fast expanding "#define X X" is ok, because X would be disabled.
527 II != MacroIdent)
528 return false;
529
530 // If this is an object-like macro invocation, it is safe to trivially expand
531 // it.
532 if (MI->isObjectLike()) return true;
533
534 // If this is a function-like macro invocation, it's safe to trivially expand
535 // as long as the identifier is not a macro argument.
536 for (MacroInfo::arg_iterator I = MI->arg_begin(), E = MI->arg_end();
537 I != E; ++I)
538 if (*I == II)
539 return false; // Identifier is a macro argument.
540
541 return true;
542}
543
544
545/// isNextPPTokenLParen - Determine whether the next preprocessor token to be
546/// lexed is a '('. If so, consume the token and return true, if not, this
547/// method should have no observable side-effect on the lexed tokens.
548bool Preprocessor::isNextPPTokenLParen() {
549 // Do some quick tests for rejection cases.
550 unsigned Val;
551 if (CurLexer)
552 Val = CurLexer->isNextPPTokenLParen();
553 else
554 Val = CurMacroExpander->isNextTokenLParen();
555
556 if (Val == 2) {
557 // If we ran off the end of the lexer or macro expander, walk the include
558 // stack, looking for whatever will return the next token.
559 for (unsigned i = IncludeMacroStack.size(); Val == 2 && i != 0; --i) {
560 IncludeStackInfo &Entry = IncludeMacroStack[i-1];
561 if (Entry.TheLexer)
562 Val = Entry.TheLexer->isNextPPTokenLParen();
563 else
564 Val = Entry.TheMacroExpander->isNextTokenLParen();
565 }
566 }
567
568 // Okay, if we know that the token is a '(', lex it and return. Otherwise we
569 // have found something that isn't a '(' or we found the end of the
570 // translation unit. In either case, return false.
571 if (Val != 1)
572 return false;
573
574 LexerToken Tok;
575 LexUnexpandedToken(Tok);
576 assert(Tok.getKind() == tok::l_paren && "Error computing l-paren-ness?");
577 return true;
578}
579
580/// HandleMacroExpandedIdentifier - If an identifier token is read that is to be
581/// expanded as a macro, handle it and return the next token as 'Identifier'.
582bool Preprocessor::HandleMacroExpandedIdentifier(LexerToken &Identifier,
583 MacroInfo *MI) {
584
585 // If this is a builtin macro, like __LINE__ or _Pragma, handle it specially.
586 if (MI->isBuiltinMacro()) {
587 ExpandBuiltinMacro(Identifier);
588 return false;
589 }
590
591 // If this is the first use of a target-specific macro, warn about it.
592 if (MI->isTargetSpecific()) {
593 MI->setIsTargetSpecific(false); // Don't warn on second use.
594 getTargetInfo().DiagnoseNonPortability(Identifier.getLocation(),
595 diag::port_target_macro_use);
596 }
597
598 /// Args - If this is a function-like macro expansion, this contains,
599 /// for each macro argument, the list of tokens that were provided to the
600 /// invocation.
601 MacroArgs *Args = 0;
602
603 // If this is a function-like macro, read the arguments.
604 if (MI->isFunctionLike()) {
605 // C99 6.10.3p10: If the preprocessing token immediately after the the macro
606 // name isn't a '(', this macro should not be expanded.
607 if (!isNextPPTokenLParen())
608 return true;
609
610 // Remember that we are now parsing the arguments to a macro invocation.
611 // Preprocessor directives used inside macro arguments are not portable, and
612 // this enables the warning.
613 InMacroArgs = true;
614 Args = ReadFunctionLikeMacroArgs(Identifier, MI);
615
616 // Finished parsing args.
617 InMacroArgs = false;
618
619 // If there was an error parsing the arguments, bail out.
620 if (Args == 0) return false;
621
622 ++NumFnMacroExpanded;
623 } else {
624 ++NumMacroExpanded;
625 }
626
627 // Notice that this macro has been used.
628 MI->setIsUsed(true);
629
630 // If we started lexing a macro, enter the macro expansion body.
631
632 // If this macro expands to no tokens, don't bother to push it onto the
633 // expansion stack, only to take it right back off.
634 if (MI->getNumTokens() == 0) {
635 // No need for arg info.
636 if (Args) Args->destroy();
637
638 // Ignore this macro use, just return the next token in the current
639 // buffer.
640 bool HadLeadingSpace = Identifier.hasLeadingSpace();
641 bool IsAtStartOfLine = Identifier.isAtStartOfLine();
642
643 Lex(Identifier);
644
645 // If the identifier isn't on some OTHER line, inherit the leading
646 // whitespace/first-on-a-line property of this token. This handles
647 // stuff like "! XX," -> "! ," and " XX," -> " ,", when XX is
648 // empty.
649 if (!Identifier.isAtStartOfLine()) {
650 if (IsAtStartOfLine) Identifier.setFlag(LexerToken::StartOfLine);
651 if (HadLeadingSpace) Identifier.setFlag(LexerToken::LeadingSpace);
652 }
653 ++NumFastMacroExpanded;
654 return false;
655
656 } else if (MI->getNumTokens() == 1 &&
657 isTrivialSingleTokenExpansion(MI, Identifier.getIdentifierInfo())){
658 // Otherwise, if this macro expands into a single trivially-expanded
659 // token: expand it now. This handles common cases like
660 // "#define VAL 42".
661
662 // Propagate the isAtStartOfLine/hasLeadingSpace markers of the macro
663 // identifier to the expanded token.
664 bool isAtStartOfLine = Identifier.isAtStartOfLine();
665 bool hasLeadingSpace = Identifier.hasLeadingSpace();
666
667 // Remember where the token is instantiated.
668 SourceLocation InstantiateLoc = Identifier.getLocation();
669
670 // Replace the result token.
671 Identifier = MI->getReplacementToken(0);
672
673 // Restore the StartOfLine/LeadingSpace markers.
674 Identifier.setFlagValue(LexerToken::StartOfLine , isAtStartOfLine);
675 Identifier.setFlagValue(LexerToken::LeadingSpace, hasLeadingSpace);
676
677 // Update the tokens location to include both its logical and physical
678 // locations.
679 SourceLocation Loc =
680 SourceMgr.getInstantiationLoc(Identifier.getLocation(), InstantiateLoc);
681 Identifier.setLocation(Loc);
682
683 // If this is #define X X, we must mark the result as unexpandible.
684 if (IdentifierInfo *NewII = Identifier.getIdentifierInfo())
685 if (NewII->getMacroInfo() == MI)
686 Identifier.setFlag(LexerToken::DisableExpand);
687
688 // Since this is not an identifier token, it can't be macro expanded, so
689 // we're done.
690 ++NumFastMacroExpanded;
691 return false;
692 }
693
694 // Start expanding the macro.
695 EnterMacro(Identifier, Args);
696
697 // Now that the macro is at the top of the include stack, ask the
698 // preprocessor to read the next token from it.
699 Lex(Identifier);
700 return false;
701}
702
703/// ReadFunctionLikeMacroArgs - After reading "MACRO(", this method is
704/// invoked to read all of the actual arguments specified for the macro
705/// invocation. This returns null on error.
706MacroArgs *Preprocessor::ReadFunctionLikeMacroArgs(LexerToken &MacroName,
707 MacroInfo *MI) {
708 // The number of fixed arguments to parse.
709 unsigned NumFixedArgsLeft = MI->getNumArgs();
710 bool isVariadic = MI->isVariadic();
711
712 // Outer loop, while there are more arguments, keep reading them.
713 LexerToken Tok;
714 Tok.setKind(tok::comma);
715 --NumFixedArgsLeft; // Start reading the first arg.
716
717 // ArgTokens - Build up a list of tokens that make up each argument. Each
718 // argument is separated by an EOF token. Use a SmallVector so we can avoid
719 // heap allocations in the common case.
720 llvm::SmallVector<LexerToken, 64> ArgTokens;
721
722 unsigned NumActuals = 0;
723 while (Tok.getKind() == tok::comma) {
724 // C99 6.10.3p11: Keep track of the number of l_parens we have seen.
725 unsigned NumParens = 0;
726
727 while (1) {
728 // Read arguments as unexpanded tokens. This avoids issues, e.g., where
729 // an argument value in a macro could expand to ',' or '(' or ')'.
730 LexUnexpandedToken(Tok);
731
732 if (Tok.getKind() == tok::eof) {
733 Diag(MacroName, diag::err_unterm_macro_invoc);
734 // Do not lose the EOF. Return it to the client.
735 MacroName = Tok;
736 return 0;
737 } else if (Tok.getKind() == tok::r_paren) {
738 // If we found the ) token, the macro arg list is done.
739 if (NumParens-- == 0)
740 break;
741 } else if (Tok.getKind() == tok::l_paren) {
742 ++NumParens;
743 } else if (Tok.getKind() == tok::comma && NumParens == 0) {
744 // Comma ends this argument if there are more fixed arguments expected.
745 if (NumFixedArgsLeft)
746 break;
747
748 // If this is not a variadic macro, too many args were specified.
749 if (!isVariadic) {
750 // Emit the diagnostic at the macro name in case there is a missing ).
751 // Emitting it at the , could be far away from the macro name.
752 Diag(MacroName, diag::err_too_many_args_in_macro_invoc);
753 return 0;
754 }
755 // Otherwise, continue to add the tokens to this variable argument.
756 } else if (Tok.getKind() == tok::comment && !KeepMacroComments) {
757 // If this is a comment token in the argument list and we're just in
758 // -C mode (not -CC mode), discard the comment.
759 continue;
760 }
761
762 ArgTokens.push_back(Tok);
763 }
764
765 // Empty arguments are standard in C99 and supported as an extension in
766 // other modes.
767 if (ArgTokens.empty() && !Features.C99)
768 Diag(Tok, diag::ext_empty_fnmacro_arg);
769
770 // Add a marker EOF token to the end of the token list for this argument.
771 LexerToken EOFTok;
772 EOFTok.startToken();
773 EOFTok.setKind(tok::eof);
774 EOFTok.setLocation(Tok.getLocation());
775 EOFTok.setLength(0);
776 ArgTokens.push_back(EOFTok);
777 ++NumActuals;
778 --NumFixedArgsLeft;
779 };
780
781 // Okay, we either found the r_paren. Check to see if we parsed too few
782 // arguments.
783 unsigned MinArgsExpected = MI->getNumArgs();
784
785 // See MacroArgs instance var for description of this.
786 bool isVarargsElided = false;
787
788 if (NumActuals < MinArgsExpected) {
789 // There are several cases where too few arguments is ok, handle them now.
790 if (NumActuals+1 == MinArgsExpected && MI->isVariadic()) {
791 // Varargs where the named vararg parameter is missing: ok as extension.
792 // #define A(x, ...)
793 // A("blah")
794 Diag(Tok, diag::ext_missing_varargs_arg);
795
796 // Remember this occurred if this is a C99 macro invocation with at least
797 // one actual argument.
798 isVarargsElided = MI->isC99Varargs() && MI->getNumArgs() > 1;
799 } else if (MI->getNumArgs() == 1) {
800 // #define A(x)
801 // A()
802 // is ok because it is an empty argument.
803
804 // Empty arguments are standard in C99 and supported as an extension in
805 // other modes.
806 if (ArgTokens.empty() && !Features.C99)
807 Diag(Tok, diag::ext_empty_fnmacro_arg);
808 } else {
809 // Otherwise, emit the error.
810 Diag(Tok, diag::err_too_few_args_in_macro_invoc);
811 return 0;
812 }
813
814 // Add a marker EOF token to the end of the token list for this argument.
815 SourceLocation EndLoc = Tok.getLocation();
816 Tok.startToken();
817 Tok.setKind(tok::eof);
818 Tok.setLocation(EndLoc);
819 Tok.setLength(0);
820 ArgTokens.push_back(Tok);
821 }
822
823 return MacroArgs::create(MI, &ArgTokens[0], ArgTokens.size(),isVarargsElided);
824}
825
826/// ComputeDATE_TIME - Compute the current time, enter it into the specified
827/// scratch buffer, then return DATELoc/TIMELoc locations with the position of
828/// the identifier tokens inserted.
829static void ComputeDATE_TIME(SourceLocation &DATELoc, SourceLocation &TIMELoc,
830 Preprocessor &PP) {
831 time_t TT = time(0);
832 struct tm *TM = localtime(&TT);
833
834 static const char * const Months[] = {
835 "Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"
836 };
837
838 char TmpBuffer[100];
839 sprintf(TmpBuffer, "\"%s %2d %4d\"", Months[TM->tm_mon], TM->tm_mday,
840 TM->tm_year+1900);
841 DATELoc = PP.CreateString(TmpBuffer, strlen(TmpBuffer));
842
843 sprintf(TmpBuffer, "\"%02d:%02d:%02d\"", TM->tm_hour, TM->tm_min, TM->tm_sec);
844 TIMELoc = PP.CreateString(TmpBuffer, strlen(TmpBuffer));
845}
846
847/// ExpandBuiltinMacro - If an identifier token is read that is to be expanded
848/// as a builtin macro, handle it and return the next token as 'Tok'.
849void Preprocessor::ExpandBuiltinMacro(LexerToken &Tok) {
850 // Figure out which token this is.
851 IdentifierInfo *II = Tok.getIdentifierInfo();
852 assert(II && "Can't be a macro without id info!");
853
854 // If this is an _Pragma directive, expand it, invoke the pragma handler, then
855 // lex the token after it.
856 if (II == Ident_Pragma)
857 return Handle_Pragma(Tok);
858
859 ++NumBuiltinMacroExpanded;
860
861 char TmpBuffer[100];
862
863 // Set up the return result.
864 Tok.setIdentifierInfo(0);
865 Tok.clearFlag(LexerToken::NeedsCleaning);
866
867 if (II == Ident__LINE__) {
868 // __LINE__ expands to a simple numeric value.
869 sprintf(TmpBuffer, "%u", SourceMgr.getLineNumber(Tok.getLocation()));
870 unsigned Length = strlen(TmpBuffer);
871 Tok.setKind(tok::numeric_constant);
872 Tok.setLength(Length);
873 Tok.setLocation(CreateString(TmpBuffer, Length, Tok.getLocation()));
874 } else if (II == Ident__FILE__ || II == Ident__BASE_FILE__) {
875 SourceLocation Loc = Tok.getLocation();
876 if (II == Ident__BASE_FILE__) {
877 Diag(Tok, diag::ext_pp_base_file);
878 SourceLocation NextLoc = SourceMgr.getIncludeLoc(Loc.getFileID());
879 while (NextLoc.getFileID() != 0) {
880 Loc = NextLoc;
881 NextLoc = SourceMgr.getIncludeLoc(Loc.getFileID());
882 }
883 }
884
885 // Escape this filename. Turn '\' -> '\\' '"' -> '\"'
886 std::string FN = SourceMgr.getSourceName(Loc);
887 FN = '"' + Lexer::Stringify(FN) + '"';
888 Tok.setKind(tok::string_literal);
889 Tok.setLength(FN.size());
890 Tok.setLocation(CreateString(&FN[0], FN.size(), Tok.getLocation()));
891 } else if (II == Ident__DATE__) {
892 if (!DATELoc.isValid())
893 ComputeDATE_TIME(DATELoc, TIMELoc, *this);
894 Tok.setKind(tok::string_literal);
895 Tok.setLength(strlen("\"Mmm dd yyyy\""));
896 Tok.setLocation(SourceMgr.getInstantiationLoc(DATELoc, Tok.getLocation()));
897 } else if (II == Ident__TIME__) {
898 if (!TIMELoc.isValid())
899 ComputeDATE_TIME(DATELoc, TIMELoc, *this);
900 Tok.setKind(tok::string_literal);
901 Tok.setLength(strlen("\"hh:mm:ss\""));
902 Tok.setLocation(SourceMgr.getInstantiationLoc(TIMELoc, Tok.getLocation()));
903 } else if (II == Ident__INCLUDE_LEVEL__) {
904 Diag(Tok, diag::ext_pp_include_level);
905
906 // Compute the include depth of this token.
907 unsigned Depth = 0;
908 SourceLocation Loc = SourceMgr.getIncludeLoc(Tok.getLocation().getFileID());
909 for (; Loc.getFileID() != 0; ++Depth)
910 Loc = SourceMgr.getIncludeLoc(Loc.getFileID());
911
912 // __INCLUDE_LEVEL__ expands to a simple numeric value.
913 sprintf(TmpBuffer, "%u", Depth);
914 unsigned Length = strlen(TmpBuffer);
915 Tok.setKind(tok::numeric_constant);
916 Tok.setLength(Length);
917 Tok.setLocation(CreateString(TmpBuffer, Length, Tok.getLocation()));
918 } else if (II == Ident__TIMESTAMP__) {
919 // MSVC, ICC, GCC, VisualAge C++ extension. The generated string should be
920 // of the form "Ddd Mmm dd hh::mm::ss yyyy", which is returned by asctime.
921 Diag(Tok, diag::ext_pp_timestamp);
922
923 // Get the file that we are lexing out of. If we're currently lexing from
924 // a macro, dig into the include stack.
925 const FileEntry *CurFile = 0;
926 Lexer *TheLexer = getCurrentFileLexer();
927
928 if (TheLexer)
929 CurFile = SourceMgr.getFileEntryForFileID(TheLexer->getCurFileID());
930
931 // If this file is older than the file it depends on, emit a diagnostic.
932 const char *Result;
933 if (CurFile) {
934 time_t TT = CurFile->getModificationTime();
935 struct tm *TM = localtime(&TT);
936 Result = asctime(TM);
937 } else {
938 Result = "??? ??? ?? ??:??:?? ????\n";
939 }
940 TmpBuffer[0] = '"';
941 strcpy(TmpBuffer+1, Result);
942 unsigned Len = strlen(TmpBuffer);
943 TmpBuffer[Len-1] = '"'; // Replace the newline with a quote.
944 Tok.setKind(tok::string_literal);
945 Tok.setLength(Len);
946 Tok.setLocation(CreateString(TmpBuffer, Len, Tok.getLocation()));
947 } else {
948 assert(0 && "Unknown identifier!");
949 }
950}
951
952//===----------------------------------------------------------------------===//
953// Lexer Event Handling.
954//===----------------------------------------------------------------------===//
955
956/// LookUpIdentifierInfo - Given a tok::identifier token, look up the
957/// identifier information for the token and install it into the token.
958IdentifierInfo *Preprocessor::LookUpIdentifierInfo(LexerToken &Identifier,
959 const char *BufPtr) {
960 assert(Identifier.getKind() == tok::identifier && "Not an identifier!");
961 assert(Identifier.getIdentifierInfo() == 0 && "Identinfo already exists!");
962
963 // Look up this token, see if it is a macro, or if it is a language keyword.
964 IdentifierInfo *II;
965 if (BufPtr && !Identifier.needsCleaning()) {
966 // No cleaning needed, just use the characters from the lexed buffer.
967 II = getIdentifierInfo(BufPtr, BufPtr+Identifier.getLength());
968 } else {
969 // Cleaning needed, alloca a buffer, clean into it, then use the buffer.
Chris Lattnerc35717a2007-07-13 17:10:38 +0000970 llvm::SmallVector<char, 64> IdentifierBuffer;
971 IdentifierBuffer.resize(Identifier.getLength());
972 const char *TmpBuf = &IdentifierBuffer[0];
Reid Spencer5f016e22007-07-11 17:01:13 +0000973 unsigned Size = getSpelling(Identifier, TmpBuf);
974 II = getIdentifierInfo(TmpBuf, TmpBuf+Size);
975 }
976 Identifier.setIdentifierInfo(II);
977 return II;
978}
979
980
981/// HandleIdentifier - This callback is invoked when the lexer reads an
982/// identifier. This callback looks up the identifier in the map and/or
983/// potentially macro expands it or turns it into a named token (like 'for').
984void Preprocessor::HandleIdentifier(LexerToken &Identifier) {
985 assert(Identifier.getIdentifierInfo() &&
986 "Can't handle identifiers without identifier info!");
987
988 IdentifierInfo &II = *Identifier.getIdentifierInfo();
989
990 // If this identifier was poisoned, and if it was not produced from a macro
991 // expansion, emit an error.
992 if (II.isPoisoned() && CurLexer) {
993 if (&II != Ident__VA_ARGS__) // We warn about __VA_ARGS__ with poisoning.
994 Diag(Identifier, diag::err_pp_used_poisoned_id);
995 else
996 Diag(Identifier, diag::ext_pp_bad_vaargs_use);
997 }
998
999 // If this is a macro to be expanded, do it.
1000 if (MacroInfo *MI = II.getMacroInfo()) {
1001 if (!DisableMacroExpansion && !Identifier.isExpandDisabled()) {
1002 if (MI->isEnabled()) {
1003 if (!HandleMacroExpandedIdentifier(Identifier, MI))
1004 return;
1005 } else {
1006 // C99 6.10.3.4p2 says that a disabled macro may never again be
1007 // expanded, even if it's in a context where it could be expanded in the
1008 // future.
1009 Identifier.setFlag(LexerToken::DisableExpand);
1010 }
1011 }
1012 } else if (II.isOtherTargetMacro() && !DisableMacroExpansion) {
1013 // If this identifier is a macro on some other target, emit a diagnostic.
1014 // This diagnosic is only emitted when macro expansion is enabled, because
1015 // the macro would not have been expanded for the other target either.
1016 II.setIsOtherTargetMacro(false); // Don't warn on second use.
1017 getTargetInfo().DiagnoseNonPortability(Identifier.getLocation(),
1018 diag::port_target_macro_use);
1019
1020 }
1021
1022 // C++ 2.11p2: If this is an alternative representation of a C++ operator,
1023 // then we act as if it is the actual operator and not the textual
1024 // representation of it.
1025 if (II.isCPlusPlusOperatorKeyword())
1026 Identifier.setIdentifierInfo(0);
1027
1028 // Change the kind of this identifier to the appropriate token kind, e.g.
1029 // turning "for" into a keyword.
1030 Identifier.setKind(II.getTokenID());
1031
1032 // If this is an extension token, diagnose its use.
1033 // FIXME: tried (unsuccesfully) to shut this up when compiling with gnu99
1034 // For now, I'm just commenting it out (while I work on attributes).
1035 if (II.isExtensionToken() && Features.C99)
1036 Diag(Identifier, diag::ext_token_used);
1037}
1038
1039/// HandleEndOfFile - This callback is invoked when the lexer hits the end of
1040/// the current file. This either returns the EOF token or pops a level off
1041/// the include stack and keeps going.
1042bool Preprocessor::HandleEndOfFile(LexerToken &Result, bool isEndOfMacro) {
1043 assert(!CurMacroExpander &&
1044 "Ending a file when currently in a macro!");
1045
1046 // See if this file had a controlling macro.
1047 if (CurLexer) { // Not ending a macro, ignore it.
1048 if (const IdentifierInfo *ControllingMacro =
1049 CurLexer->MIOpt.GetControllingMacroAtEndOfFile()) {
1050 // Okay, this has a controlling macro, remember in PerFileInfo.
1051 if (const FileEntry *FE =
1052 SourceMgr.getFileEntryForFileID(CurLexer->getCurFileID()))
1053 HeaderInfo.SetFileControllingMacro(FE, ControllingMacro);
1054 }
1055 }
1056
1057 // If this is a #include'd file, pop it off the include stack and continue
1058 // lexing the #includer file.
1059 if (!IncludeMacroStack.empty()) {
1060 // We're done with the #included file.
1061 RemoveTopOfLexerStack();
1062
1063 // Notify the client, if desired, that we are in a new source file.
1064 if (Callbacks && !isEndOfMacro && CurLexer) {
1065 DirectoryLookup::DirType FileType = DirectoryLookup::NormalHeaderDir;
1066
1067 // Get the file entry for the current file.
1068 if (const FileEntry *FE =
1069 SourceMgr.getFileEntryForFileID(CurLexer->getCurFileID()))
1070 FileType = HeaderInfo.getFileDirFlavor(FE);
1071
1072 Callbacks->FileChanged(CurLexer->getSourceLocation(CurLexer->BufferPtr),
1073 PPCallbacks::ExitFile, FileType);
1074 }
1075
1076 // Client should lex another token.
1077 return false;
1078 }
1079
1080 Result.startToken();
1081 CurLexer->BufferPtr = CurLexer->BufferEnd;
1082 CurLexer->FormTokenWithChars(Result, CurLexer->BufferEnd);
1083 Result.setKind(tok::eof);
1084
1085 // We're done with the #included file.
1086 delete CurLexer;
1087 CurLexer = 0;
1088
1089 // This is the end of the top-level file. If the diag::pp_macro_not_used
1090 // diagnostic is enabled, walk all of the identifiers, looking for macros that
1091 // have not been used.
1092 if (Diags.getDiagnosticLevel(diag::pp_macro_not_used) != Diagnostic::Ignored){
1093 for (IdentifierTable::iterator I = Identifiers.begin(),
1094 E = Identifiers.end(); I != E; ++I) {
1095 const IdentifierInfo &II = I->getValue();
1096 if (II.getMacroInfo() && !II.getMacroInfo()->isUsed())
1097 Diag(II.getMacroInfo()->getDefinitionLoc(), diag::pp_macro_not_used);
1098 }
1099 }
1100
1101 return true;
1102}
1103
1104/// HandleEndOfMacro - This callback is invoked when the lexer hits the end of
1105/// the current macro expansion or token stream expansion.
1106bool Preprocessor::HandleEndOfMacro(LexerToken &Result) {
1107 assert(CurMacroExpander && !CurLexer &&
1108 "Ending a macro when currently in a #include file!");
1109
Chris Lattner9594acf2007-07-15 00:25:26 +00001110 // Delete or cache the now-dead macro expander.
1111 if (NumCachedMacroExpanders == MacroExpanderCacheSize)
1112 delete CurMacroExpander;
1113 else
1114 MacroExpanderCache[NumCachedMacroExpanders++] = CurMacroExpander;
Reid Spencer5f016e22007-07-11 17:01:13 +00001115
1116 // Handle this like a #include file being popped off the stack.
1117 CurMacroExpander = 0;
1118 return HandleEndOfFile(Result, true);
1119}
1120
1121
1122//===----------------------------------------------------------------------===//
1123// Utility Methods for Preprocessor Directive Handling.
1124//===----------------------------------------------------------------------===//
1125
1126/// DiscardUntilEndOfDirective - Read and discard all tokens remaining on the
1127/// current line until the tok::eom token is found.
1128void Preprocessor::DiscardUntilEndOfDirective() {
1129 LexerToken Tmp;
1130 do {
1131 LexUnexpandedToken(Tmp);
1132 } while (Tmp.getKind() != tok::eom);
1133}
1134
1135/// isCXXNamedOperator - Returns "true" if the token is a named operator in C++.
1136static bool isCXXNamedOperator(const std::string &Spelling) {
1137 return Spelling == "and" || Spelling == "bitand" || Spelling == "bitor" ||
1138 Spelling == "compl" || Spelling == "not" || Spelling == "not_eq" ||
1139 Spelling == "or" || Spelling == "xor";
1140}
1141
1142/// ReadMacroName - Lex and validate a macro name, which occurs after a
1143/// #define or #undef. This sets the token kind to eom and discards the rest
1144/// of the macro line if the macro name is invalid. isDefineUndef is 1 if
1145/// this is due to a a #define, 2 if #undef directive, 0 if it is something
1146/// else (e.g. #ifdef).
1147void Preprocessor::ReadMacroName(LexerToken &MacroNameTok, char isDefineUndef) {
1148 // Read the token, don't allow macro expansion on it.
1149 LexUnexpandedToken(MacroNameTok);
1150
1151 // Missing macro name?
1152 if (MacroNameTok.getKind() == tok::eom)
1153 return Diag(MacroNameTok, diag::err_pp_missing_macro_name);
1154
1155 IdentifierInfo *II = MacroNameTok.getIdentifierInfo();
1156 if (II == 0) {
1157 std::string Spelling = getSpelling(MacroNameTok);
1158 if (isCXXNamedOperator(Spelling))
1159 // C++ 2.5p2: Alternative tokens behave the same as its primary token
1160 // except for their spellings.
1161 Diag(MacroNameTok, diag::err_pp_operator_used_as_macro_name, Spelling);
1162 else
1163 Diag(MacroNameTok, diag::err_pp_macro_not_identifier);
1164 // Fall through on error.
1165 } else if (isDefineUndef && II->getPPKeywordID() == tok::pp_defined) {
1166 // Error if defining "defined": C99 6.10.8.4.
1167 Diag(MacroNameTok, diag::err_defined_macro_name);
1168 } else if (isDefineUndef && II->getMacroInfo() &&
1169 II->getMacroInfo()->isBuiltinMacro()) {
1170 // Error if defining "__LINE__" and other builtins: C99 6.10.8.4.
1171 if (isDefineUndef == 1)
1172 Diag(MacroNameTok, diag::pp_redef_builtin_macro);
1173 else
1174 Diag(MacroNameTok, diag::pp_undef_builtin_macro);
1175 } else {
1176 // Okay, we got a good identifier node. Return it.
1177 return;
1178 }
1179
1180 // Invalid macro name, read and discard the rest of the line. Then set the
1181 // token kind to tok::eom.
1182 MacroNameTok.setKind(tok::eom);
1183 return DiscardUntilEndOfDirective();
1184}
1185
1186/// CheckEndOfDirective - Ensure that the next token is a tok::eom token. If
1187/// not, emit a diagnostic and consume up until the eom.
1188void Preprocessor::CheckEndOfDirective(const char *DirType) {
1189 LexerToken Tmp;
1190 Lex(Tmp);
1191 // There should be no tokens after the directive, but we allow them as an
1192 // extension.
1193 while (Tmp.getKind() == tok::comment) // Skip comments in -C mode.
1194 Lex(Tmp);
1195
1196 if (Tmp.getKind() != tok::eom) {
1197 Diag(Tmp, diag::ext_pp_extra_tokens_at_eol, DirType);
1198 DiscardUntilEndOfDirective();
1199 }
1200}
1201
1202
1203
1204/// SkipExcludedConditionalBlock - We just read a #if or related directive and
1205/// decided that the subsequent tokens are in the #if'd out portion of the
1206/// file. Lex the rest of the file, until we see an #endif. If
1207/// FoundNonSkipPortion is true, then we have already emitted code for part of
1208/// this #if directive, so #else/#elif blocks should never be entered. If ElseOk
1209/// is true, then #else directives are ok, if not, then we have already seen one
1210/// so a #else directive is a duplicate. When this returns, the caller can lex
1211/// the first valid token.
1212void Preprocessor::SkipExcludedConditionalBlock(SourceLocation IfTokenLoc,
1213 bool FoundNonSkipPortion,
1214 bool FoundElse) {
1215 ++NumSkipped;
1216 assert(CurMacroExpander == 0 && CurLexer &&
1217 "Lexing a macro, not a file?");
1218
1219 CurLexer->pushConditionalLevel(IfTokenLoc, /*isSkipping*/false,
1220 FoundNonSkipPortion, FoundElse);
1221
1222 // Enter raw mode to disable identifier lookup (and thus macro expansion),
1223 // disabling warnings, etc.
1224 CurLexer->LexingRawMode = true;
1225 LexerToken Tok;
1226 while (1) {
1227 CurLexer->Lex(Tok);
1228
1229 // If this is the end of the buffer, we have an error.
1230 if (Tok.getKind() == tok::eof) {
1231 // Emit errors for each unterminated conditional on the stack, including
1232 // the current one.
1233 while (!CurLexer->ConditionalStack.empty()) {
1234 Diag(CurLexer->ConditionalStack.back().IfLoc,
1235 diag::err_pp_unterminated_conditional);
1236 CurLexer->ConditionalStack.pop_back();
1237 }
1238
1239 // Just return and let the caller lex after this #include.
1240 break;
1241 }
1242
1243 // If this token is not a preprocessor directive, just skip it.
1244 if (Tok.getKind() != tok::hash || !Tok.isAtStartOfLine())
1245 continue;
1246
1247 // We just parsed a # character at the start of a line, so we're in
1248 // directive mode. Tell the lexer this so any newlines we see will be
1249 // converted into an EOM token (this terminates the macro).
1250 CurLexer->ParsingPreprocessorDirective = true;
1251 CurLexer->KeepCommentMode = false;
1252
1253
1254 // Read the next token, the directive flavor.
1255 LexUnexpandedToken(Tok);
1256
1257 // If this isn't an identifier directive (e.g. is "# 1\n" or "#\n", or
1258 // something bogus), skip it.
1259 if (Tok.getKind() != tok::identifier) {
1260 CurLexer->ParsingPreprocessorDirective = false;
1261 // Restore comment saving mode.
1262 CurLexer->KeepCommentMode = KeepComments;
1263 continue;
1264 }
1265
1266 // If the first letter isn't i or e, it isn't intesting to us. We know that
1267 // this is safe in the face of spelling differences, because there is no way
1268 // to spell an i/e in a strange way that is another letter. Skipping this
1269 // allows us to avoid looking up the identifier info for #define/#undef and
1270 // other common directives.
1271 const char *RawCharData = SourceMgr.getCharacterData(Tok.getLocation());
1272 char FirstChar = RawCharData[0];
1273 if (FirstChar >= 'a' && FirstChar <= 'z' &&
1274 FirstChar != 'i' && FirstChar != 'e') {
1275 CurLexer->ParsingPreprocessorDirective = false;
1276 // Restore comment saving mode.
1277 CurLexer->KeepCommentMode = KeepComments;
1278 continue;
1279 }
1280
1281 // Get the identifier name without trigraphs or embedded newlines. Note
1282 // that we can't use Tok.getIdentifierInfo() because its lookup is disabled
1283 // when skipping.
1284 // TODO: could do this with zero copies in the no-clean case by using
1285 // strncmp below.
1286 char Directive[20];
1287 unsigned IdLen;
1288 if (!Tok.needsCleaning() && Tok.getLength() < 20) {
1289 IdLen = Tok.getLength();
1290 memcpy(Directive, RawCharData, IdLen);
1291 Directive[IdLen] = 0;
1292 } else {
1293 std::string DirectiveStr = getSpelling(Tok);
1294 IdLen = DirectiveStr.size();
1295 if (IdLen >= 20) {
1296 CurLexer->ParsingPreprocessorDirective = false;
1297 // Restore comment saving mode.
1298 CurLexer->KeepCommentMode = KeepComments;
1299 continue;
1300 }
1301 memcpy(Directive, &DirectiveStr[0], IdLen);
1302 Directive[IdLen] = 0;
1303 }
1304
1305 if (FirstChar == 'i' && Directive[1] == 'f') {
1306 if ((IdLen == 2) || // "if"
1307 (IdLen == 5 && !strcmp(Directive+2, "def")) || // "ifdef"
1308 (IdLen == 6 && !strcmp(Directive+2, "ndef"))) { // "ifndef"
1309 // We know the entire #if/#ifdef/#ifndef block will be skipped, don't
1310 // bother parsing the condition.
1311 DiscardUntilEndOfDirective();
1312 CurLexer->pushConditionalLevel(Tok.getLocation(), /*wasskipping*/true,
1313 /*foundnonskip*/false,
1314 /*fnddelse*/false);
1315 }
1316 } else if (FirstChar == 'e') {
1317 if (IdLen == 5 && !strcmp(Directive+1, "ndif")) { // "endif"
1318 CheckEndOfDirective("#endif");
1319 PPConditionalInfo CondInfo;
1320 CondInfo.WasSkipping = true; // Silence bogus warning.
1321 bool InCond = CurLexer->popConditionalLevel(CondInfo);
1322 InCond = InCond; // Silence warning in no-asserts mode.
1323 assert(!InCond && "Can't be skipping if not in a conditional!");
1324
1325 // If we popped the outermost skipping block, we're done skipping!
1326 if (!CondInfo.WasSkipping)
1327 break;
1328 } else if (IdLen == 4 && !strcmp(Directive+1, "lse")) { // "else".
1329 // #else directive in a skipping conditional. If not in some other
1330 // skipping conditional, and if #else hasn't already been seen, enter it
1331 // as a non-skipping conditional.
1332 CheckEndOfDirective("#else");
1333 PPConditionalInfo &CondInfo = CurLexer->peekConditionalLevel();
1334
1335 // If this is a #else with a #else before it, report the error.
1336 if (CondInfo.FoundElse) Diag(Tok, diag::pp_err_else_after_else);
1337
1338 // Note that we've seen a #else in this conditional.
1339 CondInfo.FoundElse = true;
1340
1341 // If the conditional is at the top level, and the #if block wasn't
1342 // entered, enter the #else block now.
1343 if (!CondInfo.WasSkipping && !CondInfo.FoundNonSkip) {
1344 CondInfo.FoundNonSkip = true;
1345 break;
1346 }
1347 } else if (IdLen == 4 && !strcmp(Directive+1, "lif")) { // "elif".
1348 PPConditionalInfo &CondInfo = CurLexer->peekConditionalLevel();
1349
1350 bool ShouldEnter;
1351 // If this is in a skipping block or if we're already handled this #if
1352 // block, don't bother parsing the condition.
1353 if (CondInfo.WasSkipping || CondInfo.FoundNonSkip) {
1354 DiscardUntilEndOfDirective();
1355 ShouldEnter = false;
1356 } else {
1357 // Restore the value of LexingRawMode so that identifiers are
1358 // looked up, etc, inside the #elif expression.
1359 assert(CurLexer->LexingRawMode && "We have to be skipping here!");
1360 CurLexer->LexingRawMode = false;
1361 IdentifierInfo *IfNDefMacro = 0;
1362 ShouldEnter = EvaluateDirectiveExpression(IfNDefMacro);
1363 CurLexer->LexingRawMode = true;
1364 }
1365
1366 // If this is a #elif with a #else before it, report the error.
1367 if (CondInfo.FoundElse) Diag(Tok, diag::pp_err_elif_after_else);
1368
1369 // If this condition is true, enter it!
1370 if (ShouldEnter) {
1371 CondInfo.FoundNonSkip = true;
1372 break;
1373 }
1374 }
1375 }
1376
1377 CurLexer->ParsingPreprocessorDirective = false;
1378 // Restore comment saving mode.
1379 CurLexer->KeepCommentMode = KeepComments;
1380 }
1381
1382 // Finally, if we are out of the conditional (saw an #endif or ran off the end
1383 // of the file, just stop skipping and return to lexing whatever came after
1384 // the #if block.
1385 CurLexer->LexingRawMode = false;
1386}
1387
1388//===----------------------------------------------------------------------===//
1389// Preprocessor Directive Handling.
1390//===----------------------------------------------------------------------===//
1391
1392/// HandleDirective - This callback is invoked when the lexer sees a # token
1393/// at the start of a line. This consumes the directive, modifies the
1394/// lexer/preprocessor state, and advances the lexer(s) so that the next token
1395/// read is the correct one.
1396void Preprocessor::HandleDirective(LexerToken &Result) {
1397 // FIXME: Traditional: # with whitespace before it not recognized by K&R?
1398
1399 // We just parsed a # character at the start of a line, so we're in directive
1400 // mode. Tell the lexer this so any newlines we see will be converted into an
1401 // EOM token (which terminates the directive).
1402 CurLexer->ParsingPreprocessorDirective = true;
1403
1404 ++NumDirectives;
1405
1406 // We are about to read a token. For the multiple-include optimization FA to
1407 // work, we have to remember if we had read any tokens *before* this
1408 // pp-directive.
1409 bool ReadAnyTokensBeforeDirective = CurLexer->MIOpt.getHasReadAnyTokensVal();
1410
1411 // Read the next token, the directive flavor. This isn't expanded due to
1412 // C99 6.10.3p8.
1413 LexUnexpandedToken(Result);
1414
1415 // C99 6.10.3p11: Is this preprocessor directive in macro invocation? e.g.:
1416 // #define A(x) #x
1417 // A(abc
1418 // #warning blah
1419 // def)
1420 // If so, the user is relying on non-portable behavior, emit a diagnostic.
1421 if (InMacroArgs)
1422 Diag(Result, diag::ext_embedded_directive);
1423
1424TryAgain:
1425 switch (Result.getKind()) {
1426 case tok::eom:
1427 return; // null directive.
1428 case tok::comment:
1429 // Handle stuff like "# /*foo*/ define X" in -E -C mode.
1430 LexUnexpandedToken(Result);
1431 goto TryAgain;
1432
1433 case tok::numeric_constant:
1434 // FIXME: implement # 7 line numbers!
1435 DiscardUntilEndOfDirective();
1436 return;
1437 default:
1438 IdentifierInfo *II = Result.getIdentifierInfo();
1439 if (II == 0) break; // Not an identifier.
1440
1441 // Ask what the preprocessor keyword ID is.
1442 switch (II->getPPKeywordID()) {
1443 default: break;
1444 // C99 6.10.1 - Conditional Inclusion.
1445 case tok::pp_if:
1446 return HandleIfDirective(Result, ReadAnyTokensBeforeDirective);
1447 case tok::pp_ifdef:
1448 return HandleIfdefDirective(Result, false, true/*not valid for miopt*/);
1449 case tok::pp_ifndef:
1450 return HandleIfdefDirective(Result, true, ReadAnyTokensBeforeDirective);
1451 case tok::pp_elif:
1452 return HandleElifDirective(Result);
1453 case tok::pp_else:
1454 return HandleElseDirective(Result);
1455 case tok::pp_endif:
1456 return HandleEndifDirective(Result);
1457
1458 // C99 6.10.2 - Source File Inclusion.
1459 case tok::pp_include:
1460 return HandleIncludeDirective(Result); // Handle #include.
1461
1462 // C99 6.10.3 - Macro Replacement.
1463 case tok::pp_define:
1464 return HandleDefineDirective(Result, false);
1465 case tok::pp_undef:
1466 return HandleUndefDirective(Result);
1467
1468 // C99 6.10.4 - Line Control.
1469 case tok::pp_line:
1470 // FIXME: implement #line
1471 DiscardUntilEndOfDirective();
1472 return;
1473
1474 // C99 6.10.5 - Error Directive.
1475 case tok::pp_error:
1476 return HandleUserDiagnosticDirective(Result, false);
1477
1478 // C99 6.10.6 - Pragma Directive.
1479 case tok::pp_pragma:
1480 return HandlePragmaDirective();
1481
1482 // GNU Extensions.
1483 case tok::pp_import:
1484 return HandleImportDirective(Result);
1485 case tok::pp_include_next:
1486 return HandleIncludeNextDirective(Result);
1487
1488 case tok::pp_warning:
1489 Diag(Result, diag::ext_pp_warning_directive);
1490 return HandleUserDiagnosticDirective(Result, true);
1491 case tok::pp_ident:
1492 return HandleIdentSCCSDirective(Result);
1493 case tok::pp_sccs:
1494 return HandleIdentSCCSDirective(Result);
1495 case tok::pp_assert:
1496 //isExtension = true; // FIXME: implement #assert
1497 break;
1498 case tok::pp_unassert:
1499 //isExtension = true; // FIXME: implement #unassert
1500 break;
1501
1502 // clang extensions.
1503 case tok::pp_define_target:
1504 return HandleDefineDirective(Result, true);
1505 case tok::pp_define_other_target:
1506 return HandleDefineOtherTargetDirective(Result);
1507 }
1508 break;
1509 }
1510
1511 // If we reached here, the preprocessing token is not valid!
1512 Diag(Result, diag::err_pp_invalid_directive);
1513
1514 // Read the rest of the PP line.
1515 DiscardUntilEndOfDirective();
1516
1517 // Okay, we're done parsing the directive.
1518}
1519
1520void Preprocessor::HandleUserDiagnosticDirective(LexerToken &Tok,
1521 bool isWarning) {
1522 // Read the rest of the line raw. We do this because we don't want macros
1523 // to be expanded and we don't require that the tokens be valid preprocessing
1524 // tokens. For example, this is allowed: "#warning ` 'foo". GCC does
1525 // collapse multiple consequtive white space between tokens, but this isn't
1526 // specified by the standard.
1527 std::string Message = CurLexer->ReadToEndOfLine();
1528
1529 unsigned DiagID = isWarning ? diag::pp_hash_warning : diag::err_pp_hash_error;
1530 return Diag(Tok, DiagID, Message);
1531}
1532
1533/// HandleIdentSCCSDirective - Handle a #ident/#sccs directive.
1534///
1535void Preprocessor::HandleIdentSCCSDirective(LexerToken &Tok) {
1536 // Yes, this directive is an extension.
1537 Diag(Tok, diag::ext_pp_ident_directive);
1538
1539 // Read the string argument.
1540 LexerToken StrTok;
1541 Lex(StrTok);
1542
1543 // If the token kind isn't a string, it's a malformed directive.
1544 if (StrTok.getKind() != tok::string_literal &&
1545 StrTok.getKind() != tok::wide_string_literal)
1546 return Diag(StrTok, diag::err_pp_malformed_ident);
1547
1548 // Verify that there is nothing after the string, other than EOM.
1549 CheckEndOfDirective("#ident");
1550
1551 if (Callbacks)
1552 Callbacks->Ident(Tok.getLocation(), getSpelling(StrTok));
1553}
1554
1555//===----------------------------------------------------------------------===//
1556// Preprocessor Include Directive Handling.
1557//===----------------------------------------------------------------------===//
1558
1559/// GetIncludeFilenameSpelling - Turn the specified lexer token into a fully
1560/// checked and spelled filename, e.g. as an operand of #include. This returns
1561/// true if the input filename was in <>'s or false if it were in ""'s. The
1562/// caller is expected to provide a buffer that is large enough to hold the
1563/// spelling of the filename, but is also expected to handle the case when
1564/// this method decides to use a different buffer.
1565bool Preprocessor::GetIncludeFilenameSpelling(const LexerToken &FilenameTok,
1566 const char *&BufStart,
1567 const char *&BufEnd) {
1568 // Get the text form of the filename.
1569 unsigned Len = getSpelling(FilenameTok, BufStart);
1570 BufEnd = BufStart+Len;
1571 assert(BufStart != BufEnd && "Can't have tokens with empty spellings!");
1572
1573 // Make sure the filename is <x> or "x".
1574 bool isAngled;
1575 if (BufStart[0] == '<') {
1576 if (BufEnd[-1] != '>') {
1577 Diag(FilenameTok.getLocation(), diag::err_pp_expects_filename);
1578 BufStart = 0;
1579 return true;
1580 }
1581 isAngled = true;
1582 } else if (BufStart[0] == '"') {
1583 if (BufEnd[-1] != '"') {
1584 Diag(FilenameTok.getLocation(), diag::err_pp_expects_filename);
1585 BufStart = 0;
1586 return true;
1587 }
1588 isAngled = false;
1589 } else {
1590 Diag(FilenameTok.getLocation(), diag::err_pp_expects_filename);
1591 BufStart = 0;
1592 return true;
1593 }
1594
1595 // Diagnose #include "" as invalid.
1596 if (BufEnd-BufStart <= 2) {
1597 Diag(FilenameTok.getLocation(), diag::err_pp_empty_filename);
1598 BufStart = 0;
1599 return "";
1600 }
1601
1602 // Skip the brackets.
1603 ++BufStart;
1604 --BufEnd;
1605 return isAngled;
1606}
1607
1608/// HandleIncludeDirective - The "#include" tokens have just been read, read the
1609/// file to be included from the lexer, then include it! This is a common
1610/// routine with functionality shared between #include, #include_next and
1611/// #import.
1612void Preprocessor::HandleIncludeDirective(LexerToken &IncludeTok,
1613 const DirectoryLookup *LookupFrom,
1614 bool isImport) {
1615
1616 LexerToken FilenameTok;
1617 CurLexer->LexIncludeFilename(FilenameTok);
1618
1619 // If the token kind is EOM, the error has already been diagnosed.
1620 if (FilenameTok.getKind() == tok::eom)
1621 return;
1622
1623 // Reserve a buffer to get the spelling.
1624 llvm::SmallVector<char, 128> FilenameBuffer;
1625 FilenameBuffer.resize(FilenameTok.getLength());
1626
1627 const char *FilenameStart = &FilenameBuffer[0], *FilenameEnd;
1628 bool isAngled = GetIncludeFilenameSpelling(FilenameTok,
1629 FilenameStart, FilenameEnd);
1630 // If GetIncludeFilenameSpelling set the start ptr to null, there was an
1631 // error.
1632 if (FilenameStart == 0)
1633 return;
1634
1635 // Verify that there is nothing after the filename, other than EOM. Use the
1636 // preprocessor to lex this in case lexing the filename entered a macro.
1637 CheckEndOfDirective("#include");
1638
1639 // Check that we don't have infinite #include recursion.
1640 if (IncludeMacroStack.size() == MaxAllowedIncludeStackDepth-1)
1641 return Diag(FilenameTok, diag::err_pp_include_too_deep);
1642
1643 // Search include directories.
1644 const DirectoryLookup *CurDir;
1645 const FileEntry *File = LookupFile(FilenameStart, FilenameEnd,
1646 isAngled, LookupFrom, CurDir);
1647 if (File == 0)
1648 return Diag(FilenameTok, diag::err_pp_file_not_found,
1649 std::string(FilenameStart, FilenameEnd));
1650
1651 // Ask HeaderInfo if we should enter this #include file.
1652 if (!HeaderInfo.ShouldEnterIncludeFile(File, isImport)) {
1653 // If it returns true, #including this file will have no effect.
1654 return;
1655 }
1656
1657 // Look up the file, create a File ID for it.
1658 unsigned FileID = SourceMgr.createFileID(File, FilenameTok.getLocation());
1659 if (FileID == 0)
1660 return Diag(FilenameTok, diag::err_pp_file_not_found,
1661 std::string(FilenameStart, FilenameEnd));
1662
1663 // Finally, if all is good, enter the new file!
1664 EnterSourceFile(FileID, CurDir);
1665}
1666
1667/// HandleIncludeNextDirective - Implements #include_next.
1668///
1669void Preprocessor::HandleIncludeNextDirective(LexerToken &IncludeNextTok) {
1670 Diag(IncludeNextTok, diag::ext_pp_include_next_directive);
1671
1672 // #include_next is like #include, except that we start searching after
1673 // the current found directory. If we can't do this, issue a
1674 // diagnostic.
1675 const DirectoryLookup *Lookup = CurDirLookup;
1676 if (isInPrimaryFile()) {
1677 Lookup = 0;
1678 Diag(IncludeNextTok, diag::pp_include_next_in_primary);
1679 } else if (Lookup == 0) {
1680 Diag(IncludeNextTok, diag::pp_include_next_absolute_path);
1681 } else {
1682 // Start looking up in the next directory.
1683 ++Lookup;
1684 }
1685
1686 return HandleIncludeDirective(IncludeNextTok, Lookup);
1687}
1688
1689/// HandleImportDirective - Implements #import.
1690///
1691void Preprocessor::HandleImportDirective(LexerToken &ImportTok) {
1692 Diag(ImportTok, diag::ext_pp_import_directive);
1693
1694 return HandleIncludeDirective(ImportTok, 0, true);
1695}
1696
1697//===----------------------------------------------------------------------===//
1698// Preprocessor Macro Directive Handling.
1699//===----------------------------------------------------------------------===//
1700
1701/// ReadMacroDefinitionArgList - The ( starting an argument list of a macro
1702/// definition has just been read. Lex the rest of the arguments and the
1703/// closing ), updating MI with what we learn. Return true if an error occurs
1704/// parsing the arg list.
1705bool Preprocessor::ReadMacroDefinitionArgList(MacroInfo *MI) {
Chris Lattner25c96482007-07-14 22:46:43 +00001706 llvm::SmallVector<IdentifierInfo*, 32> Arguments;
1707
Reid Spencer5f016e22007-07-11 17:01:13 +00001708 LexerToken Tok;
1709 while (1) {
1710 LexUnexpandedToken(Tok);
1711 switch (Tok.getKind()) {
1712 case tok::r_paren:
1713 // Found the end of the argument list.
Chris Lattner25c96482007-07-14 22:46:43 +00001714 if (Arguments.empty()) { // #define FOO()
1715 MI->setArgumentList(Arguments.begin(), Arguments.end());
1716 return false;
1717 }
Reid Spencer5f016e22007-07-11 17:01:13 +00001718 // Otherwise we have #define FOO(A,)
1719 Diag(Tok, diag::err_pp_expected_ident_in_arg_list);
1720 return true;
1721 case tok::ellipsis: // #define X(... -> C99 varargs
1722 // Warn if use of C99 feature in non-C99 mode.
1723 if (!Features.C99) Diag(Tok, diag::ext_variadic_macro);
1724
1725 // Lex the token after the identifier.
1726 LexUnexpandedToken(Tok);
1727 if (Tok.getKind() != tok::r_paren) {
1728 Diag(Tok, diag::err_pp_missing_rparen_in_macro_def);
1729 return true;
1730 }
1731 // Add the __VA_ARGS__ identifier as an argument.
Chris Lattner25c96482007-07-14 22:46:43 +00001732 Arguments.push_back(Ident__VA_ARGS__);
Reid Spencer5f016e22007-07-11 17:01:13 +00001733 MI->setIsC99Varargs();
Chris Lattner25c96482007-07-14 22:46:43 +00001734 MI->setArgumentList(Arguments.begin(), Arguments.end());
Reid Spencer5f016e22007-07-11 17:01:13 +00001735 return false;
1736 case tok::eom: // #define X(
1737 Diag(Tok, diag::err_pp_missing_rparen_in_macro_def);
1738 return true;
1739 default:
1740 // Handle keywords and identifiers here to accept things like
1741 // #define Foo(for) for.
1742 IdentifierInfo *II = Tok.getIdentifierInfo();
1743 if (II == 0) {
1744 // #define X(1
1745 Diag(Tok, diag::err_pp_invalid_tok_in_arg_list);
1746 return true;
1747 }
1748
1749 // If this is already used as an argument, it is used multiple times (e.g.
1750 // #define X(A,A.
Chris Lattner25c96482007-07-14 22:46:43 +00001751 if (std::find(Arguments.begin(), Arguments.end(), II) !=
1752 Arguments.end()) { // C99 6.10.3p6
Reid Spencer5f016e22007-07-11 17:01:13 +00001753 Diag(Tok, diag::err_pp_duplicate_name_in_arg_list, II->getName());
1754 return true;
1755 }
1756
1757 // Add the argument to the macro info.
Chris Lattner25c96482007-07-14 22:46:43 +00001758 Arguments.push_back(II);
Reid Spencer5f016e22007-07-11 17:01:13 +00001759
1760 // Lex the token after the identifier.
1761 LexUnexpandedToken(Tok);
1762
1763 switch (Tok.getKind()) {
1764 default: // #define X(A B
1765 Diag(Tok, diag::err_pp_expected_comma_in_arg_list);
1766 return true;
1767 case tok::r_paren: // #define X(A)
Chris Lattner25c96482007-07-14 22:46:43 +00001768 MI->setArgumentList(Arguments.begin(), Arguments.end());
Reid Spencer5f016e22007-07-11 17:01:13 +00001769 return false;
1770 case tok::comma: // #define X(A,
1771 break;
1772 case tok::ellipsis: // #define X(A... -> GCC extension
1773 // Diagnose extension.
1774 Diag(Tok, diag::ext_named_variadic_macro);
1775
1776 // Lex the token after the identifier.
1777 LexUnexpandedToken(Tok);
1778 if (Tok.getKind() != tok::r_paren) {
1779 Diag(Tok, diag::err_pp_missing_rparen_in_macro_def);
1780 return true;
1781 }
1782
1783 MI->setIsGNUVarargs();
Chris Lattner25c96482007-07-14 22:46:43 +00001784 MI->setArgumentList(Arguments.begin(), Arguments.end());
Reid Spencer5f016e22007-07-11 17:01:13 +00001785 return false;
1786 }
1787 }
1788 }
1789}
1790
1791/// HandleDefineDirective - Implements #define. This consumes the entire macro
1792/// line then lets the caller lex the next real token. If 'isTargetSpecific' is
1793/// true, then this is a "#define_target", otherwise this is a "#define".
1794///
1795void Preprocessor::HandleDefineDirective(LexerToken &DefineTok,
1796 bool isTargetSpecific) {
1797 ++NumDefined;
1798
1799 LexerToken MacroNameTok;
1800 ReadMacroName(MacroNameTok, 1);
1801
1802 // Error reading macro name? If so, diagnostic already issued.
1803 if (MacroNameTok.getKind() == tok::eom)
1804 return;
Chris Lattnerc215bd62007-07-14 22:11:41 +00001805
Reid Spencer5f016e22007-07-11 17:01:13 +00001806 // If we are supposed to keep comments in #defines, reenable comment saving
1807 // mode.
1808 CurLexer->KeepCommentMode = KeepMacroComments;
1809
1810 // Create the new macro.
1811 MacroInfo *MI = new MacroInfo(MacroNameTok.getLocation());
1812 if (isTargetSpecific) MI->setIsTargetSpecific();
1813
1814 // If the identifier is an 'other target' macro, clear this bit.
1815 MacroNameTok.getIdentifierInfo()->setIsOtherTargetMacro(false);
1816
1817
1818 LexerToken Tok;
1819 LexUnexpandedToken(Tok);
1820
1821 // If this is a function-like macro definition, parse the argument list,
1822 // marking each of the identifiers as being used as macro arguments. Also,
1823 // check other constraints on the first token of the macro body.
1824 if (Tok.getKind() == tok::eom) {
1825 // If there is no body to this macro, we have no special handling here.
1826 } else if (Tok.getKind() == tok::l_paren && !Tok.hasLeadingSpace()) {
1827 // This is a function-like macro definition. Read the argument list.
1828 MI->setIsFunctionLike();
1829 if (ReadMacroDefinitionArgList(MI)) {
1830 // Forget about MI.
1831 delete MI;
1832 // Throw away the rest of the line.
1833 if (CurLexer->ParsingPreprocessorDirective)
1834 DiscardUntilEndOfDirective();
1835 return;
1836 }
1837
1838 // Read the first token after the arg list for down below.
1839 LexUnexpandedToken(Tok);
1840 } else if (!Tok.hasLeadingSpace()) {
1841 // C99 requires whitespace between the macro definition and the body. Emit
1842 // a diagnostic for something like "#define X+".
1843 if (Features.C99) {
1844 Diag(Tok, diag::ext_c99_whitespace_required_after_macro_name);
1845 } else {
1846 // FIXME: C90/C++ do not get this diagnostic, but it does get a similar
1847 // one in some cases!
1848 }
1849 } else {
1850 // This is a normal token with leading space. Clear the leading space
1851 // marker on the first token to get proper expansion.
1852 Tok.clearFlag(LexerToken::LeadingSpace);
1853 }
1854
1855 // If this is a definition of a variadic C99 function-like macro, not using
1856 // the GNU named varargs extension, enabled __VA_ARGS__.
1857
1858 // "Poison" __VA_ARGS__, which can only appear in the expansion of a macro.
1859 // This gets unpoisoned where it is allowed.
1860 assert(Ident__VA_ARGS__->isPoisoned() && "__VA_ARGS__ should be poisoned!");
1861 if (MI->isC99Varargs())
1862 Ident__VA_ARGS__->setIsPoisoned(false);
1863
1864 // Read the rest of the macro body.
Chris Lattnerb5e240f2007-07-14 21:54:03 +00001865 if (MI->isObjectLike()) {
1866 // Object-like macros are very simple, just read their body.
1867 while (Tok.getKind() != tok::eom) {
1868 MI->AddTokenToBody(Tok);
Reid Spencer5f016e22007-07-11 17:01:13 +00001869 // Get the next token of the macro.
1870 LexUnexpandedToken(Tok);
Reid Spencer5f016e22007-07-11 17:01:13 +00001871 }
1872
Chris Lattnerb5e240f2007-07-14 21:54:03 +00001873 } else {
1874 // Otherwise, read the body of a function-like macro. This has to validate
1875 // the # (stringize) operator.
1876 while (Tok.getKind() != tok::eom) {
1877 MI->AddTokenToBody(Tok);
Reid Spencer5f016e22007-07-11 17:01:13 +00001878
Chris Lattnerb5e240f2007-07-14 21:54:03 +00001879 // Check C99 6.10.3.2p1: ensure that # operators are followed by macro
1880 // parameters in function-like macro expansions.
1881 if (Tok.getKind() != tok::hash) {
1882 // Get the next token of the macro.
1883 LexUnexpandedToken(Tok);
1884 continue;
1885 }
1886
1887 // Get the next token of the macro.
1888 LexUnexpandedToken(Tok);
1889
1890 // Not a macro arg identifier?
1891 if (!Tok.getIdentifierInfo() ||
1892 MI->getArgumentNum(Tok.getIdentifierInfo()) == -1) {
1893 Diag(Tok, diag::err_pp_stringize_not_parameter);
1894 delete MI;
1895
1896 // Disable __VA_ARGS__ again.
1897 Ident__VA_ARGS__->setIsPoisoned(true);
1898 return;
1899 }
1900
1901 // Things look ok, add the param name token to the macro.
1902 MI->AddTokenToBody(Tok);
1903
1904 // Get the next token of the macro.
1905 LexUnexpandedToken(Tok);
1906 }
Reid Spencer5f016e22007-07-11 17:01:13 +00001907 }
1908
Chris Lattnerc215bd62007-07-14 22:11:41 +00001909
Reid Spencer5f016e22007-07-11 17:01:13 +00001910 // Disable __VA_ARGS__ again.
1911 Ident__VA_ARGS__->setIsPoisoned(true);
1912
1913 // Check that there is no paste (##) operator at the begining or end of the
1914 // replacement list.
1915 unsigned NumTokens = MI->getNumTokens();
1916 if (NumTokens != 0) {
1917 if (MI->getReplacementToken(0).getKind() == tok::hashhash) {
1918 Diag(MI->getReplacementToken(0), diag::err_paste_at_start);
1919 delete MI;
1920 return;
1921 }
1922 if (MI->getReplacementToken(NumTokens-1).getKind() == tok::hashhash) {
1923 Diag(MI->getReplacementToken(NumTokens-1), diag::err_paste_at_end);
1924 delete MI;
1925 return;
1926 }
1927 }
1928
1929 // If this is the primary source file, remember that this macro hasn't been
1930 // used yet.
1931 if (isInPrimaryFile())
1932 MI->setIsUsed(false);
1933
1934 // Finally, if this identifier already had a macro defined for it, verify that
1935 // the macro bodies are identical and free the old definition.
1936 if (MacroInfo *OtherMI = MacroNameTok.getIdentifierInfo()->getMacroInfo()) {
1937 if (!OtherMI->isUsed())
1938 Diag(OtherMI->getDefinitionLoc(), diag::pp_macro_not_used);
1939
1940 // Macros must be identical. This means all tokes and whitespace separation
1941 // must be the same. C99 6.10.3.2.
1942 if (!MI->isIdenticalTo(*OtherMI, *this)) {
1943 Diag(MI->getDefinitionLoc(), diag::ext_pp_macro_redef,
1944 MacroNameTok.getIdentifierInfo()->getName());
1945 Diag(OtherMI->getDefinitionLoc(), diag::ext_pp_macro_redef2);
1946 }
1947 delete OtherMI;
1948 }
1949
1950 MacroNameTok.getIdentifierInfo()->setMacroInfo(MI);
1951}
1952
1953/// HandleDefineOtherTargetDirective - Implements #define_other_target.
1954void Preprocessor::HandleDefineOtherTargetDirective(LexerToken &Tok) {
1955 LexerToken MacroNameTok;
1956 ReadMacroName(MacroNameTok, 1);
1957
1958 // Error reading macro name? If so, diagnostic already issued.
1959 if (MacroNameTok.getKind() == tok::eom)
1960 return;
1961
1962 // Check to see if this is the last token on the #undef line.
1963 CheckEndOfDirective("#define_other_target");
1964
1965 // If there is already a macro defined by this name, turn it into a
1966 // target-specific define.
1967 if (MacroInfo *MI = MacroNameTok.getIdentifierInfo()->getMacroInfo()) {
1968 MI->setIsTargetSpecific(true);
1969 return;
1970 }
1971
1972 // Mark the identifier as being a macro on some other target.
1973 MacroNameTok.getIdentifierInfo()->setIsOtherTargetMacro();
1974}
1975
1976
1977/// HandleUndefDirective - Implements #undef.
1978///
1979void Preprocessor::HandleUndefDirective(LexerToken &UndefTok) {
1980 ++NumUndefined;
1981
1982 LexerToken MacroNameTok;
1983 ReadMacroName(MacroNameTok, 2);
1984
1985 // Error reading macro name? If so, diagnostic already issued.
1986 if (MacroNameTok.getKind() == tok::eom)
1987 return;
1988
1989 // Check to see if this is the last token on the #undef line.
1990 CheckEndOfDirective("#undef");
1991
1992 // Okay, we finally have a valid identifier to undef.
1993 MacroInfo *MI = MacroNameTok.getIdentifierInfo()->getMacroInfo();
1994
1995 // #undef untaints an identifier if it were marked by define_other_target.
1996 MacroNameTok.getIdentifierInfo()->setIsOtherTargetMacro(false);
1997
1998 // If the macro is not defined, this is a noop undef, just return.
1999 if (MI == 0) return;
2000
2001 if (!MI->isUsed())
2002 Diag(MI->getDefinitionLoc(), diag::pp_macro_not_used);
2003
2004 // Free macro definition.
2005 delete MI;
2006 MacroNameTok.getIdentifierInfo()->setMacroInfo(0);
2007}
2008
2009
2010//===----------------------------------------------------------------------===//
2011// Preprocessor Conditional Directive Handling.
2012//===----------------------------------------------------------------------===//
2013
2014/// HandleIfdefDirective - Implements the #ifdef/#ifndef directive. isIfndef is
2015/// true when this is a #ifndef directive. ReadAnyTokensBeforeDirective is true
2016/// if any tokens have been returned or pp-directives activated before this
2017/// #ifndef has been lexed.
2018///
2019void Preprocessor::HandleIfdefDirective(LexerToken &Result, bool isIfndef,
2020 bool ReadAnyTokensBeforeDirective) {
2021 ++NumIf;
2022 LexerToken DirectiveTok = Result;
2023
2024 LexerToken MacroNameTok;
2025 ReadMacroName(MacroNameTok);
2026
2027 // Error reading macro name? If so, diagnostic already issued.
2028 if (MacroNameTok.getKind() == tok::eom)
2029 return;
2030
2031 // Check to see if this is the last token on the #if[n]def line.
2032 CheckEndOfDirective(isIfndef ? "#ifndef" : "#ifdef");
2033
2034 // If the start of a top-level #ifdef, inform MIOpt.
2035 if (!ReadAnyTokensBeforeDirective &&
2036 CurLexer->getConditionalStackDepth() == 0) {
2037 assert(isIfndef && "#ifdef shouldn't reach here");
2038 CurLexer->MIOpt.EnterTopLevelIFNDEF(MacroNameTok.getIdentifierInfo());
2039 }
2040
2041 IdentifierInfo *MII = MacroNameTok.getIdentifierInfo();
2042 MacroInfo *MI = MII->getMacroInfo();
2043
2044 // If there is a macro, process it.
2045 if (MI) {
2046 // Mark it used.
2047 MI->setIsUsed(true);
2048
2049 // If this is the first use of a target-specific macro, warn about it.
2050 if (MI->isTargetSpecific()) {
2051 MI->setIsTargetSpecific(false); // Don't warn on second use.
2052 getTargetInfo().DiagnoseNonPortability(MacroNameTok.getLocation(),
2053 diag::port_target_macro_use);
2054 }
2055 } else {
2056 // Use of a target-specific macro for some other target? If so, warn.
2057 if (MII->isOtherTargetMacro()) {
2058 MII->setIsOtherTargetMacro(false); // Don't warn on second use.
2059 getTargetInfo().DiagnoseNonPortability(MacroNameTok.getLocation(),
2060 diag::port_target_macro_use);
2061 }
2062 }
2063
2064 // Should we include the stuff contained by this directive?
2065 if (!MI == isIfndef) {
2066 // Yes, remember that we are inside a conditional, then lex the next token.
2067 CurLexer->pushConditionalLevel(DirectiveTok.getLocation(), /*wasskip*/false,
2068 /*foundnonskip*/true, /*foundelse*/false);
2069 } else {
2070 // No, skip the contents of this block and return the first token after it.
2071 SkipExcludedConditionalBlock(DirectiveTok.getLocation(),
2072 /*Foundnonskip*/false,
2073 /*FoundElse*/false);
2074 }
2075}
2076
2077/// HandleIfDirective - Implements the #if directive.
2078///
2079void Preprocessor::HandleIfDirective(LexerToken &IfToken,
2080 bool ReadAnyTokensBeforeDirective) {
2081 ++NumIf;
2082
2083 // Parse and evaluation the conditional expression.
2084 IdentifierInfo *IfNDefMacro = 0;
2085 bool ConditionalTrue = EvaluateDirectiveExpression(IfNDefMacro);
2086
2087 // Should we include the stuff contained by this directive?
2088 if (ConditionalTrue) {
2089 // If this condition is equivalent to #ifndef X, and if this is the first
2090 // directive seen, handle it for the multiple-include optimization.
2091 if (!ReadAnyTokensBeforeDirective &&
2092 CurLexer->getConditionalStackDepth() == 0 && IfNDefMacro)
2093 CurLexer->MIOpt.EnterTopLevelIFNDEF(IfNDefMacro);
2094
2095 // Yes, remember that we are inside a conditional, then lex the next token.
2096 CurLexer->pushConditionalLevel(IfToken.getLocation(), /*wasskip*/false,
2097 /*foundnonskip*/true, /*foundelse*/false);
2098 } else {
2099 // No, skip the contents of this block and return the first token after it.
2100 SkipExcludedConditionalBlock(IfToken.getLocation(), /*Foundnonskip*/false,
2101 /*FoundElse*/false);
2102 }
2103}
2104
2105/// HandleEndifDirective - Implements the #endif directive.
2106///
2107void Preprocessor::HandleEndifDirective(LexerToken &EndifToken) {
2108 ++NumEndif;
2109
2110 // Check that this is the whole directive.
2111 CheckEndOfDirective("#endif");
2112
2113 PPConditionalInfo CondInfo;
2114 if (CurLexer->popConditionalLevel(CondInfo)) {
2115 // No conditionals on the stack: this is an #endif without an #if.
2116 return Diag(EndifToken, diag::err_pp_endif_without_if);
2117 }
2118
2119 // If this the end of a top-level #endif, inform MIOpt.
2120 if (CurLexer->getConditionalStackDepth() == 0)
2121 CurLexer->MIOpt.ExitTopLevelConditional();
2122
2123 assert(!CondInfo.WasSkipping && !CurLexer->LexingRawMode &&
2124 "This code should only be reachable in the non-skipping case!");
2125}
2126
2127
2128void Preprocessor::HandleElseDirective(LexerToken &Result) {
2129 ++NumElse;
2130
2131 // #else directive in a non-skipping conditional... start skipping.
2132 CheckEndOfDirective("#else");
2133
2134 PPConditionalInfo CI;
2135 if (CurLexer->popConditionalLevel(CI))
2136 return Diag(Result, diag::pp_err_else_without_if);
2137
2138 // If this is a top-level #else, inform the MIOpt.
2139 if (CurLexer->getConditionalStackDepth() == 0)
2140 CurLexer->MIOpt.FoundTopLevelElse();
2141
2142 // If this is a #else with a #else before it, report the error.
2143 if (CI.FoundElse) Diag(Result, diag::pp_err_else_after_else);
2144
2145 // Finally, skip the rest of the contents of this block and return the first
2146 // token after it.
2147 return SkipExcludedConditionalBlock(CI.IfLoc, /*Foundnonskip*/true,
2148 /*FoundElse*/true);
2149}
2150
2151void Preprocessor::HandleElifDirective(LexerToken &ElifToken) {
2152 ++NumElse;
2153
2154 // #elif directive in a non-skipping conditional... start skipping.
2155 // We don't care what the condition is, because we will always skip it (since
2156 // the block immediately before it was included).
2157 DiscardUntilEndOfDirective();
2158
2159 PPConditionalInfo CI;
2160 if (CurLexer->popConditionalLevel(CI))
2161 return Diag(ElifToken, diag::pp_err_elif_without_if);
2162
2163 // If this is a top-level #elif, inform the MIOpt.
2164 if (CurLexer->getConditionalStackDepth() == 0)
2165 CurLexer->MIOpt.FoundTopLevelElse();
2166
2167 // If this is a #elif with a #else before it, report the error.
2168 if (CI.FoundElse) Diag(ElifToken, diag::pp_err_elif_after_else);
2169
2170 // Finally, skip the rest of the contents of this block and return the first
2171 // token after it.
2172 return SkipExcludedConditionalBlock(CI.IfLoc, /*Foundnonskip*/true,
2173 /*FoundElse*/CI.FoundElse);
2174}
2175