Updated to Clang 3.5a.

Sync lib/Lex/PPLexerChange.cpp with upstream Clang 3.5: EnterSourceFile
now returns true on failure instead of returning void; CurSubmodule is
cleared whenever a new lexer is pushed and an annot_module_end token is
emitted when a submodule's file is popped; the end-of-buffer computation
is factored out into getCurLexerEndPos(); the unused-macro warnings are
restricted to TU_Complete translation units; and CurTokenLexer.take() is
replaced with CurTokenLexer.release() to match the updated smart-pointer
API.
Change-Id: I8127eb568f674c2e72635b639a3295381fe8af82
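Note on the first hunk: EnterSourceFile switches from void to bool, with
true reporting that the file's buffer could not be entered and false
meaning tokens will now come from the new file (or its PTH lexer). The
standalone sketch below only illustrates that calling convention;
MockPreprocessor and enterSourceFile are invented stand-ins for this
note, not Clang API.

    // Minimal sketch of the "return true on failure" convention adopted by
    // Preprocessor::EnterSourceFile in this patch. MockPreprocessor is a
    // hypothetical stand-in; only the error-handling shape mirrors the diff.
    #include <iostream>
    #include <string>

    struct MockPreprocessor {
      // Returns true on error (the buffer could not be obtained), false once
      // lexing from the new file has started, as in the new contract.
      bool enterSourceFile(const std::string &Path) {
        if (Path.empty()) {
          std::cerr << "error opening file\n"; // stands in for err_pp_error_opening_file
          return true;                         // failure: nothing was pushed
        }
        // ... push an include-stack entry and start lexing from Path ...
        return false;                          // success: caller keeps lexing
      }
    };

    int main() {
      MockPreprocessor PP;
      if (PP.enterSourceFile(""))              // callers now check the result
        std::cerr << "skipping the failed include\n";
      if (!PP.enterSourceFile("foo.h"))
        std::cout << "lexing tokens from foo.h\n";
      return 0;
    }

A true result lets a caller skip the failed include and continue with the
current buffer rather than silently lexing nothing.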
diff --git a/lib/Lex/PPLexerChange.cpp b/lib/Lex/PPLexerChange.cpp
index 1f970a4..949cd63 100644
--- a/lib/Lex/PPLexerChange.cpp
+++ b/lib/Lex/PPLexerChange.cpp
@@ -68,7 +68,7 @@
/// EnterSourceFile - Add a source file to the top of the include stack and
/// start lexing tokens from it instead of the current buffer.
-void Preprocessor::EnterSourceFile(FileID FID, const DirectoryLookup *CurDir,
+bool Preprocessor::EnterSourceFile(FileID FID, const DirectoryLookup *CurDir,
SourceLocation Loc) {
assert(!CurTokenLexer && "Cannot #include a file inside a macro!");
++NumEnteredSourceFiles;
@@ -79,7 +79,7 @@
if (PTH) {
if (PTHLexer *PL = PTH->CreateLexer(FID)) {
EnterSourceFileWithPTH(PL, CurDir);
- return;
+ return false;
}
}
@@ -91,7 +91,7 @@
SourceLocation FileStart = SourceMgr.getLocForStartOfFile(FID);
Diag(Loc, diag::err_pp_error_opening_file)
<< std::string(SourceMgr.getBufferName(FileStart)) << "";
- return;
+ return true;
}
if (isCodeCompletionEnabled() &&
@@ -102,7 +102,7 @@
}
EnterSourceFileWithLexer(new Lexer(FID, InputFile, *this), CurDir);
- return;
+ return false;
}
/// EnterSourceFileWithLexer - Add a source file to the top of the include stack
@@ -117,6 +117,7 @@
CurLexer.reset(TheLexer);
CurPPLexer = TheLexer;
CurDirLookup = CurDir;
+ CurSubmodule = 0;
if (CurLexerKind != CLK_LexAfterModuleImport)
CurLexerKind = CLK_Lexer;
@@ -141,6 +142,7 @@
CurDirLookup = CurDir;
CurPTHLexer.reset(PL);
CurPPLexer = CurPTHLexer.get();
+ CurSubmodule = 0;
if (CurLexerKind != CLK_LexAfterModuleImport)
CurLexerKind = CLK_PTHLexer;
@@ -244,6 +246,29 @@
// but it might if they're empty?
}
+/// \brief Determine the location to use as the end of the buffer for a lexer.
+///
+/// If the file ends with a newline, form the EOF token on the newline itself,
+/// rather than "on the line following it", which doesn't exist. This makes
+/// diagnostics relating to the end of file include the last line that the user
+/// actually typed, which is goodness.
+const char *Preprocessor::getCurLexerEndPos() {
+ const char *EndPos = CurLexer->BufferEnd;
+ if (EndPos != CurLexer->BufferStart &&
+ (EndPos[-1] == '\n' || EndPos[-1] == '\r')) {
+ --EndPos;
+
+ // Handle \n\r and \r\n:
+ if (EndPos != CurLexer->BufferStart &&
+ (EndPos[-1] == '\n' || EndPos[-1] == '\r') &&
+ EndPos[-1] != EndPos[0])
+ --EndPos;
+ }
+
+ return EndPos;
+}
+
+
/// HandleEndOfFile - This callback is invoked when the lexer hits the end of
/// the current file. This either returns the EOF token or pops a level off
/// the include stack and keeps going.
@@ -342,7 +367,18 @@
FileID ExitedFID;
if (Callbacks && !isEndOfMacro && CurPPLexer)
ExitedFID = CurPPLexer->getFileID();
-
+
+ bool LeavingSubmodule = CurSubmodule && CurLexer;
+ if (LeavingSubmodule) {
+ // Notify the parser that we've left the module.
+ const char *EndPos = getCurLexerEndPos();
+ Result.startToken();
+ CurLexer->BufferPtr = EndPos;
+ CurLexer->FormTokenWithChars(Result, EndPos, tok::annot_module_end);
+ Result.setAnnotationEndLoc(Result.getLocation());
+ Result.setAnnotationValue(CurSubmodule);
+ }
+
// We're done with the #included file.
RemoveTopOfLexerStack();
@@ -357,27 +393,13 @@
PPCallbacks::ExitFile, FileType, ExitedFID);
}
- // Client should lex another token.
- return false;
+ // Client should lex another token unless we generated an EOM.
+ return LeavingSubmodule;
}
- // If the file ends with a newline, form the EOF token on the newline itself,
- // rather than "on the line following it", which doesn't exist. This makes
- // diagnostics relating to the end of file include the last file that the user
- // actually typed, which is goodness.
+ // If this is the end of the main file, form an EOF token.
if (CurLexer) {
- const char *EndPos = CurLexer->BufferEnd;
- if (EndPos != CurLexer->BufferStart &&
- (EndPos[-1] == '\n' || EndPos[-1] == '\r')) {
- --EndPos;
-
- // Handle \n\r and \r\n:
- if (EndPos != CurLexer->BufferStart &&
- (EndPos[-1] == '\n' || EndPos[-1] == '\r') &&
- EndPos[-1] != EndPos[0])
- --EndPos;
- }
-
+ const char *EndPos = getCurLexerEndPos();
Result.startToken();
CurLexer->BufferPtr = EndPos;
CurLexer->FormTokenWithChars(Result, EndPos, tok::eof);
@@ -405,11 +427,15 @@
if (!isIncrementalProcessingEnabled())
CurPPLexer = 0;
- // This is the end of the top-level file. 'WarnUnusedMacroLocs' has collected
- // all macro locations that we need to warn because they are not used.
- for (WarnUnusedMacroLocsTy::iterator
- I=WarnUnusedMacroLocs.begin(), E=WarnUnusedMacroLocs.end(); I!=E; ++I)
- Diag(*I, diag::pp_macro_not_used);
+ if (TUKind == TU_Complete) {
+ // This is the end of the top-level file. 'WarnUnusedMacroLocs' has
+ // collected all macro locations that we need to warn because they are not
+ // used.
+ for (WarnUnusedMacroLocsTy::iterator
+ I=WarnUnusedMacroLocs.begin(), E=WarnUnusedMacroLocs.end();
+ I!=E; ++I)
+ Diag(*I, diag::pp_macro_not_used);
+ }
// If we are building a module that has an umbrella header, make sure that
// each of the headers within the directory covered by the umbrella header
@@ -498,7 +524,7 @@
if (NumCachedTokenLexers == TokenLexerCacheSize)
CurTokenLexer.reset();
else
- TokenLexerCache[NumCachedTokenLexers++] = CurTokenLexer.take();
+ TokenLexerCache[NumCachedTokenLexers++] = CurTokenLexer.release();
// Handle this like a #include file being popped off the stack.
return HandleEndOfFile(Result, true);
@@ -515,7 +541,7 @@
if (NumCachedTokenLexers == TokenLexerCacheSize)
CurTokenLexer.reset();
else
- TokenLexerCache[NumCachedTokenLexers++] = CurTokenLexer.take();
+ TokenLexerCache[NumCachedTokenLexers++] = CurTokenLexer.release();
}
PopIncludeMacroStack();
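
As a closing note on the refactoring above: the newline-trimming logic
that moves out of HandleEndOfFile into getCurLexerEndPos() is small
enough to exercise on its own. The sketch below mirrors that pointer
arithmetic over a plain std::string so the '\n', '\r', "\r\n" and "\n\r"
endings can be checked outside of Clang; trimmedEndOffset is a
hypothetical name used only for this illustration.

    // Standalone sketch of the trimming done by Preprocessor::getCurLexerEndPos():
    // if the buffer ends with a newline, the EOF (or annot_module_end) token is
    // formed on that newline rather than on the nonexistent line after it.
    #include <cassert>
    #include <cstddef>
    #include <string>

    // Returns the offset at which the end-of-buffer token should be formed.
    std::size_t trimmedEndOffset(const std::string &Buf) {
      const char *Start = Buf.data();
      const char *EndPos = Start + Buf.size();
      if (EndPos != Start && (EndPos[-1] == '\n' || EndPos[-1] == '\r')) {
        --EndPos;
        // Treat a two-character "\r\n" or "\n\r" pair as a single newline.
        if (EndPos != Start && (EndPos[-1] == '\n' || EndPos[-1] == '\r') &&
            EndPos[-1] != EndPos[0])
          --EndPos;
      }
      return static_cast<std::size_t>(EndPos - Start);
    }

    int main() {
      assert(trimmedEndOffset("int x;") == 6);     // no trailing newline: untouched
      assert(trimmedEndOffset("int x;\n") == 6);   // "\n" trimmed
      assert(trimmedEndOffset("int x;\r\n") == 6); // "\r\n" trimmed as one newline
      assert(trimmedEndOffset("a\n\n") == 2);      // only the final newline is trimmed
      assert(trimmedEndOffset("\n") == 0);         // buffer that is just a newline
      return 0;
    }

Forming the token at this trimmed position keeps end-of-file diagnostics
anchored on the last line the user actually wrote rather than on a line
past the end of the buffer.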