Rename tok::eom to tok::eod.

The previous name was inaccurate, as this token in fact appears at
the end of every preprocessing directive, not just macro definitions.
No functionality change, apart from a tweak to one pragma diagnostic
(ext_pragma_syntax_eom is now ext_pragma_syntax_eod).
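
For context, the renamed token is what every directive handler loops for
when it discards or validates the rest of a line. A minimal sketch of that
pattern, modeled on Preprocessor::DiscardUntilEndOfDirective in the patch
below (the helper name skipRestOfDirective and the free-function framing
are illustrative only, not part of this change):

    // Sketch only: assumes clang/Lex/Preprocessor.h and <cassert>.
    static void skipRestOfDirective(clang::Preprocessor &PP) {
      clang::Token Tok;
      do {
        // Lex without macro expansion; inside a directive the raw tokens matter.
        PP.LexUnexpandedToken(Tok);
        assert(Tok.isNot(clang::tok::eof) && "EOF while discarding directive tokens");
      } while (Tok.isNot(clang::tok::eod));  // tok::eod (was tok::eom) ends the line
    }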

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@126631 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Lex/Lexer.cpp b/lib/Lex/Lexer.cpp
index b17198b..9643d8e 100644
--- a/lib/Lex/Lexer.cpp
+++ b/lib/Lex/Lexer.cpp
@@ -178,7 +178,7 @@
                                          InstantiationLocEnd, TokLen);
 
   // Ensure that the lexer thinks it is inside a directive, so that end \n will
-  // return an EOM token.
+  // return an EOD token.
   L->ParsingPreprocessorDirective = true;
 
   // This lexer really is for _Pragma.
@@ -1407,7 +1407,7 @@
     return SaveBCPLComment(Result, CurPtr);
 
   // If we are inside a preprocessor directive and we see the end of line,
-  // return immediately, so that the lexer can return this as an EOM token.
+  // return immediately, so that the lexer can return this as an EOD token.
   if (ParsingPreprocessorDirective || CurPtr == BufferEnd) {
     BufferPtr = CurPtr;
     return false;
@@ -1715,14 +1715,14 @@
       assert(CurPtr[-1] == Char && "Trigraphs for newline?");
       BufferPtr = CurPtr-1;
 
-      // Next, lex the character, which should handle the EOM transition.
+      // Next, lex the character, which should handle the EOD transition.
       Lex(Tmp);
       if (Tmp.is(tok::code_completion)) {
         if (PP && PP->getCodeCompletionHandler())
           PP->getCodeCompletionHandler()->CodeCompleteNaturalLanguage();
         Lex(Tmp);
       }
-      assert(Tmp.is(tok::eom) && "Unexpected token!");
+      assert(Tmp.is(tok::eod) && "Unexpected token!");
 
       // Finally, we're done, return the string we found.
       return Result;
@@ -1758,7 +1758,7 @@
     // Done parsing the "line".
     ParsingPreprocessorDirective = false;
     // Update the location of token as well as BufferPtr.
-    FormTokenWithChars(Result, CurPtr, tok::eom);
+    FormTokenWithChars(Result, CurPtr, tok::eod);
 
     // Restore comment saving mode, in case it was disabled for directive.
     SetCommentRetentionState(PP->getCommentRetentionState());
@@ -2006,7 +2006,7 @@
   case '\n':
   case '\r':
     // If we are inside a preprocessor directive and we see the end of line,
-    // we know we are done with the directive, so return an EOM token.
+    // we know we are done with the directive, so return an EOD token.
     if (ParsingPreprocessorDirective) {
       // Done parsing the "line".
       ParsingPreprocessorDirective = false;
@@ -2017,7 +2017,7 @@
       // Since we consumed a newline, we are back at the start of a line.
       IsAtStartOfLine = true;
 
-      Kind = tok::eom;
+      Kind = tok::eod;
       break;
     }
     // The returned token is at the start of the line.
diff --git a/lib/Lex/PPDirectives.cpp b/lib/Lex/PPDirectives.cpp
index 3e871ae..cea0798 100644
--- a/lib/Lex/PPDirectives.cpp
+++ b/lib/Lex/PPDirectives.cpp
@@ -81,17 +81,17 @@
 }
 
 /// DiscardUntilEndOfDirective - Read and discard all tokens remaining on the
-/// current line until the tok::eom token is found.
+/// current line until the tok::eod token is found.
 void Preprocessor::DiscardUntilEndOfDirective() {
   Token Tmp;
   do {
     LexUnexpandedToken(Tmp);
     assert(Tmp.isNot(tok::eof) && "EOF seen while discarding directive tokens");
-  } while (Tmp.isNot(tok::eom));
+  } while (Tmp.isNot(tok::eod));
 }
 
 /// ReadMacroName - Lex and validate a macro name, which occurs after a
-/// #define or #undef.  This sets the token kind to eom and discards the rest
+/// #define or #undef.  This sets the token kind to eod and discards the rest
 /// of the macro line if the macro name is invalid.  isDefineUndef is 1 if
 /// this is due to a #define, 2 if #undef directive, 0 if it is something
 /// else (e.g. #ifdef).
@@ -107,7 +107,7 @@
   }
   
   // Missing macro name?
-  if (MacroNameTok.is(tok::eom)) {
+  if (MacroNameTok.is(tok::eod)) {
     Diag(MacroNameTok, diag::err_pp_missing_macro_name);
     return;
   }
@@ -143,13 +143,13 @@
   }
 
   // Invalid macro name, read and discard the rest of the line.  Then set the
-  // token kind to tok::eom.
-  MacroNameTok.setKind(tok::eom);
+  // token kind to tok::eod.
+  MacroNameTok.setKind(tok::eod);
   return DiscardUntilEndOfDirective();
 }
 
-/// CheckEndOfDirective - Ensure that the next token is a tok::eom token.  If
-/// not, emit a diagnostic and consume up until the eom.  If EnableMacros is
+/// CheckEndOfDirective - Ensure that the next token is a tok::eod token.  If
+/// not, emit a diagnostic and consume up until the eod.  If EnableMacros is
 /// true, then we consider macros that expand to zero tokens as being ok.
 void Preprocessor::CheckEndOfDirective(const char *DirType, bool EnableMacros) {
   Token Tmp;
@@ -166,7 +166,7 @@
   while (Tmp.is(tok::comment))  // Skip comments in -C mode.
     LexUnexpandedToken(Tmp);
 
-  if (Tmp.isNot(tok::eom)) {
+  if (Tmp.isNot(tok::eod)) {
     // Add a fixit in GNU/C99/C++ mode.  Don't offer a fixit for strict-C89,
     // or if this is a macro-style preprocessing directive, because it is more
     // trouble than it is worth to insert /**/ and check that there is no /**/
@@ -238,7 +238,7 @@
 
     // We just parsed a # character at the start of a line, so we're in
     // directive mode.  Tell the lexer this so any newlines we see will be
-    // converted into an EOM token (this terminates the macro).
+    // converted into an EOD token (this terminates the macro).
     CurPPLexer->ParsingPreprocessorDirective = true;
     if (CurLexer) CurLexer->SetCommentRetentionState(false);
 
@@ -425,7 +425,7 @@
       if (!CondInfo.FoundNonSkip) {
         CondInfo.FoundNonSkip = true;
 
-        // Scan until the eom token.
+        // Scan until the eod token.
         CurPTHLexer->ParsingPreprocessorDirective = true;
         DiscardUntilEndOfDirective();
         CurPTHLexer->ParsingPreprocessorDirective = false;
@@ -535,7 +535,7 @@
 
   // We just parsed a # character at the start of a line, so we're in directive
   // mode.  Tell the lexer this so any newlines we see will be converted into an
-  // EOM token (which terminates the directive).
+  // EOD token (which terminates the directive).
   CurPPLexer->ParsingPreprocessorDirective = true;
 
   ++NumDirectives;
@@ -563,7 +563,7 @@
 
 TryAgain:
   switch (Result.getKind()) {
-  case tok::eom:
+  case tok::eod:
     return;   // null directive.
   case tok::comment:
     // Handle stuff like "# /*foo*/ define X" in -E -C mode.
@@ -686,7 +686,7 @@
   if (DigitTok.isNot(tok::numeric_constant)) {
     PP.Diag(DigitTok, DiagID);
 
-    if (DigitTok.isNot(tok::eom))
+    if (DigitTok.isNot(tok::eod))
       PP.DiscardUntilEndOfDirective();
     return true;
   }
@@ -758,9 +758,9 @@
   Token StrTok;
   Lex(StrTok);
 
-  // If the StrTok is "eom", then it wasn't present.  Otherwise, it must be a
-  // string followed by eom.
-  if (StrTok.is(tok::eom))
+  // If the StrTok is "eod", then it wasn't present.  Otherwise, it must be a
+  // string followed by eod.
+  if (StrTok.is(tok::eod))
     ; // ok
   else if (StrTok.isNot(tok::string_literal)) {
     Diag(StrTok, diag::err_pp_line_invalid_filename);
@@ -779,7 +779,7 @@
     FilenameID = SourceMgr.getLineTableFilenameID(Literal.GetString(),
                                                   Literal.GetStringLength());
 
-    // Verify that there is nothing after the string, other than EOM.  Because
+    // Verify that there is nothing after the string, other than EOD.  Because
     // of C99 6.10.4p5, macros that expand to empty tokens are ok.
     CheckEndOfDirective("line", true);
   }
@@ -800,7 +800,7 @@
   unsigned FlagVal;
   Token FlagTok;
   PP.Lex(FlagTok);
-  if (FlagTok.is(tok::eom)) return false;
+  if (FlagTok.is(tok::eod)) return false;
   if (GetLineValue(FlagTok, FlagVal, diag::err_pp_linemarker_invalid_flag, PP))
     return true;
 
@@ -808,7 +808,7 @@
     IsFileEntry = true;
 
     PP.Lex(FlagTok);
-    if (FlagTok.is(tok::eom)) return false;
+    if (FlagTok.is(tok::eod)) return false;
     if (GetLineValue(FlagTok, FlagVal, diag::err_pp_linemarker_invalid_flag,PP))
       return true;
   } else if (FlagVal == 2) {
@@ -834,7 +834,7 @@
     }
 
     PP.Lex(FlagTok);
-    if (FlagTok.is(tok::eom)) return false;
+    if (FlagTok.is(tok::eod)) return false;
     if (GetLineValue(FlagTok, FlagVal, diag::err_pp_linemarker_invalid_flag,PP))
       return true;
   }
@@ -849,7 +849,7 @@
   IsSystemHeader = true;
 
   PP.Lex(FlagTok);
-  if (FlagTok.is(tok::eom)) return false;
+  if (FlagTok.is(tok::eod)) return false;
   if (GetLineValue(FlagTok, FlagVal, diag::err_pp_linemarker_invalid_flag, PP))
     return true;
 
@@ -863,7 +863,7 @@
   IsExternCHeader = true;
 
   PP.Lex(FlagTok);
-  if (FlagTok.is(tok::eom)) return false;
+  if (FlagTok.is(tok::eod)) return false;
 
   // There are no more valid flags here.
   PP.Diag(FlagTok, diag::err_pp_linemarker_invalid_flag);
@@ -893,9 +893,9 @@
   bool IsSystemHeader = false, IsExternCHeader = false;
   int FilenameID = -1;
 
-  // If the StrTok is "eom", then it wasn't present.  Otherwise, it must be a
-  // string followed by eom.
-  if (StrTok.is(tok::eom))
+  // If the StrTok is "eod", then it wasn't present.  Otherwise, it must be a
+  // string followed by eod.
+  if (StrTok.is(tok::eod))
     ; // ok
   else if (StrTok.isNot(tok::string_literal)) {
     Diag(StrTok, diag::err_pp_linemarker_invalid_filename);
@@ -978,12 +978,12 @@
   if (StrTok.isNot(tok::string_literal) &&
       StrTok.isNot(tok::wide_string_literal)) {
     Diag(StrTok, diag::err_pp_malformed_ident);
-    if (StrTok.isNot(tok::eom))
+    if (StrTok.isNot(tok::eod))
       DiscardUntilEndOfDirective();
     return;
   }
 
-  // Verify that there is nothing after the string, other than EOM.
+  // Verify that there is nothing after the string, other than EOD.
   CheckEndOfDirective("ident");
 
   if (Callbacks) {
@@ -1052,14 +1052,14 @@
 ///
 /// This code concatenates and consumes tokens up to the '>' token.  It returns
 /// false if the > was found, otherwise it returns true if it finds and consumes
-/// the EOM marker.
+/// the EOD marker.
 bool Preprocessor::ConcatenateIncludeName(
                                         llvm::SmallString<128> &FilenameBuffer,
                                           SourceLocation &End) {
   Token CurTok;
 
   Lex(CurTok);
-  while (CurTok.isNot(tok::eom)) {
+  while (CurTok.isNot(tok::eod)) {
     End = CurTok.getLocation();
     
     // FIXME: Provide code completion for #includes.
@@ -1095,8 +1095,8 @@
     Lex(CurTok);
   }
 
-  // If we hit the eom marker, emit an error and return true so that the caller
-  // knows the EOM has been read.
+  // If we hit the eod marker, emit an error and return true so that the caller
+  // knows the EOD has been read.
   Diag(CurTok.getLocation(), diag::err_pp_expects_filename);
   return true;
 }
@@ -1120,8 +1120,8 @@
   SourceLocation End;
   
   switch (FilenameTok.getKind()) {
-  case tok::eom:
-    // If the token kind is EOM, the error has already been diagnosed.
+  case tok::eod:
+    // If the token kind is EOD, the error has already been diagnosed.
     return;
 
   case tok::angle_string_literal:
@@ -1135,7 +1135,7 @@
     // case, glue the tokens together into FilenameBuffer and interpret those.
     FilenameBuffer.push_back('<');
     if (ConcatenateIncludeName(FilenameBuffer, End))
-      return;   // Found <eom> but no ">"?  Diagnostic already emitted.
+      return;   // Found <eod> but no ">"?  Diagnostic already emitted.
     Filename = FilenameBuffer.str();
     break;
   default:
@@ -1153,7 +1153,7 @@
     return;
   }
 
-  // Verify that there is nothing after the filename, other than EOM.  Note that
+  // Verify that there is nothing after the filename, other than EOD.  Note that
   // we allow macros that expand to nothing after the filename, because this
   // falls into the category of "#include pp-tokens new-line" specified in
   // C99 6.10.2p4.
@@ -1302,7 +1302,7 @@
       MI->setIsC99Varargs();
       MI->setArgumentList(&Arguments[0], Arguments.size(), BP);
       return false;
-    case tok::eom:  // #define X(
+    case tok::eod:  // #define X(
       Diag(Tok, diag::err_pp_missing_rparen_in_macro_def);
       return true;
     default:
@@ -1366,7 +1366,7 @@
   ReadMacroName(MacroNameTok, 1);
 
   // Error reading macro name?  If so, diagnostic already issued.
-  if (MacroNameTok.is(tok::eom))
+  if (MacroNameTok.is(tok::eod))
     return;
 
   Token LastTok = MacroNameTok;
@@ -1384,7 +1384,7 @@
   // If this is a function-like macro definition, parse the argument list,
   // marking each of the identifiers as being used as macro arguments.  Also,
   // check other constraints on the first token of the macro body.
-  if (Tok.is(tok::eom)) {
+  if (Tok.is(tok::eod)) {
     // If there is no body to this macro, we have no special handling here.
   } else if (Tok.hasLeadingSpace()) {
     // This is a normal token with leading space.  Clear the leading space
@@ -1439,13 +1439,13 @@
       Diag(Tok, diag::warn_missing_whitespace_after_macro_name);
   }
 
-  if (!Tok.is(tok::eom))
+  if (!Tok.is(tok::eod))
     LastTok = Tok;
 
   // Read the rest of the macro body.
   if (MI->isObjectLike()) {
     // Object-like macros are very simple, just read their body.
-    while (Tok.isNot(tok::eom)) {
+    while (Tok.isNot(tok::eod)) {
       LastTok = Tok;
       MI->AddTokenToBody(Tok);
       // Get the next token of the macro.
@@ -1456,7 +1456,7 @@
     // Otherwise, read the body of a function-like macro.  While we are at it,
     // check C99 6.10.3.2p1: ensure that # operators are followed by macro
     // parameters in function-like macro expansions.
-    while (Tok.isNot(tok::eom)) {
+    while (Tok.isNot(tok::eod)) {
       LastTok = Tok;
 
       if (Tok.isNot(tok::hash)) {
@@ -1478,7 +1478,7 @@
         // the '#' because '#' is often a comment character.  However, change
         // the kind of the token to tok::unknown so that the preprocessor isn't
         // confused.
-        if (getLangOptions().AsmPreprocessor && Tok.isNot(tok::eom)) {
+        if (getLangOptions().AsmPreprocessor && Tok.isNot(tok::eod)) {
           LastTok.setKind(tok::unknown);
         } else {
           Diag(Tok, diag::err_pp_stringize_not_parameter);
@@ -1573,7 +1573,7 @@
   ReadMacroName(MacroNameTok, 2);
 
   // Error reading macro name?  If so, diagnostic already issued.
-  if (MacroNameTok.is(tok::eom))
+  if (MacroNameTok.is(tok::eod))
     return;
 
   // Check to see if this is the last token on the #undef line.
@@ -1619,7 +1619,7 @@
   ReadMacroName(MacroNameTok);
 
   // Error reading macro name?  If so, diagnostic already issued.
-  if (MacroNameTok.is(tok::eom)) {
+  if (MacroNameTok.is(tok::eod)) {
     // Skip code until we get to #endif.  This helps with recovery by not
     // emitting an error when the #endif is reached.
     SkipExcludedConditionalBlock(DirectiveTok.getLocation(),
diff --git a/lib/Lex/PPExpressions.cpp b/lib/Lex/PPExpressions.cpp
index 1451c5a..8fcfc70 100644
--- a/lib/Lex/PPExpressions.cpp
+++ b/lib/Lex/PPExpressions.cpp
@@ -180,7 +180,7 @@
   default:  // Non-value token.
     PP.Diag(PeekTok, diag::err_pp_expr_bad_token_start_expr);
     return true;
-  case tok::eom:
+  case tok::eod:
   case tok::r_paren:
     // If there is no expression, report and exit.
     PP.Diag(PeekTok, diag::err_pp_expected_value_in_expr);
@@ -372,7 +372,7 @@
 /// token.  This returns:
 ///   ~0 - Invalid token.
 ///   14 -> 3 - various operators.
-///    0 - 'eom' or ')'
+///    0 - 'eod' or ')'
 static unsigned getPrecedence(tok::TokenKind Kind) {
   switch (Kind) {
   default: return ~0U;
@@ -397,8 +397,8 @@
   case tok::question:             return 4;
   case tok::comma:                return 3;
   case tok::colon:                return 2;
-  case tok::r_paren:              return 0;   // Lowest priority, end of expr.
-  case tok::eom:                  return 0;   // Lowest priority, end of macro.
+  case tok::r_paren:              return 0;// Lowest priority, end of expr.
+  case tok::eod:                  return 0;// Lowest priority, end of directive.
   }
 }
 
@@ -713,7 +713,7 @@
   DefinedTracker DT;
   if (EvaluateValue(ResVal, Tok, DT, true, *this)) {
     // Parse error, skip the rest of the macro line.
-    if (Tok.isNot(tok::eom))
+    if (Tok.isNot(tok::eod))
       DiscardUntilEndOfDirective();
     
     // Restore 'DisableMacroExpansion'.
@@ -724,7 +724,7 @@
   // If we are at the end of the expression after just parsing a value, there
   // must be no (unparenthesized) binary operators involved, so we can exit
   // directly.
-  if (Tok.is(tok::eom)) {
+  if (Tok.is(tok::eod)) {
     // If the expression we parsed was of the form !defined(macro), return the
     // macro in IfNDefMacro.
     if (DT.State == DefinedTracker::NotDefinedMacro)
@@ -740,7 +740,7 @@
   if (EvaluateDirectiveSubExpr(ResVal, getPrecedence(tok::question),
                                Tok, true, *this)) {
     // Parse error, skip the rest of the macro line.
-    if (Tok.isNot(tok::eom))
+    if (Tok.isNot(tok::eod))
       DiscardUntilEndOfDirective();
     
     // Restore 'DisableMacroExpansion'.
@@ -748,9 +748,9 @@
     return false;
   }
 
-  // If we aren't at the tok::eom token, something bad happened, like an extra
+  // If we aren't at the tok::eod token, something bad happened, like an extra
   // ')' token.
-  if (Tok.isNot(tok::eom)) {
+  if (Tok.isNot(tok::eod)) {
     Diag(Tok, diag::err_pp_expected_eol);
     DiscardUntilEndOfDirective();
   }
diff --git a/lib/Lex/PPLexerChange.cpp b/lib/Lex/PPLexerChange.cpp
index eef42b6..bf0a7fb 100644
--- a/lib/Lex/PPLexerChange.cpp
+++ b/lib/Lex/PPLexerChange.cpp
@@ -301,7 +301,7 @@
 
   // We handle this by scanning for the closest real lexer, switching it to
   // raw mode and preprocessor mode.  This will cause it to return \n as an
-  // explicit EOM token.
+  // explicit EOD token.
   PreprocessorLexer *FoundLexer = 0;
   bool LexerWasInPPMode = false;
   for (unsigned i = 0, e = IncludeMacroStack.size(); i != e; ++i) {
@@ -309,11 +309,11 @@
     if (ISI.ThePPLexer == 0) continue;  // Scan for a real lexer.
 
     // Once we find a real lexer, mark it as raw mode (disabling macro
-    // expansions) and preprocessor mode (return EOM).  We know that the lexer
+    // expansions) and preprocessor mode (return EOD).  We know that the lexer
     // was *not* in raw mode before, because the macro that the comment came
     // from was expanded.  However, it could have already been in preprocessor
     // mode (#if COMMENT) in which case we have to return it to that mode and
-    // return EOM.
+    // return EOD.
     FoundLexer = ISI.ThePPLexer;
     FoundLexer->LexingRawMode = true;
     LexerWasInPPMode = FoundLexer->ParsingPreprocessorDirective;
@@ -326,22 +326,22 @@
   // the next token.
   if (!HandleEndOfTokenLexer(Tok)) Lex(Tok);
 
-  // Discarding comments as long as we don't have EOF or EOM.  This 'comments
+  // Discarding comments as long as we don't have EOF or EOD.  This 'comments
   // out' the rest of the line, including any tokens that came from other macros
   // that were active, as in:
   //  #define submacro a COMMENT b
   //    submacro c
   // which should lex to 'a' only: 'b' and 'c' should be removed.
-  while (Tok.isNot(tok::eom) && Tok.isNot(tok::eof))
+  while (Tok.isNot(tok::eod) && Tok.isNot(tok::eof))
     Lex(Tok);
 
-  // If we got an eom token, then we successfully found the end of the line.
-  if (Tok.is(tok::eom)) {
+  // If we got an eod token, then we successfully found the end of the line.
+  if (Tok.is(tok::eod)) {
     assert(FoundLexer && "Can't get end of line without an active lexer");
     // Restore the lexer back to normal mode instead of raw mode.
     FoundLexer->LexingRawMode = false;
 
-    // If the lexer was already in preprocessor mode, just return the EOM token
+    // If the lexer was already in preprocessor mode, just return the EOD token
     // to finish the preprocessor line.
     if (LexerWasInPPMode) return;
 
@@ -352,7 +352,7 @@
 
   // If we got an EOF token, then we reached the end of the token stream but
   // didn't find an explicit \n.  This can only happen if there was no lexer
-  // active (an active lexer would return EOM at EOF if there was no \n in
+  // active (an active lexer would return EOD at EOF if there was no \n in
   // preprocessor directive mode), so just return EOF as our token.
-  assert(!FoundLexer && "Lexer should return EOM before EOF in PP mode");
+  assert(!FoundLexer && "Lexer should return EOD before EOF in PP mode");
 }
diff --git a/lib/Lex/PPMacroExpansion.cpp b/lib/Lex/PPMacroExpansion.cpp
index ba92614..374f85d 100644
--- a/lib/Lex/PPMacroExpansion.cpp
+++ b/lib/Lex/PPMacroExpansion.cpp
@@ -355,9 +355,9 @@
         LexUnexpandedToken(Tok);
       }
       
-      if (Tok.is(tok::eof) || Tok.is(tok::eom)) { // "#if f(<eof>" & "#if f(\n"
+      if (Tok.is(tok::eof) || Tok.is(tok::eod)) { // "#if f(<eof>" & "#if f(\n"
         Diag(MacroName, diag::err_unterm_macro_invoc);
-        // Do not lose the EOF/EOM.  Return it to the client.
+        // Do not lose the EOF/EOD.  Return it to the client.
         MacroName = Tok;
         return 0;
       } else if (Tok.is(tok::r_paren)) {
@@ -626,8 +626,8 @@
   SourceLocation EndLoc;
   
   switch (Tok.getKind()) {
-  case tok::eom:
-    // If the token kind is EOM, the error has already been diagnosed.
+  case tok::eod:
+    // If the token kind is EOD, the error has already been diagnosed.
     return false;
 
   case tok::angle_string_literal:
@@ -644,7 +644,7 @@
     // case, glue the tokens together into FilenameBuffer and interpret those.
     FilenameBuffer.push_back('<');
     if (PP.ConcatenateIncludeName(FilenameBuffer, EndLoc))
-      return false;   // Found <eom> but no ">"?  Diagnostic already emitted.
+      return false;   // Found <eod> but no ">"?  Diagnostic already emitted.
     Filename = FilenameBuffer.str();
     break;
   default:
diff --git a/lib/Lex/PTHLexer.cpp b/lib/Lex/PTHLexer.cpp
index 975753b..3a53881 100644
--- a/lib/Lex/PTHLexer.cpp
+++ b/lib/Lex/PTHLexer.cpp
@@ -125,7 +125,7 @@
     return PP->Lex(Tok);
   }
 
-  if (TKind == tok::eom) {
+  if (TKind == tok::eod) {
     assert(ParsingPreprocessorDirective);
     ParsingPreprocessorDirective = false;
     return;
diff --git a/lib/Lex/Pragma.cpp b/lib/Lex/Pragma.cpp
index 80d3bb1..8fd5ec2 100644
--- a/lib/Lex/Pragma.cpp
+++ b/lib/Lex/Pragma.cpp
@@ -229,8 +229,8 @@
 
   PragmaToks.front().setFlag(Token::LeadingSpace);
 
-  // Replace the ')' with an EOM to mark the end of the pragma.
-  PragmaToks.back().setKind(tok::eom);
+  // Replace the ')' with an EOD to mark the end of the pragma.
+  PragmaToks.back().setKind(tok::eod);
 
   Token *TokArray = new Token[PragmaToks.size()];
   std::copy(PragmaToks.begin(), PragmaToks.end(), TokArray);
@@ -283,7 +283,7 @@
     if (CurPPLexer) CurPPLexer->LexingRawMode = false;
 
     // If we reached the end of line, we're done.
-    if (Tok.is(tok::eom)) return;
+    if (Tok.is(tok::eod)) return;
 
     // Can only poison identifiers.
     if (Tok.isNot(tok::raw_identifier)) {
@@ -348,8 +348,8 @@
   Token FilenameTok;
   CurPPLexer->LexIncludeFilename(FilenameTok);
 
-  // If the token kind is EOM, the error has already been diagnosed.
-  if (FilenameTok.is(tok::eom))
+  // If the token kind is EOD, the error has already been diagnosed.
+  if (FilenameTok.is(tok::eod))
     return;
 
   // Reserve a buffer to get the spelling.
@@ -381,7 +381,7 @@
     // Lex tokens at the end of the message and include them in the message.
     std::string Message;
     Lex(DependencyTok);
-    while (DependencyTok.isNot(tok::eom)) {
+    while (DependencyTok.isNot(tok::eod)) {
       Message += getSpelling(DependencyTok) + " ";
       Lex(DependencyTok);
     }
@@ -470,7 +470,7 @@
   }
   Lex(Tok);  // eat the r_paren.
 
-  if (Tok.isNot(tok::eom)) {
+  if (Tok.isNot(tok::eod)) {
     Diag(Tok.getLocation(), diag::err_pragma_comment_malformed);
     return;
   }
@@ -541,7 +541,7 @@
     Lex(Tok);  // eat the r_paren.
   }
 
-  if (Tok.isNot(tok::eom)) {
+  if (Tok.isNot(tok::eod)) {
     Diag(Tok.getLocation(), diag::err_pragma_message_malformed);
     return;
   }
@@ -737,10 +737,10 @@
     return true;
   }
 
-  // Verify that this is followed by EOM.
+  // Verify that this is followed by EOD.
   LexUnexpandedToken(Tok);
-  if (Tok.isNot(tok::eom))
-    Diag(Tok, diag::ext_pragma_syntax_eom);
+  if (Tok.isNot(tok::eod))
+    Diag(Tok, diag::ext_pragma_syntax_eod);
   return false;
 }
 
@@ -883,7 +883,7 @@
       PP.LexUnexpandedToken(Tok);
     }
 
-    if (Tok.isNot(tok::eom)) {
+    if (Tok.isNot(tok::eod)) {
       PP.Diag(Tok.getLocation(), diag::warn_pragma_diagnostic_invalid_token);
       return;
     }
diff --git a/lib/Lex/PreprocessorLexer.cpp b/lib/Lex/PreprocessorLexer.cpp
index e005c49..808a81b 100644
--- a/lib/Lex/PreprocessorLexer.cpp
+++ b/lib/Lex/PreprocessorLexer.cpp
@@ -34,7 +34,7 @@
   ParsingFilename = false;
 
   // No filename?
-  if (FilenameTok.is(tok::eom))
+  if (FilenameTok.is(tok::eod))
     PP->Diag(FilenameTok.getLocation(), diag::err_pp_expects_filename);
 }
 
diff --git a/lib/Lex/TokenLexer.cpp b/lib/Lex/TokenLexer.cpp
index caa44bf..f1e1596 100644
--- a/lib/Lex/TokenLexer.cpp
+++ b/lib/Lex/TokenLexer.cpp
@@ -546,7 +546,7 @@
 /// isParsingPreprocessorDirective - Return true if we are in the middle of a
 /// preprocessor directive.
 bool TokenLexer::isParsingPreprocessorDirective() const {
-  return Tokens[NumTokens-1].is(tok::eom) && !isAtEnd();
+  return Tokens[NumTokens-1].is(tok::eod) && !isAtEnd();
 }
 
 /// HandleMicrosoftCommentPaste - In microsoft compatibility mode, /##/ pastes