Change how raw lexers are handled: instead of creating them and then
using LexRawToken, create one and use LexFromRawLexer.  This avoids
twiddling the RawLexer flag around and simplifies some code (even
speeding up raw lexing a tiny bit).

This change also improves the token paster to use a Lexer on the stack
instead of allocating and deleting one on the heap.



git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@57393 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Rewrite/HTMLRewrite.cpp b/lib/Rewrite/HTMLRewrite.cpp
index b411e56..67afc08 100644
--- a/lib/Rewrite/HTMLRewrite.cpp
+++ b/lib/Rewrite/HTMLRewrite.cpp
@@ -357,7 +357,7 @@
   // Lex all the tokens in raw mode, to avoid entering #includes or expanding
   // macros.
   Token Tok;
-  L.LexRawToken(Tok);
+  L.LexFromRawLexer(Tok);
   
   while (Tok.isNot(tok::eof)) {
     // Since we are lexing unexpanded tokens, all tokens are from the main
@@ -398,10 +398,10 @@
       // Eat all of the tokens until we get to the next one at the start of
       // line.
       unsigned TokEnd = TokOffs+TokLen;
-      L.LexRawToken(Tok);
+      L.LexFromRawLexer(Tok);
       while (!Tok.isAtStartOfLine() && Tok.isNot(tok::eof)) {
         TokEnd = SourceMgr.getFullFilePos(Tok.getLocation())+Tok.getLength();
-        L.LexRawToken(Tok);
+        L.LexFromRawLexer(Tok);
       }
       
       // Find end of line.  This is a hack.
@@ -413,7 +413,7 @@
     }
     }
     
-    L.LexRawToken(Tok);
+    L.LexFromRawLexer(Tok);
   }
 }