Merged revisions 88535,88661 via svnmerge from
svn+ssh://pythondev@svn.python.org/sandbox/trunk/2to3/lib2to3

........
  r88535 | brett.cannon | 2011-02-23 13:46:46 -0600 (Wed, 23 Feb 2011) | 1 line

  Add lib2to3.__main__ for easy testing from the console.
........
  r88661 | benjamin.peterson | 2011-02-26 16:06:24 -0600 (Sat, 26 Feb 2011) | 6 lines

  fix refactoring of source containing formfeed characters #11250

  This is because text.splitlines() is not the same as
  list(StringIO.StringIO(text)): splitlines() also breaks on form feed
  (and other Unicode line-boundary characters), while StringIO's
  readline() only breaks on newlines.
........
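
For reference, the behavioural difference behind the fix can be seen directly
at a Python 2 interpreter (a minimal sketch; the sample string mirrors the new
test case and is only illustrative):

    import StringIO

    text = u"print 1\n\x0cprint 2\n"

    # unicode.splitlines() treats the form feed (\x0c) as a line boundary,
    # so the old generate_lines() helper handed the tokenizer an extra line.
    print text.splitlines(True)
    # [u'print 1\n', u'\x0c', u'print 2\n']

    # StringIO.readline() only breaks on newlines, which matches what the
    # tokenizer expects.
    print list(StringIO.StringIO(text))
    # [u'print 1\n', u'\x0cprint 2\n']
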
diff --git a/Lib/lib2to3/__main__.py b/Lib/lib2to3/__main__.py
new file mode 100644
index 0000000..80688ba
--- /dev/null
+++ b/Lib/lib2to3/__main__.py
@@ -0,0 +1,4 @@
+import sys
+from .main import main
+
+sys.exit(main("lib2to3.fixes"))
diff --git a/Lib/lib2to3/patcomp.py b/Lib/lib2to3/patcomp.py
index 84fee5b..093e5f9 100644
--- a/Lib/lib2to3/patcomp.py
+++ b/Lib/lib2to3/patcomp.py
@@ -12,6 +12,7 @@
 
 # Python imports
 import os
+import StringIO
 
 # Fairly local imports
 from .pgen2 import driver, literals, token, tokenize, parse, grammar
@@ -32,7 +33,7 @@
 def tokenize_wrapper(input):
     """Tokenizes a string suppressing significant whitespace."""
     skip = set((token.NEWLINE, token.INDENT, token.DEDENT))
-    tokens = tokenize.generate_tokens(driver.generate_lines(input).next)
+    tokens = tokenize.generate_tokens(StringIO.StringIO(input).readline)
     for quintuple in tokens:
         type, value, start, end, line_text = quintuple
         if type not in skip:
diff --git a/Lib/lib2to3/pgen2/driver.py b/Lib/lib2to3/pgen2/driver.py
index 6b3825e..16adec0 100644
--- a/Lib/lib2to3/pgen2/driver.py
+++ b/Lib/lib2to3/pgen2/driver.py
@@ -19,6 +19,7 @@
 import codecs
 import os
 import logging
+import StringIO
 import sys
 
 # Pgen imports
@@ -101,18 +102,10 @@
 
     def parse_string(self, text, debug=False):
         """Parse a string and return the syntax tree."""
-        tokens = tokenize.generate_tokens(generate_lines(text).next)
+        tokens = tokenize.generate_tokens(StringIO.StringIO(text).readline)
         return self.parse_tokens(tokens, debug)
 
 
-def generate_lines(text):
-    """Generator that behaves like readline without using StringIO."""
-    for line in text.splitlines(True):
-        yield line
-    while True:
-        yield ""
-
-
 def load_grammar(gt="Grammar.txt", gp=None,
                  save=True, force=False, logger=None):
     """Load the grammar (maybe from a pickle)."""
diff --git a/Lib/lib2to3/tests/test_parser.py b/Lib/lib2to3/tests/test_parser.py
index 703d879..2602381 100644
--- a/Lib/lib2to3/tests/test_parser.py
+++ b/Lib/lib2to3/tests/test_parser.py
@@ -19,6 +19,16 @@
 # Local imports
 from lib2to3.pgen2 import tokenize
 from ..pgen2.parse import ParseError
+from lib2to3.pygram import python_symbols as syms
+
+
+class TestDriver(support.TestCase):
+
+    def test_formfeed(self):
+        s = """print 1\n\x0Cprint 2\n"""
+        t = driver.parse_string(s)
+        self.assertEqual(t.children[0].children[0].type, syms.print_stmt)
+        self.assertEqual(t.children[1].children[0].type, syms.print_stmt)
 
 
 class GrammarTest(support.TestCase):