Repaired a number of errors in this test:

- The doctests in decistmt() weren't run at all when
  test_tokenize was run via regrtest.py (sketch 1 below
  shows why).

- Some expected output in decistmt() was Windows-specific
  (but nobody noticed because the doctests weren't getting
  run); sketch 2 below shows the difference.

- test_roundtrip() didn't actually test anything when
  running the tests with -O (sketch 3 below shows why).
  Now it does.

- Changed test_roundtrip() to show the name of the input
  file when it fails.  That would have saved a lot of
  time earlier today.

- Added a bunch of comments.
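
Sketch 1: why the doctests never ran under regrtest.py.  With no
arguments, doctest.testmod() tests sys.modules['__main__'].  When
regrtest imports test_tokenize, __main__ is regrtest itself, so
decistmt()'s doctests were silently skipped.  A minimal sketch of the
difference (module names as in this patch):

    import doctest
    doctest.testmod()       # tests __main__; under regrtest that's
                            # regrtest.py, not this module

    # versus naming the module explicitly, as test_main() now does:
    from test import test_tokenize
    from test.test_support import run_doctest
    run_doctest(test_tokenize)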
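
Sketch 2: the Windows-specific output.  Microsoft's C runtime formats
float exponents with three digits, so str() of the same float gives
"...e-007" on Windows and "...e-07" elsewhere.  A doctest can absorb
the difference with the ELLIPSIS directive, as the patch now does:

    >>> print -3.21716034272e-07  #doctest: +ELLIPSIS
    -3.21716034272e-0...7

The "..." matches either nothing or the extra "0"; the 12 leading
digits are platform-independent, so nothing else needs hedging.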
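
Sketch 3: why -O defeated the old roundtrip check.  assert statements
are compiled away when Python runs with -O (__debug__ is false), so
"assert t1 == t2" never executed and the test passed vacuously.  A
minimal before/after sketch (names as in the patch):

    def check_old(t1, t2):
        assert t1 == t2     # no-op under python -O

    def check_new(t1, t2, f):
        if t1 != t2:        # runs regardless of -O
            raise TestFailed("untokenize() roundtrip failed for %r" % f)
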
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index d3c1cc4..b064967 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -1,70 +1,30 @@
-from test.test_support import verbose, findfile, is_resource_enabled, TestFailed
 import os, glob, random
+from cStringIO import StringIO
+from test.test_support import (verbose, findfile, is_resource_enabled,
+                               TestFailed)
 from tokenize import (tokenize, generate_tokens, untokenize,
                       NUMBER, NAME, OP, STRING)
 
-if verbose:
-    print 'starting...'
-
-f = file(findfile('tokenize_tests' + os.extsep + 'txt'))
-tokenize(f.readline)
-f.close()
-
-
-
-###### Test roundtrip for untokenize ##########################
-
+# Test roundtrip for `untokenize`.  `f` is a file path.  The source code in
+# `f` is tokenized, converted back to source code via tokenize.untokenize(),
+# and then tokenized again from the result.  The test fails if the second
+# tokenization doesn't match the first.
 def test_roundtrip(f):
     ## print 'Testing:', f
-    f = file(f)
+    fobj = open(f)
     try:
-        fulltok = list(generate_tokens(f.readline))
+        fulltok = list(generate_tokens(fobj.readline))
     finally:
-        f.close()
+        fobj.close()
 
     t1 = [tok[:2] for tok in fulltok]
     newtext = untokenize(t1)
     readline = iter(newtext.splitlines(1)).next
     t2 = [tok[:2] for tok in generate_tokens(readline)]
-    assert t1 == t2
+    if t1 != t2:
+        raise TestFailed("untokenize() roundtrip failed for %r" % f)
 
-
-f = findfile('tokenize_tests' + os.extsep + 'txt')
-test_roundtrip(f)
-
-testdir = os.path.dirname(f) or os.curdir
-testfiles = glob.glob(testdir + os.sep + 'test*.py')
-if not is_resource_enabled('compiler'):
-    testfiles = random.sample(testfiles, 10)
-
-for f in testfiles:
-    test_roundtrip(f)
-
-
-###### Test detecton of IndentationError ######################
-
-from cStringIO import StringIO
-
-sampleBadText = """
-def foo():
-    bar
-  baz
-"""
-
-try:
-    for tok in generate_tokens(StringIO(sampleBadText).readline):
-        pass
-except IndentationError:
-    pass
-else:
-    raise TestFailed("Did not detect IndentationError:")
-
-
-###### Test example in the docs ###############################
-
-from decimal import Decimal
-from cStringIO import StringIO
-
+# This is an example from the docs, set up as a doctest.
 def decistmt(s):
     """Substitute Decimals for floats in a string of statements.
 
@@ -73,12 +33,21 @@
     >>> decistmt(s)
     "print +Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7')"
 
-    >>> exec(s)
-    -3.21716034272e-007
+    The format of the exponent is inherited from the platform C library.
+    Known cases are "e-007" (Windows) and "e-07" (not Windows).  Since
+    we're only showing 12 digits, and the 13th isn't close to 5, the
+    rest of the output should be platform-independent.
+
+    >>> exec(s) #doctest: +ELLIPSIS
+    -3.21716034272e-0...7
+
+    Output from calculations with Decimal should be identical across all
+    platforms.
+
     >>> exec(decistmt(s))
     -3.217160342717258261933904529E-7
-
     """
+
     result = []
     g = generate_tokens(StringIO(s).readline)   # tokenize the string
     for toknum, tokval, _, _, _  in g:
@@ -93,8 +62,53 @@
             result.append((toknum, tokval))
     return untokenize(result)
 
-import doctest
-doctest.testmod()
+def test_main():
+    if verbose:
+        print 'starting...'
 
-if verbose:
-    print 'finished'
+    # This displays the tokenization of tokenize_tests.txt to stdout, and
+    # regrtest.py checks that this equals the expected output (in the
+    # test/output/ directory).
+    f = open(findfile('tokenize_tests' + os.extsep + 'txt'))
+    tokenize(f.readline)
+    f.close()
+
+    # Now run test_roundtrip() over tokenize_tests.txt too, and over all
+    # (if the "compiler" resource is enabled) or a small random sample (if
+    # "compiler" is not enabled) of the test*.py files.
+    f = findfile('tokenize_tests' + os.extsep + 'txt')
+    test_roundtrip(f)
+
+    testdir = os.path.dirname(f) or os.curdir
+    testfiles = glob.glob(testdir + os.sep + 'test*.py')
+    if not is_resource_enabled('compiler'):
+        testfiles = random.sample(testfiles, 10)
+
+    for f in testfiles:
+        test_roundtrip(f)
+
+    # Test detection of IndentationError.
+    sampleBadText = """\
+def foo():
+    bar
+  baz
+"""
+
+    try:
+        for tok in generate_tokens(StringIO(sampleBadText).readline):
+            pass
+    except IndentationError:
+        pass
+    else:
+        raise TestFailed("Did not detect IndentationError")
+
+    # Run the doctests in this module.
+    from test import test_tokenize  # i.e., this module
+    from test.test_support import run_doctest
+    run_doctest(test_tokenize)
+
+    if verbose:
+        print 'finished'
+
+if __name__ == "__main__":
+    test_main()
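
For reference, the readline trick test_roundtrip() relies on:
generate_tokens() expects a readline-style callable, and the bound
.next method of an iterator over newtext.splitlines(1) (which keeps
line endings) provides one.  A minimal standalone sketch of the same
pattern; `source` here is just a hypothetical input string:

    from tokenize import generate_tokens, untokenize

    source = "x = 3.14 * 2\n"
    # splitlines(1) keeps the newlines, so .next acts like file.readline();
    # generate_tokens() treats the final StopIteration as end-of-file.
    t1 = [tok[:2] for tok in generate_tokens(iter(source.splitlines(1)).next)]

    newtext = untokenize(t1)
    t2 = [tok[:2] for tok in generate_tokens(iter(newtext.splitlines(1)).next)]
    print t1 == t2          # True when the roundtrip preserves the tokens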