bpo-30455: Generate all token related code and docs from Grammar/Tokens. (GH-10370)

"Include/token.h", "Lib/token.py" (containing now some data moved from
"Lib/tokenize.py") and new files "Parser/token.c" (containing the code
moved from "Parser/tokenizer.c") and "Doc/library/token-list.inc" (included
in "Doc/library/token.rst") are now generated from "Grammar/Tokens" by
"Tools/scripts/generate_token.py". The script overwrites files only if
needed and can be used on the read-only sources tree.
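
To illustrate the overwrite-only-if-needed behaviour: a generator can render
the new file contents, compare them with what is already on disk, and skip the
write when nothing changed, so up-to-date files in a read-only source tree are
never touched. The helper below is a minimal sketch of that pattern; the name
update_file is hypothetical and not necessarily what generate_token.py uses.

    def update_file(path, new_content):
        # Hypothetical helper illustrating "write only if needed":
        # compare the rendered text with the file on disk and skip
        # the write when they match, so a read-only, up-to-date
        # source tree is never written to.
        try:
            with open(path, 'r') as f:
                if f.read() == new_content:
                    return False
        except OSError:
            pass
        with open(path, 'w') as f:
            f.write(new_content)
        return True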

"Lib/symbol.py" is now generated by "Tools/scripts/generate_symbol_py.py"
instead of been executable itself.
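
The generator takes the grammar header and the target file as arguments, as
the updated test below shows. A sketch of invoking it by hand from the root of
a CPython checkout (paths assumed, not prescribed) could look like:

    import subprocess
    import sys

    # Regenerate Lib/symbol.py from the generated grammar header.
    # Paths are relative to an assumed CPython source tree.
    subprocess.run([sys.executable,
                    'Tools/scripts/generate_symbol_py.py',
                    'Include/graminit.h',
                    'Lib/symbol.py'],
                   check=True)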

Added new make targets "regen-token" and "regen-symbol" which are now
dependencies of "regen-all".

The documentation now contains strings for operators and punctuation tokens.
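
For a quick check in an interpreter built with this change, the token names
listed in the docs can be looked up at runtime; the exact-token-type mapping
shared with "Lib/tokenize.py" is used here only as an illustration:

    import token
    import tokenize

    # Name of an operator token as it appears in the documentation.
    print(token.tok_name[token.PLUS])                      # PLUS

    # Operator/punctuation strings map back to the same token types.
    print(tokenize.EXACT_TOKEN_TYPES['+'] == token.PLUS)   # True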
diff --git a/Lib/test/test_symbol.py b/Lib/test/test_symbol.py
index c1306f5..ed86aec 100644
--- a/Lib/test/test_symbol.py
+++ b/Lib/test/test_symbol.py
@@ -6,6 +6,9 @@
 
 
 SYMBOL_FILE              = support.findfile('symbol.py')
+GEN_SYMBOL_FILE          = os.path.join(os.path.dirname(__file__),
+                                        '..', '..', 'Tools', 'scripts',
+                                        'generate_symbol_py.py')
 GRAMMAR_FILE             = os.path.join(os.path.dirname(__file__),
                                         '..', '..', 'Include', 'graminit.h')
 TEST_PY_FILE             = 'symbol_test.py'
@@ -22,7 +25,7 @@
 
     def _generate_symbols(self, grammar_file, target_symbol_py_file):
         proc = subprocess.Popen([sys.executable,
-                                 SYMBOL_FILE,
+                                 GEN_SYMBOL_FILE,
                                  grammar_file,
                                  target_symbol_py_file], stderr=subprocess.PIPE)
         stderr = proc.communicate()[1]
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index ff14479..04a1254 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -1619,6 +1619,8 @@
             testfiles = random.sample(testfiles, 10)
 
         for testfile in testfiles:
+            if support.verbose >= 2:
+                print('tokenize', testfile)
             with open(testfile, 'rb') as f:
                 with self.subTest(file=testfile):
                     self.check_roundtrip(f)