bpo-36623: Clean parser headers and include files (GH-12253)

After the removal of pgen, multiple headers and function prototypes that lack an implementation or are unused still remain in the tree.
diff --git a/Parser/acceler.c b/Parser/acceler.c
index 9b14263..3a230c1 100644
--- a/Parser/acceler.c
+++ b/Parser/acceler.c
@@ -10,7 +10,7 @@
    are not part of the static data structure written on graminit.[ch]
    by the parser generator. */
 
-#include "pgenheaders.h"
+#include "Python.h"
 #include "grammar.h"
 #include "node.h"
 #include "token.h"
diff --git a/Parser/grammar1.c b/Parser/grammar1.c
index 9c32391..fec6d9e 100644
--- a/Parser/grammar1.c
+++ b/Parser/grammar1.c
@@ -2,7 +2,6 @@
 /* Grammar subroutines needed by parser */
 
 #include "Python.h"
-#include "pgenheaders.h"
 #include "grammar.h"
 #include "token.h"
 
diff --git a/Parser/listnode.c b/Parser/listnode.c
index 71300ae..8f1a116 100644
--- a/Parser/listnode.c
+++ b/Parser/listnode.c
@@ -1,7 +1,7 @@
 
 /* List a node on a file */
 
-#include "pgenheaders.h"
+#include "Python.h"
 #include "token.h"
 #include "node.h"
 
diff --git a/Parser/parser.c b/Parser/parser.c
index fa4a8f0..c21b6fd 100644
--- a/Parser/parser.c
+++ b/Parser/parser.c
@@ -6,7 +6,6 @@
 /* XXX To do: error recovery */
 
 #include "Python.h"
-#include "pgenheaders.h"
 #include "token.h"
 #include "grammar.h"
 #include "node.h"
diff --git a/Parser/parser.h b/Parser/parser.h
index aee1c86..ebb06c2 100644
--- a/Parser/parser.h
+++ b/Parser/parser.h
@@ -38,6 +38,11 @@
                       int *expected_ret);
 void PyGrammar_AddAccelerators(grammar *g);
 
+
+#define showtree _Py_showtree
+#define printtree _Py_printtree
+#define dumptree _Py_dumptree
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/Parser/parsetok.c b/Parser/parsetok.c
index ba33a9a..31be0eb 100644
--- a/Parser/parsetok.c
+++ b/Parser/parsetok.c
@@ -1,7 +1,7 @@
 
 /* Parser-tokenizer link implementation */
 
-#include "pgenheaders.h"
+#include "Python.h"
 #include "tokenizer.h"
 #include "node.h"
 #include "grammar.h"
diff --git a/Parser/pgen/grammar.py b/Parser/pgen/grammar.py
index 340bf64..1ab9434 100644
--- a/Parser/pgen/grammar.py
+++ b/Parser/pgen/grammar.py
@@ -61,7 +61,6 @@
     def produce_graminit_c(self, writer):
         writer("/* Generated by Parser/pgen */\n\n")
 
-        writer('#include "pgenheaders.h"\n')
         writer('#include "grammar.h"\n')
         writer("grammar _PyParser_Grammar;\n")
 
diff --git a/Parser/tokenizer.c b/Parser/tokenizer.c
index 58dd1cd..e8068f2 100644
--- a/Parser/tokenizer.c
+++ b/Parser/tokenizer.c
@@ -2,7 +2,6 @@
 /* Tokenizer implementation */
 
 #include "Python.h"
-#include "pgenheaders.h"
 
 #include <ctype.h>
 #include <assert.h>
diff --git a/Parser/tokenizer.h b/Parser/tokenizer.h
index 06c7a14..92669bf 100644
--- a/Parser/tokenizer.h
+++ b/Parser/tokenizer.h
@@ -80,6 +80,8 @@
 extern void PyTokenizer_Free(struct tok_state *);
 extern int PyTokenizer_Get(struct tok_state *, char **, char **);
 
+#define tok_dump _Py_tok_dump
+
 #ifdef __cplusplus
 }
 #endif