Another directory quickly renamed: Parser/ now uses the Py-prefixed names throughout (Py_PROTO, PyMem_NEW/PyMem_RESIZE/PyMem_DEL, Py_FatalError, Py_DebugFlag, PyGrammar_*, PyNode_*, PyParser_*, PyTokenizer_*, PyToken_*, PyOS_*, _PyParser_TokenNames); a few long lines are re-wrapped to fit.
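
Most files below swap PROTO for Py_PROTO in their forward declarations. For context, here is a minimal standalone sketch of how such a prototype macro lets one declaration serve both ANSI and pre-ANSI compilers; the HAVE_PROTOTYPES test and the macro bodies are assumptions for illustration, not the actual Python header.

/* Sketch only: the real Py_PROTO lives in a Python header. */
#include <stdio.h>

#ifdef HAVE_PROTOTYPES			/* assumed feature-test macro */
#define Py_PROTO(args) args		/* ANSI: keep the parameter list */
#else
#define Py_PROTO(args) ()		/* K&R: declare with empty parens */
#endif

/* One forward declaration, accepted either way */
static int add Py_PROTO((int, int));

static int
add(a, b)
	int a, b;
{
	return a + b;
}

int
main()
{
	printf("%d\n", add(2, 3));
	return 0;
}

Declaring through the macro while keeping K&R-style definitions, as the files below do, stays compilable on both kinds of compiler.
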
diff --git a/Parser/acceler.c b/Parser/acceler.c
index ef1edc3..9417b76 100644
--- a/Parser/acceler.c
+++ b/Parser/acceler.c
@@ -47,11 +47,11 @@
 #include "parser.h"
 
 /* Forward references */
-static void fixdfa PROTO((grammar *, dfa *));
-static void fixstate PROTO((grammar *, state *));
+static void fixdfa Py_PROTO((grammar *, dfa *));
+static void fixstate Py_PROTO((grammar *, state *));
 
 void
-addaccelerators(g)
+PyGrammar_AddAccelerators(g)
 	grammar *g;
 {
 	dfa *d;
@@ -90,7 +90,7 @@
 	int *accel;
 	int nl = g->g_ll.ll_nlabels;
 	s->s_accept = 0;
-	accel = NEW(int, nl);
+	accel = PyMem_NEW(int, nl);
 	for (k = 0; k < nl; k++)
 		accel[k] = -1;
 	a = s->s_arc;
@@ -103,7 +103,7 @@
 			continue;
 		}
 		if (ISNONTERMINAL(type)) {
-			dfa *d1 = finddfa(g, type);
+			dfa *d1 = PyGrammar_FindDFA(g, type);
 			int ibit;
 			if (type - NT_OFFSET >= (1 << 7)) {
 				printf("XXX too high nonterminal number!\n");
@@ -146,7 +146,7 @@
 		k++;
 	if (k < nl) {
 		int i;
-		s->s_accel = NEW(int, nl-k);
+		s->s_accel = PyMem_NEW(int, nl-k);
 		if (s->s_accel == NULL) {
 			fprintf(stderr, "no mem to add parser accelerators\n");
 			exit(1);
@@ -156,5 +156,5 @@
 		for (i = 0; k < nl; i++, k++)
 			s->s_accel[i] = accel[k];
 	}
-	DEL(accel);
+	PyMem_DEL(accel);
 }
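
The NEW/RESIZE/DEL to PyMem_NEW/PyMem_RESIZE/PyMem_DEL change above (repeated in most files below) is a straight rename of the allocation macros. A standalone sketch of what such macros plausibly expand to, assuming plain malloc/realloc/free underneath; the actual definitions live in a Python header and are not part of this patch.

#include <stdio.h>
#include <stdlib.h>

/* Assumed expansions, for illustration only */
#define PyMem_NEW(type, n)	((type *) malloc((n) * sizeof(type)))
#define PyMem_RESIZE(p, type, n) \
	((p) = (type *) realloc((p), (n) * sizeof(type)))
#define PyMem_DEL(p)		free(p)

int
main()
{
	int i, *accel = PyMem_NEW(int, 8);	/* as in fixstate() above */
	if (accel == NULL)
		return 1;
	for (i = 0; i < 8; i++)
		accel[i] = -1;
	PyMem_RESIZE(accel, int, 16);		/* grow, keeping old contents */
	if (accel == NULL)
		return 1;
	PyMem_DEL(accel);
	return 0;
}

PyMem_RESIZE assigns back into its first argument, which is why the callers above test the pointer for NULL again after each call.
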
diff --git a/Parser/bitset.c b/Parser/bitset.c
index e2315e4..afe828c 100644
--- a/Parser/bitset.c
+++ b/Parser/bitset.c
@@ -39,10 +39,10 @@
 	int nbits;
 {
 	int nbytes = NBYTES(nbits);
-	bitset ss = NEW(BYTE, nbytes);
+	bitset ss = PyMem_NEW(BYTE, nbytes);
 	
 	if (ss == NULL)
-		fatal("no mem for bitset");
+		Py_FatalError("no mem for bitset");
 	
 	ss += nbytes;
 	while (--nbytes >= 0)
@@ -54,7 +54,7 @@
 delbitset(ss)
 	bitset ss;
 {
-	DEL(ss);
+	PyMem_DEL(ss);
 }
 
 int
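
bitset.c above and firstsets.c below treat a bitset as a BYTE array, one bit per grammar label, and take the union of FIRST sets with a bytewise OR. A standalone sketch of that representation; the BYTE type and the macro names are assumptions here, since the real ones live in bitset.h, which this patch does not touch.

#include <stdio.h>
#include <string.h>

typedef unsigned char BYTE;
#define BITSPERBYTE	(8 * sizeof(BYTE))
#define NBYTES(nbits)	(((nbits) + BITSPERBYTE - 1) / BITSPERBYTE)
#define BIT2BYTE(i)	((i) / BITSPERBYTE)
#define BIT2MASK(i)	(1 << ((i) % BITSPERBYTE))

static void addbit(BYTE *ss, int i)  { ss[BIT2BYTE(i)] |= BIT2MASK(i); }
static int  testbit(BYTE *ss, int i) { return (ss[BIT2BYTE(i)] & BIT2MASK(i)) != 0; }

static void
mergebitset(BYTE *dst, BYTE *src, int nbits)
{
	int i;
	for (i = NBYTES(nbits); --i >= 0; )
		dst[i] |= src[i];	/* FIRST-set union, as in calcfirstset() */
}

int
main(void)
{
	BYTE a[NBYTES(19)], b[NBYTES(19)];	/* 19 labels, as in metagrammar.c */
	memset(a, 0, sizeof a);
	memset(b, 0, sizeof b);
	addbit(a, 3);
	addbit(b, 17);
	mergebitset(a, b, 19);
	printf("%d %d %d\n", testbit(a, 3), testbit(a, 17), testbit(a, 5));
	return 0;
}
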
diff --git a/Parser/firstsets.c b/Parser/firstsets.c
index 5c2a872..a9e793a 100644
--- a/Parser/firstsets.c
+++ b/Parser/firstsets.c
@@ -35,10 +35,10 @@
 #include "grammar.h"
 #include "token.h"
 
-extern int debugging;
+extern int Py_DebugFlag;
 
 /* Forward */
-static void calcfirstset PROTO((grammar *, dfa *));
+static void calcfirstset Py_PROTO((grammar *, dfa *));
 
 void
 addfirstsets(g)
@@ -72,7 +72,7 @@
 	dfa *d1;
 	label *l0;
 	
-	if (debugging)
+	if (Py_DebugFlag)
 		printf("Calculate FIRST set for '%s'\n", d->d_name);
 	
 	if (dummy == NULL)
@@ -91,9 +91,9 @@
 	nbits = g->g_ll.ll_nlabels;
 	result = newbitset(nbits);
 	
-	sym = NEW(int, 1);
+	sym = PyMem_NEW(int, 1);
 	if (sym == NULL)
-		fatal("no mem for new sym in calcfirstset");
+		Py_FatalError("no mem for new sym in calcfirstset");
 	nsyms = 1;
 	sym[0] = findlabel(&g->g_ll, d->d_type, (char *)NULL);
 	
@@ -105,13 +105,14 @@
 				break;
 		}
 		if (j >= nsyms) { /* New label */
-			RESIZE(sym, int, nsyms + 1);
+			PyMem_RESIZE(sym, int, nsyms + 1);
 			if (sym == NULL)
-				fatal("no mem to resize sym in calcfirstset");
+				Py_FatalError(
+				    "no mem to resize sym in calcfirstset");
 			sym[nsyms++] = a->a_lbl;
 			type = l0[a->a_lbl].lb_type;
 			if (ISNONTERMINAL(type)) {
-				d1 = finddfa(g, type);
+				d1 = PyGrammar_FindDFA(g, type);
 				if (d1->d_first == dummy) {
 					fprintf(stderr,
 						"Left-recursion below '%s'\n",
@@ -120,7 +121,8 @@
 				else {
 					if (d1->d_first == NULL)
 						calcfirstset(g, d1);
-					mergebitset(result, d1->d_first, nbits);
+					mergebitset(result,
+						    d1->d_first, nbits);
 				}
 			}
 			else if (ISTERMINAL(type)) {
@@ -129,11 +131,11 @@
 		}
 	}
 	d->d_first = result;
-	if (debugging) {
+	if (Py_DebugFlag) {
 		printf("FIRST set for '%s': {", d->d_name);
 		for (i = 0; i < nbits; i++) {
 			if (testbit(result, i))
-				printf(" %s", labelrepr(&l0[i]));
+				printf(" %s", PyGrammar_LabelRepr(&l0[i]));
 		}
 		printf(" }\n");
 	}
diff --git a/Parser/grammar.c b/Parser/grammar.c
index 608f6bd..07e3f62 100644
--- a/Parser/grammar.c
+++ b/Parser/grammar.c
@@ -39,7 +39,7 @@
 #include "token.h"
 #include "grammar.h"
 
-extern int debugging;
+extern int Py_DebugFlag;
 
 grammar *
 newgrammar(start)
@@ -47,9 +47,9 @@
 {
 	grammar *g;
 	
-	g = NEW(grammar, 1);
+	g = PyMem_NEW(grammar, 1);
 	if (g == NULL)
-		fatal("no mem for new grammar");
+		Py_FatalError("no mem for new grammar");
 	g->g_ndfas = 0;
 	g->g_dfa = NULL;
 	g->g_start = start;
@@ -67,9 +67,9 @@
 {
 	dfa *d;
 	
-	RESIZE(g->g_dfa, dfa, g->g_ndfas + 1);
+	PyMem_RESIZE(g->g_dfa, dfa, g->g_ndfas + 1);
 	if (g->g_dfa == NULL)
-		fatal("no mem to resize dfa in adddfa");
+		Py_FatalError("no mem to resize dfa in adddfa");
 	d = &g->g_dfa[g->g_ndfas++];
 	d->d_type = type;
 	d->d_name = name;
@@ -86,9 +86,9 @@
 {
 	state *s;
 	
-	RESIZE(d->d_state, state, d->d_nstates + 1);
+	PyMem_RESIZE(d->d_state, state, d->d_nstates + 1);
 	if (d->d_state == NULL)
-		fatal("no mem to resize state in addstate");
+		Py_FatalError("no mem to resize state in addstate");
 	s = &d->d_state[d->d_nstates++];
 	s->s_narcs = 0;
 	s->s_arc = NULL;
@@ -111,9 +111,9 @@
 	assert(0 <= to && to < d->d_nstates);
 	
 	s = &d->d_state[from];
-	RESIZE(s->s_arc, arc, s->s_narcs + 1);
+	PyMem_RESIZE(s->s_arc, arc, s->s_narcs + 1);
 	if (s->s_arc == NULL)
-		fatal("no mem to resize arc list in addarc");
+		Py_FatalError("no mem to resize arc list in addarc");
 	a = &s->s_arc[s->s_narcs++];
 	a->a_lbl = lbl;
 	a->a_arrow = to;
@@ -133,9 +133,9 @@
 			strcmp(ll->ll_label[i].lb_str, str) == 0)
 			return i;
 	}
-	RESIZE(ll->ll_label, label, ll->ll_nlabels + 1);
+	PyMem_RESIZE(ll->ll_label, label, ll->ll_nlabels + 1);
 	if (ll->ll_label == NULL)
-		fatal("no mem to resize labellist in addlabel");
+		Py_FatalError("no mem to resize labellist in addlabel");
 	lb = &ll->ll_label[ll->ll_nlabels++];
 	lb->lb_type = type;
 	lb->lb_str = str; /* XXX strdup(str) ??? */
@@ -158,12 +158,12 @@
 			return i;
 	}
 	fprintf(stderr, "Label %d/'%s' not found\n", type, str);
-	fatal("grammar.c:findlabel()");
+	Py_FatalError("grammar.c:findlabel()");
 	return 0; /* Make gcc -Wall happy */
 }
 
 /* Forward */
-static void translabel PROTO((grammar *, label *));
+static void translabel Py_PROTO((grammar *, label *));
 
 void
 translatelabels(g)
@@ -186,24 +186,25 @@
 {
 	int i;
 	
-	if (debugging)
-		printf("Translating label %s ...\n", labelrepr(lb));
+	if (Py_DebugFlag)
+		printf("Translating label %s ...\n", PyGrammar_LabelRepr(lb));
 	
 	if (lb->lb_type == NAME) {
 		for (i = 0; i < g->g_ndfas; i++) {
 			if (strcmp(lb->lb_str, g->g_dfa[i].d_name) == 0) {
-				if (debugging)
-					printf("Label %s is non-terminal %d.\n",
-						lb->lb_str,
-						g->g_dfa[i].d_type);
+				if (Py_DebugFlag)
+					printf(
+					    "Label %s is non-terminal %d.\n",
+					    lb->lb_str,
+					    g->g_dfa[i].d_type);
 				lb->lb_type = g->g_dfa[i].d_type;
 				lb->lb_str = NULL;
 				return;
 			}
 		}
 		for (i = 0; i < (int)N_TOKENS; i++) {
-			if (strcmp(lb->lb_str, tok_name[i]) == 0) {
-				if (debugging)
+			if (strcmp(lb->lb_str, _PyParser_TokenNames[i]) == 0) {
+				if (Py_DebugFlag)
 					printf("Label %s is terminal %d.\n",
 						lb->lb_str, i);
 				lb->lb_type = i;
@@ -218,7 +219,7 @@
 	if (lb->lb_type == STRING) {
 		if (isalpha(lb->lb_str[1]) || lb->lb_str[1] == '_') {
 			char *p;
-			if (debugging)
+			if (Py_DebugFlag)
 				printf("Label %s is a keyword\n", lb->lb_str);
 			lb->lb_type = NAME;
 			lb->lb_str++;
@@ -227,7 +228,7 @@
 				*p = '\0';
 		}
 		else if (lb->lb_str[2] == lb->lb_str[0]) {
-			int type = (int) tok_1char(lb->lb_str[1]);
+			int type = (int) PyToken_OneChar(lb->lb_str[1]);
 			if (type != OP) {
 				lb->lb_type = type;
 				lb->lb_str = NULL;
@@ -237,7 +238,7 @@
 					lb->lb_str);
 		}
 		else if (lb->lb_str[2] && lb->lb_str[3] == lb->lb_str[0]) {
-			int type = (int) tok_2char(lb->lb_str[1],
+			int type = (int) PyToken_TwoChars(lb->lb_str[1],
 						   lb->lb_str[2]);
 			if (type != OP) {
 				lb->lb_type = type;
@@ -252,5 +253,6 @@
 				lb->lb_str);
 	}
 	else
-		printf("Can't translate label '%s'\n", labelrepr(lb));
+		printf("Can't translate label '%s'\n",
+		       PyGrammar_LabelRepr(lb));
 }
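
The translabel() hunks above distinguish three kinds of quoted labels: a keyword such as 'if' becomes a NAME with its quotes stripped, a one-character operator is looked up with PyToken_OneChar, and a two-character operator with PyToken_TwoChars. A standalone sketch of that classification; the token constants and the two lookup helpers below are stand-ins for token.h, not the real values.

#include <ctype.h>
#include <stdio.h>
#include <string.h>

enum { OP, NAME, LESS, LESSEQUAL };	/* stand-in token codes */

static int one_char(int c)         { return c == '<' ? LESS : OP; }
static int two_chars(int a, int b) { return a == '<' && b == '=' ? LESSEQUAL : OP; }

static int
classify_label(char *str, char *keyword_out)
{
	/* str is the quoted spelling, e.g. "'if'", "'<'" or "'<='" */
	if (isalpha((unsigned char) str[1]) || str[1] == '_') {
		char *p;
		strcpy(keyword_out, str + 1);		/* drop opening quote */
		p = strchr(keyword_out, '\'');
		if (p != NULL)
			*p = '\0';			/* drop closing quote */
		return NAME;				/* keyword */
	}
	if (str[2] == str[0])				/* 'x'  : one character */
		return one_char(str[1]);
	if (str[2] != '\0' && str[3] == str[0])		/* 'xy' : two characters */
		return two_chars(str[1], str[2]);
	return OP;
}

int
main(void)
{
	char kw[32];
	printf("%d\n", classify_label("'if'", kw));	/* NAME, kw == "if" */
	printf("%d\n", classify_label("'<='", kw));	/* LESSEQUAL */
	printf("%d\n", classify_label("'<'", kw));	/* LESS */
	return 0;
}
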
diff --git a/Parser/grammar1.c b/Parser/grammar1.c
index 45d7399..38e4de6 100644
--- a/Parser/grammar1.c
+++ b/Parser/grammar1.c
@@ -39,7 +39,7 @@
 /* Return the DFA for the given type */
 
 dfa *
-finddfa(g, type)
+PyGrammar_FindDFA(g, type)
 	grammar *g;
 	register int type;
 {
@@ -63,7 +63,7 @@
 }
 
 char *
-labelrepr(lb)
+PyGrammar_LabelRepr(lb)
 	label *lb;
 {
 	static char buf[100];
@@ -80,10 +80,10 @@
 	}
 	else {
 		if (lb->lb_str == NULL)
-			return tok_name[lb->lb_type];
+			return _PyParser_TokenNames[lb->lb_type];
 		else {
 			sprintf(buf, "%.32s(%.32s)",
-				tok_name[lb->lb_type], lb->lb_str);
+				_PyParser_TokenNames[lb->lb_type], lb->lb_str);
 			return buf;
 		}
 	}
diff --git a/Parser/intrcheck.c b/Parser/intrcheck.c
index 40f335e..685e066 100644
--- a/Parser/intrcheck.c
+++ b/Parser/intrcheck.c
@@ -45,12 +45,12 @@
 #include <io.h>
 
 void
-initintr()
+PyOS_InitInterrupts()
 {
 }
 
 int
-intrcheck()
+PyOS_InterruptOccurred()
 {
 	_wyield();
 }
@@ -76,13 +76,13 @@
 #include <go32.h>
 
 void
-initintr()
+PyOS_InitInterrupts()
 {
 	_go32_want_ctrl_break(1 /* TRUE */);
 }
 
 int
-intrcheck()
+PyOS_InterruptOccurred()
 {
 	return _go32_was_ctrl_break_hit();
 }
@@ -92,12 +92,12 @@
 /* This might work for MS-DOS (untested though): */
 
 void
-initintr()
+PyOS_InitInterrupts()
 {
 }
 
 int
-intrcheck()
+PyOS_InterruptOccurred()
 {
 	int interrupted = 0;
 	while (kbhit()) {
@@ -141,7 +141,7 @@
 	interrupted = 1;
 }
 
-extern int sigcheck();
+extern int PyErr_CheckSignals();
 
 /* ARGSUSED */
 static RETSIGTYPE
@@ -152,7 +152,7 @@
 	int sig; /* Not used but required by interface */
 #endif /* _M_IX86 */
 {
-	extern void goaway PROTO((int));
+	extern void Py_Exit Py_PROTO((int));
 	static char message[] =
 "python: to interrupt a truly hanging Python program, interrupt once more.\n";
 	switch (interrupted++) {
@@ -163,15 +163,15 @@
 		break;
 	case 2:
 		interrupted = 0;
-		goaway(1);
+		Py_Exit(1);
 		break;
 	}
 	signal(SIGINT, intcatcher);
-	Py_AddPendingCall(sigcheck, NULL);
+	Py_AddPendingCall(PyErr_CheckSignals, NULL);
 }
 
 void
-initintr()
+PyOS_InitInterrupts()
 {
 	if (signal(SIGINT, SIG_IGN) != SIG_IGN)
 		signal(SIGINT, intcatcher);
@@ -187,7 +187,7 @@
 }
 
 int
-intrcheck()
+PyOS_InterruptOccurred()
 {
 	if (!interrupted)
 		return 0;
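
The Unix branch of intrcheck.c above follows the usual flag-and-poll pattern: the SIGINT handler only records that an interrupt happened (and schedules PyErr_CheckSignals through Py_AddPendingCall), while PyOS_InterruptOccurred() is polled later and consumes the flag. A standalone sketch of that pattern with assumed local names; it leaves out the pending-call machinery.

#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t interrupted;

static void
intcatcher(int sig)
{
	(void) sig;
	interrupted = 1;
	signal(SIGINT, intcatcher);	/* re-arm on systems that reset the handler */
}

static int
interrupt_occurred(void)		/* PyOS_InterruptOccurred()-style check */
{
	if (!interrupted)
		return 0;
	interrupted = 0;
	return 1;
}

int
main(void)
{
	if (signal(SIGINT, SIG_IGN) != SIG_IGN)	/* respect an inherited ignore */
		signal(SIGINT, intcatcher);
	/* a real main loop would call interrupt_occurred() periodically */
	printf("interrupted so far: %d\n", interrupt_occurred());
	return 0;
}
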
diff --git a/Parser/listnode.c b/Parser/listnode.c
index 8e5d8ee..9895fd3 100644
--- a/Parser/listnode.c
+++ b/Parser/listnode.c
@@ -36,11 +36,11 @@
 #include "node.h"
 
 /* Forward */
-static void list1node PROTO((FILE *, node *));
-static void listnode PROTO((FILE *, node *));
+static void list1node Py_PROTO((FILE *, node *));
+static void listnode Py_PROTO((FILE *, node *));
 
 void
-listtree(n)
+PyNode_ListTree(n)
 	node *n;
 {
 	listnode(stdout, n);
diff --git a/Parser/metagrammar.c b/Parser/metagrammar.c
index 4720b20..e9f4245 100644
--- a/Parser/metagrammar.c
+++ b/Parser/metagrammar.c
@@ -169,7 +169,7 @@
 	{7, 0},
 	{8, 0},
 };
-static grammar gram = {
+static grammar _PyParser_Grammar = {
 	6,
 	dfas,
 	{19, labels},
@@ -179,5 +179,5 @@
 grammar *
 meta_grammar()
 {
-	return &gram;
+	return &_PyParser_Grammar;
 }
diff --git a/Parser/node.c b/Parser/node.c
index c331240..c2308fd 100644
--- a/Parser/node.c
+++ b/Parser/node.c
@@ -35,10 +35,10 @@
 #include "node.h"
 
 node *
-newtree(type)
+PyNode_New(type)
 	int type;
 {
-	node *n = NEW(node, 1);
+	node *n = PyMem_NEW(node, 1);
 	if (n == NULL)
 		return NULL;
 	n->n_type = type;
@@ -53,7 +53,7 @@
 #define XXXROUNDUP(n) ((n) == 1 ? 1 : ((n) + XXX - 1) / XXX * XXX)
 
 node *
-addchild(n1, type, str, lineno)
+PyNode_AddChild(n1, type, str, lineno)
 	register node *n1;
 	int type;
 	char *str;
@@ -65,7 +65,7 @@
 	if (XXXROUNDUP(nch) < nch1) {
 		n = n1->n_child;
 		nch1 = XXXROUNDUP(nch1);
-		RESIZE(n, node, nch1);
+		PyMem_RESIZE(n, node, nch1);
 		if (n == NULL)
 			return NULL;
 		n1->n_child = n;
@@ -80,16 +80,16 @@
 }
 
 /* Forward */
-static void freechildren PROTO((node *));
+static void freechildren Py_PROTO((node *));
 
 
 void
-freetree(n)
+PyNode_Free(n)
 	node *n;
 {
 	if (n != NULL) {
 		freechildren(n);
-		DEL(n);
+		PyMem_DEL(n);
 	}
 }
 
@@ -101,7 +101,7 @@
 	for (i = NCH(n); --i >= 0; )
 		freechildren(CHILD(n, i));
 	if (n->n_child != NULL)
-		DEL(n->n_child);
+		PyMem_DEL(n->n_child);
 	if (STR(n) != NULL)
-		DEL(STR(n));
+		PyMem_DEL(STR(n));
 }
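
PyNode_AddChild above calls PyMem_RESIZE only when XXXROUNDUP says the currently allocated block is full, so most children are added without touching the allocator. A small standalone demo of that rounding rule; the block size XXX used here is an arbitrary stand-in, node.c picks its own.

#include <stdio.h>

#define XXX 3	/* assumed block size for the demo */
#define XXXROUNDUP(n) ((n) == 1 ? 1 : ((n) + XXX - 1) / XXX * XXX)

int
main(void)
{
	int nch;
	for (nch = 1; nch <= 8; nch++) {
		int nch1 = nch + 1;
		/* reallocate only when the rounded-up old size can't hold nch1 */
		printf("children %d -> %d: %s\n", nch, nch1,
		       XXXROUNDUP(nch) < nch1 ? "realloc" : "in place");
	}
	return 0;
}
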
diff --git a/Parser/parser.c b/Parser/parser.c
index e42fb11..3b75dbc 100644
--- a/Parser/parser.c
+++ b/Parser/parser.c
@@ -45,8 +45,8 @@
 
 
 #ifdef Py_DEBUG
-extern int debugging;
-#define D(x) if (!debugging); else x
+extern int Py_DebugFlag;
+#define D(x) if (!Py_DebugFlag); else x
 #else
 #define D(x)
 #endif
@@ -54,7 +54,7 @@
 
 /* STACK DATA TYPE */
 
-static void s_reset PROTO((stack *));
+static void s_reset Py_PROTO((stack *));
 
 static void
 s_reset(s)
@@ -65,7 +65,7 @@
 
 #define s_empty(s) ((s)->s_top == &(s)->s_base[MAXSTACK])
 
-static int s_push PROTO((stack *, dfa *, node *));
+static int s_push Py_PROTO((stack *, dfa *, node *));
 
 static int
 s_push(s, d, parent)
@@ -87,14 +87,14 @@
 
 #ifdef Py_DEBUG
 
-static void s_pop PROTO((stack *));
+static void s_pop Py_PROTO((stack *));
 
 static void
 s_pop(s)
 	register stack *s;
 {
 	if (s_empty(s))
-		fatal("s_pop: parser stack underflow -- FATAL");
+		Py_FatalError("s_pop: parser stack underflow -- FATAL");
 	s->s_top++;
 }
 
@@ -108,42 +108,42 @@
 /* PARSER CREATION */
 
 parser_state *
-newparser(g, start)
+PyParser_New(g, start)
 	grammar *g;
 	int start;
 {
 	parser_state *ps;
 	
 	if (!g->g_accel)
-		addaccelerators(g);
-	ps = NEW(parser_state, 1);
+		PyGrammar_AddAccelerators(g);
+	ps = PyMem_NEW(parser_state, 1);
 	if (ps == NULL)
 		return NULL;
 	ps->p_grammar = g;
-	ps->p_tree = newtree(start);
+	ps->p_tree = PyNode_New(start);
 	if (ps->p_tree == NULL) {
-		DEL(ps);
+		PyMem_DEL(ps);
 		return NULL;
 	}
 	s_reset(&ps->p_stack);
-	(void) s_push(&ps->p_stack, finddfa(g, start), ps->p_tree);
+	(void) s_push(&ps->p_stack, PyGrammar_FindDFA(g, start), ps->p_tree);
 	return ps;
 }
 
 void
-delparser(ps)
+PyParser_Delete(ps)
 	parser_state *ps;
 {
 	/* NB If you want to save the parse tree,
 	   you must set p_tree to NULL before calling delparser! */
-	freetree(ps->p_tree);
-	DEL(ps);
+	PyNode_Free(ps->p_tree);
+	PyMem_DEL(ps);
 }
 
 
 /* PARSER STACK OPERATIONS */
 
-static int shift PROTO((stack *, int, char *, int, int));
+static int shift Py_PROTO((stack *, int, char *, int, int));
 
 static int
 shift(s, type, str, newstate, lineno)
@@ -154,7 +154,7 @@
 	int lineno;
 {
 	assert(!s_empty(s));
-	if (addchild(s->s_top->s_parent, type, str, lineno) == NULL) {
+	if (PyNode_AddChild(s->s_top->s_parent, type, str, lineno) == NULL) {
 		fprintf(stderr, "shift: no mem in addchild\n");
 		return -1;
 	}
@@ -162,7 +162,7 @@
 	return 0;
 }
 
-static int push PROTO((stack *, int, dfa *, int, int));
+static int push Py_PROTO((stack *, int, dfa *, int, int));
 
 static int
 push(s, type, d, newstate, lineno)
@@ -175,7 +175,7 @@
 	register node *n;
 	n = s->s_top->s_parent;
 	assert(!s_empty(s));
-	if (addchild(n, type, (char *)NULL, lineno) == NULL) {
+	if (PyNode_AddChild(n, type, (char *)NULL, lineno) == NULL) {
 		fprintf(stderr, "push: no mem in addchild\n");
 		return -1;
 	}
@@ -186,7 +186,7 @@
 
 /* PARSER PROPER */
 
-static int classify PROTO((grammar *, int, char *));
+static int classify Py_PROTO((grammar *, int, char *));
 
 static int
 classify(g, type, str)
@@ -226,7 +226,7 @@
 }
 
 int
-addtoken(ps, type, str, lineno)
+PyParser_AddToken(ps, type, str, lineno)
 	register parser_state *ps;
 	register int type;
 	char *str;
@@ -234,7 +234,7 @@
 {
 	register int ilabel;
 	
-	D(printf("Token %s/'%s' ... ", tok_name[type], str));
+	D(printf("Token %s/'%s' ... ", _PyParser_TokenNames[type], str));
 	
 	/* Find out which label this token is */
 	ilabel = classify(ps->p_grammar, type, str);
@@ -258,10 +258,11 @@
 					/* Push non-terminal */
 					int nt = (x >> 8) + NT_OFFSET;
 					int arrow = x & ((1<<7)-1);
-					dfa *d1 = finddfa(ps->p_grammar, nt);
+					dfa *d1 = PyGrammar_FindDFA(
+						ps->p_grammar, nt);
 					if (push(&ps->p_stack, nt, d1,
 						arrow, lineno) < 0) {
-						D(printf(" MemError: push.\n"));
+						D(printf(" MemError: push\n"));
 						return E_NOMEM;
 					}
 					D(printf(" Push ...\n"));
@@ -326,7 +327,7 @@
 		label l;
 		l.lb_type = TYPE(n);
 		l.lb_str = STR(n);
-		printf("%s", labelrepr(&l));
+		printf("%s", PyGrammar_LabelRepr(&l));
 		if (ISNONTERMINAL(TYPE(n))) {
 			printf("(");
 			for (i = 0; i < NCH(n); i++) {
@@ -353,7 +354,7 @@
 			showtree(g, CHILD(n, i));
 	}
 	else if (ISTERMINAL(TYPE(n))) {
-		printf("%s", tok_name[TYPE(n)]);
+		printf("%s", _PyParser_TokenNames[TYPE(n)]);
 		if (TYPE(n) == NUMBER || TYPE(n) == NAME)
 			printf("(%s)", STR(n));
 		printf(" ");
@@ -366,7 +367,7 @@
 printtree(ps)
 	parser_state *ps;
 {
-	if (debugging) {
+	if (Py_DebugFlag) {
 		printf("Parse tree:\n");
 		dumptree(ps->p_grammar, ps->p_tree);
 		printf("\n");
@@ -375,7 +376,7 @@
 		printf("\n");
 	}
 	printf("Listing:\n");
-	listtree(ps->p_tree);
+	PyNode_ListTree(ps->p_tree);
 	printf("\n");
 }
 
diff --git a/Parser/parser.h b/Parser/parser.h
index 81b83a6..78069f4 100644
--- a/Parser/parser.h
+++ b/Parser/parser.h
@@ -57,10 +57,11 @@
 	node		*p_tree;	/* Top of parse tree */
 } parser_state;
 
-parser_state *newparser PROTO((grammar *g, int start));
-void delparser PROTO((parser_state *ps));
-int addtoken PROTO((parser_state *ps, int type, char *str, int lineno));
-void addaccelerators PROTO((grammar *g));
+parser_state *PyParser_New Py_PROTO((grammar *g, int start));
+void PyParser_Delete Py_PROTO((parser_state *ps));
+int PyParser_AddToken
+	Py_PROTO((parser_state *ps, int type, char *str, int lineno));
+void PyGrammar_AddAccelerators Py_PROTO((grammar *g));
 
 #ifdef __cplusplus
 }
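
The PyParser_AddToken hunk in parser.c above decodes an accelerator entry as nt = (x >> 8) + NT_OFFSET and arrow = x & ((1 << 7) - 1). A standalone round trip of that packing; the bit-7 "push" flag and the NT_OFFSET value used here are assumptions inferred from the surrounding code, not something these hunks show.

#include <stdio.h>

#define NT_OFFSET 256	/* assumed value for the demo */

/* Assumed layout: low 7 bits = arrow (target state), bit 7 = "push
   nonterminal" flag, bits 8 and up = nonterminal number - NT_OFFSET. */
static int
pack(int nt, int arrow)
{
	return (arrow & ((1 << 7) - 1)) | (1 << 7) | ((nt - NT_OFFSET) << 8);
}

int
main(void)
{
	int x = pack(NT_OFFSET + 5, 42);
	int nt = (x >> 8) + NT_OFFSET;		/* as in PyParser_AddToken */
	int arrow = x & ((1 << 7) - 1);
	printf("nt=%d arrow=%d push=%d\n", nt, arrow, (x & (1 << 7)) != 0);
	return 0;
}
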
diff --git a/Parser/parsetok.c b/Parser/parsetok.c
index 7bccd35..251e507 100644
--- a/Parser/parsetok.c
+++ b/Parser/parsetok.c
@@ -41,13 +41,13 @@
 
 
 /* Forward */
-static node *parsetok PROTO((struct tok_state *, grammar *, int,
+static node *parsetok Py_PROTO((struct tok_state *, grammar *, int,
 			     perrdetail *));
 
 /* Parse input coming from a string.  Return error code, print some errors. */
 
 node *
-parsestring(s, g, start, err_ret)
+PyParser_ParseString(s, g, start, err_ret)
 	char *s;
 	grammar *g;
 	int start;
@@ -61,7 +61,7 @@
 	err_ret->offset = 0;
 	err_ret->text = NULL;
 
-	if ((tok = tok_setups(s)) == NULL) {
+	if ((tok = PyTokenizer_FromString(s)) == NULL) {
 		err_ret->error = E_NOMEM;
 		return NULL;
 	}
@@ -73,7 +73,7 @@
 /* Parse input coming from a file.  Return error code, print some errors. */
 
 node *
-parsefile(fp, filename, g, start, ps1, ps2, err_ret)
+PyParser_ParseFile(fp, filename, g, start, ps1, ps2, err_ret)
 	FILE *fp;
 	char *filename;
 	grammar *g;
@@ -89,7 +89,7 @@
 	err_ret->offset = 0;
 	err_ret->text = NULL;
 
-	if ((tok = tok_setupf(fp, ps1, ps2)) == NULL) {
+	if ((tok = PyTokenizer_FromFile(fp, ps1, ps2)) == NULL) {
 		err_ret->error = E_NOMEM;
 		return NULL;
 	}
@@ -119,7 +119,7 @@
 	node *n;
 	int started = 0;
 
-	if ((ps = newparser(g, start)) == NULL) {
+	if ((ps = PyParser_New(g, start)) == NULL) {
 		fprintf(stderr, "no mem for new parser\n");
 		err_ret->error = E_NOMEM;
 		return NULL;
@@ -131,7 +131,7 @@
 		int len;
 		char *str;
 
-		type = tok_get(tok, &a, &b);
+		type = PyTokenizer_Get(tok, &a, &b);
 		if (type == ERRORTOKEN) {
 			err_ret->error = tok->done;
 			break;
@@ -143,7 +143,7 @@
 		else
 			started = 1;
 		len = b - a; /* XXX this may compute NULL - NULL */
-		str = NEW(char, len + 1);
+		str = PyMem_NEW(char, len + 1);
 		if (str == NULL) {
 			fprintf(stderr, "no mem for next token\n");
 			err_ret->error = E_NOMEM;
@@ -153,7 +153,8 @@
 			strncpy(str, a, len);
 		str[len] = '\0';
 		if ((err_ret->error =
-		     addtoken(ps, (int)type, str, tok->lineno)) != E_OK)
+		     PyParser_AddToken(ps, (int)type, str,
+				       tok->lineno)) != E_OK)
 			break;
 	}
 
@@ -164,7 +165,7 @@
 	else
 		n = NULL;
 
-	delparser(ps);
+	PyParser_Delete(ps);
 
 	if (n == NULL) {
 		if (tok->lineno <= 1 && tok->done == E_EOF)
@@ -182,7 +183,7 @@
 		}
 	}
 
-	tok_free(tok);
+	PyTokenizer_Free(tok);
 
 	return n;
 }
diff --git a/Parser/pgen.c b/Parser/pgen.c
index 9668630..4b2acb8 100644
--- a/Parser/pgen.c
+++ b/Parser/pgen.c
@@ -42,7 +42,7 @@
 #include "metagrammar.h"
 #include "pgen.h"
 
-extern int debugging;
+extern int Py_DebugFlag;
 
 
 /* PART ONE -- CONSTRUCT NFA -- Cf. Algorithm 3.2 from [Aho&Ullman 77] */
@@ -66,13 +66,13 @@
 } nfa;
 
 /* Forward */
-static void compile_rhs PROTO((labellist *ll,
+static void compile_rhs Py_PROTO((labellist *ll,
 			       nfa *nf, node *n, int *pa, int *pb));
-static void compile_alt PROTO((labellist *ll,
+static void compile_alt Py_PROTO((labellist *ll,
 			       nfa *nf, node *n, int *pa, int *pb));
-static void compile_item PROTO((labellist *ll,
+static void compile_item Py_PROTO((labellist *ll,
 				nfa *nf, node *n, int *pa, int *pb));
-static void compile_atom PROTO((labellist *ll,
+static void compile_atom Py_PROTO((labellist *ll,
 				nfa *nf, node *n, int *pa, int *pb));
 
 static int
@@ -81,9 +81,9 @@
 {
 	nfastate *st;
 	
-	RESIZE(nf->nf_state, nfastate, nf->nf_nstates + 1);
+	PyMem_RESIZE(nf->nf_state, nfastate, nf->nf_nstates + 1);
 	if (nf->nf_state == NULL)
-		fatal("out of mem");
+		Py_FatalError("out of mem");
 	st = &nf->nf_state[nf->nf_nstates++];
 	st->st_narcs = 0;
 	st->st_arc = NULL;
@@ -99,9 +99,9 @@
 	nfaarc *ar;
 	
 	st = &nf->nf_state[from];
-	RESIZE(st->st_arc, nfaarc, st->st_narcs + 1);
+	PyMem_RESIZE(st->st_arc, nfaarc, st->st_narcs + 1);
 	if (st->st_arc == NULL)
-		fatal("out of mem");
+		Py_FatalError("out of mem");
 	ar = &st->st_arc[st->st_narcs++];
 	ar->ar_label = lbl;
 	ar->ar_arrow = to;
@@ -114,9 +114,9 @@
 	nfa *nf;
 	static type = NT_OFFSET; /* All types will be disjunct */
 	
-	nf = NEW(nfa, 1);
+	nf = PyMem_NEW(nfa, 1);
 	if (nf == NULL)
-		fatal("no mem for new nfa");
+		Py_FatalError("no mem for new nfa");
 	nf->nf_type = type++;
 	nf->nf_name = name; /* XXX strdup(name) ??? */
 	nf->nf_nstates = 0;
@@ -132,16 +132,16 @@
 } nfagrammar;
 
 /* Forward */
-static void compile_rule PROTO((nfagrammar *gr, node *n));
+static void compile_rule Py_PROTO((nfagrammar *gr, node *n));
 
 static nfagrammar *
 newnfagrammar()
 {
 	nfagrammar *gr;
 	
-	gr = NEW(nfagrammar, 1);
+	gr = PyMem_NEW(nfagrammar, 1);
 	if (gr == NULL)
-		fatal("no mem for new nfa grammar");
+		Py_FatalError("no mem for new nfa grammar");
 	gr->gr_nnfas = 0;
 	gr->gr_nfa = NULL;
 	gr->gr_ll.ll_nlabels = 0;
@@ -158,9 +158,9 @@
 	nfa *nf;
 	
 	nf = newnfa(name);
-	RESIZE(gr->gr_nfa, nfa *, gr->gr_nnfas + 1);
+	PyMem_RESIZE(gr->gr_nfa, nfa *, gr->gr_nnfas + 1);
 	if (gr->gr_nfa == NULL)
-		fatal("out of mem");
+		Py_FatalError("out of mem");
 	gr->gr_nfa[gr->gr_nnfas++] = nf;
 	addlabel(&gr->gr_ll, NAME, nf->nf_name);
 	return nf;
@@ -173,7 +173,7 @@
 #define REQN(i, count) \
  	if (i < count) { \
 		fprintf(stderr, REQNFMT, count); \
-		fatal("REQN"); \
+		Py_FatalError("REQN"); \
 	} else
 
 #else
@@ -379,7 +379,7 @@
 		if (i > 0)
 			printf("\n    ");
 		printf("-> %2d  %s", ar->ar_arrow,
-			labelrepr(&ll->ll_label[ar->ar_label]));
+			PyGrammar_LabelRepr(&ll->ll_label[ar->ar_label]));
 		ar++;
 	}
 	printf("\n");
@@ -441,10 +441,10 @@
 } ss_dfa;
 
 /* Forward */
-static void printssdfa PROTO((int xx_nstates, ss_state *xx_state, int nbits,
+static void printssdfa Py_PROTO((int xx_nstates, ss_state *xx_state, int nbits,
 			      labellist *ll, char *msg));
-static void simplify PROTO((int xx_nstates, ss_state *xx_state));
-static void convert PROTO((dfa *d, int xx_nstates, ss_state *xx_state));
+static void simplify Py_PROTO((int xx_nstates, ss_state *xx_state));
+static void convert Py_PROTO((dfa *d, int xx_nstates, ss_state *xx_state));
 
 static void
 makedfa(gr, nf, d)
@@ -463,9 +463,9 @@
 	
 	ss = newbitset(nbits);
 	addclosure(ss, nf, nf->nf_start);
-	xx_state = NEW(ss_state, 1);
+	xx_state = PyMem_NEW(ss_state, 1);
 	if (xx_state == NULL)
-		fatal("no mem for xx_state in makedfa");
+		Py_FatalError("no mem for xx_state in makedfa");
 	xx_nstates = 1;
 	yy = &xx_state[0];
 	yy->ss_ss = ss;
@@ -501,9 +501,10 @@
 						goto found;
 				}
 				/* Add new arc for this state */
-				RESIZE(yy->ss_arc, ss_arc, yy->ss_narcs + 1);
+				PyMem_RESIZE(yy->ss_arc, ss_arc,
+					     yy->ss_narcs + 1);
 				if (yy->ss_arc == NULL)
-					fatal("out of mem");
+					Py_FatalError("out of mem");
 				zz = &yy->ss_arc[yy->ss_narcs++];
 				zz->sa_label = ar->ar_label;
 				zz->sa_bitset = newbitset(nbits);
@@ -523,9 +524,9 @@
 					goto done;
 				}
 			}
-			RESIZE(xx_state, ss_state, xx_nstates + 1);
+			PyMem_RESIZE(xx_state, ss_state, xx_nstates + 1);
 			if (xx_state == NULL)
-				fatal("out of mem");
+				Py_FatalError("out of mem");
 			zz->sa_arrow = xx_nstates;
 			yy = &xx_state[xx_nstates++];
 			yy->ss_ss = zz->sa_bitset;
@@ -537,13 +538,13 @@
 		}
 	}
 	
-	if (debugging)
+	if (Py_DebugFlag)
 		printssdfa(xx_nstates, xx_state, nbits, &gr->gr_ll,
 						"before minimizing");
 	
 	simplify(xx_nstates, xx_state);
 	
-	if (debugging)
+	if (Py_DebugFlag)
 		printssdfa(xx_nstates, xx_state, nbits, &gr->gr_ll,
 						"after minimizing");
 	
@@ -582,7 +583,8 @@
 			zz = &yy->ss_arc[iarc];
 			printf("  Arc to state %d, label %s\n",
 				zz->sa_arrow,
-				labelrepr(&ll->ll_label[zz->sa_label]));
+				PyGrammar_LabelRepr(
+					&ll->ll_label[zz->sa_label]));
 		}
 	}
 }
@@ -621,7 +623,7 @@
 {
 	int i, j;
 	
-	if (debugging)
+	if (Py_DebugFlag)
 		printf("Rename state %d to %d.\n", from, to);
 	for (i = 0; i < xx_nstates; i++) {
 		if (xx_state[i].ss_deleted)
@@ -651,7 +653,8 @@
 					continue;
 				if (samestate(&xx_state[i], &xx_state[j])) {
 					xx_state[i].ss_deleted++;
-					renamestates(xx_nstates, xx_state, i, j);
+					renamestates(xx_nstates, xx_state,
+						     i, j);
 					changes++;
 					break;
 				}
@@ -719,7 +722,7 @@
 	
 	for (i = 0; i < gr->gr_nnfas; i++) {
 		nf = gr->gr_nfa[i];
-		if (debugging) {
+		if (Py_DebugFlag) {
 			printf("Dump of NFA for '%s' ...\n", nf->nf_name);
 			dumpnfa(&gr->gr_ll, nf);
 		}
diff --git a/Parser/pgen.h b/Parser/pgen.h
index eca1d0c..d215225 100644
--- a/Parser/pgen.h
+++ b/Parser/pgen.h
@@ -37,10 +37,10 @@
 
 /* Parser generator interface */
 
-extern grammar *meta_grammar PROTO((void));
+extern grammar *meta_grammar Py_PROTO((void));
 
 struct _node;
-extern grammar *pgen PROTO((struct _node *));
+extern grammar *pgen Py_PROTO((struct _node *));
 
 #ifdef __cplusplus
 }
diff --git a/Parser/pgenmain.c b/Parser/pgenmain.c
index d0395e9..9ed8cab 100644
--- a/Parser/pgenmain.c
+++ b/Parser/pgenmain.c
@@ -49,17 +49,17 @@
 #include "parsetok.h"
 #include "pgen.h"
 
-int debugging;
+int Py_DebugFlag;
 
 /* Forward */
-grammar *getgrammar PROTO((char *filename));
+grammar *getgrammar Py_PROTO((char *filename));
 #ifdef THINK_C
-int main PROTO((int, char **));
-char *askfile PROTO((void));
+int main Py_PROTO((int, char **));
+char *askfile Py_PROTO((void));
 #endif
 
 void
-goaway(sts)
+Py_Exit(sts)
 	int sts;
 {
 	exit(sts);
@@ -79,7 +79,7 @@
 #else
 	if (argc != 2) {
 		fprintf(stderr, "usage: %s grammar\n", argv[0]);
-		goaway(2);
+		Py_Exit(2);
 	}
 	filename = argv[1];
 #endif
@@ -87,7 +87,7 @@
 	fp = fopen("graminit.c", "w");
 	if (fp == NULL) {
 		perror("graminit.c");
-		goaway(1);
+		Py_Exit(1);
 	}
 	printf("Writing graminit.c ...\n");
 	printgrammar(g, fp);
@@ -95,12 +95,12 @@
 	fp = fopen("graminit.h", "w");
 	if (fp == NULL) {
 		perror("graminit.h");
-		goaway(1);
+		Py_Exit(1);
 	}
 	printf("Writing graminit.h ...\n");
 	printnonterminals(g, fp);
 	fclose(fp);
-	goaway(0);
+	Py_Exit(0);
 	return 0; /* Make gcc -Wall happy */
 }
 
@@ -116,10 +116,10 @@
 	fp = fopen(filename, "r");
 	if (fp == NULL) {
 		perror(filename);
-		goaway(1);
+		Py_Exit(1);
 	}
 	g0 = meta_grammar();
-	n = parsefile(fp, filename, g0, g0->g_start,
+	n = PyParser_ParseFile(fp, filename, g0, g0->g_start,
 		      (char *)NULL, (char *)NULL, &err);
 	fclose(fp);
 	if (n == NULL) {
@@ -140,12 +140,12 @@
 			fprintf(stderr, "^\n");
 			free(err.text);
 		}
-		goaway(1);
+		Py_Exit(1);
 	}
 	g = pgen(n);
 	if (g == NULL) {
 		printf("Bad grammar.\n");
-		goaway(1);
+		Py_Exit(1);
 	}
 	return g;
 }
@@ -159,23 +159,23 @@
 	printf("Input file name: ");
 	if (fgets(buf, sizeof buf, stdin) == NULL) {
 		printf("EOF\n");
-		goaway(1);
+		Py_Exit(1);
 	}
 	/* XXX The (unsigned char *) cast is needed by THINK C 3.0 */
 	if (sscanf(/*(unsigned char *)*/buf, " %s ", name) != 1) {
 		printf("No file\n");
-		goaway(1);
+		Py_Exit(1);
 	}
 	return name;
 }
 #endif
 
 void
-fatal(msg)
+Py_FatalError(msg)
 	char *msg;
 {
 	fprintf(stderr, "pgen: FATAL ERROR: %s\n", msg);
-	goaway(1);
+	Py_Exit(1);
 }
 
 #ifdef macintosh
@@ -191,7 +191,7 @@
 /* No-nonsense my_readline() for tokenizer.c */
 
 char *
-my_readline(prompt)
+PyOS_Readline(prompt)
 	char *prompt;
 {
 	int n = 1000;
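
pgenmain.c supplies its own my_readline(), renamed PyOS_Readline() above, and the comment kept in tokenizer.c below spells out the contract: return a malloc'ed string including the trailing newline, an empty malloc'ed string at EOF, and NULL on failure. A standalone sketch of a minimal readline meeting that contract; the fixed initial buffer size and the bare fprintf prompt are simplifications, not the pgenmain.c version.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *
readline_simple(char *prompt)
{
	size_t n = 1000;			/* assumed initial size */
	char *p = (char *) malloc(n);

	if (p == NULL)
		return NULL;
	fprintf(stderr, "%s", prompt);
	if (fgets(p, (int) n, stdin) == NULL) {
		*p = '\0';			/* EOF: empty string */
		return p;
	}
	if (p[strlen(p) - 1] != '\n') {		/* overlong line: make room for \n */
		size_t len = strlen(p);
		char *q = (char *) realloc(p, len + 2);
		if (q == NULL) {
			free(p);
			return NULL;
		}
		p = q;
		p[len] = '\n';
		p[len + 1] = '\0';
	}
	return p;
}

int
main(void)
{
	char *line = readline_simple("Input: ");
	if (line != NULL) {
		printf("read %lu bytes\n", (unsigned long) strlen(line));
		free(line);
	}
	return 0;
}
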
diff --git a/Parser/printgrammar.c b/Parser/printgrammar.c
index 443f651..e3da97e 100644
--- a/Parser/printgrammar.c
+++ b/Parser/printgrammar.c
@@ -35,10 +35,10 @@
 #include "grammar.h"
 
 /* Forward */
-static void printarcs PROTO((int, dfa *, FILE *));
-static void printstates PROTO((grammar *, FILE *));
-static void printdfas PROTO((grammar *, FILE *));
-static void printlabels PROTO((grammar *, FILE *));
+static void printarcs Py_PROTO((int, dfa *, FILE *));
+static void printstates Py_PROTO((grammar *, FILE *));
+static void printdfas Py_PROTO((grammar *, FILE *));
+static void printlabels Py_PROTO((grammar *, FILE *));
 
 void
 printgrammar(g, fp)
diff --git a/Parser/tokenizer.c b/Parser/tokenizer.c
index 9f26840..3dc6c82 100644
--- a/Parser/tokenizer.c
+++ b/Parser/tokenizer.c
@@ -38,7 +38,7 @@
 #include "tokenizer.h"
 #include "errcode.h"
 
-extern char *my_readline PROTO((char *));
+extern char *PyOS_Readline Py_PROTO((char *));
 /* Return malloc'ed string including trailing \n;
    empty malloc'ed string for EOF;
    NULL if interrupted */
@@ -47,13 +47,13 @@
 #define TABSIZE 8
 
 /* Forward */
-static struct tok_state *tok_new PROTO((void));
-static int tok_nextc PROTO((struct tok_state *tok));
-static void tok_backup PROTO((struct tok_state *tok, int c));
+static struct tok_state *tok_new Py_PROTO((void));
+static int tok_nextc Py_PROTO((struct tok_state *tok));
+static void tok_backup Py_PROTO((struct tok_state *tok, int c));
 
 /* Token names */
 
-char *tok_name[] = {
+char *_PyParser_TokenNames[] = {
 	"ENDMARKER",
 	"NAME",
 	"NUMBER",
@@ -103,7 +103,7 @@
 static struct tok_state *
 tok_new()
 {
-	struct tok_state *tok = NEW(struct tok_state, 1);
+	struct tok_state *tok = PyMem_NEW(struct tok_state, 1);
 	if (tok == NULL)
 		return NULL;
 	tok->buf = tok->cur = tok->end = tok->inp = tok->start = NULL;
@@ -124,7 +124,7 @@
 /* Set up tokenizer for string */
 
 struct tok_state *
-tok_setups(str)
+PyTokenizer_FromString(str)
 	char *str;
 {
 	struct tok_state *tok = tok_new();
@@ -138,15 +138,15 @@
 /* Set up tokenizer for file */
 
 struct tok_state *
-tok_setupf(fp, ps1, ps2)
+PyTokenizer_FromFile(fp, ps1, ps2)
 	FILE *fp;
 	char *ps1, *ps2;
 {
 	struct tok_state *tok = tok_new();
 	if (tok == NULL)
 		return NULL;
-	if ((tok->buf = NEW(char, BUFSIZ)) == NULL) {
-		DEL(tok);
+	if ((tok->buf = PyMem_NEW(char, BUFSIZ)) == NULL) {
+		PyMem_DEL(tok);
 		return NULL;
 	}
 	tok->cur = tok->inp = tok->buf;
@@ -161,12 +161,12 @@
 /* Free a tok_state structure */
 
 void
-tok_free(tok)
+PyTokenizer_Free(tok)
 	struct tok_state *tok;
 {
 	if (tok->fp != NULL && tok->buf != NULL)
-		DEL(tok->buf);
-	DEL(tok);
+		PyMem_DEL(tok->buf);
+	PyMem_DEL(tok);
 }
 
 
@@ -200,7 +200,7 @@
 			return *tok->cur++;
 		}
 		if (tok->prompt != NULL) {
-			char *new = my_readline(tok->prompt);
+			char *new = PyOS_Readline(tok->prompt);
 			if (tok->nextprompt != NULL)
 				tok->prompt = tok->nextprompt;
 			if (new == NULL)
@@ -246,7 +246,7 @@
 			char *pt;
 			if (tok->start == NULL) {
 				if (tok->buf == NULL) {
-					tok->buf = NEW(char, BUFSIZ);
+					tok->buf = PyMem_NEW(char, BUFSIZ);
 					if (tok->buf == NULL) {
 						tok->done = E_NOMEM;
 						return EOF;
@@ -281,7 +281,7 @@
 				int curvalid = tok->inp - tok->buf;
 				int newsize = curvalid + BUFSIZ;
 				char *newbuf = tok->buf;
-				RESIZE(newbuf, char, newsize);
+				PyMem_RESIZE(newbuf, char, newsize);
 				if (newbuf == NULL) {
 					tok->done = E_NOMEM;
 					tok->cur = tok->inp;
@@ -334,7 +334,7 @@
 {
 	if (c != EOF) {
 		if (--tok->cur < tok->buf)
-			fatal("tok_backup: begin of buffer");
+			Py_FatalError("tok_backup: begin of buffer");
 		if (*tok->cur != c)
 			*tok->cur = c;
 	}
@@ -344,7 +344,7 @@
 /* Return the token corresponding to a single character */
 
 int
-tok_1char(c)
+PyToken_OneChar(c)
 	int c;
 {
 	switch (c) {
@@ -377,7 +377,7 @@
 
 
 int
-tok_2char(c1, c2)
+PyToken_TwoChars(c1, c2)
 	int c1, c2;
 {
 	switch (c1) {
@@ -417,7 +417,7 @@
 /* Get next token, after space stripping etc. */
 
 int
-tok_get(tok, p_start, p_end)
+PyTokenizer_Get(tok, p_start, p_end)
 	register struct tok_state *tok; /* In/out: tokenizer state */
 	char **p_start, **p_end; /* Out: point to start/end of token */
 {
@@ -481,7 +481,8 @@
 					tok->pendin--;
 				}
 				if (col != tok->indstack[tok->indent]) {
-					fprintf(stderr, "inconsistent dedent\n");
+					fprintf(stderr,
+						"inconsistent dedent\n");
 					tok->done = E_TOKEN;
 					tok->cur = tok->inp;
 					return ERRORTOKEN;
@@ -569,7 +570,8 @@
 	
 #ifdef macintosh
 	if (c == '\r') {
-		fprintf(stderr, "File contains \\r characters (incorrect line endings?)\n");
+		fprintf(stderr,
+		  "File contains \\r characters (incorrect line endings?)\n");
 		tok->done = E_TOKEN;
 		tok->cur = tok->inp;
 		return ERRORTOKEN;
@@ -726,7 +728,7 @@
 	/* Check for two-character token */
 	{
 		int c2 = tok_nextc(tok);
-		int token = tok_2char(c, c2);
+		int token = PyToken_TwoChars(c, c2);
 		if (token != OP) {
 			*p_start = tok->start;
 			*p_end = tok->cur;
@@ -752,7 +754,7 @@
 	/* Punctuation character */
 	*p_start = tok->start;
 	*p_end = tok->cur;
-	return tok_1char(c);
+	return PyToken_OneChar(c);
 }
 
 
@@ -763,7 +765,7 @@
 	int type;
 	char *start, *end;
 {
-	printf("%s", tok_name[type]);
+	printf("%s", _PyParser_TokenNames[type]);
 	if (type == NAME || type == NUMBER || type == STRING || type == OP)
 		printf("(%.*s)", (int)(end - start), start);
 }
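
The tok_get() hunks above touch the indentation machinery: the tokenizer keeps a stack of indentation columns, counts pending INDENT/DEDENT tokens in tok->pendin, and reports "inconsistent dedent" when a dedented column matches no level on the stack. A standalone sketch of that bookkeeping; the names, the stack limit and the error return value are simplifications, not tokenizer.c itself.

#include <stdio.h>

#define MAXINDENT 100

static int indstack[MAXINDENT];		/* indstack[0] == column 0 */
static int indent = 0;			/* index of the top of the stack */

static int
change_indent(int col)			/* returns pending INDENT/DEDENT count */
{
	if (col > indstack[indent]) {
		indstack[++indent] = col;
		return +1;		/* one INDENT */
	}
	if (col < indstack[indent]) {
		int pend = 0;
		while (indent > 0 && col < indstack[indent]) {
			indent--;
			pend--;		/* one DEDENT per popped level */
		}
		if (col != indstack[indent])
			return -999;	/* "inconsistent dedent" */
		return pend;
	}
	return 0;			/* same level: nothing pending */
}

int
main(void)
{
	printf("%d\n", change_indent(4));	/* +1: INDENT */
	printf("%d\n", change_indent(8));	/* +1: INDENT */
	printf("%d\n", change_indent(0));	/* -2: two DEDENTs */
	printf("%d\n", change_indent(2));	/* +1 */
	printf("%d\n", change_indent(1));	/* error: inconsistent dedent */
	return 0;
}
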
diff --git a/Parser/tokenizer.h b/Parser/tokenizer.h
index cef90da..837bdca 100644
--- a/Parser/tokenizer.h
+++ b/Parser/tokenizer.h
@@ -64,10 +64,11 @@
 			/* Used to allow free continuations inside them */
 };
 
-extern struct tok_state *tok_setups PROTO((char *));
-extern struct tok_state *tok_setupf PROTO((FILE *, char *, char *));
-extern void tok_free PROTO((struct tok_state *));
-extern int tok_get PROTO((struct tok_state *, char **, char **));
+extern struct tok_state *PyTokenizer_FromString Py_PROTO((char *));
+extern struct tok_state *PyTokenizer_FromFile
+	Py_PROTO((FILE *, char *, char *));
+extern void PyTokenizer_Free Py_PROTO((struct tok_state *));
+extern int PyTokenizer_Get Py_PROTO((struct tok_state *, char **, char **));
 
 #ifdef __cplusplus
 }