Fix PEP 263 code --without-unicode. Fixes #591943.
diff --git a/Parser/tokenizer.c b/Parser/tokenizer.c
index 3761a48..34fbfa6 100644
--- a/Parser/tokenizer.c
+++ b/Parser/tokenizer.c
@@ -256,11 +256,18 @@
 			    strcmp(cs, "iso-8859-1") == 0) {
 				tok->encoding = cs;
 			} else {
+#ifdef Py_USING_UNICODE
 				r = set_readline(tok, cs);
 				if (r) {
 					tok->encoding = cs;
 					tok->decoding_state = -1;
 				}
+#else
+                                /* Without Unicode support, we cannot
+                                   process the coding spec. Since there
+                                   won't be any Unicode literals, that
+                                   won't matter. */
+#endif
 			}
 		} else {	/* then, compare cs with BOM */
 			r = (strcmp(tok->encoding, cs) == 0);
@@ -317,6 +324,10 @@
 static char *
 fp_readl(char *s, int size, struct tok_state *tok)
 {
+#ifndef Py_USING_UNICODE
+	/* In a non-Unicode build, this should never be called. */
+	abort();
+#else
 	PyObject* utf8;
 	PyObject* buf = tok->decoding_buffer;
 	if (buf == NULL) {
@@ -338,6 +349,7 @@
 		if (s[0] == '\0') return NULL; /* EOF */
 		return s;
 	}
+#endif
 }
 
 /* Set the readline function for TOK to a StreamReader's
@@ -487,6 +499,7 @@
 /* Return a UTF-8 encoding Python string object from the
    C byte string STR, which is encoded with ENC. */
 
+#ifdef Py_USING_UNICODE
 static PyObject *
 translate_into_utf8(const char* str, const char* enc) {
 	PyObject *utf8;
@@ -497,6 +510,7 @@
 	Py_DECREF(buf);
 	return utf8;
 }
+#endif
 
 /* Decode a byte string STR for use as the buffer of TOK.
    Look for encoding declarations inside STR, and record them
@@ -514,12 +528,14 @@
 		return NULL;
 	str = tok->str;		/* string after BOM if any */
 	assert(str);
+#ifdef Py_USING_UNICODE
 	if (tok->enc != NULL) {
 		utf8 = translate_into_utf8(str, tok->enc);
 		if (utf8 == NULL)
 			return NULL;
 		str = PyString_AsString(utf8);
 	}
+#endif
 	for (s = str;; s++) {
 		if (*s == '\0') break;
 		else if (*s == '\n') {
@@ -530,6 +546,7 @@
 	tok->enc = NULL;
 	if (!check_coding_spec(str, s - str, tok, buf_setreadl))
 		return NULL;
+#ifdef Py_USING_UNICODE
 	if (tok->enc != NULL) {
 		assert(utf8 == NULL);
 		utf8 = translate_into_utf8(str, tok->enc);
@@ -537,6 +554,7 @@
 			return NULL;
 		str = PyString_AsString(utf8);
 	}
+#endif
 	assert(tok->decoding_buffer == NULL);
 	tok->decoding_buffer = utf8; /* CAUTION */
 	return str;
diff --git a/Python/compile.c b/Python/compile.c
index 512b5a3..b671937 100644
--- a/Python/compile.c
+++ b/Python/compile.c
@@ -1185,6 +1185,9 @@
 static PyObject *
 decode_utf8(char **sPtr, char *end, char* encoding)
 {
+#ifndef Py_USING_UNICODE
+	abort();
+#else
 	PyObject *u, *v;
 	char *s, *t;
 	t = s = *sPtr;
@@ -1197,6 +1200,7 @@
 	v = PyUnicode_AsEncodedString(u, encoding, NULL);
 	Py_DECREF(u);
 	return v;
+#endif
 }
 
 static PyObject *
@@ -1312,12 +1316,18 @@
 			 strcmp(encoding, "iso-8859-1") != 0);
 	if (rawmode || strchr(s, '\\') == NULL) {
 		if (need_encoding) {
+#ifndef Py_USING_UNICODE
+			/* This should not happen - we never see any other
+			   encoding. */
+			abort();
+#else
 			PyObject* u = PyUnicode_DecodeUTF8(s, len, NULL);
 			if (u == NULL)
 				return NULL;
 			v = PyUnicode_AsEncodedString(u, encoding, NULL);
 			Py_DECREF(u);
 			return v;
+#endif
 		} else {
 			return PyString_FromStringAndSize(s, len);
 		}