Fixed bug #1915: Python compiles with --enable-unicode=no again. However several extension methods and modules do not work without unicode support.
diff --git a/Lib/encodings/__init__.py b/Lib/encodings/__init__.py
index 98ae2fa..b94c374 100644
--- a/Lib/encodings/__init__.py
+++ b/Lib/encodings/__init__.py
@@ -60,7 +60,7 @@
     """
     # Make sure we have an 8-bit string, because .translate() works
     # differently for Unicode strings.
-    if type(encoding) is types.UnicodeType:
+    if hasattr(types, "UnicodeType") and type(encoding) is types.UnicodeType:
         # Note that .encode('latin-1') does *not* use the codec
         # registry, so this call doesn't recurse. (See unicodeobject.c
         # PyUnicode_AsEncodedString() for details)
diff --git a/Lib/optparse.py b/Lib/optparse.py
index 62d2f7e..4fbe094 100644
--- a/Lib/optparse.py
+++ b/Lib/optparse.py
@@ -824,7 +824,11 @@
     (True, False) = (1, 0)
 
 def isbasestring(x):
-    return isinstance(x, types.StringType) or isinstance(x, types.UnicodeType)
+    try:
+        return isinstance(x, basestring)
+    except NameError:
+        return isinstance(x, types.StringType) or isinstance(x, types.UnicodeType)
+
 
 class Values:
 
diff --git a/Misc/NEWS b/Misc/NEWS
index 1fa60b0..eb23c09 100644
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -12,6 +12,10 @@
 Core and builtins
 -----------------
 
+- Bug #1915: Python compiles with --enable-unicode=no again. However
+  several extension methods and modules do not work without unicode
+  support.
+
 - Issue #1678380: distinction between 0.0 and -0.0 was lost during constant
   folding optimization.  This was a regression from Python 2.4.
 
diff --git a/Parser/tokenizer.c b/Parser/tokenizer.c
index 1533502..f065962 100644
--- a/Parser/tokenizer.c
+++ b/Parser/tokenizer.c
@@ -1537,7 +1537,7 @@
    there, as it must be empty for PGEN, and we can check for PGEN only
    in this file. */
 
-#ifdef PGEN
+#if defined(PGEN) || !defined(Py_USING_UNICODE)
 char*
 PyTokenizer_RestoreEncoding(struct tok_state* tok, int len, int* offset)
 {
@@ -1557,7 +1557,6 @@
 	}
 	return ret;
 }
-
 char *
 PyTokenizer_RestoreEncoding(struct tok_state* tok, int len, int *offset)
 {