Fix an issue in PyTokenizer_RestoreEncoding(), which was operating on a
PyBytes object with PyString calls and not checking for errors.  This
garbled the display of syntax errors.
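
For context, the corrected pattern amounts to the sketch below.  The
helper name to_source_encoding() is hypothetical (the tokenizer uses its
private dec_utf8() helper); the point is the error handling the patch
introduces: clear a failed conversion instead of printing it, and assert
that a successful one really produced a PyBytes object.

#include <Python.h>
#include <assert.h>

/* Hypothetical sketch of the fixed conversion: turn `len` bytes of the
   tokenizer's UTF-8 buffer back into the file's declared source
   encoding, returning a new PyBytes reference or NULL on failure. */
static PyObject *
to_source_encoding(const char *enc, const char *utf8, Py_ssize_t len)
{
	PyObject *unicode, *bytes = NULL;

	unicode = PyUnicode_DecodeUTF8(utf8, len, "replace");
	if (unicode != NULL) {
		bytes = PyUnicode_AsEncodedString(unicode, enc, "replace");
		Py_DECREF(unicode);
	}
	if (bytes == NULL) {
		/* Clear, don't print: a failed conversion must not
		   clobber the syntax error currently being reported. */
		PyErr_Clear();
		return NULL;
	}
	assert(PyBytes_Check(bytes));
	return bytes;
}
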
diff --git a/Parser/tokenizer.c b/Parser/tokenizer.c
index 0d43381..bb171c3 100644
--- a/Parser/tokenizer.c
+++ b/Parser/tokenizer.c
@@ -1556,7 +1556,10 @@
 		Py_DECREF(unicode_text);
 	}
 	if (!ret) {
-		PyErr_Print();
+		PyErr_Clear();
+	}
+	else {
+		assert(PyBytes_Check(ret));
 	}
 	return ret;
 }
@@ -1569,8 +1572,8 @@
 		/* convert source to original encoding */
 		PyObject *lineobj = dec_utf8(tok->encoding, tok->buf, len);
 		if (lineobj != NULL) {
-			int linelen = PyString_Size(lineobj);
-			const char *line = PyString_AsString(lineobj);
+			int linelen = PyBytes_GET_SIZE(lineobj);
+			const char *line = PyBytes_AS_STRING(lineobj);
 			text = PyObject_MALLOC(linelen + 1);
 			if (text != NULL && line != NULL) {
 				if (linelen)
@@ -1582,9 +1585,11 @@
 			/* adjust error offset */
 			if (*offset > 1) {
 				PyObject *offsetobj = dec_utf8(tok->encoding, 
-							       tok->buf, *offset-1);
+							       tok->buf,
+							       *offset-1);
 				if (offsetobj) {
-					*offset = PyString_Size(offsetobj) + 1;
+					*offset = 1 +
+						PyBytes_GET_SIZE(offsetobj);
 					Py_DECREF(offsetobj);
 				}
 			}
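
The last hunk re-derives the 1-based error column for the converted
line: the byte length of the converted prefix, plus one, replaces the
offset that was valid only for the UTF-8 buffer.  A usage sketch, again
with the hypothetical helper above rather than the tokenizer's private
dec_utf8():

/* Hypothetical caller: recompute a 1-based error column after the line
   has been converted back to the source encoding. */
static void
adjust_error_offset(const char *enc, const char *utf8_buf, int *offset)
{
	if (*offset > 1) {
		PyObject *prefix =
			to_source_encoding(enc, utf8_buf, *offset - 1);
		if (prefix != NULL) {
			*offset = 1 + (int)PyBytes_GET_SIZE(prefix);
			Py_DECREF(prefix);
		}
	}
}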