Convert interactively entered code from sys.stdin.encoding to UTF-8
before tokenizing it. Fixes #1100.

PyParser_ParseFileFlags() and PyTokenizer_FromFile() gain an encoding
parameter. When it is non-NULL, each line returned by PyOS_Readline()
is decoded with that encoding and re-encoded as UTF-8 (through
translate_into_utf8(), sketched after the diff) before the tokenizer
sees it.
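
The encoding itself is expected to be supplied by the interactive loop
in the caller, which is not part of this diff. A minimal sketch of that
caller side follows; the helper name run_interactive_line(), its error
handling, and the use of PyUnicode_AsUTF8() for extracting the encoding
name are illustrative assumptions, not part of the patch:

    #include "Python.h"
    #include "node.h"
    #include "grammar.h"
    #include "parsetok.h"

    /* Hypothetical caller-side helper: parse one interactive statement,
       passing sys.stdin.encoding down so the tokenizer can recode the
       input line to UTF-8. */
    static node *
    run_interactive_line(grammar *g, int start, perrdetail *err)
    {
        const char *enc = NULL;
        PyObject *stdin_obj = PySys_GetObject("stdin");  /* borrowed ref */
        PyObject *oenc = NULL;
        node *n;

        if (stdin_obj != NULL && stdin_obj != Py_None) {
            oenc = PyObject_GetAttrString(stdin_obj, "encoding");
            if (oenc != NULL)
                enc = PyUnicode_AsUTF8(oenc);
            if (enc == NULL)
                PyErr_Clear();   /* fall back to no recoding */
        }
        n = PyParser_ParseFileFlags(stdin, "<stdin>", enc, g, start,
                                    ">>> ", "... ", err, 0);
        Py_XDECREF(oenc);  /* enc is only needed while parsing above */
        return n;
    }

The parser copies enc into the tokenizer state, so the caller does not
need to keep the string alive beyond the PyParser_ParseFileFlags() call.
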
diff --git a/Parser/parsetok.c b/Parser/parsetok.c
index 71bed29..b9664ea 100644
--- a/Parser/parsetok.c
+++ b/Parser/parsetok.c
@@ -59,19 +59,20 @@
 PyParser_ParseFile(FILE *fp, const char *filename, grammar *g, int start,
 		   char *ps1, char *ps2, perrdetail *err_ret)
 {
-	return PyParser_ParseFileFlags(fp, filename, g, start, ps1, ps2,
-				       err_ret, 0);
+	return PyParser_ParseFileFlags(fp, filename, NULL,
+				       g, start, ps1, ps2, err_ret, 0);
 }
 
 node *
-PyParser_ParseFileFlags(FILE *fp, const char *filename, grammar *g, int start,
+PyParser_ParseFileFlags(FILE *fp, const char *filename, const char *enc,
+			grammar *g, int start,
 			char *ps1, char *ps2, perrdetail *err_ret, int flags)
 {
 	struct tok_state *tok;
 
 	initerr(err_ret, filename);
 
-	if ((tok = PyTokenizer_FromFile(fp, ps1, ps2)) == NULL) {
+	if ((tok = PyTokenizer_FromFile(fp, enc, ps1, ps2)) == NULL) {
 		err_ret->error = E_NOMEM;
 		return NULL;
 	}
diff --git a/Parser/tokenizer.c b/Parser/tokenizer.c
index 776183d..7f51e14 100644
--- a/Parser/tokenizer.c
+++ b/Parser/tokenizer.c
@@ -677,7 +677,7 @@
 /* Set up tokenizer for file */
 
 struct tok_state *
-PyTokenizer_FromFile(FILE *fp, char *ps1, char *ps2)
+PyTokenizer_FromFile(FILE *fp, const char *enc, char *ps1, char *ps2)
 {
 	struct tok_state *tok = tok_new();
 	if (tok == NULL)
@@ -691,6 +691,17 @@
 	tok->fp = fp;
 	tok->prompt = ps1;
 	tok->nextprompt = ps2;
+	if (enc != NULL) {
+		/* Must copy encoding declaration since it
+		   gets copied into the parse tree. */
+		tok->encoding = PyMem_MALLOC(strlen(enc)+1);
+		if (!tok->encoding) {
+			PyTokenizer_Free(tok);
+			return NULL;
+		}
+		strcpy(tok->encoding, enc);
+		tok->decoding_state = -1;
+	}
 	return tok;
 }
 
@@ -742,6 +753,34 @@
 		}
 		if (tok->prompt != NULL) {
 			char *newtok = PyOS_Readline(stdin, stdout, tok->prompt);
+#ifndef PGEN
+			if (tok->encoding && newtok && *newtok) {
+				/* Recode to UTF-8 */
+				Py_ssize_t buflen;
+				const char* buf;
+				PyObject *u = translate_into_utf8(newtok, tok->encoding);
+				PyMem_FREE(newtok);
+				if (!u) {
+					tok->done = E_DECODE;
+					return EOF;
+				}
+				buflen = PyBytes_Size(u);
+				buf = PyBytes_AsString(u);
+				if (!buf) {
+					Py_DECREF(u);
+					tok->done = E_DECODE;
+					return EOF;
+				}
+				newtok = PyMem_MALLOC(buflen+1);
+				if (newtok == NULL) {
+					Py_DECREF(u);
+					tok->done = E_NOMEM;
+					return EOF;
+				}
+				strcpy(newtok, buf);
+				Py_DECREF(u);
+			}
+#endif
 			if (tok->nextprompt != NULL)
 				tok->prompt = tok->nextprompt;
 			if (newtok == NULL)
diff --git a/Parser/tokenizer.h b/Parser/tokenizer.h
index 5e7ebf7..ba90a5f 100644
--- a/Parser/tokenizer.h
+++ b/Parser/tokenizer.h
@@ -55,7 +55,8 @@
 };
 
 extern struct tok_state *PyTokenizer_FromString(const char *);
-extern struct tok_state *PyTokenizer_FromFile(FILE *, char *, char *);
+extern struct tok_state *PyTokenizer_FromFile(FILE *, const char *,
+					      char *, char *);
 extern void PyTokenizer_Free(struct tok_state *);
 extern int PyTokenizer_Get(struct tok_state *, char **, char **);
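
The tokenizer.c hunk above calls translate_into_utf8(), whose definition
is not part of this diff. A sketch of what such a helper is expected to
do (decode the raw line with the declared encoding, then re-encode the
result as UTF-8 bytes), under the assumption that the static helper in
tokenizer.c behaves this way:

    /* Assumed behaviour of the helper used above: decode str with enc,
       then re-encode the result as a UTF-8 bytes object.  Returns a new
       reference, or NULL if the line cannot be decoded. */
    static PyObject *
    translate_into_utf8(const char *str, const char *enc)
    {
        PyObject *utf8;
        PyObject *buf = PyUnicode_Decode(str, strlen(str), enc, NULL);
        if (buf == NULL)
            return NULL;
        utf8 = PyUnicode_AsUTF8String(buf);
        Py_DECREF(buf);
        return utf8;
    }

When this returns NULL, the hunk above reports the failure to the parser
as E_DECODE.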