Issue #7820: The parser tokenizer now restores all bytes in the right order
if the BOM check fails.
Fix an assertion failure in pydebug mode.
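
For context, here is a minimal, self-contained sketch of the pushback scheme the
patch adopts: if the UTF-8 BOM check fails partway through, every byte already
consumed is ungot in reverse order so the caller sees the stream unchanged. The
struct reader, get_byte and unget_byte names below are illustrative stand-ins
for the tokenizer's tok_state, get_char and unget_char, not the real CPython API.

    #include <stdio.h>

    struct reader {
        const unsigned char *buf;   /* input bytes */
        size_t len, pos;            /* length and read cursor */
        int pushback[4];            /* small LIFO stack of ungot bytes */
        int npushed;
    };

    static int get_byte(struct reader *r)
    {
        if (r->npushed > 0)
            return r->pushback[--r->npushed];
        if (r->pos >= r->len)
            return EOF;
        return r->buf[r->pos++];
    }

    static void unget_byte(struct reader *r, int c)
    {
        if (c != EOF)
            r->pushback[r->npushed++] = c;
    }

    /* Return 1 if a UTF-8 BOM was consumed, 0 otherwise.  On failure the
       reader is left exactly where it started. */
    static int skip_utf8_bom(struct reader *r)
    {
        int c1 = get_byte(r);
        if (c1 != 0xEF) {
            unget_byte(r, c1);
            return 0;
        }
        int c2 = get_byte(r);
        if (c2 != 0xBB) {
            unget_byte(r, c2);   /* restore in reverse read order */
            unget_byte(r, c1);
            return 0;
        }
        int c3 = get_byte(r);
        if (c3 != 0xBF) {
            unget_byte(r, c3);
            unget_byte(r, c2);
            unget_byte(r, c1);
            return 0;
        }
        return 1;
    }

    int main(void)
    {
        /* 0xEF not followed by a full BOM: the old code pushed back a bogus
           0xFF here; with the pushback scheme the original bytes come back
           out intact. */
        static const unsigned char src[] = { 0xEF, 0x40, 'x', '=', '1' };
        struct reader r = { .buf = src, .len = sizeof src };

        printf("BOM skipped: %d\n", skip_utf8_bom(&r));
        for (int c; (c = get_byte(&r)) != EOF; )
            printf("0x%02X ", c);
        putchar('\n');
        return 0;
    }

Running the sketch prints the bytes back in their original order
(0xEF 0x40 0x78 0x3D 0x31), which is the behavior the patch below establishes
for the real tokenizer.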
diff --git a/Parser/tokenizer.c b/Parser/tokenizer.c
index 04749c8..b881e7c 100644
--- a/Parser/tokenizer.c
+++ b/Parser/tokenizer.c
@@ -312,47 +312,57 @@
int set_readline(struct tok_state *, const char *),
struct tok_state *tok)
{
- int ch = get_char(tok);
+ int ch1, ch2, ch3;
+ ch1 = get_char(tok);
tok->decoding_state = 1;
- if (ch == EOF) {
+ if (ch1 == EOF) {
return 1;
- } else if (ch == 0xEF) {
- ch = get_char(tok);
- if (ch != 0xBB)
- goto NON_BOM;
- ch = get_char(tok);
- if (ch != 0xBF)
- goto NON_BOM;
+ } else if (ch1 == 0xEF) {
+ ch2 = get_char(tok);
+ if (ch2 != 0xBB) {
+ unget_char(ch2, tok);
+ unget_char(ch1, tok);
+ return 1;
+ }
+ ch3 = get_char(tok);
+ if (ch3 != 0xBF) {
+ unget_char(ch3, tok);
+ unget_char(ch2, tok);
+ unget_char(ch1, tok);
+ return 1;
+ }
#if 0
/* Disable support for UTF-16 BOMs until a decision
is made whether this needs to be supported. */
- } else if (ch == 0xFE) {
- ch = get_char(tok);
- if (ch != 0xFF)
- goto NON_BOM;
+ } else if (ch1 == 0xFE) {
+ ch2 = get_char(tok);
+ if (ch2 != 0xFF) {
+ unget_char(ch2, tok);
+ unget_char(ch1, tok);
+ return 1;
+ }
if (!set_readline(tok, "utf-16-be"))
return 0;
tok->decoding_state = -1;
- } else if (ch == 0xFF) {
- ch = get_char(tok);
- if (ch != 0xFE)
- goto NON_BOM;
+ } else if (ch1 == 0xFF) {
+ ch2 = get_char(tok);
+ if (ch2 != 0xFE) {
+ unget_char(ch2, tok);
+ unget_char(ch1, tok);
+ return 1;
+ }
if (!set_readline(tok, "utf-16-le"))
return 0;
tok->decoding_state = -1;
#endif
} else {
- unget_char(ch, tok);
+ unget_char(ch1, tok);
return 1;
}
if (tok->encoding != NULL)
PyMem_FREE(tok->encoding);
tok->encoding = new_string("utf-8", 5); /* resulting is in utf-8 */
return 1;
- NON_BOM:
- /* any token beginning with '\xEF', '\xFE', '\xFF' is a bad token */
- unget_char(0xFF, tok); /* XXX this will cause a syntax error */
- return 1;
}
/* Read a line of text from TOK into S, using the stream in TOK.