Remove the use of the non-existent re.ASCII flag.
(Fixes a regression in 3d46ef0c62c5, issue #18873.)
diff --git a/Lib/lib2to3/pgen2/tokenize.py b/Lib/lib2to3/pgen2/tokenize.py
index 9cf2a69..f6e0284 100644
--- a/Lib/lib2to3/pgen2/tokenize.py
+++ b/Lib/lib2to3/pgen2/tokenize.py
@@ -236,7 +236,7 @@
                 startline = False
             toks_append(tokval)
 
-cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
+cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)')
 
 def _get_normal_name(orig_enc):
     """Imitates get_normal_name in tokenizer.c."""