Merged revisions 84364 via svnmerge from
svn+ssh://pythondev@svn.python.org/python/branches/py3k

........
  r84364 | benjamin.peterson | 2010-08-30 09:41:20 -0500 (Mon, 30 Aug 2010) | 1 line

  handle names starting with non-ascii characters correctly #9712
........
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 7b91ab2..eeefce1 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -533,6 +533,7 @@
     True
 
 Evil tabs
+
     >>> dump_tokens("def f():\\n\\tif x\\n        \\tpass")
     ENCODING   'utf-8'       (0, 0) (0, 0)
     NAME       'def'         (1, 0) (1, 3)
@@ -549,6 +550,18 @@
     NAME       'pass'        (3, 9) (3, 13)
     DEDENT     ''            (4, 0) (4, 0)
     DEDENT     ''            (4, 0) (4, 0)
+
+Non-ascii identifiers
+
+    >>> dump_tokens("Örter = 'places'\\ngrün = 'green'")
+    ENCODING   'utf-8'       (0, 0) (0, 0)
+    NAME       'Örter'       (1, 0) (1, 5)
+    OP         '='           (1, 6) (1, 7)
+    STRING     "'places'"    (1, 8) (1, 16)
+    NEWLINE    '\\n'          (1, 16) (1, 17)
+    NAME       'grün'        (2, 0) (2, 4)
+    OP         '='           (2, 5) (2, 6)
+    STRING     "'green'"     (2, 7) (2, 14)
 """
 
 from test import support
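
For reference, a quick standalone check of the behaviour the new doctest pins
down (this mirrors what dump_tokens exercises; the variable names here are
illustrative, not from the test suite):

    import io, tokenize

    src = "Örter = 'places'\ngrün = 'green'\n"
    for tok in tokenize.tokenize(io.BytesIO(src.encode('utf-8')).readline):
        # Prints e.g.: NAME 'Örter' (1, 0) (1, 5)
        print(tokenize.tok_name[tok.type], repr(tok.string), tok.start, tok.end)
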
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index 9d2a6bb..e711a21 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -92,7 +92,7 @@
 Whitespace = r'[ \f\t]*'
 Comment = r'#[^\r\n]*'
 Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
-Name = r'[a-zA-Z_]\w*'
+Name = r'\w+'
 
 Hexnumber = r'0[xX][0-9a-fA-F]+'
 Binnumber = r'0[bB][01]+'
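
The one-character class at the start of the old pattern is the bug: [a-zA-Z_]
can never match a leading 'Ö', even though \w (Unicode-aware for str patterns)
would happily match it in later positions. A minimal sketch of the difference
(the pattern names below are illustrative):

    import re

    old_name = re.compile(r'[a-zA-Z_]\w*')     # ASCII-only first character
    new_name = re.compile(r'\w+', re.UNICODE)  # any word character throughout

    print(old_name.match('Örter'))  # None: 'Ö' falls outside [a-zA-Z_]
    print(old_name.match('grün'))   # matches: 'g' is ASCII, \w* takes 'rün'
    print(new_name.match('Örter'))  # matches the whole name
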
@@ -142,9 +142,12 @@
 PseudoExtras = group(r'\\\r?\n', Comment, Triple)
 PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
 
+def _compile(expr):
+    return re.compile(expr, re.UNICODE)
+
 tokenprog, pseudoprog, single3prog, double3prog = map(
-    re.compile, (Token, PseudoToken, Single3, Double3))
-endprogs = {"'": re.compile(Single), '"': re.compile(Double),
+    _compile, (Token, PseudoToken, Single3, Double3))
+endprogs = {"'": _compile(Single), '"': _compile(Double),
             "'''": single3prog, '"""': double3prog,
             "r'''": single3prog, 'r"""': double3prog,
             "b'''": single3prog, 'b"""': double3prog,
@@ -171,6 +174,8 @@
           "bR'", 'bR"', "BR'", 'BR"' ):
     single_quoted[t] = t
 
+del _compile
+
 tabsize = 8
 
 class TokenError(Exception): pass
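
Worth noting: for str patterns in Python 3, re.UNICODE is already the default,
so the _compile helper documents the intent explicitly rather than changing
matching behaviour, and deleting it afterwards keeps the helper out of the
module namespace. A quick check of that equivalence:

    import re

    # Identical matches with and without the explicit flag on a str pattern.
    assert re.match(r'\w+', 'grün').group() == 'grün'
    assert re.match(r'\w+', 'grün', re.UNICODE).group() == 'grün'
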
@@ -392,7 +397,7 @@
 
 def _tokenize(readline, encoding):
     lnum = parenlev = continued = 0
-    namechars, numchars = string.ascii_letters + '_', '0123456789'
+    numchars = '0123456789'
     contstr, needcont = '', 0
     contline = None
     indents = [0]
@@ -516,7 +521,7 @@
                         break
                     else:                                  # ordinary string
                         yield TokenInfo(STRING, token, spos, epos, line)
-                elif initial in namechars:                 # ordinary name
+                elif initial.isidentifier():               # ordinary name
                     yield TokenInfo(NAME, token, spos, epos, line)
                 elif initial == '\\':                      # continued stmt
                     continued = 1
diff --git a/Misc/NEWS b/Misc/NEWS
index 0ff6fa0..4464411 100644
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -14,6 +14,8 @@
 
 - Restore GIL in nis_cat in case of error.
 
+- Issue #9712: Fix tokenize on identifiers that start with non-ASCII characters.
+
 - Issue #9688: __basicsize__ and __itemsize__ must be accessed as Py_ssize_t.
 
 - Issue #5319: Print an error if flushing stdout fails at interpreter