"""Tokenization help for Python programs.

tokenize(readline) is a generator that breaks a stream of bytes into
Python tokens.  It decodes the bytes according to PEP-0263 for
determining source file encoding.

It accepts a readline-like method which is called repeatedly to get the
next line of input (or b"" for EOF).  It generates 5-tuples with these
members:

    the token type (see token.py)
    the token (a string)
    the starting (row, column) indices of the token (a 2-tuple of ints)
    the ending (row, column) indices of the token (a 2-tuple of ints)
    the original line (string)

It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators.  Additionally, all token lists start with an ENCODING token
which tells you which encoding was used to decode the bytes stream.
"""

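# For illustration (an editor's sketch, not part of the original source):
# tokenizing b"x = 1\n" yields an ENCODING token followed by tuples such as
#
#     TokenInfo(type=NAME, string='x', start=(1, 0), end=(1, 1), line='x = 1\n')
#
# where rows are 1-based and columns are 0-based.
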
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
               'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
               'Michael Foord')
from builtins import open as _builtin_open
from codecs import lookup, BOM_UTF8
import collections
from io import TextIOWrapper
from itertools import chain
import re
import sys
from token import *

cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)

import token
__all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
                           "NL", "untokenize", "ENCODING", "TokenInfo"]
del token

COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
ENCODING = N_TOKENS + 2
tok_name[ENCODING] = 'ENCODING'
N_TOKENS += 3
EXACT_TOKEN_TYPES = {
    '(': LPAR,
    ')': RPAR,
    '[': LSQB,
    ']': RSQB,
    ':': COLON,
    ',': COMMA,
    ';': SEMI,
    '+': PLUS,
    '-': MINUS,
    '*': STAR,
    '/': SLASH,
    '|': VBAR,
    '&': AMPER,
    '<': LESS,
    '>': GREATER,
    '=': EQUAL,
    '.': DOT,
    '%': PERCENT,
    '{': LBRACE,
    '}': RBRACE,
    '==': EQEQUAL,
    '!=': NOTEQUAL,
    '<=': LESSEQUAL,
    '>=': GREATEREQUAL,
    '~': TILDE,
    '^': CIRCUMFLEX,
    '<<': LEFTSHIFT,
    '>>': RIGHTSHIFT,
    '**': DOUBLESTAR,
    '+=': PLUSEQUAL,
    '-=': MINEQUAL,
    '*=': STAREQUAL,
    '/=': SLASHEQUAL,
    '%=': PERCENTEQUAL,
    '&=': AMPEREQUAL,
    '|=': VBAREQUAL,
    '^=': CIRCUMFLEXEQUAL,
    '<<=': LEFTSHIFTEQUAL,
    '>>=': RIGHTSHIFTEQUAL,
    '**=': DOUBLESTAREQUAL,
    '//': DOUBLESLASH,
    '//=': DOUBLESLASHEQUAL,
    '@': AT,
    '@=': ATEQUAL,
}

class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
    def __repr__(self):
        annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
        return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
                self._replace(type=annotated_type))

    @property
    def exact_type(self):
        if self.type == OP and self.string in EXACT_TOKEN_TYPES:
            return EXACT_TOKEN_TYPES[self.string]
        else:
            return self.type

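# Example (editor's sketch): a generic OP token maps back to its exact
# operator type through the EXACT_TOKEN_TYPES table above.
#
#     tok = TokenInfo(OP, '+', (1, 0), (1, 1), '+')
#     assert tok.type == OP and tok.exact_type == PLUS
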
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'

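# For instance (sketch): group('a', 'b') -> '(a|b)', any('a', 'b') ->
# '(a|b)*', and maybe('a', 'b') -> '(a|b)?'.
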
# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'

Hexnumber = r'0[xX][0-9a-fA-F]+'
Binnumber = r'0[bB][01]+'
Octnumber = r'0[oO][0-7]+'
Decnumber = r'(?:0+|[1-9][0-9]*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9]+'
Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent)
Expfloat = r'[0-9]+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)

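# Sketch of what the composed pattern accepts (editor's addition):
#
#     import re
#     assert re.fullmatch(Number, '0x1F')
#     assert re.fullmatch(Number, '3.14e-10')
#     assert re.fullmatch(Number, '1j')
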
StringPrefix = r'(?:[bB][rR]?|[rR][bB]?|[uU])?'

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
                 r"//=?", r"->",
                 r"[+\-*/%&@|^=<>]=?",
                 r"~")

Bracket = '[][(){}]'
Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
Funny = group(Operator, Bracket, Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)

def _compile(expr):
    return re.compile(expr, re.UNICODE)

endpats = {"'": Single, '"': Double,
           "'''": Single3, '"""': Double3,
           "r'''": Single3, 'r"""': Double3,
           "b'''": Single3, 'b"""': Double3,
           "R'''": Single3, 'R"""': Double3,
           "B'''": Single3, 'B"""': Double3,
           "br'''": Single3, 'br"""': Double3,
           "bR'''": Single3, 'bR"""': Double3,
           "Br'''": Single3, 'Br"""': Double3,
           "BR'''": Single3, 'BR"""': Double3,
           "rb'''": Single3, 'rb"""': Double3,
           "Rb'''": Single3, 'Rb"""': Double3,
           "rB'''": Single3, 'rB"""': Double3,
           "RB'''": Single3, 'RB"""': Double3,
           "u'''": Single3, 'u"""': Double3,
           "U'''": Single3, 'U"""': Double3,
           'r': None, 'R': None, 'b': None, 'B': None,
           'u': None, 'U': None}

triple_quoted = {}
for t in ("'''", '"""',
          "r'''", 'r"""', "R'''", 'R"""',
          "b'''", 'b"""', "B'''", 'B"""',
          "br'''", 'br"""', "Br'''", 'Br"""',
          "bR'''", 'bR"""', "BR'''", 'BR"""',
          "rb'''", 'rb"""', "rB'''", 'rB"""',
          "Rb'''", 'Rb"""', "RB'''", 'RB"""',
          "u'''", 'u"""', "U'''", 'U"""',
          ):
    triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
          "r'", 'r"', "R'", 'R"',
          "b'", 'b"', "B'", 'B"',
          "br'", 'br"', "Br'", 'Br"',
          "bR'", 'bR"', "BR'", 'BR"',
          "rb'", 'rb"', "rB'", 'rB"',
          "Rb'", 'Rb"', "RB'", 'RB"',
          "u'", 'u"', "U'", 'U"',
          ):
    single_quoted[t] = t

tabsize = 8

class TokenError(Exception): pass

class StopTokenizing(Exception): pass


class Untokenizer:

    def __init__(self):
        self.tokens = []
        self.prev_row = 1
        self.prev_col = 0
        self.encoding = None

    def add_whitespace(self, start):
        row, col = start
        if row < self.prev_row or row == self.prev_row and col < self.prev_col:
            raise ValueError("start ({},{}) precedes previous end ({},{})"
                             .format(row, col, self.prev_row, self.prev_col))
        row_offset = row - self.prev_row
        if row_offset:
            self.tokens.append("\\\n" * row_offset)
            self.prev_col = 0
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        it = iter(iterable)
        for t in it:
            if len(t) == 2:
                self.compat(t, it)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENCODING:
                self.encoding = token
                continue
            if tok_type == ENDMARKER:
                break
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        indents = []
        toks_append = self.tokens.append
        startline = token[0] in (NEWLINE, NL)
        prevstring = False

        for tok in chain([token], iterable):
            toknum, tokval = tok[:2]
            if toknum == ENCODING:
                self.encoding = tokval
                continue

            if toknum in (NAME, NUMBER, ASYNC, AWAIT):
                tokval += ' '

            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False

            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)


def untokenize(iterable):
    """Transform tokens back into Python source code.
    It returns a bytes object, encoded using the ENCODING
    token, which is the first token sequence output by tokenize.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output bytes will tokenize back to the input
        t1 = [tok[:2] for tok in tokenize(f.readline)]
        newcode = untokenize(t1)
        readline = BytesIO(newcode).readline
        t2 = [tok[:2] for tok in tokenize(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    out = ut.untokenize(iterable)
    if ut.encoding is not None:
        out = out.encode(ut.encoding)
    return out

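# Round-trip sketch (editor's addition): with full 5-tuples the exact
# source bytes come back out.
#
#     from io import BytesIO
#     source = b"x = 1\n"
#     toks = list(tokenize(BytesIO(source).readline))
#     assert untokenize(toks) == source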

def _get_normal_name(orig_enc):
    """Imitates get_normal_name in tokenizer.c."""
    # Only care about the first 12 characters.
    enc = orig_enc[:12].lower().replace("_", "-")
    if enc == "utf-8" or enc.startswith("utf-8-"):
        return "utf-8"
    if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
       enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
        return "iso-8859-1"
    return orig_enc

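# E.g. (sketch): _get_normal_name("UTF_8") == "utf-8",
# _get_normal_name("Latin-1") == "iso-8859-1"; unknown names pass through.
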
def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file.  It requires one argument, readline,
    in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.

    It detects the encoding from the presence of a UTF-8 BOM or an encoding
    cookie as specified in PEP-0263.  If both a BOM and a cookie are present,
    but disagree, a SyntaxError will be raised.  If the encoding cookie is an
    invalid charset, a SyntaxError will be raised.  Note that if a UTF-8 BOM
    is found, 'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    try:
        filename = readline.__self__.name
    except AttributeError:
        filename = None
    bom_found = False
    encoding = None
    default = 'utf-8'
    def read_or_stop():
        try:
            return readline()
        except StopIteration:
            return b''

    def find_cookie(line):
        try:
            # Decode as UTF-8. Either the line is an encoding declaration,
            # in which case it should be pure ASCII, or it must be UTF-8
            # per default encoding.
            line_string = line.decode('utf-8')
        except UnicodeDecodeError:
            msg = "invalid or missing encoding declaration"
            if filename is not None:
                msg = '{} for {!r}'.format(msg, filename)
            raise SyntaxError(msg)

        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            if filename is None:
                msg = "unknown encoding: " + encoding
            else:
                msg = "unknown encoding for {!r}: {}".format(filename,
                                                             encoding)
            raise SyntaxError(msg)

        if bom_found:
            if encoding != 'utf-8':
                # This behaviour mimics the Python interpreter
                if filename is None:
                    msg = 'encoding problem: utf-8'
                else:
                    msg = 'encoding problem for {!r}: utf-8'.format(filename)
                raise SyntaxError(msg)
            encoding += '-sig'
        return encoding

    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []

    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]
    if not blank_re.match(first):
        return default, [first]

    second = read_or_stop()
    if not second:
        return default, [first]

    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]

    return default, [first, second]

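# Usage sketch (editor's addition):
#
#     from io import BytesIO
#     enc, lines = detect_encoding(BytesIO(b"# coding: latin-1\n").readline)
#     assert enc == 'iso-8859-1' and lines == [b"# coding: latin-1\n"]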

def open(filename):
    """Open a file in read-only mode using the encoding detected by
    detect_encoding().
    """
    buffer = _builtin_open(filename, 'rb')
    try:
        encoding, lines = detect_encoding(buffer.readline)
        buffer.seek(0)
        text = TextIOWrapper(buffer, encoding, line_buffering=True)
        text.mode = 'r'
        return text
    except:
        buffer.close()
        raise

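# Sketch (editor's addition; 'example.py' is a stand-in filename):
#
#     with open('example.py') as f:    # tokenize.open, not builtins.open
#         text = f.read()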

def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects.  Each call to the function
    should return one line of input as bytes.  Alternatively, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile, 'rb').__next__  # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found.  The line passed is the
    logical line; continuation lines are included.

    The first token sequence will always be an ENCODING token
    which tells you which encoding was used to decode the bytes stream.
    """
    # This import is here to avoid problems when the itertools module is not
    # built yet and tokenize is imported.
    from itertools import chain, repeat
    encoding, consumed = detect_encoding(readline)
    rl_gen = iter(readline, b"")
    empty = repeat(b"")
    return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)

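# Usage sketch (editor's addition): any object with a bytes readline works.
#
#     from io import BytesIO
#     for tok in tokenize(BytesIO(b"def f():\n    pass\n").readline):
#         print(tok)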

def _tokenize(readline, encoding):
    lnum = parenlev = continued = 0
    numchars = '0123456789'
    contstr, needcont = '', 0
    contline = None
    indents = [0]

    # 'stashed' and 'ctx' are used for async/await parsing: 'stashed' holds a
    # NAME token for 'async' until the next token shows whether it starts an
    # 'async def', and 'ctx' is a stack of (function kind, indent) frames.
    stashed = None
    ctx = [('sync', 0)]

    if encoding is not None:
        if encoding == "utf-8-sig":
            # BOM will already have been stripped.
            encoding = "utf-8"
        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
    while True:                                # loop over lines in stream
        try:
            line = readline()
        except StopIteration:
            line = b''

        if encoding is not None:
            line = line.decode(encoding)
        lnum += 1
        pos, max = 0, len(line)

        if contstr:                            # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield TokenInfo(STRING, contstr + line[:end],
                                strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                yield TokenInfo(ERRORTOKEN, contstr + line,
                                strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ':
                    column += 1
                elif line[pos] == '\t':
                    column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f':
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break

            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    nl_pos = pos + len(comment_token)
                    yield TokenInfo(COMMENT, comment_token,
                                    (lnum, pos), (lnum, pos + len(comment_token)), line)
                    yield TokenInfo(NL, line[nl_pos:],
                                    (lnum, nl_pos), (lnum, len(line)), line)
                else:
                    yield TokenInfo((NL, COMMENT)[line[pos] == '#'], line[pos:],
                                    (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]

                cur_indent = indents[-1]
                while len(ctx) > 1 and ctx[-1][1] >= cur_indent:
                    ctx.pop()

                yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)

        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            pseudomatch = _compile(PseudoToken).match(line, pos)
            if pseudomatch:                    # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                if start == end:
                    continue
                token, initial = line[start:end], line[start]

                if (initial in numchars or     # ordinary number
                    (initial == '.' and token != '.' and token != '...')):
                    yield TokenInfo(NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    if stashed:
                        yield stashed
                        stashed = None
                    yield TokenInfo(NL if parenlev > 0 else NEWLINE,
                                    token, spos, epos, line)
                elif initial == '#':
                    assert not token.endswith("\n")
                    if stashed:
                        yield stashed
                        stashed = None
                    yield TokenInfo(COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = _compile(endpats[token])
                    endmatch = endprog.match(line, pos)
                    if endmatch:               # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield TokenInfo(STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)   # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                    token[:2] in single_quoted or \
                    token[:3] in single_quoted:
                    if token[-1] == '\n':      # continued string
                        strstart = (lnum, start)
                        endprog = _compile(endpats[initial] or
                                           endpats[token[1]] or
                                           endpats[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                      # ordinary string
                        yield TokenInfo(STRING, token, spos, epos, line)
                elif initial.isidentifier():   # ordinary name
                    if token in ('async', 'await'):
                        if ctx[-1][0] == 'async' and ctx[-1][1] < indents[-1]:
                            yield TokenInfo(
                                ASYNC if token == 'async' else AWAIT,
                                token, spos, epos, line)
                            continue

                    tok = TokenInfo(NAME, token, spos, epos, line)
                    if token == 'async' and not stashed:
                        stashed = tok
                        continue

                    if token == 'def':
                        if (stashed
                                and stashed.type == NAME
                                and stashed.string == 'async'):

                            ctx.append(('async', indents[-1]))

                            yield TokenInfo(ASYNC, stashed.string,
                                            stashed.start, stashed.end,
                                            stashed.line)
                            stashed = None
                        else:
                            ctx.append(('sync', indents[-1]))

                    if stashed:
                        yield stashed
                        stashed = None

                    yield tok
                elif initial == '\\':          # continued stmt
                    continued = 1
                else:
                    if initial in '([{':
                        parenlev += 1
                    elif initial in ')]}':
                        parenlev -= 1
                    if stashed:
                        yield stashed
                        stashed = None
                    yield TokenInfo(OP, token, spos, epos, line)
            else:
                yield TokenInfo(ERRORTOKEN, line[pos],
                                (lnum, pos), (lnum, pos+1), line)
                pos += 1

    if stashed:
        yield stashed
        stashed = None

    for indent in indents[1:]:                 # pop remaining indent levels
        yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')


# An undocumented, backwards compatible API for all the places in the
# standard library that expect to be able to use tokenize with strings
def generate_tokens(readline):
    return _tokenize(readline, None)

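# Sketch (editor's addition): unlike tokenize(), generate_tokens() takes a
# *str* readline and emits no ENCODING token.
#
#     import io
#     toks = list(generate_tokens(io.StringIO("x = 1\n").readline))
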
def main():
    import argparse

    # Helper error handling routines
    def perror(message):
        print(message, file=sys.stderr)

    def error(message, filename=None, location=None):
        if location:
            args = (filename,) + location + (message,)
            perror("%s:%d:%d: error: %s" % args)
        elif filename:
            perror("%s: error: %s" % (filename, message))
        else:
            perror("error: %s" % message)
        sys.exit(1)

    # Parse the arguments and options
    parser = argparse.ArgumentParser(prog='python -m tokenize')
    parser.add_argument(dest='filename', nargs='?',
                        metavar='filename.py',
                        help='the file to tokenize; defaults to stdin')
    parser.add_argument('-e', '--exact', dest='exact', action='store_true',
                        help='display token names using the exact type')
    args = parser.parse_args()

    try:
        # Tokenize the input
        if args.filename:
            filename = args.filename
            with _builtin_open(filename, 'rb') as f:
                tokens = list(tokenize(f.readline))
        else:
            filename = "<stdin>"
            tokens = _tokenize(sys.stdin.readline, None)

        # Output the tokenization
        for token in tokens:
            token_type = token.type
            if args.exact:
                token_type = token.exact_type
            token_range = "%d,%d-%d,%d:" % (token.start + token.end)
            print("%-20s%-15s%-15r" %
                  (token_range, tok_name[token_type], token.string))
    except IndentationError as err:
        line, column = err.args[1][1:3]
        error(err.args[0], filename, (line, column))
    except TokenError as err:
        line, column = err.args[1]
        error(err.args[0], filename, (line, column))
    except SyntaxError as err:
        error(err, filename)
    except OSError as err:
        error(err)
    except KeyboardInterrupt:
        print("interrupted\n")
    except Exception as err:
        perror("unexpected error: %s" % err)
        raise

if __name__ == "__main__":
    main()
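
# Command-line sketch (editor's addition; example.py stands in for any
# source file):
#
#     $ python -m tokenize example.py        # tokenize a file
#     $ python -m tokenize -e example.py     # report exact operator types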