"""Tokenization help for Python programs.

tokenize(readline) is a generator that breaks a stream of bytes into
Python tokens.  It decodes the bytes according to PEP-0263 for
determining source file encoding.

It accepts a readline-like method which is called repeatedly to get the
next line of input (or b"" for EOF).  It generates 5-tuples with these
members:

    the token type (see token.py)
    the token (a string)
    the starting (row, column) indices of the token (a 2-tuple of ints)
    the ending (row, column) indices of the token (a 2-tuple of ints)
    the original line (string)

It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators.  Additionally, all token lists start with an ENCODING token
which tells you which encoding was used to decode the bytes stream.
"""

__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
               'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
               'Michael Foord')
from builtins import open as _builtin_open
from codecs import lookup, BOM_UTF8
import collections
from io import TextIOWrapper
import itertools as _itertools
import re
import sys
from token import *

cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)

import token
__all__ = token.__all__ + ["tokenize", "generate_tokens", "detect_encoding",
                           "untokenize", "TokenInfo"]
del token

EXACT_TOKEN_TYPES = {
    '(':   LPAR,
    ')':   RPAR,
    '[':   LSQB,
    ']':   RSQB,
    ':':   COLON,
    ',':   COMMA,
    ';':   SEMI,
    '+':   PLUS,
    '-':   MINUS,
    '*':   STAR,
    '/':   SLASH,
    '|':   VBAR,
    '&':   AMPER,
    '<':   LESS,
    '>':   GREATER,
    '=':   EQUAL,
    '.':   DOT,
    '%':   PERCENT,
    '{':   LBRACE,
    '}':   RBRACE,
    '==':  EQEQUAL,
    '!=':  NOTEQUAL,
    '<=':  LESSEQUAL,
    '>=':  GREATEREQUAL,
    '~':   TILDE,
    '^':   CIRCUMFLEX,
    '<<':  LEFTSHIFT,
    '>>':  RIGHTSHIFT,
    '**':  DOUBLESTAR,
    '+=':  PLUSEQUAL,
    '-=':  MINEQUAL,
    '*=':  STAREQUAL,
    '/=':  SLASHEQUAL,
    '%=':  PERCENTEQUAL,
    '&=':  AMPEREQUAL,
    '|=':  VBAREQUAL,
    '^=':  CIRCUMFLEXEQUAL,
    '<<=': LEFTSHIFTEQUAL,
    '>>=': RIGHTSHIFTEQUAL,
    '**=': DOUBLESTAREQUAL,
    '//':  DOUBLESLASH,
    '//=': DOUBLESLASHEQUAL,
    '...': ELLIPSIS,
    '->':  RARROW,
    '@':   AT,
    '@=':  ATEQUAL,
}

class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
    def __repr__(self):
        annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
        return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
                self._replace(type=annotated_type))

    @property
    def exact_type(self):
        if self.type == OP and self.string in EXACT_TOKEN_TYPES:
            return EXACT_TOKEN_TYPES[self.string]
        else:
            return self.type

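# Illustrative sketch (comment only, not executed): how exact_type refines
# a generic OP token; uses the public tokenize() API defined later in this
# module.
#
#     from io import BytesIO
#     toks = list(tokenize(BytesIO(b"x += 1\n").readline))
#     op = next(t for t in toks if t.type == OP)
#     op.string == '+=' and op.exact_type == PLUSEQUAL   # True
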
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
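# For example, group('a', 'b') == '(a|b)', any('x') == '(x)*', and
# maybe('x') == '(x)?'.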

# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'

Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+'
Binnumber = r'0[bB](?:_?[01])+'
Octnumber = r'0[oO](?:_?[0-7])+'
Decnumber = r'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9](?:_?[0-9])*'
Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?',
                   r'\.[0-9](?:_?[0-9])*') + maybe(Exponent)
Expfloat = r'[0-9](?:_?[0-9])*' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9](?:_?[0-9])*[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)

# Return the empty string, plus all of the valid string prefixes.
def _all_string_prefixes():
    # The valid string prefixes.  Only the lower case versions are
    #  listed here, without any permutations ('fr' is included, but
    #  not 'rf'); the permutations are generated below.
    _valid_string_prefixes = ['b', 'r', 'u', 'f', 'br', 'fr']
    # if we add binary f-strings, add: ['fb', 'fbr']
    result = {''}
    for prefix in _valid_string_prefixes:
        for t in _itertools.permutations(prefix):
            # create a list with upper and lower versions of each
            #  character
            for u in _itertools.product(*[(c, c.upper()) for c in t]):
                result.add(''.join(u))
    return result

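# For example, the result contains '', 'b', 'B', 'f', 'F', and every
# case/order permutation of the two-character prefixes: 'br', 'bR', 'Br',
# 'BR', 'rb', 'rB', 'Rb', 'RB' (and likewise for 'fr').
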
def _compile(expr):
    return re.compile(expr, re.UNICODE)

# Note that since _all_string_prefixes includes the empty string,
#  StringPrefix can be the empty string (making it optional).
StringPrefix = group(*_all_string_prefixes())

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
                 r"//=?", r"->",
                 r"[+\-*/%&@|^=<>]=?",
                 r"~")

Bracket = '[][(){}]'
Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
Funny = group(Operator, Bracket, Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)

# For a given string prefix plus quotes, endpats maps it to a regex
#  to match the remainder of that string. _prefix can be empty, for
#  a normal single or triple quoted string (with no prefix).
endpats = {}
for _prefix in _all_string_prefixes():
    endpats[_prefix + "'"] = Single
    endpats[_prefix + '"'] = Double
    endpats[_prefix + "'''"] = Single3
    endpats[_prefix + '"""'] = Double3

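# For example, endpats["'"] is Single, endpats['"""'] is Double3, and
# endpats["rb'"] is Single (a raw bytes literal ends like any other
# single-quoted string).
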
# A set of all of the single and triple quoted string prefixes,
#  including the opening quotes.
single_quoted = set()
triple_quoted = set()
for t in _all_string_prefixes():
    for u in (t + '"', t + "'"):
        single_quoted.add(u)
    for u in (t + '"""', t + "'''"):
        triple_quoted.add(u)

tabsize = 8

class TokenError(Exception): pass

class StopTokenizing(Exception): pass


class Untokenizer:

    def __init__(self):
        self.tokens = []
        self.prev_row = 1
        self.prev_col = 0
        self.encoding = None

    def add_whitespace(self, start):
        row, col = start
        if row < self.prev_row or row == self.prev_row and col < self.prev_col:
            raise ValueError("start ({},{}) precedes previous end ({},{})"
                             .format(row, col, self.prev_row, self.prev_col))
        row_offset = row - self.prev_row
        if row_offset:
            self.tokens.append("\\\n" * row_offset)
            self.prev_col = 0
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        it = iter(iterable)
        indents = []
        startline = False
        for t in it:
            if len(t) == 2:
                self.compat(t, it)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENCODING:
                self.encoding = token
                continue
            if tok_type == ENDMARKER:
                break
            if tok_type == INDENT:
                indents.append(token)
                continue
            elif tok_type == DEDENT:
                indents.pop()
                self.prev_row, self.prev_col = end
                continue
            elif tok_type in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                indent = indents[-1]
                if start[1] >= len(indent):
                    self.tokens.append(indent)
                    self.prev_col = len(indent)
                startline = False
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        indents = []
        toks_append = self.tokens.append
        startline = token[0] in (NEWLINE, NL)
        prevstring = False

        for tok in _itertools.chain([token], iterable):
            toknum, tokval = tok[:2]
            if toknum == ENCODING:
                self.encoding = tokval
                continue

            if toknum in (NAME, NUMBER):
                tokval += ' '

            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False

            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)


def untokenize(iterable):
    """Transform tokens back into Python source code.
    It returns a bytes object, encoded using the ENCODING
    token, which is the first token sequence output by tokenize.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two-element sequences are passed, the output cannot reproduce
    the original whitespace, but it will tokenize back to the same
    token stream.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output bytes will tokenize back to the input
        t1 = [tok[:2] for tok in tokenize(f.readline)]
        newcode = untokenize(t1)
        readline = BytesIO(newcode).readline
        t2 = [tok[:2] for tok in tokenize(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    out = ut.untokenize(iterable)
    if ut.encoding is not None:
        out = out.encode(ut.encoding)
    return out

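# Illustrative round-trip sketch (comment only, not executed); uses just
# the APIs defined in this module:
#
#     from io import BytesIO
#     source = b"if x:\n    y = 1\n"
#     toks = list(tokenize(BytesIO(source).readline))
#     untokenize(toks) == source               # full 5-tuples: exact match
#     untokenize(t[:2] for t in toks)          # 2-tuples: spacing normalized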

def _get_normal_name(orig_enc):
    """Imitates get_normal_name in tokenizer.c."""
    # Only care about the first 12 characters.
    enc = orig_enc[:12].lower().replace("_", "-")
    if enc == "utf-8" or enc.startswith("utf-8-"):
        return "utf-8"
    if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
       enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
        return "iso-8859-1"
    return orig_enc

def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file.  It requires one argument, readline,
    in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.

    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263.  If both a bom and a cookie are present,
    but disagree, a SyntaxError will be raised.  If the encoding cookie is an
    invalid charset, raise a SyntaxError.  Note that if a utf-8 bom is found,
    'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    try:
        filename = readline.__self__.name
    except AttributeError:
        filename = None
    bom_found = False
    encoding = None
    default = 'utf-8'
    def read_or_stop():
        try:
            return readline()
        except StopIteration:
            return b''

    def find_cookie(line):
        try:
            # Decode as UTF-8. Either the line is an encoding declaration,
            # in which case it should be pure ASCII, or it must be UTF-8
            # per default encoding.
            line_string = line.decode('utf-8')
        except UnicodeDecodeError:
            msg = "invalid or missing encoding declaration"
            if filename is not None:
                msg = '{} for {!r}'.format(msg, filename)
            raise SyntaxError(msg)

        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            if filename is None:
                msg = "unknown encoding: " + encoding
            else:
                msg = "unknown encoding for {!r}: {}".format(filename,
                                                             encoding)
            raise SyntaxError(msg)

        if bom_found:
            if encoding != 'utf-8':
                # This behaviour mimics the Python interpreter
                if filename is None:
                    msg = 'encoding problem: utf-8'
                else:
                    msg = 'encoding problem for {!r}: utf-8'.format(filename)
                raise SyntaxError(msg)
            encoding += '-sig'
        return encoding

    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []

    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]
    if not blank_re.match(first):
        return default, [first]

    second = read_or_stop()
    if not second:
        return default, [first]

    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]

    return default, [first, second]

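# Example sketch (comment only): detecting a PEP-263 cookie from a buffer.
#
#     from io import BytesIO
#     buf = BytesIO(b'# -*- coding: latin-1 -*-\nx = 1\n')
#     encoding, lines = detect_encoding(buf.readline)
#     # encoding == 'iso-8859-1'; lines holds the single line already read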

def open(filename):
    """Open a file in read only mode using the encoding detected by
    detect_encoding().
    """
    buffer = _builtin_open(filename, 'rb')
    try:
        encoding, lines = detect_encoding(buffer.readline)
        buffer.seek(0)
        text = TextIOWrapper(buffer, encoding, line_buffering=True)
        text.mode = 'r'
        return text
    except:
        buffer.close()
        raise

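# Example sketch (comment only); 'example.py' is a hypothetical path:
#
#     with open('example.py') as f:    # this module's open(), not the builtin
#         source = f.read()            # str, decoded per the detected encoding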

def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects.  Each call to the function
    should return one line of input as bytes.  Alternatively, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile, 'rb').__next__  # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found.  The line passed is the
    logical line; continuation lines are included.

    The first token sequence will always be an ENCODING token
    which tells you which encoding was used to decode the bytes stream.
    """
    encoding, consumed = detect_encoding(readline)
    empty = _itertools.repeat(b"")
    rl_gen = _itertools.chain(consumed, iter(readline, b""), empty)
    return _tokenize(rl_gen.__next__, encoding)

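# Minimal usage sketch for tokenize() (comment only; readline must
# return bytes):
#
#     from io import BytesIO
#     for tok in tokenize(BytesIO(b'total = 1 + 2\n').readline):
#         print(tok.type, tok.string, tok.start, tok.end)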

def _tokenize(readline, encoding):
    lnum = parenlev = continued = 0
    numchars = '0123456789'
    contstr, needcont = '', 0
    contline = None
    indents = [0]

    if encoding is not None:
        if encoding == "utf-8-sig":
            # BOM will already have been stripped.
            encoding = "utf-8"
        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
    last_line = b''
    line = b''
    while True:                                # loop over lines in stream
        try:
            # We capture the value of the line variable here because
            # readline uses the empty string '' to signal end of input,
            # hence `line` itself will always be overwritten at the end
            # of this loop.
            last_line = line
            line = readline()
        except StopIteration:
            line = b''

        if encoding is not None:
            line = line.decode(encoding)
        lnum += 1
        pos, max = 0, len(line)

        if contstr:                            # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield TokenInfo(STRING, contstr + line[:end],
                       strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                yield TokenInfo(ERRORTOKEN, contstr + line,
                           strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ':
                    column += 1
                elif line[pos] == '\t':
                    column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f':
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break

            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    yield TokenInfo(COMMENT, comment_token,
                           (lnum, pos), (lnum, pos + len(comment_token)), line)
                    pos += len(comment_token)

                yield TokenInfo(NL, line[pos:],
                           (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]

                yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)

        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            pseudomatch = _compile(PseudoToken).match(line, pos)
            if pseudomatch:                                # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                if start == end:
                    continue
                token, initial = line[start:end], line[start]

                if (initial in numchars or                 # ordinary number
                    (initial == '.' and token != '.' and token != '...')):
                    yield TokenInfo(NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    if parenlev > 0:
                        yield TokenInfo(NL, token, spos, epos, line)
                    else:
                        yield TokenInfo(NEWLINE, token, spos, epos, line)

                elif initial == '#':
                    assert not token.endswith("\n")
                    yield TokenInfo(COMMENT, token, spos, epos, line)

                elif token in triple_quoted:
                    endprog = _compile(endpats[token])
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield TokenInfo(STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break

                # Check up to the first 3 chars of the token to see if
                #  they're in the single_quoted set. If so, they start
                #  a string.
                # We're using the first 3, because we're looking for
                #  "rb'" (for example) at the start of the token. If
                #  we switch to longer prefixes, this needs to be
                #  adjusted.
                # Note that initial == token[:1].
                # Also note that single quote checking must come after
                #  triple quote checking (above).
                elif (initial in single_quoted or
                      token[:2] in single_quoted or
                      token[:3] in single_quoted):
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        # Again, using the first 3 chars of the
                        #  token. This is looking for the matching end
                        #  regex for the correct type of quote
                        #  character. So it's really looking for
                        #  endpats["'"] or endpats['"'], by trying to
                        #  skip string prefix characters, if any.
                        endprog = _compile(endpats.get(initial) or
                                           endpats.get(token[1]) or
                                           endpats.get(token[2]))
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        yield TokenInfo(STRING, token, spos, epos, line)

                elif initial.isidentifier():               # ordinary name
                    yield TokenInfo(NAME, token, spos, epos, line)
                elif initial == '\\':                      # continued stmt
                    continued = 1
                else:
                    if initial in '([{':
                        parenlev += 1
                    elif initial in ')]}':
                        parenlev -= 1
                    yield TokenInfo(OP, token, spos, epos, line)
            else:
                yield TokenInfo(ERRORTOKEN, line[pos],
                           (lnum, pos), (lnum, pos+1), line)
                pos += 1

    # Add an implicit NEWLINE if the input doesn't end in one
    if last_line and last_line[-1] not in '\r\n':
        yield TokenInfo(NEWLINE, '', (lnum - 1, len(last_line)), (lnum - 1, len(last_line) + 1), '')
    for indent in indents[1:]:                 # pop remaining indent levels
        yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')


def generate_tokens(readline):
    """Tokenize a source reading Python code as unicode strings.

    This has the same API as tokenize(), except that it expects the *readline*
    callable to return str objects instead of bytes.
    """
    return _tokenize(readline, None)

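# Sketch of the str-based variant (comment only): same token stream as
# tokenize(), except no ENCODING token is produced and readline returns str.
#
#     import io
#     for tok in generate_tokens(io.StringIO('x = "hi"\n').readline):
#         print(tok)
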
def main():
    import argparse

    # Helper error handling routines
    def perror(message):
        sys.stderr.write(message)
        sys.stderr.write('\n')

    def error(message, filename=None, location=None):
        if location:
            args = (filename,) + location + (message,)
            perror("%s:%d:%d: error: %s" % args)
        elif filename:
            perror("%s: error: %s" % (filename, message))
        else:
            perror("error: %s" % message)
        sys.exit(1)

    # Parse the arguments and options
    parser = argparse.ArgumentParser(prog='python -m tokenize')
    parser.add_argument(dest='filename', nargs='?',
                        metavar='filename.py',
                        help='the file to tokenize; defaults to stdin')
    parser.add_argument('-e', '--exact', dest='exact', action='store_true',
                        help='display token names using the exact type')
    args = parser.parse_args()

    try:
        # Tokenize the input
        if args.filename:
            filename = args.filename
            with _builtin_open(filename, 'rb') as f:
                tokens = list(tokenize(f.readline))
        else:
            filename = "<stdin>"
            tokens = _tokenize(sys.stdin.readline, None)

        # Output the tokenization
        for token in tokens:
            token_type = token.type
            if args.exact:
                token_type = token.exact_type
            token_range = "%d,%d-%d,%d:" % (token.start + token.end)
            print("%-20s%-15s%-15r" %
                  (token_range, tok_name[token_type], token.string))
    except IndentationError as err:
        line, column = err.args[1][1:3]
        error(err.args[0], filename, (line, column))
    except TokenError as err:
        line, column = err.args[1]
        error(err.args[0], filename, (line, column))
    except SyntaxError as err:
        error(err, filename)
    except OSError as err:
        error(err)
    except KeyboardInterrupt:
        print("interrupted\n")
    except Exception as err:
        perror("unexpected error: %s" % err)
        raise

if __name__ == "__main__":
    main()