"""Tokenization help for Python programs.

tokenize(readline) is a generator that breaks a stream of bytes into
Python tokens.  It decodes the bytes according to PEP-0263 for
determining source file encoding.

It accepts a readline-like method which is called repeatedly to get the
next line of input (or b"" for EOF).  It generates 5-tuples with these
members:

    the token type (see token.py)
    the token (a string)
    the starting (row, column) indices of the token (a 2-tuple of ints)
    the ending (row, column) indices of the token (a 2-tuple of ints)
    the original line (string)

It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators.  Additionally, all token lists start with an ENCODING token
which tells you which encoding was used to decode the bytes stream.
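
Example (an illustrative sketch, not part of the documented API; any object
with a bytes-returning readline method works):

    from io import BytesIO
    for tok in tokenize(BytesIO(b"x = 1").readline):
        print(tok)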
21"""
Guido van Rossumb51eaa11997-03-07 00:21:55 +000022
Ka-Ping Yee244c5932001-03-01 13:56:40 +000023__author__ = 'Ka-Ping Yee <ping@lfw.org>'
Trent Nelson428de652008-03-18 22:41:35 +000024__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
25 'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
26 'Michael Foord')
Florent Xicluna43e4ea12010-09-03 19:54:02 +000027import re
28import sys
Guido van Rossumfc6f5331997-03-07 00:21:12 +000029from token import *
Benjamin Peterson433f32c2008-12-12 01:25:05 +000030from codecs import lookup, BOM_UTF8
Raymond Hettinger3fb79c72010-09-09 07:15:18 +000031import collections
Victor Stinner58c07522010-11-09 01:08:59 +000032from io import TextIOWrapper
cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")

import token
__all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
                           "NL", "untokenize", "ENCODING", "TokenInfo"]
del token

COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
ENCODING = N_TOKENS + 2
tok_name[ENCODING] = 'ENCODING'
N_TOKENS += 3

class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
    def __repr__(self):
        annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
        return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
                self._replace(type=annotated_type))
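
# Illustrative only: a NAME token for "x" in the source "x = 1" would render
# roughly as
#   TokenInfo(type=1 (NAME), string='x', start=(1, 0), end=(1, 1), line='x = 1')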

def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
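
# For example (sketch): group('a', 'b') -> '(a|b)', any('a') -> '(a)*', and
# maybe('a') -> '(a)?'.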

# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'

Hexnumber = r'0[xX][0-9a-fA-F]+'
Binnumber = r'0[bB][01]+'
Octnumber = r'0[oO][0-7]+'
Decnumber = r'(?:0+|[1-9][0-9]*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9]+'
Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent)
Expfloat = r'[0-9]+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
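
# Illustrative matches (not exhaustive): Intnumber covers '0x1f', '0b101' and
# '0o17'; Floatnumber covers '3.14' and '1e-4'; Imagnumber covers '10j'.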

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[bB]?[rR]?'''", '[bB]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[bB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               r'[bB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
                 r"//=?", r"->",
                 r"[+\-*/%&|^=<>]=?",
                 r"~")

Bracket = '[][(){}]'
Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
Funny = group(Operator, Bracket, Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(r"[bB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                r'[bB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
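
# Sketch of how the pseudo pattern is used below: pseudoprog.match("  x = 1")
# skips the leading whitespace and captures 'x' as group 1.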

def _compile(expr):
    return re.compile(expr, re.UNICODE)

tokenprog, pseudoprog, single3prog, double3prog = map(
    _compile, (Token, PseudoToken, Single3, Double3))
endprogs = {"'": _compile(Single), '"': _compile(Double),
            "'''": single3prog, '"""': double3prog,
            "r'''": single3prog, 'r"""': double3prog,
            "b'''": single3prog, 'b"""': double3prog,
            "br'''": single3prog, 'br"""': double3prog,
            "R'''": single3prog, 'R"""': double3prog,
            "B'''": single3prog, 'B"""': double3prog,
            "bR'''": single3prog, 'bR"""': double3prog,
            "Br'''": single3prog, 'Br"""': double3prog,
            "BR'''": single3prog, 'BR"""': double3prog,
            'r': None, 'R': None, 'b': None, 'B': None}

triple_quoted = {}
for t in ("'''", '"""',
          "r'''", 'r"""', "R'''", 'R"""',
          "b'''", 'b"""', "B'''", 'B"""',
          "br'''", 'br"""', "Br'''", 'Br"""',
          "bR'''", 'bR"""', "BR'''", 'BR"""'):
    triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
          "r'", 'r"', "R'", 'R"',
          "b'", 'b"', "B'", 'B"',
          "br'", 'br"', "Br'", 'Br"',
          "bR'", 'bR"', "BR'", 'BR"'):
    single_quoted[t] = t

del _compile

tabsize = 8

class TokenError(Exception): pass

class StopTokenizing(Exception): pass


class Untokenizer:

    def __init__(self):
        self.tokens = []
        self.prev_row = 1
        self.prev_col = 0
        self.encoding = None

    def add_whitespace(self, start):
        row, col = start
        assert row <= self.prev_row
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        for t in iterable:
            if len(t) == 2:
                self.compat(t, iterable)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENCODING:
                self.encoding = token
                continue
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        startline = False
        indents = []
        toks_append = self.tokens.append
        toknum, tokval = token

        if toknum in (NAME, NUMBER):
            tokval += ' '
        if toknum in (NEWLINE, NL):
            startline = True
        prevstring = False
        for tok in iterable:
            toknum, tokval = tok[:2]
            if toknum == ENCODING:
                self.encoding = tokval
                continue

            if toknum in (NAME, NUMBER):
                tokval += ' '

            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False

            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)


def untokenize(iterable):
    """Transform tokens back into Python source code.
    It returns a bytes object, encoded using the ENCODING
    token, which is the first token sequence output by tokenize.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output bytes will tokenize back to the input
        t1 = [tok[:2] for tok in tokenize(f.readline)]
        newcode = untokenize(t1)
        readline = BytesIO(newcode).readline
        t2 = [tok[:2] for tok in tokenize(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    out = ut.untokenize(iterable)
    if ut.encoding is not None:
        out = out.encode(ut.encoding)
    return out


def _get_normal_name(orig_enc):
    """Imitates get_normal_name in tokenizer.c."""
    # Only care about the first 12 characters.
    enc = orig_enc[:12].lower().replace("_", "-")
    if enc == "utf-8" or enc.startswith("utf-8-"):
        return "utf-8"
    if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
       enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
        return "iso-8859-1"
    return orig_enc

def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file.  It requires one argument, readline,
    in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.

    It detects the encoding from the presence of a UTF-8 BOM or an encoding
    cookie as specified in PEP 263.  If both a BOM and a cookie are present,
    but disagree, a SyntaxError will be raised.  If the encoding cookie is an
    invalid charset, a SyntaxError will also be raised.  Note that if a UTF-8
    BOM is found, 'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
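
    Example (an illustrative sketch using an in-memory stream):

        from io import BytesIO
        buf = BytesIO(b"# -*- coding: latin-1 -*-")
        encoding, lines = detect_encoding(buf.readline)
        # encoding == 'iso-8859-1'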
282 """
Trent Nelson428de652008-03-18 22:41:35 +0000283 bom_found = False
284 encoding = None
Benjamin Peterson689a5582010-03-18 22:29:52 +0000285 default = 'utf-8'
Trent Nelson428de652008-03-18 22:41:35 +0000286 def read_or_stop():
287 try:
288 return readline()
289 except StopIteration:
290 return b''
291
292 def find_cookie(line):
293 try:
294 line_string = line.decode('ascii')
295 except UnicodeDecodeError:
Benjamin Peterson433f32c2008-12-12 01:25:05 +0000296 return None
297
298 matches = cookie_re.findall(line_string)
299 if not matches:
300 return None
Benjamin Petersond3afada2009-10-09 21:43:09 +0000301 encoding = _get_normal_name(matches[0])
Benjamin Peterson433f32c2008-12-12 01:25:05 +0000302 try:
303 codec = lookup(encoding)
304 except LookupError:
305 # This behaviour mimics the Python interpreter
306 raise SyntaxError("unknown encoding: " + encoding)
307
Benjamin Peterson1613ed82010-03-18 22:34:15 +0000308 if bom_found:
309 if codec.name != 'utf-8':
310 # This behaviour mimics the Python interpreter
311 raise SyntaxError('encoding problem: utf-8')
312 encoding += '-sig'
Benjamin Peterson433f32c2008-12-12 01:25:05 +0000313 return encoding
Trent Nelson428de652008-03-18 22:41:35 +0000314
315 first = read_or_stop()
Benjamin Peterson433f32c2008-12-12 01:25:05 +0000316 if first.startswith(BOM_UTF8):
Trent Nelson428de652008-03-18 22:41:35 +0000317 bom_found = True
318 first = first[3:]
Benjamin Peterson689a5582010-03-18 22:29:52 +0000319 default = 'utf-8-sig'
Trent Nelson428de652008-03-18 22:41:35 +0000320 if not first:
Benjamin Peterson689a5582010-03-18 22:29:52 +0000321 return default, []
Trent Nelson428de652008-03-18 22:41:35 +0000322
323 encoding = find_cookie(first)
324 if encoding:
325 return encoding, [first]
326
327 second = read_or_stop()
328 if not second:
Benjamin Peterson689a5582010-03-18 22:29:52 +0000329 return default, [first]
Trent Nelson428de652008-03-18 22:41:35 +0000330
331 encoding = find_cookie(second)
332 if encoding:
333 return encoding, [first, second]
334
Benjamin Peterson689a5582010-03-18 22:29:52 +0000335 return default, [first, second]


_builtin_open = open

def open(filename):
    """Open a file in read only mode using the encoding detected by
    detect_encoding().
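
    Usage sketch (assumes a file named "example.py" exists on disk):

        with open("example.py") as f:
            source = f.read()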
343 """
344 buffer = _builtin_open(filename, 'rb')
345 encoding, lines = detect_encoding(buffer.readline)
346 buffer.seek(0)
347 text = TextIOWrapper(buffer, encoding, line_buffering=True)
348 text.mode = 'r'
349 return text
350
351
def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects.  Each call to the function
    should return one line of input as bytes.  Alternately, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile, 'rb').__next__  # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found.  The line passed is the
    logical line; continuation lines are included.

    The first token sequence will always be an ENCODING token
    which tells you which encoding was used to decode the bytes stream.
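
    Example (an illustrative sketch using an in-memory stream):

        from io import BytesIO
        toks = list(tokenize(BytesIO(b"pass").readline))
        # toks[0] is the ENCODING token; toks[0].string == 'utf-8'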
    """
    # This import is here to avoid problems when the itertools module is not
    # built yet and tokenize is imported.
    from itertools import chain, repeat
    encoding, consumed = detect_encoding(readline)
    rl_gen = iter(readline, b"")
    empty = repeat(b"")
    return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)


def _tokenize(readline, encoding):
    lnum = parenlev = continued = 0
    numchars = '0123456789'
    contstr, needcont = '', 0
    contline = None
    indents = [0]

    if encoding is not None:
        if encoding == "utf-8-sig":
            # BOM will already have been stripped.
            encoding = "utf-8"
        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
    while True:                                # loop over lines in stream
        try:
            line = readline()
        except StopIteration:
            line = b''

        if encoding is not None:
            line = line.decode(encoding)
        lnum += 1
        pos, max = 0, len(line)

        if contstr:                            # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield TokenInfo(STRING, contstr + line[:end],
                                strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                yield TokenInfo(ERRORTOKEN, contstr + line,
                                strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ':
                    column += 1
                elif line[pos] == '\t':
                    column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f':
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break

            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    nl_pos = pos + len(comment_token)
                    yield TokenInfo(COMMENT, comment_token,
                                    (lnum, pos), (lnum, pos + len(comment_token)), line)
                    yield TokenInfo(NL, line[nl_pos:],
                                    (lnum, nl_pos), (lnum, len(line)), line)
                else:
                    yield TokenInfo((NL, COMMENT)[line[pos] == '#'], line[pos:],
                                    (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]
                yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)

        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            pseudomatch = pseudoprog.match(line, pos)
            if pseudomatch:                    # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                token, initial = line[start:end], line[start]

                if (initial in numchars or     # ordinary number
                    (initial == '.' and token != '.' and token != '...')):
                    yield TokenInfo(NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    yield TokenInfo(NL if parenlev > 0 else NEWLINE,
                                    token, spos, epos, line)
                elif initial == '#':
                    assert not token.endswith("\n")
                    yield TokenInfo(COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = endprogs[token]
                    endmatch = endprog.match(line, pos)
                    if endmatch:               # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield TokenInfo(STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)       # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                        token[:2] in single_quoted or \
                        token[:3] in single_quoted:
                    if token[-1] == '\n':      # continued string
                        strstart = (lnum, start)
                        endprog = (endprogs[initial] or endprogs[token[1]] or
                                   endprogs[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                      # ordinary string
                        yield TokenInfo(STRING, token, spos, epos, line)
                elif initial.isidentifier():   # ordinary name
                    yield TokenInfo(NAME, token, spos, epos, line)
                elif initial == '\\':          # continued stmt
                    continued = 1
                else:
                    if initial in '([{':
                        parenlev += 1
                    elif initial in ')]}':
                        parenlev -= 1
                    yield TokenInfo(OP, token, spos, epos, line)
            else:
                yield TokenInfo(ERRORTOKEN, line[pos],
                                (lnum, pos), (lnum, pos+1), line)
                pos += 1

    for indent in indents[1:]:                 # pop remaining indent levels
        yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')


# An undocumented, backwards-compatible API for all the places in the
# standard library that expect to be able to use tokenize with strings.
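# Unlike tokenize(), readline must return str lines and no ENCODING token is
# produced.  Illustrative sketch:
#     import io
#     for tok in generate_tokens(io.StringIO("x = 1").readline):
#         print(tok)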
def generate_tokens(readline):
    return _tokenize(readline, None)

if __name__ == "__main__":
    # Quick sanity check
    s = b'''def parseline(self, line):
        """Parse the line into a command name and a string containing
        the arguments.  Returns a tuple containing (command, args, line).
        'command' and 'args' may be None if the line couldn't be parsed.
        """
        line = line.strip()
        if not line:
            return None, None, line
        elif line[0] == '?':
            line = 'help ' + line[1:]
        elif line[0] == '!':
            if hasattr(self, 'do_shell'):
                line = 'shell ' + line[1:]
            else:
                return None, None, line
        i, n = 0, len(line)
        while i < n and line[i] in self.identchars: i = i+1
        cmd, arg = line[:i], line[i:].strip()
        return cmd, arg, line
    '''
    for tok in tokenize(iter(s.splitlines()).__next__):
        print(tok)