"""Tokenization help for Python programs.

tokenize(readline) is a generator that breaks a stream of
bytes into Python tokens.  It decodes the bytes according to
PEP-0263 for determining source file encoding.

It accepts a readline-like method which is called
repeatedly to get the next line of input (or b"" for EOF).  It generates
5-tuples with these members:

    the token type (see token.py)
    the token (a string)
    the starting (row, column) indices of the token (a 2-tuple of ints)
    the ending (row, column) indices of the token (a 2-tuple of ints)
    the original line (string)

It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators.  Additionally, all token lists start with an ENCODING token
which tells you which encoding was used to decode the bytes stream."""
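# A minimal usage sketch (illustrative only, not part of the module API;
# assumes a file named "example.py" exists on disk):
#
#     import tokenize
#     with open("example.py", "rb") as f:
#         for tok in tokenize.tokenize(f.readline):
#             print(tok.type, repr(tok.string), tok.start, tok.end)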

__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
               'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
               'Michael Foord')
import re
from token import *
from codecs import lookup, BOM_UTF8
cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")

import token
__all__ = [x for x in dir(token) if not x.startswith("_")]
__all__.extend(["COMMENT", "tokenize", "detect_encoding", "NL", "untokenize",
                "ENCODING", "TokenInfo"])
del token

COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
ENCODING = N_TOKENS + 2
tok_name[ENCODING] = 'ENCODING'
N_TOKENS += 3

class TokenInfo(tuple):
    'TokenInfo(type, string, start, end, line)'

    __slots__ = ()

    _fields = ('type', 'string', 'start', 'end', 'line')

    def __new__(cls, type, string, start, end, line):
        return tuple.__new__(cls, (type, string, start, end, line))

    @classmethod
    def _make(cls, iterable, new=tuple.__new__, len=len):
        'Make a new TokenInfo object from a sequence or iterable'
        result = new(cls, iterable)
        if len(result) != 5:
            raise TypeError('Expected 5 arguments, got %d' % len(result))
        return result

    def __repr__(self):
        return 'TokenInfo(type=%r, string=%r, start=%r, end=%r, line=%r)' % self

    def _asdict(self):
        'Return a new dict which maps field names to their values'
        return dict(zip(self._fields, self))

    def _replace(self, **kwds):
        'Return a new TokenInfo object replacing specified fields with new values'
        result = self._make(map(kwds.pop, ('type', 'string', 'start', 'end', 'line'), self))
        if kwds:
            raise ValueError('Got unexpected field names: %r' % kwds.keys())
        return result

    def __getnewargs__(self):
        return tuple(self)

    type = property(lambda t: t[0])
    string = property(lambda t: t[1])
    start = property(lambda t: t[2])
    end = property(lambda t: t[3])
    line = property(lambda t: t[4])
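
# TokenInfo above is a hand-expanded equivalent of a collections.namedtuple:
# fields are readable by name or by position, and tuple unpacking still works.
# A small sketch (the values are illustrative only):
#
#     tok = TokenInfo(NAME, 'spam', (1, 0), (1, 4), 'spam = 1\n')
#     tok.string            # 'spam'
#     tok[1]                # 'spam'
#     toknum, tokval = tok[:2]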

def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
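
# For example (illustrative):
#     group('a', 'b')  -> '(a|b)'
#     any('a', 'b')    -> '(a|b)*'
#     maybe('a', 'b')  -> '(a|b)?'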
Guido van Rossum4d8e8591992-01-01 19:34:47 +000089
Antoine Pitroufd036452008-08-19 17:56:33 +000090# Note: we use unicode matching for names ("\w") but ascii matching for
91# number literals.
Guido van Rossum3b631771997-10-27 20:44:15 +000092Whitespace = r'[ \f\t]*'
93Comment = r'#[^\r\n]*'
94Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Benjamin Peterson33856de2010-08-30 14:41:20 +000095Name = r'\w+'
Guido van Rossum4d8e8591992-01-01 19:34:47 +000096
Antoine Pitroufd036452008-08-19 17:56:33 +000097Hexnumber = r'0[xX][0-9a-fA-F]+'
Georg Brandlfceab5a2008-01-19 20:08:23 +000098Binnumber = r'0[bB][01]+'
99Octnumber = r'0[oO][0-7]+'
Antoine Pitroufd036452008-08-19 17:56:33 +0000100Decnumber = r'(?:0+|[1-9][0-9]*)'
Guido van Rossumcd16bf62007-06-13 18:07:49 +0000101Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Antoine Pitroufd036452008-08-19 17:56:33 +0000102Exponent = r'[eE][-+]?[0-9]+'
103Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent)
104Expfloat = r'[0-9]+' + Exponent
Guido van Rossum1aec3231997-04-08 14:24:39 +0000105Floatnumber = group(Pointfloat, Expfloat)
Antoine Pitroufd036452008-08-19 17:56:33 +0000106Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]')
Guido van Rossum1aec3231997-04-08 14:24:39 +0000107Number = group(Imagnumber, Floatnumber, Intnumber)
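
# The alternation order matters: Imagnumber and Floatnumber are tried before
# Intnumber so that e.g. '1.5' is not matched as just the integer '1'.
# Illustrative checks:
#
#     re.match(Number, '0x1f').group()    # '0x1f'
#     re.match(Number, '3.14j').group()   # '3.14j'
#     re.match(Number, '1e-9').group()    # '1e-9'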

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[bB]?[rR]?'''", '[bB]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[bB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               r'[bB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
                 r"//=?", r"->",
                 r"[+\-*/%&|^=<>]=?",
                 r"~")

Bracket = '[][(){}]'
Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
Funny = group(Operator, Bracket, Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(r"[bB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                r'[bB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)

def _compile(expr):
    return re.compile(expr, re.UNICODE)

tokenprog, pseudoprog, single3prog, double3prog = map(
    _compile, (Token, PseudoToken, Single3, Double3))
endprogs = {"'": _compile(Single), '"': _compile(Double),
            "'''": single3prog, '"""': double3prog,
            "r'''": single3prog, 'r"""': double3prog,
            "b'''": single3prog, 'b"""': double3prog,
            "br'''": single3prog, 'br"""': double3prog,
            "R'''": single3prog, 'R"""': double3prog,
            "B'''": single3prog, 'B"""': double3prog,
            "bR'''": single3prog, 'bR"""': double3prog,
            "Br'''": single3prog, 'Br"""': double3prog,
            "BR'''": single3prog, 'BR"""': double3prog,
            'r': None, 'R': None, 'b': None, 'B': None}
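
# endprogs maps an opening quote (with any string prefix) to the regex that
# finds the matching close.  Prefix-only keys map to None so the tokenizer can
# fall through to the quote character itself; e.g. for a token starting "r'"
# it evaluates (illustrative):
#
#     endprog = endprogs["r"] or endprogs["'"]   # -> pattern for tail of ' string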

triple_quoted = {}
for t in ("'''", '"""',
          "r'''", 'r"""', "R'''", 'R"""',
          "b'''", 'b"""', "B'''", 'B"""',
          "br'''", 'br"""', "Br'''", 'Br"""',
          "bR'''", 'bR"""', "BR'''", 'BR"""'):
    triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
          "r'", 'r"', "R'", 'R"',
          "b'", 'b"', "B'", 'B"',
          "br'", 'br"', "Br'", 'Br"',
          "bR'", 'bR"', "BR'", 'BR"'):
    single_quoted[t] = t

del _compile

tabsize = 8

class TokenError(Exception): pass

class StopTokenizing(Exception): pass


class Untokenizer:

    def __init__(self):
        self.tokens = []
        self.prev_row = 1
        self.prev_col = 0
        self.encoding = None

    def add_whitespace(self, start):
        row, col = start
        assert row <= self.prev_row
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        for t in iterable:
            if len(t) == 2:
                self.compat(t, iterable)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENCODING:
                self.encoding = token
                continue
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        startline = False
        indents = []
        toks_append = self.tokens.append
        toknum, tokval = token

        if toknum in (NAME, NUMBER):
            tokval += ' '
        if toknum in (NEWLINE, NL):
            startline = True
        prevstring = False
        for tok in iterable:
            toknum, tokval = tok[:2]
            if toknum == ENCODING:
                self.encoding = tokval
                continue

            if toknum in (NAME, NUMBER):
                tokval += ' '

            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False

            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)


def untokenize(iterable):
    """Transform tokens back into Python source code.
    It returns a bytes object, encoded using the ENCODING
    token, which is the first token sequence output by tokenize.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two-element sequences are passed, exact positions are lost, so
    spacing is approximated and the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output bytes will tokenize back to the input
        t1 = [tok[:2] for tok in tokenize(f.readline)]
        newcode = untokenize(t1)
        readline = BytesIO(newcode).readline
        t2 = [tok[:2] for tok in tokenize(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    out = ut.untokenize(iterable)
    if ut.encoding is not None:
        out = out.encode(ut.encoding)
    return out
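
# A round-trip sketch with full 5-tuples (illustrative; the source bytes are
# made up):
#
#     import io
#     source = b"x = 1\nprint(x)\n"
#     toks = list(tokenize(io.BytesIO(source).readline))
#     assert untokenize(toks) == source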


def _get_normal_name(orig_enc):
    """Imitates get_normal_name in tokenizer.c."""
    # Only care about the first 12 characters.
    enc = orig_enc[:12].lower().replace("_", "-")
    if enc == "utf-8" or enc.startswith("utf-8-"):
        return "utf-8"
    if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
       enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
        return "iso-8859-1"
    return orig_enc
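
# For example (illustrative):
#     _get_normal_name("UTF_8")     -> "utf-8"
#     _get_normal_name("Latin-1")   -> "iso-8859-1"
#     _get_normal_name("euc-jp")    -> "euc-jp"   (returned unchanged)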

def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file.  It requires one argument,
    readline, in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.

    It detects the encoding from the presence of a UTF-8 BOM or an encoding
    cookie as specified in PEP 263.  If both a BOM and a cookie are present,
    but disagree, a SyntaxError will be raised.  If the encoding cookie is an
    invalid charset, raise a SyntaxError.  Note that if a UTF-8 BOM is found,
    'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    bom_found = False
    encoding = None
    default = 'utf-8'
    def read_or_stop():
        try:
            return readline()
        except StopIteration:
            return b''

    def find_cookie(line):
        try:
            line_string = line.decode('ascii')
        except UnicodeDecodeError:
            return None

        matches = cookie_re.findall(line_string)
        if not matches:
            return None
        encoding = _get_normal_name(matches[0])
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            raise SyntaxError("unknown encoding: " + encoding)

        if bom_found:
            if codec.name != 'utf-8':
                # This behaviour mimics the Python interpreter
                raise SyntaxError('encoding problem: utf-8')
            encoding += '-sig'
        return encoding

    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []

    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]

    second = read_or_stop()
    if not second:
        return default, [first]

    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]

    return default, [first, second]
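
# A usage sketch (illustrative; the cookie line is made up):
#
#     import io
#     buf = io.BytesIO(b"# -*- coding: latin-1 -*-\nx = 1\n")
#     detect_encoding(buf.readline)
#     # -> ('iso-8859-1', [b'# -*- coding: latin-1 -*-\n'])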


def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects.  Each call to the function
    should return one line of input as bytes.  Alternately, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile, 'rb').__next__  # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found.  The line passed is the
    logical line; continuation lines are included.

    The first token sequence will always be an ENCODING token
    which tells you which encoding was used to decode the bytes stream.
    """
    # This import is here to avoid problems when the itertools module is not
    # built yet and tokenize is imported.
    from itertools import chain, repeat
    encoding, consumed = detect_encoding(readline)
    rl_gen = iter(readline, b"")
    empty = repeat(b"")
    return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)
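
# The chain() above replays the lines detect_encoding() already consumed,
# then reads from readline until it returns b"", then yields b"" forever so
# _tokenize sees a steady EOF.  A sketch of tokenizing in-memory source
# (illustrative; the source bytes are made up):
#
#     import io
#     for tok in tokenize(io.BytesIO(b"x = 1\n").readline):
#         print(tok)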


def _tokenize(readline, encoding):
    lnum = parenlev = continued = 0
    numchars = '0123456789'
    contstr, needcont = '', 0
    contline = None
    indents = [0]

    if encoding is not None:
        if encoding == "utf-8-sig":
            # BOM will already have been stripped.
            encoding = "utf-8"
        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
    while True:                                # loop over lines in stream
        try:
            line = readline()
        except StopIteration:
            line = b''

        if encoding is not None:
            line = line.decode(encoding)
        lnum += 1
        pos, max = 0, len(line)

        if contstr:                            # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield TokenInfo(STRING, contstr + line[:end],
                                strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                yield TokenInfo(ERRORTOKEN, contstr + line,
                                strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ':
                    column += 1
                elif line[pos] == '\t':
                    column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f':
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break

            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    nl_pos = pos + len(comment_token)
                    yield TokenInfo(COMMENT, comment_token,
                                    (lnum, pos), (lnum, pos + len(comment_token)), line)
                    yield TokenInfo(NL, line[nl_pos:],
                                    (lnum, nl_pos), (lnum, len(line)), line)
                else:
                    # line[pos] is '\r' or '\n' here, so this is a blank line.
                    yield TokenInfo(NL, line[pos:],
                                    (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]
                yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)

        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            pseudomatch = pseudoprog.match(line, pos)
            if pseudomatch:                                # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                token, initial = line[start:end], line[start]

                if (initial in numchars or                 # ordinary number
                    (initial == '.' and token != '.' and token != '...')):
                    yield TokenInfo(NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    yield TokenInfo(NL if parenlev > 0 else NEWLINE,
                                    token, spos, epos, line)
                elif initial == '#':
                    assert not token.endswith("\n")
                    yield TokenInfo(COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = endprogs[token]
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield TokenInfo(STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                    token[:2] in single_quoted or \
                    token[:3] in single_quoted:
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        endprog = (endprogs[initial] or endprogs[token[1]] or
                                   endprogs[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        yield TokenInfo(STRING, token, spos, epos, line)
                elif initial.isidentifier():               # ordinary name
                    yield TokenInfo(NAME, token, spos, epos, line)
                elif initial == '\\':                      # continued stmt
                    continued = 1
                else:
                    if initial in '([{':
                        parenlev += 1
                    elif initial in ')]}':
                        parenlev -= 1
                    yield TokenInfo(OP, token, spos, epos, line)
            else:
                yield TokenInfo(ERRORTOKEN, line[pos],
                                (lnum, pos), (lnum, pos+1), line)
                pos += 1

    for indent in indents[1:]:                 # pop remaining indent levels
        yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')


# An undocumented, backward-compatible API for all the places in the standard
# library that expect to be able to use tokenize with strings
def generate_tokens(readline):
    return _tokenize(readline, None)
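
# For example (illustrative), generate_tokens works on str lines rather than
# bytes, so no ENCODING token is emitted:
#
#     import io
#     for tok in generate_tokens(io.StringIO("x = 1\n").readline):
#         print(tok)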