"""Tokenization help for Python programs.

tokenize(readline) is a generator that breaks a stream of
bytes into Python tokens. It decodes the bytes according to
PEP-0263 for determining source file encoding.

It accepts a readline-like method which is called
repeatedly to get the next line of input (or b"" for EOF). It generates
5-tuples with these members:

    the token type (see token.py)
    the token (a string)
    the starting (row, column) indices of the token (a 2-tuple of ints)
    the ending (row, column) indices of the token (a 2-tuple of ints)
    the original line (string)

It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators. Additionally, all token lists start with an ENCODING token
which tells you which encoding was used to decode the bytes stream."""

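# A minimal usage sketch (illustrative comment, not part of the module):
# tokenize() wants a readline callable that returns bytes, so an in-memory
# buffer works as well as a real file:
#
#     from io import BytesIO
#     for tok in tokenize(BytesIO(b"x = 1\n").readline):
#         print(tok)
#
# The first tuple produced is the ENCODING token described above.
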
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
               'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
               'Michael Foord')
import re, string, sys
from token import *
from codecs import lookup, BOM_UTF8
cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")

import token
__all__ = [x for x in dir(token) if not x.startswith("_")]
__all__.extend(["COMMENT", "tokenize", "detect_encoding", "NL", "untokenize",
                "ENCODING", "TokenInfo"])
del token

COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
ENCODING = N_TOKENS + 2
tok_name[ENCODING] = 'ENCODING'
N_TOKENS += 3

class TokenInfo(tuple):
    'TokenInfo(type, string, start, end, line)'

    __slots__ = ()

    _fields = ('type', 'string', 'start', 'end', 'line')

    def __new__(cls, type, string, start, end, line):
        return tuple.__new__(cls, (type, string, start, end, line))

    @classmethod
    def _make(cls, iterable, new=tuple.__new__, len=len):
        'Make a new TokenInfo object from a sequence or iterable'
        result = new(cls, iterable)
        if len(result) != 5:
            raise TypeError('Expected 5 arguments, got %d' % len(result))
        return result

    def __repr__(self):
        return 'TokenInfo(type=%r, string=%r, start=%r, end=%r, line=%r)' % self

    def _asdict(self):
        'Return a new dict which maps field names to their values'
        return dict(zip(self._fields, self))

    def _replace(self, **kwds):
        'Return a new TokenInfo object replacing specified fields with new values'
        result = self._make(map(kwds.pop, ('type', 'string', 'start', 'end', 'line'), self))
        if kwds:
            raise ValueError('Got unexpected field names: %r' % kwds.keys())
        return result

    def __getnewargs__(self):
        return tuple(self)

    type = property(lambda t: t[0])
    string = property(lambda t: t[1])
    start = property(lambda t: t[2])
    end = property(lambda t: t[3])
    line = property(lambda t: t[4])

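# Illustrative sketch of the named-field access this hand-rolled namedtuple
# provides (the token values below are only an example):
#
#     tok = TokenInfo(NAME, 'spam', (1, 0), (1, 4), 'spam = 1\n')
#     tok.type == tok[0] == NAME
#     tok.string == 'spam'
#     tok._replace(string='ham').string == 'ham'
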
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'

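# For example (sketch): group('a', 'b') == '(a|b)', any('x') == '(x)*',
# and maybe('x') == '(x)?'.
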
# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'

Hexnumber = r'0[xX][0-9a-fA-F]+'
Binnumber = r'0[bB][01]+'
Octnumber = r'0[oO][0-7]+'
Decnumber = r'(?:0+|[1-9][0-9]*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9]+'
Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent)
Expfloat = r'[0-9]+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)

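# Sketch of what Number accepts (anchoring with \Z to force a full match;
# the name _num is hypothetical):
#
#     _num = re.compile(Number + r'\Z')
#     _num.match('0x1F'); _num.match('0b101'); _num.match('0o17')
#     _num.match('3.14'); _num.match('1e-9'); _num.match('3j')
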
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[bB]?[rR]?'''", '[bB]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[bB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               r'[bB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
                 r"//=?", r"->",
                 r"[+\-*/%&|^=<>]=?",
                 r"~")

Bracket = '[][(){}]'
Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
Funny = group(Operator, Bracket, Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(r"[bB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                r'[bB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)

tokenprog, pseudoprog, single3prog, double3prog = map(
    re.compile, (Token, PseudoToken, Single3, Double3))
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
            "'''": single3prog, '"""': double3prog,
            "r'''": single3prog, 'r"""': double3prog,
            "b'''": single3prog, 'b"""': double3prog,
            "br'''": single3prog, 'br"""': double3prog,
            "R'''": single3prog, 'R"""': double3prog,
            "B'''": single3prog, 'B"""': double3prog,
            "bR'''": single3prog, 'bR"""': double3prog,
            "Br'''": single3prog, 'Br"""': double3prog,
            "BR'''": single3prog, 'BR"""': double3prog,
            'r': None, 'R': None, 'b': None, 'B': None}

triple_quoted = {}
for t in ("'''", '"""',
          "r'''", 'r"""', "R'''", 'R"""',
          "b'''", 'b"""', "B'''", 'B"""',
          "br'''", 'br"""', "Br'''", 'Br"""',
          "bR'''", 'bR"""', "BR'''", 'BR"""'):
    triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
          "r'", 'r"', "R'", 'R"',
          "b'", 'b"', "B'", 'B"',
          "br'", 'br"', "Br'", 'Br"',
          "bR'", 'bR"', "BR'", 'BR"'):
    single_quoted[t] = t

tabsize = 8

class TokenError(Exception): pass

class StopTokenizing(Exception): pass


class Untokenizer:

    def __init__(self):
        self.tokens = []
        self.prev_row = 1
        self.prev_col = 0
        self.encoding = None

    def add_whitespace(self, start):
        row, col = start
        # Tokens arrive in source order, so a token can never start on an
        # earlier row than the previous token's end.
        assert row >= self.prev_row
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

196 def untokenize(self, iterable):
197 for t in iterable:
198 if len(t) == 2:
199 self.compat(t, iterable)
200 break
201 tok_type, token, start, end, line = t
Trent Nelson428de652008-03-18 22:41:35 +0000202 if tok_type == ENCODING:
203 self.encoding = token
204 continue
Thomas Wouters89f507f2006-12-13 04:49:30 +0000205 self.add_whitespace(start)
206 self.tokens.append(token)
207 self.prev_row, self.prev_col = end
208 if tok_type in (NEWLINE, NL):
209 self.prev_row += 1
210 self.prev_col = 0
211 return "".join(self.tokens)
212
213 def compat(self, token, iterable):
214 startline = False
215 indents = []
216 toks_append = self.tokens.append
217 toknum, tokval = token
Trent Nelson428de652008-03-18 22:41:35 +0000218
Thomas Wouters89f507f2006-12-13 04:49:30 +0000219 if toknum in (NAME, NUMBER):
220 tokval += ' '
221 if toknum in (NEWLINE, NL):
222 startline = True
Christian Heimesba4af492008-03-28 00:55:15 +0000223 prevstring = False
Thomas Wouters89f507f2006-12-13 04:49:30 +0000224 for tok in iterable:
225 toknum, tokval = tok[:2]
Trent Nelson428de652008-03-18 22:41:35 +0000226 if toknum == ENCODING:
227 self.encoding = tokval
228 continue
Thomas Wouters89f507f2006-12-13 04:49:30 +0000229
230 if toknum in (NAME, NUMBER):
231 tokval += ' '
232
Christian Heimesba4af492008-03-28 00:55:15 +0000233 # Insert a space between two consecutive strings
234 if toknum == STRING:
235 if prevstring:
236 tokval = ' ' + tokval
237 prevstring = True
238 else:
239 prevstring = False
240
Thomas Wouters89f507f2006-12-13 04:49:30 +0000241 if toknum == INDENT:
242 indents.append(tokval)
243 continue
244 elif toknum == DEDENT:
245 indents.pop()
246 continue
247 elif toknum in (NEWLINE, NL):
248 startline = True
249 elif startline and indents:
250 toks_append(indents[-1])
251 startline = False
252 toks_append(tokval)
Raymond Hettinger68c04532005-06-10 11:05:19 +0000253
Trent Nelson428de652008-03-18 22:41:35 +0000254
def untokenize(iterable):
    """Transform tokens back into Python source code.
    It returns a bytes object, encoded using the ENCODING
    token, which is the first token sequence output by tokenize.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value. If
    only two tokens are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output bytes will tokenize back to the input
        t1 = [tok[:2] for tok in tokenize(f.readline)]
        newcode = untokenize(t1)
        readline = BytesIO(newcode).readline
        t2 = [tok[:2] for tok in tokenize(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    out = ut.untokenize(iterable)
    if ut.encoding is not None:
        out = out.encode(ut.encoding)
    return out

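# A complete round-trip sketch of the limited (2-tuple) mode documented
# above (the source bytes below are only an example):
#
#     from io import BytesIO
#     source = b"if x:\n    y = 1\n"
#     t1 = [tok[:2] for tok in tokenize(BytesIO(source).readline)]
#     newcode = untokenize(t1)    # bytes, encoded per the ENCODING token
#     t2 = [tok[:2] for tok in tokenize(BytesIO(newcode).readline)]
#     assert t1 == t2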

def _get_normal_name(orig_enc):
    """Imitates get_normal_name in tokenizer.c."""
    # Only care about the first 12 characters.
    enc = orig_enc[:12].lower().replace("_", "-")
    if enc == "utf-8" or enc.startswith("utf-8-"):
        return "utf-8"
    if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
       enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
        return "iso-8859-1"
    return orig_enc

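# For example, _get_normal_name("UTF_8") == "utf-8" and
# _get_normal_name("Latin-1") == "iso-8859-1".
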
def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file. It requires one argument, readline,
    in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.

    It detects the encoding from the presence of a UTF-8 BOM or an encoding
    cookie as specified in PEP 263. If both a BOM and a cookie are present
    but disagree, a SyntaxError will be raised. If the encoding cookie is an
    invalid charset, raise a SyntaxError. Note that if a UTF-8 BOM is found,
    'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    bom_found = False
    encoding = None
    default = 'utf-8'
    def read_or_stop():
        try:
            return readline()
        except StopIteration:
            return b''

    def find_cookie(line):
        try:
            line_string = line.decode('ascii')
        except UnicodeDecodeError:
            return None

        matches = cookie_re.findall(line_string)
        if not matches:
            return None
        encoding = _get_normal_name(matches[0])
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            raise SyntaxError("unknown encoding: " + encoding)

        if bom_found:
            if codec.name != 'utf-8':
                # This behaviour mimics the Python interpreter
                raise SyntaxError('encoding problem: utf-8')
            encoding += '-sig'
        return encoding

    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []

    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]

    second = read_or_stop()
    if not second:
        return default, [first]

    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]

    return default, [first, second]

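# Illustrative sketch (the source bytes are only an example):
#
#     from io import BytesIO
#     buf = BytesIO(b"# -*- coding: latin-1 -*-\nx = 1\n")
#     enc, lines = detect_encoding(buf.readline)
#     # enc == 'iso-8859-1'; lines holds the one line already consumed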

def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects. Each call to the function
    should return one line of input as bytes. Alternately, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile, 'rb').__next__  # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found. The line passed is the
    logical line; continuation lines are included.

    The first token sequence will always be an ENCODING token
    which tells you which encoding was used to decode the bytes stream.
    """
    # This import is here to avoid problems when the itertools module is not
    # built yet and tokenize is imported.
    from itertools import chain, repeat
    encoding, consumed = detect_encoding(readline)
    rl_gen = iter(readline, b"")
    empty = repeat(b"")
    return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)

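# Typical use (sketch; 'example.py' is a placeholder path):
#
#     with open('example.py', 'rb') as f:
#         for tok in tokenize(f.readline):
#             print(tok.type, tok.string)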

def _tokenize(readline, encoding):
    lnum = parenlev = continued = 0
    namechars, numchars = string.ascii_letters + '_', '0123456789'
    contstr, needcont = '', 0
    contline = None
    indents = [0]

    if encoding is not None:
        if encoding == "utf-8-sig":
            # BOM will already have been stripped.
            encoding = "utf-8"
        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
    while True:                                # loop over lines in stream
        try:
            line = readline()
        except StopIteration:
            line = b''

        if encoding is not None:
            line = line.decode(encoding)
        lnum += 1
        pos, max = 0, len(line)

        if contstr:                            # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield TokenInfo(STRING, contstr + line[:end],
                                strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                yield TokenInfo(ERRORTOKEN, contstr + line,
                                strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ':
                    column += 1
                elif line[pos] == '\t':
                    column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f':
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break

            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    nl_pos = pos + len(comment_token)
                    yield TokenInfo(COMMENT, comment_token,
                                    (lnum, pos), (lnum, pos + len(comment_token)), line)
                    yield TokenInfo(NL, line[nl_pos:],
                                    (lnum, nl_pos), (lnum, len(line)), line)
                else:
                    yield TokenInfo((NL, COMMENT)[line[pos] == '#'], line[pos:],
                                    (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]
                yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)

        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            pseudomatch = pseudoprog.match(line, pos)
            if pseudomatch:                    # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                token, initial = line[start:end], line[start]

                if (initial in numchars or     # ordinary number
                    (initial == '.' and token != '.' and token != '...')):
                    yield TokenInfo(NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    yield TokenInfo(NL if parenlev > 0 else NEWLINE,
                                    token, spos, epos, line)
                elif initial == '#':
                    assert not token.endswith("\n")
                    yield TokenInfo(COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = endprogs[token]
                    endmatch = endprog.match(line, pos)
                    if endmatch:               # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield TokenInfo(STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)   # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                        token[:2] in single_quoted or \
                        token[:3] in single_quoted:
                    if token[-1] == '\n':      # continued string
                        strstart = (lnum, start)
                        endprog = (endprogs[initial] or endprogs[token[1]] or
                                   endprogs[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                      # ordinary string
                        yield TokenInfo(STRING, token, spos, epos, line)
                elif initial in namechars:     # ordinary name
                    yield TokenInfo(NAME, token, spos, epos, line)
                elif initial == '\\':          # continued stmt
                    continued = 1
                else:
                    if initial in '([{':
                        parenlev += 1
                    elif initial in ')]}':
                        parenlev -= 1
                    yield TokenInfo(OP, token, spos, epos, line)
            else:
                yield TokenInfo(ERRORTOKEN, line[pos],
                                (lnum, pos), (lnum, pos+1), line)
                pos += 1

    for indent in indents[1:]:                 # pop remaining indent levels
        yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')


# An undocumented, backwards compatible API for all the places in the
# standard library that expect to be able to use tokenize with strings
def generate_tokens(readline):
    return _tokenize(readline, None)
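
# Illustrative sketch of the str-based helper above (io.StringIO is just a
# convenient way to get a str-returning readline):
#
#     import io
#     for tok in generate_tokens(io.StringIO("x = 1\n").readline):
#         print(tok)
#
# Unlike tokenize(), no ENCODING token is produced and the input must
# already be decoded text.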