blob: 6c857f8547337bbb4a763a5950403609293b45ce [file] [log] [blame]
"""Tokenization help for Python programs.

generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens.  It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF).  It generates
5-tuples with these members:

    the token type (see token.py)
    the token (a string)
    the starting (row, column) indices of the token (a 2-tuple of ints)
    the ending (row, column) indices of the token (a 2-tuple of ints)
    the original line (string)

It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators

Older entry points
    tokenize_loop(readline, tokeneater)
    tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""
Guido van Rossumb51eaa11997-03-07 00:21:55 +000024
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
               'Skip Montanaro, Raymond Hettinger')

from itertools import chain
import string, re
from token import *

# Re-export every public name from the standard `token` module, plus the
# extras this module adds on top of it.
import token
__all__ = [x for x in dir(token) if not x.startswith("_")]
__all__ += ["COMMENT", "tokenize", "generate_tokens", "NL", "untokenize"]
del x  # NOTE: relies on Python 2 list-comprehension scoping (x leaks out)
del token
38
# Extra token types not defined by the `token` module: unlike the C
# tokenizer, this module reports comments and non-logical ("empty")
# newlines instead of discarding them.
COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
N_TOKENS += 2
Guido van Rossum1aec3231997-04-08 14:24:39 +000044
def group(*choices):
    """Join regex alternatives into one group: group('a', 'b') -> '(a|b)'."""
    return '(' + '|'.join(choices) + ')'

def any(*choices):
    """Group matched zero or more times.  (Intentionally shadows builtin any.)"""
    return group(*choices) + '*'

def maybe(*choices):
    """Group matched zero or one time."""
    return group(*choices) + '?'
Guido van Rossum4d8e8591992-01-01 19:34:47 +000048
# Regular expressions for the lexical grammar of Python 2.  Built with the
# group/any/maybe helpers above and compiled once at import time.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'

# Integer literals; the optional [lL] suffix is Python 2's long-int marker.
Hexnumber = r'0[xX][\da-fA-F]+[lL]?'
Octnumber = r'(0[oO][0-7]+)|(0[0-7]*)[lL]?'
Binnumber = r'0[bB][01]+[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[uUbB]?[rR]?'''", '[uUbB]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
                 r"//=?",
                 r"[+\-*/%&|^=<>]=?",
                 r"~")

Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)

tokenprog, pseudoprog, single3prog, double3prog = map(
    re.compile, (Token, PseudoToken, Single3, Double3))
# Map a string's opening quote (with any prefix) to the regex that finds
# its closing quote; a None value marks a bare prefix letter.
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
            "'''": single3prog, '"""': double3prog,
            "r'''": single3prog, 'r"""': double3prog,
            "u'''": single3prog, 'u"""': double3prog,
            "ur'''": single3prog, 'ur"""': double3prog,
            "R'''": single3prog, 'R"""': double3prog,
            "U'''": single3prog, 'U"""': double3prog,
            "uR'''": single3prog, 'uR"""': double3prog,
            "Ur'''": single3prog, 'Ur"""': double3prog,
            "UR'''": single3prog, 'UR"""': double3prog,
            "b'''": single3prog, 'b"""': double3prog,
            "br'''": single3prog, 'br"""': double3prog,
            "B'''": single3prog, 'B"""': double3prog,
            "bR'''": single3prog, 'bR"""': double3prog,
            "Br'''": single3prog, 'Br"""': double3prog,
            "BR'''": single3prog, 'BR"""': double3prog,
            'r': None, 'R': None, 'u': None, 'U': None,
            'b': None, 'B': None}

# All legal triple-quote openers (every prefix/quote combination).
triple_quoted = {}
for t in ("'''", '"""',
          "r'''", 'r"""', "R'''", 'R"""',
          "u'''", 'u"""', "U'''", 'U"""',
          "ur'''", 'ur"""', "Ur'''", 'Ur"""',
          "uR'''", 'uR"""', "UR'''", 'UR"""',
          "b'''", 'b"""', "B'''", 'B"""',
          "br'''", 'br"""', "Br'''", 'Br"""',
          "bR'''", 'bR"""', "BR'''", 'BR"""'):
    triple_quoted[t] = t
# All legal single-quote openers.
single_quoted = {}
for t in ("'", '"',
          "r'", 'r"', "R'", 'R"',
          "u'", 'u"', "U'", 'U"',
          "ur'", 'ur"', "Ur'", 'Ur"',
          "uR'", 'uR"', "UR'", 'UR"',
          "b'", 'b"', "B'", 'B"',
          "br'", 'br"', "Br'", 'Br"',
          "bR'", 'bR"', "BR'", 'BR"' ):
    single_quoted[t] = t

tabsize = 8
Fred Drake9b8d8012000-08-17 04:45:13 +0000145
class TokenError(Exception):
    """Raised when EOF is reached inside an unterminated string or statement."""

class StopTokenizing(Exception):
    """Raised by a tokeneater callback to abort tokenize() early."""
Fred Drake9b8d8012000-08-17 04:45:13 +0000149
def printtoken(type, token, srow_scol, erow_ecol, line): # for testing
    """Print one token in "srow,scol-erow,ecol: TYPE 'value'" form.

    This is the default tokeneater for tokenize().  The parenthesized
    print form behaves identically under Python 2 and 3.
    """
    srow, scol = srow_scol
    erow, ecol = erow_ecol
    print("%d,%d-%d,%d:\t%s\t%s" %
          (srow, scol, erow, ecol, tok_name[type], repr(token)))
Guido van Rossum4d8e8591992-01-01 19:34:47 +0000155
def tokenize(readline, tokeneater=printtoken):
    """
    The tokenize() function accepts two parameters: one representing the
    input stream, and one providing an output mechanism for tokenize().

    The first parameter, readline, must be a callable object which provides
    the same interface as the readline() method of built-in file objects.
    Each call to the function should return one line of input as a string.

    The second parameter, tokeneater, must also be a callable object. It is
    called once for each token, with five arguments, corresponding to the
    tuples generated by generate_tokens().

    The callback may raise StopTokenizing to stop early; that exception is
    swallowed here.
    """
    try:
        tokenize_loop(readline, tokeneater)
    except StopTokenizing:
        pass
173
# backwards compatible interface
def tokenize_loop(readline, tokeneater):
    """Feed every 5-tuple from generate_tokens(readline) to tokeneater."""
    for token_info in generate_tokens(readline):
        tokeneater(*token_info)
Tim Peters5ca576e2001-06-18 22:08:13 +0000178
class Untokenizer:
    """Rebuild source text from an iterable of token tuples.

    Full 5-tuples reproduce the original source exactly; 2-tuples fall
    back to compat(), which only guarantees round-trip tokenizability.
    """

    def __init__(self):
        self.tokens = []        # accumulated output fragments
        self.prev_row = 1       # end position of the last emitted token
        self.prev_col = 0

    def add_whitespace(self, start):
        """Emit filler (backslash-newlines and spaces) up to `start`.

        Raises ValueError if `start` precedes the previous token's end.
        """
        row, col = start
        if row < self.prev_row or row == self.prev_row and col < self.prev_col:
            raise ValueError("start ({},{}) precedes previous end ({},{})"
                             .format(row, col, self.prev_row, self.prev_col))
        row_offset = row - self.prev_row
        if row_offset:
            # Bridge skipped rows with explicit line continuations.
            self.tokens.append("\\\n" * row_offset)
            self.prev_col = 0
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        """Return the source string for the given token tuples."""
        it = iter(iterable)
        indents = []
        startline = False
        for t in it:
            if len(t) == 2:
                # Positions are unavailable; switch to best-effort mode.
                self.compat(t, it)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENDMARKER:
                break
            if tok_type == INDENT:
                indents.append(token)
                continue
            elif tok_type == DEDENT:
                indents.pop()
                self.prev_row, self.prev_col = end
                continue
            elif tok_type in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                # Re-emit the current indentation at the start of a line.
                indent = indents[-1]
                if start[1] >= len(indent):
                    self.tokens.append(indent)
                    self.prev_col = len(indent)
                startline = False
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        """Consume 2-tuples (type, string), appending a lossy rendering."""
        indents = []
        toks_append = self.tokens.append
        startline = token[0] in (NEWLINE, NL)
        prevstring = False

        for tok in chain([token], iterable):
            toknum, tokval = tok[:2]

            if toknum in (NAME, NUMBER):
                # Trailing space keeps adjacent names/numbers separated.
                tokval += ' '

            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False

            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)
Raymond Hettinger68c04532005-06-10 11:05:19 +0000265
266def untokenize(iterable):
267 """Transform tokens back into Python source code.
268
269 Each element returned by the iterable must be a token sequence
Jeremy Hylton76467ba2006-08-23 21:14:03 +0000270 with at least two elements, a token number and token value. If
271 only two tokens are passed, the resulting output is poor.
Raymond Hettinger68c04532005-06-10 11:05:19 +0000272
Jeremy Hylton76467ba2006-08-23 21:14:03 +0000273 Round-trip invariant for full input:
274 Untokenized source will match input source exactly
275
276 Round-trip invariant for limited intput:
Raymond Hettinger68c04532005-06-10 11:05:19 +0000277 # Output text will tokenize the back to the input
278 t1 = [tok[:2] for tok in generate_tokens(f.readline)]
279 newcode = untokenize(t1)
280 readline = iter(newcode.splitlines(1)).next
Amaury Forgeot d'Arcda0c0252008-03-27 23:23:54 +0000281 t2 = [tok[:2] for tok in generate_tokens(readline)]
Raymond Hettinger68c04532005-06-10 11:05:19 +0000282 assert t1 == t2
283 """
Jeremy Hylton76467ba2006-08-23 21:14:03 +0000284 ut = Untokenizer()
285 return ut.untokenize(iterable)
Raymond Hettinger68c04532005-06-10 11:05:19 +0000286
def generate_tokens(readline):
    """
    The generate_tokens() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects. Each call to the function
    should return one line of input as a string.  Alternately, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile).next    # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found. The line passed is the
    logical line; continuation lines are included.
    """
    lnum = parenlev = continued = 0
    namechars, numchars = string.ascii_letters + '_', '0123456789'
    contstr, needcont = '', 0   # pending multi-line string state
    contline = None
    indents = [0]               # stack of active indentation columns

    last_line = b''
    line = b''
    while 1:                                   # loop over lines in stream
        try:
            # We capture the value of the line variable here because
            # readline uses the empty string '' to signal end of input,
            # hence `line` itself will always be overwritten at the end
            # of this loop.
            last_line = line
            line = readline()
        except StopIteration:
            line = ''
        lnum += 1
        pos, max = 0, len(line)

        if contstr:                            # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield (STRING, contstr + line[:end],
                       strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                # Single-quoted string whose continuation line lacks the
                # required trailing backslash: report it as an error token.
                yield (ERRORTOKEN, contstr + line,
                       strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ':
                    column += 1
                elif line[pos] == '\t':
                    column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f':
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break

            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    nl_pos = pos + len(comment_token)
                    yield (COMMENT, comment_token,
                           (lnum, pos), (lnum, pos + len(comment_token)), line)
                    yield (NL, line[nl_pos:],
                           (lnum, nl_pos), (lnum, len(line)), line)
                else:
                    yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
                           (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]
                yield (DEDENT, '', (lnum, pos), (lnum, pos), line)

        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            pseudomatch = pseudoprog.match(line, pos)
            if pseudomatch:                    # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                if start == end:
                    continue
                token, initial = line[start:end], line[start]

                if initial in numchars or \
                   (initial == '.' and token != '.'):      # ordinary number
                    yield (NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    # Inside brackets a newline is non-logical (NL).
                    yield (NL if parenlev > 0 else NEWLINE,
                           token, spos, epos, line)
                elif initial == '#':
                    assert not token.endswith("\n")
                    yield (COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = endprogs[token]
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield (STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                    token[:2] in single_quoted or \
                    token[:3] in single_quoted:
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        endprog = (endprogs[initial] or endprogs[token[1]] or
                                   endprogs[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        yield (STRING, token, spos, epos, line)
                elif initial in namechars:                 # ordinary name
                    yield (NAME, token, spos, epos, line)
                elif initial == '\\':                      # continued stmt
                    continued = 1
                else:
                    if initial in '([{':
                        parenlev += 1
                    elif initial in ')]}':
                        parenlev -= 1
                    yield (OP, token, spos, epos, line)
            else:
                yield (ERRORTOKEN, line[pos],
                       (lnum, pos), (lnum, pos+1), line)
                pos += 1

    # Add an implicit NEWLINE if the input doesn't end in one
    if last_line and last_line[-1] not in '\r\n':
        yield (NEWLINE, '', (lnum - 1, len(last_line)), (lnum - 1, len(last_line) + 1), '')
    for indent in indents[1:]:                 # pop remaining indent levels
        yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
Guido van Rossumfc6f5331997-03-07 00:21:12 +0000453
if __name__ == '__main__':                     # testing
    import sys
    # Tokenize the named file, or stdin when no argument is given.
    if len(sys.argv) > 1:
        tokenize(open(sys.argv[1]).readline)
    else:
        tokenize(sys.stdin.readline)