"""Tokenization help for Python programs.

tokenize(readline) is a generator that breaks a stream of bytes into
Python tokens.  It decodes the bytes according to PEP-0263 for
determining source file encoding.

It accepts a readline-like method which is called repeatedly to get the
next line of input (or b"" for EOF).  It generates 5-tuples with these
members:

    the token type (see token.py)
    the token (a string)
    the starting (row, column) indices of the token (a 2-tuple of ints)
    the ending (row, column) indices of the token (a 2-tuple of ints)
    the original line (string)

It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators.  Additionally, all token lists start with an ENCODING token
which tells you which encoding was used to decode the bytes stream.
"""

__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
               'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
               'Michael Foord')
from builtins import open as _builtin_open
from codecs import lookup, BOM_UTF8
import collections
from io import TextIOWrapper
from itertools import chain
import itertools as _itertools
import re
import sys
from token import *

cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)

import token
__all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
                           "NL", "untokenize", "ENCODING", "TokenInfo"]
del token

COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
ENCODING = N_TOKENS + 2
tok_name[ENCODING] = 'ENCODING'
N_TOKENS += 3
EXACT_TOKEN_TYPES = {
    '(': LPAR,
    ')': RPAR,
    '[': LSQB,
    ']': RSQB,
    ':': COLON,
    ',': COMMA,
    ';': SEMI,
    '+': PLUS,
    '-': MINUS,
    '*': STAR,
    '/': SLASH,
    '|': VBAR,
    '&': AMPER,
    '<': LESS,
    '>': GREATER,
    '=': EQUAL,
    '.': DOT,
    '%': PERCENT,
    '{': LBRACE,
    '}': RBRACE,
    '==': EQEQUAL,
    '!=': NOTEQUAL,
    '<=': LESSEQUAL,
    '>=': GREATEREQUAL,
    '~': TILDE,
    '^': CIRCUMFLEX,
    '<<': LEFTSHIFT,
    '>>': RIGHTSHIFT,
    '**': DOUBLESTAR,
    '+=': PLUSEQUAL,
    '-=': MINEQUAL,
    '*=': STAREQUAL,
    '/=': SLASHEQUAL,
    '%=': PERCENTEQUAL,
    '&=': AMPEREQUAL,
    '|=': VBAREQUAL,
    '^=': CIRCUMFLEXEQUAL,
    '<<=': LEFTSHIFTEQUAL,
    '>>=': RIGHTSHIFTEQUAL,
    '**=': DOUBLESTAREQUAL,
    '//': DOUBLESLASH,
    '//=': DOUBLESLASHEQUAL,
    '@': AT,
    '@=': ATEQUAL,
}

class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
    def __repr__(self):
        annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
        return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
                self._replace(type=annotated_type))

    @property
    def exact_type(self):
        if self.type == OP and self.string in EXACT_TOKEN_TYPES:
            return EXACT_TOKEN_TYPES[self.string]
        else:
            return self.type

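# For example (illustrative), '+=' is reported generically as OP, while
# exact_type recovers the specific operator type from EXACT_TOKEN_TYPES:
#
#     tok = TokenInfo(OP, '+=', (1, 2), (1, 4), 'x += 1\n')
#     assert tok.exact_type == PLUSEQUAL
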
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
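
# For example (illustrative): group('a', 'b') == '(a|b)',
# any('a', 'b') == '(a|b)*', and maybe('a', 'b') == '(a|b)?'.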

# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'

Hexnumber = r'0[xX][0-9a-fA-F]+'
Binnumber = r'0[bB][01]+'
Octnumber = r'0[oO][0-7]+'
Decnumber = r'(?:0+|[1-9][0-9]*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9]+'
Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent)
Expfloat = r'[0-9]+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)

# Return the empty string, plus all of the valid string prefixes.
def _all_string_prefixes():
    # The valid string prefixes.  Only the lower case versions are listed
    # here, without any permutations ('fr' is included, but not 'rf');
    # the various permutations are generated below.
    _valid_string_prefixes = ['b', 'r', 'u', 'f', 'br', 'fr']
    # if we add binary f-strings, add: ['fb', 'fbr']
    result = set([''])
    for prefix in _valid_string_prefixes:
        for t in _itertools.permutations(prefix):
            # create a list with upper and lower versions of each
            # character
            for u in _itertools.product(*[(c, c.upper()) for c in t]):
                result.add(''.join(u))
    return result

def _compile(expr):
    return re.compile(expr, re.UNICODE)

# Note that since _all_string_prefixes includes the empty string,
# StringPrefix can be the empty string (making it optional).
StringPrefix = group(*_all_string_prefixes())

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
                 r"//=?", r"->",
                 r"[+\-*/%&@|^=<>]=?",
                 r"~")

Bracket = '[][(){}]'
Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
Funny = group(Operator, Bracket, Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)

# For a given string prefix plus quotes, endpats maps it to a regex
# to match the remainder of that string.  _prefix can be empty, for
# a normal single or triple quoted string (with no prefix).
endpats = {}
for _prefix in _all_string_prefixes():
    endpats[_prefix + "'"] = Single
    endpats[_prefix + '"'] = Double
    endpats[_prefix + "'''"] = Single3
    endpats[_prefix + '"""'] = Double3

# A set of all of the single and triple quoted string prefixes,
# including the opening quotes.
single_quoted = set()
triple_quoted = set()
for t in _all_string_prefixes():
    for u in (t + '"', t + "'"):
        single_quoted.add(u)
    for u in (t + '"""', t + "'''"):
        triple_quoted.add(u)
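
# For example (illustrative): "rb'" and 'F"' are members of single_quoted,
# while "'''" and 'rb"""' are members of triple_quoted.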

tabsize = 8

class TokenError(Exception): pass

class StopTokenizing(Exception): pass


class Untokenizer:

    def __init__(self):
        self.tokens = []
        self.prev_row = 1
        self.prev_col = 0
        self.encoding = None

    def add_whitespace(self, start):
        row, col = start
        if row < self.prev_row or row == self.prev_row and col < self.prev_col:
            raise ValueError("start ({},{}) precedes previous end ({},{})"
                             .format(row, col, self.prev_row, self.prev_col))
        row_offset = row - self.prev_row
        if row_offset:
            self.tokens.append("\\\n" * row_offset)
            self.prev_col = 0
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        it = iter(iterable)
        indents = []
        startline = False
        for t in it:
            if len(t) == 2:
                self.compat(t, it)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENCODING:
                self.encoding = token
                continue
            if tok_type == ENDMARKER:
                break
            if tok_type == INDENT:
                indents.append(token)
                continue
            elif tok_type == DEDENT:
                indents.pop()
                self.prev_row, self.prev_col = end
                continue
            elif tok_type in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                indent = indents[-1]
                if start[1] >= len(indent):
                    self.tokens.append(indent)
                    self.prev_col = len(indent)
                startline = False
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        indents = []
        toks_append = self.tokens.append
        startline = token[0] in (NEWLINE, NL)
        prevstring = False

        for tok in chain([token], iterable):
            toknum, tokval = tok[:2]
            if toknum == ENCODING:
                self.encoding = tokval
                continue

            if toknum in (NAME, NUMBER, ASYNC, AWAIT):
                tokval += ' '

            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False

            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)


def untokenize(iterable):
    """Transform tokens back into Python source code.
    It returns a bytes object, encoded using the ENCODING
    token, which is the first token sequence output by tokenize.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output bytes will tokenize back to the input
        t1 = [tok[:2] for tok in tokenize(f.readline)]
        newcode = untokenize(t1)
        readline = BytesIO(newcode).readline
        t2 = [tok[:2] for tok in tokenize(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    out = ut.untokenize(iterable)
    if ut.encoding is not None:
        out = out.encode(ut.encoding)
    return out


def _get_normal_name(orig_enc):
    """Imitates get_normal_name in tokenizer.c."""
    # Only care about the first 12 characters.
    enc = orig_enc[:12].lower().replace("_", "-")
    if enc == "utf-8" or enc.startswith("utf-8-"):
        return "utf-8"
    if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
       enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
        return "iso-8859-1"
    return orig_enc

def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file.  It requires one argument, readline,
    in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.

    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263.  If both a bom and a cookie are present,
    but disagree, a SyntaxError will be raised.  If the encoding cookie is an
    invalid charset, raise a SyntaxError.  Note that if a utf-8 bom is found,
    'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    try:
        filename = readline.__self__.name
    except AttributeError:
        filename = None
    bom_found = False
    encoding = None
    default = 'utf-8'
    def read_or_stop():
        try:
            return readline()
        except StopIteration:
            return b''

    def find_cookie(line):
        try:
            # Decode as UTF-8. Either the line is an encoding declaration,
            # in which case it should be pure ASCII, or it must be UTF-8
            # per default encoding.
            line_string = line.decode('utf-8')
        except UnicodeDecodeError:
            msg = "invalid or missing encoding declaration"
            if filename is not None:
                msg = '{} for {!r}'.format(msg, filename)
            raise SyntaxError(msg)

        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            if filename is None:
                msg = "unknown encoding: " + encoding
            else:
                msg = "unknown encoding for {!r}: {}".format(filename,
                                                             encoding)
            raise SyntaxError(msg)

        if bom_found:
            if encoding != 'utf-8':
                # This behaviour mimics the Python interpreter
                if filename is None:
                    msg = 'encoding problem: utf-8'
                else:
                    msg = 'encoding problem for {!r}: utf-8'.format(filename)
                raise SyntaxError(msg)
            encoding += '-sig'
        return encoding

    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []

    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]
    if not blank_re.match(first):
        return default, [first]

    second = read_or_stop()
    if not second:
        return default, [first]

    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]

    return default, [first, second]
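
# A usage sketch (illustrative; 'example.py' is a hypothetical file):
#
#     buffer = _builtin_open('example.py', 'rb')
#     encoding, first_lines = detect_encoding(buffer.readline)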


def open(filename):
    """Open a file in read only mode using the encoding detected by
    detect_encoding().
    """
    buffer = _builtin_open(filename, 'rb')
    try:
        encoding, lines = detect_encoding(buffer.readline)
        buffer.seek(0)
        text = TextIOWrapper(buffer, encoding, line_buffering=True)
        text.mode = 'r'
        return text
    except:
        buffer.close()
        raise
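
# A usage sketch (illustrative; 'example.py' is a hypothetical file).  The
# stream is decoded with the detected encoding, so it can be read as text:
#
#     with open('example.py') as f:
#         source = f.read()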


def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects.  Each call to the function
    should return one line of input as bytes.  Alternatively, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile, 'rb').__next__  # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found.  The line passed is the
    logical line; continuation lines are included.

    The first token sequence will always be an ENCODING token
    which tells you which encoding was used to decode the bytes stream.
    """
    # This import is here to avoid problems when the itertools module is not
    # built yet and tokenize is imported.
    from itertools import chain, repeat
    encoding, consumed = detect_encoding(readline)
    rl_gen = iter(readline, b"")
    empty = repeat(b"")
    return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)


def _tokenize(readline, encoding):
    lnum = parenlev = continued = 0
    numchars = '0123456789'
    contstr, needcont = '', 0
    contline = None
    indents = [0]

    # 'stashed' and 'async_*' are used for async/await parsing
    stashed = None
    async_def = False
    async_def_indent = 0
    async_def_nl = False

    if encoding is not None:
        if encoding == "utf-8-sig":
            # BOM will already have been stripped.
            encoding = "utf-8"
        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
    while True:  # loop over lines in stream
        try:
            line = readline()
        except StopIteration:
            line = b''

        if encoding is not None:
            line = line.decode(encoding)
        lnum += 1
        pos, max = 0, len(line)

        if contstr:  # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield TokenInfo(STRING, contstr + line[:end],
                                strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                yield TokenInfo(ERRORTOKEN, contstr + line,
                                strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:  # measure leading whitespace
                if line[pos] == ' ':
                    column += 1
                elif line[pos] == '\t':
                    column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f':
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break

            if line[pos] in '#\r\n':  # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    nl_pos = pos + len(comment_token)
                    yield TokenInfo(COMMENT, comment_token,
                                    (lnum, pos), (lnum, pos + len(comment_token)), line)
                    yield TokenInfo(NL, line[nl_pos:],
                                    (lnum, nl_pos), (lnum, len(line)), line)
                else:
                    yield TokenInfo((NL, COMMENT)[line[pos] == '#'], line[pos:],
                                    (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:  # count indents or dedents
                indents.append(column)
                yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]

                if async_def and async_def_indent >= indents[-1]:
                    async_def = False
                    async_def_nl = False
                    async_def_indent = 0

                yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)

            if async_def and async_def_nl and async_def_indent >= indents[-1]:
                async_def = False
                async_def_nl = False
                async_def_indent = 0

        else:  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            pseudomatch = _compile(PseudoToken).match(line, pos)
            if pseudomatch:  # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                if start == end:
                    continue
                token, initial = line[start:end], line[start]

                if (initial in numchars or  # ordinary number
                        (initial == '.' and token != '.' and token != '...')):
                    yield TokenInfo(NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    if stashed:
                        yield stashed
                        stashed = None
                    if parenlev > 0:
                        yield TokenInfo(NL, token, spos, epos, line)
                    else:
                        yield TokenInfo(NEWLINE, token, spos, epos, line)
                        if async_def:
                            async_def_nl = True

                elif initial == '#':
                    assert not token.endswith("\n")
                    if stashed:
                        yield stashed
                        stashed = None
                    yield TokenInfo(COMMENT, token, spos, epos, line)

                elif token in triple_quoted:
                    endprog = _compile(endpats[token])
                    endmatch = endprog.match(line, pos)
                    if endmatch:  # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield TokenInfo(STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)  # multiple lines
                        contstr = line[start:]
                        contline = line
                        break

                # Check up to the first 3 chars of the token to see if
                # they're in the single_quoted set.  If so, they start
                # a string.
                # We're using the first 3, because we're looking for
                # "rb'" (for example) at the start of the token.  If
                # we switch to longer prefixes, this needs to be
                # adjusted.
                # Note that initial == token[:1].
                # Also note that single quote checking must come after
                # triple quote checking (above).
                elif (initial in single_quoted or
                      token[:2] in single_quoted or
                      token[:3] in single_quoted):
                    if token[-1] == '\n':  # continued string
                        strstart = (lnum, start)
                        # Again, using the first 3 chars of the
                        # token.  This is looking for the matching end
                        # regex for the correct type of quote
                        # character.  So it's really looking for
                        # endpats["'"] or endpats['"'], by trying to
                        # skip string prefix characters, if any.
                        endprog = _compile(endpats.get(initial) or
                                           endpats.get(token[1]) or
                                           endpats.get(token[2]))
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:  # ordinary string
                        yield TokenInfo(STRING, token, spos, epos, line)

                elif initial.isidentifier():  # ordinary name
                    if token in ('async', 'await'):
                        if async_def:
                            yield TokenInfo(
                                ASYNC if token == 'async' else AWAIT,
                                token, spos, epos, line)
                            continue

                    tok = TokenInfo(NAME, token, spos, epos, line)
                    if token == 'async' and not stashed:
                        stashed = tok
                        continue

                    if token == 'def':
                        if (stashed
                                and stashed.type == NAME
                                and stashed.string == 'async'):

                            async_def = True
                            async_def_indent = indents[-1]

                            yield TokenInfo(ASYNC, stashed.string,
                                            stashed.start, stashed.end,
                                            stashed.line)
                            stashed = None

                    if stashed:
                        yield stashed
                        stashed = None

                    yield tok
                elif initial == '\\':  # continued stmt
                    continued = 1
                else:
                    if initial in '([{':
                        parenlev += 1
                    elif initial in ')]}':
                        parenlev -= 1
                    if stashed:
                        yield stashed
                        stashed = None
                    yield TokenInfo(OP, token, spos, epos, line)
            else:
                yield TokenInfo(ERRORTOKEN, line[pos],
                                (lnum, pos), (lnum, pos+1), line)
                pos += 1

    if stashed:
        yield stashed
        stashed = None

    for indent in indents[1:]:  # pop remaining indent levels
        yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')


# An undocumented, backwards-compatible API for all the places in the
# standard library that expect to be able to use tokenize with strings
def generate_tokens(readline):
    return _tokenize(readline, None)

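# A usage sketch (illustrative): unlike tokenize(), the readline passed here
# must return str rather than bytes, and no ENCODING token is produced:
#
#     import io
#     for tok in generate_tokens(io.StringIO("x = 1\n").readline):
#         print(tok)
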
def main():
    import argparse

    # Helper error handling routines
    def perror(message):
        print(message, file=sys.stderr)

    def error(message, filename=None, location=None):
        if location:
            args = (filename,) + location + (message,)
            perror("%s:%d:%d: error: %s" % args)
        elif filename:
            perror("%s: error: %s" % (filename, message))
        else:
            perror("error: %s" % message)
        sys.exit(1)

    # Parse the arguments and options
    parser = argparse.ArgumentParser(prog='python -m tokenize')
    parser.add_argument(dest='filename', nargs='?',
                        metavar='filename.py',
                        help='the file to tokenize; defaults to stdin')
    parser.add_argument('-e', '--exact', dest='exact', action='store_true',
                        help='display token names using the exact type')
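    # Typical invocations (illustrative): `python -m tokenize example.py`,
    # or `python -m tokenize -e example.py` to report exact operator types.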
    args = parser.parse_args()

    try:
        # Tokenize the input
        if args.filename:
            filename = args.filename
            with _builtin_open(filename, 'rb') as f:
                tokens = list(tokenize(f.readline))
        else:
            filename = "<stdin>"
            tokens = _tokenize(sys.stdin.readline, None)

        # Output the tokenization
        for token in tokens:
            token_type = token.type
            if args.exact:
                token_type = token.exact_type
            token_range = "%d,%d-%d,%d:" % (token.start + token.end)
            print("%-20s%-15s%-15r" %
                  (token_range, tok_name[token_type], token.string))
    except IndentationError as err:
        line, column = err.args[1][1:3]
        error(err.args[0], filename, (line, column))
    except TokenError as err:
        line, column = err.args[1]
        error(err.args[0], filename, (line, column))
    except SyntaxError as err:
        error(err, filename)
    except OSError as err:
        error(err)
    except KeyboardInterrupt:
        print("interrupted\n")
    except Exception as err:
        perror("unexpected error: %s" % err)
        raise

if __name__ == "__main__":
    main()