doctests = """
Tests for the tokenize module.

The tests can be really simple. Given a small fragment of source
code, print out a table with tokens. The ENDMARKER is omitted for
brevity.

    >>> dump_tokens("1 + 1")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NUMBER     '1'           (1, 0) (1, 1)
    OP         '+'           (1, 2) (1, 3)
    NUMBER     '1'           (1, 4) (1, 5)

    >>> dump_tokens("if False:\\n"
    ...             "    # NL\\n"
    ...             "    True = False # NEWLINE\\n")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NAME       'if'          (1, 0) (1, 2)
    NAME       'False'       (1, 3) (1, 8)
    OP         ':'           (1, 8) (1, 9)
    NEWLINE    '\\n'          (1, 9) (1, 10)
    COMMENT    '# NL'        (2, 4) (2, 8)
    NL         '\\n'          (2, 8) (2, 9)
    INDENT     '    '        (3, 0) (3, 4)
    NAME       'True'        (3, 4) (3, 8)
    OP         '='           (3, 9) (3, 10)
    NAME       'False'       (3, 11) (3, 16)
    COMMENT    '# NEWLINE'   (3, 17) (3, 26)
    NEWLINE    '\\n'          (3, 26) (3, 27)
    DEDENT     ''            (4, 0) (4, 0)

    >>> indent_error_file = \"""
    ... def k(x):
    ...     x += 2
    ...   x += 5
    ...     \"""
    >>> readline = BytesIO(indent_error_file.encode('utf-8')).readline
    >>> for tok in tokenize(readline): pass
    Traceback (most recent call last):
        ...
    IndentationError: unindent does not match any outer indentation level

There are some standard formatting practices that are easy to get right.

    >>> roundtrip("if x == 1:\\n"
    ...           "    print(x)\\n")
    True

    >>> roundtrip("# This is a comment\\n# This also")
    True

Some people use different formatting conventions, which makes
untokenize a little trickier. Note that this test involves trailing
whitespace after the colon.

    >>> roundtrip("if x == 1 : \\n"
    ...           "    print(x)\\n")
    True

    >>> f = support.findfile("tokenize_tests.txt")
    >>> roundtrip(open(f, 'rb'))
    True

    >>> roundtrip("if x == 1:\\n"
    ...           "    # A comment by itself.\\n"
    ...           "    print(x) # Comment here, too.\\n"
    ...           "    # Another comment.\\n"
    ...           "after_if = True\\n")
    True

    >>> roundtrip("if (x # The comments need to go in the right place\\n"
    ...           "    == 1):\\n"
    ...           "    print('x==1')\\n")
    True

    >>> roundtrip("class Test: # A comment here\\n"
    ...           "  # A comment with weird indent\\n"
    ...           "  after_com = 5\\n"
    ...           "  def x(m): return m*5 # a one liner\\n"
    ...           "  def y(m): # A whitespace after the colon\\n"
    ...           "     return y*4 # 3-space indent\\n")
    True

Some error-handling code

    >>> roundtrip("try: import somemodule\\n"
    ...           "except ImportError: # comment\\n"
    ...           "    print('Can not import' # comment2\\n)"
    ...           "else: print('Loaded')\\n")
    True

Balancing continuation

    >>> roundtrip("a = (3,4, \\n"
    ...           "5,6)\\n"
    ...           "y = [3, 4,\\n"
    ...           "5]\\n"
    ...           "z = {'a': 5,\\n"
    ...           "'b':15, 'c':True}\\n"
    ...           "x = len(y) + 5 - a[\\n"
    ...           "3] - a[2]\\n"
    ...           "+ len(z) - z[\\n"
    ...           "'b']\\n")
    True

Ordinary integers and binary operators

    >>> dump_tokens("0xff <= 255")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NUMBER     '0xff'        (1, 0) (1, 4)
    OP         '<='          (1, 5) (1, 7)
    NUMBER     '255'         (1, 8) (1, 11)
    >>> dump_tokens("0b10 <= 255")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NUMBER     '0b10'        (1, 0) (1, 4)
    OP         '<='          (1, 5) (1, 7)
    NUMBER     '255'         (1, 8) (1, 11)
    >>> dump_tokens("0o123 <= 0O123")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NUMBER     '0o123'       (1, 0) (1, 5)
    OP         '<='          (1, 6) (1, 8)
    NUMBER     '0O123'       (1, 9) (1, 14)
    >>> dump_tokens("1234567 > ~0x15")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NUMBER     '1234567'     (1, 0) (1, 7)
    OP         '>'           (1, 8) (1, 9)
    OP         '~'           (1, 10) (1, 11)
    NUMBER     '0x15'        (1, 11) (1, 15)
    >>> dump_tokens("2134568 != 1231515")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NUMBER     '2134568'     (1, 0) (1, 7)
    OP         '!='          (1, 8) (1, 10)
    NUMBER     '1231515'     (1, 11) (1, 18)
    >>> dump_tokens("(-124561-1) & 200000000")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    OP         '('           (1, 0) (1, 1)
    OP         '-'           (1, 1) (1, 2)
    NUMBER     '124561'      (1, 2) (1, 8)
    OP         '-'           (1, 8) (1, 9)
    NUMBER     '1'           (1, 9) (1, 10)
    OP         ')'           (1, 10) (1, 11)
    OP         '&'           (1, 12) (1, 13)
    NUMBER     '200000000'   (1, 14) (1, 23)
    >>> dump_tokens("0xdeadbeef != -1")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NUMBER     '0xdeadbeef'  (1, 0) (1, 10)
    OP         '!='          (1, 11) (1, 13)
    OP         '-'           (1, 14) (1, 15)
    NUMBER     '1'           (1, 15) (1, 16)
    >>> dump_tokens("0xdeadc0de & 12345")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NUMBER     '0xdeadc0de'  (1, 0) (1, 10)
    OP         '&'           (1, 11) (1, 12)
    NUMBER     '12345'       (1, 13) (1, 18)
    >>> dump_tokens("0xFF & 0x15 | 1234")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NUMBER     '0xFF'        (1, 0) (1, 4)
    OP         '&'           (1, 5) (1, 6)
    NUMBER     '0x15'        (1, 7) (1, 11)
    OP         '|'           (1, 12) (1, 13)
    NUMBER     '1234'        (1, 14) (1, 18)

Long integers

    >>> dump_tokens("x = 0")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    NUMBER     '0'           (1, 4) (1, 5)
    >>> dump_tokens("x = 0xfffffffffff")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    NUMBER     '0xffffffffff (1, 4) (1, 17)
    >>> dump_tokens("x = 123141242151251616110")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    NUMBER     '123141242151 (1, 4) (1, 25)
    >>> dump_tokens("x = -15921590215012591")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    OP         '-'           (1, 4) (1, 5)
    NUMBER     '159215902150 (1, 5) (1, 22)

Floating point numbers

    >>> dump_tokens("x = 3.14159")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    NUMBER     '3.14159'     (1, 4) (1, 11)
    >>> dump_tokens("x = 314159.")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    NUMBER     '314159.'     (1, 4) (1, 11)
    >>> dump_tokens("x = .314159")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    NUMBER     '.314159'     (1, 4) (1, 11)
    >>> dump_tokens("x = 3e14159")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    NUMBER     '3e14159'     (1, 4) (1, 11)
    >>> dump_tokens("x = 3E123")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    NUMBER     '3E123'       (1, 4) (1, 9)
    >>> dump_tokens("x+y = 3e-1230")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NAME       'x'           (1, 0) (1, 1)
    OP         '+'           (1, 1) (1, 2)
    NAME       'y'           (1, 2) (1, 3)
    OP         '='           (1, 4) (1, 5)
    NUMBER     '3e-1230'     (1, 6) (1, 13)
    >>> dump_tokens("x = 3.14e159")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    NUMBER     '3.14e159'    (1, 4) (1, 12)

String literals

    >>> dump_tokens("x = ''; y = \\\"\\\"")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    STRING     "''"          (1, 4) (1, 6)
    OP         ';'           (1, 6) (1, 7)
    NAME       'y'           (1, 8) (1, 9)
    OP         '='           (1, 10) (1, 11)
    STRING     '""'          (1, 12) (1, 14)
    >>> dump_tokens("x = '\\\"'; y = \\\"'\\\"")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    STRING     '\\'"\\''       (1, 4) (1, 7)
    OP         ';'           (1, 7) (1, 8)
    NAME       'y'           (1, 9) (1, 10)
    OP         '='           (1, 11) (1, 12)
    STRING     '"\\'"'        (1, 13) (1, 16)
    >>> dump_tokens("x = \\\"doesn't \\\"shrink\\\", does it\\\"")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    STRING     '"doesn\\'t "' (1, 4) (1, 14)
    NAME       'shrink'      (1, 14) (1, 20)
    STRING     '", does it"' (1, 20) (1, 31)
    >>> dump_tokens("x = 'abc' + 'ABC'")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    STRING     "'abc'"       (1, 4) (1, 9)
    OP         '+'           (1, 10) (1, 11)
    STRING     "'ABC'"       (1, 12) (1, 17)
    >>> dump_tokens('y = "ABC" + "ABC"')
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NAME       'y'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    STRING     '"ABC"'       (1, 4) (1, 9)
    OP         '+'           (1, 10) (1, 11)
    STRING     '"ABC"'       (1, 12) (1, 17)
    >>> dump_tokens("x = r'abc' + r'ABC' + R'ABC' + R'ABC'")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    STRING     "r'abc'"      (1, 4) (1, 10)
    OP         '+'           (1, 11) (1, 12)
    STRING     "r'ABC'"      (1, 13) (1, 19)
    OP         '+'           (1, 20) (1, 21)
    STRING     "R'ABC'"      (1, 22) (1, 28)
    OP         '+'           (1, 29) (1, 30)
    STRING     "R'ABC'"      (1, 31) (1, 37)
    >>> dump_tokens('y = r"abc" + r"ABC" + R"ABC" + R"ABC"')
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NAME       'y'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    STRING     'r"abc"'      (1, 4) (1, 10)
    OP         '+'           (1, 11) (1, 12)
    STRING     'r"ABC"'      (1, 13) (1, 19)
    OP         '+'           (1, 20) (1, 21)
    STRING     'R"ABC"'      (1, 22) (1, 28)
    OP         '+'           (1, 29) (1, 30)
    STRING     'R"ABC"'      (1, 31) (1, 37)

Operators

    >>> dump_tokens("def d22(a, b, c=2, d=2, *k): pass")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NAME       'def'         (1, 0) (1, 3)
    NAME       'd22'         (1, 4) (1, 7)
    OP         '('           (1, 7) (1, 8)
    NAME       'a'           (1, 8) (1, 9)
    OP         ','           (1, 9) (1, 10)
    NAME       'b'           (1, 11) (1, 12)
    OP         ','           (1, 12) (1, 13)
    NAME       'c'           (1, 14) (1, 15)
    OP         '='           (1, 15) (1, 16)
    NUMBER     '2'           (1, 16) (1, 17)
    OP         ','           (1, 17) (1, 18)
    NAME       'd'           (1, 19) (1, 20)
    OP         '='           (1, 20) (1, 21)
    NUMBER     '2'           (1, 21) (1, 22)
    OP         ','           (1, 22) (1, 23)
    OP         '*'           (1, 24) (1, 25)
    NAME       'k'           (1, 25) (1, 26)
    OP         ')'           (1, 26) (1, 27)
    OP         ':'           (1, 27) (1, 28)
    NAME       'pass'        (1, 29) (1, 33)
    >>> dump_tokens("def d01v_(a=1, *k, **w): pass")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NAME       'def'         (1, 0) (1, 3)
    NAME       'd01v_'       (1, 4) (1, 9)
    OP         '('           (1, 9) (1, 10)
    NAME       'a'           (1, 10) (1, 11)
    OP         '='           (1, 11) (1, 12)
    NUMBER     '1'           (1, 12) (1, 13)
    OP         ','           (1, 13) (1, 14)
    OP         '*'           (1, 15) (1, 16)
    NAME       'k'           (1, 16) (1, 17)
    OP         ','           (1, 17) (1, 18)
    OP         '**'          (1, 19) (1, 21)
    NAME       'w'           (1, 21) (1, 22)
    OP         ')'           (1, 22) (1, 23)
    OP         ':'           (1, 23) (1, 24)
    NAME       'pass'        (1, 25) (1, 29)

Comparison

    >>> dump_tokens("if 1 < 1 > 1 == 1 >= 5 <= 0x15 <= 0x12 != " +
    ...             "1 and 5 in 1 not in 1 is 1 or 5 is not 1: pass")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NAME       'if'          (1, 0) (1, 2)
    NUMBER     '1'           (1, 3) (1, 4)
    OP         '<'           (1, 5) (1, 6)
    NUMBER     '1'           (1, 7) (1, 8)
    OP         '>'           (1, 9) (1, 10)
    NUMBER     '1'           (1, 11) (1, 12)
    OP         '=='          (1, 13) (1, 15)
    NUMBER     '1'           (1, 16) (1, 17)
    OP         '>='          (1, 18) (1, 20)
    NUMBER     '5'           (1, 21) (1, 22)
    OP         '<='          (1, 23) (1, 25)
    NUMBER     '0x15'        (1, 26) (1, 30)
    OP         '<='          (1, 31) (1, 33)
    NUMBER     '0x12'        (1, 34) (1, 38)
    OP         '!='          (1, 39) (1, 41)
    NUMBER     '1'           (1, 42) (1, 43)
    NAME       'and'         (1, 44) (1, 47)
    NUMBER     '5'           (1, 48) (1, 49)
    NAME       'in'          (1, 50) (1, 52)
    NUMBER     '1'           (1, 53) (1, 54)
    NAME       'not'         (1, 55) (1, 58)
    NAME       'in'          (1, 59) (1, 61)
    NUMBER     '1'           (1, 62) (1, 63)
    NAME       'is'          (1, 64) (1, 66)
    NUMBER     '1'           (1, 67) (1, 68)
    NAME       'or'          (1, 69) (1, 71)
    NUMBER     '5'           (1, 72) (1, 73)
    NAME       'is'          (1, 74) (1, 76)
    NAME       'not'         (1, 77) (1, 80)
    NUMBER     '1'           (1, 81) (1, 82)
    OP         ':'           (1, 82) (1, 83)
    NAME       'pass'        (1, 84) (1, 88)

Shift

    >>> dump_tokens("x = 1 << 1 >> 5")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    NUMBER     '1'           (1, 4) (1, 5)
    OP         '<<'          (1, 6) (1, 8)
    NUMBER     '1'           (1, 9) (1, 10)
    OP         '>>'          (1, 11) (1, 13)
    NUMBER     '5'           (1, 14) (1, 15)

Additive

    >>> dump_tokens("x = 1 - y + 15 - 1 + 0x124 + z + a[5]")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    NUMBER     '1'           (1, 4) (1, 5)
    OP         '-'           (1, 6) (1, 7)
    NAME       'y'           (1, 8) (1, 9)
    OP         '+'           (1, 10) (1, 11)
    NUMBER     '15'          (1, 12) (1, 14)
    OP         '-'           (1, 15) (1, 16)
    NUMBER     '1'           (1, 17) (1, 18)
    OP         '+'           (1, 19) (1, 20)
    NUMBER     '0x124'       (1, 21) (1, 26)
    OP         '+'           (1, 27) (1, 28)
    NAME       'z'           (1, 29) (1, 30)
    OP         '+'           (1, 31) (1, 32)
    NAME       'a'           (1, 33) (1, 34)
    OP         '['           (1, 34) (1, 35)
    NUMBER     '5'           (1, 35) (1, 36)
    OP         ']'           (1, 36) (1, 37)

Multiplicative

    >>> dump_tokens("x = 1//1*1/5*12%0x12")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NAME       'x'           (1, 0) (1, 1)
    OP         '='           (1, 2) (1, 3)
    NUMBER     '1'           (1, 4) (1, 5)
    OP         '//'          (1, 5) (1, 7)
    NUMBER     '1'           (1, 7) (1, 8)
    OP         '*'           (1, 8) (1, 9)
    NUMBER     '1'           (1, 9) (1, 10)
    OP         '/'           (1, 10) (1, 11)
    NUMBER     '5'           (1, 11) (1, 12)
    OP         '*'           (1, 12) (1, 13)
    NUMBER     '12'          (1, 13) (1, 15)
    OP         '%'           (1, 15) (1, 16)
    NUMBER     '0x12'        (1, 16) (1, 20)

Unary

    >>> dump_tokens("~1 ^ 1 & 1 |1 ^ -1")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    OP         '~'           (1, 0) (1, 1)
    NUMBER     '1'           (1, 1) (1, 2)
    OP         '^'           (1, 3) (1, 4)
    NUMBER     '1'           (1, 5) (1, 6)
    OP         '&'           (1, 7) (1, 8)
    NUMBER     '1'           (1, 9) (1, 10)
    OP         '|'           (1, 11) (1, 12)
    NUMBER     '1'           (1, 12) (1, 13)
    OP         '^'           (1, 14) (1, 15)
    OP         '-'           (1, 16) (1, 17)
    NUMBER     '1'           (1, 17) (1, 18)
    >>> dump_tokens("-1*1/1+1*1//1 - ---1**1")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    OP         '-'           (1, 0) (1, 1)
    NUMBER     '1'           (1, 1) (1, 2)
    OP         '*'           (1, 2) (1, 3)
    NUMBER     '1'           (1, 3) (1, 4)
    OP         '/'           (1, 4) (1, 5)
    NUMBER     '1'           (1, 5) (1, 6)
    OP         '+'           (1, 6) (1, 7)
    NUMBER     '1'           (1, 7) (1, 8)
    OP         '*'           (1, 8) (1, 9)
    NUMBER     '1'           (1, 9) (1, 10)
    OP         '//'          (1, 10) (1, 12)
    NUMBER     '1'           (1, 12) (1, 13)
    OP         '-'           (1, 14) (1, 15)
    OP         '-'           (1, 16) (1, 17)
    OP         '-'           (1, 17) (1, 18)
    OP         '-'           (1, 18) (1, 19)
    NUMBER     '1'           (1, 19) (1, 20)
    OP         '**'          (1, 20) (1, 22)
    NUMBER     '1'           (1, 22) (1, 23)

Selector

    >>> dump_tokens("import sys, time\\nx = sys.modules['time'].time()")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NAME       'import'      (1, 0) (1, 6)
    NAME       'sys'         (1, 7) (1, 10)
    OP         ','           (1, 10) (1, 11)
    NAME       'time'        (1, 12) (1, 16)
    NEWLINE    '\\n'          (1, 16) (1, 17)
    NAME       'x'           (2, 0) (2, 1)
    OP         '='           (2, 2) (2, 3)
    NAME       'sys'         (2, 4) (2, 7)
    OP         '.'           (2, 7) (2, 8)
    NAME       'modules'     (2, 8) (2, 15)
    OP         '['           (2, 15) (2, 16)
    STRING     "'time'"      (2, 16) (2, 22)
    OP         ']'           (2, 22) (2, 23)
    OP         '.'           (2, 23) (2, 24)
    NAME       'time'        (2, 24) (2, 28)
    OP         '('           (2, 28) (2, 29)
    OP         ')'           (2, 29) (2, 30)

Methods

    >>> dump_tokens("@staticmethod\\ndef foo(x,y): pass")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    OP         '@'           (1, 0) (1, 1)
    NAME       'staticmethod (1, 1) (1, 13)
    NEWLINE    '\\n'          (1, 13) (1, 14)
    NAME       'def'         (2, 0) (2, 3)
    NAME       'foo'         (2, 4) (2, 7)
    OP         '('           (2, 7) (2, 8)
    NAME       'x'           (2, 8) (2, 9)
    OP         ','           (2, 9) (2, 10)
    NAME       'y'           (2, 10) (2, 11)
    OP         ')'           (2, 11) (2, 12)
    OP         ':'           (2, 12) (2, 13)
    NAME       'pass'        (2, 14) (2, 18)

Backslash means line continuation, except for comments

    >>> roundtrip("x=1+\\\\n"
    ...           "1\\n"
    ...           "# This is a comment\\\\n"
    ...           "# This also\\n")
    True
    >>> roundtrip("# Comment \\\\nx = 0")
    True

Two string literals on the same line

    >>> roundtrip("'' ''")
    True

Test roundtrip on random python modules.
Pass the '-ucpu' option to process the full directory.

    >>> import random
    >>> tempdir = os.path.dirname(f) or os.curdir
    >>> testfiles = glob.glob(os.path.join(tempdir, "test*.py"))

tokenize is broken on test_pep3131.py because regular expressions are
broken on the obscure unicode identifiers in it. *sigh*
    >>> testfiles.remove(os.path.join(tempdir, "test_pep3131.py"))
    >>> if not support.is_resource_enabled("cpu"):
    ...     testfiles = random.sample(testfiles, 10)
    ...
    >>> for testfile in testfiles:
    ...     if not roundtrip(open(testfile, 'rb')):
    ...         print("Roundtrip failed for file %s" % testfile)
    ...         break
    ... else: True
    True

Evil tabs

    >>> dump_tokens("def f():\\n\\tif x\\n        \\tpass")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NAME       'def'         (1, 0) (1, 3)
    NAME       'f'           (1, 4) (1, 5)
    OP         '('           (1, 5) (1, 6)
    OP         ')'           (1, 6) (1, 7)
    OP         ':'           (1, 7) (1, 8)
    NEWLINE    '\\n'          (1, 8) (1, 9)
    INDENT     '\\t'          (2, 0) (2, 1)
    NAME       'if'          (2, 1) (2, 3)
    NAME       'x'           (2, 4) (2, 5)
    NEWLINE    '\\n'          (2, 5) (2, 6)
    INDENT     '        \\t'  (3, 0) (3, 9)
    NAME       'pass'        (3, 9) (3, 13)
    DEDENT     ''            (4, 0) (4, 0)
    DEDENT     ''            (4, 0) (4, 0)

Non-ascii identifiers

    >>> dump_tokens("Örter = 'places'\\ngrün = 'green'")
    ENCODING   'utf-8'       (0, 0) (0, 0)
    NAME       'Örter'       (1, 0) (1, 5)
    OP         '='           (1, 6) (1, 7)
    STRING     "'places'"    (1, 8) (1, 16)
    NEWLINE    '\\n'          (1, 16) (1, 17)
    NAME       'grün'        (2, 0) (2, 4)
    OP         '='           (2, 5) (2, 6)
    STRING     "'green'"     (2, 7) (2, 14)
"""

from test import support
from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP,
                      STRING, ENDMARKER, ENCODING, tok_name, detect_encoding,
                      open as tokenize_open)
from io import BytesIO
from unittest import TestCase
import os, sys, glob
import token

def dump_tokens(s):
    """Print out the tokens in s in a table format.

    The ENDMARKER is omitted.
    """
    f = BytesIO(s.encode('utf-8'))
    for type, token, start, end, line in tokenize(f.readline):
        if type == ENDMARKER:
            break
        type = tok_name[type]
        print("%(type)-10.10s %(token)-13.13r %(start)s %(end)s" % locals())

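# A clarifying sketch of the table layout above, stated as an observation
# about the format string rather than documented tokenize behaviour: the
# "-13.13r" conversion both pads and truncates the token repr at 13
# characters, which is why long tokens (e.g. 0xfffffffffff) appear clipped,
# without a closing quote, in the expected doctest output. The helper name
# is illustrative and is not used by the tests.
def _format_token_row(type_name, token_string, start, end):
    # Mirrors the print() call in dump_tokens() for a single token.
    return "%-10.10s %-13.13r %s %s" % (type_name, token_string, start, end)
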
def roundtrip(f):
    """
    Test roundtrip for `untokenize`. `f` is an open file or a string.
    The source code in f is tokenized, converted back to source code via
    tokenize.untokenize(), and tokenized again from the latter. The test
    fails if the second tokenization doesn't match the first.
    """
    if isinstance(f, str):
        f = BytesIO(f.encode('utf-8'))
    try:
        token_list = list(tokenize(f.readline))
    finally:
        f.close()
    tokens1 = [tok[:2] for tok in token_list]
    new_bytes = untokenize(tokens1)
    readline = (line for line in new_bytes.splitlines(keepends=True)).__next__
    tokens2 = [tok[:2] for tok in tokenize(readline)]
    return tokens1 == tokens2

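# A minimal sketch of the property roundtrip() checks, assuming only the
# documented tokenize/untokenize contract: untokenize() on (type, string)
# pairs yields source that tokenizes to the same stream, with no promise of
# byte-for-byte equality (spacing may move around). Illustrative only.
def _regenerate_source(source_bytes):
    # Returns the regenerated source bytes for inspection.
    pairs = [tok[:2] for tok in tokenize(BytesIO(source_bytes).readline)]
    return untokenize(pairs)
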
# This is an example from the docs, set up as a doctest.
def decistmt(s):
    """Substitute Decimals for floats in a string of statements.

    >>> from decimal import Decimal
    >>> s = 'print(+21.3e-5*-.1234/81.7)'
    >>> decistmt(s)
    "print (+Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7'))"

    The format of the exponent is inherited from the platform C library.
    Known cases are "e-007" (Windows) and "e-07" (not Windows).  Since
    we're only showing 11 digits, and the 12th isn't close to 5, the
    rest of the output should be platform-independent.

    >>> exec(s) #doctest: +ELLIPSIS
    -3.2171603427...e-0...7

    Output from calculations with Decimal should be identical across all
    platforms.

    >>> exec(decistmt(s))
    -3.217160342717258261933904529E-7
    """
    result = []
    g = tokenize(BytesIO(s.encode('utf-8')).readline)  # tokenize the string
    for toknum, tokval, _, _, _ in g:
        if toknum == NUMBER and '.' in tokval:  # replace NUMBER tokens
            result.extend([
                (NAME, 'Decimal'),
                (OP, '('),
                (STRING, repr(tokval)),
                (OP, ')')
            ])
        else:
            result.append((toknum, tokval))
    return untokenize(result).decode('utf-8')


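# The same stream-rewriting pattern as decistmt(), sketched generically; the
# helper name and callback are illustrative, not part of the tokenize API.
# Walk the tokens, substitute a sequence of (type, string) pairs for the ones
# of interest, and untokenize the result.
def _rewrite_numbers(s, make_replacement):
    result = []
    g = tokenize(BytesIO(s.encode('utf-8')).readline)
    for toknum, tokval, _, _, _ in g:
        if toknum == NUMBER:
            # make_replacement returns a sequence of (type, string) pairs.
            result.extend(make_replacement(tokval))
        else:
            result.append((toknum, tokval))
    return untokenize(result).decode('utf-8')
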
class TestTokenizerAdheresToPep0263(TestCase):
    """
    Test that tokenizer adheres to the coding behaviour stipulated in PEP 0263.
    """

    def _testFile(self, filename):
        path = os.path.join(os.path.dirname(__file__), filename)
        return roundtrip(open(path, 'rb'))

    def test_utf8_coding_cookie_and_no_utf8_bom(self):
        f = 'tokenize_tests-utf8-coding-cookie-and-no-utf8-bom-sig.txt'
        self.assertTrue(self._testFile(f))

    def test_latin1_coding_cookie_and_utf8_bom(self):
        """
        As per PEP 0263, if a file starts with a utf-8 BOM signature, the only
        allowed encoding for the comment is 'utf-8'. The text file used in
        this test starts with a BOM signature, but specifies latin1 as the
        coding, so verify that a SyntaxError is raised, which matches the
        behaviour of the interpreter when it encounters a similar condition.
        """
        f = 'tokenize_tests-latin1-coding-cookie-and-utf8-bom-sig.txt'
        self.assertRaises(SyntaxError, self._testFile, f)

    def test_no_coding_cookie_and_utf8_bom(self):
        f = 'tokenize_tests-no-coding-cookie-and-utf8-bom-sig-only.txt'
        self.assertTrue(self._testFile(f))

    def test_utf8_coding_cookie_and_utf8_bom(self):
        f = 'tokenize_tests-utf8-coding-cookie-and-utf8-bom-sig.txt'
        self.assertTrue(self._testFile(f))

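# A minimal sketch of the PEP 263 decoding path these tests rely on, assuming
# only tokenize.open() (imported above as tokenize_open): it applies the same
# BOM/cookie detection as tokenize() and hands back an already-decoded text
# file object. The helper name is illustrative and unused by the suite.
def _read_source_text(path):
    with tokenize_open(path) as fp:
        return fp.read()
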
class Test_Tokenize(TestCase):

    def test__tokenize_decodes_with_specified_encoding(self):
        literal = '"ЉЊЈЁЂ"'
        line = literal.encode('utf-8')
        first = False
        def readline():
            nonlocal first
            if not first:
                first = True
                return line
            else:
                return b''

        # skip the initial encoding token and the end token
        tokens = list(_tokenize(readline, encoding='utf-8'))[1:-1]
        expected_tokens = [(3, '"ЉЊЈЁЂ"', (1, 0), (1, 7), '"ЉЊЈЁЂ"')]
        self.assertEqual(tokens, expected_tokens,
                         "bytes not decoded with encoding")

    def test__tokenize_does_not_decode_with_encoding_none(self):
        literal = '"ЉЊЈЁЂ"'
        first = False
        def readline():
            nonlocal first
            if not first:
                first = True
                return literal
            else:
                return b''

        # skip the end token
        tokens = list(_tokenize(readline, encoding=None))[:-1]
        expected_tokens = [(3, '"ЉЊЈЁЂ"', (1, 0), (1, 7), '"ЉЊЈЁЂ"')]
        self.assertEqual(tokens, expected_tokens,
                         "string not tokenized when encoding is None")

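# A brief sketch of the split the two tests above rely on, stated as an
# assumption drawn from their usage rather than documented API: the public
# tokenize() detects the encoding itself and yields an ENCODING token first,
# while the private _tokenize() takes the encoding as an argument (None
# meaning the input is already decoded). The helper is illustrative only.
def _tokens_without_markers(readline, encoding='utf-8'):
    # Drop the leading ENCODING and trailing ENDMARKER tokens, the way the
    # tests above slice their token lists.
    return list(_tokenize(readline, encoding=encoding))[1:-1]
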
class TestDetectEncoding(TestCase):

    def get_readline(self, lines):
        index = 0
        def readline():
            nonlocal index
            if index == len(lines):
                raise StopIteration
            line = lines[index]
            index += 1
            return line
        return readline

    def test_no_bom_no_encoding_cookie(self):
        lines = (
            b'# something\n',
            b'print(something)\n',
            b'do_something(else)\n'
        )
        encoding, consumed_lines = detect_encoding(self.get_readline(lines))
        self.assertEqual(encoding, 'utf-8')
        self.assertEqual(consumed_lines, list(lines[:2]))

    def test_bom_no_cookie(self):
        lines = (
            b'\xef\xbb\xbf# something\n',
            b'print(something)\n',
            b'do_something(else)\n'
        )
        encoding, consumed_lines = detect_encoding(self.get_readline(lines))
        self.assertEqual(encoding, 'utf-8-sig')
        self.assertEqual(consumed_lines,
                         [b'# something\n', b'print(something)\n'])

    def test_cookie_first_line_no_bom(self):
        lines = (
            b'# -*- coding: latin-1 -*-\n',
            b'print(something)\n',
            b'do_something(else)\n'
        )
        encoding, consumed_lines = detect_encoding(self.get_readline(lines))
        self.assertEqual(encoding, 'iso-8859-1')
        self.assertEqual(consumed_lines, [b'# -*- coding: latin-1 -*-\n'])

    def test_matched_bom_and_cookie_first_line(self):
        lines = (
            b'\xef\xbb\xbf# coding=utf-8\n',
            b'print(something)\n',
            b'do_something(else)\n'
        )
        encoding, consumed_lines = detect_encoding(self.get_readline(lines))
        self.assertEqual(encoding, 'utf-8-sig')
        self.assertEqual(consumed_lines, [b'# coding=utf-8\n'])

    def test_mismatched_bom_and_cookie_first_line_raises_syntaxerror(self):
        lines = (
            b'\xef\xbb\xbf# vim: set fileencoding=ascii :\n',
            b'print(something)\n',
            b'do_something(else)\n'
        )
        readline = self.get_readline(lines)
        self.assertRaises(SyntaxError, detect_encoding, readline)

    def test_cookie_second_line_no_bom(self):
        lines = (
            b'#! something\n',
            b'# vim: set fileencoding=ascii :\n',
            b'print(something)\n',
            b'do_something(else)\n'
        )
        encoding, consumed_lines = detect_encoding(self.get_readline(lines))
        self.assertEqual(encoding, 'ascii')
        expected = [b'#! something\n', b'# vim: set fileencoding=ascii :\n']
        self.assertEqual(consumed_lines, expected)

    def test_matched_bom_and_cookie_second_line(self):
        lines = (
            b'\xef\xbb\xbf#! something\n',
            b'f# coding=utf-8\n',
            b'print(something)\n',
            b'do_something(else)\n'
        )
        encoding, consumed_lines = detect_encoding(self.get_readline(lines))
        self.assertEqual(encoding, 'utf-8-sig')
        self.assertEqual(consumed_lines,
                         [b'#! something\n', b'f# coding=utf-8\n'])

    def test_mismatched_bom_and_cookie_second_line_raises_syntaxerror(self):
        lines = (
            b'\xef\xbb\xbf#! something\n',
            b'# vim: set fileencoding=ascii :\n',
            b'print(something)\n',
            b'do_something(else)\n'
        )
        readline = self.get_readline(lines)
        self.assertRaises(SyntaxError, detect_encoding, readline)

    def test_latin1_normalization(self):
        # See get_normal_name() in tokenizer.c.
        encodings = ("latin-1", "iso-8859-1", "iso-latin-1", "latin-1-unix",
                     "iso-8859-1-unix", "iso-latin-1-mac")
        for encoding in encodings:
            for rep in ("-", "_"):
                enc = encoding.replace("-", rep)
                lines = (b"#!/usr/bin/python\n",
                         b"# coding: " + enc.encode("ascii") + b"\n",
                         b"print(things)\n",
                         b"do_something += 4\n")
                rl = self.get_readline(lines)
                found, consumed_lines = detect_encoding(rl)
                self.assertEqual(found, "iso-8859-1")

    def test_utf8_normalization(self):
        # See get_normal_name() in tokenizer.c.
        encodings = ("utf-8", "utf-8-mac", "utf-8-unix")
        for encoding in encodings:
            for rep in ("-", "_"):
                enc = encoding.replace("-", rep)
                lines = (b"#!/usr/bin/python\n",
                         b"# coding: " + enc.encode("ascii") + b"\n",
                         b"1 + 3\n")
                rl = self.get_readline(lines)
                found, consumed_lines = detect_encoding(rl)
                self.assertEqual(found, "utf-8")

    def test_short_files(self):
        readline = self.get_readline((b'print(something)\n',))
        encoding, consumed_lines = detect_encoding(readline)
        self.assertEqual(encoding, 'utf-8')
        self.assertEqual(consumed_lines, [b'print(something)\n'])

        encoding, consumed_lines = detect_encoding(self.get_readline(()))
        self.assertEqual(encoding, 'utf-8')
        self.assertEqual(consumed_lines, [])

        readline = self.get_readline((b'\xef\xbb\xbfprint(something)\n',))
        encoding, consumed_lines = detect_encoding(readline)
        self.assertEqual(encoding, 'utf-8-sig')
        self.assertEqual(consumed_lines, [b'print(something)\n'])

        readline = self.get_readline((b'\xef\xbb\xbf',))
        encoding, consumed_lines = detect_encoding(readline)
        self.assertEqual(encoding, 'utf-8-sig')
        self.assertEqual(consumed_lines, [])

        readline = self.get_readline((b'# coding: bad\n',))
        self.assertRaises(SyntaxError, detect_encoding, readline)

    def test_open(self):
        filename = support.TESTFN + '.py'
        self.addCleanup(support.unlink, filename)

        # test coding cookie
        for encoding in ('iso-8859-15', 'utf-8'):
            with open(filename, 'w', encoding=encoding) as fp:
                print("# coding: %s" % encoding, file=fp)
                print("print('euro:\u20ac')", file=fp)
            with tokenize_open(filename) as fp:
                self.assertEqual(fp.encoding, encoding)
                self.assertEqual(fp.mode, 'r')

        # test BOM (no coding cookie)
        with open(filename, 'w', encoding='utf-8-sig') as fp:
            print("print('euro:\u20ac')", file=fp)
        with tokenize_open(filename) as fp:
            self.assertEqual(fp.encoding, 'utf-8-sig')
            self.assertEqual(fp.mode, 'r')

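# A small usage sketch for detect_encoding(), assuming only the behaviour
# pinned down by the tests above: it reads at most two lines, returns the
# normalized encoding name plus the lines it consumed, and raises SyntaxError
# on a BOM/cookie mismatch. The helper name is illustrative and unused.
def _sniff_encoding(source_bytes):
    return detect_encoding(BytesIO(source_bytes).readline)[0]
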
class TestTokenize(TestCase):

    def test_tokenize(self):
        import tokenize as tokenize_module
        encoding = object()
        encoding_used = None
        def mock_detect_encoding(readline):
            return encoding, ['first', 'second']

        def mock__tokenize(readline, encoding):
            nonlocal encoding_used
            encoding_used = encoding
            out = []
            while True:
                next_line = readline()
                if next_line:
                    out.append(next_line)
                    continue
                return out

        counter = 0
        def mock_readline():
            nonlocal counter
            counter += 1
            if counter == 5:
                return b''
            return counter

        orig_detect_encoding = tokenize_module.detect_encoding
        orig__tokenize = tokenize_module._tokenize
        tokenize_module.detect_encoding = mock_detect_encoding
        tokenize_module._tokenize = mock__tokenize
        try:
            results = tokenize(mock_readline)
            self.assertEqual(list(results), ['first', 'second', 1, 2, 3, 4])
        finally:
            tokenize_module.detect_encoding = orig_detect_encoding
            tokenize_module._tokenize = orig__tokenize

        # assertTrue(encoding_used, encoding) only checked truthiness, with
        # the sentinel silently passed as the message; compare identity.
        self.assertIs(encoding_used, encoding)

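    # A short sketch of the exact_type attribute exercised below, assuming
    # the TokenInfo namedtuple returned by tokenize(): generic OP tokens
    # carry a more specific token id (PLUS, LPAR, ...) in tok.exact_type.
    # The helper is illustrative and mirrors assertExactTypeEqual's
    # bookkeeping; it is not called by the suite.
    def _exact_type_names(self, source):
        toks = tokenize(BytesIO(source.encode('utf-8')).readline)
        return [token.tok_name[tok.exact_type] for tok in toks]
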
    def assertExactTypeEqual(self, opstr, *optypes):
        tokens = list(tokenize(BytesIO(opstr.encode('utf-8')).readline))
        num_optypes = len(optypes)
        self.assertEqual(len(tokens), 2 + num_optypes)
        self.assertEqual(token.tok_name[tokens[0].exact_type],
                         token.tok_name[ENCODING])
        for i in range(num_optypes):
            self.assertEqual(token.tok_name[tokens[i + 1].exact_type],
                             token.tok_name[optypes[i]])
        self.assertEqual(token.tok_name[tokens[1 + num_optypes].exact_type],
                         token.tok_name[token.ENDMARKER])

    def test_exact_type(self):
        self.assertExactTypeEqual('()', token.LPAR, token.RPAR)
        self.assertExactTypeEqual('[]', token.LSQB, token.RSQB)
        self.assertExactTypeEqual(':', token.COLON)
        self.assertExactTypeEqual(',', token.COMMA)
        self.assertExactTypeEqual(';', token.SEMI)
        self.assertExactTypeEqual('+', token.PLUS)
        self.assertExactTypeEqual('-', token.MINUS)
        self.assertExactTypeEqual('*', token.STAR)
        self.assertExactTypeEqual('/', token.SLASH)
        self.assertExactTypeEqual('|', token.VBAR)
        self.assertExactTypeEqual('&', token.AMPER)
        self.assertExactTypeEqual('<', token.LESS)
        self.assertExactTypeEqual('>', token.GREATER)
        self.assertExactTypeEqual('=', token.EQUAL)
        self.assertExactTypeEqual('.', token.DOT)
        self.assertExactTypeEqual('%', token.PERCENT)
        self.assertExactTypeEqual('{}', token.LBRACE, token.RBRACE)
        self.assertExactTypeEqual('==', token.EQEQUAL)
        self.assertExactTypeEqual('!=', token.NOTEQUAL)
        self.assertExactTypeEqual('<=', token.LESSEQUAL)
        self.assertExactTypeEqual('>=', token.GREATEREQUAL)
        self.assertExactTypeEqual('~', token.TILDE)
        self.assertExactTypeEqual('^', token.CIRCUMFLEX)
        self.assertExactTypeEqual('<<', token.LEFTSHIFT)
        self.assertExactTypeEqual('>>', token.RIGHTSHIFT)
        self.assertExactTypeEqual('**', token.DOUBLESTAR)
        self.assertExactTypeEqual('+=', token.PLUSEQUAL)
        self.assertExactTypeEqual('-=', token.MINEQUAL)
        self.assertExactTypeEqual('*=', token.STAREQUAL)
        self.assertExactTypeEqual('/=', token.SLASHEQUAL)
        self.assertExactTypeEqual('%=', token.PERCENTEQUAL)
        self.assertExactTypeEqual('&=', token.AMPEREQUAL)
        self.assertExactTypeEqual('|=', token.VBAREQUAL)
        self.assertExactTypeEqual('^=', token.CIRCUMFLEXEQUAL)
        self.assertExactTypeEqual('<<=', token.LEFTSHIFTEQUAL)
        self.assertExactTypeEqual('>>=', token.RIGHTSHIFTEQUAL)
        self.assertExactTypeEqual('**=', token.DOUBLESTAREQUAL)
        self.assertExactTypeEqual('//', token.DOUBLESLASH)
        self.assertExactTypeEqual('//=', token.DOUBLESLASHEQUAL)
        self.assertExactTypeEqual('@', token.AT)

        self.assertExactTypeEqual('a**2+b**2==c**2',
                                  NAME, token.DOUBLESTAR, NUMBER,
                                  token.PLUS,
                                  NAME, token.DOUBLESTAR, NUMBER,
                                  token.EQEQUAL,
                                  NAME, token.DOUBLESTAR, NUMBER)
        self.assertExactTypeEqual('{1, 2, 3}',
                                  token.LBRACE,
                                  token.NUMBER, token.COMMA,
                                  token.NUMBER, token.COMMA,
                                  token.NUMBER,
                                  token.RBRACE)
        self.assertExactTypeEqual('^(x & 0x1)',
                                  token.CIRCUMFLEX,
                                  token.LPAR,
                                  token.NAME, token.AMPER, token.NUMBER,
                                  token.RPAR)

__test__ = {"doctests" : doctests, 'decistmt': decistmt}

def test_main():
    from test import test_tokenize
    support.run_doctest(test_tokenize, True)
    support.run_unittest(TestTokenizerAdheresToPep0263)
    support.run_unittest(Test_Tokenize)
    support.run_unittest(TestDetectEncoding)
    support.run_unittest(TestTokenize)

if __name__ == "__main__":
    test_main()