doctests = """
Tests for the tokenize module.

The tests can be really simple. Given a small fragment of source
code, print out a table with tokens. The ENDMARKER is omitted for
brevity.

    >>> dump_tokens("1 + 1")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NUMBER '1' (1, 0) (1, 1)
    OP '+' (1, 2) (1, 3)
    NUMBER '1' (1, 4) (1, 5)

    >>> dump_tokens("if False:\\n"
    ... "    # NL\\n"
    ... "    True = False # NEWLINE\\n")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NAME 'if' (1, 0) (1, 2)
    NAME 'False' (1, 3) (1, 8)
    OP ':' (1, 8) (1, 9)
    NEWLINE '\\n' (1, 9) (1, 10)
    COMMENT '# NL' (2, 4) (2, 8)
    NL '\\n' (2, 8) (2, 9)
    INDENT '    ' (3, 0) (3, 4)
    NAME 'True' (3, 4) (3, 8)
    OP '=' (3, 9) (3, 10)
    NAME 'False' (3, 11) (3, 16)
    COMMENT '# NEWLINE' (3, 17) (3, 26)
    NEWLINE '\\n' (3, 26) (3, 27)
    DEDENT '' (4, 0) (4, 0)

    >>> indent_error_file = \"""
    ... def k(x):
    ...     x += 2
    ...   x += 5
    ... \"""
    >>> readline = BytesIO(indent_error_file.encode('utf-8')).readline
    >>> for tok in tokenize(readline): pass
    Traceback (most recent call last):
        ...
    IndentationError: unindent does not match any outer indentation level

There are some standard formatting practices that are easy to get right.

    >>> roundtrip("if x == 1:\\n"
    ... "    print(x)\\n")
    True

    >>> roundtrip("# This is a comment\\n# This also")
    True

Some people use different formatting conventions, which makes
untokenize a little trickier. Note that this test involves trailing
whitespace after the colon. Note that we use hex escapes to make the
two trailing blanks apparent in the expected output.

    >>> roundtrip("if x == 1 : \\n"
    ... " print(x)\\n")
    True

    >>> f = support.findfile("tokenize_tests.txt")
    >>> roundtrip(open(f, 'rb'))
    True

    >>> roundtrip("if x == 1:\\n"
    ... "    # A comment by itself.\\n"
    ... "    print(x) # Comment here, too.\\n"
    ... "    # Another comment.\\n"
    ... "after_if = True\\n")
    True

    >>> roundtrip("if (x # The comments need to go in the right place\\n"
    ... "    == 1):\\n"
    ... "    print('x==1')\\n")
    True

    >>> roundtrip("class Test: # A comment here\\n"
    ... " # A comment with weird indent\\n"
    ... "  after_com = 5\\n"
    ... "  def x(m): return m*5 # a one liner\\n"
    ... "  def y(m): # A whitespace after the colon\\n"
    ... "     return y*4 # 3-space indent\\n")
    True

Some error-handling code

    >>> roundtrip("try: import somemodule\\n"
    ... "except ImportError: # comment\\n"
    ... "    print('Can not import' # comment2\\n)"
    ... "else: print('Loaded')\\n")
    True

Balancing continuation

    >>> roundtrip("a = (3,4, \\n"
    ... "5,6)\\n"
    ... "y = [3, 4,\\n"
    ... "5]\\n"
    ... "z = {'a': 5,\\n"
    ... "'b':15, 'c':True}\\n"
    ... "x = len(y) + 5 - a[\\n"
    ... "3] - a[2]\\n"
    ... "+ len(z) - z[\\n"
    ... "'b']\\n")
    True

Ordinary integers and binary operators

    >>> dump_tokens("0xff <= 255")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NUMBER '0xff' (1, 0) (1, 4)
    OP '<=' (1, 5) (1, 7)
    NUMBER '255' (1, 8) (1, 11)
    >>> dump_tokens("0b10 <= 255")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NUMBER '0b10' (1, 0) (1, 4)
    OP '<=' (1, 5) (1, 7)
    NUMBER '255' (1, 8) (1, 11)
    >>> dump_tokens("0o123 <= 0O123")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NUMBER '0o123' (1, 0) (1, 5)
    OP '<=' (1, 6) (1, 8)
    NUMBER '0O123' (1, 9) (1, 14)
    >>> dump_tokens("1234567 > ~0x15")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NUMBER '1234567' (1, 0) (1, 7)
    OP '>' (1, 8) (1, 9)
    OP '~' (1, 10) (1, 11)
    NUMBER '0x15' (1, 11) (1, 15)
    >>> dump_tokens("2134568 != 1231515")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NUMBER '2134568' (1, 0) (1, 7)
    OP '!=' (1, 8) (1, 10)
    NUMBER '1231515' (1, 11) (1, 18)
    >>> dump_tokens("(-124561-1) & 200000000")
    ENCODING 'utf-8' (0, 0) (0, 0)
    OP '(' (1, 0) (1, 1)
    OP '-' (1, 1) (1, 2)
    NUMBER '124561' (1, 2) (1, 8)
    OP '-' (1, 8) (1, 9)
    NUMBER '1' (1, 9) (1, 10)
    OP ')' (1, 10) (1, 11)
    OP '&' (1, 12) (1, 13)
    NUMBER '200000000' (1, 14) (1, 23)
    >>> dump_tokens("0xdeadbeef != -1")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NUMBER '0xdeadbeef' (1, 0) (1, 10)
    OP '!=' (1, 11) (1, 13)
    OP '-' (1, 14) (1, 15)
    NUMBER '1' (1, 15) (1, 16)
    >>> dump_tokens("0xdeadc0de & 12345")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NUMBER '0xdeadc0de' (1, 0) (1, 10)
    OP '&' (1, 11) (1, 12)
    NUMBER '12345' (1, 13) (1, 18)
    >>> dump_tokens("0xFF & 0x15 | 1234")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NUMBER '0xFF' (1, 0) (1, 4)
    OP '&' (1, 5) (1, 6)
    NUMBER '0x15' (1, 7) (1, 11)
    OP '|' (1, 12) (1, 13)
    NUMBER '1234' (1, 14) (1, 18)

Long integers

    >>> dump_tokens("x = 0")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NAME 'x' (1, 0) (1, 1)
    OP '=' (1, 2) (1, 3)
    NUMBER '0' (1, 4) (1, 5)
    >>> dump_tokens("x = 0xfffffffffff")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NAME 'x' (1, 0) (1, 1)
    OP '=' (1, 2) (1, 3)
    NUMBER '0xffffffffff (1, 4) (1, 17)
    >>> dump_tokens("x = 123141242151251616110")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NAME 'x' (1, 0) (1, 1)
    OP '=' (1, 2) (1, 3)
    NUMBER '123141242151 (1, 4) (1, 25)
    >>> dump_tokens("x = -15921590215012591")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NAME 'x' (1, 0) (1, 1)
    OP '=' (1, 2) (1, 3)
    OP '-' (1, 4) (1, 5)
    NUMBER '159215902150 (1, 5) (1, 22)

Floating point numbers

    >>> dump_tokens("x = 3.14159")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NAME 'x' (1, 0) (1, 1)
    OP '=' (1, 2) (1, 3)
    NUMBER '3.14159' (1, 4) (1, 11)
    >>> dump_tokens("x = 314159.")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NAME 'x' (1, 0) (1, 1)
    OP '=' (1, 2) (1, 3)
    NUMBER '314159.' (1, 4) (1, 11)
    >>> dump_tokens("x = .314159")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NAME 'x' (1, 0) (1, 1)
    OP '=' (1, 2) (1, 3)
    NUMBER '.314159' (1, 4) (1, 11)
    >>> dump_tokens("x = 3e14159")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NAME 'x' (1, 0) (1, 1)
    OP '=' (1, 2) (1, 3)
    NUMBER '3e14159' (1, 4) (1, 11)
    >>> dump_tokens("x = 3E123")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NAME 'x' (1, 0) (1, 1)
    OP '=' (1, 2) (1, 3)
    NUMBER '3E123' (1, 4) (1, 9)
    >>> dump_tokens("x+y = 3e-1230")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NAME 'x' (1, 0) (1, 1)
    OP '+' (1, 1) (1, 2)
    NAME 'y' (1, 2) (1, 3)
    OP '=' (1, 4) (1, 5)
    NUMBER '3e-1230' (1, 6) (1, 13)
    >>> dump_tokens("x = 3.14e159")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NAME 'x' (1, 0) (1, 1)
    OP '=' (1, 2) (1, 3)
    NUMBER '3.14e159' (1, 4) (1, 12)

String literals

    >>> dump_tokens("x = ''; y = \\\"\\\"")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NAME 'x' (1, 0) (1, 1)
    OP '=' (1, 2) (1, 3)
    STRING "''" (1, 4) (1, 6)
    OP ';' (1, 6) (1, 7)
    NAME 'y' (1, 8) (1, 9)
    OP '=' (1, 10) (1, 11)
    STRING '""' (1, 12) (1, 14)
    >>> dump_tokens("x = '\\\"'; y = \\\"'\\\"")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NAME 'x' (1, 0) (1, 1)
    OP '=' (1, 2) (1, 3)
    STRING '\\'"\\'' (1, 4) (1, 7)
    OP ';' (1, 7) (1, 8)
    NAME 'y' (1, 9) (1, 10)
    OP '=' (1, 11) (1, 12)
    STRING '"\\'"' (1, 13) (1, 16)
    >>> dump_tokens("x = \\\"doesn't \\\"shrink\\\", does it\\\"")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NAME 'x' (1, 0) (1, 1)
    OP '=' (1, 2) (1, 3)
    STRING '"doesn\\'t "' (1, 4) (1, 14)
    NAME 'shrink' (1, 14) (1, 20)
    STRING '", does it"' (1, 20) (1, 31)
    >>> dump_tokens("x = 'abc' + 'ABC'")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NAME 'x' (1, 0) (1, 1)
    OP '=' (1, 2) (1, 3)
    STRING "'abc'" (1, 4) (1, 9)
    OP '+' (1, 10) (1, 11)
    STRING "'ABC'" (1, 12) (1, 17)
    >>> dump_tokens('y = "ABC" + "ABC"')
    ENCODING 'utf-8' (0, 0) (0, 0)
    NAME 'y' (1, 0) (1, 1)
    OP '=' (1, 2) (1, 3)
    STRING '"ABC"' (1, 4) (1, 9)
    OP '+' (1, 10) (1, 11)
    STRING '"ABC"' (1, 12) (1, 17)
    >>> dump_tokens("x = r'abc' + r'ABC' + R'ABC' + R'ABC'")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NAME 'x' (1, 0) (1, 1)
    OP '=' (1, 2) (1, 3)
    STRING "r'abc'" (1, 4) (1, 10)
    OP '+' (1, 11) (1, 12)
    STRING "r'ABC'" (1, 13) (1, 19)
    OP '+' (1, 20) (1, 21)
    STRING "R'ABC'" (1, 22) (1, 28)
    OP '+' (1, 29) (1, 30)
    STRING "R'ABC'" (1, 31) (1, 37)
    >>> dump_tokens('y = r"abc" + r"ABC" + R"ABC" + R"ABC"')
    ENCODING 'utf-8' (0, 0) (0, 0)
    NAME 'y' (1, 0) (1, 1)
    OP '=' (1, 2) (1, 3)
    STRING 'r"abc"' (1, 4) (1, 10)
    OP '+' (1, 11) (1, 12)
    STRING 'r"ABC"' (1, 13) (1, 19)
    OP '+' (1, 20) (1, 21)
    STRING 'R"ABC"' (1, 22) (1, 28)
    OP '+' (1, 29) (1, 30)
    STRING 'R"ABC"' (1, 31) (1, 37)

Operators

    >>> dump_tokens("def d22(a, b, c=2, d=2, *k): pass")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NAME 'def' (1, 0) (1, 3)
    NAME 'd22' (1, 4) (1, 7)
    OP '(' (1, 7) (1, 8)
    NAME 'a' (1, 8) (1, 9)
    OP ',' (1, 9) (1, 10)
    NAME 'b' (1, 11) (1, 12)
    OP ',' (1, 12) (1, 13)
    NAME 'c' (1, 14) (1, 15)
    OP '=' (1, 15) (1, 16)
    NUMBER '2' (1, 16) (1, 17)
    OP ',' (1, 17) (1, 18)
    NAME 'd' (1, 19) (1, 20)
    OP '=' (1, 20) (1, 21)
    NUMBER '2' (1, 21) (1, 22)
    OP ',' (1, 22) (1, 23)
    OP '*' (1, 24) (1, 25)
    NAME 'k' (1, 25) (1, 26)
    OP ')' (1, 26) (1, 27)
    OP ':' (1, 27) (1, 28)
    NAME 'pass' (1, 29) (1, 33)
    >>> dump_tokens("def d01v_(a=1, *k, **w): pass")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NAME 'def' (1, 0) (1, 3)
    NAME 'd01v_' (1, 4) (1, 9)
    OP '(' (1, 9) (1, 10)
    NAME 'a' (1, 10) (1, 11)
    OP '=' (1, 11) (1, 12)
    NUMBER '1' (1, 12) (1, 13)
    OP ',' (1, 13) (1, 14)
    OP '*' (1, 15) (1, 16)
    NAME 'k' (1, 16) (1, 17)
    OP ',' (1, 17) (1, 18)
    OP '**' (1, 19) (1, 21)
    NAME 'w' (1, 21) (1, 22)
    OP ')' (1, 22) (1, 23)
    OP ':' (1, 23) (1, 24)
    NAME 'pass' (1, 25) (1, 29)

Comparison

    >>> dump_tokens("if 1 < 1 > 1 == 1 >= 5 <= 0x15 <= 0x12 != " +
    ... "1 and 5 in 1 not in 1 is 1 or 5 is not 1: pass")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NAME 'if' (1, 0) (1, 2)
    NUMBER '1' (1, 3) (1, 4)
    OP '<' (1, 5) (1, 6)
    NUMBER '1' (1, 7) (1, 8)
    OP '>' (1, 9) (1, 10)
    NUMBER '1' (1, 11) (1, 12)
    OP '==' (1, 13) (1, 15)
    NUMBER '1' (1, 16) (1, 17)
    OP '>=' (1, 18) (1, 20)
    NUMBER '5' (1, 21) (1, 22)
    OP '<=' (1, 23) (1, 25)
    NUMBER '0x15' (1, 26) (1, 30)
    OP '<=' (1, 31) (1, 33)
    NUMBER '0x12' (1, 34) (1, 38)
    OP '!=' (1, 39) (1, 41)
    NUMBER '1' (1, 42) (1, 43)
    NAME 'and' (1, 44) (1, 47)
    NUMBER '5' (1, 48) (1, 49)
    NAME 'in' (1, 50) (1, 52)
    NUMBER '1' (1, 53) (1, 54)
    NAME 'not' (1, 55) (1, 58)
    NAME 'in' (1, 59) (1, 61)
    NUMBER '1' (1, 62) (1, 63)
    NAME 'is' (1, 64) (1, 66)
    NUMBER '1' (1, 67) (1, 68)
    NAME 'or' (1, 69) (1, 71)
    NUMBER '5' (1, 72) (1, 73)
    NAME 'is' (1, 74) (1, 76)
    NAME 'not' (1, 77) (1, 80)
    NUMBER '1' (1, 81) (1, 82)
    OP ':' (1, 82) (1, 83)
    NAME 'pass' (1, 84) (1, 88)

Shift

    >>> dump_tokens("x = 1 << 1 >> 5")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NAME 'x' (1, 0) (1, 1)
    OP '=' (1, 2) (1, 3)
    NUMBER '1' (1, 4) (1, 5)
    OP '<<' (1, 6) (1, 8)
    NUMBER '1' (1, 9) (1, 10)
    OP '>>' (1, 11) (1, 13)
    NUMBER '5' (1, 14) (1, 15)

Additive

    >>> dump_tokens("x = 1 - y + 15 - 1 + 0x124 + z + a[5]")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NAME 'x' (1, 0) (1, 1)
    OP '=' (1, 2) (1, 3)
    NUMBER '1' (1, 4) (1, 5)
    OP '-' (1, 6) (1, 7)
    NAME 'y' (1, 8) (1, 9)
    OP '+' (1, 10) (1, 11)
    NUMBER '15' (1, 12) (1, 14)
    OP '-' (1, 15) (1, 16)
    NUMBER '1' (1, 17) (1, 18)
    OP '+' (1, 19) (1, 20)
    NUMBER '0x124' (1, 21) (1, 26)
    OP '+' (1, 27) (1, 28)
    NAME 'z' (1, 29) (1, 30)
    OP '+' (1, 31) (1, 32)
    NAME 'a' (1, 33) (1, 34)
    OP '[' (1, 34) (1, 35)
    NUMBER '5' (1, 35) (1, 36)
    OP ']' (1, 36) (1, 37)

Multiplicative

    >>> dump_tokens("x = 1//1*1/5*12%0x12")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NAME 'x' (1, 0) (1, 1)
    OP '=' (1, 2) (1, 3)
    NUMBER '1' (1, 4) (1, 5)
    OP '//' (1, 5) (1, 7)
    NUMBER '1' (1, 7) (1, 8)
    OP '*' (1, 8) (1, 9)
    NUMBER '1' (1, 9) (1, 10)
    OP '/' (1, 10) (1, 11)
    NUMBER '5' (1, 11) (1, 12)
    OP '*' (1, 12) (1, 13)
    NUMBER '12' (1, 13) (1, 15)
    OP '%' (1, 15) (1, 16)
    NUMBER '0x12' (1, 16) (1, 20)

Unary

    >>> dump_tokens("~1 ^ 1 & 1 |1 ^ -1")
    ENCODING 'utf-8' (0, 0) (0, 0)
    OP '~' (1, 0) (1, 1)
    NUMBER '1' (1, 1) (1, 2)
    OP '^' (1, 3) (1, 4)
    NUMBER '1' (1, 5) (1, 6)
    OP '&' (1, 7) (1, 8)
    NUMBER '1' (1, 9) (1, 10)
    OP '|' (1, 11) (1, 12)
    NUMBER '1' (1, 12) (1, 13)
    OP '^' (1, 14) (1, 15)
    OP '-' (1, 16) (1, 17)
    NUMBER '1' (1, 17) (1, 18)
    >>> dump_tokens("-1*1/1+1*1//1 - ---1**1")
    ENCODING 'utf-8' (0, 0) (0, 0)
    OP '-' (1, 0) (1, 1)
    NUMBER '1' (1, 1) (1, 2)
    OP '*' (1, 2) (1, 3)
    NUMBER '1' (1, 3) (1, 4)
    OP '/' (1, 4) (1, 5)
    NUMBER '1' (1, 5) (1, 6)
    OP '+' (1, 6) (1, 7)
    NUMBER '1' (1, 7) (1, 8)
    OP '*' (1, 8) (1, 9)
    NUMBER '1' (1, 9) (1, 10)
    OP '//' (1, 10) (1, 12)
    NUMBER '1' (1, 12) (1, 13)
    OP '-' (1, 14) (1, 15)
    OP '-' (1, 16) (1, 17)
    OP '-' (1, 17) (1, 18)
    OP '-' (1, 18) (1, 19)
    NUMBER '1' (1, 19) (1, 20)
    OP '**' (1, 20) (1, 22)
    NUMBER '1' (1, 22) (1, 23)

Selector

    >>> dump_tokens("import sys, time\\nx = sys.modules['time'].time()")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NAME 'import' (1, 0) (1, 6)
    NAME 'sys' (1, 7) (1, 10)
    OP ',' (1, 10) (1, 11)
    NAME 'time' (1, 12) (1, 16)
    NEWLINE '\\n' (1, 16) (1, 17)
    NAME 'x' (2, 0) (2, 1)
    OP '=' (2, 2) (2, 3)
    NAME 'sys' (2, 4) (2, 7)
    OP '.' (2, 7) (2, 8)
    NAME 'modules' (2, 8) (2, 15)
    OP '[' (2, 15) (2, 16)
    STRING "'time'" (2, 16) (2, 22)
    OP ']' (2, 22) (2, 23)
    OP '.' (2, 23) (2, 24)
    NAME 'time' (2, 24) (2, 28)
    OP '(' (2, 28) (2, 29)
    OP ')' (2, 29) (2, 30)

Methods

    >>> dump_tokens("@staticmethod\\ndef foo(x,y): pass")
    ENCODING 'utf-8' (0, 0) (0, 0)
    OP '@' (1, 0) (1, 1)
    NAME 'staticmethod (1, 1) (1, 13)
    NEWLINE '\\n' (1, 13) (1, 14)
    NAME 'def' (2, 0) (2, 3)
    NAME 'foo' (2, 4) (2, 7)
    OP '(' (2, 7) (2, 8)
    NAME 'x' (2, 8) (2, 9)
    OP ',' (2, 9) (2, 10)
    NAME 'y' (2, 10) (2, 11)
    OP ')' (2, 11) (2, 12)
    OP ':' (2, 12) (2, 13)
    NAME 'pass' (2, 14) (2, 18)

Backslash means line continuation, except for comments

    >>> roundtrip("x=1+\\\\n"
    ... "1\\n"
    ... "# This is a comment\\\\n"
    ... "# This also\\n")
    True
    >>> roundtrip("# Comment \\\\nx = 0")
    True

Two string literals on the same line

    >>> roundtrip("'' ''")
    True

Test roundtrip on random python modules.
Pass the '-ucpu' option to process the full directory.

    >>> import random
    >>> tempdir = os.path.dirname(f) or os.curdir
    >>> testfiles = glob.glob(os.path.join(tempdir, "test*.py"))

    >>> if not support.is_resource_enabled("cpu"):
    ...     testfiles = random.sample(testfiles, 10)
    ...
    >>> for testfile in testfiles:
    ...     if not roundtrip(open(testfile, 'rb')):
    ...         print("Roundtrip failed for file %s" % testfile)
    ...         break
    ... else: True
    True

Evil tabs

    >>> dump_tokens("def f():\\n\\tif x\\n \\tpass")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NAME 'def' (1, 0) (1, 3)
    NAME 'f' (1, 4) (1, 5)
    OP '(' (1, 5) (1, 6)
    OP ')' (1, 6) (1, 7)
    OP ':' (1, 7) (1, 8)
    NEWLINE '\\n' (1, 8) (1, 9)
    INDENT '\\t' (2, 0) (2, 1)
    NAME 'if' (2, 1) (2, 3)
    NAME 'x' (2, 4) (2, 5)
    NEWLINE '\\n' (2, 5) (2, 6)
    INDENT ' \\t' (3, 0) (3, 9)
    NAME 'pass' (3, 9) (3, 13)
    DEDENT '' (4, 0) (4, 0)
    DEDENT '' (4, 0) (4, 0)

Non-ascii identifiers

    >>> dump_tokens("Örter = 'places'\\ngrün = 'green'")
    ENCODING 'utf-8' (0, 0) (0, 0)
    NAME 'Örter' (1, 0) (1, 5)
    OP '=' (1, 6) (1, 7)
    STRING "'places'" (1, 8) (1, 16)
    NEWLINE '\\n' (1, 16) (1, 17)
    NAME 'grün' (2, 0) (2, 4)
    OP '=' (2, 5) (2, 6)
    STRING "'green'" (2, 7) (2, 14)
"""

from test import support
from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP,
                      STRING, ENDMARKER, tok_name, detect_encoding,
                      open as tokenize_open)
from io import BytesIO
from unittest import TestCase
import os, sys, glob

def dump_tokens(s):
    """Print out the tokens in s in a table format.

    The ENDMARKER is omitted.
    """
    f = BytesIO(s.encode('utf-8'))
    for type, token, start, end, line in tokenize(f.readline):
        if type == ENDMARKER:
            break
        type = tok_name[type]
        print("%(type)-10.10s %(token)-13.13r %(start)s %(end)s" % locals())

def roundtrip(f):
    """
    Test roundtrip for `untokenize`. `f` is an open file or a string.
    The source code in f is tokenized, converted back to source code via
    tokenize.untokenize(), and tokenized again from the latter. The test
    fails if the second tokenization doesn't match the first.
    """
    if isinstance(f, str):
        f = BytesIO(f.encode('utf-8'))
    try:
        token_list = list(tokenize(f.readline))
    finally:
        f.close()
    tokens1 = [tok[:2] for tok in token_list]
    new_bytes = untokenize(tokens1)
    readline = (line for line in new_bytes.splitlines(1)).__next__
    tokens2 = [tok[:2] for tok in tokenize(readline)]
    return tokens1 == tokens2

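# A hedged usage sketch of the two helpers above (illustrative only; it is
# never invoked by test_main() and the real coverage lives in the doctests).
# roundtrip() accepts either a source string or a file object opened in
# binary mode, and dump_tokens() prints the table the doctests compare
# against.
def _helper_usage_sketch():
    assert roundtrip("if x == 1:\n"
                     "    print(x)\n")
    dump_tokens("1 + 1")
    # roundtrip() closes the file itself, so no 'with' block is needed here.
    assert roundtrip(open(support.findfile("tokenize_tests.txt"), 'rb'))
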
# This is an example from the docs, set up as a doctest.
def decistmt(s):
    """Substitute Decimals for floats in a string of statements.

    >>> from decimal import Decimal
    >>> s = 'print(+21.3e-5*-.1234/81.7)'
    >>> decistmt(s)
    "print (+Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7'))"

    The format of the exponent is inherited from the platform C library.
    Known cases are "e-007" (Windows) and "e-07" (not Windows). Since
    we're only showing 11 digits, and the 12th isn't close to 5, the
    rest of the output should be platform-independent.

    >>> exec(s) #doctest: +ELLIPSIS
    -3.2171603427...e-0...7

    Output from calculations with Decimal should be identical across all
    platforms.

    >>> exec(decistmt(s))
    -3.217160342717258261933904529E-7
    """
    result = []
    g = tokenize(BytesIO(s.encode('utf-8')).readline)  # tokenize the string
    for toknum, tokval, _, _, _ in g:
        if toknum == NUMBER and '.' in tokval:  # replace NUMBER tokens
            result.extend([
                (NAME, 'Decimal'),
                (OP, '('),
                (STRING, repr(tokval)),
                (OP, ')')
            ])
        else:
            result.append((toknum, tokval))
    return untokenize(result).decode('utf-8')


class TestTokenizerAdheresToPep0263(TestCase):
    """
    Test that tokenizer adheres to the coding behaviour stipulated in PEP 0263.
    """

    def _testFile(self, filename):
        path = os.path.join(os.path.dirname(__file__), filename)
        return roundtrip(open(path, 'rb'))

    def test_utf8_coding_cookie_and_no_utf8_bom(self):
        f = 'tokenize_tests-utf8-coding-cookie-and-utf8-bom-sig.txt'
        self.assertTrue(self._testFile(f))

    def test_latin1_coding_cookie_and_utf8_bom(self):
        """
        As per PEP 0263, if a file starts with a utf-8 BOM signature, the only
        allowed encoding for the comment is 'utf-8'. The text file used in
        this test starts with a BOM signature, but specifies latin1 as the
        coding, so verify that a SyntaxError is raised, which matches the
        behaviour of the interpreter when it encounters a similar condition.
        """
        f = 'tokenize_tests-latin1-coding-cookie-and-utf8-bom-sig.txt'
        self.assertRaises(SyntaxError, self._testFile, f)

    def test_no_coding_cookie_and_utf8_bom(self):
        f = 'tokenize_tests-no-coding-cookie-and-utf8-bom-sig-only.txt'
        self.assertTrue(self._testFile(f))

    def test_utf8_coding_cookie_and_utf8_bom(self):
        f = 'tokenize_tests-utf8-coding-cookie-and-utf8-bom-sig.txt'
        self.assertTrue(self._testFile(f))


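# Hedged illustration of the PEP 0263 rule the class above exercises (this
# helper is never called by test_main(); its assertions restate what the
# fixture files demonstrate): a UTF-8 BOM on its own selects 'utf-8-sig',
# while a BOM combined with a non-utf-8 coding cookie is a SyntaxError.
def _bom_and_cookie_sketch():
    enc, _ = detect_encoding(BytesIO(b'\xef\xbb\xbf# a comment\n').readline)
    assert enc == 'utf-8-sig'
    try:
        detect_encoding(BytesIO(b'\xef\xbb\xbf# coding: latin-1\n').readline)
    except SyntaxError:
        pass  # expected: the BOM contradicts the latin-1 cookie
    else:
        raise AssertionError('SyntaxError was expected')
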
class Test_Tokenize(TestCase):

    def test__tokenize_decodes_with_specified_encoding(self):
        literal = '"ЉЊЈЁЂ"'
        line = literal.encode('utf-8')
        first = False
        def readline():
            nonlocal first
            if not first:
                first = True
                return line
            else:
                return b''

        # skip the initial encoding token and the end token
        tokens = list(_tokenize(readline, encoding='utf-8'))[1:-1]
        expected_tokens = [(3, '"ЉЊЈЁЂ"', (1, 0), (1, 7), '"ЉЊЈЁЂ"')]
        self.assertEqual(tokens, expected_tokens,
                         "bytes not decoded with encoding")

    def test__tokenize_does_not_decode_with_encoding_none(self):
        literal = '"ЉЊЈЁЂ"'
        first = False
        def readline():
            nonlocal first
            if not first:
                first = True
                return literal
            else:
                return b''

        # skip the end token
        tokens = list(_tokenize(readline, encoding=None))[:-1]
        expected_tokens = [(3, '"ЉЊЈЁЂ"', (1, 0), (1, 7), '"ЉЊЈЁЂ"')]
        self.assertEqual(tokens, expected_tokens,
                         "string not tokenized when encoding is None")

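# A hedged sketch of the contrast tested above (illustrative only, never
# called): _tokenize() emits an ENCODING token and decodes byte lines when an
# encoding is supplied, and consumes str lines unchanged when it is None.
def _tokenize_encoding_sketch():
    lines = iter([b'1 + 1', b''])
    tokens = list(_tokenize(lambda: next(lines), encoding='utf-8'))
    assert tok_name[tokens[0][0]] == 'ENCODING'
    assert tokens[1][1] == '1'  # the byte line was decoded to str
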
class TestDetectEncoding(TestCase):

    def get_readline(self, lines):
        index = 0
        def readline():
            nonlocal index
            if index == len(lines):
                raise StopIteration
            line = lines[index]
            index += 1
            return line
        return readline

    def test_no_bom_no_encoding_cookie(self):
        lines = (
            b'# something\n',
            b'print(something)\n',
            b'do_something(else)\n'
        )
        encoding, consumed_lines = detect_encoding(self.get_readline(lines))
        self.assertEqual(encoding, 'utf-8')
        self.assertEqual(consumed_lines, list(lines[:2]))

    def test_bom_no_cookie(self):
        lines = (
            b'\xef\xbb\xbf# something\n',
            b'print(something)\n',
            b'do_something(else)\n'
        )
        encoding, consumed_lines = detect_encoding(self.get_readline(lines))
        self.assertEqual(encoding, 'utf-8-sig')
        self.assertEqual(consumed_lines,
                         [b'# something\n', b'print(something)\n'])

    def test_cookie_first_line_no_bom(self):
        lines = (
            b'# -*- coding: latin-1 -*-\n',
            b'print(something)\n',
            b'do_something(else)\n'
        )
        encoding, consumed_lines = detect_encoding(self.get_readline(lines))
        self.assertEqual(encoding, 'iso-8859-1')
        self.assertEqual(consumed_lines, [b'# -*- coding: latin-1 -*-\n'])

    def test_matched_bom_and_cookie_first_line(self):
        lines = (
            b'\xef\xbb\xbf# coding=utf-8\n',
            b'print(something)\n',
            b'do_something(else)\n'
        )
        encoding, consumed_lines = detect_encoding(self.get_readline(lines))
        self.assertEqual(encoding, 'utf-8-sig')
        self.assertEqual(consumed_lines, [b'# coding=utf-8\n'])

    def test_mismatched_bom_and_cookie_first_line_raises_syntaxerror(self):
        lines = (
            b'\xef\xbb\xbf# vim: set fileencoding=ascii :\n',
            b'print(something)\n',
            b'do_something(else)\n'
        )
        readline = self.get_readline(lines)
        self.assertRaises(SyntaxError, detect_encoding, readline)

    def test_cookie_second_line_no_bom(self):
        lines = (
            b'#! something\n',
            b'# vim: set fileencoding=ascii :\n',
            b'print(something)\n',
            b'do_something(else)\n'
        )
        encoding, consumed_lines = detect_encoding(self.get_readline(lines))
        self.assertEqual(encoding, 'ascii')
        expected = [b'#! something\n', b'# vim: set fileencoding=ascii :\n']
        self.assertEqual(consumed_lines, expected)

    def test_matched_bom_and_cookie_second_line(self):
        lines = (
            b'\xef\xbb\xbf#! something\n',
            b'f# coding=utf-8\n',
            b'print(something)\n',
            b'do_something(else)\n'
        )
        encoding, consumed_lines = detect_encoding(self.get_readline(lines))
        self.assertEqual(encoding, 'utf-8-sig')
        self.assertEqual(consumed_lines,
                         [b'#! something\n', b'f# coding=utf-8\n'])

    def test_mismatched_bom_and_cookie_second_line_raises_syntaxerror(self):
        lines = (
            b'\xef\xbb\xbf#! something\n',
            b'# vim: set fileencoding=ascii :\n',
            b'print(something)\n',
            b'do_something(else)\n'
        )
        readline = self.get_readline(lines)
        self.assertRaises(SyntaxError, detect_encoding, readline)

    def test_latin1_normalization(self):
        # See get_normal_name() in tokenizer.c.
        encodings = ("latin-1", "iso-8859-1", "iso-latin-1", "latin-1-unix",
                     "iso-8859-1-unix", "iso-latin-1-mac")
        for encoding in encodings:
            for rep in ("-", "_"):
                enc = encoding.replace("-", rep)
                lines = (b"#!/usr/bin/python\n",
                         b"# coding: " + enc.encode("ascii") + b"\n",
                         b"print(things)\n",
                         b"do_something += 4\n")
                rl = self.get_readline(lines)
                found, consumed_lines = detect_encoding(rl)
                self.assertEqual(found, "iso-8859-1")

    def test_utf8_normalization(self):
        # See get_normal_name() in tokenizer.c.
        encodings = ("utf-8", "utf-8-mac", "utf-8-unix")
        for encoding in encodings:
            for rep in ("-", "_"):
                enc = encoding.replace("-", rep)
                lines = (b"#!/usr/bin/python\n",
                         b"# coding: " + enc.encode("ascii") + b"\n",
                         b"1 + 3\n")
                rl = self.get_readline(lines)
                found, consumed_lines = detect_encoding(rl)
                self.assertEqual(found, "utf-8")

    def test_short_files(self):
        readline = self.get_readline((b'print(something)\n',))
        encoding, consumed_lines = detect_encoding(readline)
        self.assertEqual(encoding, 'utf-8')
        self.assertEqual(consumed_lines, [b'print(something)\n'])

        encoding, consumed_lines = detect_encoding(self.get_readline(()))
        self.assertEqual(encoding, 'utf-8')
        self.assertEqual(consumed_lines, [])

        readline = self.get_readline((b'\xef\xbb\xbfprint(something)\n',))
        encoding, consumed_lines = detect_encoding(readline)
        self.assertEqual(encoding, 'utf-8-sig')
        self.assertEqual(consumed_lines, [b'print(something)\n'])

        readline = self.get_readline((b'\xef\xbb\xbf',))
        encoding, consumed_lines = detect_encoding(readline)
        self.assertEqual(encoding, 'utf-8-sig')
        self.assertEqual(consumed_lines, [])

        readline = self.get_readline((b'# coding: bad\n',))
        self.assertRaises(SyntaxError, detect_encoding, readline)

    def test_open(self):
        filename = support.TESTFN + '.py'
        self.addCleanup(support.unlink, filename)

        # test coding cookie
        for encoding in ('iso-8859-15', 'utf-8'):
            with open(filename, 'w', encoding=encoding) as fp:
                print("# coding: %s" % encoding, file=fp)
                print("print('euro:\u20ac')", file=fp)
            with tokenize_open(filename) as fp:
                self.assertEqual(fp.encoding, encoding)
                self.assertEqual(fp.mode, 'r')

        # test BOM (no coding cookie)
        with open(filename, 'w', encoding='utf-8-sig') as fp:
            print("print('euro:\u20ac')", file=fp)
        with tokenize_open(filename) as fp:
            self.assertEqual(fp.encoding, 'utf-8-sig')
            self.assertEqual(fp.mode, 'r')

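# Hedged sketch of the tokenize.open() behaviour checked by test_open() above
# (illustrative only, never called): the file comes back as a read-only text
# stream whose encoding was chosen by detect_encoding().
def _tokenize_open_sketch(path):
    with tokenize_open(path) as fp:
        assert fp.mode == 'r'
        assert fp.encoding is not None
        return fp.read()
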
class TestTokenize(TestCase):

    def test_tokenize(self):
        import tokenize as tokenize_module
        encoding = object()
        encoding_used = None
        def mock_detect_encoding(readline):
            return encoding, ['first', 'second']

        def mock__tokenize(readline, encoding):
            nonlocal encoding_used
            encoding_used = encoding
            out = []
            while True:
                next_line = readline()
                if next_line:
                    out.append(next_line)
                    continue
                return out

        counter = 0
        def mock_readline():
            nonlocal counter
            counter += 1
            if counter == 5:
                return b''
            return counter

        orig_detect_encoding = tokenize_module.detect_encoding
        orig__tokenize = tokenize_module._tokenize
        tokenize_module.detect_encoding = mock_detect_encoding
        tokenize_module._tokenize = mock__tokenize
        try:
            results = tokenize(mock_readline)
            self.assertEqual(list(results), ['first', 'second', 1, 2, 3, 4])
        finally:
            tokenize_module.detect_encoding = orig_detect_encoding
            tokenize_module._tokenize = orig__tokenize

        self.assertEqual(encoding_used, encoding)


__test__ = {"doctests" : doctests, 'decistmt': decistmt}

def test_main():
    from test import test_tokenize
    support.run_doctest(test_tokenize, True)
    support.run_unittest(TestTokenizerAdheresToPep0263)
    support.run_unittest(Test_Tokenize)
    support.run_unittest(TestDetectEncoding)
    support.run_unittest(TestTokenize)

if __name__ == "__main__":
    test_main()