#! /usr/bin/env python

"""The Tab Nanny despises ambiguous indentation.  She knows no mercy."""

# Released to the public domain, by Tim Peters, 15 April 1998.

__version__ = "5"

import os
import sys
import getopt
import tokenize

verbose = 0

def main():
    global verbose
    try:
        opts, args = getopt.getopt(sys.argv[1:], "v")
    except getopt.error, msg:
        print msg
        return
    for o, a in opts:
        if o == '-v':
            verbose = verbose + 1
    if not args:
        print "Usage:", sys.argv[0], "[-v] file_or_directory ..."
        return
    for arg in args:
        check(arg)

class NannyNag:
    def __init__(self, lineno, msg, line):
        self.lineno, self.msg, self.line = lineno, msg, line
    def get_lineno(self):
        return self.lineno
    def get_msg(self):
        return self.msg
    def get_line(self):
        return self.line

def check(file):
    if os.path.isdir(file) and not os.path.islink(file):
        if verbose:
            print "%s: listing directory" % `file`
        names = os.listdir(file)
        for name in names:
            fullname = os.path.join(file, name)
            if (os.path.isdir(fullname) and
                not os.path.islink(fullname) or
                os.path.normcase(name[-3:]) == ".py"):
                check(fullname)
        return

    try:
        f = open(file)
    except IOError, msg:
        print "%s: I/O Error: %s" % (`file`, str(msg))
        return

    if verbose > 1:
        print "checking", `file`, "..."

    reset_globals()
    try:
        tokenize.tokenize(f.readline, tokeneater)

    except tokenize.TokenError, msg:
        print "%s: Token Error: %s" % (`file`, str(msg))
        return

    except NannyNag, nag:
        badline = nag.get_lineno()
        line = nag.get_line()
        if verbose:
            print "%s: *** Line %d: trouble in tab city! ***" % (
                `file`, badline)
            print "offending line:", `line`
            print nag.get_msg()
        else:
            print file, badline, `line`
        return

    if verbose:
        print "%s: Clean bill of health." % `file`

class Whitespace:
    # the characters used for space and tab
    S, T = ' \t'

    # members:
    #   raw
    #       the original string
    #   n
    #       the number of leading whitespace characters in raw
    #   nt
    #       the number of tabs in raw[:n]
    #   norm
    #       the normal form as a pair (count, trailing), where:
    #       count
    #           a tuple such that raw[:n] contains count[i]
    #           instances of S * i + T
    #       trailing
    #           the number of trailing spaces in raw[:n]
    #       It's A Theorem that m.indent_level(t) ==
    #       n.indent_level(t) for all t >= 1 iff m.norm == n.norm.
    #   is_simple
    #       true iff raw[:n] is of the form (T*)(S*)

    def __init__(self, ws):
        self.raw = ws
        S, T = Whitespace.S, Whitespace.T
        count = []
        b = n = nt = 0
        for ch in self.raw:
            if ch == S:
                n = n + 1
                b = b + 1
            elif ch == T:
                n = n + 1
                nt = nt + 1
                if b >= len(count):
                    count = count + [0] * (b - len(count) + 1)
                count[b] = count[b] + 1
                b = 0
            else:
                break
        self.n = n
        self.nt = nt
        self.norm = tuple(count), b
        self.is_simple = len(count) <= 1
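        # A worked example of the normal form: for ws == "  \t \t  "
        # (two spaces, tab, space, tab, two spaces), count becomes
        # [0, 1, 1] -- one space-then-tab run and one
        # two-spaces-then-tab run -- so norm == ((0, 1, 1), 2),
        # with n == 7, nt == 2, and is_simple false.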

    # return length of longest contiguous run of spaces (whether or not
    # preceding a tab)
    def longest_run_of_spaces(self):
        count, trailing = self.norm
        return max(len(count)-1, trailing)

    def indent_level(self, tabsize):
        # count, il = self.norm
        # for i in range(len(count)):
        #     if count[i]:
        #         il = il + (i/tabsize + 1)*tabsize * count[i]
        # return il

        # quicker:
        # il = trailing + sum (i/ts + 1)*ts*count[i] =
        #      trailing + ts * sum (i/ts + 1)*count[i] =
        #      trailing + ts * sum (i/ts*count[i] + count[i]) =
        #      trailing + ts * [(sum i/ts*count[i]) + (sum count[i])] =
        #      trailing + ts * [(sum i/ts*count[i]) + num_tabs]
        # and note that i/ts*count[i] is 0 when i < ts

        count, trailing = self.norm
        il = 0
        for i in range(tabsize, len(count)):
            il = il + i/tabsize * count[i]
        return trailing + tabsize * (il + self.nt)
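        # Worked example, continuing the one in __init__: with
        # norm == ((0, 1, 1), 2) and nt == 2, indent_level(4) is
        # 2 + 4*(0 + 2) == 10 and indent_level(2) is 2 + 2*(1 + 2) == 8,
        # matching a column-by-column expansion of "  \t \t  ".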

    # return true iff self.indent_level(t) == other.indent_level(t)
    # for all t >= 1
    def equal(self, other):
        return self.norm == other.norm

    # return a list of tuples (ts, i1, i2) such that
    # i1 == self.indent_level(ts) != other.indent_level(ts) == i2.
    # Intended to be used after not self.equal(other) is known, in which
    # case it will return at least one witnessing tab size.
    def not_equal_witness(self, other):
        n = max(self.longest_run_of_spaces(),
                other.longest_run_of_spaces()) + 1
        a = []
        for ts in range(1, n+1):
            if self.indent_level(ts) != other.indent_level(ts):
                a.append( (ts,
                           self.indent_level(ts),
                           other.indent_level(ts)) )
        return a

    # Return true iff self.indent_level(t) < other.indent_level(t)
    # for all t >= 1.
    # The algorithm is due to Vincent Broman.
    # Easy to prove it's correct.
    # XXXpost that.
    # Trivial to prove n is sharp (consider T vs ST).
    # Unknown whether there's a faster general way.  I suspected so at
    # first, but no longer.
    # For the special (but common!) case where M and N are both of the
    # form (T*)(S*), M.less(N) iff M.len() < N.len() and
    # M.num_tabs() <= N.num_tabs().  Proof is easy but kinda long-winded.
    # XXXwrite that up.
    # Note that M is of the form (T*)(S*) iff len(M.norm[0]) <= 1.
    def less(self, other):
        if self.n >= other.n:
            return 0
        if self.is_simple and other.is_simple:
            return self.nt <= other.nt
        n = max(self.longest_run_of_spaces(),
                other.longest_run_of_spaces()) + 1
        # the self.n >= other.n test already did it for ts=1
        for ts in range(2, n+1):
            if self.indent_level(ts) >= other.indent_level(ts):
                return 0
        return 1
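        # Worked example of why n is sharp: Whitespace("\t") is not
        # less() than Whitespace(" \t"), because both reach column 2 at
        # tab size 2 -- the largest tab size tried, i.e. the longest run
        # of spaces (1) plus 1.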

    # return a list of tuples (ts, i1, i2) such that
    # i1 == self.indent_level(ts) >= other.indent_level(ts) == i2.
    # Intended to be used after not self.less(other) is known, in which
    # case it will return at least one witnessing tab size.
    def not_less_witness(self, other):
        n = max(self.longest_run_of_spaces(),
                other.longest_run_of_spaces()) + 1
        a = []
        for ts in range(1, n+1):
            if self.indent_level(ts) >= other.indent_level(ts):
                a.append( (ts,
                           self.indent_level(ts),
                           other.indent_level(ts)) )
        return a

def format_witnesses(w):
    import string
    firsts = map(lambda tup: str(tup[0]), w)
    prefix = "at tab size"
    if len(w) > 1:
        prefix = prefix + "s"
    return prefix + " " + string.join(firsts, ', ')
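    # For example, format_witnesses([(1, 1, 8), (2, 2, 8)]) returns
    # "at tab sizes 1, 2".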

# The collection of globals, the reset_globals() function, and the
# tokeneater() function, depend on which version of tokenize is
# in use.

if hasattr(tokenize, 'NL'):
    # take advantage of Guido's patch!

    indents = []
    check_equal = 0

    def reset_globals():
        global indents, check_equal
        check_equal = 0
        indents = [Whitespace("")]

    def tokeneater(type, token, start, end, line,
                   INDENT=tokenize.INDENT,
                   DEDENT=tokenize.DEDENT,
                   NEWLINE=tokenize.NEWLINE,
                   JUNK=(tokenize.COMMENT, tokenize.NL) ):
        global indents, check_equal

        if type == NEWLINE:
            # a program statement, or ENDMARKER, will eventually follow,
            # after some (possibly empty) run of tokens of the form
            #     (NL | COMMENT)* (INDENT | DEDENT+)?
            # If an INDENT appears, setting check_equal is wrong, and will
            # be undone when we see the INDENT.
            check_equal = 1

        elif type == INDENT:
            check_equal = 0
            thisguy = Whitespace(token)
            if not indents[-1].less(thisguy):
                witness = indents[-1].not_less_witness(thisguy)
                msg = "indent not greater e.g. " + format_witnesses(witness)
                raise NannyNag(start[0], msg, line)
            indents.append(thisguy)

        elif type == DEDENT:
            # there's nothing we need to check here!  what's important is
            # that when the run of DEDENTs ends, the indentation of the
            # program statement (or ENDMARKER) that triggered the run is
            # equal to what's left at the top of the indents stack

            # Ouch!  This assert triggers if the last line of the source
            # is indented *and* lacks a newline -- then DEDENTs pop out
            # of thin air.
            # assert check_equal  # else no earlier NEWLINE, or an earlier INDENT
            check_equal = 1

            del indents[-1]

        elif check_equal and type not in JUNK:
            # this is the first "real token" following a NEWLINE, so it
            # must be the first token of the next program statement, or an
            # ENDMARKER; the "line" argument exposes the leading whitespace
            # for this statement; in the case of ENDMARKER, line is an empty
            # string, so will properly match the empty string with which the
            # "indents" stack was seeded
            check_equal = 0
            thisguy = Whitespace(line)
            if not indents[-1].equal(thisguy):
                witness = indents[-1].not_equal_witness(thisguy)
                msg = "indent not equal e.g. " + format_witnesses(witness)
                raise NannyNag(start[0], msg, line)

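    # End-to-end example of the NEWLINE/equal() machinery above: if one
    # line of a suite is indented with a single tab and the next with
    # eight spaces, tokenize (default tab size 8) reports no INDENT or
    # DEDENT, so the second line's first "real token" arrives with
    # check_equal set; Whitespace("\t") and Whitespace(" " * 8) are not
    # equal(), and tokeneater() raises NannyNag with a message like
    # "indent not equal e.g. at tab sizes 1, 2, 3, 4, 5, 6, 7, 9".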
else:
    # unpatched version of tokenize

    nesting_level = 0
    indents = []
    check_equal = 0

    def reset_globals():
        global nesting_level, indents, check_equal
        nesting_level = check_equal = 0
        indents = [Whitespace("")]

    def tokeneater(type, token, start, end, line,
                   INDENT=tokenize.INDENT,
                   DEDENT=tokenize.DEDENT,
                   NEWLINE=tokenize.NEWLINE,
                   COMMENT=tokenize.COMMENT,
                   OP=tokenize.OP):
        global nesting_level, indents, check_equal

        if type == INDENT:
            check_equal = 0
            thisguy = Whitespace(token)
            if not indents[-1].less(thisguy):
                witness = indents[-1].not_less_witness(thisguy)
                msg = "indent not greater e.g. " + format_witnesses(witness)
                raise NannyNag(start[0], msg, line)
            indents.append(thisguy)

        elif type == DEDENT:
            del indents[-1]

        elif type == NEWLINE:
            if nesting_level == 0:
                check_equal = 1

        elif type == COMMENT:
            pass

        elif check_equal:
            check_equal = 0
            thisguy = Whitespace(line)
            if not indents[-1].equal(thisguy):
                witness = indents[-1].not_equal_witness(thisguy)
                msg = "indent not equal e.g. " + format_witnesses(witness)
                raise NannyNag(start[0], msg, line)

        if type == OP and token in ('{', '[', '('):
            nesting_level = nesting_level + 1

        elif type == OP and token in ('}', ']', ')'):
            if nesting_level == 0:
                raise NannyNag(start[0],
                               "unbalanced bracket '" + token + "'",
                               line)
            nesting_level = nesting_level - 1

if __name__ == '__main__':
    main()