#!/usr/bin/env python
"""
    An LTP [execution and] parsing wrapper.

    Used as a second layer for ease of use, since many developers complain
    about the complexity involved in trying to use LTP in my
    organization -_-.

    Copyright (C) 2009, Garrett Cooper

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""

from optparse import OptionGroup, OptionParser
import os, re, sys

class ResultsParseException(Exception):
    """ Extended class for parsing LTP results. """

def parse_ltp_results(exec_log, output_log, verbose=0):
    """ Function for parsing LTP results.

    1. The exec log is the log with the results in summary form.

       And now a note from our sponsors about exec logs...

       startup='Thu Oct 1 06:42:07 2009'
       tag=abort01 stime=1254379327 dur=2 exit=exited stat=0 core=no cu=0 cs=16
       tag=accept01 stime=1254379329 dur=0 exit=exited stat=0 core=no cu=1 cs=0
       tag=access01 stime=1254379329 dur=0 exit=exited stat=0 core=no cu=0 cs=0
       tag=access02 stime=1254379329 dur=0 exit=exited stat=0 core=no cu=0 cs=0
       tag=access03 stime=1254379329 dur=1 exit=exited stat=0 core=no cu=0 cs=1

       [...]

       a. tag is the test tag name.
       b. stime is the system time at the start of the exec.
       c. dur is the total duration of the test.
       d. exit tells you what the result was. Valid values are:
          - exited
          - signaled
          - stopped
          - unknown
          See run_child in pan.c.
       e. stat is the exit status.
       f. core answers the question: `did I dump core?'.
       g. cu is the cutime (cumulative user time).
       h. cs is the cstime (cumulative system time).

    2. The output log is the log with all of the terse results.
    3. verbose tells us whether or not we need to include the passed results.
    """

    if not os.access(exec_log, os.R_OK):
        raise ResultsParseException("Exec log - %s - specified doesn't exist"
                                    % exec_log)
    elif 1 < verbose and not os.access(output_log, os.R_OK):
        # Need the output log for context to the end user.
        raise ResultsParseException("Output log - %s - specified doesn't exist"
                                    % output_log)

    context = None

    failed = [ ]
    passed = 0

    if 2 <= verbose:
        passed = [ ]

    target_vals = ( 'exited', '0', 'no' )

    fd = open(exec_log, 'r')

    try:
        content = fd.read()
        # finditer returns a lazy iterator; materialize it so the empty-log
        # check below actually works.
        matches = list(re.finditer('tag=(?P<tag>\w+).+exit=(?P<exit>\w+) '
                                   'stat=(?P<stat>\d+) core=(?P<core>\w+)',
                                   content))
    finally:
        fd.close()

    if not matches:
        raise ResultsParseException("No parseable results were found in the "
                                    "exec log - `%s'." % exec_log)
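
    # Illustrative note (not from the original script): for an exec-log line
    # such as
    #   tag=abort01 stime=1254379327 dur=2 exit=exited stat=0 core=no cu=0 cs=16
    # the regex above captures tag='abort01', exit='exited', stat='0' and
    # core='no'; the loop below counts anything that does not match
    # target_vals, i.e. ('exited', '0', 'no'), as a failure.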

    for match in matches:

        if ((match.group('exit'), match.group('stat'), match.group('core')) !=
                target_vals):
            failed.append(match.group('tag'))
        elif 2 <= verbose:
            passed.append(match.group('tag'))
        else:
            passed += 1

    # Save memory on large files because lists can eat up a fair amount of
    # memory.
    matches = None

    if 1 <= verbose:

        context = { }

        search_tags = failed[:]

        if 2 <= verbose:
            search_tags += passed

        search_tags.sort()

        fd = open(output_log, 'r')

        try:

            try:
                lines = fd.readlines()
            finally:
                fd.close()

            end_output = '<<<execution_status>>>'
            output_start = '<<<test_output>>>'

            tag_re = re.compile('tag=(\w+)')
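
            # Stanza shape this loop assumes (an illustrative sketch inferred
            # from the marker handling below): each test's section of the
            # output log carries a "tag=<name> ..." line, then
            # '<<<test_output>>>', the test's output, and finally
            # '<<<execution_status>>>'. Everything between the two markers is
            # captured as that test's context.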

            grab_output = False

            i = 0

            local_context = ''

            line_len = len(lines)

            search_tag = None

            while i < line_len:

                if lines[i].startswith(end_output):

                    if search_tag:
                        context[search_tag] = local_context

                    grab_output = False
                    local_context = ''
                    search_tag = None

                if not search_tag:

                    while i < len(lines):

                        match = tag_re.match(lines[i])

                        if match and match.group(1) in search_tags:
                            search_tag = match.group(1)
                            break

                        i += 1

                elif lines[i].startswith(output_start):
                    grab_output = True
                elif grab_output:
                    local_context += lines[i]

                i += 1

            for k in context.keys():
                if k not in search_tags:
                    raise ResultsParseException('Leftover token in search '
                                                'keys: %s' % k)

        except Exception, exc:
            # XXX (garrcoop): change from Exception to soft error and print
            # out warning with logging module.
            raise ResultsParseException('Encountered exception reading output '
                                        'for context: %s' % str(exc))

    return failed, passed, context
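
# A hypothetical call (illustrative paths), showing the shape of the return
# values:
#
#   failed, passed, context = \
#       parse_ltp_results('/opt/ltp/output/syscalls-exec.log',
#                         '/opt/ltp/output/syscalls-output.log', verbose=2)
#
# With verbose=2, `failed' and `passed' are lists of tags and `context' maps
# each tag to its captured output; with verbose=0, `passed' is a plain count
# and `context' is None.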

def determine_context(output_log, testsuite, test_set, context):
    """ Return a set of context values mapping test_set -> context. """

    test_set_context = {}

    for test in test_set:

        if test in context:
            test_context = context[test]
            del context[test]
        else:
            test_context = ('Could not determine context for %s; please see '
                            'output log - %s' % (test, output_log))

        test_set_context['%s : %s' % (testsuite, test)] = test_context

    return test_set_context

def print_context(output_dest, header, testsuite_context):
    """ Print out testsuite_context to output_dest, heading it up with
    header.
    """
    output_dest.write('\n'.join(['', '=' * 40, header, '-' * 40, '']))

    for test, context in testsuite_context.items():
        output_dest.write('<output test="%s">\n%s\n</output>\n' %
                          (test, context.strip()))
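
# print_context emits a block shaped roughly like this (illustrative values):
#
#   ========================================
#   FAILED TESTCASES for syscalls
#   ----------------------------------------
#   <output test="syscalls : abort01">
#   ...captured test output...
#   </output>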

def main():
    """ main. """

    parser = OptionParser(prog=os.path.basename(sys.argv[0]),
                          usage='usage: %prog [options] test ...',
                          version='0.0.1')

    ltpdir = os.getenv('LTPROOT', '@prefix@')

    parser.add_option('-l', '--ltp-dir', dest='ltp_dir',
                      default=ltpdir, help='LTP directory [default: %default]')
    parser.add_option('-L', '--log-dir', dest='log_dir',
                      default=None,
                      help=('directory for [storing and] retrieving logs '
                            '[default: %s/output]' % ltpdir),
                      metavar='DIR')
    parser.add_option('-p', '--postprocess-only', dest='postprocess_only',
                      default=False, action='store_true',
                      help=("Don't execute runltp; just postprocess logs "
                            "[default: %default]."))
    parser.add_option('-o', '--output-file', dest='output_file',
                      default=None,
                      help='File to output results')
    parser.add_option('-r', '--runltp-opts', dest='runltp_opts',
                      default='',
                      help=('options to pass directly to runltp (will '
                            'suppress -q).'))

    group = OptionGroup(parser, 'Logging',
                        'If --summary-mode is 0, then the summary output is '
                        'suppressed. '
                        'If --summary-mode is 1 [the default], then summary '
                        'output will be displayed for test execution. '
                        'If --summary-mode is 2, then summary output will be '
                        'provided on a per-test suite basis. If only '
                        'one test suite is specified, this has the same net '
                        "effect as `--summary-mode 1'. "
                        'If --verbose is specified once, prints out failed '
                        'test information with additional context. '
                        'If --verbose is specified twice, prints out the '
                        'failed and passed test context, as well as the '
                        'summary.')

    parser.add_option('-s', '--summary-mode', dest='summary_mode', default=1,
                      type='int',
                      help='See Logging.')

    parser.add_option('-v', '--verbose', dest='verbose', default=0,
                      action='count',
                      help=('Increases context verbosity from tests. See '
                            'Logging for more details.'))
    parser.add_option_group(group)
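
    # Example invocations (illustrative; assumes this script is installed on
    # PATH as execltp):
    #   execltp -v syscalls                   # run syscalls, show failed-test context
    #   execltp -v -v -s 2 syscalls fs        # per-suite summaries with all context
    #   execltp -p -L /tmp/ltp-logs syscalls  # only re-parse existing logs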

    group = OptionGroup(parser, 'Copyright',
                        '%(prog)s version %(version)s, Copyright (C) 2009, '
                        'Garrett Cooper. %(prog)s comes with ABSOLUTELY NO '
                        'WARRANTY; '
                        'This is free software, and you are welcome to '
                        'redistribute it under certain conditions (See the '
                        'license text in %(file)s for more details).'
                        % { 'file' : os.path.abspath(__file__),
                            'prog' : parser.prog,
                            'version' : parser.version })

    parser.add_option_group(group)

    opts, args = parser.parse_args()

    # Remove -q from the opts string, as long as it's a standalone option.
    runltp_opts = re.sub('^((?<!\S)+\-q\s+|\-q|\s+\-q(?!\S))$', '',
                         opts.runltp_opts)

    if not opts.log_dir:
        opts.log_dir = os.path.join(opts.ltp_dir, 'output')

    if not opts.summary_mode and not opts.verbose:
        parser.error('You cannot suppress summary output and disable '
                     'verbosity.')
    elif opts.summary_mode not in range(3):
        parser.error('--summary-mode must be a value between 0 and 2.')

    if len(args) == 0:
        # This matches the default test suite list in runltp when -f isn't
        # specified. Look for `SCENFILES'.
        args = [ 'syscalls', 'fs', 'fsx', 'dio', 'io', 'mm', 'ipc', 'sched',
                 'math', 'nptl', 'pty', 'containers', 'fs_bind', 'controllers',
                 'filecaps', 'cap_bounds', 'fcntl-locktests', 'connectors',
                 'admin_tools', 'timers', 'power_management_tests', 'numa',
                 'hugetlb', 'commands', 'hyperthreading' ]

    if opts.output_file:

        output_dir = os.path.dirname(opts.output_file)

        if output_dir:
            # Not cwd; check whether the directory already exists.

            if not os.path.exists(output_dir):
                # We need to make the directory.
                os.makedirs(os.path.dirname(opts.output_file))
            elif not os.path.isdir(os.path.abspath(output_dir)):
                # Path exists, but isn't a directory. Oops!
                parser.error('Dirname for path specified - %s - is not valid'
                             % output_dir)

        else:
            # Current path (cwd).
            opts.output_file = os.path.join(os.getcwd(), opts.output_file)

        output_dest = open(opts.output_file, 'w')

    else:

        output_dest = sys.stdout

    try:

        failed_context = {}
        passed_context = {}

        failed_count = 0
        passed_count = 0

        if opts.summary_mode == 2 and len(args) == 1:
            opts.summary_mode = 1

        for testsuite in args:

            # Iterate over the provided test list.

            context = {}
            exec_log = os.path.join(opts.log_dir, '%s-exec.log' % testsuite)
            output_log = os.path.join(opts.log_dir, ('%s-output.log'
                                                     % testsuite))

            failed_subset = {}

            runtest_file = os.path.join(opts.ltp_dir, 'runtest', testsuite)

            if not opts.postprocess_only:

                for log in [ exec_log, output_log ]:
                    if os.path.isfile(log):
                        os.remove(log)

                if not os.access(runtest_file, os.R_OK):
                    output_dest.write("%s doesn't exist; skipping "
                                      "test\n" % runtest_file)
                    continue
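
                # Illustrative note: with the default log directory and
                # testsuite 'syscalls', the command assembled below is roughly
                #   $LTPROOT/runltp -f syscalls \
                #       -l $LTPROOT/output/syscalls-exec.log \
                #       -o $LTPROOT/output/syscalls-output.log
                # plus whatever --runltp-opts were passed through.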
                os.system(' '.join([ os.path.join(opts.ltp_dir, 'runltp'),
                                     runltp_opts, '-f', testsuite,
                                     '-l', exec_log, '-o', output_log ]))

            try:

                failed_subset, passed_css, context = \
                    parse_ltp_results(exec_log, output_log,
                                      verbose=opts.verbose)

            except ResultsParseException, rpe:
                output_dest.write('Error encountered when parsing results for '
                                  'test - %s: %s\n' % (testsuite, str(rpe)))
                continue

            failed_count += len(failed_subset)

            failed_subset_context = {}
            passed_subset_context = {}

            if opts.verbose:
                failed_subset_context = determine_context(output_log,
                                                          testsuite,
                                                          failed_subset,
                                                          context)

            if type(passed_css) == list:

                passed_count += len(passed_css)

                if opts.verbose == 2:
                    passed_subset_context = determine_context(output_log,
                                                              testsuite,
                                                              passed_css,
                                                              context)

            else:

                passed_count += passed_css

            if opts.summary_mode == 1:

                failed_context.update(failed_subset_context)
                passed_context.update(passed_subset_context)

            else:

                if 1 <= opts.verbose:
                    # Print out failed testcases.
                    print_context(output_dest,
                                  'FAILED TESTCASES for %s' % testsuite,
                                  failed_subset_context)

                if opts.verbose == 2:
                    # Print out passed testcases with context.
                    print_context(output_dest,
                                  'PASSED TESTCASES for %s' % testsuite,
                                  passed_subset_context)

                if opts.summary_mode == 2:
                    output_dest.write("""
========================================
SUMMARY for: %s
----------------------------------------
PASS - %d
FAIL - %d
----------------------------------------
""" % (testsuite, passed_count, len(failed_subset)))

        if opts.summary_mode == 1:

            # Print out overall results.

            if 1 <= opts.verbose:
                # Print out failed testcases with context.
                print_context(output_dest, "FAILED TESTCASES", failed_context)

            if opts.verbose == 2:
                # Print out passed testcases with context.
                print_context(output_dest, "PASSED TESTCASES", passed_context)

            output_dest.write("""
========================================
SUMMARY for tests:
%s
----------------------------------------
PASS - %d
FAIL - %d
----------------------------------------
""" % (' '.join(args), passed_count, failed_count))

    finally:

        if output_dest != sys.stdout:

            output_dest.close()

if __name__ == '__main__':
    main()