#!/usr/local/bin/python -O

""" A Python Benchmark Suite

"""
#
# Note: Please keep this module compatible with Python 1.5.2.
#
# Tests may include features from later Python versions, but these
# should then be embedded in try-except clauses in the configuration
# module Setup.py.
#

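# For example, Setup.py can guard a test module that needs a more recent
# Python roughly like this (illustrative sketch only; the module name is
# just an example):
#
#   try:
#       from Unicode import *
#   except (ImportError, SyntaxError):
#       pass
#
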
# pybench Copyright
__copyright__ = """\
Copyright (c), 1997-2006, Marc-Andre Lemburg (mal@lemburg.com)
Copyright (c), 2000-2006, eGenix.com Software GmbH (info@egenix.com)

                   All Rights Reserved.

Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee or royalty is hereby
granted, provided that the above copyright notice appear in all copies
and that both that copyright notice and this permission notice appear
in supporting documentation or portions thereof, including
modifications, that you make.

THE AUTHOR MARC-ANDRE LEMBURG DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
"""

import sys, time, operator, string, re
from CommandLine import *

try:
    import cPickle
    pickle = cPickle
except ImportError:
    import pickle

# Version number; version history: see README file !
__version__ = '2.0'

### Constants

# Second fractions
MILLI_SECONDS = 1e3
MICRO_SECONDS = 1e6

# Percent unit
PERCENT = 100

# Horizontal line length
LINE = 79

# Minimum test run-time
MIN_TEST_RUNTIME = 1e-3

# Number of calibration runs to use for calibrating the tests
CALIBRATION_RUNS = 20

# Number of calibration loops to run for each calibration run
CALIBRATION_LOOPS = 20

# Allow skipping calibration ?
ALLOW_SKIPPING_CALIBRATION = 1

# Timer types
TIMER_TIME_TIME = 'time.time'
TIMER_TIME_CLOCK = 'time.clock'
TIMER_SYSTIMES_PROCESSTIME = 'systimes.processtime'

# Choose platform default timer
if sys.platform[:3] == 'win':
    # On WinXP this has 2.5ms resolution
    TIMER_PLATFORM_DEFAULT = TIMER_TIME_CLOCK
else:
    # On Linux this has 1ms resolution
    TIMER_PLATFORM_DEFAULT = TIMER_TIME_TIME

# Print debug information ?
_debug = 0

### Helpers

def get_timer(timertype):

    if timertype == TIMER_TIME_TIME:
        return time.time
    elif timertype == TIMER_TIME_CLOCK:
        return time.clock
    elif timertype == TIMER_SYSTIMES_PROCESSTIME:
        import systimes
        return systimes.processtime
    else:
        raise TypeError('unknown timer type: %s' % timertype)

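# Example (illustrative only): the returned object is a zero-argument
# callable that is sampled before and after the timed section, e.g.
#
#   timer = get_timer(TIMER_TIME_TIME)
#   t0 = timer()
#   ...                        # timed section
#   elapsed = timer() - t0     # elapsed time in seconds
#
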
def get_machine_details():

    import platform
    if _debug:
        print 'Getting machine details...'
    buildno, builddate = platform.python_build()
    python = platform.python_version()
    if python > '2.0':
        try:
            unichr(100000)
        except ValueError:
            # UCS2 build (standard)
            unicode = 'UCS2'
        else:
            # UCS4 build (most recent Linux distros)
            unicode = 'UCS4'
    else:
        unicode = None
    bits, linkage = platform.architecture()
    return {
        'platform': platform.platform(),
        'processor': platform.processor(),
        'executable': sys.executable,
        'python': platform.python_version(),
        'compiler': platform.python_compiler(),
        'buildno': buildno,
        'builddate': builddate,
        'unicode': unicode,
        'bits': bits,
        }

def print_machine_details(d, indent=''):

    l = ['Machine Details:',
         '   Platform ID:  %s' % d.get('platform', 'n/a'),
         '   Processor:    %s' % d.get('processor', 'n/a'),
         '',
         'Python:',
         '   Executable:   %s' % d.get('executable', 'n/a'),
         '   Version:      %s' % d.get('python', 'n/a'),
         '   Compiler:     %s' % d.get('compiler', 'n/a'),
         '   Bits:         %s' % d.get('bits', 'n/a'),
         '   Build:        %s (#%s)' % (d.get('builddate', 'n/a'),
                                        d.get('buildno', 'n/a')),
         '   Unicode:      %s' % d.get('unicode', 'n/a'),
         ]
    print indent + string.join(l, '\n' + indent) + '\n'

### Test baseclass

class Test:

    """ All tests must have this class as a base class. It provides
        the necessary interface to the benchmark machinery.

        The tests must set .rounds to a value high enough to let the
        test run between 20-50 seconds. This is needed because
        clock()-timing only gives rather inaccurate values (on Linux,
        for example, it is accurate to a few hundredths of a
        second). If you don't want to wait that long, use a warp
        factor larger than 1.

        It is also important to set the .operations variable to a
        value representing the number of "virtual operations" done per
        call of .run().

        If you change a test in some way, don't forget to increase
        its version number.

    """

    ### Instance variables that each test should override

    # Version number of the test as float (x.yy); this is important
    # for comparisons of benchmark runs - tests with unequal version
    # number will not get compared.
    version = 2.0

    # The number of abstract operations done in each round of the
    # test. An operation is the basic unit of what you want to
    # measure. The benchmark will output the amount of run-time per
    # operation. Note that in order to raise the measured timings
    # significantly above noise level, it is often required to repeat
    # sets of operations more than once per test round. The measured
    # overhead per test round should be less than 1 second.
    operations = 1

    # Number of rounds to execute per test run. This should be
    # adjusted to a figure that results in a test run-time of between
    # 1-2 seconds.
    rounds = 100000
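
    # A minimal sketch of a concrete test (illustrative only; the real
    # tests live in the modules imported by Setup.py, and the class shown
    # here is made up):
    #
    #   class TupleSlicingExample(Test):
    #
    #       version = 2.0
    #       operations = 3 * 100        # 3 slicings, repeated 100 times
    #       rounds = 100000
    #
    #       def test(self):
    #           t = tuple(range(100))
    #           for i in xrange(self.rounds):
    #               for j in xrange(100):
    #                   m = t[50:]
    #                   m = t[:25]
    #                   m = t[50:55]
    #
    #       def calibrate(self):
    #           t = tuple(range(100))
    #           for i in xrange(self.rounds):
    #               for j in xrange(100):
    #                   pass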

    ### Internal variables

    # Mark this class as implementing a test
    is_a_test = 1

    # Last timing: (effective time, absolute time, overhead)
    last_timing = (0.0, 0.0, 0.0)

    # Warp factor to use for this test
    warp = 1

    # Number of calibration runs to use
    calibration_runs = CALIBRATION_RUNS

    # List of calibration timings
    overhead_times = None

    # List of test run timings
    times = []

    # Timer used for the benchmark
    timer = TIMER_PLATFORM_DEFAULT

    def __init__(self, warp=None, calibration_runs=None, timer=None):

        # Set parameters
        if warp is not None:
            self.rounds = int(self.rounds / warp)
            if self.rounds == 0:
                raise ValueError('warp factor set too high')
            self.warp = warp
        if calibration_runs is not None:
            if (not ALLOW_SKIPPING_CALIBRATION and
                calibration_runs < 1):
                raise ValueError('at least one calibration run is required')
            self.calibration_runs = calibration_runs
        if timer is not None:
            self.timer = timer

        # Init variables
        self.times = []
        self.overhead_times = []

        # We want these to be in the instance dict, so that pickle
        # saves them
        self.version = self.version
        self.operations = self.operations
        self.rounds = self.rounds
243
Marc-André Lemburg7d9743d2006-06-13 18:56:56 +0000244 def get_timer(self):
245
246 """ Return the timer function to use for the test.
247
248 """
249 return get_timer(self.timer)
250
251 def compatible(self, other):
252
253 """ Return 1/0 depending on whether the test is compatible
254 with the other Test instance or not.
255
256 """
257 if self.version != other.version:
258 return 0
259 if self.rounds != other.rounds:
260 return 0
261 return 1
262
263 def calibrate_test(self):
264
265 if self.calibration_runs == 0:
266 self.overhead_times = [0.0]
267 return
268
269 calibrate = self.calibrate
270 timer = self.get_timer()
271 calibration_loops = range(CALIBRATION_LOOPS)
272
273 # Time the calibration loop overhead
274 prep_times = []
275 for i in range(self.calibration_runs):
276 t = timer()
277 for i in calibration_loops:
278 pass
279 t = timer() - t
280 prep_times.append(t)
281 min_prep_time = min(prep_times)
282 if _debug:
283 print
284 print 'Calib. prep time = %.6fms' % (
285 min_prep_time * MILLI_SECONDS)
286
287 # Time the calibration runs (doing CALIBRATION_LOOPS loops of
288 # .calibrate() method calls each)
289 for i in range(self.calibration_runs):
290 t = timer()
291 for i in calibration_loops:
292 calibrate()
293 t = timer() - t
294 self.overhead_times.append(t / CALIBRATION_LOOPS
295 - min_prep_time)
296
297 # Check the measured times
298 min_overhead = min(self.overhead_times)
299 max_overhead = max(self.overhead_times)
300 if _debug:
301 print 'Calib. overhead time = %.6fms' % (
302 min_overhead * MILLI_SECONDS)
303 if min_overhead < 0.0:
304 raise ValueError('calibration setup did not work')
305 if max_overhead - min_overhead > 0.1:
306 raise ValueError(
307 'overhead calibration timing range too inaccurate: '
308 '%r - %r' % (min_overhead, max_overhead))

    def run(self):

        """ Run the test in two phases: first calibrate, then
            do the actual test. Be careful to keep the calibration
            timing low with respect to the test timing.

        """
        test = self.test
        timer = self.get_timer()

        # Get calibration
        min_overhead = min(self.overhead_times)

        # Test run
        t = timer()
        test()
        t = timer() - t
        if t < MIN_TEST_RUNTIME:
            raise ValueError('warp factor too high: '
                             'test times are < 1ms')
        eff_time = t - min_overhead
        if eff_time < 0:
            raise ValueError('wrong calibration')
        self.last_timing = (eff_time, t, min_overhead)
        self.times.append(eff_time)

    def calibrate(self):

        """ Calibrate the test.

            This method should execute everything that is needed to
            set up and run the test - except for the actual operations
            that you intend to measure. pybench uses this method to
            measure the test implementation overhead.

        """
        return

    def test(self):

        """ Run the test.

            The test needs to run self.rounds rounds, executing
            self.operations operations in each round.

        """
        return

    def stat(self):

        """ Return test run statistics as tuple:

            (minimum run time,
             average run time,
             total run time,
             average time per operation,
             minimum overhead time)

        """
        runs = len(self.times)
        if runs == 0:
            return 0.0, 0.0, 0.0, 0.0, 0.0
        min_time = min(self.times)
        total_time = reduce(operator.add, self.times, 0.0)
        avg_time = total_time / float(runs)
        operation_avg = total_time / float(runs
                                           * self.rounds
                                           * self.operations)
        if self.overhead_times:
            min_overhead = min(self.overhead_times)
        else:
            min_overhead = self.last_timing[2]
        return min_time, avg_time, total_time, operation_avg, min_overhead

### Load Setup

# This has to be done after the definition of the Test class, since
# the Setup module will import subclasses using this class.

import Setup

### Benchmark base class

class Benchmark:

    # Name of the benchmark
    name = ''

    # Number of benchmark rounds to run
    rounds = 1

    # Warp factor used to run the tests
    warp = 1

    # Number of calibration runs to use per test
    calibration_runs = CALIBRATION_RUNS

    # Average benchmark round time
    roundtime = 0

    # Benchmark version number as float x.yy
    version = 2.0

    # Produce verbose output ?
    verbose = 0

    # Dictionary with the machine details
    machine_details = None

    # Timer used for the benchmark
    timer = TIMER_PLATFORM_DEFAULT

    def __init__(self, name, verbose=None, timer=None, warp=None,
                 calibration_runs=None):

        if name:
            self.name = name
        else:
            self.name = '%04i-%02i-%02i %02i:%02i:%02i' % \
                        (time.localtime(time.time())[:6])
        if verbose is not None:
            self.verbose = verbose
        if timer is not None:
            self.timer = timer
        if warp is not None:
            self.warp = warp
        if calibration_runs is not None:
            self.calibration_runs = calibration_runs

        # Init vars
        self.tests = {}
        if _debug:
            print 'Getting machine details...'
        self.machine_details = get_machine_details()

        # Make .version an instance attribute to have it saved in the
        # Benchmark pickle
        self.version = self.version

    def get_timer(self):

        """ Return the timer function to use for the benchmark.

        """
        return get_timer(self.timer)

    def compatible(self, other):

        """ Return 1/0 depending on whether the benchmark is
            compatible with the other Benchmark instance or not.

        """
        if self.version != other.version:
            return 0
        if (self.machine_details == other.machine_details and
            self.timer != other.timer):
            return 0
        if (self.calibration_runs == 0 and
            other.calibration_runs != 0):
            return 0
        if (self.calibration_runs != 0 and
            other.calibration_runs == 0):
            return 0
        return 1

    def load_tests(self, setupmod, limitnames=None):

        # Add tests
        if self.verbose:
            print 'Searching for tests ...'
            print '--------------------------------------'
        for testclass in setupmod.__dict__.values():
            if not hasattr(testclass, 'is_a_test'):
                continue
            name = testclass.__name__
            if name == 'Test':
                continue
            if (limitnames is not None and
                limitnames.search(name) is None):
                continue
            self.tests[name] = testclass(
                warp=self.warp,
                calibration_runs=self.calibration_runs,
                timer=self.timer)
        l = self.tests.keys()
        l.sort()
        if self.verbose:
            for name in l:
                print '  %s' % name
            print '--------------------------------------'
            print '  %i tests found' % len(l)
            print

    def calibrate(self):

        print 'Calibrating tests. Please wait...'
        if self.verbose:
            print
            print 'Test                              min      max'
            print '-' * LINE
        tests = self.tests.items()
        tests.sort()
        for i in range(len(tests)):
            name, test = tests[i]
            test.calibrate_test()
            if self.verbose:
                print '%30s:  %6.3fms  %6.3fms' % \
                      (name,
                       min(test.overhead_times) * MILLI_SECONDS,
                       max(test.overhead_times) * MILLI_SECONDS)
        print

    def run(self):

        tests = self.tests.items()
        tests.sort()
        timer = self.get_timer()
        print 'Running %i round(s) of the suite at warp factor %i:' % \
              (self.rounds, self.warp)
        print
        self.roundtimes = []
        for i in range(self.rounds):
            if self.verbose:
                print ' Round %-25i  effective   absolute  overhead' % (i+1)
            total_eff_time = 0.0
            for j in range(len(tests)):
                name, test = tests[j]
                if self.verbose:
                    print '%30s:' % name,
                test.run()
                (eff_time, abs_time, min_overhead) = test.last_timing
                total_eff_time = total_eff_time + eff_time
                if self.verbose:
                    print '    %5.0fms    %5.0fms  %7.3fms' % \
                          (eff_time * MILLI_SECONDS,
                           abs_time * MILLI_SECONDS,
                           min_overhead * MILLI_SECONDS)
            self.roundtimes.append(total_eff_time)
            if self.verbose:
                print ('                   '
                       '               ------------------------------')
                print ('                   '
                       '     Totals:    %6.0fms' %
                       (total_eff_time * MILLI_SECONDS))
                print
            else:
                print '* Round %i done in %.3f seconds.' % (i+1,
                                                            total_eff_time)
        print

    def stat(self):

        """ Return benchmark run statistics as tuple:

            (minimum round time,
             average round time,
             maximum round time)

            XXX Currently not used, since the benchmark computes its
                statistics per test across all rounds.

        """
        runs = len(self.roundtimes)
        if runs == 0:
            return 0.0, 0.0, 0.0
        min_time = min(self.roundtimes)
        total_time = reduce(operator.add, self.roundtimes, 0.0)
        avg_time = total_time / float(runs)
        max_time = max(self.roundtimes)
        return (min_time, avg_time, max_time)

    def print_header(self, title='Benchmark'):

        print '-' * LINE
        print '%s: %s' % (title, self.name)
        print '-' * LINE
        print
        print '    Rounds: %s' % self.rounds
        print '    Warp:   %s' % self.warp
        print '    Timer:  %s' % self.timer
        print
        if self.machine_details:
            print_machine_details(self.machine_details, indent='    ')
            print

    def print_benchmark(self, hidenoise=0, limitnames=None):

        print ('Test                          '
               '   minimum  average  operation  overhead')
        print '-' * LINE
        tests = self.tests.items()
        tests.sort()
        total_min_time = 0.0
        total_avg_time = 0.0
        for name, test in tests:
            if (limitnames is not None and
                limitnames.search(name) is None):
                continue
            (min_time,
             avg_time,
             total_time,
             op_avg,
             min_overhead) = test.stat()
            total_min_time = total_min_time + min_time
            total_avg_time = total_avg_time + avg_time
            print '%30s:  %5.0fms  %5.0fms  %6.2fus  %7.3fms' % \
                  (name,
                   min_time * MILLI_SECONDS,
                   avg_time * MILLI_SECONDS,
                   op_avg * MICRO_SECONDS,
                   min_overhead * MILLI_SECONDS)
        print '-' * LINE
        print ('Totals:                         '
               '  %6.0fms  %6.0fms' %
               (total_min_time * MILLI_SECONDS,
                total_avg_time * MILLI_SECONDS,
                ))
        print

    def print_comparison(self, compare_to, hidenoise=0, limitnames=None):

        # Check benchmark versions
        if compare_to.version != self.version:
            print ('* Benchmark versions differ: '
                   'cannot compare this benchmark to "%s" !' %
                   compare_to.name)
            print
            self.print_benchmark(hidenoise=hidenoise,
                                 limitnames=limitnames)
            return

        # Print header
        compare_to.print_header('Comparing with')
        print ('Test                          '
               '   minimum run-time        average  run-time')
        print ('                              '
               '   this    other   diff    this    other   diff')
        print '-' * LINE

        # Print test comparisons
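        # Note: the diff columns compare warp-normalized timings, so runs
        # recorded at different warp factors stay comparable.  As a purely
        # illustrative example, 50ms at warp 20 here vs. 55ms at warp 10 in
        # the other run gives (50 * 20) / (55 * 10) - 1.0 = +81.8%.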
        tests = self.tests.items()
        tests.sort()
        total_min_time = other_total_min_time = 0.0
        total_avg_time = other_total_avg_time = 0.0
        benchmarks_compatible = self.compatible(compare_to)
        tests_compatible = 1
        for name, test in tests:
            if (limitnames is not None and
                limitnames.search(name) is None):
                continue
            (min_time,
             avg_time,
             total_time,
             op_avg,
             min_overhead) = test.stat()
            total_min_time = total_min_time + min_time
            total_avg_time = total_avg_time + avg_time
            try:
                other = compare_to.tests[name]
            except KeyError:
                other = None
            if other is None:
                # Other benchmark doesn't include the given test
                min_diff, avg_diff = 'n/a', 'n/a'
                other_min_time = 0.0
                other_avg_time = 0.0
                tests_compatible = 0
            else:
                (other_min_time,
                 other_avg_time,
                 other_total_time,
                 other_op_avg,
                 other_min_overhead) = other.stat()
                other_total_min_time = other_total_min_time + other_min_time
                other_total_avg_time = other_total_avg_time + other_avg_time
                if (benchmarks_compatible and
                    test.compatible(other)):
                    # Both benchmark and tests are comparable
                    min_diff = ((min_time * self.warp) /
                                (other_min_time * other.warp) - 1.0)
                    avg_diff = ((avg_time * self.warp) /
                                (other_avg_time * other.warp) - 1.0)
                    if hidenoise and abs(min_diff) < 10.0:
                        min_diff = ''
                    else:
                        min_diff = '%+5.1f%%' % (min_diff * PERCENT)
                    if hidenoise and abs(avg_diff) < 10.0:
                        avg_diff = ''
                    else:
                        avg_diff = '%+5.1f%%' % (avg_diff * PERCENT)
                else:
                    # Benchmark or tests are not comparable
                    min_diff, avg_diff = 'n/a', 'n/a'
                    tests_compatible = 0
            print '%30s: %5.0fms %5.0fms %7s %5.0fms %5.0fms %7s' % \
                  (name,
                   min_time * MILLI_SECONDS,
                   other_min_time * MILLI_SECONDS * compare_to.warp / self.warp,
                   min_diff,
                   avg_time * MILLI_SECONDS,
                   other_avg_time * MILLI_SECONDS * compare_to.warp / self.warp,
                   avg_diff)
        print '-' * LINE

        # Summarise test results
        if not benchmarks_compatible or not tests_compatible:
            min_diff, avg_diff = 'n/a', 'n/a'
        else:
            if other_total_min_time != 0.0:
                min_diff = '%+5.1f%%' % (
                    ((total_min_time * self.warp) /
                     (other_total_min_time * compare_to.warp) - 1.0) * PERCENT)
            else:
                min_diff = 'n/a'
            if other_total_avg_time != 0.0:
                avg_diff = '%+5.1f%%' % (
                    ((total_avg_time * self.warp) /
                     (other_total_avg_time * compare_to.warp) - 1.0) * PERCENT)
            else:
                avg_diff = 'n/a'
        print ('Totals:                        '
               ' %5.0fms %5.0fms %7s %5.0fms %5.0fms %7s' %
               (total_min_time * MILLI_SECONDS,
                (other_total_min_time * compare_to.warp/self.warp
                 * MILLI_SECONDS),
                min_diff,
                total_avg_time * MILLI_SECONDS,
                (other_total_avg_time * compare_to.warp/self.warp
                 * MILLI_SECONDS),
                avg_diff
               ))
        print
        print '(this=%s, other=%s)' % (self.name,
                                       compare_to.name)
        print

class PyBenchCmdline(Application):

    header = ("PYBENCH - a benchmark test suite for Python "
              "interpreters/compilers.")

    version = __version__

    debug = _debug

    options = [ArgumentOption('-n',
                              'number of rounds',
                              Setup.Number_of_rounds),
               ArgumentOption('-f',
                              'save benchmark to file arg',
                              ''),
               ArgumentOption('-c',
                              'compare benchmark with the one in file arg',
                              ''),
               ArgumentOption('-s',
                              'show benchmark in file arg, then exit',
                              ''),
               ArgumentOption('-w',
                              'set warp factor to arg',
                              Setup.Warp_factor),
               ArgumentOption('-t',
                              'run only tests with names matching arg',
                              ''),
               ArgumentOption('-C',
                              'set the number of calibration runs to arg',
                              CALIBRATION_RUNS),
               SwitchOption('-d',
                            'hide noise in comparisons',
                            0),
               SwitchOption('-v',
                            'verbose output (not recommended)',
                            0),
               SwitchOption('--with-gc',
                            'enable garbage collection',
                            0),
               SwitchOption('--with-syscheck',
                            'use default sys check interval',
                            0),
               ArgumentOption('--timer',
                              'use given timer',
                              TIMER_PLATFORM_DEFAULT),
               ]

    about = """\
The normal operation is to run the suite and display the
results. Use -f to save them for later reuse or comparisons.

Available timers:

   time.time
   time.clock
   systimes.processtime

Examples:

python2.1 pybench.py -f p21.pybench
python2.5 pybench.py -f p25.pybench
python pybench.py -s p25.pybench -c p21.pybench
"""
    copyright = __copyright__

    def main(self):

        rounds = self.values['-n']
        reportfile = self.values['-f']
        show_bench = self.values['-s']
        compare_to = self.values['-c']
        hidenoise = self.values['-d']
        warp = int(self.values['-w'])
        withgc = self.values['--with-gc']
        limitnames = self.values['-t']
        if limitnames:
            if _debug:
                print '* limiting test names to ones with substring "%s"' % \
                      limitnames
            limitnames = re.compile(limitnames, re.I)
        else:
            limitnames = None
        verbose = self.verbose
        withsyscheck = self.values['--with-syscheck']
        calibration_runs = self.values['-C']
        timer = self.values['--timer']

        print '-' * LINE
        print 'PYBENCH %s' % __version__
        print '-' * LINE
        print '* using Python %s' % (string.split(sys.version)[0])

        # Switch off garbage collection
        if not withgc:
            try:
                import gc
            except ImportError:
                print '* Python version doesn\'t support garbage collection'
            else:
                gc.disable()
                print '* disabled garbage collection'

        # "Disable" sys check interval
        if not withsyscheck:
            # Too bad the check interval uses an int instead of a long...
            value = 2147483647
            sys.setcheckinterval(value)
            print '* system check interval set to maximum: %s' % value

        if timer == TIMER_SYSTIMES_PROCESSTIME:
            import systimes
            print '* using timer: systimes.processtime (%s)' % \
                  systimes.SYSTIMES_IMPLEMENTATION
        else:
            print '* using timer: %s' % timer

        print

        if compare_to:
            try:
                f = open(compare_to, 'rb')
                bench = pickle.load(f)
                bench.name = compare_to
                f.close()
                compare_to = bench
            except IOError, reason:
                print '* Error opening/reading file %s: %s' % (
                    repr(compare_to),
                    reason)
                compare_to = None

        if show_bench:
            try:
                f = open(show_bench, 'rb')
                bench = pickle.load(f)
                bench.name = show_bench
                f.close()
                bench.print_header()
                if compare_to:
                    bench.print_comparison(compare_to,
                                           hidenoise=hidenoise,
                                           limitnames=limitnames)
                else:
                    bench.print_benchmark(hidenoise=hidenoise,
                                          limitnames=limitnames)
            except IOError, reason:
                print '* Error opening/reading file %s: %s' % (
                    repr(show_bench),
                    reason)
            print
            return

        if reportfile:
            print 'Creating benchmark: %s (rounds=%i, warp=%i)' % \
                  (reportfile, rounds, warp)
            print

        # Create benchmark object
        bench = Benchmark(reportfile,
                          verbose=verbose,
                          timer=timer,
                          warp=warp,
                          calibration_runs=calibration_runs)
        bench.rounds = rounds
        bench.load_tests(Setup, limitnames=limitnames)
        try:
            bench.calibrate()
            bench.run()
        except KeyboardInterrupt:
            print
            print '*** KeyboardInterrupt -- Aborting'
            print
            return
        bench.print_header()
        if compare_to:
            bench.print_comparison(compare_to,
                                   hidenoise=hidenoise,
                                   limitnames=limitnames)
        else:
            bench.print_benchmark(hidenoise=hidenoise,
                                  limitnames=limitnames)

        # Ring bell
        sys.stderr.write('\007')

        if reportfile:
            try:
                f = open(reportfile, 'wb')
                bench.name = reportfile
                pickle.dump(bench, f)
                f.close()
            except IOError:
                print '* Error opening/writing reportfile'

if __name__ == '__main__':
    PyBenchCmdline()