#!/usr/bin/env python

import os, difflib, time, gc, codecs, platform, sys
import textwrap

# Set up a logger manually for compatibility with Python 2.3
import logging
logging.getLogger('MARKDOWN').addHandler(logging.StreamHandler())
import markdown

TEST_DIR = "tests"
TMP_DIR = "./tmp/"
WRITE_BENCHMARK = False
ACTUALLY_MEASURE_MEMORY = True

######################################################################

# Memory measurement reads the /proc pseudo-filesystem, which only
# exists on Linux; elsewhere _VmB() falls back to returning 0.0.
if platform.system().lower() == "darwin":  # Darwin
    _proc_status = '/proc/%d/stat' % os.getpid()
else:  # Linux
    _proc_status = '/proc/%d/status' % os.getpid()

_scale = {'kB': 1024.0, 'mB': 1024.0*1024.0,
          'KB': 1024.0, 'MB': 1024.0*1024.0}

def _VmB(VmKey):
    '''Read a "Vm" entry (e.g. 'VmRSS:') from /proc/<pid>/status and
    return its value in bytes, or 0.0 if it cannot be determined.'''
    # Read the pseudo file /proc/<pid>/status.
    try:
        t = open(_proc_status)
        v = t.read()
        t.close()
    except IOError:
        return 0.0  # non-Linux?
    # Find the VmKey line, e.g. 'VmRSS: 9999 kB\n ...'
    i = v.find(VmKey)
    if i == -1:
        return 0.0  # key not present?
    v = v[i:].split(None, 3)  # split on whitespace
    if len(v) < 3:
        return 0.0  # invalid format?
    # Convert the value to bytes using the unit suffix.
    return float(v[1]) * _scale[v[2]]


def memory(since=0.0):
    '''Return virtual memory usage in bytes.'''
    if ACTUALLY_MEASURE_MEMORY:
        return _VmB('VmSize:') - since
    return 0.0


def resident(since=0.0):
    '''Return resident memory usage in bytes.'''
    return _VmB('VmRSS:') - since


def stacksize(since=0.0):
    '''Return stack size in bytes.'''
    return _VmB('VmStk:') - since

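# Typical use of these helpers (illustration only, not executed):
#
#     before = memory()         # absolute VmSize snapshot, in bytes
#     ... do some work ...
#     grew_by = memory(before)  # growth since the snapshot
#
# On platforms without /proc, _VmB() returns 0.0, so the helpers
# degrade to reporting zero instead of raising.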

############################################################

DIFF_FILE_TEMPLATE = """
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<style>
td {
    padding-left: 10px;
    padding-right: 10px;
}
colgroup {
    margin: 10px;
}
.diff_header {
    color: gray;
}
.ok {
    color: green;
}
.gray {
    color: gray;
}
.failed a {
    color: red;
}
.failed {
    color: red;
}
</style>
</head>
<body>
<h1>Results Summary</h1>
<table rules="groups">
<colgroup></colgroup>
<colgroup></colgroup>
<colgroup></colgroup>
<colgroup></colgroup>
<colgroup></colgroup>
<thead>
<tr><th></th><th>Seconds</th><th></th><th>Memory</th><th></th></tr>
</thead>
<tbody>
"""

FOOTER = """
</body>
</html>
"""
DIFF_TABLE_TEMPLATE = """
<table class="diff" rules="groups">
<colgroup></colgroup>
<colgroup></colgroup>
<colgroup></colgroup>
<colgroup></colgroup>
<colgroup></colgroup>
<colgroup></colgroup>
<thead>
<tr><th></th><th>Expected</th><th></th><th></th><th>Actual</th><th></th></tr>
</thead>
<tbody>
%s
</tbody>
</table>
"""


def smart_split(text):
    '''Hard-wrap text to 40 columns so the differ compares short,
    aligned lines rather than whole paragraphs.'''
    result = []
    for x in text.splitlines():
        for y in textwrap.wrap(textwrap.dedent(x), 40):
            result.append(y)
    return result

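# For illustration (not executed): a single 100-character line is
# returned as 40/40/20-character chunks:
#     smart_split("a" * 100) == ["a" * 40, "a" * 40, "a" * 20]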

differ = difflib.Differ()
try:
    # difflib.HtmlDiff is only available in Python 2.4+.
    htmldiff = difflib.HtmlDiff()
except AttributeError:
    htmldiff = None

class TestRunner:

    def __init__(self):
        self.failedTests = []
        if not os.path.exists(TMP_DIR):
            os.mkdir(TMP_DIR)

    def test_directory(self, dir, measure_time=False, safe_mode=False,
                       encoding="utf-8", output_format='xhtml1'):
        self.encoding = encoding
        benchmark_file_name = os.path.join(dir, "benchmark.dat")
        self.saved_benchmarks = {}

        if measure_time:
            if os.path.exists(benchmark_file_name):
                f = open(benchmark_file_name)
                for line in f:
                    test, str_time, str_mem = line.strip().split(":")
                    self.saved_benchmarks[test] = (float(str_time), float(str_mem))
                f.close()
            repeat = range(10)
        else:
            repeat = (0,)

        # First, determine from the name of the directory whether any
        # extensions need to be loaded.
        parts = os.path.split(dir)[-1].split("-x-")
        if len(parts) > 1:
            extensions = parts[1].split("-")
            print "Extensions:", extensions
        else:
            extensions = []

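        # For illustration: a directory named "extensions-x-tables" loads
        # the ["tables"] extension, and "extensions-x-ext1-ext2" would
        # load ["ext1", "ext2"].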
        mem = memory()
        start = time.clock()
        self.md = markdown.Markdown(extensions=extensions, safe_mode=safe_mode,
                                    output_format=output_format)
        construction_time = time.clock() - start
        construction_mem = memory(mem)

        self.benchmark_buffer = "construction:%f:%f\n" % (construction_time,
                                                          construction_mem)

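        # benchmark.dat holds one "name:seconds:bytes" line per entry,
        # e.g. (values illustrative only):
        #     construction:0.010000:524288.000000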
        html_diff_file_path = os.path.join(TMP_DIR, os.path.split(dir)[-1]) + ".html"
        self.html_diff_file = codecs.open(html_diff_file_path, "w", encoding=encoding)
        self.html_diff_file.write(DIFF_FILE_TEMPLATE)

        self.diffs_buffer = ""

        tests = [x.replace(".txt", "")
                 for x in os.listdir(dir) if x.endswith(".txt")]
        tests.sort()
        for test in tests:
            self.run_test(dir, test, repeat)

        self.html_diff_file.write("</tbody></table>")

        if sys.version_info < (3, 0):
            self.html_diff_file.write(self.diffs_buffer.decode("utf-8"))

        self.html_diff_file.write(FOOTER)
        self.html_diff_file.close()
        print "Diff written to %s" % html_diff_file_path

        benchmark_output_file_name = benchmark_file_name

        if not WRITE_BENCHMARK:
            benchmark_output_file_name += ".tmp"

        self.benchmark_file = open(benchmark_output_file_name, "w")
        self.benchmark_file.write(self.benchmark_buffer)
        self.benchmark_file.close()

    ####################

    def run_test(self, dir, test, repeat):
        print "--- %s ---" % test
        self.html_diff_file.write("<tr><td>%s</td>" % test)
        input_file = os.path.join(dir, test + ".txt")
        output_file = os.path.join(dir, test + ".html")

        expected_output = codecs.open(output_file, encoding=self.encoding).read()
        text = codecs.open(input_file, encoding=self.encoding).read()
        actual_output = ""
        actual_lines = []
        self.md.source = ""
        gc.collect()
        mem = memory()
        start = time.clock()
        for x in repeat:
            actual_output = self.md.convert(text)
        conversion_time = time.clock() - start
        conversion_mem = memory(mem)
        self.md.reset()

        expected_lines = [x.encode("utf-8") for x in smart_split(expected_output)]
        actual_lines = [x.encode("utf-8") for x in smart_split(actual_output)]

        # Keep only changed lines; lines common to both outputs start
        # with a space.
        diff = [x for x in differ.compare(expected_lines, actual_lines)
                if not x.startswith(" ")]

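        # difflib.Differ prefixes every output line with a two-character
        # code: '  ' common, '- ' expected only, '+ ' actual only, and
        # '? ' intraline hints; an empty diff therefore means a match.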
        if not diff:
            self.html_diff_file.write("<td class='ok'>OK</td>")
        else:
            self.failedTests.append(test)
            self.html_diff_file.write("<td class='failed'>" +
                                      "<a href='#diff-%s'>FAILED</a></td>" % test)
            print "MISMATCH on %s/%s.txt" % (dir, test)
            print
            for line in diff:
                print line
            if htmldiff is not None:
                htmlDiff = htmldiff.make_table(expected_lines, actual_lines,
                                               context=True)
                htmlDiff = "\n".join([x for x in htmlDiff.splitlines()
                                      if x.strip().startswith("<tr>")])
                self.diffs_buffer += "<a name='diff-%s'/><h2>%s</h2>" % (test, test)
                self.diffs_buffer += DIFF_TABLE_TEMPLATE % htmlDiff

        expected_time, expected_mem = self.saved_benchmarks.get(test, ("na", "na"))

        self.html_diff_file.write(get_benchmark_html(conversion_time, expected_time))
        self.html_diff_file.write(get_benchmark_html(conversion_mem, expected_mem))
        self.html_diff_file.write("</tr>\n")

        self.benchmark_buffer += "%s:%f:%f\n" % (test,
                                                 conversion_time, conversion_mem)

def get_benchmark_html(actual, expected):
    html = ""
    if expected != "na":
        if actual > expected * 1.5:
            tdiff = "failed"
        elif actual * 1.5 < expected:
            tdiff = "ok"
        else:
            tdiff = "same"
        # Treat near-zero measurements as noise rather than a regression.
        if ((actual <= 0 and expected < 0.015) or
                (expected <= 0 and actual < 0.015)):
            tdiff = "same"
    else:
        tdiff = "same"
    html += "<td class='%s'>%.2f</td>" % (tdiff, actual)
    if expected != "na":
        html += "<td class='gray'>%.2f</td>" % expected
    return html

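# For illustration (not executed): with expected=0.10, actual=0.30 is
# tagged 'failed' (more than 1.5x slower), actual=0.05 is 'ok' (more
# than 1.5x faster), and actual=0.12 falls through to 'same'.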

def run_tests():
    tester = TestRunner()
    #tester.test_directory("tests/basic")
    tester.test_directory("tests/markdown-test", measure_time=True)
    tester.test_directory("tests/misc", measure_time=True)
    tester.test_directory("tests/extensions-x-tables")
    tester.test_directory("tests/extensions-x-footnotes")
    #tester.test_directory("tests/extensions-x-ext1-ext2")
    tester.test_directory("tests/safe_mode", measure_time=True, safe_mode="escape")
    tester.test_directory("tests/extensions-x-wikilinks")
    tester.test_directory("tests/extensions-x-toc")
    tester.test_directory("tests/extensions-x-def_list")
    tester.test_directory("tests/extensions-x-abbr")
    tester.test_directory("tests/html4", output_format='html4')

    try:
        import pygments
    except ImportError:
        # Dependency not available - skip the codehilite tests.
        pass
    else:
        tester.test_directory("tests/extensions-x-codehilite")

    print "\n### Final result ###"
    if tester.failedTests:
        print "%d failed tests: %s" % (len(tester.failedTests), str(tester.failedTests))
    else:
        print "All tests passed, no errors!"


if __name__ == "__main__":
    run_tests()