#!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import base64
import sys
import time

from webkitpy.layout_tests.port import Port, Driver, DriverOutput
from webkitpy.layout_tests.port.base import VirtualTestSuite
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.crashlogs import CrashLogs


# This sets basic expectations for a test. Each individual expectation
# can be overridden by a keyword argument in TestList.add().
class TestInstance(object):
    def __init__(self, name):
        self.name = name
        self.base = name[(name.rfind("/") + 1):name.rfind(".")]
        self.crash = False
        self.web_process_crash = False
        self.exception = False
        self.hang = False
        self.keyboard = False
        self.error = ''
        self.timeout = False
        self.is_reftest = False

        # The values of each field are treated as raw byte strings. They
        # will be converted to unicode strings where appropriate using
        # FileSystem.read_text_file().
        self.actual_text = self.base + '-txt'
        self.actual_checksum = self.base + '-checksum'
        # We add the '\x8a' to the image file to prevent the value from
        # being treated as UTF-8 (the byte is not valid UTF-8).
        self.actual_image = self.base + '\x8a' + '-png' + 'tEXtchecksum\x00' + self.actual_checksum
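
        # A minimal sketch of how such an embedded checksum can be read back
        # (this mirrors the 'tEXtchecksum\x00' marker that
        # webkitpy.common.read_checksum_from_png scans for in real PNGs;
        # purely illustrative here):
        #
        #   marker = 'tEXtchecksum\x00'
        #   data = self.actual_image
        #   checksum = data[data.find(marker) + len(marker):]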

        self.expected_text = self.actual_text
        self.expected_image = self.actual_image

        self.actual_audio = None
        self.expected_audio = None


# This is an in-memory list of tests, what we want them to produce, and
# what we want to claim are the expected results.
class TestList(object):
    def __init__(self):
        self.tests = {}

    def add(self, name, **kwargs):
        test = TestInstance(name)
        for key, value in kwargs.items():
            setattr(test, key, value)
        self.tests[name] = test

    def add_reftest(self, name, reference_name, same_image):
        self.add(name, actual_checksum='xxx', actual_image='XXX', is_reftest=True)
        if same_image:
            self.add(reference_name, actual_checksum='xxx', actual_image='XXX', is_reftest=True)
        else:
            self.add(reference_name, actual_checksum='yyy', actual_image='YYY', is_reftest=True)

    def keys(self):
        return self.tests.keys()

    def __contains__(self, item):
        return item in self.tests

    def __getitem__(self, item):
        return self.tests[item]


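# Example of TestList usage (hypothetical test names, for illustration):
#
#   tests = TestList()
#   tests.add('passes/example.html')               # default expectations
#   tests.add('failures/example.html', crash=True,
#             error='example stderr')              # overrides via kwargs
#   assert 'passes/example.html' in tests
#   assert tests['failures/example.html'].crash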
def unit_test_list():
    tests = TestList()
    tests.add('failures/expected/crash.html', crash=True)
    tests.add('failures/expected/exception.html', exception=True)
    tests.add('failures/expected/timeout.html', timeout=True)
    tests.add('failures/expected/hang.html', hang=True)
    tests.add('failures/expected/missing_text.html', expected_text=None)
    tests.add('failures/expected/image.html',
              actual_image='image_fail-pngtEXtchecksum\x00checksum_fail',
              expected_image='image-pngtEXtchecksum\x00checksum-png')
    tests.add('failures/expected/image_checksum.html',
              actual_checksum='image_checksum_fail-checksum',
              actual_image='image_checksum_fail-png')
    tests.add('failures/expected/audio.html',
              actual_audio=base64.b64encode('audio_fail-wav'), expected_audio='audio-wav',
              actual_text=None, expected_text=None,
              actual_image=None, expected_image=None,
              actual_checksum=None)
    tests.add('failures/expected/keyboard.html', keyboard=True)
    tests.add('failures/expected/missing_check.html',
              expected_image='missing_check-png')
    tests.add('failures/expected/missing_image.html', expected_image=None)
    tests.add('failures/expected/missing_audio.html', expected_audio=None,
              actual_text=None, expected_text=None,
              actual_image=None, expected_image=None,
              actual_checksum=None)
    tests.add('failures/expected/newlines_leading.html',
              expected_text="\nfoo\n", actual_text="foo\n")
    tests.add('failures/expected/newlines_trailing.html',
              expected_text="foo\n\n", actual_text="foo\n")
    tests.add('failures/expected/newlines_with_excess_CR.html',
              expected_text="foo\r\r\r\n", actual_text="foo\n")
    tests.add('failures/expected/text.html', actual_text='text_fail-png')
    tests.add('failures/expected/skip_text.html', actual_text='text diff')
    tests.add('failures/flaky/text.html')
    tests.add('failures/unexpected/missing_text.html', expected_text=None)
    tests.add('failures/unexpected/missing_check.html', expected_image='missing-check-png')
    tests.add('failures/unexpected/missing_image.html', expected_image=None)
    tests.add('failures/unexpected/missing_render_tree_dump.html', actual_text="""layer at (0,0) size 800x600
  RenderView at (0,0) size 800x600
layer at (0,0) size 800x34
  RenderBlock {HTML} at (0,0) size 800x34
    RenderBody {BODY} at (8,8) size 784x18
      RenderText {#text} at (0,0) size 133x18
        text run at (0,0) width 133: "This is an image test!"
""", expected_text=None)
    tests.add('failures/unexpected/crash.html', crash=True)
    tests.add('failures/unexpected/crash-with-stderr.html', crash=True,
              error="mock-std-error-output")
    tests.add('failures/unexpected/web-process-crash-with-stderr.html', web_process_crash=True,
              error="mock-std-error-output")
    tests.add('failures/unexpected/pass.html')
    tests.add('failures/unexpected/text-checksum.html',
              actual_text='text-checksum_fail-txt',
              actual_checksum='text-checksum_fail-checksum')
    tests.add('failures/unexpected/text-image-checksum.html',
              actual_text='text-image-checksum_fail-txt',
              actual_image='text-image-checksum_fail-pngtEXtchecksum\x00checksum_fail',
              actual_checksum='text-image-checksum_fail-checksum')
    tests.add('failures/unexpected/checksum-with-matching-image.html',
              actual_checksum='text-image-checksum_fail-checksum')
    tests.add('failures/unexpected/skip_pass.html')
    tests.add('failures/unexpected/text.html', actual_text='text_fail-txt')
    tests.add('failures/unexpected/timeout.html', timeout=True)
    tests.add('http/tests/passes/text.html')
    tests.add('http/tests/passes/image.html')
    tests.add('http/tests/ssl/text.html')
    tests.add('passes/args.html')
    tests.add('passes/error.html', error='stuff going to stderr')
    tests.add('passes/image.html')
    tests.add('passes/audio.html',
              actual_audio=base64.b64encode('audio-wav'), expected_audio='audio-wav',
              actual_text=None, expected_text=None,
              actual_image=None, expected_image=None,
              actual_checksum=None)
    tests.add('passes/platform_image.html')
    tests.add('passes/checksum_in_image.html',
              expected_image='tEXtchecksum\x00checksum_in_image-checksum')
    tests.add('passes/skipped/skip.html')

    # Note that here the checksums don't match but the images do, so this test passes "unexpectedly".
    # See https://bugs.webkit.org/show_bug.cgi?id=69444 .
    tests.add('failures/unexpected/checksum.html', actual_checksum='checksum_fail-checksum')

    # Text output files contain "\r\n" on Windows. This may be
    # helpfully filtered to "\r\r\n" by our Python/Cygwin tooling.
    tests.add('passes/text.html',
              expected_text='\nfoo\n\n', actual_text='\nfoo\r\n\r\r\n')

    # For reftests.
    tests.add_reftest('passes/reftest.html', 'passes/reftest-expected.html', same_image=True)
    tests.add_reftest('passes/mismatch.html', 'passes/mismatch-expected-mismatch.html', same_image=False)
    tests.add_reftest('passes/svgreftest.svg', 'passes/svgreftest-expected.svg', same_image=True)
    tests.add_reftest('passes/xhtreftest.xht', 'passes/xhtreftest-expected.html', same_image=True)
    tests.add_reftest('passes/phpreftest.php', 'passes/phpreftest-expected-mismatch.svg', same_image=False)
    tests.add_reftest('failures/expected/reftest.html', 'failures/expected/reftest-expected.html', same_image=False)
    tests.add_reftest('failures/expected/mismatch.html', 'failures/expected/mismatch-expected-mismatch.html', same_image=True)
    tests.add_reftest('failures/unexpected/reftest.html', 'failures/unexpected/reftest-expected.html', same_image=False)
    tests.add_reftest('failures/unexpected/mismatch.html', 'failures/unexpected/mismatch-expected-mismatch.html', same_image=True)
    tests.add('failures/unexpected/reftest-nopixel.html', actual_checksum=None, actual_image=None, is_reftest=True)
    tests.add('failures/unexpected/reftest-nopixel-expected.html', actual_checksum=None, actual_image=None, is_reftest=True)
    # FIXME: Add a reftest which crashes.
    tests.add('reftests/foo/test.html')
    tests.add('reftests/foo/test-ref.html')

    tests.add('reftests/foo/multiple-match-success.html', actual_checksum='abc', actual_image='abc')
    tests.add('reftests/foo/multiple-match-failure.html', actual_checksum='abc', actual_image='abc')
    tests.add('reftests/foo/multiple-mismatch-success.html', actual_checksum='abc', actual_image='abc')
    tests.add('reftests/foo/multiple-mismatch-failure.html', actual_checksum='abc', actual_image='abc')
    tests.add('reftests/foo/multiple-both-success.html', actual_checksum='abc', actual_image='abc')
    tests.add('reftests/foo/multiple-both-failure.html', actual_checksum='abc', actual_image='abc')

    tests.add('reftests/foo/matching-ref.html', actual_checksum='abc', actual_image='abc')
    tests.add('reftests/foo/mismatching-ref.html', actual_checksum='def', actual_image='def')
    tests.add('reftests/foo/second-mismatching-ref.html', actual_checksum='ghi', actual_image='ghi')

    # The following files shouldn't be treated as reftests.
    tests.add_reftest('reftests/foo/unlistedtest.html', 'reftests/foo/unlistedtest-expected.html', same_image=True)
    tests.add('reftests/foo/reference/bar/common.html')
    tests.add('reftests/foo/reftest/bar/shared.html')

    tests.add('websocket/tests/passes/text.html')

    # For testing that tests are properly included from platform directories.
    tests.add('platform/test-mac-leopard/http/test.html')
    tests.add('platform/test-win-win7/http/test.html')

    # For --no-http tests, test that platform-specific HTTP tests are properly skipped.
    tests.add('platform/test-snow-leopard/http/test.html')
    tests.add('platform/test-snow-leopard/websocket/test.html')

    # For testing if perf tests are running in a locked shard.
    tests.add('perf/foo/test.html')
    tests.add('perf/foo/test-ref.html')

    # For testing --pixel-test-directories.
    tests.add('failures/unexpected/pixeldir/image_in_pixeldir.html',
              actual_image='image_in_pixeldir-pngtEXtchecksum\x00checksum_fail',
              expected_image='image_in_pixeldir-pngtEXtchecksum\x00checksum-png')
    tests.add('failures/unexpected/image_not_in_pixeldir.html',
              actual_image='image_not_in_pixeldir-pngtEXtchecksum\x00checksum_fail',
              expected_image='image_not_in_pixeldir-pngtEXtchecksum\x00checksum-png')

    # For testing that virtual test suites don't expand names containing themselves.
    # See webkit.org/b/97925 and base_unittest.PortTest.test_tests().
    tests.add('passes/test-virtual-passes.html')
    tests.add('passes/passes/test-virtual-passes.html')

    return tests


# Here we use a non-standard location for the layout tests, to ensure that
# this works. The path contains a '.' in the name because we've seen bugs
# related to this before.

LAYOUT_TEST_DIR = '/test.checkout/LayoutTests'
PERF_TEST_DIR = '/test.checkout/PerformanceTests'


# Here we synthesize an in-memory filesystem from the test list
# in order to fully control the test output and to demonstrate that
# we don't need a real filesystem to run the tests.
def add_unit_tests_to_mock_filesystem(filesystem):
    # Add the test_expectations file.
    filesystem.maybe_make_directory(LAYOUT_TEST_DIR + '/platform/test')
    if not filesystem.exists(LAYOUT_TEST_DIR + '/platform/test/TestExpectations'):
        filesystem.write_text_file(LAYOUT_TEST_DIR + '/platform/test/TestExpectations', """
Bug(test) failures/expected/crash.html [ Crash ]
Bug(test) failures/expected/image.html [ ImageOnlyFailure ]
Bug(test) failures/expected/audio.html [ Failure ]
Bug(test) failures/expected/image_checksum.html [ ImageOnlyFailure ]
Bug(test) failures/expected/mismatch.html [ ImageOnlyFailure ]
Bug(test) failures/expected/missing_check.html [ Missing Pass ]
Bug(test) failures/expected/missing_image.html [ Missing Pass ]
Bug(test) failures/expected/missing_audio.html [ Missing Pass ]
Bug(test) failures/expected/missing_text.html [ Missing Pass ]
Bug(test) failures/expected/newlines_leading.html [ Failure ]
Bug(test) failures/expected/newlines_trailing.html [ Failure ]
Bug(test) failures/expected/newlines_with_excess_CR.html [ Failure ]
Bug(test) failures/expected/reftest.html [ ImageOnlyFailure ]
Bug(test) failures/expected/text.html [ Failure ]
Bug(test) failures/expected/timeout.html [ Timeout ]
Bug(test) failures/expected/hang.html [ WontFix ]
Bug(test) failures/expected/keyboard.html [ WontFix ]
Bug(test) failures/expected/exception.html [ WontFix ]
Bug(test) failures/unexpected/pass.html [ Failure ]
Bug(test) passes/skipped/skip.html [ Skip ]
""")

    filesystem.maybe_make_directory(LAYOUT_TEST_DIR + '/reftests/foo')
    filesystem.write_text_file(LAYOUT_TEST_DIR + '/reftests/foo/reftest.list', """
== test.html test-ref.html

== multiple-match-success.html mismatching-ref.html
== multiple-match-success.html matching-ref.html
== multiple-match-failure.html mismatching-ref.html
== multiple-match-failure.html second-mismatching-ref.html
!= multiple-mismatch-success.html mismatching-ref.html
!= multiple-mismatch-success.html second-mismatching-ref.html
!= multiple-mismatch-failure.html mismatching-ref.html
!= multiple-mismatch-failure.html matching-ref.html
== multiple-both-success.html matching-ref.html
== multiple-both-success.html mismatching-ref.html
!= multiple-both-success.html second-mismatching-ref.html
== multiple-both-failure.html matching-ref.html
!= multiple-both-failure.html second-mismatching-ref.html
!= multiple-both-failure.html matching-ref.html
""")

    # FIXME: This test was only being ignored because of a missing leading '/'.
    # Fixing the typo causes several tests to assert, so the test is disabled entirely.
    # It would add a file that should be ignored by port.find_test_files().
    #files[LAYOUT_TEST_DIR + '/userscripts/resources/iframe.html'] = 'iframe'

    def add_file(test, suffix, contents):
        dirname = filesystem.join(LAYOUT_TEST_DIR, test.name[0:test.name.rfind('/')])
        base = test.base
        filesystem.maybe_make_directory(dirname)
        filesystem.write_binary_file(filesystem.join(dirname, base + suffix), contents)

    # Add each test and the expected output, if any.
    test_list = unit_test_list()
    for test in test_list.tests.values():
        add_file(test, test.name[test.name.rfind('.'):], '')
        if test.is_reftest:
            continue
        if test.actual_audio:
            add_file(test, '-expected.wav', test.expected_audio)
            continue
        add_file(test, '-expected.txt', test.expected_text)
        add_file(test, '-expected.png', test.expected_image)

    filesystem.write_text_file(filesystem.join(LAYOUT_TEST_DIR, 'virtual', 'passes', 'args-expected.txt'), 'args-txt --virtual-arg')
    # Clear the list of written files so that we can watch what happens during testing.
    filesystem.clear_written_files()
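
# Typical wiring for the unit tests (a sketch; MockHost, from
# webkitpy.common.host_mock, is assumed to expose a MockFileSystem as
# host.filesystem):
#
#   host = MockHost()
#   add_unit_tests_to_mock_filesystem(host.filesystem)
#   port = TestPort(host)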


class TestPort(Port):
    """Test implementation of the Port interface."""

    port_name = 'test'

    ALL_BASELINE_VARIANTS = (
        'test-linux-x86_64',
        'test-mac-snowleopard', 'test-mac-leopard',
        'test-win-vista', 'test-win-win7', 'test-win-xp',
    )

    @classmethod
    def determine_full_port_name(cls, host, options, port_name):
        if port_name == 'test':
            return 'test-mac-leopard'
        return port_name

    def __init__(self, host, port_name=None, **kwargs):
        # FIXME: Consider updating all of the callers to pass in a port_name so it can be a
        # required parameter like all of the other Port objects.
        port_name = port_name or 'test-mac-leopard'
        Port.__init__(self, host, port_name, **kwargs)
        self._tests = unit_test_list()
        self._flakes = set()
        self._expectations_path = LAYOUT_TEST_DIR + '/platform/test/TestExpectations'
        self._results_directory = None

        self._operating_system = 'mac'
        if port_name.startswith('test-win'):
            self._operating_system = 'win'
        elif port_name.startswith('test-linux'):
            self._operating_system = 'linux'

        version_map = {
            'test-win-xp': 'xp',
            'test-win-win7': 'win7',
            'test-win-vista': 'vista',
            'test-mac-leopard': 'leopard',
            'test-mac-snowleopard': 'snowleopard',
            'test-linux-x86_64': 'lucid',
        }
        self._version = version_map[port_name]

    def default_pixel_tests(self):
        return True

    def _path_to_driver(self):
        # This routine shouldn't normally be called, but it is called by
        # the mock_drt Driver. We return something, but make sure it's useless.
        return 'MOCK _path_to_driver'

    def baseline_search_path(self):
        search_paths = {
            'test-mac-snowleopard': ['test-mac-snowleopard'],
            'test-mac-leopard': ['test-mac-leopard', 'test-mac-snowleopard'],
            'test-win-win7': ['test-win-win7'],
            'test-win-vista': ['test-win-vista', 'test-win-win7'],
            'test-win-xp': ['test-win-xp', 'test-win-vista', 'test-win-win7'],
            'test-linux-x86_64': ['test-linux', 'test-win-win7'],
        }
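        # For example, 'test-win-xp' falls back through 'test-win-vista' and
        # 'test-win-win7' before reaching the generic baselines.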
        return [self._webkit_baseline_path(d) for d in search_paths[self.name()]]

    def default_child_processes(self):
        return 1

    def worker_startup_delay_secs(self):
        return 0

    def check_build(self, needs_http):
        return True

    def check_sys_deps(self, needs_http):
        return True

    def default_configuration(self):
        return 'Release'

    def diff_image(self, expected_contents, actual_contents, tolerance=None):
        diffed = actual_contents != expected_contents
        if not actual_contents and not expected_contents:
            return (None, 0, None)
        if not actual_contents or not expected_contents:
            return (True, 0, None)
        if 'ref' in expected_contents:
            assert tolerance == 0
        if diffed:
            return ("< %s\n---\n> %s\n" % (expected_contents, actual_contents), 1, None)
        return (None, 0, None)
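
    # diff_image() returns the Port-style 3-tuple of
    # (diff output or None, difference count, error or None); e.g.
    # (illustrative values):
    #   port.diff_image('blob-a', 'blob-a')  # -> (None, 0, None)
    #   port.diff_image('blob-a', 'blob-b')  # -> ('< blob-a\n---\n> blob-b\n', 1, None)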

    def layout_tests_dir(self):
        return LAYOUT_TEST_DIR

    def perf_tests_dir(self):
        return PERF_TEST_DIR

    def webkit_base(self):
        return '/test.checkout'

    def skipped_layout_tests(self, test_list):
        # This allows us to test the handling of Skipped files, both with a
        # test that actually passes and one that actually fails.
        return set(['failures/expected/skip_text.html',
                    'failures/unexpected/skip_pass.html',
                    'virtual/skipped'])

    def name(self):
        return self._name

    def operating_system(self):
        return self._operating_system

    def _path_to_wdiff(self):
        return None

    def default_results_directory(self):
        return '/tmp/layout-test-results'

    def setup_test_run(self):
        pass

    def _driver_class(self):
        return TestDriver

    def start_http_server(self, additional_dirs=None, number_of_servers=None):
        pass

    def start_websocket_server(self):
        pass

    def acquire_http_lock(self):
        pass

    def stop_http_server(self):
        pass

    def stop_websocket_server(self):
        pass

    def release_http_lock(self):
        pass

    def _path_to_lighttpd(self):
        return "/usr/sbin/lighttpd"

    def _path_to_lighttpd_modules(self):
        return "/usr/lib/lighttpd"

    def _path_to_lighttpd_php(self):
        return "/usr/bin/php-cgi"

    def _path_to_apache(self):
        return "/usr/sbin/httpd"

    def _path_to_apache_config_file(self):
        return self._filesystem.join(self.layout_tests_dir(), 'http', 'conf', 'httpd.conf')

    def path_to_test_expectations_file(self):
        return self._expectations_path

    def all_test_configurations(self):
        """Returns a sequence of the TestConfigurations the port supports."""
        # By default, we assume we want to test every build type on
        # every system.
        test_configurations = []
        for version, architecture in self._all_systems():
            for build_type in self._all_build_types():
                test_configurations.append(TestConfiguration(
                    version=version,
                    architecture=architecture,
                    build_type=build_type))
        return test_configurations
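
    # With the systems and build types below, this yields
    # 7 systems * 2 build types = 14 TestConfigurations.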

    def _all_systems(self):
        return (('leopard', 'x86'),
                ('snowleopard', 'x86'),
                ('xp', 'x86'),
                ('vista', 'x86'),
                ('win7', 'x86'),
                ('lucid', 'x86'),
                ('lucid', 'x86_64'))

    def _all_build_types(self):
        return ('debug', 'release')

    def configuration_specifier_macros(self):
        """To avoid surprises when introducing new macros, these are intentionally fixed in time."""
        return {'mac': ['leopard', 'snowleopard'], 'win': ['xp', 'vista', 'win7'], 'linux': ['lucid']}

    def all_baseline_variants(self):
        return self.ALL_BASELINE_VARIANTS

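    # Each VirtualTestSuite maps a virtual prefix onto a base directory:
    # e.g. 'virtual/passes/args.html' runs 'passes/args.html' with
    # '--virtual-arg' appended to the driver command line (see the
    # args-expected.txt baseline written in add_unit_tests_to_mock_filesystem()).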
    def virtual_test_suites(self):
        return [
            VirtualTestSuite('virtual/passes', 'passes', ['--virtual-arg']),
            VirtualTestSuite('virtual/skipped', 'failures/expected', ['--virtual-arg2']),
        ]


class TestDriver(Driver):
    """Test/Dummy implementation of the DumpRenderTree interface."""

    def cmd_line(self, pixel_tests, per_test_args):
        pixel_tests_flag = '-p' if pixel_tests else ''
        return [self._port._path_to_driver()] + [pixel_tests_flag] + self._port.get_option('additional_drt_flag', []) + per_test_args
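
    # With pixel tests enabled and no extra options this yields, e.g.,
    # ['MOCK _path_to_driver', '-p'] (plus any additional_drt_flag values
    # and the per-test args).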

    def run_test(self, test_input, stop_when_done):
        start_time = time.time()
        test_name = test_input.test_name
        test_args = test_input.args or []
        test = self._port._tests[test_name]
        if test.keyboard:
            raise KeyboardInterrupt
        if test.exception:
            raise ValueError('exception from ' + test_name)
        if test.hang:
            time.sleep((float(test_input.timeout) * 4) / 1000.0 + 1.0)  # The 1.0 comes from thread_padding_sec in layout_test_runner.

        audio = None
        actual_text = test.actual_text

        if 'flaky' in test_name and test_name not in self._port._flakes:
            self._port._flakes.add(test_name)
            actual_text = 'flaky text failure'

        if actual_text and test_args and test_name == 'passes/args.html':
            actual_text = actual_text + ' ' + ' '.join(test_args)

        if test.actual_audio:
            audio = base64.b64decode(test.actual_audio)
        crashed_process_name = None
        crashed_pid = None
        if test.crash:
            crashed_process_name = self._port.driver_name()
            crashed_pid = 1
        elif test.web_process_crash:
            crashed_process_name = 'WebProcess'
            crashed_pid = 2

        crash_log = ''
        if crashed_process_name:
            crash_logs = CrashLogs(self._port.host)
            crash_log = crash_logs.find_newest_log(crashed_process_name, None) or ''

        if stop_when_done:
            self.stop()

        if test.actual_checksum == test_input.image_hash:
            image = None
        else:
            image = test.actual_image
        return DriverOutput(actual_text, image, test.actual_checksum, audio,
            crash=test.crash or test.web_process_crash, crashed_process_name=crashed_process_name,
            crashed_pid=crashed_pid, crash_log=crash_log,
            test_time=time.time() - start_time, timeout=test.timeout, error=test.error)

    def start(self, pixel_tests, per_test_args):
        pass

    def stop(self):
        pass