Merge from Chromium at DEPS revision r199464

This commit was generated by merge_to_master.py.

Change-Id: I19655f81f4534807b2fa07bc72b5208501b02896
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
index eaf16f7..fd35b44 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
@@ -800,20 +800,19 @@
         return cls.EXPECTATIONS.get(string.lower())
 
     @staticmethod
-    def result_was_expected(result, expected_results, test_needs_rebaselining, test_is_skipped):
+    def result_was_expected(result, expected_results, test_needs_rebaselining):
         """Returns whether we got a result we were expecting.
         Args:
             result: actual result of a test execution
             expected_results: set of results listed in test_expectations
-            test_needs_rebaselining: whether test was marked as REBASELINE
-            test_is_skipped: whether test was marked as SKIP"""
+            test_needs_rebaselining: whether test was marked as REBASELINE"""
         if result in expected_results:
             return True
         if result in (TEXT, IMAGE_PLUS_TEXT, AUDIO) and (FAIL in expected_results):
             return True
         if result == MISSING and test_needs_rebaselining:
             return True
-        if result == SKIP and test_is_skipped:
+        if result == SKIP:
             return True
         return False
 
@@ -924,10 +923,7 @@
         expected_results = self._model.get_expectations(test)
         if not pixel_tests_are_enabled:
             expected_results = self.remove_pixel_failures(expected_results)
-        return self.result_was_expected(result,
-                                   expected_results,
-                                   self.is_rebaselining(test),
-                                   self._model.has_modifier(test, SKIP))
+        return self.result_was_expected(result, expected_results, self.is_rebaselining(test))
 
     def is_rebaselining(self, test):
         return self._model.has_modifier(test, REBASELINE)
@@ -967,9 +963,7 @@
         modified_expectations = []
 
         for expectation in self._expectations:
-            if expectation.name != test or expectation.is_flaky() or not expectation.parsed_expectations:
-                continue
-            if iter(expectation.parsed_expectations).next() not in (FAIL, IMAGE):
+            if expectation.name != test or not expectation.parsed_expectations:
                 continue
             if test_configuration not in expectation.matching_configurations:
                 continue
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
index a198cce..8e6ffdd 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
@@ -107,16 +107,15 @@
 
     def test_result_was_expected(self):
         # test basics
-        self.assertEqual(TestExpectations.result_was_expected(PASS, set([PASS]), test_needs_rebaselining=False, test_is_skipped=False), True)
-        self.assertEqual(TestExpectations.result_was_expected(FAIL, set([PASS]), test_needs_rebaselining=False, test_is_skipped=False), False)
+        self.assertEqual(TestExpectations.result_was_expected(PASS, set([PASS]), test_needs_rebaselining=False), True)
+        self.assertEqual(TestExpectations.result_was_expected(FAIL, set([PASS]), test_needs_rebaselining=False), False)
 
         # test handling of SKIPped tests and results
-        self.assertEqual(TestExpectations.result_was_expected(SKIP, set([CRASH]), test_needs_rebaselining=False, test_is_skipped=True), True)
-        self.assertEqual(TestExpectations.result_was_expected(SKIP, set([CRASH]), test_needs_rebaselining=False, test_is_skipped=False), False)
+        self.assertEqual(TestExpectations.result_was_expected(SKIP, set([CRASH]), test_needs_rebaselining=False), True)
 
         # test handling of MISSING results and the REBASELINE modifier
-        self.assertEqual(TestExpectations.result_was_expected(MISSING, set([PASS]), test_needs_rebaselining=True, test_is_skipped=False), True)
-        self.assertEqual(TestExpectations.result_was_expected(MISSING, set([PASS]), test_needs_rebaselining=False, test_is_skipped=False), False)
+        self.assertEqual(TestExpectations.result_was_expected(MISSING, set([PASS]), test_needs_rebaselining=True), True)
+        self.assertEqual(TestExpectations.result_was_expected(MISSING, set([PASS]), test_needs_rebaselining=False), False)
 
     def test_remove_pixel_failures(self):
         self.assertEqual(TestExpectations.remove_pixel_failures(set([FAIL])), set([FAIL]))
@@ -504,6 +503,26 @@
         self.assertEqual("""Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
 """, actual_expectations)
 
+    def test_remove_flaky_line(self):
+        host = MockHost()
+        test_port = host.port_factory.get('test-win-xp', None)
+        test_port.test_exists = lambda test: True
+        test_port.test_isfile = lambda test: True
+
+        test_config = test_port.test_configuration()
+        test_port.expectations_dict = lambda: {'expectations': """Bug(x) [ Win ] failures/expected/foo.html [ Failure Timeout ]
+Bug(y) [ Mac ] failures/expected/foo.html [ Crash ]
+"""}
+        expectations = TestExpectations(test_port)
+
+        actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', test_config)
+        actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', host.port_factory.get('test-win-vista', None).test_configuration())
+        actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', host.port_factory.get('test-win-win7', None).test_configuration())
+
+        self.assertEqual("""Bug(x) [ Win Debug ] failures/expected/foo.html [ Failure Timeout ]
+Bug(y) [ Mac ] failures/expected/foo.html [ Crash ]
+""", actual_expectations)
+
 
 class RebaseliningTest(Base):
     """Test rebaselining-specific functionality."""
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py b/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py
index 3af1224..b2959d0 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py
@@ -28,6 +28,7 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 import logging
+import time
 
 from webkitpy.layout_tests.models import test_expectations
 from webkitpy.layout_tests.models import test_failures
@@ -135,6 +136,7 @@
     tbe = initial_results.tests_by_expectation
     tbt = initial_results.tests_by_timeline
     results['fixable'] = len(tbt[test_expectations.NOW] - tbe[test_expectations.PASS])
+    # FIXME: Remove this. It is redundant with results['num_failures_by_type'].
     results['skipped'] = len(tbt[test_expectations.NOW] & tbe[test_expectations.SKIP])
 
     num_passes = 0
@@ -148,6 +150,12 @@
     for modifier_string, modifier_enum in test_expectations.TestExpectations.MODIFIERS.iteritems():
         keywords[modifier_enum] = modifier_string.upper()
 
+    num_failures_by_type = {}
+    for expectation in initial_results.tests_by_expectation:
+        num_failures_by_type[keywords[expectation]] = len(initial_results.tests_by_expectation[expectation] & tbt[test_expectations.NOW])
+    # The number of failures by type.
+    results['num_failures_by_type'] = num_failures_by_type
+
     tests = {}
 
     for test_name, result in initial_results.results_by_name.iteritems():
@@ -158,10 +166,9 @@
         result_type = result.type
         actual = [keywords[result_type]]
 
-        if result_type == test_expectations.SKIP:
-            continue
-
         test_dict = {}
+        test_dict['time'] = result.total_run_time
+
         if result.has_stderr:
             test_dict['has_stderr'] = True
 
@@ -173,16 +180,13 @@
 
         if result_type == test_expectations.PASS:
             num_passes += 1
-            # FIXME: include passing tests that have stderr output.
-            if expected == 'PASS':
-                continue
         elif result_type == test_expectations.CRASH:
             if test_name in initial_results.unexpected_results_by_name:
                 num_regressions += 1
         elif result_type == test_expectations.MISSING:
             if test_name in initial_results.unexpected_results_by_name:
                 num_missing += 1
-        elif test_name in initial_results.unexpected_results_by_name:
+        elif result_type != test_expectations.SKIP and test_name in initial_results.unexpected_results_by_name:
             if retry_results and test_name not in retry_results.unexpected_results_by_name:
                 actual.extend(expectations.get_expectations_string(test_name).split(" "))
                 num_flaky += 1
@@ -202,6 +206,13 @@
         test_dict['expected'] = expected
         test_dict['actual'] = " ".join(actual)
 
+        def is_expected(actual_result):
+            return expectations.matches_an_expected_result(test_name, result_type, port_obj.get_option('pixel_tests') or result.reftest_type)
+
+        # To avoid bloating the output results json too much, only add an entry for whether the failure is unexpected.
+        if not all(is_expected(actual_result) for actual_result in actual):
+            test_dict['is_unexpected'] = True
+
         test_dict.update(_interpret_test_failures(result.failures))
 
         if retry_results:
@@ -231,30 +242,37 @@
             current_map = current_map[part]
 
     results['tests'] = tests
+    # FIXME: Remove this. It is redundant with results['num_failures_by_type'].
     results['num_passes'] = num_passes
     results['num_flaky'] = num_flaky
+    # FIXME: Remove this. It is redundant with results['num_failures_by_type'].
     results['num_missing'] = num_missing
     results['num_regressions'] = num_regressions
+    # FIXME: This is always true for Blink. We should remove this and update the code in LayoutTests/fast/harness/results.html that uses this.
     results['uses_expectations_file'] = port_obj.uses_test_expectations_file()
     results['interrupted'] = initial_results.interrupted  # Does results.html have enough information to compute this itself? (by checking total number of results vs. total number of tests?)
     results['layout_tests_dir'] = port_obj.layout_tests_dir()
     results['has_wdiff'] = port_obj.wdiff_available()
     results['has_pretty_patch'] = port_obj.pretty_patch_available()
     results['pixel_tests_enabled'] = port_obj.get_option('pixel_tests')
+    results['seconds_since_epoch'] = int(time.time())
+    results['build_number'] = port_obj.get_option('build_number')
+    results['builder_name'] = port_obj.get_option('builder_name')
 
     try:
-        # We only use the svn revision for using trac links in the results.html file,
         # Don't do this by default since it takes >100ms.
-        # FIXME: Do we really need to populate this both here and in the json_results_generator?
+        # It's only used for uploading data to the flakiness dashboard.
         if port_obj.get_option("builder_name"):
             port_obj.host.initialize_scm()
-            results['revision'] = port_obj.host.scm().head_svn_revision()
+            for (name, path) in port_obj.repository_paths():
+                results[name.lower() + '_revision'] = port_obj.host.scm().svn_revision(path)
     except Exception, e:
         _log.warn("Failed to determine svn revision for checkout (cwd: %s, webkit_base: %s), leaving 'revision' key blank in full_results.json.\n%s" % (port_obj._filesystem.getcwd(), port_obj.path_from_webkit_base(), e))
         # Handle cases where we're running outside of version control.
         import traceback
         _log.debug('Failed to learn head svn revision:')
         _log.debug(traceback.format_exc())
-        results['revision'] = ""
+        results['chromium_revision'] = ""
+        results['blink_revision'] = ""
 
     return results
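
A rough sketch of the new num_failures_by_type bookkeeping, assuming the same shapes summarize_results() works with: tests_by_expectation maps a result enum to a set of test names, the NOW set holds the tests that ran in this cycle, and keywords maps each enum back to its keyword string. The helper name below is made up for illustration.

# Illustrative sketch only, not the webkitpy implementation.
def count_failures_by_type(tests_by_expectation, now_tests, keywords):
    counts = {}
    for expectation, tests in tests_by_expectation.items():
        # Count only tests that actually ran in this cycle.
        counts[keywords[expectation]] = len(tests & now_tests)
    return counts

CRASH, PASS = 'CRASH', 'PASS'   # placeholder enums
by_expectation = {CRASH: set(['a.html']), PASS: set(['b.html', 'c.html'])}
print(count_failures_by_type(by_expectation, set(['a.html', 'b.html']),
                             {CRASH: 'CRASH', PASS: 'PASS'}))
# {'CRASH': 1, 'PASS': 1}
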
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py b/Tools/Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py
index c0d9265..5c22306 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py
@@ -48,7 +48,7 @@
 
 def run_results(port):
     tests = ['passes/text.html', 'failures/expected/timeout.html', 'failures/expected/crash.html', 'failures/expected/hang.html',
-             'failures/expected/audio.html']
+             'failures/expected/audio.html', 'passes/skipped/skip.html']
     expectations = test_expectations.TestExpectations(port, tests)
     return test_run_results.TestRunResults(expectations, len(tests))
 
@@ -63,6 +63,9 @@
         initial_results.add(get_result('failures/expected/timeout.html', test_expectations.TIMEOUT), expected, test_is_slow)
         initial_results.add(get_result('failures/expected/crash.html', test_expectations.CRASH), expected, test_is_slow)
     elif passing:
+        skipped_result = get_result('passes/skipped/skip.html')
+        skipped_result.type = test_expectations.SKIP
+        initial_results.add(skipped_result, expected, test_is_slow)
         initial_results.add(get_result('passes/text.html'), expected, test_is_slow)
         initial_results.add(get_result('failures/expected/audio.html'), expected, test_is_slow)
         initial_results.add(get_result('failures/expected/timeout.html'), expected, test_is_slow)
@@ -124,12 +127,34 @@
         summary = summarized_results(self.port, expected=False, passing=False, flaky=False)
         self.assertNotIn('revision', summary)
 
+    def test_num_failures_by_type(self):
+        summary = summarized_results(self.port, expected=False, passing=False, flaky=False)
+        self.assertEquals(summary['num_failures_by_type'], {'CRASH': 1, 'MISSING': 0, 'TEXT': 0, 'IMAGE': 0, 'PASS': 0, 'SKIP': 0, 'TIMEOUT': 2, 'IMAGE+TEXT': 0, 'FAIL': 0, 'AUDIO': 1})
+
+        summary = summarized_results(self.port, expected=True, passing=False, flaky=False)
+        self.assertEquals(summary['num_failures_by_type'], {'CRASH': 1, 'MISSING': 0, 'TEXT': 0, 'IMAGE': 0, 'PASS': 1, 'SKIP': 0, 'TIMEOUT': 1, 'IMAGE+TEXT': 0, 'FAIL': 0, 'AUDIO': 1})
+
+        summary = summarized_results(self.port, expected=False, passing=True, flaky=False)
+        self.assertEquals(summary['num_failures_by_type'], {'CRASH': 0, 'MISSING': 0, 'TEXT': 0, 'IMAGE': 0, 'PASS': 4, 'SKIP': 1, 'TIMEOUT': 0, 'IMAGE+TEXT': 0, 'FAIL': 0, 'AUDIO': 0})
+
     def test_svn_revision(self):
         self.port._options.builder_name = 'dummy builder'
         summary = summarized_results(self.port, expected=False, passing=False, flaky=False)
-        self.assertNotEquals(summary['revision'], '')
+        self.assertNotEquals(summary['blink_revision'], '')
 
     def test_summarized_results_wontfix(self):
         self.port._options.builder_name = 'dummy builder'
         summary = summarized_results(self.port, expected=False, passing=False, flaky=False)
         self.assertTrue(summary['tests']['failures']['expected']['hang.html']['wontfix'])
+        self.assertTrue(summary['tests']['passes']['text.html']['is_unexpected'])
+
+    def test_summarized_results_expected_pass(self):
+        self.port._options.builder_name = 'dummy builder'
+        summary = summarized_results(self.port, expected=False, passing=True, flaky=False)
+        self.assertTrue(summary['tests']['passes']['text.html'])
+        self.assertTrue('is_unexpected' not in summary['tests']['passes']['text.html'])
+
+    def test_summarized_results_skipped(self):
+        self.port._options.builder_name = 'dummy builder'
+        summary = summarized_results(self.port, expected=False, passing=True, flaky=False)
+        self.assertTrue(summary['tests']['passes']['skipped']['skip.html'])
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/base.py b/Tools/Scripts/webkitpy/layout_tests/port/base.py
index 56f7662..82a18be 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/base.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/base.py
@@ -1079,7 +1079,7 @@
 
         # We use LayoutTest directory here because webkit_base isn't a part of WebKit repository in Chromium port
         # where trunk isn't checked out as a whole.
-        return [('WebKit', self.layout_tests_dir())]
+        return [('blink', self.layout_tests_dir())]
 
     _WDIFF_DEL = '##WDIFF_DEL##'
     _WDIFF_ADD = '##WDIFF_ADD##'
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
index ce0564d..be8905b 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
@@ -378,7 +378,7 @@
 
     def repository_paths(self):
         repos = super(ChromiumPort, self).repository_paths()
-        repos.append(('Chromium', self.path_from_chromium_base('build')))
+        repos.append(('chromium', self.path_from_chromium_base('build')))
         return repos
 
     def _get_crash_log(self, name, pid, stdout, stderr, newer_than):
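
With the repository names lowercased here and in base.py, summarize_results() can derive the per-repository revision keys directly from repository_paths(). A sketch of that derivation; FakeSCM and the paths are placeholders, only svn_revision(path) mirrors the SCM call used above.

# Illustrative sketch only.
def per_repository_revisions(repository_paths, scm):
    revisions = {}
    for name, path in repository_paths:
        # 'blink' -> 'blink_revision', 'chromium' -> 'chromium_revision'
        revisions[name.lower() + '_revision'] = scm.svn_revision(path)
    return revisions

class FakeSCM(object):
    def svn_revision(self, path):
        return '199464'  # placeholder value

print(per_repository_revisions(
    [('blink', '/mock-checkout/third_party/WebKit/LayoutTests'),
     ('chromium', '/mock-checkout/build')], FakeSCM()))
# {'blink_revision': '199464', 'chromium_revision': '199464'}
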
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/test.py b/Tools/Scripts/webkitpy/layout_tests/port/test.py
index b7f633c..dad7396 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/test.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/test.py
@@ -295,6 +295,7 @@
 Bug(test) failures/expected/exception.html [ WontFix ]
 Bug(test) failures/unexpected/pass.html [ Failure ]
 Bug(test) passes/skipped/skip.html [ Skip ]
+Bug(test) passes/text.html [ Pass ]
 """)
 
     filesystem.maybe_make_directory(LAYOUT_TEST_DIR + '/reftests/foo')
diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
index ee9f86f..ff772ca 100644
--- a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
@@ -186,6 +186,11 @@
         # properly on cygwin (bug 63846).
         self.should_test_processes = not self._platform.is_win()
 
+    def assertHasTimeAndOtherValuesEqual(self, actual, expected):
+        self.assertTrue(actual['time'])
+        del actual['time']
+        self.assertEqual(actual, expected)
+
     def test_basic(self):
         options, args = parse_args(tests_included=True)
         logging_stream = StringIO.StringIO()
@@ -496,9 +501,9 @@
             tests_included=True, host=host)
         file_list = host.filesystem.written_files.keys()
         self.assertEqual(details.exit_code, 1)
-        expected_token = '"unexpected":{"text-image-checksum.html":{"expected":"PASS","actual":"IMAGE+TEXT","image_diff_percent":1},"missing_text.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING"}'
         json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
-        self.assertTrue(json_string.find(expected_token) != -1)
+        self.assertTrue(json_string.find('"text-image-checksum.html":{"actual":"IMAGE+TEXT","is_unexpected":true,"expected":"PASS"') != -1)
+        self.assertTrue(json_string.find('"missing_text.html":{"expected":"PASS","is_missing_text":true,"is_unexpected":true,"actual":"MISSING"') != -1)
         self.assertTrue(json_string.find('"num_regressions":1') != -1)
         self.assertTrue(json_string.find('"num_flaky":0') != -1)
         self.assertTrue(json_string.find('"num_missing":1') != -1)
@@ -506,14 +511,14 @@
     def test_pixel_test_directories(self):
         host = MockHost()
 
-        """Both tests have faling checksum. We include only the first in pixel tests so only that should fail."""
+        """Both tests have failing checksum. We include only the first in pixel tests so only that should fail."""
         args = ['--pixel-tests', '--pixel-test-directory', 'failures/unexpected/pixeldir',
                 'failures/unexpected/pixeldir/image_in_pixeldir.html',
                 'failures/unexpected/image_not_in_pixeldir.html']
         details, err, _ = logging_run(extra_args=args, host=host, tests_included=True)
 
         self.assertEqual(details.exit_code, 1)
-        expected_token = '"unexpected":{"pixeldir":{"image_in_pixeldir.html":{"expected":"PASS","actual":"IMAGE"'
+        expected_token = 'pixeldir":{"image_in_pixeldir.html":{"actual":"IMAGE","is_unexpected":true,"expected":"PASS",'
         json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
         self.assertTrue(json_string.find(expected_token) != -1)
 
@@ -537,7 +542,7 @@
     def test_crash_with_stderr(self):
         host = MockHost()
         _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
-        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"expected":"PASS","actual":"CRASH","has_stderr":true}}') != -1)
+        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"expected":"PASS","is_unexpected":true,"actual":"CRASH","has_stderr":true') != -1)
 
     def test_no_image_failure_with_image_diff(self):
         host = MockHost()
@@ -666,8 +671,8 @@
         self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.png'))
         json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
         json = parse_full_results(json_string)
-        self.assertEqual(json["tests"]["failures"]["unexpected"]["text-image-checksum.html"],
-            {"expected": "PASS", "actual": "TEXT IMAGE+TEXT", "image_diff_percent": 1})
+        self.assertHasTimeAndOtherValuesEqual(json["tests"]["failures"]["unexpected"]["text-image-checksum.html"],
+            {"expected": "PASS", "actual": "TEXT IMAGE+TEXT", "image_diff_percent": 1, "is_unexpected": True})
         self.assertFalse(json["pixel_tests_enabled"])
         self.assertEqual(details.enabled_pixel_tests_in_retry, True)
 
@@ -749,7 +754,7 @@
         host = MockHost()
         _, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
         json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
-        self.assertTrue(json_string.find('"unlistedtest.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING","is_missing_image":true}') != -1)
+        self.assertTrue(json_string.find('"unlistedtest.html":{"actual":"MISSING","is_missing_image":true,"is_unexpected":true,"expected":"PASS","is_missing_text":true') != -1)
         self.assertTrue(json_string.find('"num_regressions":4') != -1)
         self.assertTrue(json_string.find('"num_flaky":0') != -1)
         self.assertTrue(json_string.find('"num_missing":1') != -1)
@@ -847,6 +852,11 @@
 
 
 class EndToEndTest(unittest.TestCase):
+    def assertHasTimeAndOtherValuesEqual(self, actual, expected):
+        self.assertTrue(actual['time'])
+        del actual['time']
+        self.assertEqual(actual, expected)
+
     def test_reftest_with_two_notrefs(self):
         # Test that we update expectations in place. If the expectation
         # is missing, update the expected generic location.
@@ -856,15 +866,16 @@
 
         json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
         json = parse_full_results(json_string)
-        self.assertTrue("multiple-match-success.html" not in json["tests"]["reftests"]["foo"])
-        self.assertTrue("multiple-mismatch-success.html" not in json["tests"]["reftests"]["foo"])
-        self.assertTrue("multiple-both-success.html" not in json["tests"]["reftests"]["foo"])
-        self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-match-failure.html"],
-            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["=="], "image_diff_percent": 1})
-        self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-mismatch-failure.html"],
-            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["!="]})
-        self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-both-failure.html"],
-            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["==", "!="]})
+        self.assertTrue("is_unexpected" not in json["tests"]["reftests"]["foo"]["multiple-match-success.html"])
+        self.assertTrue("is_unexpected" not in json["tests"]["reftests"]["foo"]["multiple-mismatch-success.html"])
+        self.assertTrue("is_unexpected" not in json["tests"]["reftests"]["foo"]["multiple-both-success.html"])
+
+        self.assertHasTimeAndOtherValuesEqual(json["tests"]["reftests"]["foo"]["multiple-match-failure.html"],
+            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["=="], "image_diff_percent": 1, "is_unexpected": True})
+        self.assertHasTimeAndOtherValuesEqual(json["tests"]["reftests"]["foo"]["multiple-mismatch-failure.html"],
+            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["!="], "is_unexpected": True})
+        self.assertHasTimeAndOtherValuesEqual(json["tests"]["reftests"]["foo"]["multiple-both-failure.html"],
+            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["==", "!="], "is_unexpected": True})
 
 
 class RebaselineTest(unittest.TestCase, StreamTestingMixin):
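
Each per-test entry now carries a 'time' value (result.total_run_time), which is nondeterministic; that is why the comparisons above go through assertHasTimeAndOtherValuesEqual. A small sketch of that comparison pattern; unlike the test helper it copies the dict instead of mutating the caller's.

# Illustrative sketch only.
def assert_has_time_and_other_values_equal(actual, expected):
    assert actual['time']            # every result entry should record a time
    trimmed = dict(actual)           # drop the nondeterministic field ...
    del trimmed['time']
    assert trimmed == expected       # ... then compare the stable fields

assert_has_time_and_other_values_equal(
    {'expected': 'PASS', 'actual': 'IMAGE', 'time': 0.25, 'is_unexpected': True},
    {'expected': 'PASS', 'actual': 'IMAGE', 'is_unexpected': True})
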
diff --git a/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py b/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py
index 3191b84..e7dabab 100644
--- a/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py
+++ b/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py
@@ -100,10 +100,7 @@
             actual = results['actual'].split(" ")
             expected = results['expected'].split(" ")
 
-            def is_expected(result):
-                return (result in expected) or (result in ('AUDIO', 'TEXT', 'IMAGE+TEXT') and 'FAIL' in expected)
-
-            if all(is_expected(actual_result) for actual_result in actual):
+            if 'is_unexpected' not in results or not results['is_unexpected']:
                 # Don't print anything for tests that ran as expected.
                 return
 
diff --git a/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results_unittest.py b/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results_unittest.py
index 5ce15c1..8951a63 100644
--- a/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results_unittest.py
@@ -87,7 +87,9 @@
         printer, out = self.get_printer()
         summary = test_run_results_unittest.summarized_results(port, expected=False, passing=True, flaky=False)
         printer.print_unexpected_results(summary)
-        self.assertNotEmpty(out)
+        output = out.getvalue()
+        self.assertTrue(output)
+        self.assertTrue(output.find('Skip') == -1)
 
     def test_print_results(self):
         port = MockHost().port_factory.get('test')
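
Tying the printer change back to the summarizer: print_unexpected_results() now keys off the 'is_unexpected' flag written by summarize_results() instead of re-deriving expectedness from the 'actual' and 'expected' strings, which is also why an expected skipped result no longer appears in the output checked above. A minimal sketch of that rule, with hand-written example dicts:

# Illustrative sketch only; dicts are hand-written examples of per-test
# entries from full_results.json.
def ran_as_expected(test_dict):
    # summarize_results() only sets 'is_unexpected' when a result was not
    # covered by the expectations, so its absence means "as expected".
    return not test_dict.get('is_unexpected', False)

assert not ran_as_expected({'expected': 'PASS', 'actual': 'CRASH',
                            'is_unexpected': True})
assert ran_as_expected({'expected': 'PASS', 'actual': 'SKIP', 'time': 0.1})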