Merge from Chromium at DEPS revision r216972

This commit was generated by merge_to_master.py.

Change-Id: Ie5904a921ece9c5959b52c8e0b74db09fa08f144
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py b/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
index 59b0f9b..5fef700 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
@@ -213,7 +213,7 @@
             initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
                 int(self._options.child_processes), retrying=False)
 
-            tests_to_retry = self._tests_to_retry(initial_results, include_crashes=self._port.should_retry_crashes())
+            tests_to_retry = self._tests_to_retry(initial_results)
             if should_retry_failures and tests_to_retry and not initial_results.interrupted:
                 enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()
 
@@ -242,7 +242,7 @@
         summarized_full_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry)
         summarized_failing_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, only_include_failing=True)
 
-        exit_code = self._port.exit_code_from_summarized_results(summarized_failing_results)
+        exit_code = summarized_failing_results['num_regressions']
         if not self._options.dry_run:
             self._write_json_files(summarized_full_results, summarized_failing_results, initial_results)
             self._upload_json_files()
@@ -347,11 +347,8 @@
             if self._filesystem.isdir(self._filesystem.join(layout_tests_dir, dirname)):
                 self._filesystem.rmtree(self._filesystem.join(self._results_directory, dirname))
 
-    def _tests_to_retry(self, run_results, include_crashes):
-        return [result.test_name for result in run_results.unexpected_results_by_name.values() if
-                   ((result.type != test_expectations.PASS) and
-                    (result.type != test_expectations.MISSING) and
-                    (result.type != test_expectations.CRASH or include_crashes))]
+    def _tests_to_retry(self, run_results):
+        return [result.test_name for result in run_results.unexpected_results_by_name.values() if result.type != test_expectations.PASS]
 
     def _write_json_files(self, summarized_full_results, summarized_failing_results, initial_results):
         _log.debug("Writing JSON files in %s." % self._results_directory)
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py b/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py
index 7293af0..3579d23 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py
@@ -130,7 +130,6 @@
         'skipped': The number of skipped tests (NOW & SKIPPED)
         'num_regressions': The number of non-flaky failures
         'num_flaky': The number of flaky failures
-        'num_missing': The number of tests with missing results
         'num_passes': The number of unexpected passes
         'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
     """
@@ -145,7 +144,6 @@
 
     num_passes = 0
     num_flaky = 0
-    num_missing = 0
     num_regressions = 0
     keywords = {}
     for expecation_string, expectation_enum in test_expectations.TestExpectations.EXPECTATIONS.iteritems():
@@ -163,9 +161,6 @@
     tests = {}
 
     for test_name, result in initial_results.results_by_name.iteritems():
-        # Note that if a test crashed in the original run, we ignore
-        # whether or not it crashed when we retried it (if we retried it),
-        # and always consider the result not flaky.
         expected = expectations.get_expectations_string(test_name)
         result_type = result.type
         actual = [keywords[result_type]]
@@ -177,12 +172,6 @@
             num_passes += 1
             if not result.has_stderr and only_include_failing:
                 continue
-        elif result_type == test_expectations.CRASH:
-            if test_name in initial_results.unexpected_results_by_name:
-                num_regressions += 1
-        elif result_type == test_expectations.MISSING:
-            if test_name in initial_results.unexpected_results_by_name:
-                num_missing += 1
         elif result_type != test_expectations.SKIP and test_name in initial_results.unexpected_results_by_name:
             if retry_results and test_name not in retry_results.unexpected_results_by_name:
                 actual.extend(expectations.get_expectations_string(test_name).split(" "))
@@ -259,7 +248,6 @@
     results['num_passes'] = num_passes
     results['num_flaky'] = num_flaky
     # FIXME: Remove this. It is redundant with results['num_failures_by_type'].
-    results['num_missing'] = num_missing
     results['num_regressions'] = num_regressions
     results['interrupted'] = initial_results.interrupted  # Does results.html have enough information to compute this itself? (by checking total number of results vs. total number of tests?)
     results['layout_tests_dir'] = port_obj.layout_tests_dir()
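
The counting above no longer special-cases unexpected CRASH and MISSING results: they fall through to the generic branch, so num_missing disappears and missing results are tallied as regressions (or as flaky failures if the retry comes back expected). A simplified sketch of the resulting rule, with illustrative names rather than the real summarize_results code:

    def classify_unexpected(result_type, passed_on_retry):
        # Simplified post-change accounting; the real code also tracks stderr-only
        # results, per-type failure counts, and expectation strings.
        if result_type == 'PASS':
            return 'num_passes'
        if result_type == 'SKIP':
            return 'skipped'
        return 'num_flaky' if passed_on_retry else 'num_regressions'

    assert classify_unexpected('MISSING', passed_on_retry=False) == 'num_regressions'
    assert classify_unexpected('CRASH', passed_on_retry=True) == 'num_flaky'
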
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/base.py b/Tools/Scripts/webkitpy/layout_tests/port/base.py
index f8a6fac..5ab1250 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/base.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/base.py
@@ -188,9 +188,6 @@
             self._pretty_patch_available = self.check_pretty_patch(logging=False)
         return self._pretty_patch_available
 
-    def should_retry_crashes(self):
-        return self.get_option('retry_crashes', False)
-
     def default_child_processes(self):
         """Return the number of drivers to use for this port."""
         return self._executive.cpu_count()
@@ -985,13 +982,6 @@
             self._websocket_server.stop()
             self._websocket_server = None
 
-    def exit_code_from_summarized_results(self, unexpected_results):
-        """Given summarized results, compute the exit code to be returned by new-run-webkit-tests.
-        Bots turn red when this function returns a non-zero value. By default, return the number of regressions
-        to avoid turning bots red by flaky failures, unexpected passes, and missing results"""
-        # Don't turn bots red for flaky failures, unexpected passes, and missing results.
-        return unexpected_results['num_regressions']
-
     #
     # TEST EXPECTATION-RELATED METHODS
     #
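
With the per-port exit-code hook removed, manager.py computes the exit code directly from the failing-results summary. A behaviour sketch with made-up numbers: flaky failures and unexpected passes still do not turn bots red, but missing results now do, because they are included in num_regressions.

    # Illustrative numbers only.
    summarized_failing_results = {'num_regressions': 2,   # now includes unexpected MISSING results
                                  'num_flaky': 3,
                                  'num_passes': 4}
    exit_code = summarized_failing_results['num_regressions']
    assert exit_code == 2  # non-zero (red bot) only for non-flaky unexpected failures
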
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
index 6b99360..55ff290 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
@@ -257,11 +257,6 @@
             finally:
                 self._helper = None
 
-
-    def exit_code_from_summarized_results(self, unexpected_results):
-        # Turn bots red for missing results.
-        return unexpected_results['num_regressions'] + unexpected_results['num_missing']
-
     def configuration_specifier_macros(self):
         return self.CONFIGURATION_SPECIFIER_MACROS
 
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/test.py b/Tools/Scripts/webkitpy/layout_tests/port/test.py
index df2c024..8871c87 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/test.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/test.py
@@ -103,10 +103,9 @@
 #
 TOTAL_TESTS = 106
 TOTAL_SKIPS = 27
-TOTAL_RETRIES = 14
 
-UNEXPECTED_PASSES = 6
-UNEXPECTED_FAILURES = 17
+UNEXPECTED_PASSES = 1
+UNEXPECTED_FAILURES = 22
 
 def unit_test_list():
     tests = TestList()
diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
index b349fc5..0d8578a 100644
--- a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
+++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
@@ -235,9 +235,12 @@
         optparse.make_option("--no-retry-failures", action="store_false",
             dest="retry_failures",
             help="Don't re-try any tests that produce unexpected results."),
+
+        # FIXME: Remove this after we remove the flag from the v8 bot.
         optparse.make_option("--retry-crashes", action="store_true",
             default=False,
-            help="Do also retry crashes if retry-failures is enabled."),
+            help="ignored (we now always retry crashes when we retry failures)."),
+
         optparse.make_option("--max-locked-shards", type="int", default=0,
             help="Set the maximum number of locked shards"),
         optparse.make_option("--additional-env-var", type="string", action="append", default=[],
diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
index 78baf7c..54b928d 100644
--- a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
@@ -199,7 +199,7 @@
         self.assertEqual(details.initial_results.expected_skips, test.TOTAL_SKIPS)
         self.assertEqual(len(details.initial_results.unexpected_results_by_name), test.UNEXPECTED_PASSES + test.UNEXPECTED_FAILURES)
         self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES)
-        self.assertEqual(details.retry_results.total, test.TOTAL_RETRIES)
+        self.assertEqual(details.retry_results.total, test.UNEXPECTED_FAILURES)
 
         expected_tests = details.initial_results.total - details.initial_results.expected_skips - len(details.initial_results.unexpected_results_by_name)
         expected_summary_str = ''
@@ -492,13 +492,12 @@
             'failures/unexpected/text-image-checksum.html'],
             tests_included=True, host=host)
         file_list = host.filesystem.written_files.keys()
-        self.assertEqual(details.exit_code, 1)
+        self.assertEqual(details.exit_code, 2)
         json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
         self.assertTrue(json_string.find('"text-image-checksum.html":{"expected":"PASS","actual":"IMAGE+TEXT","is_unexpected":true') != -1)
         self.assertTrue(json_string.find('"missing_text.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING","is_unexpected":true') != -1)
-        self.assertTrue(json_string.find('"num_regressions":1') != -1)
+        self.assertTrue(json_string.find('"num_regressions":2') != -1)
         self.assertTrue(json_string.find('"num_flaky":0') != -1)
-        self.assertTrue(json_string.find('"num_missing":1') != -1)
 
     def test_pixel_test_directories(self):
         host = MockHost()
@@ -514,23 +513,6 @@
         json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
         self.assertTrue(json_string.find(expected_token) != -1)
 
-    def test_missing_and_unexpected_results_with_custom_exit_code(self):
-        # Test that we update expectations in place. If the expectation
-        # is missing, update the expected generic location.
-        class CustomExitCodePort(test.TestPort):
-            def exit_code_from_summarized_results(self, unexpected_results):
-                return unexpected_results['num_regressions'] + unexpected_results['num_missing']
-
-        host = MockHost()
-        options, parsed_args = run_webkit_tests.parse_args(['--pixel-tests', '--no-new-test-results'])
-        test_port = CustomExitCodePort(host, options=options)
-        details, err, _ = logging_run(['--no-show-results',
-            'failures/expected/missing_image.html',
-            'failures/unexpected/missing_text.html',
-            'failures/unexpected/text-image-checksum.html'],
-            tests_included=True, host=host, port_obj=test_port)
-        self.assertEqual(details.exit_code, 2)
-
     def test_crash_with_stderr(self):
         host = MockHost()
         _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
@@ -619,7 +601,7 @@
 
         host = MockHost()
         details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected'], tests_included=True, host=host)
-        self.assertEqual(details.exit_code, 12)
+        self.assertEqual(details.exit_code, 16)
         self.assertTrue('Retrying' in err.getvalue())
 
     def test_retrying_default_value_test_list(self):
@@ -634,7 +616,7 @@
         filename = '/tmp/foo.txt'
         host.filesystem.write_text_file(filename, 'failures')
         details, err, _ = logging_run(['--debug-rwt-logging', '--test-list=%s' % filename], tests_included=True, host=host)
-        self.assertEqual(details.exit_code, 12)
+        self.assertEqual(details.exit_code, 16)
         self.assertTrue('Retrying' in err.getvalue())
 
     def test_retrying_and_flaky_tests(self):
@@ -657,21 +639,12 @@
         self.assertFalse(host.filesystem.exists('retries'))
         self.assertEqual(len(host.user.opened_urls), 1)
 
-    def test_retrying_chrashed_tests(self):
+    def test_retrying_crashed_tests(self):
         host = MockHost()
-        details, err, _ = logging_run(['--retry-failures', '--retry-crashes', 'failures/unexpected/crash.html'], tests_included=True, host=host)
+        details, err, _ = logging_run(['--retry-failures', 'failures/unexpected/crash.html'], tests_included=True, host=host)
         self.assertEqual(details.exit_code, 1)
         self.assertTrue('Retrying' in err.getvalue())
 
-        # Now we test that --clobber-old-results does remove the old entries and the old retries,
-        # and that we don't retry again.
-        host = MockHost()
-        details, err, _ = logging_run(['--no-retry-failures', '--clobber-old-results', 'failures/unexpected/crash.html'], tests_included=True, host=host)
-        self.assertEqual(details.exit_code, 1)
-        self.assertTrue('Clobbering old results' in err.getvalue())
-        self.assertTrue('unexpected/crash.html' in err.getvalue())
-        self.assertFalse(host.filesystem.exists('retries'))
-
     def test_retrying_force_pixel_tests(self):
         host = MockHost()
         details, err, _ = logging_run(['--no-pixel-tests', '--retry-failures', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
@@ -732,11 +705,11 @@
     def test_reftest_should_not_use_naming_convention_if_not_listed_in_reftestlist(self):
         host = MockHost()
         _, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
-        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
-        self.assertTrue(json_string.find('"unlistedtest.html":{"expected":"PASS","is_missing_text":true,"is_missing_image":true,"actual":"MISSING","is_unexpected":true') != -1)
-        self.assertTrue(json_string.find('"num_regressions":4') != -1)
-        self.assertTrue(json_string.find('"num_flaky":0') != -1)
-        self.assertTrue(json_string.find('"num_missing":1') != -1)
+        results = parse_full_results(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
+
+        self.assertEqual(results["tests"]["reftests"]["foo"]["unlistedtest.html"]["actual"], "MISSING"),
+        self.assertEqual(results["num_regressions"], 5)
+        self.assertEqual(results["num_flaky"], 0)
 
     def test_additional_platform_directory(self):
         self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo']))
@@ -871,11 +844,10 @@
         details, err, _ = logging_run(['--no-show-results',
             'failures/unexpected/missing_text.html',
             'failures/unexpected/missing_image.html',
-            'failures/unexpected/missing_audio.html',
             'failures/unexpected/missing_render_tree_dump.html'],
             tests_included=True, host=host, new_results=True)
         file_list = host.filesystem.written_files.keys()
-        self.assertEqual(details.exit_code, 0)
+        self.assertEqual(details.exit_code, 3)
         self.assertEqual(len(file_list), 10)
         self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
         self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_image", [".png"], err)