Revert "GraphicsTest: Report subtest failures to chromeperf"
This reverts commit 0349737150453233e130e0ffe5f7de3272aa70ae.
Speculative revert to address graphics_dEQP alerts.

This removes the per-subtest failure reporting that the reverted change
added to GraphicsTest and restores reporting only the aggregate failure
count to the chromeperf dashboard.
BUG=chromium:779727
Change-Id: I6d9df776696151d471cee6c844aecd4c1c426b47
Reviewed-on: https://chromium-review.googlesource.com/828525
Reviewed-by: Ilja H. Friedel <ihf@chromium.org>
Tested-by: Ilja H. Friedel <ihf@chromium.org>
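
Before this revert, GraphicsTest._failures held one structured record per
failure so that subtests could be charted individually; after it, the list
again holds plain description strings and only the aggregate count is
reported. A rough sketch of the two shapes, with illustrative values rather
than exact source:

    # Per-subtest record used by the change being reverted:
    failure = {
        'description': 'some_subtest',   # subtest name, or the test name
        'unit': 'count',
        'higher_is_better': False,
        'graph': 'Failures',             # _test_failure_description
        'names': ['some_subtest'],
    }

    # Flat form restored by this revert:
    failures = ['some_subtest']
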
diff --git a/client/cros/graphics/graphics_utils.py b/client/cros/graphics/graphics_utils.py
index 44e7ddb..1752b3b 100644
--- a/client/cros/graphics/graphics_utils.py
+++ b/client/cros/graphics/graphics_utils.py
@@ -45,17 +45,12 @@
_test_failure_report_enable(bool): Enable/Disable reporting
failures to chrome perf dashboard
automatically. (Default: True)
- _test_failure_report_subtest(bool): Enable/Disable reporting
- subtests failure to chrome perf
- dashboard automatically.
- (Default: False)
"""
version = 1
_GSC = None
_test_failure_description = "Failures"
_test_failure_report_enable = True
- _test_failure_report_subtest = False
def __init__(self, *args, **kwargs):
"""Initialize flag setting."""
@@ -90,7 +85,23 @@
if self._GSC:
self._GSC.finalize()
- self._output_perf()
+ self.output_perf_value(
+ description='Timeout_Reboot',
+ value=0,
+ units='count',
+ higher_is_better=False,
+ replace_existing_values=True
+ )
+
+ logging.debug('GraphicsTest recorded failures: %s', self.get_failures())
+ if self._test_failure_report_enable:
+ self.output_perf_value(
+ description=self._test_failure_description,
+ value=len(self._failures),
+ units='count',
+ higher_is_better=False
+ )
+
self._player.close()
if hasattr(super(GraphicsTest, self), "cleanup"):
@@ -98,7 +109,7 @@
*args, **kwargs)
@contextlib.contextmanager
- def failure_report(self, name, subtest=None):
+ def failure_report(self, name):
"""Record the failure of an operation to the self._failures.
Records if the operation taken inside executed normally or not.
@@ -112,12 +123,12 @@
doSomething()
"""
# Assume failed at the beginning
- self.add_failures(name, subtest=subtest)
+ self.add_failures(name)
yield {}
- self.remove_failures(name, subtest=subtest)
+ self.remove_failures(name)
@classmethod
- def failure_report_decorator(cls, name, subtest=None):
+ def failure_report_decorator(cls, name):
"""Record the failure if the function failed to finish.
This method should only decorate to functions of GraphicsTest.
In addition, functions with this decorator should be called with no
@@ -155,7 +166,7 @@
# A member function of GraphicsTest is decorated. The first
# argument is the instance itself.
instance = args[0]
- with instance.failure_report(name, subtest):
+ with instance.failure_report(name):
# Cherry pick the arguments for the wrapped function.
d_args, d_kwargs = test_utils._cherry_pick_args(fn, args,
kwargs)
@@ -163,100 +174,25 @@
return wrapper
return decorator
- def add_failures(self, name, subtest=None):
+ def add_failures(self, failure_description):
"""
Add a record to failures list which will report back to chrome perf
- dashboard at cleanup stage.
- Args:
- name: failure name.
- subtest: subtest which will appears in cros-perf. If None is
- specified, use name instead.
+ dashboard at the cleanup stage.
"""
- target = self._get_failure(name, subtest=subtest)
- if target:
- target['names'].append(name)
- else:
- target = {
- 'description': self._get_failure_description(name, subtest),
- 'unit': 'count',
- 'higher_is_better': False,
- 'graph': self._get_failure_graph_name(),
- 'names': [name],
- }
- self._failures.append(target)
- return target
+ self._failures.append(failure_description)
- def remove_failures(self, name, subtest=None):
+ def remove_failures(self, failure_description):
"""
Remove a record from failures list which will report back to chrome perf
- dashboard at cleanup stage.
- Args:
- name: failure name.
- subtest: subtest which will appears in cros-perf. If None is
- specified, use name instead.
+ dashboard at the cleanup stage.
"""
- target = self._get_failure(name, subtest=subtest)
- if name in target['names']:
- target['names'].remove(name)
-
- def _output_perf(self):
- """Report recorded failures back to chrome perf."""
- if not self._test_failure_report_enable:
- return
-
- self.output_perf_value(
- description='Timeout_Reboot',
- value=0,
- units='count',
- higher_is_better=False,
- replace_existing_values=True
- )
-
- total_failures = 0
- # Report subtests failures
- for failure in self._failures:
- logging.debug('GraphicsTest failure: %s' % failure['names'])
- total_failures += len(failure['names'])
-
- if not self._test_failure_report_subtest:
- continue
-
- self.output_perf_value(
- description=failure['description'],
- value=len(failure['names']),
- units=failure['unit'],
- higher_is_better=failure['higher_is_better'],
- graph=failure['graph']
- )
-
- # Report the count of all failures
- self.output_perf_value(
- description=self._get_failure_graph_name(),
- value=total_failures,
- units='count',
- higher_is_better=False,
- )
-
- def _get_failure_graph_name(self):
- return self._test_failure_description
-
- def _get_failure_description(self, name, subtest):
- return subtest or name
-
- def _get_failure(self, name, subtest):
- """Get specific failures."""
- description = self._get_failure_description(name, subtest=subtest)
- for failure in self._failures:
- if failure['description'] == description:
- return failure
- return None
+ self._failures.remove(failure_description)
def get_failures(self):
"""
Get currently recorded failures list.
"""
- return [name for failure in self._failures
- for name in failure['names']]
+ return list(self._failures)
def open_vt1(self):
"""Switch to VT1 with keyboard."""
diff --git a/client/site_tests/graphics_Drm/graphics_Drm.py b/client/site_tests/graphics_Drm/graphics_Drm.py
index 3d5cb13..72cff27 100644
--- a/client/site_tests/graphics_Drm/graphics_Drm.py
+++ b/client/site_tests/graphics_Drm/graphics_Drm.py
@@ -4,6 +4,7 @@
import logging
import os
+from autotest_lib.client.bin import test
from autotest_lib.client.bin import utils
from autotest_lib.client.common_lib import error
from autotest_lib.client.cros import service_stopper
@@ -142,27 +143,23 @@
# graphics_Drm runs all available tests if tests = None.
def run_once(self, tests=None, perf_report=False):
self._test_failure_report_enable = perf_report
- self._test_failure_report_subtest = perf_report
for test in drm_tests.itervalues():
if tests and test.name not in tests:
continue
logging.info('-----------------[%s]-----------------' % test.name)
- self.add_failures(test.name, subtest=test.name)
- passed = False
if test.should_run():
if test.can_run():
logging.debug('Running test %s.', test.name)
passed = test.run()
+ if not passed:
+ self.add_failures(test.name)
else:
- logging.info('Failed: test %s can not be run on current'
- ' configurations.' % test.name)
+ logging.info('Failed: test %s can not be run on current '
+ 'configurations.' % test.name)
+ self.add_failures(test.name)
else:
- passed = True
logging.info('Skipping test: %s.' % test.name)
- if passed:
- self.remove_failures(test.name, subtest=test.name)
-
if self.get_failures():
raise error.TestFail('Failed: %s' % self.get_failures())
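
With the revert applied, graphics_Drm.run_once() records a failure only when
a subtest actually fails or cannot run on the current configuration, then
fails the whole test if anything was recorded. A condensed sketch of that
control flow (FakeTest and the test names are hypothetical placeholders for
the real drm_tests entries, and the real code raises error.TestFail instead
of printing):

    class FakeTest(object):
        """Hypothetical stand-in for a drm_tests entry."""
        def __init__(self, name, runnable, result):
            self.name = name
            self._runnable = runnable
            self._result = result
        def should_run(self):
            return True
        def can_run(self):
            return self._runnable
        def run(self):
            return self._result

    failures = []
    for t in [FakeTest('passing_test', True, True),
              FakeTest('failing_test', True, False),
              FakeTest('unsupported_test', False, False)]:
        if t.should_run():
            if t.can_run():
                if not t.run():
                    failures.append(t.name)
            else:
                # Cannot run on this configuration; count it as a failure.
                failures.append(t.name)

    if failures:
        # graphics_Drm raises error.TestFail('Failed: %s' % failures) here.
        print('Failed: %s' % failures)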