Initial implementation of Test Planner Test View.

Resubmitting because the previously generated patch was malformed.

Signed-off-by: James Ren <jamesren@google.com>


git-svn-id: http://test.kernel.org/svn/autotest/trunk@4556 592f7852-d20e-0410-864c-8624ca9c26a4
diff --git a/frontend/planner/models.py b/frontend/planner/models.py
index 9334a09..76aa4a2 100644
--- a/frontend/planner/models.py
+++ b/frontend/planner/models.py
@@ -5,7 +5,7 @@
 from autotest_lib.frontend.afe import model_logic, rpc_utils
 from autotest_lib.frontend.tko import models as tko_models
 from autotest_lib.frontend.planner import model_attributes
-from autotest_lib.client.common_lib import utils
+from autotest_lib.client.common_lib import utils, host_queue_entry_states
 
 
 class Plan(dbmodels.Model, model_logic.ModelExtensions):
@@ -174,6 +174,29 @@
         db_table = 'planner_test_jobs'
 
 
+    def active(self):
+        for hqe in self.afe_job.hostqueueentry_set.all():
+            if not hqe.complete:
+                return True
+        return False
+
+
+    def all_tests_passed(self):
+        if self.active():
+            return False
+
+        Status = host_queue_entry_states.Status
+        if self.afe_job.hostqueueentry_set.exclude(status=Status.COMPLETED):
+            return False
+
+        tko_tests = tko_models.Test.objects.filter(
+                job__afe_job_id=self.afe_job.id)
+        for tko_test in tko_tests:
+            if tko_test.status.word != 'GOOD':
+                return False
+        return True
+
+
     def _get_details_unicode(self):
         return 'AFE job %s' % self.afe_job.id
 
diff --git a/frontend/planner/models_test.py b/frontend/planner/models_test.py
index 8a68f77..2121992 100755
--- a/frontend/planner/models_test.py
+++ b/frontend/planner/models_test.py
@@ -4,7 +4,9 @@
 import common
 from autotest_lib.frontend import setup_django_environment
 from autotest_lib.frontend.afe import frontend_test_utils, rpc_utils
+from autotest_lib.frontend.tko import models as tko_models
 from autotest_lib.frontend.planner import models, model_attributes
+from autotest_lib.frontend.planner import planner_test_utils
 
 
 class ModelWithHashTestBase(frontend_test_utils.FrontendTestMixin):
@@ -113,5 +115,54 @@
         self.assertEqual(None, found)
 
 
+class JobTest(planner_test_utils.PlannerTestMixin,
+              unittest.TestCase):
+    def setUp(self):
+        self._planner_common_setup()
+        self._setup_active_plan()
+
+
+    def tearDown(self):
+        self._planner_common_teardown()
+
+
+    def test_active(self):
+        self.assertEqual(True, self._planner_job.active())
+        self._afe_job.hostqueueentry_set.update(complete=True)
+        self.assertEqual(False, self._planner_job.active())
+
+
+    def test_all_tests_passed_active(self):
+        self.assertEqual(True, self._planner_job.active())
+        self.assertEqual(False, self._planner_job.all_tests_passed())
+
+
+    def test_all_tests_passed_failed_queue_entry(self):
+        self._afe_job.hostqueueentry_set.update(complete=True, status='Failed')
+        self.assertEqual(False, self._planner_job.active())
+
+        self.assertEqual(False, self._planner_job.all_tests_passed())
+
+
+    def _setup_test_all_tests_passed(self, status):
+        self._afe_job.hostqueueentry_set.update(complete=True,
+                                                status='Completed')
+        tko_test = tko_models.Test.objects.create(job=self._tko_job,
+                                                  status=status,
+                                                  kernel=self._tko_kernel,
+                                                  machine=self._tko_machine)
+        self.assertEqual(False, self._planner_job.active())
+
+
+    def test_all_tests_passed_success(self):
+        self._setup_test_all_tests_passed(self._good_status)
+        self.assertEqual(True, self._planner_job.all_tests_passed())
+
+
+    def test_all_tests_passed_failure(self):
+        self._setup_test_all_tests_passed(self._fail_status)
+        self.assertEqual(False, self._planner_job.all_tests_passed())
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/frontend/planner/rpc_interface.py b/frontend/planner/rpc_interface.py
index 51284b0..b97c3d2 100644
--- a/frontend/planner/rpc_interface.py
+++ b/frontend/planner/rpc_interface.py
@@ -367,6 +367,48 @@
                 bugs=bugs, reason=reason, invalidate=invalidate)
 
 
+def get_machine_view_data(plan_id):
+    """
+    Gets the data required for the web frontend Machine View.
+
+    @param plan_id: The ID of the test plan
+    @return An array. Each element is a dictionary:
+                    machine: The name of the machine
+                    status: The machine's status (one of
+                            model_attributes.HostStatus)
+                    bug_ids: List of the IDs for the bugs filed
+                    tests_run: An array of dictionaries:
+                            test_name: The TKO name of the test
+                            success: True if the test passed
+    """
+    plan = models.Plan.smart_get(plan_id)
+    result = []
+    for host in plan.host_set.all():
+        tests_run = []
+
+        machine = host.host.hostname
+        host_status = host.status()
+        bug_ids = set()
+
+        testruns = plan.testrun_set.filter(host=host, invalidated=False,
+                                           finalized=True)
+        for testrun in testruns:
+            test_name = testrun.tko_test.test
+            test_status = testrun.tko_test.status.word
+            testrun_bug_ids = testrun.bugs.all().values_list(
+                    'external_uid', flat=True)
+
+            tests_run.append({'test_name': test_name,
+                              'status': test_status})
+            bug_ids.update(testrun_bug_ids)
+
+        result.append({'machine': machine,
+                       'status': host_status,
+                       'tests_run': tests_run,
+                       'bug_ids': list(bug_ids)})
+    return result
+
+
 def generate_test_config(alias, afe_test_name=None,
                          estimated_runtime=0, **kwargs):
     """
@@ -446,48 +488,6 @@
             'param_values': param_values}
 
 
-def get_machine_view_data(plan_id):
-    """
-    Gets the data required for the web frontend Machine View.
-
-    @param plan_id: The ID of the test plan
-    @return An array. Each element is a dictionary:
-                    machine: The name of the machine
-                    status: The machine's status (one of
-                            model_attributes.HostStatus)
-                    bug_ids: List of the IDs for the bugs filed
-                    tests_run: An array of dictionaries:
-                            test_name: The TKO name of the test
-                            success: True if the test passed
-    """
-    plan = models.Plan.smart_get(plan_id)
-    result = []
-    for host in plan.host_set.all():
-        tests_run = []
-
-        machine = host.host.hostname
-        host_status = host.status()
-        bug_ids = set()
-
-        testruns = plan.testrun_set.filter(host=host, invalidated=False,
-                                           finalized=True)
-        for testrun in testruns:
-            test_name = testrun.tko_test.test
-            test_status = testrun.tko_test.status.word
-            testrun_bug_ids = testrun.bugs.all().values_list(
-                    'external_uid', flat=True)
-
-            tests_run.append({'test_name': test_name,
-                              'status': test_status})
-            bug_ids.update(testrun_bug_ids)
-
-        result.append({'machine': machine,
-                       'status': host_status,
-                       'tests_run': tests_run,
-                       'bug_ids': list(bug_ids)})
-    return result
-
-
 def get_overview_data(plan_ids):
     """
     Gets the data for the Overview tab
@@ -516,9 +516,16 @@
     for plan in plans:
         machines = []
         for host in plan.host_set.all():
+            pass_status = rpc_utils.compute_test_config_status(host)
+            if pass_status == rpc_utils.ComputeTestConfigStatusResult.PASS:
+                passed = True
+            elif pass_status == rpc_utils.ComputeTestConfigStatusResult.FAIL:
+                passed = False
+            else:
+                passed = None
             machines.append({'hostname': host.host.hostname,
                              'status': host.status(),
-                             'passed': rpc_utils.compute_passed(host)})
+                             'passed': passed})
 
         bugs = set()
         for testrun in plan.testrun_set.all():
@@ -526,9 +533,8 @@
 
         test_configs = []
         for test_config in plan.testconfig_set.all():
-            complete_statuses = afe_models.HostQueueEntry.COMPLETE_STATUSES
             complete_jobs = test_config.job_set.filter(
-                    afe_job__hostqueueentry__status__in=complete_statuses)
+                    afe_job__hostqueueentry__complete=True)
             complete_afe_jobs = afe_models.Job.objects.filter(
                     id__in=complete_jobs.values_list('afe_job', flat=True))
 
@@ -548,6 +554,61 @@
     return result
 
 
+def get_test_view_data(plan_id):
+    """
+    Gets the data for the Test View tab
+
+    @param plan_id: The name or ID of the test plan
+    @return A dictionary - Keys are test config aliases, values are dictionaries
+                           of data:
+                total_machines: Total number of machines scheduled for this test
+                                config. Excludes machines that are set to skip
+                                this config.
+                machine_status: A dictionary:
+                    key: The hostname
+                    value: The status of the machine: one of 'Scheduled',
+                           'Running', 'Pass', or 'Fail'
+                total_runs: Total number of runs of this test config. Includes
+                            repeated runs (from triage re-run)
+                total_passes: Number of runs that resulted in a 'pass', meaning
+                              that none of the tests in the test config had any
+                              status other than GOOD.
+                bugs: List of bugs that were filed under this test config
+    """
+    plan = models.Plan.smart_get(plan_id)
+    result = {}
+    for test_config in plan.testconfig_set.all():
+        skipped_host_ids = test_config.skipped_hosts.values_list('id',
+                                                                 flat=True)
+        hosts = plan.host_set.exclude(host__id__in=skipped_host_ids)
+        total_machines = hosts.count()
+
+        machine_status = {}
+        for host in hosts:
+            machine_status[host.host.hostname] = (
+                    rpc_utils.compute_test_config_status(host, test_config))
+
+        planner_jobs = test_config.job_set.all()
+        total_runs = planner_jobs.count()
+        total_passes = 0
+        for planner_job in planner_jobs:
+            if planner_job.all_tests_passed():
+                total_passes += 1
+
+        test_runs = plan.testrun_set.filter(
+                test_job__in=test_config.job_set.all())
+        bugs = set()
+        for test_run in test_runs:
+            bugs.update(test_run.bugs.values_list('external_uid', flat=True))
+
+        result[test_config.alias] = {'total_machines': total_machines,
+                                     'machine_status': machine_status,
+                                     'total_runs': total_runs,
+                                     'total_passes': total_passes,
+                                     'bugs': list(bugs)}
+    return result
+
+
 def get_motd():
     return afe_rpc_utils.get_motd()
 
diff --git a/frontend/planner/rpc_interface_unittest.py b/frontend/planner/rpc_interface_unittest.py
index f36d458..30366bf 100644
--- a/frontend/planner/rpc_interface_unittest.py
+++ b/frontend/planner/rpc_interface_unittest.py
@@ -164,28 +164,6 @@
         self.god.check_playback()
 
 
-    def test_generate_test_config(self):
-        control = {'control_file': object(),
-                   'is_server': object()}
-        test = 'test'
-        alias = 'test alias'
-        estimated_runtime = object()
-
-        self.god.stub_function(afe_rpc_interface, 'generate_control_file')
-        afe_rpc_interface.generate_control_file.expect_call(
-                tests=[test]).and_return(control)
-
-        result = rpc_interface.generate_test_config(
-                alias=alias, afe_test_name=test,
-                estimated_runtime=estimated_runtime)
-
-        self.assertEqual(result['alias'], 'test_alias')
-        self.assertEqual(result['control_file'], control['control_file'])
-        self.assertEqual(result['is_server'], control['is_server'])
-        self.assertEqual(result['estimated_runtime'], estimated_runtime)
-        self.god.check_playback()
-
-
     def test_get_machine_view_data(self):
         self._setup_active_plan()
 
@@ -232,12 +210,34 @@
         self.assertEqual(sorted(actual), sorted(expected))
 
 
+    def test_generate_test_config(self):
+        control = {'control_file': object(),
+                   'is_server': object()}
+        test = 'test'
+        alias = 'test alias'
+        estimated_runtime = object()
+
+        self.god.stub_function(afe_rpc_interface, 'generate_control_file')
+        afe_rpc_interface.generate_control_file.expect_call(
+                tests=[test]).and_return(control)
+
+        result = rpc_interface.generate_test_config(
+                alias=alias, afe_test_name=test,
+                estimated_runtime=estimated_runtime)
+
+        self.assertEqual(result['alias'], 'test_alias')
+        self.assertEqual(result['control_file'], control['control_file'])
+        self.assertEqual(result['is_server'], control['is_server'])
+        self.assertEqual(result['estimated_runtime'], estimated_runtime)
+        self.god.check_playback()
+
+
     def _test_get_overview_data_helper(self, stage):
         self._setup_active_plan()
-        self.god.stub_function(rpc_utils, 'compute_passed')
-        rpc_utils.compute_passed.expect_call(
+        self.god.stub_function(rpc_utils, 'compute_test_config_status')
+        rpc_utils.compute_test_config_status.expect_call(
                 self._plan.host_set.get(host=self.hosts[0])).and_return(None)
-        rpc_utils.compute_passed.expect_call(
+        rpc_utils.compute_test_config_status.expect_call(
                 self._plan.host_set.get(host=self.hosts[1])).and_return(None)
 
         data = {'test_configs': [{'complete': 0, 'estimated_runtime': 1}],
@@ -257,7 +257,7 @@
         test_run = self._plan.testrun_set.create(test_job=self._planner_job,
                                                  tko_test=tko_test,
                                                  host=self._planner_host)
-        self._afe_job.hostqueueentry_set.update(status='Completed')
+        self._afe_job.hostqueueentry_set.update(complete=True)
         self._planner_host.complete = True
         self._planner_host.save()
         test_run.bugs.create(external_uid='bug')
@@ -279,5 +279,53 @@
         self.god.check_playback()
 
 
+    def _test_get_test_view_data_helper(self, stage):
+        self._setup_active_plan()
+        self.god.stub_function(rpc_utils, 'compute_test_config_status')
+        hosts = self._plan.host_set.filter(host__in=self.hosts[0:2])
+        rpc_utils.compute_test_config_status.expect_call(
+                hosts[0], self._test_config).and_return(None)
+
+        data = {'total_machines': 2,
+                'machine_status': {'host1': None,
+                                   'host2': None},
+                'total_runs': 1,
+                'total_passes': 0,
+                'bugs': []}
+        if stage < 1:
+            rpc_utils.compute_test_config_status.expect_call(
+                    hosts[1], self._test_config).and_return(None)
+            return {self._test_config.alias: data}
+
+        fail_status = rpc_utils.ComputeTestConfigStatusResult.FAIL
+        rpc_utils.compute_test_config_status.expect_call(
+                hosts[1], self._test_config).and_return(fail_status)
+        tko_test = self._tko_job.test_set.create(kernel=self._tko_kernel,
+                                                 machine=self._tko_machine,
+                                                 status=self._fail_status)
+        test_run = self._plan.testrun_set.create(test_job=self._planner_job,
+                                                 tko_test=tko_test,
+                                                 host=self._planner_host)
+        self._afe_job.hostqueueentry_set.update(complete=True)
+
+        test_run.bugs.create(external_uid='bug')
+
+        data['machine_status']['host2'] = fail_status
+        data['bugs'] = ['bug']
+        return {self._test_config.alias: data}
+
+
+    def test_get_test_view_data_no_progress(self):
+        self.assertEqual(self._test_get_test_view_data_helper(0),
+                         rpc_interface.get_test_view_data(self._plan.id))
+        self.god.check_playback()
+
+
+    def test_get_test_view_data_one_failed_with_bug(self):
+        self.assertEqual(self._test_get_test_view_data_helper(1),
+                         rpc_interface.get_test_view_data(self._plan.id))
+        self.god.check_playback()
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/frontend/planner/rpc_utils.py b/frontend/planner/rpc_utils.py
index bcdca53..f314fa7 100644
--- a/frontend/planner/rpc_utils.py
+++ b/frontend/planner/rpc_utils.py
@@ -5,6 +5,7 @@
 from autotest_lib.frontend.planner import failure_actions, control_file
 from autotest_lib.frontend.tko import models as tko_models
 from autotest_lib.client.common_lib import global_config, utils, global_config
+from autotest_lib.client.common_lib import enum
 
 
 PLANNER_LABEL_PREFIX = 'planner_'
@@ -114,21 +115,27 @@
     if host.blocked:
         return None
 
-    test_configs = plan.testconfig_set.order_by('execution_order')
-    for test_config in test_configs:
-        afe_jobs = plan.job_set.filter(test_config=test_config)
-        afe_job_ids = afe_jobs.values_list('afe_job', flat=True)
-        hqes = afe_models.HostQueueEntry.objects.filter(job__id__in=afe_job_ids,
-                                                        host=host.host)
-        if not hqes and not bool(test_config.skipped_hosts.filter(host=host)):
-            return test_config
-        for hqe in hqes:
-            if not hqe.complete:
-                # HostQueueEntry still active for this host,
-                # should not run another test
-                return None
+    test_configs = plan.testconfig_set.exclude(
+            skipped_hosts=host.host).order_by('execution_order')
+    result = None
 
-    # All HQEs related to this host are complete
+    for test_config in test_configs:
+        planner_jobs = test_config.job_set.filter(
+                afe_job__hostqueueentry__host=host.host)
+        for planner_job in planner_jobs:
+            if planner_job.active():
+                # There is a job active; do not start another one
+                return None
+        try:
+            planner_job = planner_jobs.get(requires_rerun=False)
+        except models.Job.DoesNotExist:
+            if not result:
+                result = test_config
+
+    if result:
+        return result
+
+    # All jobs related to this host are complete
     host.complete = True
     host.save()
     return None
@@ -319,24 +326,40 @@
             **additional_wrap_arguments)
 
 
-def compute_passed(host):
+ComputeTestConfigStatusResult = enum.Enum('Pass', 'Fail', 'Scheduled',
+                                          'Running', string_values=True)
+def compute_test_config_status(host, test_config=None):
     """
-    Returns True if the host can be considered to have passed its test plan
+    Returns a value of ComputeTestConfigStatusResult:
+        Pass: This host passed the test config
+        Fail: This host failed the test config
+        Scheduled: This host has not yet run this test config
+        Running: This host is currently running this test config
 
     A 'pass' means that, for every test configuration in the plan, the machine
     had at least one AFE job with no failed tests. 'passed' could also be None,
     meaning that this host is still running tests.
-    """
-    if not host.complete:
-        return None
 
-    test_configs = host.plan.testconfig_set.exclude(skipped_hosts=host.host)
+    @param test_config: A test config to check. None to check all test configs
+                        in the plan
+    """
+    if test_config:
+        test_configs = [test_config]
+    else:
+        test_configs = host.plan.testconfig_set.exclude(skipped_hosts=host.host)
+
     for test_config in test_configs:
-        for planner_job in test_config.job_set.all():
-            bad = planner_job.testrun_set.exclude(tko_test__status__word='GOOD')
-            if not bad:
-                break
-        else:
-            # Didn't break out of loop; this test config had no good jobs
-            return False
-    return True
+        try:
+            planner_job = test_config.job_set.get(
+                    afe_job__hostqueueentry__host=host.host,
+                    requires_rerun=False)
+        except models.Job.DoesNotExist:
+            return ComputeTestConfigStatusResult.SCHEDULED
+
+        if planner_job.active():
+            return ComputeTestConfigStatusResult.RUNNING
+
+        if planner_job.testrun_set.exclude(tko_test__status__word='GOOD'):
+            return ComputeTestConfigStatusResult.FAIL
+
+    return ComputeTestConfigStatusResult.PASS
diff --git a/frontend/planner/rpc_utils_unittest.py b/frontend/planner/rpc_utils_unittest.py
index a13b6d4..6d97dd1 100644
--- a/frontend/planner/rpc_utils_unittest.py
+++ b/frontend/planner/rpc_utils_unittest.py
@@ -323,14 +323,27 @@
         self.assertEqual(actual, expected)
 
 
-    def test_compute_passed_incomplete(self):
+    def test_compute_test_config_status_scheduled(self):
         self._setup_active_plan()
-        self._planner_host.complete = False
-        self._planner_host.save()
-        self.assertEqual(rpc_utils.compute_passed(self._planner_host), None)
+        self._planner_job.delete()
+
+        self.assertEqual(
+                rpc_utils.compute_test_config_status(self._planner_host),
+                rpc_utils.ComputeTestConfigStatusResult.SCHEDULED)
 
 
-    def test_compute_passed_good(self):
+    def test_compute_test_config_status_running(self):
+        self._setup_active_plan()
+        self.god.stub_function(models.Job, 'active')
+        models.Job.active.expect_call().and_return(True)
+
+        self.assertEqual(
+                rpc_utils.compute_test_config_status(self._planner_host),
+                rpc_utils.ComputeTestConfigStatusResult.RUNNING)
+        self.god.check_playback()
+
+
+    def test_compute_test_config_status_good(self):
         self._setup_active_plan()
         tko_test = self._tko_job.test_set.create(kernel=self._tko_kernel,
                                                  status=self._good_status,
@@ -340,11 +353,16 @@
                                       host=self._planner_host)
         self._planner_host.complete = True
         self._planner_host.save()
+        self.god.stub_function(models.Job, 'active')
+        models.Job.active.expect_call().and_return(False)
 
-        self.assertEqual(rpc_utils.compute_passed(self._planner_host), True)
+        self.assertEqual(
+                rpc_utils.compute_test_config_status(self._planner_host),
+                rpc_utils.ComputeTestConfigStatusResult.PASS)
+        self.god.check_playback()
 
 
-    def test_compute_passed_bad(self):
+    def test_compute_test_config_status_bad(self):
         self._setup_active_plan()
         tko_test = self._tko_job.test_set.create(kernel=self._tko_kernel,
                                                  status=self._fail_status,
@@ -354,8 +372,13 @@
                                       host=self._planner_host)
         self._planner_host.complete = True
         self._planner_host.save()
+        self.god.stub_function(models.Job, 'active')
+        models.Job.active.expect_call().and_return(False)
 
-        self.assertEqual(rpc_utils.compute_passed(self._planner_host), False)
+        self.assertEqual(
+                rpc_utils.compute_test_config_status(self._planner_host),
+                rpc_utils.ComputeTestConfigStatusResult.FAIL)
+        self.god.check_playback()
 
 
 if __name__ == '__main__':