Initial implementation of Test Planner Overview tab

Signed-off-by: James Ren <jamesren@google.com>



git-svn-id: http://test.kernel.org/svn/autotest/trunk@4503 592f7852-d20e-0410-864c-8624ca9c26a4
diff --git a/frontend/planner/planner_test_utils.py b/frontend/planner/planner_test_utils.py
index f4c9f53..1ce52d7 100644
--- a/frontend/planner/planner_test_utils.py
+++ b/frontend/planner/planner_test_utils.py
@@ -9,6 +9,7 @@
     _PLAN_NAME = 'plan'
     GOOD_STATUS_WORD = 'GOOD'
     RUNNING_STATUS_WORD = 'RUNNING'
+    FAIL_STATUS_WORD = 'FAIL'
 
     def _planner_common_setup(self):
         self._frontend_common_setup()
@@ -73,3 +74,5 @@
                 word=self.RUNNING_STATUS_WORD)
         self._good_status = tko_models.Status.objects.create(
                 word=self.GOOD_STATUS_WORD)
+        self._fail_status = tko_models.Status.objects.create(
+                word=self.FAIL_STATUS_WORD)
diff --git a/frontend/planner/rpc_interface.py b/frontend/planner/rpc_interface.py
index c6b0e23..51284b0 100644
--- a/frontend/planner/rpc_interface.py
+++ b/frontend/planner/rpc_interface.py
@@ -488,6 +488,66 @@
     return result
 
 
+def get_overview_data(plan_ids):
+    """
+    Gets the data for the Overview tab
+
+    @param plan_ids: A list of plans, by ID or name
+    @return A dictionary - keys are plan names, values are dictionaries of data:
+                machines: A list of dictionaries, one for each host in the plan:
+                    hostname: The machine's hostname
+                    status: The host's status
+                    passed: True if the machine passed the test plan. A 'pass'
+                            means that, for every test configuration in the
+                            plan, the machine had at least one AFE job with no
+                            failed tests. 'passed' may also be None, meaning
+                            that this host is still running tests.
+                bugs: A list of the external IDs of bugs filed against the
+                      plan's test runs
+                test_configs: A list of dictionaries, each representing a test
+                              config:
+                    complete: Number of hosts that have completed this test
+                              config
+                    estimated_runtime: Number of hours this test config is
+                                       expected to run on each host
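+
+    For illustration only (the plan name, hostname and bug ID below are made
+    up), a result might look like:
+
+        {'my_plan': {'machines': [{'hostname': 'host1',
+                                   'status': 'Running',
+                                   'passed': None}],
+                     'bugs': ['12345'],
+                     'test_configs': [{'complete': 0,
+                                       'estimated_runtime': 1}]}}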
+    """
+    plans = models.Plan.smart_get_bulk(plan_ids)
+    result = {}
+
+    for plan in plans:
+        machines = []
+        for host in plan.host_set.all():
+            machines.append({'hostname': host.host.hostname,
+                             'status': host.status(),
+                             'passed': rpc_utils.compute_passed(host)})
+
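+        # Collect the distinct external IDs of bugs filed against the plan's
+        # test runs.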
+        bugs = set()
+        for testrun in plan.testrun_set.all():
+            bugs.update(testrun.bugs.values_list('external_uid', flat=True))
+
+        test_configs = []
+        for test_config in plan.testconfig_set.all():
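+            # Count hosts whose AFE jobs for this config have completed host
+            # queue entries, plus any hosts the config explicitly skipped.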
+            complete_statuses = afe_models.HostQueueEntry.COMPLETE_STATUSES
+            complete_jobs = test_config.job_set.filter(
+                    afe_job__hostqueueentry__status__in=complete_statuses)
+            complete_afe_jobs = afe_models.Job.objects.filter(
+                    id__in=complete_jobs.values_list('afe_job', flat=True))
+
+            complete_hosts = afe_models.Host.objects.filter(
+                    hostqueueentry__job__in=complete_afe_jobs)
+            complete_hosts |= test_config.skipped_hosts.all()
+
+            test_configs.append(
+                    {'complete': complete_hosts.distinct().count(),
+                     'estimated_runtime': test_config.estimated_runtime})
+
+        plan_data = {'machines': machines,
+                     'bugs': list(bugs),
+                     'test_configs': test_configs}
+        result[plan.name] = plan_data
+
+    return result
+
+
 def get_motd():
     return afe_rpc_utils.get_motd()
 
@@ -497,5 +557,6 @@
               'host_actions': sorted(failure_actions.HostAction.values),
               'test_actions': sorted(failure_actions.TestAction.values),
               'additional_parameter_types':
-                      sorted(model_attributes.AdditionalParameterType.values)}
+                      sorted(model_attributes.AdditionalParameterType.values),
+              'host_statuses': sorted(model_attributes.HostStatus.values)}
     return result
diff --git a/frontend/planner/rpc_interface_unittest.py b/frontend/planner/rpc_interface_unittest.py
index 0048307..f36d458 100644
--- a/frontend/planner/rpc_interface_unittest.py
+++ b/frontend/planner/rpc_interface_unittest.py
@@ -232,5 +232,52 @@
         self.assertEqual(sorted(actual), sorted(expected))
 
 
+    def _test_get_overview_data_helper(self, stage):
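+        """
+        Sets up an active plan and returns the expected Overview data.
+
+        stage 0: no progress; both hosts are still running.
+        stage 1: the first host has finished with a failed test and a filed
+                 bug.
+        """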
+        self._setup_active_plan()
+        self.god.stub_function(rpc_utils, 'compute_passed')
+        rpc_utils.compute_passed.expect_call(
+                self._plan.host_set.get(host=self.hosts[0])).and_return(None)
+        rpc_utils.compute_passed.expect_call(
+                self._plan.host_set.get(host=self.hosts[1])).and_return(None)
+
+        data = {'test_configs': [{'complete': 0, 'estimated_runtime': 1}],
+                'bugs': [],
+                'machines': [{'hostname': self.hosts[0].hostname,
+                              'status': 'Running',
+                              'passed': None},
+                             {'hostname': self.hosts[1].hostname,
+                              'status': 'Running',
+                              'passed': None}]}
+        if stage < 1:
+            return {self._plan.name: data}
+
+        tko_test = self._tko_job.test_set.create(kernel=self._tko_kernel,
+                                                 machine=self._tko_machine,
+                                                 status=self._fail_status)
+        test_run = self._plan.testrun_set.create(test_job=self._planner_job,
+                                                 tko_test=tko_test,
+                                                 host=self._planner_host)
+        self._afe_job.hostqueueentry_set.update(status='Completed')
+        self._planner_host.complete = True
+        self._planner_host.save()
+        test_run.bugs.create(external_uid='bug')
+        data['bugs'] = ['bug']
+        data['test_configs'][0]['complete'] = 1
+        data['machines'][0]['status'] = 'Finished'
+        return {self._plan.name: data}
+
+
+    def test_get_overview_data_no_progress(self):
+        self.assertEqual(self._test_get_overview_data_helper(0),
+                         rpc_interface.get_overview_data([self._plan.id]))
+        self.god.check_playback()
+
+
+    def test_get_overview_data_one_finished_with_bug(self):
+        self.assertEqual(self._test_get_overview_data_helper(1),
+                         rpc_interface.get_overview_data([self._plan.id]))
+        self.god.check_playback()
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/frontend/planner/rpc_utils.py b/frontend/planner/rpc_utils.py
index 9bf8a41..bcdca53 100644
--- a/frontend/planner/rpc_utils.py
+++ b/frontend/planner/rpc_utils.py
@@ -317,3 +317,26 @@
             skip_verify=(not run_verify),
             verify_params=verify_params,
             **additional_wrap_arguments)
+
+
+def compute_passed(host):
+    """
+    Returns True if the host can be considered to have passed its test plan.
+
+    A 'pass' means that, for every test configuration in the plan (excluding
+    configurations that skipped this host), the host had at least one AFE job
+    with no failed tests. Returns None if the host is still running tests.
+    """
+    if not host.complete:
+        return None
+
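+    # Test configurations that explicitly skipped this host don't count
+    # towards its pass/fail result.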
+    test_configs = host.plan.testconfig_set.exclude(skipped_hosts=host.host)
+    for test_config in test_configs:
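+        # Look for at least one job in this config with no non-GOOD test runs;
+        # if none is found, the loop's else clause reports a failure.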
+        for planner_job in test_config.job_set.all():
+            bad = planner_job.testrun_set.exclude(tko_test__status__word='GOOD')
+            if not bad:
+                break
+        else:
+            # Didn't break out of loop; this test config had no good jobs
+            return False
+    return True
diff --git a/frontend/planner/rpc_utils_unittest.py b/frontend/planner/rpc_utils_unittest.py
index d5579ec..a13b6d4 100644
--- a/frontend/planner/rpc_utils_unittest.py
+++ b/frontend/planner/rpc_utils_unittest.py
@@ -322,5 +322,41 @@
 
         self.assertEqual(actual, expected)
 
+
+    def test_compute_passed_incomplete(self):
+        self._setup_active_plan()
+        self._planner_host.complete = False
+        self._planner_host.save()
+        self.assertEqual(rpc_utils.compute_passed(self._planner_host), None)
+
+
+    def test_compute_passed_good(self):
+        self._setup_active_plan()
+        tko_test = self._tko_job.test_set.create(kernel=self._tko_kernel,
+                                                 status=self._good_status,
+                                                 machine=self._tko_machine)
+        self._plan.testrun_set.create(test_job=self._planner_job,
+                                      tko_test=tko_test,
+                                      host=self._planner_host)
+        self._planner_host.complete = True
+        self._planner_host.save()
+
+        self.assertEqual(rpc_utils.compute_passed(self._planner_host), True)
+
+
+    def test_compute_passed_bad(self):
+        self._setup_active_plan()
+        tko_test = self._tko_job.test_set.create(kernel=self._tko_kernel,
+                                                 status=self._fail_status,
+                                                 machine=self._tko_machine)
+        self._plan.testrun_set.create(test_job=self._planner_job,
+                                      tko_test=tko_test,
+                                      host=self._planner_host)
+        self._planner_host.complete = True
+        self._planner_host.save()
+
+        self.assertEqual(rpc_utils.compute_passed(self._planner_host), False)
+
+
 if __name__ == '__main__':
     unittest.main()