fix JobManager.get_status_counts, which was returning incorrect counts in some cases when jobs were aborted.  the problem was that a complete entry can have aborted set or not and still compute to the same full status, which violated the method's assumption that each (status, aborted, complete) group maps to a distinct full status.  as a result, the count for one group silently overwrote the count for another instead of being added to it.
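
to illustrate, a minimal sketch of the failure mode and the fix (the
compute_full_status stand-in and the row values below are hypothetical; the
real query groups host queue entries by (status, aborted, complete)):

    def compute_full_status(status, aborted, complete):
        # simplified stand-in for HostQueueEntry.compute_full_status()
        if aborted and not complete:
            return 'Aborted'
        return status

    # two GROUP BY rows for one job that collapse to the same full status:
    # (job_id, status, aborted, complete, count)
    rows = [(42, 'Failed', 0, 1, 1),  # complete entry, not aborted
            (42, 'Failed', 1, 1, 1)]  # complete entry that was aborted

    # old behavior: plain assignment, so the second row clobbers the first
    buggy = {}
    for job_id, status, aborted, complete, count in rows:
        status_dict = buggy.setdefault(job_id, {})
        status_dict[compute_full_status(status, aborted, complete)] = count
    assert buggy[42] == {'Failed': 1}  # wrong: should be 2

    # new behavior: accumulate counts that share a full status
    fixed = {}
    for job_id, status, aborted, complete, count in rows:
        status_dict = fixed.setdefault(job_id, {})
        full_status = compute_full_status(status, aborted, complete)
        status_dict[full_status] = status_dict.get(full_status, 0) + count
    assert fixed[42] == {'Failed': 2}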

to test it, instead of adding to the doctests (which would be messy in this particular case, since we need to reach in and modify HQE statuses directly), i started a new rpc_interface_unittest, which seems to be the way of the future.  since it shares a bunch of logic with the scheduler unit test (which also depends on setting up a fake AFE database), i extracted the common logic into frontend/afe/frontend_test_utils.py.  i also fixed up the logic extracted from monitor_db_unittest for reusing an initial DB between tests.
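
for reference, a new test case built on the mixin looks roughly like this
(ExampleTest and its test body are hypothetical; the first setup builds the
test database into a temp file and snapshots it, and every later setup just
restores the snapshot instead of rebuilding the schema):

    import unittest
    import common
    from autotest_lib.frontend import setup_django_environment
    from autotest_lib.frontend.afe import frontend_test_utils

    class ExampleTest(unittest.TestCase,
                      frontend_test_utils.FrontendTestMixin):
        def setUp(self):
            self._frontend_common_setup()

        def tearDown(self):
            self._frontend_common_teardown()

        def test_example(self):
            # host ids 1 and 2 come from _fill_in_test_data()
            job = self._create_job(hosts=[1, 2])
            self.assertEquals(job.hostqueueentry_set.count(), 2)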

Signed-off-by: Steve Howard <showard@google.com>


git-svn-id: http://test.kernel.org/svn/autotest/trunk@3177 592f7852-d20e-0410-864c-8624ca9c26a4
diff --git a/frontend/afe/common.py b/frontend/afe/common.py
new file mode 100644
index 0000000..1edf302
--- /dev/null
+++ b/frontend/afe/common.py
@@ -0,0 +1,8 @@
+import os, sys
+dirname = os.path.dirname(sys.modules[__name__].__file__)
+autotest_dir = os.path.abspath(os.path.join(dirname, '..', '..'))
+client_dir = os.path.join(autotest_dir, "client")
+sys.path.insert(0, client_dir)
+import setup_modules
+sys.path.pop(0)
+setup_modules.setup(base_path=autotest_dir, root_module_name="autotest_lib")
diff --git a/frontend/afe/frontend_test_utils.py b/frontend/afe/frontend_test_utils.py
new file mode 100644
index 0000000..a194684
--- /dev/null
+++ b/frontend/afe/frontend_test_utils.py
@@ -0,0 +1,157 @@
+import atexit, datetime, os, tempfile
+import common
+from autotest_lib.frontend import setup_test_environment
+from autotest_lib.frontend import thread_local
+from autotest_lib.frontend.afe import models
+from autotest_lib.client.common_lib.test_utils import mock
+
+class FrontendTestMixin(object):
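+    # class-level state, shared by every test case in the process, so the
+    # test database is created and backed up only once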
+    _test_db_initialized = False
+
+    def _initialize_test_db(self):
+        if self._test_db_initialized:
+            return
+
+        temp_fd, test_db_file = tempfile.mkstemp(suffix='.frontend_test')
+        FrontendTestMixin._test_db_file = test_db_file
+        os.close(temp_fd)
+
+        def cleanup_test_db():
+            os.remove(test_db_file)
+        atexit.register(cleanup_test_db)
+
+        setup_test_environment.set_test_database(test_db_file)
+        setup_test_environment.set_up()
+        FrontendTestMixin._test_db_backup = (
+            setup_test_environment.backup_test_database())
+        FrontendTestMixin._test_db_initialized = True
+
+
+    def _open_test_db(self):
+        self._initialize_test_db()
+        setup_test_environment.restore_test_database(self._test_db_backup)
+
+
+    def _fill_in_test_data(self):
+        """Populate the test database with some hosts and labels."""
+        user = models.User.objects.create(login='my_user')
+        acl_group = models.AclGroup.objects.create(name='my_acl')
+        acl_group.users.add(user)
+
+        hosts = [models.Host.objects.create(hostname=hostname) for hostname in
+                 ('host1', 'host2', 'host3', 'host4', 'host5', 'host6',
+                  'host7', 'host8', 'host9')]
+
+        acl_group.hosts = hosts
+        models.AclGroup.smart_get('Everyone').hosts = []
+
+        labels = [models.Label.objects.create(name=name) for name in
+                  ('label1', 'label2', 'label3', 'label4', 'label5', 'label6',
+                   'label7')]
+
+        platform = models.Label.objects.create(name='myplatform', platform=True)
+        for host in hosts:
+            host.labels.add(platform)
+
+        atomic_group1 = models.AtomicGroup.objects.create(
+                name='atomic1', max_number_of_machines=2)
+        atomic_group2 = models.AtomicGroup.objects.create(
+                name='atomic2', max_number_of_machines=2)
+
+        self.label3 = labels[2]
+        self.label3.only_if_needed = True
+        self.label3.save()
+        self.label4 = labels[3]
+        self.label4.atomic_group = atomic_group1
+        self.label4.save()
+        self.label5 = labels[4]
+        self.label5.atomic_group = atomic_group1
+        self.label5.save()
+        hosts[0].labels.add(labels[0])  # label1
+        hosts[1].labels.add(labels[1])  # label2
+        self.label6 = labels[5]
+        self.label7 = labels[6]
+        for hostnum in xrange(4, 7):  # host5..host7
+            hosts[hostnum].labels.add(self.label4)  # an atomic group label
+            hosts[hostnum].labels.add(self.label6)  # a normal label
+        hosts[6].labels.add(self.label7)
+        for hostnum in xrange(7, 9):  # host8..host9
+            hosts[hostnum].labels.add(self.label5)  # an atomic group label
+            hosts[hostnum].labels.add(self.label6)  # a normal label
+            hosts[hostnum].labels.add(self.label7)
+
+
+    def _setup_dummy_user(self):
+        user = models.User.objects.create(login='dummy', access_level=100)
+        thread_local.set_user(user)
+
+
+    def _frontend_common_setup(self):
+        self.god = mock.mock_god()
+        self._open_test_db()
+        self._fill_in_test_data()
+        self._setup_dummy_user()
+
+
+    def _frontend_common_teardown(self):
+        setup_test_environment.tear_down()
+        self.god.unstub_all()
+
+
+    def _create_job(self, hosts=(), metahosts=(), priority=0, active=False,
+                    synchronous=False, atomic_group=None):
+        """
+        Create a job row in the test database.
+
+        @param hosts - A list of explicit host ids for this job to be
+                scheduled on.
+        @param metahosts - A list of label ids for each host that this job
+                should be scheduled on (meta host scheduling).
+        @param priority - The job priority (integer).
+        @param active - bool, mark this job as running or not in the database?
+        @param synchronous - bool, if True use synch_count=2 otherwise use
+                synch_count=1.
+        @param atomic_group - An atomic group id for this job to schedule on
+                or None if atomic scheduling is not required.  Each metahost
+                becomes a request to schedule an entire atomic group.
+                This does not support creating an active atomic group job.
+        """
+        assert not (atomic_group and active)  # TODO(gps): support this
+        synch_count = synchronous and 2 or 1
+        created_on = datetime.datetime(2008, 1, 1)
+        status = models.HostQueueEntry.Status.QUEUED
+        if active:
+            status = models.HostQueueEntry.Status.RUNNING
+        job = models.Job.objects.create(
+            name='test', owner='my_user', priority=priority,
+            synch_count=synch_count, created_on=created_on,
+            reboot_before=models.RebootBefore.NEVER)
+        for host_id in hosts:
+            models.HostQueueEntry.objects.create(job=job, host_id=host_id,
+                                                 status=status,
+                                                 atomic_group_id=atomic_group)
+            models.IneligibleHostQueue.objects.create(job=job, host_id=host_id)
+        for label_id in metahosts:
+            models.HostQueueEntry.objects.create(job=job, meta_host_id=label_id,
+                                                 status=status,
+                                                 atomic_group_id=atomic_group)
+        if atomic_group and not (metahosts or hosts):
+            # Create a single HQE to request the atomic group of hosts even if
+            # no metahosts or hosts are supplied.
+            models.HostQueueEntry.objects.create(job=job,
+                                                 status=status,
+                                                 atomic_group_id=atomic_group)
+        return job
+
+
+    def _create_job_simple(self, hosts, use_metahost=False,
+                           priority=0, active=False):
+        """An alternative interface to _create_job"""
+        args = {'hosts' : [], 'metahosts' : []}
+        if use_metahost:
+            args['metahosts'] = hosts
+        else:
+            args['hosts'] = hosts
+        return self._create_job(priority=priority, active=active, **args)
diff --git a/frontend/afe/models.py b/frontend/afe/models.py
index 3474165..332a95f 100644
--- a/frontend/afe/models.py
+++ b/frontend/afe/models.py
@@ -644,12 +644,13 @@
             GROUP BY job_id, status, aborted, complete
             """ % id_list)
         all_job_counts = {}
-        for job_id in job_ids:
-            all_job_counts[job_id] = {}
         for job_id, status, aborted, complete, count in cursor.fetchall():
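+            # distinct (status, aborted, complete) groups may share a full status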
+            job_dict = all_job_counts.setdefault(job_id, {})
             full_status = HostQueueEntry.compute_full_status(status, aborted,
                                                              complete)
-            all_job_counts[job_id][full_status] = count
+            job_dict.setdefault(full_status, 0)
+            job_dict[full_status] += count
         return all_job_counts
 
 
diff --git a/frontend/afe/rpc_interface_unittest.py b/frontend/afe/rpc_interface_unittest.py
new file mode 100644
index 0000000..527c90a
--- /dev/null
+++ b/frontend/afe/rpc_interface_unittest.py
@@ -0,0 +1,41 @@
+#!/usr/bin/python2.4
+
+import unittest
+import common
+from autotest_lib.frontend import setup_django_environment
+from autotest_lib.frontend.afe import models, rpc_interface, frontend_test_utils
+
+
+_hqe_status = models.HostQueueEntry.Status
+
+
+class RpcInterfaceTest(unittest.TestCase,
+                       frontend_test_utils.FrontendTestMixin):
+    def setUp(self):
+        self._frontend_common_setup()
+
+
+    def tearDown(self):
+        self._frontend_common_teardown()
+
+
+    def test_get_jobs_summary(self):
+        job = self._create_job(hosts=xrange(1, 4))
+        entries = list(job.hostqueueentry_set.all())
+        entries[1].status = _hqe_status.FAILED
+        entries[1].save()
+        entries[2].status = _hqe_status.FAILED
+        entries[2].aborted = True
+        entries[2].save()
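+        # entries[1] and entries[2] both compute to full status 'Failed', but
+        # entries[2] was also aborted; the old code counted only one of them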
+
+        job_summaries = rpc_interface.get_jobs_summary(id=job.id)
+        self.assertEquals(len(job_summaries), 1)
+        summary = job_summaries[0]
+        self.assertEquals(summary['status_counts'], {'Queued': 1,
+                                                     'Failed': 2})
+
+
+if __name__ == '__main__':
+    unittest.main()