Autotest: Find long passing experimental tests.

This adds a script that finds any autotest test that has been marked as
experimental but has been passing for at least 30 days. An email is sent
listing these tests so that the appropriate action can be taken.

BUG=chromium:212671
TEST=Unittests. Manual testing by commenting out the code that sends emails
(so as not to spam people) and by commenting out the database-updating call
when checking against the real database.
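
For reviewers, the core detection rule in passing_experimental.py reduces to a
set difference over per-test timestamps. A condensed sketch of the same logic
as the full find_long_passing_tests below (pass_times/fail_times map test
names to the datetime of their last pass/failure):

  import datetime

  # Report tests that have not failed for at least this many days.
  _MIN_DAYS_SINCE_FAILURE = 30
  # Ignore any tests that have not passed in this many days.
  _MAX_DAYS_SINCE_LAST_PASS = 30

  def find_long_passing_tests(pass_times, fail_times, valid_names):
      today = datetime.datetime.today()
      fail_cutoff = today - datetime.timedelta(_MIN_DAYS_SINCE_FAILURE)
      pass_cutoff = today - datetime.timedelta(_MAX_DAYS_SINCE_LAST_PASS)
      recent_passes = {t for t in valid_names
                       if t in pass_times and pass_times[t] > pass_cutoff}
      recent_fails = {t for t in valid_names
                      if t in fail_times and fail_times[t] > fail_cutoff}
      # Tests that passed recently and have not failed recently are reported.
      return recent_passes - recent_fails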

Change-Id: Ieae34adc2ef49458e50a5dd7d6f9851743e80a10
Reviewed-on: https://gerrit.chromium.org/gerrit/63479
Reviewed-by: Dennis Jeffrey <dennisjeffrey@chromium.org>
Tested-by: Keyar Hood <keyar@chromium.org>
Commit-Queue: Keyar Hood <keyar@chromium.org>
diff --git a/frontend/health/passing_experimental.py b/frontend/health/passing_experimental.py
new file mode 100644
index 0000000..9696581
--- /dev/null
+++ b/frontend/health/passing_experimental.py
@@ -0,0 +1,112 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import datetime, logging, os, subprocess, sys
+
+import common
+from autotest_lib.client.common_lib import mail
+from autotest_lib.frontend import setup_django_readonly_environment
+
+# Django and the models are only set up after the
+# setup_django_readonly_environment module is imported.
+from autotest_lib.frontend.afe import models as afe_models
+from autotest_lib.frontend.health import utils
+
+
+# Report tests that have not failed for at least this many days.
+_MIN_DAYS_SINCE_FAILURE = 30
+# Ignore any tests that have not passed in this many days.
+_MAX_DAYS_SINCE_LAST_PASS = 30
+
+_MAIL_RESULTS_FROM = 'chromeos-test-health@google.com'
+_MAIL_RESULTS_TO = 'chromeos-lab-infrastructure@google.com'
+
+
+def update_afe_autotests_table():
+    """Runs the test_importer.py script to update the afe_autotests table."""
+    dirname = os.path.dirname(__file__)
+    utils_dir = os.path.abspath(os.path.join(dirname, os.pardir, os.pardir,
+                                             'utils'))
+    test_importer_script = os.path.join(utils_dir, 'test_importer.py')
+    return_code = subprocess.call([test_importer_script])
+
+    if return_code != 0:
+        logging.warning('Update DB failed: '
+                        'test_importer.py had nonzero return code %d.',
+                        return_code)
+
+
+def get_experimental_tests():
+    """
+    Get all the tests marked experimental from the afe_autotests table.
+
+    @return the set of experimental test names.
+
+    """
+    entries = afe_models.Test.objects.values('name').filter(experimental=True)
+    return {entry['name'] for entry in entries}
+
+
+def find_long_passing_tests(pass_times, fail_times, valid_names):
+    """
+    Determine the experimental tests that have been passing for a long time.
+
+    @param pass_times: The dictionary of test_name:pass_time pairs.
+    @param fail_times: The dictionary of test_name:fail_time pairs.
+    @param valid_names: An iterable of experimental test names.
+
+    @return the set of experimental test names that have been passing for a
+        long time.
+
+    """
+    failure_cutoff_date = (datetime.datetime.today() -
+                           datetime.timedelta(_MIN_DAYS_SINCE_FAILURE))
+    pass_cutoff_date = (datetime.datetime.today() -
+                        datetime.timedelta(_MAX_DAYS_SINCE_LAST_PASS))
+
+    valid_passes = {test for test in valid_names if test in pass_times}
+    valid_failures = {test for test in valid_names if test in fail_times}
+
+    recent_passes = {test for test in valid_passes
+                     if (pass_times[test] > pass_cutoff_date)}
+    recent_fails = {test for test in valid_failures
+                    if (fail_times[test] > failure_cutoff_date)}
+
+    return recent_passes - recent_fails
+
+
+def main():
+    """
+    Entry point for the script.
+
+    Keeping the logic in main() allows other Python code to import and run
+    it, which will matter more once a good way to test this code is found.
+
+    """
+    update_afe_autotests_table()
+
+    experimental_tests = get_experimental_tests()
+    pass_times = utils.get_last_pass_times()
+    fail_times = utils.get_last_fail_times()
+
+    long_passers = find_long_passing_tests(pass_times, fail_times,
+                                           experimental_tests)
+
+    if long_passers:
+        mail.send(_MAIL_RESULTS_FROM,
+                  [_MAIL_RESULTS_TO],
+                  [],
+                  'Long Passing Experimental Tests',
+                  'The following experimental tests have been passing for at '
+                  'least %i days:\n\n%s'
+                  % (_MIN_DAYS_SINCE_FAILURE, '\n'.join(sorted(long_passers))))
+
+    return 0
+
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/frontend/health/passing_experimental_functional_test.py b/frontend/health/passing_experimental_functional_test.py
new file mode 100755
index 0000000..bf72c7d
--- /dev/null
+++ b/frontend/health/passing_experimental_functional_test.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import datetime, subprocess, unittest
+
+import mox
+
+import common
+# This must come before the import of passing_experimental in order to use
+# the in-memory database.
+from autotest_lib.frontend import setup_django_readonly_environment
+from autotest_lib.frontend import setup_test_environment
+from autotest_lib.frontend.health import passing_experimental
+from autotest_lib.client.common_lib import mail
+from autotest_lib.frontend.afe import models as afe_models
+from autotest_lib.frontend.tko import models as tko_models
+from django import test
+
+
+GOOD_STATUS_IDX = 6
+FAIL_STATUS_IDX = 4
+
+# During the tests there is a point where Django does a type check on
+# datetime.datetime. Unfortunately this means that when datetime is mocked
+# out, horrible failures happen when Django tries to do this check. The
+# solution chosen is to create a pure Python class that inherits from
+# datetime.datetime so that the today class method can be directly mocked
+# out. It is necessary to mock out datetime.datetime completely as it is a C
+# class and so cannot have parts of itself mocked out.
+class MockDatetime(datetime.datetime):
+    """Used to mock out parts of datetime.datetime."""
+    pass
+
+
+class PassingExperimentalFunctionalTests(mox.MoxTestBase, test.TestCase):
+    """
+    Does a functional test of the passing_experimental.py script.
+
+    It uses an in-memory database, mocks out the call that updates the
+    afe_autotests table and mocks out the sending of the email. Everything
+    else is a full run.
+
+    """
+
+    def setUp(self):
+        super(PassingExperimentalFunctionalTests, self).setUp()
+        setup_test_environment.set_up()
+        # All of our tests will involve mocking out the
+        # datetime.datetime.today() class method.
+        self.mox.StubOutWithMock(MockDatetime, 'today')
+        self.datetime = datetime.datetime
+        datetime.datetime = MockDatetime
+        # We need to mock out the send function in all tests or else the
+        # emails will be sent out during tests.
+        self.mox.StubOutWithMock(mail, 'send')
+        # We really do not want a script that modifies the DB to run during
+        # testing, so we mock out subprocess.call directly in addition to the
+        # function that calls it, in case of future refactoring.
+        self.mox.StubOutWithMock(subprocess, 'call')
+        self.mox.StubOutWithMock(passing_experimental,
+                                 'update_afe_autotests_table')
+        self._orig_since_failure = passing_experimental._MIN_DAYS_SINCE_FAILURE
+        self._orig_since_pass = passing_experimental._MAX_DAYS_SINCE_LAST_PASS
+
+
+    def tearDown(self):
+        passing_experimental._MAX_DAYS_SINCE_LAST_PASS = self._orig_since_pass
+        passing_experimental._MIN_DAYS_SINCE_FAILURE = self._orig_since_failure
+        datetime.datetime = self.datetime
+        setup_test_environment.tear_down()
+        super(PassingExperimentalFunctionalTests, self).tearDown()
+
+
+    def test(self):
+        """Does a basic test of as much of the system as possible."""
+        afe_models.Test(name='test1', test_type=0, path='test1',
+            experimental=True).save()
+        afe_models.Test(name='test2', test_type=0, path='test2',
+            experimental=True).save()
+
+        tko_models.Status(status_idx=GOOD_STATUS_IDX, word='GOOD').save()
+
+        job = tko_models.Job(job_idx=1)
+        kernel = tko_models.Kernel(kernel_idx=1)
+        machine = tko_models.Machine(machine_idx=1)
+        success_status = tko_models.Status(status_idx=GOOD_STATUS_IDX)
+        fail_status = tko_models.Status(status_idx=FAIL_STATUS_IDX)
+
+        tko_test1 = tko_models.Test(job=job, status=success_status,
+                                    kernel=kernel, machine=machine,
+                                    test='test1',
+                                    started_time=self.datetime(2012, 1, 20))
+        tko_test1.save()
+        tko_test2 = tko_models.Test(job=job, status=success_status,
+                                    kernel=kernel, machine=machine,
+                                    test='test2',
+                                    started_time=self.datetime(2012, 1, 20))
+        tko_test2.save()
+
+        passing_experimental._MAX_DAYS_SINCE_LAST_PASS = 10
+        passing_experimental._MIN_DAYS_SINCE_FAILURE = 10
+
+        passing_experimental.update_afe_autotests_table()
+        MockDatetime.today().AndReturn(self.datetime(2012, 1, 21))
+        MockDatetime.today().AndReturn(self.datetime(2012, 1, 21))
+        mail.send('chromeos-test-health@google.com',
+                  ['chromeos-lab-infrastructure@google.com'],
+                  [],
+                  'Long Passing Experimental Tests',
+                  'The following experimental tests have been passing for at '
+                  'least %i days:\n\ntest1\ntest2'
+                  % passing_experimental._MIN_DAYS_SINCE_FAILURE)
+
+        self.mox.ReplayAll()
+        passing_experimental.main()
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/frontend/health/passing_experimental_unittests.py b/frontend/health/passing_experimental_unittests.py
new file mode 100755
index 0000000..04b0d77
--- /dev/null
+++ b/frontend/health/passing_experimental_unittests.py
@@ -0,0 +1,195 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import datetime, unittest
+
+import mox
+
+import common
+# This must come before the import of passing_experimental in order to use
+# the in-memory database.
+from autotest_lib.frontend import setup_django_readonly_environment
+from autotest_lib.frontend import setup_test_environment
+from autotest_lib.frontend.health import passing_experimental
+from autotest_lib.frontend.afe import models as afe_models
+from django import test
+
+
+# datetime.datetime is all C code and so cannot be mocked out in a normal
+# fashion.
+class MockDatetime(datetime.datetime):
+    """Used to mock out parts of datetime.datetime."""
+    pass
+
+
+class GetExperimentalTestsTests(test.TestCase):
+    """Tests the get_experimetnal_tests function."""
+
+    def setUp(self):
+        super(GetExperimentalTestsTests, self).setUp()
+        setup_test_environment.set_up()
+
+
+    def tearDown(self):
+        setup_test_environment.tear_down()
+        super(GetExperimentalTestsTests, self).tearDown()
+
+
+    def test_returns_tests_marked_experimental(self):
+        """Test that tests marked as experimental are returned."""
+        test = afe_models.Test(name='test', test_type=0,
+                               experimental=True)
+        test.save()
+
+        result = passing_experimental.get_experimental_tests()
+
+        self.assertEqual(result, set(['test']))
+
+
+    def test_does_not_return_tests_not_marked_experimental(self):
+        """Test that tests not marked as experimetnal are not returned."""
+        test = afe_models.Test(name='test', test_type=0,
+                               experimental=False)
+        test.save()
+
+        result = passing_experimental.get_experimental_tests()
+
+        self.assertEqual(result, set())
+
+
+class FindLongPassingTestsTests(mox.MoxTestBase, test.TestCase):
+    """Tests the find_long_passing_tests function."""
+    def setUp(self):
+        super(FindLongPassingTestsTests, self).setUp()
+        self.mox.StubOutWithMock(MockDatetime, 'today')
+        self._datetime = datetime.datetime
+        datetime.datetime = MockDatetime
+        self._orig_since_failure = passing_experimental._MIN_DAYS_SINCE_FAILURE
+        self._orig_since_pass = passing_experimental._MAX_DAYS_SINCE_LAST_PASS
+
+
+    def tearDown(self):
+        passing_experimental._MAX_DAYS_SINCE_LAST_PASS = self._orig_since_pass
+        passing_experimental._MIN_DAYS_SINCE_FAILURE = self._orig_since_failure
+        datetime.datetime = self._datetime
+        super(FindLongPassingTestsTests, self).tearDown()
+
+
+    def test_do_not_return_tests_that_have_failed_recently(self):
+        """Test that tests that have failed recently are not returned."""
+        passing_experimental._MIN_DAYS_SINCE_FAILURE = 10
+        datetime.datetime.today().AndReturn(self._datetime(2013, 3, 20))
+        datetime.datetime.today().AndReturn(self._datetime(2013, 3, 20))
+
+        pass_times = {'test': self._datetime(2013, 3, 12)}
+        fail_times = {'test': self._datetime(2013, 3, 13)}
+        valid_tests = {'test'}
+
+        self.mox.ReplayAll()
+        results = passing_experimental.find_long_passing_tests(pass_times,
+                                                               fail_times,
+                                                               valid_tests)
+
+        self.assertEqual(results, set([]))
+
+
+    def test_return_tests_that_have_recent_pass_but_not_recent_failure(self):
+        """Test returning tests that have recently passed but not failed."""
+        passing_experimental._MIN_DAYS_SINCE_FAILURE = 10
+        passing_experimental._MAX_DAYS_SINCE_LAST_PASS = 10
+        datetime.datetime.today().AndReturn(self._datetime(2013, 3, 20))
+        datetime.datetime.today().AndReturn(self._datetime(2013, 3, 20))
+
+        pass_times = {'test': self._datetime(2013, 3, 12)}
+        fail_times = {'test': self._datetime(2013, 3, 1)}
+        valid_tests = {'test'}
+
+        self.mox.ReplayAll()
+        results = passing_experimental.find_long_passing_tests(pass_times,
+                                                               fail_times,
+                                                               valid_tests)
+
+        self.assertEqual(results, set(['test']))
+
+
+    def test_filter_out_tests_that_have_not_passed_recently(self):
+        """Test that tests that have not recently passed are not returned."""
+        passing_experimental._MIN_DAYS_SINCE_FAILURE = 10
+        passing_experimental._MAX_DAYS_SINCE_LAST_PASS = 10
+        datetime.datetime.today().AndReturn(self._datetime(2013, 3, 20))
+        datetime.datetime.today().AndReturn(self._datetime(2013, 3, 20))
+
+        pass_times = {'test': self._datetime(2013, 3, 1)}
+        fail_times = {'test': self._datetime(2013, 3, 1)}
+        valid_tests = {'test'}
+
+        self.mox.ReplayAll()
+        results = passing_experimental.find_long_passing_tests(pass_times,
+                                                               fail_times,
+                                                               valid_tests)
+
+        self.assertEqual(results, set([]))
+
+
+    def test_filter_out_tests_that_are_not_valid(self):
+        """Test that tests that are not valid are not returned."""
+        passing_experimental._MIN_DAYS_SINCE_FAILURE = 10
+        passing_experimental._MAX_DAYS_SINCE_LAST_PASS = 10
+        datetime.datetime.today().AndReturn(self._datetime(2013, 3, 20))
+        datetime.datetime.today().AndReturn(self._datetime(2013, 3, 20))
+
+        pass_times = {'test2': self._datetime(2013, 3, 1)}
+        fail_times = {'test2': self._datetime(2013, 3, 1)}
+        valid_tests = {'test'}
+
+        self.mox.ReplayAll()
+        results = passing_experimental.find_long_passing_tests(pass_times,
+                                                               fail_times,
+                                                               valid_tests)
+
+        self.assertEqual(results, set([]))
+
+
+    def test_return_tests_that_have_recently_passed_and_never_failed(self):
+        """Test that we can handle tests that have never failed."""
+        passing_experimental._MIN_DAYS_SINCE_FAILURE = 10
+        passing_experimental._MAX_DAYS_SINCE_LAST_PASS = 10
+        datetime.datetime.today().AndReturn(self._datetime(2013, 3, 20))
+        datetime.datetime.today().AndReturn(self._datetime(2013, 3, 20))
+
+        pass_times = {'test': self._datetime(2013, 3, 11)}
+        fail_times = {}
+        valid_tests = {'test'}
+
+        self.mox.ReplayAll()
+        results = passing_experimental.find_long_passing_tests(pass_times,
+                                                               fail_times,
+                                                               valid_tests)
+
+        self.assertEqual(results, set(['test']))
+
+
+    def test_handle_tests_that_have_never_passed(self):
+        """Test that we can handle tests that have never passed."""
+        passing_experimental._MIN_DAYS_SINCE_FAILURE = 10
+        passing_experimental._MAX_DAYS_SINCE_LAST_PASS = 10
+        datetime.datetime.today().AndReturn(self._datetime(2013, 3, 20))
+        datetime.datetime.today().AndReturn(self._datetime(2013, 3, 20))
+
+        pass_times = {}
+        fail_times = {'test': self._datetime(2013, 3, 11)}
+        valid_tests = {'test'}
+
+        self.mox.ReplayAll()
+        results = passing_experimental.find_long_passing_tests(pass_times,
+                                                               fail_times,
+                                                               valid_tests)
+
+        self.assertEqual(results, set([]))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/frontend/health/utils.py b/frontend/health/utils.py
index 8b3b58e..11186c9 100644
--- a/frontend/health/utils.py
+++ b/frontend/health/utils.py
@@ -10,7 +10,13 @@
 from autotest_lib.frontend.tko import models as tko_models
 from django.db import models as django_models
 
+_TEST_ERROR_STATUS = 'ERROR'
+_TEST_ABORT_STATUS = 'ABORT'
+_TEST_FAIL_STATUS = 'FAIL'
+_TEST_WARN_STATUS = 'WARN'
 _TEST_PASS_STATUS = 'GOOD'
+_TEST_ALERT_STATUS = 'ALERT'
+
 
 def get_last_pass_times():
     """
@@ -25,3 +31,24 @@
         last_pass=django_models.Max('started_time'))
     return {result['test']: result['last_pass'] for result in results}
 
+
+def get_last_fail_times():
+    """
+    Get all the tests that have failed and the time they last failed.
+
+    @return the dict of test_name:last_fail_time pairs for tests that have
+            failed.
+
+    """
+
+    failure_clauses = (django_models.Q(status__word=_TEST_FAIL_STATUS) |
+                       django_models.Q(status__word=_TEST_ERROR_STATUS) |
+                       django_models.Q(status__word=_TEST_ABORT_STATUS) |
+                       django_models.Q(status__word=_TEST_WARN_STATUS) |
+                       django_models.Q(status__word=_TEST_ALERT_STATUS))
+
+    results = tko_models.Test.objects.values('test').filter(
+        failure_clauses).annotate(
+        last_fail=django_models.Max('started_time'))
+
+    return {result['test']: result['last_fail'] for result in results}
diff --git a/frontend/health/utils_unittests.py b/frontend/health/utils_unittests.py
index b0c1318..5f7620b 100755
--- a/frontend/health/utils_unittests.py
+++ b/frontend/health/utils_unittests.py
@@ -17,8 +17,12 @@
 from autotest_lib.frontend.tko import models
 from django import test
 
+ERROR_STATUS = models.Status(status_idx=2, word='ERROR')
+ABORT_STATUS = models.Status(status_idx=3, word='ABORT')
 FAIL_STATUS = models.Status(status_idx=4, word='FAIL')
+WARN_STATUS = models.Status(status_idx=5, word='WARN')
 GOOD_STATUS = models.Status(status_idx=6, word='GOOD')
+ALERT_STATUS = models.Status(status_idx=7, word='ALERT')
 
 
 def add_statuses():
@@ -28,8 +32,12 @@
     These normally exist in the database and the code expects them. However, the
     normal test database setup does not do this for us.
     """
+    ERROR_STATUS.save()
+    ABORT_STATUS.save()
     FAIL_STATUS.save()
+    WARN_STATUS.save()
     GOOD_STATUS.save()
+    ALERT_STATUS.save()
 
 
 class GetLastPassTimesTests(mox.MoxTestBase, test.TestCase):
@@ -117,5 +125,163 @@
                                     'test2': datetime.datetime(2012, 1, 2)})
 
 
+class GetLastFailTimesTests(mox.MoxTestBase, test.TestCase):
+    """Tests the get_last_fail_times function."""
+
+    def setUp(self):
+        super(GetLastFailTimesTests, self).setUp()
+        setup_test_environment.set_up()
+        add_statuses()
+
+
+    def tearDown(self):
+        setup_test_environment.tear_down()
+        super(GetLastFailTimesTests, self).tearDown()
+
+
+    def test_return_most_recent_fail(self):
+        """The last time a test failed should be returned."""
+        # To add a test entry to the database, the test object has to
+        # be instantiated with various other model instances. We give these
+        # instances dummy id values.
+        job = models.Job(job_idx=1)
+        kernel = models.Kernel(kernel_idx=1)
+        machine = models.Machine(machine_idx=1)
+
+        early_fail = models.Test(job=job, status=FAIL_STATUS,
+                                 kernel=kernel, machine=machine,
+                                 test='test',
+                                 started_time=datetime.datetime(2012, 1, 1))
+        early_fail.save()
+        late_fail = models.Test(job=job, status=FAIL_STATUS,
+                                kernel=kernel, machine=machine,
+                                test='test',
+                                started_time=datetime.datetime(2012, 1, 2))
+        late_fail.save()
+
+        results = utils.get_last_fail_times()
+
+        self.assertEqual(results, {'test': datetime.datetime(2012, 1, 2)})
+
+
+    def test_does_not_return_passing_tests(self):
+        """Test that passing test entries are not included."""
+        job = models.Job(job_idx=1)
+        kernel = models.Kernel(kernel_idx=1)
+        machine = models.Machine(machine_idx=1)
+
+        passing_test = models.Test(job=job, status=GOOD_STATUS,
+                                   kernel=kernel, machine=machine,
+                                   test='passing_test',
+                                   started_time=datetime.datetime(2012, 1, 1))
+        passing_test.save()
+        failing_test = models.Test(job=job, status=FAIL_STATUS,
+                                   kernel=kernel, machine=machine,
+                                   test='failing_test',
+                                   started_time=datetime.datetime(2012, 1, 1))
+        failing_test.save()
+
+        results = utils.get_last_fail_times()
+
+        self.assertEqual(results,
+                         {'failing_test': datetime.datetime(2012, 1, 1)})
+
+
+    def test_return_all_failing_tests(self):
+        """This function returns all tests that failed at least once."""
+        job = models.Job(job_idx=1)
+        kernel = models.Kernel(kernel_idx=1)
+        machine = models.Machine(machine_idx=1)
+
+        test1 = models.Test(job=job, status=FAIL_STATUS,
+                            kernel=kernel, machine=machine,
+                            test='test1',
+                            started_time=datetime.datetime(2012, 1, 1))
+        test1.save()
+        test2 = models.Test(job=job, status=FAIL_STATUS,
+                            kernel=kernel, machine=machine,
+                            test='test2',
+                            started_time=datetime.datetime(2012, 1, 2))
+        test2.save()
+
+        results = utils.get_last_fail_times()
+
+        self.assertEqual(results, {'test1': datetime.datetime(2012, 1, 1),
+                                   'test2': datetime.datetime(2012, 1, 2)})
+
+
+    def test_treats_error_status_as_failure(self):
+        """Error statuses should count as a failure."""
+        job = models.Job(job_idx=1)
+        kernel = models.Kernel(kernel_idx=1)
+        machine = models.Machine(machine_idx=1)
+
+        test = models.Test(job=job, status=ERROR_STATUS,
+                           kernel=kernel, machine=machine,
+                           test='error',
+                           started_time=datetime.datetime(2012, 1, 1))
+        test.save()
+
+        results = utils.get_last_fail_times()
+
+        self.assertEqual(results, {'error': datetime.datetime(2012, 1, 1)})
+
+
+    def test_treats_abort_status_as_failure(self):
+        """
+        Abort statuses should count as failures.
+
+        This should be changed once Abort only represents user caused aborts.
+        See issue crbug.com/188217.
+        """
+        job = models.Job(job_idx=1)
+        kernel = models.Kernel(kernel_idx=1)
+        machine = models.Machine(machine_idx=1)
+
+        test = models.Test(job=job, status=ABORT_STATUS,
+                           kernel=kernel, machine=machine,
+                           test='abort',
+                           started_time=datetime.datetime(2012, 1, 1))
+        test.save()
+
+        results = utils.get_last_fail_times()
+
+        self.assertEqual(results, {'abort': datetime.datetime(2012, 1, 1)})
+
+
+    def test_treats_warn_status_as_failure(self):
+        """Warn statuses should count as failures."""
+        job = models.Job(job_idx=1)
+        kernel = models.Kernel(kernel_idx=1)
+        machine = models.Machine(machine_idx=1)
+
+        test = models.Test(job=job, status=WARN_STATUS,
+                           kernel=kernel, machine=machine,
+                           test='warn',
+                           started_time=datetime.datetime(2012, 1, 1))
+        test.save()
+
+        results = utils.get_last_fail_times()
+
+        self.assertEqual(results, {'warn': datetime.datetime(2012, 1, 1)})
+
+
+    def test_treats_alert_status_as_failure(self):
+        """Alert statuses should count as failures."""
+        job = models.Job(job_idx=1)
+        kernel = models.Kernel(kernel_idx=1)
+        machine = models.Machine(machine_idx=1)
+
+        test = models.Test(job=job, status=ALERT_STATUS,
+                           kernel=kernel, machine=machine,
+                           test='alert',
+                           started_time=datetime.datetime(2012, 1, 1))
+        test.save()
+
+        results = utils.get_last_fail_times()
+
+        self.assertEqual(results, {'alert': datetime.datetime(2012, 1, 1)})
+
+
 if __name__ == '__main__':
     unittest.main()