Add a new status field called TEST_NA (in addition to PASS and FAIL).
From: Travis Miller
git-svn-id: http://test.kernel.org/svn/autotest/trunk@1476 592f7852-d20e-0410-864c-8624ca9c26a4
diff --git a/client/bin/job.py b/client/bin/job.py
index babee6b..de2f73a 100755
--- a/client/bin/job.py
+++ b/client/bin/job.py
@@ -7,15 +7,14 @@
# standard stuff
import os, sys, re, pickle, shutil, time, traceback, types, copy
+
# autotest stuff
-from autotest_utils import *
-from parallel import *
-from common.error import *
-from common import barrier
-import kernel, xen, test, profilers, filesystem, fd_stack, boottool
-import harness, config
-import sysinfo
-import cpuset
+from autotest_lib.client.bin import autotest_utils
+from autotest_lib.client.common_lib import error, barrier, logging
+
+import parallel, kernel, xen, test, profilers, filesystem, fd_stack, boottool
+import harness, config, sysinfo, cpuset
+
JOB_PREAMBLE = """
@@ -24,7 +23,7 @@
"""
-class StepError(AutotestError):
+class StepError(error.AutotestError):
pass
@@ -90,9 +89,10 @@
if not cont:
if os.path.exists(self.tmpdir):
- system('umount -f %s > /dev/null 2> /dev/null'%\
- self.tmpdir, ignorestatus=True)
- system('rm -rf ' + self.tmpdir)
+ cmd = ('umount -f %s > /dev/null 2> /dev/null'
+ % (self.tmpdir))
+ autotest_utils.system(cmd, ignorestatus=True)
+ autotest_utils.system('rm -rf ' + self.tmpdir)
os.mkdir(self.tmpdir)
results = os.path.join(self.autodir, 'results')
@@ -101,11 +101,12 @@
download = os.path.join(self.testdir, 'download')
if os.path.exists(download):
- system('rm -rf ' + download)
+ autotest_utils.system('rm -rf ' + download)
os.mkdir(download)
if os.path.exists(self.resultdir):
- system('rm -rf ' + self.resultdir)
+ autotest_utils.system('rm -rf '
+ + self.resultdir)
os.mkdir(self.resultdir)
os.mkdir(self.sysinfodir)
@@ -231,21 +232,21 @@
for dep in deps:
try:
os.chdir(os.path.join(self.autodir, 'deps', dep))
- system('./' + dep + '.py')
+ autotest_utils.system('./' + dep + '.py')
except:
- error = "setting up dependency " + dep + "\n"
- raise UnhandledError(error)
+ err = "setting up dependency " + dep + "\n"
+ raise error.UnhandledError(err)
def __runtest(self, url, tag, args, dargs):
try:
l = lambda : test.runtest(self, url, tag, args, dargs)
- pid = fork_start(self.resultdir, l)
- fork_waitfor(self.resultdir, pid)
- except AutotestError:
+ pid = parallel.fork_start(self.resultdir, l)
+ parallel.fork_waitfor(self.resultdir, pid)
+ except error.AutotestError:
raise
except:
- raise UnhandledError('running test ' + \
+ raise error.UnhandledError('running test ' + \
self.__class__.__name__ + "\n")
@@ -259,7 +260,8 @@
"""
if not url:
- raise TypeError("Test name is invalid. Switched arguments?")
+ raise TypeError("Test name is invalid. "
+ "Switched arguments?")
(group, testname) = test.testname(url)
tag = dargs.pop('tag', None)
container = dargs.pop('container', None)
@@ -285,6 +287,10 @@
def group_func():
try:
self.__runtest(url, tag, args, dargs)
+ except error.TestNAError, detail:
+ self.record('TEST_NA', subdir, testname,
+ str(detail))
+ raise
except Exception, detail:
self.record('FAIL', subdir, testname,
str(detail))
@@ -295,7 +301,7 @@
result, exc_info = self.__rungroup(subdir, group_func)
if container:
self.release_container()
- if exc_info and isinstance(exc_info[1], TestError):
+ if exc_info and isinstance(exc_info[1], error.TestError):
return False
elif exc_info:
raise exc_info[0], exc_info[1], exc_info[2]
@@ -325,10 +331,12 @@
result = function(*args, **dargs)
self.group_level -= 1
self.record('END GOOD', None, name)
+ except error.TestNAError, e:
+ self.record('END TEST_NA', None, name, str(e))
except Exception, e:
exc_info = sys.exc_info()
self.group_level -= 1
- err_msg = str(e) + '\n' + format_error()
+ err_msg = str(e) + '\n' + traceback.format_exc()
self.record('END FAIL', None, name, err_msg)
return result, exc_info
@@ -352,9 +360,9 @@
*args, **dargs)
# if there was a non-TestError exception, raise it
- if exc_info and not isinstance(exc_info[1], TestError):
+ if exc_info and not isinstance(exc_info[1], error.TestError):
err = ''.join(traceback.format_exception(*exc_info))
- raise TestError(name + ' failed\n' + err)
+ raise error.TestError(name + ' failed\n' + err)
# pass back the actual return value from the function
return result
@@ -421,7 +429,7 @@
print "Command Line Mark: %d" % (cmdline_when)
print " Command Line: " + cmdline
- raise JobError("boot failure", "reboot.verify")
+ raise error.JobError("boot failure", "reboot.verify")
self.record('GOOD', subdir, 'reboot.verify', expected_id)
@@ -453,7 +461,8 @@
self.bootloader.set_default(tag)
else:
self.bootloader.boot_once(tag)
- system("(sleep 5; reboot) </dev/null >/dev/null 2>&1 &")
+ cmd = "(sleep 5; reboot) </dev/null >/dev/null 2>&1 &"
+ autotest_utils.system(cmd)
self.quit()
@@ -469,7 +478,8 @@
for i, task in enumerate(tasklist):
self.log_filename = old_log_filename + (".%d" % i)
task_func = lambda: task[0](*task[1:])
- pids.append(fork_start(self.resultdir, task_func))
+ pids.append(parallel.fork_start(self.resultdir,
+ task_func))
old_log_path = os.path.join(self.resultdir, old_log_filename)
old_log = open(old_log_path, "a")
@@ -477,7 +487,7 @@
for i, pid in enumerate(pids):
# wait for the task to finish
try:
- fork_waitfor(self.resultdir, pid)
+ parallel.fork_waitfor(self.resultdir, pid)
except Exception, e:
exceptions.append(e)
# copy the logs from the subtask into the main log
@@ -495,13 +505,13 @@
# handle any exceptions raised by the parallel tasks
if exceptions:
msg = "%d task(s) failed" % len(exceptions)
- raise JobError(msg, str(exceptions), exceptions)
+ raise error.JobError(msg, str(exceptions), exceptions)
def quit(self):
# XXX: should have a better name.
self.harness.run_pause()
- raise JobContinue("more to come")
+ raise error.JobContinue("more to come")
def complete(self, status):
@@ -649,8 +659,7 @@
else:
substr = '----'
- if not re.match(r'(START|(END )?(GOOD|WARN|FAIL|ABORT))$', \
- status_code):
+ if not logging.is_valid_status(status_code):
raise ValueError("Invalid status code supplied: %s" % status_code)
if not operation:
operation = '----'
@@ -719,12 +728,13 @@
try:
# Check that the control file is valid
if not os.path.exists(control):
- raise JobError(control + ": control file not found")
+ raise error.JobError(control +
+ ": control file not found")
# When continuing, the job is complete when there is no
# state file, ensure we don't try and continue.
if cont and not os.path.exists(state):
- raise JobComplete("all done")
+ raise error.JobComplete("all done")
if cont == False and os.path.exists(state):
os.unlink(state)
@@ -736,13 +746,13 @@
# 2) define steps, and select the first via next_step()
myjob.step_engine()
- except JobContinue:
+ except error.JobContinue:
sys.exit(5)
- except JobComplete:
+ except error.JobComplete:
sys.exit(1)
- except JobError, instance:
+ except error.JobError, instance:
print "JOB ERROR: " + instance.args[0]
if myjob:
command = None
@@ -756,7 +766,7 @@
sys.exit(1)
except Exception, e:
- msg = str(e) + '\n' + format_error()
+ msg = str(e) + '\n' + traceback.format_exc()
print "JOB ERROR: " + msg
if myjob:
myjob.group_level = 0
diff --git a/client/common_lib/error.py b/client/common_lib/error.py
index 5f960f3..b373ddb 100644
--- a/client/common_lib/error.py
+++ b/client/common_lib/error.py
@@ -34,6 +34,10 @@
"""Indicates an error which terminates and fails the test."""
pass
+class TestNAError(AutotestError):
+    """Indicates that the test is Not Applicable. Should be thrown
+    when various conditions are such that the test is inappropriate."""
+ pass
class CmdError(TestError):
"""\
diff --git a/client/common_lib/logging.py b/client/common_lib/logging.py
index 0bb403c..004975a 100644
--- a/client/common_lib/logging.py
+++ b/client/common_lib/logging.py
@@ -2,9 +2,22 @@
Logging helper tools.
"""
+import re
+
__author__ = 'jadmanski@google.com (John Admanski)'
+job_statuses = ["TEST_NA", "ABORT", "ERROR", "FAIL", "WARN", "GOOD", "ALERT",
+ "NOSTATUS"]
+
+def is_valid_status(status):
+ if not re.match(r'(START|(END )?(GOOD|WARN|FAIL|ABORT|TEST_NA))$',
+ status):
+ return False
+ else:
+ return True
+
+
def record(fn):
"""
Generic method decorator for logging calls under the
diff --git a/server/server_job.py b/server/server_job.py
index cba325a..a11e2d3 100755
--- a/server/server_job.py
+++ b/server/server_job.py
@@ -14,7 +14,7 @@
import os, sys, re, time, select, subprocess, traceback
from autotest_lib.client.bin import fd_stack
-from autotest_lib.client.common_lib import error
+from autotest_lib.client.common_lib import error, logging
from autotest_lib.server import test, subcommand
from autotest_lib.tko import db as tko_db, status_lib, utils as tko_utils
from autotest_lib.server.utils import *
@@ -39,14 +39,12 @@
from autotest_lib.server import git_kernel
from autotest_lib.server.subcommand import *
from autotest_lib.server.utils import run, get_tmp_dir, sh_escape
-
from autotest_lib.client.common_lib.error import *
from autotest_lib.client.common_lib import barrier
autotest.Autotest.job = job
hosts.SSHHost.job = job
barrier = barrier.barrier
-
if len(machines) > 1:
open('.machines', 'w').write('\\n'.join(machines) + '\\n')
"""
@@ -230,7 +228,8 @@
namespace = {'machines' : self.machines, 'job' : self}
exec(preamble + verify, namespace, namespace)
except Exception, e:
- msg = 'Verify failed\n' + str(e) + '\n' + traceback.format_exc()
+ msg = ('Verify failed\n' + str(e) + '\n'
+ + traceback.format_exc())
self.record('ABORT', None, None, msg)
raise
@@ -355,9 +354,11 @@
try:
test.runtest(self, url, tag, args, dargs)
self.record('GOOD', subdir, testname, 'completed successfully')
+ except error.TestNAError, detail:
+        self.record('TEST_NA', subdir, testname, str(detail))
except Exception, detail:
- self.record('FAIL', subdir, testname,
- str(detail) + "\n" + traceback.format_exc())
+ info = str(detail) + "\n" + traceback.format_exc()
+ self.record('FAIL', subdir, testname, info)
def run_group(self, function, *args, **dargs):
@@ -390,7 +391,9 @@
self.record('END GOOD', None, name)
except:
self.record_prefix = old_record_prefix
- self.record('END FAIL', None, name, traceback.format_exc())
+ self.record('END FAIL', None, name,
+ traceback.format_exc())
+
# We don't want to raise up an error higher if it's just
# a TestError - we want to carry on to other tests. Hence
# this outer try/except block.
@@ -413,9 +416,8 @@
Format is <status code>\t<subdir>\t<operation>\t<status>
- status code: (GOOD|WARN|FAIL|ABORT)
- or START
- or END (GOOD|WARN|FAIL|ABORT)
+ status code: see common_lib.logging.is_valid_status()
+ for valid status definition
subdir: MUST be a relevant subdirectory in the results,
or None, which will be represented as '----'
@@ -488,8 +490,7 @@
else:
substr = '----'
- if not re.match(r'(START|(END )?(GOOD|WARN|FAIL|ABORT))$', \
- status_code):
+ if not logging.is_valid_status(status_code):
raise ValueError('Invalid status code supplied: %s' %
status_code)
if not operation:
diff --git a/tko/display.py b/tko/display.py
index 78688d6..a4681a8 100755
--- a/tko/display.py
+++ b/tko/display.py
@@ -100,6 +100,7 @@
def grade_from_status(status):
# % of goodness
# GOOD (6) -> 1
+ # TEST_NA (8) is not counted
# ## If the test doesn't PASS, it FAILS
# else -> 0
@@ -113,9 +114,14 @@
average_grade = 0
total_count = 0
for key in status_count.keys():
- average_grade += grade_from_status(key)*status_count[key]
- total_count += status_count[key]
- average_grade = average_grade / total_count
+ if key != 8: # TEST_NA status
+ average_grade += (grade_from_status(key)
+ * status_count[key])
+ total_count += status_count[key]
+ if total_count != 0:
+ average_grade = average_grade / total_count
+ else:
+ average_grade = 0.0
return average_grade
diff --git a/tko/migrations/005_add_testna_status.py b/tko/migrations/005_add_testna_status.py
new file mode 100644
index 0000000..e22abe5
--- /dev/null
+++ b/tko/migrations/005_add_testna_status.py
@@ -0,0 +1,6 @@
+def migrate_up(monger):
+ monger.execute("INSERT INTO status (word) values ('TEST_NA')")
+
+
+def migrate_down(monger):
+ monger.execute("DELETE FROM status where word = 'TEST_NA'")
diff --git a/tko/status_lib.py b/tko/status_lib.py
index 3f21396..6e430a3 100644
--- a/tko/status_lib.py
+++ b/tko/status_lib.py
@@ -1,9 +1,10 @@
import collections
+import common
+from autotest_lib.client.common_lib import logging
class status_stack(object):
- statuses = ["ABORT", "ERROR", "FAIL", "WARN", "GOOD", "ALERT",
- "NOSTATUS"]
+ statuses = logging.job_statuses
def __init__(self):
diff --git a/tko/status_lib_unittest.py b/tko/status_lib_unittest.py
index 2e6ba70..7df017d 100644
--- a/tko/status_lib_unittest.py
+++ b/tko/status_lib_unittest.py
@@ -1,9 +1,9 @@
#!/usr/bin/python
import unittest
-
import common
from autotest_lib.tko import status_lib
+from autotest_lib.client.common_lib import logging
class line_buffer_test(unittest.TestCase):
@@ -78,6 +78,8 @@
class status_stack_test(unittest.TestCase):
+ statuses = logging.job_statuses
+
def testDefaultToNOSTATUS(self):
stack = status_lib.status_stack()
self.assertEquals(stack.current_status(), "NOSTATUS")
@@ -101,20 +103,18 @@
def testAnythingOverridesNostatus(self):
- statuses = ["ABORT", "ERROR", "FAIL", "WARN", "GOOD"]
- for status in statuses:
+ for status in self.statuses:
stack = status_lib.status_stack()
stack.update(status)
self.assertEquals(stack.current_status(), status)
def testWorseOverridesBetter(self):
- statuses = ["ABORT", "ERROR", "FAIL", "WARN", "GOOD"]
- for i in xrange(len(statuses)):
- worse_status = statuses[i]
- for j in xrange(i + 1, len(statuses)):
+ for i in xrange(len(self.statuses)):
+ worse_status = self.statuses[i]
+ for j in xrange(i + 1, len(self.statuses)):
stack = status_lib.status_stack()
- better_status = statuses[j]
+ better_status = self.statuses[j]
stack.update(better_status)
stack.update(worse_status)
self.assertEquals(stack.current_status(),
@@ -122,12 +122,11 @@
def testBetterNeverOverridesBetter(self):
- statuses = ["ABORT", "ERROR", "FAIL", "WARN", "GOOD"]
- for i in xrange(len(statuses)):
- better_status = statuses[i]
+ for i in xrange(len(self.statuses)):
+ better_status = self.statuses[i]
for j in xrange(i):
stack = status_lib.status_stack()
- worse_status = statuses[j]
+ worse_status = self.statuses[j]
stack.update(worse_status)
stack.update(better_status)
self.assertEquals(stack.current_status(),