Initial release of test auto importer
Update models.py to reflect database changes
Add the following columns to autotests table:
* author
* dependencies
* experimental
* run_verify
* test_time
* test_category
* sync_count
Add run_verify to jobs table
Update scheduler to assert with run_verify
Risk: Medium
Visibility: High, people adding tests will now see more fields via the admin frontend
Signed-off-by: Scott Zawalski <scottz@google.com>
git-svn-id: http://test.kernel.org/svn/autotest/trunk@1837 592f7852-d20e-0410-864c-8624ca9c26a4
diff --git a/frontend/afe/doctests/001_rpc_test.txt b/frontend/afe/doctests/001_rpc_test.txt
index 3815736..d3e5679 100644
--- a/frontend/afe/doctests/001_rpc_test.txt
+++ b/frontend/afe/doctests/001_rpc_test.txt
@@ -91,16 +91,25 @@
True
# tests...
->>> rpc_interface.add_test(name='sleeptest', test_type='Client',
+>>> rpc_interface.add_test(name='sleeptest', test_type='Client', author='Test',
+... description='Sleep Test', test_time=1,
+... test_category='Functional',
... test_class='Kernel', path='sleeptest')
1L
>>> rpc_interface.modify_test('sleeptest', path='/my/path')
>>> data = rpc_interface.get_tests()
>>> data == [{'id': 1L,
... 'name': 'sleeptest',
-... 'description': '',
+... 'author': 'Test',
+... 'description': 'Sleep Test',
+... 'dependencies': '',
+... 'experimental': 1,
+... 'sync_count': 1L,
... 'test_type': 'Client',
... 'test_class': 'Kernel',
+... 'test_time': 'SHORT',
+... 'run_verify': 1,
+... 'test_category': 'Functional',
... 'synch_type': 'Asynchronous',
... 'path': '/my/path'}]
True
@@ -303,10 +312,13 @@
>>> rpc_interface.add_label(name='my_label', kernel_config='my_kernel_config')
5L
>>> test_control_path = os.path.join(test_path, 'test.control')
->>> rpc_interface.add_test(name='sleeptest', test_type='Client',
+>>> rpc_interface.add_test(name='sleeptest', test_type='Client', author='Test',
+... test_category='Test',
... test_class='Kernel', path=test_control_path)
2L
->>> rpc_interface.add_test(name='my_test', test_type='Client',
+>>> test_control_path = os.path.join(test_path, 'test.control.2')
+>>> rpc_interface.add_test(name='my_test', test_type='Client', author='Test',
+... test_category='Test',
... test_class='Kernel', path=test_control_path)
3L
>>> rpc_interface.add_host(hostname='my_label_host1')
@@ -387,7 +399,8 @@
... 'synch_count': None,
... 'synch_type': 'Asynchronous',
... 'synchronizing': 0,
-... 'timeout': 72}
+... 'timeout': 72,
+... 'run_verify': 1}
True
# get_host_queue_entries returns a lot of data, so let's only check a couple
diff --git a/frontend/afe/doctests/test.control.2 b/frontend/afe/doctests/test.control.2
new file mode 100644
index 0000000..653fafe
--- /dev/null
+++ b/frontend/afe/doctests/test.control.2
@@ -0,0 +1 @@
+job.run_test('testname')
diff --git a/frontend/afe/models.py b/frontend/afe/models.py
index 1e48f6d..e9bc2c8 100644
--- a/frontend/afe/models.py
+++ b/frontend/afe/models.py
@@ -189,30 +189,45 @@
class Test(dbmodels.Model, model_logic.ModelExtensions):
"""\
Required:
+ author: author name
+ description: description of the test
name: test name
+    test_time: short, medium, long
+    test_class: This describes the class the test belongs in.
+ test_category: This describes the category for your tests
test_type: Client or Server
path: path to pass to run_test()
synch_type: whether the test should run synchronously or asynchronously
-
+ sync_count: is a number >=1 (1 being the default). If it's 1, then it's an
+ async job. If it's >1 it's sync job for that number of machines
+ i.e. if sync_count = 2 it is a sync job that requires two
+ machines.
Optional:
- test_class: used for categorization of tests
- description: arbirary text description
+    dependencies: What the test requires to run. Comma delimited list
+ experimental: If this is set to True production servers will ignore the test
+ run_verify: Whether or not the scheduler should run the verify stage
"""
- Classes = enum.Enum('Kernel', 'Hardware', 'Canned Test Sets',
- string_values=True)
+ TestTime = enum.Enum('SHORT', 'MEDIUM', 'LONG', start_value=1)
SynchType = enum.Enum('Asynchronous', 'Synchronous', start_value=1)
# TODO(showard) - this should be merged with Job.ControlType (but right
# now they use opposite values)
Types = enum.Enum('Client', 'Server', start_value=1)
name = dbmodels.CharField(maxlength=255, unique=True)
- test_class = dbmodels.CharField(maxlength=255,
- choices=Classes.choices())
+ author = dbmodels.CharField(maxlength=255)
+ test_class = dbmodels.CharField(maxlength=255)
+ test_category = dbmodels.CharField(maxlength=255)
+ dependencies = dbmodels.CharField(maxlength=255, blank=True)
description = dbmodels.TextField(blank=True)
+ experimental = dbmodels.BooleanField(default=True)
+ run_verify = dbmodels.BooleanField(default=True)
+ test_time = dbmodels.SmallIntegerField(choices=TestTime.choices(),
+ default=TestTime.MEDIUM)
test_type = dbmodels.SmallIntegerField(choices=Types.choices())
+ sync_count = dbmodels.IntegerField(default=1)
synch_type = dbmodels.SmallIntegerField(choices=SynchType.choices(),
default=SynchType.ASYNCHRONOUS)
- path = dbmodels.CharField(maxlength=255)
+ path = dbmodels.CharField(maxlength=255, unique=True)
name_field = 'name'
objects = model_logic.ExtendedManager()
@@ -224,11 +239,12 @@
class Admin:
fields = (
(None, {'fields' :
- ('name', 'test_class', 'test_type', 'synch_type',
- 'path', 'description')}),
+ ('name', 'author', 'test_category', 'test_class',
+ 'test_time', 'synch_type', 'test_type', 'sync_count',
+ 'path', 'dependencies', 'experimental', 'run_verify',
+ 'description')}),
)
- list_display = ('name', 'test_type', 'synch_type',
- 'description')
+ list_display = ('name', 'test_type', 'description', 'synch_type')
search_fields = ('name',)
def __str__(self):
@@ -447,6 +463,7 @@
synch_type: Asynchronous or Synchronous (i.e. job must run on all hosts
simultaneously; used for server-side control files)
synch_count: ???
+ run_verify: Whether or not to run the verify phase
synchronizing: for scheduler use
timeout: hours until job times out
"""
@@ -469,6 +486,7 @@
blank=True, null=True, choices=Test.SynchType.choices())
synch_count = dbmodels.IntegerField(blank=True, null=True)
synchronizing = dbmodels.BooleanField(default=False)
+ run_verify = dbmodels.BooleanField(default=True)
timeout = dbmodels.IntegerField()
@@ -482,7 +500,7 @@
@classmethod
def create(cls, owner, name, priority, control_file, control_type,
- hosts, synch_type, timeout):
+ hosts, synch_type, timeout, run_verify):
"""\
Creates a job by taking some information (the listed args)
and filling in the rest of the necessary information.
@@ -491,7 +509,8 @@
job = cls.add_object(
owner=owner, name=name, priority=priority,
control_file=control_file, control_type=control_type,
- synch_type=synch_type, timeout=timeout)
+ synch_type=synch_type, timeout=timeout,
+ run_verify=run_verify)
if job.synch_type == Test.SynchType.SYNCHRONOUS:
job.synch_count = len(hosts)
@@ -520,7 +539,8 @@
owner=new_owner, name=self.name, priority=self.priority,
control_file=self.control_file,
control_type=self.control_type, hosts=hosts,
- synch_type=self.synch_type, timeout=self.timeout)
+ synch_type=self.synch_type, timeout=self.timeout,
+ run_verify=self.run_verify)
new_job.queue(hosts)
return new_job
diff --git a/frontend/afe/rpc_interface.py b/frontend/afe/rpc_interface.py
index bac3aa8..dba79f4 100644
--- a/frontend/afe/rpc_interface.py
+++ b/frontend/afe/rpc_interface.py
@@ -115,8 +115,16 @@
# tests
-def add_test(name, test_type, path, test_class=None, description=None):
+def add_test(name, test_type, path, author=None, dependencies=None,
+ experimental=True, run_verify=None, test_class=None,
+ test_time=None, test_category=None, description=None,
+ sync_count=1):
return models.Test.add_object(name=name, test_type=test_type, path=path,
+ author=author, dependencies=dependencies,
+ experimental=experimental,
+ run_verify=run_verify, test_time=test_time,
+ test_category=test_category,
+ sync_count=sync_count,
test_class=test_class,
description=description).id
@@ -253,7 +261,7 @@
def create_job(name, priority, control_file, control_type, timeout=None,
is_synchronous=None, hosts=None, meta_hosts=None,
- one_time_hosts=None):
+ run_verify=True, one_time_hosts=None):
"""\
Create and enqueue a job.
@@ -324,7 +332,8 @@
control_type=control_type,
synch_type=synch_type,
hosts=host_objects,
- timeout=timeout)
+ timeout=timeout,
+ run_verify=run_verify)
job.queue(host_objects)
return job.id
diff --git a/frontend/migrations/013_new_test_fields.py b/frontend/migrations/013_new_test_fields.py
new file mode 100644
index 0000000..02ce6e8
--- /dev/null
+++ b/frontend/migrations/013_new_test_fields.py
@@ -0,0 +1,20 @@
+def migrate_up(manager):
+ manager.execute('ALTER TABLE jobs ADD run_verify tinyint(1) default 1')
+ manager.execute('ALTER TABLE autotests ADD author VARCHAR(256)')
+ manager.execute('ALTER TABLE autotests ADD dependencies VARCHAR(256)')
+ manager.execute('ALTER TABLE autotests ADD experimental SMALLINT DEFAULT 0')
+ manager.execute('ALTER TABLE autotests ADD run_verify SMALLINT DEFAULT 1')
+ manager.execute('ALTER TABLE autotests ADD test_time SMALLINT DEFAULT 1')
+ manager.execute('ALTER TABLE autotests ADD test_category VARCHAR(256)')
+ manager.execute('ALTER TABLE autotests ADD sync_count INT(11) DEFAULT 1')
+
+
+def migrate_down(manager):
+ manager.execute('ALTER TABLE jobs DROP run_verify')
+ manager.execute('ALTER TABLE autotests DROP sync_count')
+ manager.execute('ALTER TABLE autotests DROP author')
+ manager.execute('ALTER TABLE autotests DROP dependencies')
+ manager.execute('ALTER TABLE autotests DROP experimental')
+ manager.execute('ALTER TABLE autotests DROP run_verify')
+ manager.execute('ALTER TABLE autotests DROP test_time')
+ manager.execute('ALTER TABLE autotests DROP test_category')
diff --git a/scheduler/monitor_db.py b/scheduler/monitor_db.py
index 4858b69..ce828fd 100644
--- a/scheduler/monitor_db.py
+++ b/scheduler/monitor_db.py
@@ -3,7 +3,7 @@
"""
Autotest scheduler
"""
-__author__ = "Paul Turner <pjt@google.com>"
+
import os, sys, tempfile, shutil, MySQLdb, time, traceback, subprocess, Queue
import optparse, signal, smtplib, socket, datetime, stat, pwd, errno
@@ -1638,7 +1638,7 @@
def _fields(cls):
return ['id', 'owner', 'name', 'priority', 'control_file',
'control_type', 'created_on', 'synch_type',
- 'synch_count', 'synchronizing', 'timeout']
+ 'synch_count', 'synchronizing', 'timeout', 'run_verify']
def is_server_job(self):
diff --git a/utils/tests.py b/utils/tests.py
new file mode 100644
index 0000000..38da13b
--- /dev/null
+++ b/utils/tests.py
@@ -0,0 +1,345 @@
+#!/usr/bin/python2.4
+#
+# Copyright 2008 Google Inc. All Rights Reserved.
+"""
+This utility allows for easy updating, removing and importing
+of tests into the autotest_web autotests table.
+
+Example of updating client side tests:
+./tests.py -t /usr/local/autotest/client/tests
+
+If for example not all of your control files adhere to the standard outlined at
+http://test.kernel.org/autotest/ControlRequirements
+
+You can force options:
+./tests.py --test-type server -t /usr/local/autotest/server/tests
+
+
+Most options should be fairly self explanatory use --help to display them.
+"""
+
+
+import time, re, os, MySQLdb, sys, optparse
+import common
+from autotest_lib.client.common_lib import control_data, test, global_config
+from autotest_lib.client.common_lib import utils
+
+# Defaults
+AUTHOR = 'Autotest Team'
+DEPENDENCIES = ()
+DOC = 'unknown'
+EXPERIMENTAL = 0
+RUN_VERIFY = 1
+SYNC_COUNT = 1
+TEST_TYPE = 1
+TEST_TIME = 1
+TEST_CLASS = 'Canned Test Sets'
+TEST_CATEGORY = 'Functional'
+
+# Global
+DRY_RUN = False
+
+
+def main(argv):
+ """Main function"""
+ global DRY_RUN
+ parser = optparse.OptionParser()
+ parser.add_option('-c', '--db-clear-tests',
+ dest='clear_tests', action='store_true',
+ default=False,
+ help='Clear client and server tests with invalid control files')
+ parser.add_option('-d', '--dry-run',
+ dest='dry_run', action='store_true', default=False,
+ help='Dry run for operation')
+ parser.add_option('-A', '--add-experimental',
+ dest='add_experimental', action='store_true',
+ default=False,
+ help='Add experimental tests to frontend')
+ parser.add_option('-N', '--add-noncompliant',
+ dest='add_noncompliant', action='store_true',
+ default=False,
+ help='Skip any tests that are not compliant')
+ parser.add_option('-t', '--tests-dir', dest='tests_dir',
+ help='Directory to recursively check for control.*')
+ parser.add_option('-p', '--test-type', dest='test_type', default=TEST_TYPE,
+ help='Default test type for tests (Client=1, Server=2)')
+ parser.add_option('-a', '--test-author', dest='author', default=AUTHOR,
+ help='Set a default author for tests')
+ parser.add_option('-n', '--test-dependencies', dest='dependencies',
+ default=DEPENDENCIES,
+ help='Set default dependencies for tests')
+ parser.add_option('-v', '--test-run-verify', dest='run_verify',
+ default=RUN_VERIFY,
+ help='Set default run_verify (0, 1)')
+ parser.add_option('-e', '--test-experimental', dest='experimental',
+ default=EXPERIMENTAL,
+ help='Set default experimental (0, 1)')
+ parser.add_option('-y', '--test-sync-count', dest='sync_count',
+ default=SYNC_COUNT,
+ help='Set a default sync_count (1, >1)')
+ parser.add_option('-m', '--test-time', dest='test_time', default=TEST_TIME,
+ help='Set a default time for tests')
+ parser.add_option('-g', '--test-category', dest='test_category',
+ default=TEST_CATEGORY,
+                      help='Set a default category for tests')
+ parser.add_option('-l', '--test-class', dest='test_class',
+ default=TEST_CLASS, help='Set a default test class')
+ parser.add_option('-r', '--control-pattern', dest='control_pattern',
+ default='^control.*',
+ help='The pattern to look for in directories for control files')
+ parser.add_option('-z', '--autotest_dir', dest='autotest_dir',
+ default='/usr/local/autotest',
+ help='Autotest directory root')
+ options, args = parser.parse_args()
+ DRY_RUN = options.dry_run
+ if len(argv) < 2:
+ parser.print_help()
+ return 1
+
+ if options.clear_tests:
+ tests = get_tests_from_db(autotest_dir=options.autotest_dir)
+ test_paths = [tests['missing'][t] for t in tests['missing']]
+ db_remove_tests(test_paths)
+ if options.tests_dir:
+ tests = get_tests_from_fs(options.tests_dir, options.control_pattern,
+ add_noncompliant=options.add_noncompliant)
+ update_tests_in_db(tests, author=options.author,
+ dependencies=options.dependencies,
+ experimental=options.experimental,
+ run_verify=options.run_verify,
+ doc=DOC,
+ sync_count=options.sync_count,
+ test_type=options.test_type,
+ test_time=options.test_time,
+ test_class=options.test_class,
+ test_category=options.test_category,
+ add_experimental=options.add_experimental,
+ add_noncompliant=options.add_noncompliant,
+ autotest_dir=options.autotest_dir)
+
+
+def db_remove_tests(tests):
+ """Remove tests from autotest_web that do not have valid control files
+
+ Arguments:
+ tests: a list of control file relative paths used as keys for deletion.
+ """
+ connection=db_connect()
+ cursor = connection.cursor()
+ for test in tests:
+ print "Removing " + test
+ sql = "DELETE FROM autotests WHERE path='%s'" % test
+ db_execute(cursor, sql)
+
+ connection.commit()
+ connection.close()
+
+
+def update_tests_in_db(tests, dry_run=False, add_experimental=False,
+ autotest_dir="/usr/local/autotest/", **dargs):
+ """Update or add each test to the database"""
+ connection=db_connect()
+ cursor = connection.cursor()
+ for test in tests:
+ new_test = dargs.copy()
+ new_test['path'] = test.replace(autotest_dir, '').lstrip('/')
+ # Create a name for the test
+ for key in dir(tests[test]):
+ if not key.startswith('__'):
+ value = getattr(tests[test], key)
+ if not callable(value):
+ new_test[key] = value
+ # This only takes place if --add-noncompliant is provided on the CLI
+ if 'name' not in new_test:
+ test_new_test = test.split('/')
+ if test_new_test[-1] == 'control':
+ new_test['name'] = test_new_test[-2]
+ else:
+ control_name = "%s:%s"
+ control_name %= (test_new_test[-2],
+ test_new_test[-1])
+ new_test['name'] = control_name.replace('control.', '')
+ # Experimental Check
+ if not add_experimental:
+ if int(new_test['experimental']):
+ continue
+ # clean tests for insertion into db
+ new_test = dict_db_clean(new_test)
+ sql = "SELECT name,path FROM autotests WHERE path='%s' LIMIT 1"
+ sql %= new_test['path']
+ cursor.execute(sql)
+ # check for entries already in existence
+ results = cursor.fetchall()
+ if results:
+ sql = "UPDATE autotests SET name='%s', test_class='%s',"\
+ "description='%s', test_type=%d, path='%s',"\
+ "synch_type=%d, author='%s', dependencies='%s',"\
+ "experimental=%d, run_verify=%d, test_time=%d,"\
+ "test_category='%s', sync_count=%d"\
+ " WHERE path='%s'"
+ sql %= (new_test['name'], new_test['test_class'], new_test['doc'],
+ int(new_test['test_type']), new_test['path'],
+ int(new_test['synch_type']), new_test['author'],
+ new_test['dependencies'], int(new_test['experimental']),
+ int(new_test['run_verify']), new_test['test_time'],
+ new_test['test_category'], new_test['sync_count'], new_test['path'])
+ else:
+ # Create a relative path
+ path = test.replace(autotest_dir, '')
+ sql = "INSERT INTO autotests"\
+ "(name, test_class, description, test_type, path, "\
+ "synch_type, author, dependencies, experimental, "\
+ "run_verify, test_time, test_category, sync_count) "\
+ "VALUES('%s','%s','%s',%d,'%s',%d,'%s','%s',%d,%d,%d,"\
+ "'%s',%d)"
+ sql %= (new_test['name'], new_test['test_class'], new_test['doc'],
+ int(new_test['test_type']), new_test['path'],
+ int(new_test['synch_type']), new_test['author'],
+ new_test['dependencies'], int(new_test['experimental']),
+ int(new_test['run_verify']), new_test['test_time'],
+ new_test['test_category'], new_test['sync_count'])
+
+ db_execute(cursor, sql)
+
+ connection.commit()
+ connection.close()
+
+
+def dict_db_clean(test):
+ """Take a tests dictionary from update_db and make it pretty for SQL"""
+
+ test_type = { 'client' : 1,
+ 'server' : 2, }
+ test_time = { 'short' : 1,
+ 'medium' : 2,
+ 'long' : 3, }
+
+ test['name'] = MySQLdb.escape_string(test['name'])
+ test['author'] = MySQLdb.escape_string(test['author'])
+ test['test_class'] = MySQLdb.escape_string(test['test_class'])
+ test['test_category'] = MySQLdb.escape_string(test['test_category'])
+ test['doc'] = MySQLdb.escape_string(test['doc'])
+ test['dependencies'] = ", ".join(test['dependencies'])
+ # TODO Fix when we move from synch_type to sync_count
+ if test['sync_count'] == 1:
+ test['synch_type'] = 1
+ else:
+ test['synch_type'] = 2
+ try:
+ test['test_type'] = int(test['test_type'])
+ if test['test_type'] != 1 and test['test_type'] != 2:
+ raise Exception('Incorrect number %d for test_type' %
+ test['test_type'])
+ except ValueError:
+ pass
+ try:
+ test['test_time'] = int(test['test_time'])
+ if test['test_time'] < 1 or test['test_time'] > 3:
+ raise Exception('Incorrect number %d for test_time' %
+ test['test_time'])
+ except ValueError:
+ pass
+
+ if str == type(test['test_time']):
+ test['test_time'] = test_time[test['test_time'].lower()]
+ if str == type(test['test_type']):
+ test['test_type'] = test_type[test['test_type'].lower()]
+ return test
+
+
+def get_tests_from_db(autotest_dir='/usr/local/autotest'):
+ """Get the tests from the DB.
+ Returns:
+ dictionary of form:
+ data['valid'][test_name] = parsed object
+ data['missing'][test_name] = relative_path to test
+ """
+ connection = db_connect()
+ cursor = connection.cursor()
+ tests = {}
+ tests['valid'] = {}
+ tests['missing'] = {}
+ cursor.execute("SELECT name,path from autotests")
+ results = cursor.fetchall()
+ for row in results:
+ name = row[0]
+ relative_path = row[1]
+ control_path = os.path.join(autotest_dir, relative_path)
+ if os.path.exists(control_path):
+ tests['valid'][name] = control_data.parse_control(control_path)
+ else:
+ # test doesn't exist
+ tests['missing'][name] = relative_path
+ connection.close()
+ return tests
+
+
+def get_tests_from_fs(parent_dir, control_pattern, add_noncompliant=False):
+ """Find control jobs in location and create one big job
+ Returns:
+ dictionary of the form:
+ tests[file_path] = parsed_object
+
+ """
+ tests = {}
+ for dir in [ parent_dir ]:
+ files = recursive_walk(dir, control_pattern)
+ for file in files:
+ if not add_noncompliant:
+ try:
+ found_test = control_data.parse_control(file,
+ raise_warnings=True)
+ tests[file] = found_test
+ except control_data.ControlVariableException, e:
+ print "Skipping %s\n%s" % (file, e)
+ pass
+ else:
+ found_test = control_data.parse_control(file)
+ tests[file] = found_test
+ return tests
+
+
+def recursive_walk(path, wildcard):
+    """Recursively go through a directory.
+ Returns:
+ A list of files that match wildcard
+ """
+ files = []
+ directories = [ path ]
+ while len(directories)>0:
+ directory = directories.pop()
+ for name in os.listdir(directory):
+ fullpath = os.path.join(directory, name)
+ if os.path.isfile(fullpath):
+ # if we are a control file
+ if re.search(wildcard, name):
+ files.append(fullpath)
+ elif os.path.isdir(fullpath):
+ directories.append(fullpath)
+ return files
+
+
+def db_connect():
+ """Connect to the AUTOTEST_WEB database and return a connect object."""
+ c = global_config.global_config
+ db_host = c.get_config_value('AUTOTEST_WEB', 'host')
+ db_name = c.get_config_value('AUTOTEST_WEB', 'database')
+ username = c.get_config_value('AUTOTEST_WEB', 'user')
+ password = c.get_config_value('AUTOTEST_WEB', 'password')
+ connection = MySQLdb.connect(host=db_host, db=db_name,
+ user=username,
+ passwd=password)
+ return connection
+
+
+def db_execute(cursor, sql):
+ """Execute SQL or print out what would be executed if dry_run is defined"""
+
+ if DRY_RUN:
+ print "Would run: " + sql
+ else:
+ cursor.execute(sql)
+
+
+if __name__ == "__main__":
+ main(sys.argv)