Implement sync_count. The primary change here is replacing the job.synch_type field with a synch_count field. There is no longer just a distinction between synchronous and asynchronous jobs. Instead, every job has a synch_count, with synch_count = 1 corresponding to the old concept of asynchronous jobs. This required:
-changes to the job creation RPC and corresponding client code in AFE and the CLI
-massive changes to the scheduler to schedule all jobs in groups based on synch_count (this unified the old synch and async code paths)
-changed results directory structure to accommodate synchronous groups, as documented at http://autotest.kernel.org/wiki/SchedulerSpecification, including widespread changes to monitor_db and a change in AFE
-changes to AFE abort code to handle synchronous groups instead of just synchronous jobs
-also got rid of the "synchronizing" field in the jobs table, since I was changing the table anyway and it seems very likely now that that field will never be used
other changes included:
-add some logging to afe/models.py to match what the scheduler code does, since the scheduler is starting to use the models more
-added checks for aborts of synchronous groups to abort_host_queue_entries RPC
git-svn-id: http://test.kernel.org/svn/autotest/trunk@2402 592f7852-d20e-0410-864c-8624ca9c26a4
diff --git a/frontend/afe/rpc_interface.py b/frontend/afe/rpc_interface.py
index 4447736..9a85566 100644
--- a/frontend/afe/rpc_interface.py
+++ b/frontend/afe/rpc_interface.py
@@ -249,7 +249,8 @@
tests. Returns a dict with the following keys:
control_file - the control file text
is_server - is the control file a server-side control file?
- is_synchronous - should the control file be run synchronously?
+ synch_count - how many machines the job uses per autoserv execution.
+ synch_count == 1 means the job is asynchronous.
dependencies - a list of the names of labels on which the job depends
tests: list of tests to run
@@ -258,7 +259,7 @@
profilers: list of profilers to activate during the job
"""
if not tests:
- return dict(control_file='', is_server=False, is_synchronous=False,
+ return dict(control_file='', is_server=False, synch_count=1,
dependencies=[])
cf_info, test_objects, profiler_objects, label = (
@@ -271,7 +272,7 @@
def create_job(name, priority, control_file, control_type, timeout=None,
- is_synchronous=None, hosts=None, meta_hosts=None,
+ synch_count=None, hosts=None, meta_hosts=None,
run_verify=True, one_time_hosts=None, email_list='',
dependencies=[], reboot_before=None, reboot_after=None):
"""\
@@ -280,7 +281,8 @@
priority: Low, Medium, High, Urgent
control_file: contents of control file
control_type: type of control file, Client or Server
- is_synchronous: boolean indicating if a job is synchronous
+ synch_count: how many machines the job uses per autoserv execution.
+ synch_count == 1 means the job is asynchronous.
hosts: list of hosts to run job on
meta_hosts: list where each entry is a label name, and for each entry
one host will be chosen from that label to run the job
@@ -329,17 +331,6 @@
% (requested_count, label.name, available_count))
raise model_logic.ValidationError({'meta_hosts' : error})
- # default is_synchronous to some appropriate value
- ControlType = models.Job.ControlType
- control_type = ControlType.get_value(control_type)
- if is_synchronous is None:
- is_synchronous = (control_type == ControlType.SERVER)
- # convert the synch flag to an actual type
- if is_synchronous:
- synch_type = models.Test.SynchType.SYNCHRONOUS
- else:
- synch_type = models.Test.SynchType.ASYNCHRONOUS
-
rpc_utils.check_job_dependencies(host_objects, dependencies)
dependency_labels = [labels_by_name[label_name]
for label_name in dependencies]
@@ -347,7 +338,7 @@
job = models.Job.create(owner=owner, name=name, priority=priority,
control_file=control_file,
control_type=control_type,
- synch_type=synch_type,
+ synch_count=synch_count,
hosts=host_objects + metahost_objects,
timeout=timeout,
run_verify=run_verify,
@@ -366,6 +357,7 @@
query = models.HostQueueEntry.query_objects(filter_data)
host_queue_entries = list(query.select_related())
models.AclGroup.check_for_acl_violation_queue_entries(host_queue_entries)
+ rpc_utils.check_abort_synchronous_jobs(host_queue_entries)
user = thread_local.get_user()
for queue_entry in host_queue_entries: