Begin adding support for parameterized jobs.
This will allow test developers to specify certain parameters that a test
control file may take, so that users can then easily set those parameters
when creating a job. Enabling this feature removes the ability to edit the
control file directly at job creation.
The feature is currently INCOMPLETE; do not attempt to use it. It will be
committed in small pieces to keep individual code reviews small.
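
For reference, the intended usage of the new RPC once the feature is
complete would look roughly like the sketch below (the test, host and
parameter names, and the priority value, are illustrative only):

    create_parameterized_job(
        name='my-parameterized-job', priority='Medium',
        test='my_test', parameters={'iterations': ('10', 'int')},
        hosts=('host1',))
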
Signed-off-by: James Ren <jamesren@google.com>
git-svn-id: http://test.kernel.org/svn/autotest/trunk@4720 592f7852-d20e-0410-864c-8624ca9c26a4
diff --git a/frontend/afe/rpc_interface.py b/frontend/afe/rpc_interface.py
index d9a10ba..0ab132f 100644
--- a/frontend/afe/rpc_interface.py
+++ b/frontend/afe/rpc_interface.py
@@ -397,6 +397,92 @@
return cf_info
+def create_parameterized_job(name, priority, test, parameters, kernel=None,
+ label=None, profilers=(), profiler_parameters=None,
+ use_container=False, profile_only=None,
+ upload_kernel_config=False, hosts=(),
+ meta_hosts=(), one_time_hosts=(),
+ atomic_group_name=None, synch_count=None,
+ is_template=False, timeout=None,
+ max_runtime_hrs=None, run_verify=True,
+ email_list='', dependencies=(), reboot_before=None,
+ reboot_after=None, parse_failed_repair=None,
+ hostless=False, keyvals=None, drone_set=None):
+ """
+ Creates and enqueues a parameterized job.
+
+    Most parameters are a combination of those for generate_control_file()
+ and create_job(), with the exception of:
+
+ @param test name or ID of the test to run
+ @param parameters a map of parameter name ->
+ tuple of (param value, param type)
+ @param profiler_parameters a dictionary of parameters for the profilers:
+ key: profiler name
+ value: dict of param name -> tuple of
+ (param value,
+ param type)
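+
+    For illustration, the two structures might look like this (parameter and
+    profiler names here are hypothetical):
+        parameters={'iterations': ('10', 'int')}
+        profiler_parameters={'my_profiler': {'interval': ('5', 'int')}}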
+ """
+    # Capture the passed arguments now. They are handed to
+    # rpc_utils.get_create_job_common_args(), which extracts the subset that
+    # rpc_utils.create_job_common() accepts; that subset is then passed to
+    # create_job_common() below.
+ args = locals()
+
+ # Set up the parameterized job configs
+ test_obj = models.Test.smart_get(test)
+ if test_obj.test_type == model_attributes.TestTypes.SERVER:
+ control_type = models.Job.ControlType.SERVER
+ else:
+ control_type = models.Job.ControlType.CLIENT
+
+ try:
+ label = models.Label.smart_get(label)
+ except models.Label.DoesNotExist:
+ label = None
+
+ kernel_objs = models.Kernel.create_kernels(kernel)
+ profiler_objs = [models.Profiler.smart_get(profiler)
+ for profiler in profilers]
+
+ parameterized_job = models.ParameterizedJob.objects.create(
+ test=test_obj, label=label, use_container=use_container,
+ profile_only=profile_only,
+ upload_kernel_config=upload_kernel_config)
+ parameterized_job.kernels.add(*kernel_objs)
+
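+    # Record each requested profiler against the parameterized job, along
+    # with any parameters supplied for that profiler.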
+ for profiler in profiler_objs:
+ parameterized_profiler = models.ParameterizedJobProfiler.objects.create(
+ parameterized_job=parameterized_job,
+ profiler=profiler)
+        profiler_params = (profiler_parameters or {}).get(profiler.name, {})
+ for name, (value, param_type) in profiler_params.iteritems():
+ models.ParameterizedJobProfilerParameter.objects.create(
+ parameterized_job_profiler=parameterized_profiler,
+ parameter_name=name,
+ parameter_value=value,
+ parameter_type=param_type)
+
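+    # If anything below fails (unknown parameters, errors while creating the
+    # job), delete the ParameterizedJob created above and re-raise, so a
+    # failed call does not leave orphaned rows behind.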
+ try:
+ for parameter in test_obj.testparameter_set.all():
+ if parameter.name in parameters:
+ param_value, param_type = parameters.pop(parameter.name)
+ parameterized_job.parameterizedjobparameter_set.create(
+ test_parameter=parameter, parameter_value=param_value,
+ parameter_type=param_type)
+
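+        # Anything left in 'parameters' was not declared as a parameter of
+        # this test, so reject it rather than silently dropping it.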
+ if parameters:
+ raise Exception('Extra parameters remain: %r' % parameters)
+
+ return rpc_utils.create_job_common(
+ parameterized_job=parameterized_job.id,
+ control_type=control_type,
+ **rpc_utils.get_create_job_common_args(args))
+ except:
+ parameterized_job.delete()
+ raise
+
+
def create_job(name, priority, control_file, control_type,
hosts=(), meta_hosts=(), one_time_hosts=(),
atomic_group_name=None, synch_count=None, is_template=False,
@@ -437,110 +523,8 @@
@returns The created Job id number.
"""
- user = models.User.current_user()
- owner = user.login
-
- # Convert metahost names to lower case, to avoid case sensitivity issues
- meta_hosts = [meta_host.lower() for meta_host in meta_hosts]
-
- # input validation
- if not (hosts or meta_hosts or one_time_hosts or atomic_group_name
- or hostless):
- raise model_logic.ValidationError({
- 'arguments' : "You must pass at least one of 'hosts', "
- "'meta_hosts', 'one_time_hosts', "
- "'atomic_group_name', or 'hostless'"
- })
-
- if hostless:
- if hosts or meta_hosts or one_time_hosts or atomic_group_name:
- raise model_logic.ValidationError({
- 'hostless': 'Hostless jobs cannot include any hosts!'})
- server_type = models.Job.ControlType.get_string(
- models.Job.ControlType.SERVER)
- if control_type != server_type:
- raise model_logic.ValidationError({
- 'control_type': 'Hostless jobs cannot use client-side '
- 'control files'})
-
- labels_by_name = dict((label.name.lower(), label)
- for label in models.Label.objects.all())
- atomic_groups_by_name = dict((ag.name.lower(), ag)
- for ag in models.AtomicGroup.objects.all())
-
- # Schedule on an atomic group automagically if one of the labels given
- # is an atomic group label and no explicit atomic_group_name was supplied.
- if not atomic_group_name:
- for label_name in meta_hosts or []:
- label = labels_by_name.get(label_name)
- if label and label.atomic_group:
- atomic_group_name = label.atomic_group.name
- break
-
- # convert hostnames & meta hosts to host/label objects
- host_objects = models.Host.smart_get_bulk(hosts)
- metahost_objects = []
- for label_name in meta_hosts or []:
- if label_name in labels_by_name:
- label = labels_by_name[label_name]
- metahost_objects.append(label)
- elif label_name in atomic_groups_by_name:
- # If given a metahost name that isn't a Label, check to
- # see if the user was specifying an Atomic Group instead.
- atomic_group = atomic_groups_by_name[label_name]
- if atomic_group_name and atomic_group_name != atomic_group.name:
- raise model_logic.ValidationError({
- 'meta_hosts': (
- 'Label "%s" not found. If assumed to be an '
- 'atomic group it would conflict with the '
- 'supplied atomic group "%s".' % (
- label_name, atomic_group_name))})
- atomic_group_name = atomic_group.name
- else:
- raise model_logic.ValidationError(
- {'meta_hosts' : 'Label "%s" not found' % label_name})
-
- # Create and sanity check an AtomicGroup object if requested.
- if atomic_group_name:
- if one_time_hosts:
- raise model_logic.ValidationError(
- {'one_time_hosts':
- 'One time hosts cannot be used with an Atomic Group.'})
- atomic_group = models.AtomicGroup.smart_get(atomic_group_name)
- if synch_count and synch_count > atomic_group.max_number_of_machines:
- raise model_logic.ValidationError(
- {'atomic_group_name' :
- 'You have requested a synch_count (%d) greater than the '
- 'maximum machines in the requested Atomic Group (%d).' %
- (synch_count, atomic_group.max_number_of_machines)})
- else:
- atomic_group = None
-
- for host in one_time_hosts or []:
- this_host = models.Host.create_one_time_host(host)
- host_objects.append(this_host)
-
- options = dict(name=name,
- priority=priority,
- control_file=control_file,
- control_type=control_type,
- is_template=is_template,
- timeout=timeout,
- max_runtime_hrs=max_runtime_hrs,
- synch_count=synch_count,
- run_verify=run_verify,
- email_list=email_list,
- dependencies=dependencies,
- reboot_before=reboot_before,
- reboot_after=reboot_after,
- parse_failed_repair=parse_failed_repair,
- keyvals=keyvals,
- drone_set=drone_set)
- return rpc_utils.create_new_job(owner=owner,
- options=options,
- host_objects=host_objects,
- metahost_objects=metahost_objects,
- atomic_group=atomic_group)
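+    # Validation, host/label resolution and job creation are now handled by
+    # rpc_utils.create_job_common(), shared with create_parameterized_job().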
+ return rpc_utils.create_job_common(
+ **rpc_utils.get_create_job_common_args(locals()))
def abort_host_queue_entries(**filter_data):
@@ -838,6 +822,7 @@
result['motd'] = rpc_utils.get_motd()
result['drone_sets_enabled'] = models.DroneSet.drone_sets_enabled()
result['drone_sets'] = drone_sets
+ result['parameterized_jobs'] = models.Job.parameterized_jobs_enabled()
result['status_dictionary'] = {"Aborted": "Aborted",
"Verifying": "Verifying Host",