[autotest] Treat special task labels like "provisionable" labels.
As it stands, to use the special task labels, you need to add the label
to every host, since we still treat it as a normal/standard DEPENDENCY.
If any special task treats a label differently, then we shouldn't
require that label on the DUT, as we're using it as a marker that we
want some special handling of this test rather than an actual
dependency.
BUG=chromium:334418
DEPLOY=scheduler, apache
TEST=added a potato->dummy_PassServer mapping, saw dummy_PassServer run
in cleanup on job with DEPENDENCIES=potato where host didn't have
potato.
Change-Id: I34936a5a84a42897c8dfc0e2acd50195e0e74ec5
Reviewed-on: https://chromium-review.googlesource.com/194212
Reviewed-by: Alex Miller <milleral@chromium.org>
Commit-Queue: Alex Miller <milleral@chromium.org>
Tested-by: Alex Miller <milleral@chromium.org>
diff --git a/frontend/afe/rpc_utils.py b/frontend/afe/rpc_utils.py
index b126aff..7041cd3 100644
--- a/frontend/afe/rpc_utils.py
+++ b/frontend/afe/rpc_utils.py
@@ -235,7 +235,7 @@
hosts_in_job = models.Host.objects.filter(id__in=host_ids)
ok_hosts = hosts_in_job
for index, dependency in enumerate(job_dependencies):
- if not provision.can_provision(dependency):
+ if not provision.is_for_special_action(dependency):
ok_hosts = ok_hosts.filter(labels__name=dependency)
failing_hosts = (set(host.hostname for host in host_objects) -
set(host.hostname for host in ok_hosts))
@@ -258,7 +258,7 @@
for metahost in metahost_objects:
hosts = models.Host.objects.filter(labels=metahost)
for label_name in job_dependencies:
- if not provision.can_provision(label_name):
+ if not provision.is_for_special_action(label_name):
hosts = hosts.filter(labels__name=label_name)
if not any(hosts):
raise error.NoEligibleHostException("No hosts within %s satisfy %s."
@@ -509,7 +509,7 @@
check_for_duplicate_hosts(host_objects)
for label_name in dependencies:
- if provision.can_provision(label_name):
+ if provision.is_for_special_action(label_name):
# TODO: We could save a few queries
# if we had a bulk ensure-label-exists function, which used
# a bulk .get() call. The win is probably very small.
diff --git a/scheduler/host_scheduler.py b/scheduler/host_scheduler.py
index 6d933a6..38ff851 100644
--- a/scheduler/host_scheduler.py
+++ b/scheduler/host_scheduler.py
@@ -369,7 +369,7 @@
# Remove provisionable labels from the set of job_dependencies that we
# need to satisfy
job_dependencies = set([dep for dep in job_dependencies if
- not provision.can_provision(self._labels[dep].name)])
+ not provision.is_for_special_action(self._labels[dep].name)])
host_labels = self._host_labels.get(host_id, set())
return (self._is_acl_accessible(host_id, queue_entry) and
diff --git a/scheduler/rdb_integration_tests.py b/scheduler/rdb_integration_tests.py
index 4ca363b..1a79c4e 100644
--- a/scheduler/rdb_integration_tests.py
+++ b/scheduler/rdb_integration_tests.py
@@ -791,3 +791,20 @@
local_response_handler)
list(rdb_lib.acquire_hosts(self.host_scheduler, queue_entries))
+
+ def testConfigurations(self):
+ """Test that configurations don't matter.
+ @raises AssertionError: If the request doesn't find a host, which
+ will happen if configurations are not stripped out.
+ """
+ self.god.stub_with(provision.Cleanup,
+ '_actions',
+ {'action': 'fakeTest'})
+ job_labels = set(['action', 'a'])
+ host_deps = set(['a'])
+ db_host = self.db_helper.create_host('h1', deps=host_deps)
+ self.create_job(user='autotest_system', deps=job_labels)
+ queue_entries = self._dispatcher._refresh_pending_queue_entries()
+ matching_host = rdb_lib.acquire_hosts(
+ self.host_scheduler, queue_entries).next()
+ self.assert_(matching_host.id == db_host.id)
diff --git a/scheduler/rdb_lib.py b/scheduler/rdb_lib.py
index d8a42e6..73fc2ef 100644
--- a/scheduler/rdb_lib.py
+++ b/scheduler/rdb_lib.py
@@ -36,7 +36,7 @@
job_id = queue_entry.job_id
job_deps = self._job_deps.get(job_id, [])
job_deps = [dep for dep in job_deps
- if not provision.can_provision(self._labels[dep].name)]
+ if not provision.is_for_special_action(self._labels[dep].name)]
job_acls = self._job_acls.get(job_id, [])
return {'deps': job_deps, 'acls': job_acls,
diff --git a/server/cros/provision.py b/server/cros/provision.py
index 44f2b1d..c79f7d2 100644
--- a/server/cros/provision.py
+++ b/server/cros/provision.py
@@ -78,6 +78,29 @@
return cls._actions[label]
+ @classmethod
+ def partition(cls, labels):
+ """
+ Filter a list of labels into two sets: those labels that we know how to
+ act on and those that we don't know how to act on.
+
+ @param labels: A list of strings of labels.
+ @returns: A tuple where the first element is a set of unactionable
+ labels, and the second element is a set of the actionable
+ labels.
+ """
+ capabilities = set()
+ configurations = set()
+
+ for label in labels:
+ if cls.acts_on(label):
+ configurations.add(label)
+ else:
+ capabilities.add(label)
+
+ return capabilities, configurations
+
+
class Verify(_SpecialTaskAction):
"""
Tests to verify that the DUT is in a sane, known good state that we can run
@@ -133,11 +156,25 @@
name = 'repair'
-# For backwards compatibility with old control files, we still need the
-# following:
-can_provision = Provision.acts_on
-provisioner_for = Provision.test_for
+# TODO(milleral): crbug.com/364273
+# Label doesn't really mean label in this context. We're putting things into
+# DEPENDENCIES that really aren't DEPENDENCIES, and we should probably stop
+# doing that.
+def is_for_special_action(label):
+ """
+ If any special task handles the label specially, then we're using the label
+ to communicate that we want an action, and not as an actual dependency that
+ the test has.
+
+ @param label: A string label name.
+ @return True if any special task handles this label specially,
+ False if no special task handles this label.
+ """
+ return (Verify.acts_on(label) or
+ Provision.acts_on(label) or
+ Cleanup.acts_on(label) or
+ Repair.acts_on(label))
def filter_labels(labels):
@@ -155,16 +192,7 @@
(set(['bluetooth']), set(['cros-version:lumpy-release/R28-3993.0.0']))
"""
- capabilities = set()
- configurations = set()
-
- for label in labels:
- if can_provision(label):
- configurations.add(label)
- else:
- capabilities.add(label)
-
- return capabilities, configurations
+ return Provision.partition(labels)
def split_labels(labels):
@@ -188,7 +216,7 @@
configurations = dict()
for label in labels:
- if can_provision(label):
+ if Provision.acts_on(label):
name, value = label.split(':', 1)
configurations[name] = value
else: