[autotest] Continuing refactor of dynamic_suite code; splitting monster files!

Split the biiiiig dynamic_suite.py and dynamic_suite_unittest.py into separate
files for Reimager and Suite.  Adjust imports to compensate.

BUG=chromium-os:30266
TEST=unit
TEST=successful run_suite.py run
TEST=suite_enumerator.py, suite_preprocessor.py

Change-Id: I0457053a7ae1422970138619a64edbcfbbc338bc
Reviewed-on: https://gerrit.chromium.org/gerrit/30457
Tested-by: Chris Masone <cmasone@chromium.org>
Reviewed-by: Scott Zawalski <scottz@chromium.org>
Commit-Ready: Chris Masone <cmasone@chromium.org>
diff --git a/server/cros/dynamic_suite/comparitors.py b/server/cros/dynamic_suite/comparitors.py
new file mode 100644
index 0000000..64e161f
--- /dev/null
+++ b/server/cros/dynamic_suite/comparitors.py
@@ -0,0 +1,48 @@
+# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Comparitors for use in dynamic_suite module unit tests."""
+
+import mox
+
+class StatusContains(mox.Comparator):
+    @staticmethod
+    def CreateFromStrings(status=None, test_name=None, reason=None):
+        status_comp = mox.StrContains(status) if status else mox.IgnoreArg()
+        name_comp = mox.StrContains(test_name) if test_name else mox.IgnoreArg()
+        reason_comp = mox.StrContains(reason) if reason else mox.IgnoreArg()
+        return StatusContains(status_comp, name_comp, reason_comp)
+
+
+    def __init__(self, status=mox.IgnoreArg(), test_name=mox.IgnoreArg(),
+                 reason=mox.IgnoreArg()):
+        """Initialize.
+
+        Takes mox.Comparator objects to apply to job_status.Status
+        member variables.
+
+        @param status: status code, e.g. 'INFO', 'START', etc.
+        @param test_name: expected test name.
+        @param reason: expected reason
+        """
+        self._status = status
+        self._test_name = test_name
+        self._reason = reason
+
+
+    def equals(self, rhs):
+        """Check to see if fields match base_job.status_log_entry obj in rhs.
+
+        @param rhs: base_job.status_log_entry object to match.
+        @return boolean
+        """
+        return (self._status.equals(rhs.status_code) and
+                self._test_name.equals(rhs.operation) and
+                self._reason.equals(rhs.message))
+
+
+    def __repr__(self):
+        return '<Status containing \'%s\t%s\t%s\'>' % (self._status,
+                                                       self._test_name,
+                                                       self._reason)
diff --git a/server/cros/dynamic_suite/constants.py b/server/cros/dynamic_suite/constants.py
new file mode 100644
index 0000000..28390d6
--- /dev/null
+++ b/server/cros/dynamic_suite/constants.py
@@ -0,0 +1,18 @@
+# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+# Job keyvals for finding debug symbols when processing crash dumps.
+JOB_BUILD_KEY = 'build'
+JOB_SUITE_KEY = 'suite'
+
+# Job attribute and label names
+JOB_REPO_URL = 'job_repo_url'
+VERSION_PREFIX = 'cros-version:'
+EXPERIMENTAL_PREFIX = 'experimental_'
+
+# Timings
+ARTIFACT_FINISHED_TIME = 'artifact_finished_time'
+DOWNLOAD_STARTED_TIME = 'download_started_time'
+PAYLOAD_FINISHED_TIME = 'payload_finished_time'
diff --git a/server/cros/dynamic_suite/dynamic_suite.py b/server/cros/dynamic_suite/dynamic_suite.py
index cb910f1..d2edfe9 100644
--- a/server/cros/dynamic_suite/dynamic_suite.py
+++ b/server/cros/dynamic_suite/dynamic_suite.py
@@ -2,18 +2,23 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-import common
 import compiler, datetime, hashlib, logging, os, random, re, time, traceback
 import signal
+
+import common
+
 from autotest_lib.client.common_lib import base_job, control_data, global_config
 from autotest_lib.client.common_lib import error, utils
 from autotest_lib.client.common_lib.cros import dev_server
+from autotest_lib.server.cros.dynamic_suite import constants
 from autotest_lib.server.cros.dynamic_suite import control_file_getter
 from autotest_lib.server.cros.dynamic_suite import frontend_wrappers
 from autotest_lib.server.cros.dynamic_suite import host_lock_manager, job_status
 from autotest_lib.server.cros.dynamic_suite.job_status import Status
+from autotest_lib.server.cros.dynamic_suite.reimager import Reimager
+from autotest_lib.server.cros.dynamic_suite.suite import Suite
 from autotest_lib.server import frontend
-from autotest_lib.frontend.afe.json_rpc import proxy
+
 
 """CrOS dynamic test suite generation and execution module.
 
@@ -232,24 +237,6 @@
 """
 
 
-# Job keyvals for finding debug symbols when processing crash dumps.
-JOB_BUILD_KEY = 'build'
-JOB_SUITE_KEY = 'suite'
-
-# Job attribute and label names
-JOB_REPO_URL = 'job_repo_url'
-VERSION_PREFIX = 'cros-version:'
-EXPERIMENTAL_PREFIX = 'experimental_'
-REIMAGE_JOB_NAME = 'try_new_image'
-
-# Timings
-ARTIFACT_FINISHED_TIME = 'artifact_finished_time'
-DOWNLOAD_STARTED_TIME = 'download_started_time'
-PAYLOAD_FINISHED_TIME = 'payload_finished_time'
-
-CONFIG = global_config.global_config
-
-
 # Relevant CrosDynamicSuiteExceptions are defined in client/common_lib/error.py.
 
 class SignalsAsExceptions(object):
@@ -353,8 +340,9 @@
 
                 now = datetime.datetime.now()
                 timestamp = now.strftime(job_status.TIME_FMT)
-                utils.write_keyval(spec.job.resultdir,
-                                   {ARTIFACT_FINISHED_TIME: timestamp})
+                utils.write_keyval(
+                    spec.job.resultdir,
+                    {constants.ARTIFACT_FINISHED_TIME: timestamp})
 
                 suite = Suite.create_from_name(spec.name, spec.build,
                                                afe=afe, tko=tko,
@@ -442,610 +430,5 @@
         self.add_experimental = add_experimental
 
 
-def inject_vars(vars, control_file_in):
-    """
-    Inject the contents of |vars| into |control_file_in|.
-
-    @param vars: a dict to shoehorn into the provided control file string.
-    @param control_file_in: the contents of a control file to munge.
-    @return the modified control file string.
-    """
-    control_file = ''
-    for key, value in vars.iteritems():
-        # None gets injected as 'None' without this check; same for digits.
-        if isinstance(value, str):
-            control_file += "%s='%s'\n" % (key, value)
-        else:
-            control_file += "%s=%r\n" % (key, value)
-    return control_file + control_file_in
-
-
-def _image_url_pattern():
-    return CONFIG.get_config_value('CROS', 'image_url_pattern', type=str)
-
-
-def _package_url_pattern():
-    return CONFIG.get_config_value('CROS', 'package_url_pattern', type=str)
-
-
-def get_package_url(build):
-    """Returns the package url for the given build."""
-    devserver_url = dev_server.DevServer.devserver_url_for_build(build)
-    return _package_url_pattern() % (devserver_url, build)
-
-
 def skip_reimage(g):
     return g.get('SKIP_IMAGE')
-
-
-class Reimager(object):
-    """
-    A class that can run jobs to reimage devices.
-
-    @var _afe: a frontend.AFE instance used to talk to autotest.
-    @var _tko: a frontend.TKO instance used to query the autotest results db.
-    @var _cf_getter: a ControlFileGetter used to get the AU control file.
-    """
-
-
-    def __init__(self, autotest_dir, afe=None, tko=None, results_dir=None):
-        """
-        Constructor
-
-        @param autotest_dir: the place to find autotests.
-        @param afe: an instance of AFE as defined in server/frontend.py.
-        @param tko: an instance of TKO as defined in server/frontend.py.
-        @param results_dir: The directory where the job can write results to.
-                            This must be set if you want job_id of sub-jobs
-                            list in the job keyvals.
-        """
-        self._afe = afe or frontend_wrappers.RetryingAFE(timeout_min=30,
-                                                         delay_sec=10,
-                                                         debug=False)
-        self._tko = tko or frontend_wrappers.RetryingTKO(timeout_min=30,
-                                                         delay_sec=10,
-                                                         debug=False)
-        self._results_dir = results_dir
-        self._reimaged_hosts = {}
-        self._cf_getter = control_file_getter.FileSystemGetter(
-            [os.path.join(autotest_dir, 'server/site_tests')])
-
-
-    def attempt(self, build, board, pool, record, check_hosts,
-                manager, num=None):
-        """
-        Synchronously attempt to reimage some machines.
-
-        Fire off attempts to reimage |num| machines of type |board|, using an
-        image at |url| called |build|.  Wait for completion, polling every
-        10s, and log results with |record| upon completion.
-
-        @param build: the build to install e.g.
-                      x86-alex-release/R18-1655.0.0-a1-b1584.
-        @param board: which kind of devices to reimage.
-        @param pool: Specify the pool of machines to use for scheduling
-                purposes.
-        @param record: callable that records job status.
-               prototype:
-                 record(base_job.status_log_entry)
-        @param check_hosts: require appropriate hosts to be available now.
-        @param manager: an as-yet-unused HostLockManager instance to handle
-                        locking DUTs that we decide to reimage.
-        @param num: how many devices to reimage.
-        @return True if all reimaging jobs succeed, false otherwise.
-        """
-        if not num:
-            num = CONFIG.get_config_value('CROS', 'sharding_factor', type=int)
-        logging.debug("scheduling reimaging across %d machines", num)
-        begin_time_str = datetime.datetime.now().strftime(job_status.TIME_FMT)
-        try:
-            self._ensure_version_label(VERSION_PREFIX + build)
-
-            if check_hosts:
-                # TODO make DEPENDENCIES-aware
-                self._ensure_enough_hosts(board, pool, num)
-
-            # Schedule job and record job metadata.
-            # TODO make DEPENDENCIES-aware
-            canary_job = self._schedule_reimage_job(build, board, pool, num)
-            self._record_job_if_possible(REIMAGE_JOB_NAME, canary_job)
-            logging.info('Created re-imaging job: %d', canary_job.id)
-
-            job_status.wait_for_jobs_to_start(self._afe, [canary_job])
-            logging.debug('Re-imaging job running.')
-
-            hosts = job_status.wait_for_and_lock_job_hosts(self._afe,
-                                                           [canary_job],
-                                                           manager)
-            logging.info('%r locked for reimaging.', hosts)
-
-            job_status.wait_for_jobs_to_finish(self._afe, [canary_job])
-            logging.debug('Re-imaging job finished.')
-
-            # Gather job results.
-            results = self.get_results(canary_job)
-            self._reimaged_hosts[build] = results.keys()
-
-        except error.InadequateHostsException as e:
-            logging.warning(e)
-            Status('WARN', REIMAGE_JOB_NAME, str(e),
-                   begin_time_str=begin_time_str).record_all(record)
-            return False
-        except Exception as e:
-            # catch Exception so we record the job as terminated no matter what.
-            logging.error(e)
-            Status('ERROR', REIMAGE_JOB_NAME, str(e),
-                   begin_time_str=begin_time_str).record_all(record)
-            return False
-
-        return job_status.record_and_report_results(results.values(), record)
-
-
-    def get_results(self, canary_job):
-        """
-        Gather results for |canary_job|, in a map of Statuses indexed by host.
-
-        A host's results will be named REIMAGE_JOB_NAME-<host> in the map, e.g.
-          {'chromeos2-rack1': Status('GOOD', 'try_new_image-chromeos2-rack1')}
-
-        @param canary_job: a completed frontend.Job
-        @return a map of hostname: job_status.Status objects.
-        """
-        return job_status.gather_per_host_results(self._afe,
-                                                  self._tko,
-                                                  [canary_job],
-                                                  REIMAGE_JOB_NAME + '-')
-
-
-    def _ensure_enough_hosts(self, board, pool, num):
-        """
-        Determine if there are enough working hosts to run on.
-
-        Raises exception if there are not enough hosts.
-
-        @param board: which kind of devices to reimage.
-        @param pool: the pool of machines to use for scheduling purposes.
-        @param num: how many devices to reimage.
-        @raises NoHostsException: if no working hosts.
-        @raises InadequateHostsException: if too few working hosts.
-        """
-        labels = [l for l in [board, pool] if l is not None]
-        available = self._count_usable_hosts(labels)
-        if available == 0:
-            raise error.NoHostsException('All hosts with %r are dead!' % labels)
-        elif num > available:
-            raise error.InadequateHostsException(
-                'Too few hosts with %r' % labels)
-
-
-    def clear_reimaged_host_state(self, build):
-        """
-        Clear per-host state created in the autotest DB for this job.
-
-        After reimaging a host, we label it and set some host attributes on it
-        that are then used by the suite scheduling code.  This call cleans
-        that up.
-
-        @param build: the build whose hosts we want to clean up e.g.
-                      x86-alex-release/R18-1655.0.0-a1-b1584.
-        """
-        for host in self._reimaged_hosts.get('build', []):
-            if not host.startswith('hostless'):
-                self._clear_build_state(host)
-
-
-    def _clear_build_state(self, machine):
-        """
-        Clear all build-specific labels, attributes from the target.
-
-        @param machine: the host to clear labels, attributes from.
-        """
-        self._afe.set_host_attribute(JOB_REPO_URL, None, hostname=machine)
-
-
-    def _record_job_if_possible(self, test_name, job):
-        """
-        Record job id as keyval, if possible, so it can be referenced later.
-
-        If |self._results_dir| is None, then this is a NOOP.
-
-        @param test_name: the test to record id/owner for.
-        @param job: the job object to pull info from.
-        """
-        if self._results_dir:
-            job_id_owner = '%s-%s' % (job.id, job.owner)
-            utils.write_keyval(
-                self._results_dir,
-                {hashlib.md5(test_name).hexdigest(): job_id_owner})
-
-
-    def _count_usable_hosts(self, host_spec):
-        """
-        Given a set of host labels, count the live hosts that have them all.
-
-        @param host_spec: list of labels specifying a set of hosts.
-        @return the number of live hosts that satisfy |host_spec|.
-        """
-        count = 0
-        for h in self._afe.get_hosts(multiple_labels=host_spec):
-            if h.status not in ['Repair Failed', 'Repairing']:
-                count += 1
-        return count
-
-
-    def _ensure_version_label(self, name):
-        """
-        Ensure that a label called |name| exists in the autotest DB.
-
-        @param name: the label to check for/create.
-        """
-        try:
-            self._afe.create_label(name=name)
-        except proxy.ValidationError as ve:
-            if ('name' in ve.problem_keys and
-                'This value must be unique' in ve.problem_keys['name']):
-                logging.debug('Version label %s already exists', name)
-            else:
-                raise ve
-
-
-    def _schedule_reimage_job(self, build, board, pool, num_machines):
-        """
-        Schedules the reimaging of |num_machines| |board| devices with |image|.
-
-        Sends an RPC to the autotest frontend to enqueue reimaging jobs on
-        |num_machines| devices of type |board|
-
-        @param build: the build to install (must be unique).
-        @param board: which kind of devices to reimage.
-        @param pool: the pool of machines to use for scheduling purposes.
-        @param num_machines: how many devices to reimage.
-        @return a frontend.Job object for the reimaging job we scheduled.
-        """
-        image_url = _image_url_pattern() % (
-            dev_server.DevServer.devserver_url_for_build(build), build)
-        control_file = inject_vars(
-            dict(image_url=image_url, image_name=build),
-            self._cf_getter.get_control_file_contents_by_name('autoupdate'))
-        job_deps = []
-        if pool:
-            meta_host = pool
-            board_label = board
-            job_deps.append(board_label)
-        else:
-            # No pool specified use board.
-            meta_host = board
-
-        return self._afe.create_job(control_file=control_file,
-                                    name=build + '-try',
-                                    control_type='Server',
-                                    priority='Low',
-                                    meta_hosts=[meta_host] * num_machines,
-                                    dependencies=job_deps)
-
-
-class Suite(object):
-    """
-    A suite of tests, defined by some predicate over control file variables.
-
-    Given a place to search for control files a predicate to match the desired
-    tests, can gather tests and fire off jobs to run them, and then wait for
-    results.
-
-    @var _predicate: a function that should return True when run over a
-         ControlData representation of a control file that should be in
-         this Suite.
-    @var _tag: a string with which to tag jobs run in this suite.
-    @var _build: the build on which we're running this suite.
-    @var _afe: an instance of AFE as defined in server/frontend.py.
-    @var _tko: an instance of TKO as defined in server/frontend.py.
-    @var _jobs: currently scheduled jobs, if any.
-    @var _cf_getter: a control_file_getter.ControlFileGetter
-    """
-
-
-    @staticmethod
-    def create_ds_getter(build):
-        """
-        @param build: the build on which we're running this suite.
-        @return a FileSystemGetter instance that looks under |autotest_dir|.
-        """
-        return control_file_getter.DevServerGetter(
-            build, dev_server.DevServer.create())
-
-
-    @staticmethod
-    def create_fs_getter(autotest_dir):
-        """
-        @param autotest_dir: the place to find autotests.
-        @return a FileSystemGetter instance that looks under |autotest_dir|.
-        """
-        # currently hard-coded places to look for tests.
-        subpaths = ['server/site_tests', 'client/site_tests',
-                    'server/tests', 'client/tests']
-        directories = [os.path.join(autotest_dir, p) for p in subpaths]
-        return control_file_getter.FileSystemGetter(directories)
-
-
-    @staticmethod
-    def parse_tag(tag):
-        """Splits a string on ',' optionally surrounded by whitespace."""
-        return map(lambda x: x.strip(), tag.split(','))
-
-
-    @staticmethod
-    def name_in_tag_predicate(name):
-        """Returns predicate that takes a control file and looks for |name|.
-
-        Builds a predicate that takes in a parsed control file (a ControlData)
-        and returns True if the SUITE tag is present and contains |name|.
-
-        @param name: the suite name to base the predicate on.
-        @return a callable that takes a ControlData and looks for |name| in that
-                ControlData object's suite member.
-        """
-        return lambda t: hasattr(t, 'suite') and \
-                         name in Suite.parse_tag(t.suite)
-
-
-    @staticmethod
-    def list_all_suites(build, cf_getter=None):
-        """
-        Parses all ControlData objects with a SUITE tag and extracts all
-        defined suite names.
-
-        @param cf_getter: control_file_getter.ControlFileGetter. Defaults to
-                          using DevServerGetter.
-
-        @return list of suites
-        """
-        if cf_getter is None:
-            cf_getter = Suite.create_ds_getter(build)
-
-        suites = set()
-        predicate = lambda t: hasattr(t, 'suite')
-        for test in Suite.find_and_parse_tests(cf_getter, predicate,
-                                               add_experimental=True):
-            suites.update(Suite.parse_tag(test.suite))
-        return list(suites)
-
-
-    @staticmethod
-    def create_from_name(name, build, cf_getter=None, afe=None, tko=None,
-                         pool=None, results_dir=None):
-        """
-        Create a Suite using a predicate based on the SUITE control file var.
-
-        Makes a predicate based on |name| and uses it to instantiate a Suite
-        that looks for tests in |autotest_dir| and will schedule them using
-        |afe|.  Pulls control files from the default dev server.
-        Results will be pulled from |tko| upon completion.
-
-        @param name: a value of the SUITE control file variable to search for.
-        @param build: the build on which we're running this suite.
-        @param cf_getter: a control_file_getter.ControlFileGetter.
-                          If None, default to using a DevServerGetter.
-        @param afe: an instance of AFE as defined in server/frontend.py.
-        @param tko: an instance of TKO as defined in server/frontend.py.
-        @param pool: Specify the pool of machines to use for scheduling
-                     purposes.
-        @param results_dir: The directory where the job can write results to.
-                            This must be set if you want job_id of sub-jobs
-                            list in the job keyvals.
-        @return a Suite instance.
-        """
-        if cf_getter is None:
-            cf_getter = Suite.create_ds_getter(build)
-        return Suite(Suite.name_in_tag_predicate(name),
-                     name, build, cf_getter, afe, tko, pool, results_dir)
-
-
-    def __init__(self, predicate, tag, build, cf_getter, afe=None, tko=None,
-                 pool=None, results_dir=None):
-        """
-        Constructor
-
-        @param predicate: a function that should return True when run over a
-               ControlData representation of a control file that should be in
-               this Suite.
-        @param tag: a string with which to tag jobs run in this suite.
-        @param build: the build on which we're running this suite.
-        @param cf_getter: a control_file_getter.ControlFileGetter
-        @param afe: an instance of AFE as defined in server/frontend.py.
-        @param tko: an instance of TKO as defined in server/frontend.py.
-        @param pool: Specify the pool of machines to use for scheduling
-                purposes.
-        @param results_dir: The directory where the job can write results to.
-                            This must be set if you want job_id of sub-jobs
-                            list in the job keyvals.
-        """
-        self._predicate = predicate
-        self._tag = tag
-        self._build = build
-        self._cf_getter = cf_getter
-        self._results_dir = results_dir
-        self._afe = afe or frontend_wrappers.RetryingAFE(timeout_min=30,
-                                                         delay_sec=10,
-                                                         debug=False)
-        self._tko = tko or frontend_wrappers.RetryingTKO(timeout_min=30,
-                                                         delay_sec=10,
-                                                         debug=False)
-        self._pool = pool
-        self._jobs = []
-        self._tests = Suite.find_and_parse_tests(self._cf_getter,
-                                                 self._predicate,
-                                                 add_experimental=True)
-
-
-    @property
-    def tests(self):
-        """
-        A list of ControlData objects in the suite, with added |text| attr.
-        """
-        return self._tests
-
-
-    def stable_tests(self):
-        """
-        |self.tests|, filtered for non-experimental tests.
-        """
-        return filter(lambda t: not t.experimental, self.tests)
-
-
-    def unstable_tests(self):
-        """
-        |self.tests|, filtered for experimental tests.
-        """
-        return filter(lambda t: t.experimental, self.tests)
-
-
-    def _create_job(self, test):
-        """
-        Thin wrapper around frontend.AFE.create_job().
-
-        @param test: ControlData object for a test to run.
-        @return a frontend.Job object with an added test_name member.
-                test_name is used to preserve the higher level TEST_NAME
-                name of the job.
-        """
-        job_deps = []  # TODO(cmasone): init from test.dependencies.
-        if self._pool:
-            meta_hosts = self._pool
-            cros_label = VERSION_PREFIX + self._build
-            job_deps.append(cros_label)
-        else:
-            # No pool specified use any machines with the following label.
-            meta_hosts = VERSION_PREFIX + self._build
-        test_obj = self._afe.create_job(
-            control_file=test.text,
-            name='/'.join([self._build, self._tag, test.name]),
-            control_type=test.test_type.capitalize(),
-            meta_hosts=[meta_hosts],
-            dependencies=job_deps,
-            keyvals={JOB_BUILD_KEY: self._build, JOB_SUITE_KEY: self._tag})
-
-        setattr(test_obj, 'test_name', test.name)
-
-        return test_obj
-
-
-    def run_and_wait(self, record, manager, add_experimental=True):
-        """
-        Synchronously run tests in |self.tests|.
-
-        Schedules tests against a device running image |self._build|, and
-        then polls for status, using |record| to print status when each
-        completes.
-
-        Tests returned by self.stable_tests() will always be run, while tests
-        in self.unstable_tests() will only be run if |add_experimental| is true.
-
-        @param record: callable that records job status.
-                 prototype:
-                   record(base_job.status_log_entry)
-        @param add_experimental: schedule experimental tests as well, or not.
-        """
-        logging.debug('Discovered %d stable tests.', len(self.stable_tests()))
-        logging.debug('Discovered %d unstable tests.',
-                      len(self.unstable_tests()))
-        try:
-            Status('INFO', 'Start %s' % self._tag).record_result(record)
-            self.schedule(add_experimental)
-            # Unlock all hosts, so test jobs can be run on them.
-            manager.unlock()
-            try:
-                for result in job_status.wait_for_results(self._afe,
-                                                          self._tko,
-                                                          self._jobs):
-                    result.record_all(record)
-
-            except Exception as e:
-                logging.error(traceback.format_exc())
-                Status('FAIL', self._tag,
-                       'Exception waiting for results').record_result(record)
-        except Exception as e:
-            logging.error(traceback.format_exc())
-            Status('FAIL', self._tag,
-                   'Exception while scheduling suite').record_result(record)
-        # Sanity check
-        tests_at_end = self.find_and_parse_tests(self._cf_getter,
-                                                 self._predicate,
-                                                 add_experimental=True)
-        if len(self.tests) != len(tests_at_end):
-            msg = 'Dev Server enumerated %d tests at start, %d at end.' % (
-                len(self.tests), len(tests_at_end))
-            Status('FAIL', self._tag, msg).record_result(record)
-
-
-    def schedule(self, add_experimental=True):
-        """
-        Schedule jobs using |self._afe|.
-
-        frontend.Job objects representing each scheduled job will be put in
-        |self._jobs|.
-
-        @param add_experimental: schedule experimental tests as well, or not.
-        """
-        for test in self.stable_tests():
-            logging.debug('Scheduling %s', test.name)
-            self._jobs.append(self._create_job(test))
-
-        if add_experimental:
-            for test in self.unstable_tests():
-                logging.debug('Scheduling experimental %s', test.name)
-                test.name = EXPERIMENTAL_PREFIX + test.name
-                self._jobs.append(self._create_job(test))
-        if self._results_dir:
-            self._record_scheduled_jobs()
-
-
-    def _record_scheduled_jobs(self):
-        """
-        Record scheduled job ids as keyvals, so they can be referenced later.
-        """
-        for job in self._jobs:
-            job_id_owner = '%s-%s' % (job.id, job.owner)
-            utils.write_keyval(
-                self._results_dir,
-                {hashlib.md5(job.test_name).hexdigest(): job_id_owner})
-
-
-    @staticmethod
-    def find_and_parse_tests(cf_getter, predicate, add_experimental=False):
-        """
-        Function to scan through all tests and find eligible tests.
-
-        Looks at control files returned by _cf_getter.get_control_file_list()
-        for tests that pass self._predicate().
-
-        @param cf_getter: a control_file_getter.ControlFileGetter used to list
-               and fetch the content of control files
-        @param predicate: a function that should return True when run over a
-               ControlData representation of a control file that should be in
-               this Suite.
-        @param add_experimental: add tests with experimental attribute set.
-
-        @return list of ControlData objects that should be run, with control
-                file text added in |text| attribute.
-        """
-        tests = {}
-        files = cf_getter.get_control_file_list()
-        matcher = re.compile(r'[^/]+/(deps|profilers)/.+')
-        for file in filter(lambda f: not matcher.match(f), files):
-            logging.debug('Considering %s', file)
-            text = cf_getter.get_control_file_contents(file)
-            try:
-                found_test = control_data.parse_control_string(
-                        text, raise_warnings=True)
-                if not add_experimental and found_test.experimental:
-                    continue
-
-                found_test.text = text
-                found_test.path = file
-                tests[file] = found_test
-            except control_data.ControlVariableException, e:
-                logging.warn("Skipping %s\n%s", file, e)
-            except Exception, e:
-                logging.error("Bad %s\n%s", file, e)
-
-        return [test for test in tests.itervalues() if predicate(test)]
diff --git a/server/cros/dynamic_suite/dynamic_suite_unittest.py b/server/cros/dynamic_suite/dynamic_suite_unittest.py
index 7f5b6db..e4abce4 100755
--- a/server/cros/dynamic_suite/dynamic_suite_unittest.py
+++ b/server/cros/dynamic_suite/dynamic_suite_unittest.py
@@ -9,66 +9,13 @@
 import logging
 import mox
 import os
-import random
-import shutil
 import signal
-import tempfile
-import time
 import unittest
 
-from autotest_lib.client.common_lib import base_job, control_data, error
-from autotest_lib.client.common_lib import global_config
+from autotest_lib.client.common_lib import base_job, error
 from autotest_lib.client.common_lib.cros import dev_server
-from autotest_lib.frontend.afe.json_rpc import proxy
-from autotest_lib.server.cros.dynamic_suite import control_file_getter
 from autotest_lib.server.cros.dynamic_suite import dynamic_suite
-from autotest_lib.server.cros.dynamic_suite import host_lock_manager, job_status
-from autotest_lib.server.cros.dynamic_suite.fakes import FakeControlData
-from autotest_lib.server.cros.dynamic_suite.fakes import FakeHost, FakeJob
-from autotest_lib.server.cros.dynamic_suite.fakes import FakeLabel
-from autotest_lib.server import frontend
-
-
-class StatusContains(mox.Comparator):
-    @staticmethod
-    def CreateFromStrings(status=None, test_name=None, reason=None):
-        status_comp = mox.StrContains(status) if status else mox.IgnoreArg()
-        name_comp = mox.StrContains(test_name) if test_name else mox.IgnoreArg()
-        reason_comp = mox.StrContains(reason) if reason else mox.IgnoreArg()
-        return StatusContains(status_comp, name_comp, reason_comp)
-
-
-    def __init__(self, status=mox.IgnoreArg(), test_name=mox.IgnoreArg(),
-                 reason=mox.IgnoreArg()):
-        """Initialize.
-
-        Takes mox.Comparator objects to apply to dynamic_suite.Status
-        member variables.
-
-        @param status: status code, e.g. 'INFO', 'START', etc.
-        @param test_name: expected test name.
-        @param reason: expected reason
-        """
-        self._status = status
-        self._test_name = test_name
-        self._reason = reason
-
-
-    def equals(self, rhs):
-        """Check to see if fields match base_job.status_log_entry obj in rhs.
-
-        @param rhs: base_job.status_log_entry object to match.
-        @return boolean
-        """
-        return (self._status.equals(rhs.status_code) and
-                self._test_name.equals(rhs.operation) and
-                self._reason.equals(rhs.message))
-
-
-    def __repr__(self):
-        return '<Status containing \'%s\t%s\t%s\'>' % (self._status,
-                                                       self._test_name,
-                                                       self._reason)
+from autotest_lib.server.cros.dynamic_suite import host_lock_manager
 
 
 class DynamicSuiteTest(mox.MoxTestBase):
@@ -182,606 +129,3 @@
             self.assertRaises(error.SignalException,
                               dynamic_suite._perform_reimage_and_run,
                               spec, None, None, None, manager)
-
-
-class ReimagerTest(mox.MoxTestBase):
-    """Unit tests for dynamic_suite.Reimager.
-
-    @var _URL: fake image url
-    @var _BUILD: fake build
-    @var _NUM: fake number of machines to run on
-    @var _BOARD: fake board to reimage
-    """
-
-    _DEVSERVER_URL = 'http://nothing:8082'
-    _URL = '%s/%s'
-    _BUILD = 'build'
-    _NUM = 4
-    _BOARD = 'board'
-    _CONFIG = global_config.global_config
-
-
-    def setUp(self):
-        super(ReimagerTest, self).setUp()
-        self.afe = self.mox.CreateMock(frontend.AFE)
-        self.tko = self.mox.CreateMock(frontend.TKO)
-        self.manager = self.mox.CreateMock(host_lock_manager.HostLockManager)
-        self.reimager = dynamic_suite.Reimager('', afe=self.afe, tko=self.tko)
-        self._CONFIG.override_config_value('CROS',
-                                           'sharding_factor',
-                                           "%d" % self._NUM)
-
-
-    def testEnsureVersionLabelAlreadyExists(self):
-        """Should tolerate a label that already exists."""
-        name = 'label'
-        error = proxy.ValidationError(
-            {'name': 'ValidationError',
-             'message': '{"name": "This value must be unique"}',
-             'traceback': ''},
-            'BAD')
-        self.afe.create_label(name=name).AndRaise(error)
-        self.mox.ReplayAll()
-        self.reimager._ensure_version_label(name)
-
-
-    def testEnsureVersionLabel(self):
-        """Should create a label if it doesn't already exist."""
-        name = 'label'
-        self.afe.create_label(name=name)
-        self.mox.ReplayAll()
-        self.reimager._ensure_version_label(name)
-
-
-    def testCountHostsByBoardAndPool(self):
-        """Should count available hosts by board and pool."""
-        spec = [self._BOARD, 'pool:bvt']
-        self.afe.get_hosts(multiple_labels=spec).AndReturn([FakeHost()])
-        self.mox.ReplayAll()
-        self.assertEquals(self.reimager._count_usable_hosts(spec), 1)
-
-
-    def testCountHostsByBoard(self):
-        """Should count available hosts by board."""
-        spec = [self._BOARD]
-        self.afe.get_hosts(multiple_labels=spec).AndReturn([FakeHost()] * 2)
-        self.mox.ReplayAll()
-        self.assertEquals(self.reimager._count_usable_hosts(spec), 2)
-
-
-    def testCountZeroHostsByBoard(self):
-        """Should count the available hosts, by board, getting zero."""
-        spec = [self._BOARD]
-        self.afe.get_hosts(multiple_labels=spec).AndReturn([])
-        self.mox.ReplayAll()
-        self.assertEquals(self.reimager._count_usable_hosts(spec), 0)
-
-
-    def testInjectVars(self):
-        """Should inject dict of varibles into provided strings."""
-        def find_all_in(d, s):
-            """Returns true if all key-value pairs in |d| are printed in |s|."""
-            for k, v in d.iteritems():
-                if isinstance(v, str):
-                    if "%s='%s'\n" % (k, v) not in s:
-                        return False
-                else:
-                    if "%s=%r\n" % (k, v) not in s:
-                        return False
-            return True
-
-        v = {'v1': 'one', 'v2': 'two', 'v3': None, 'v4': False, 'v5': 5}
-        self.assertTrue(find_all_in(v, dynamic_suite.inject_vars(v, '')))
-        self.assertTrue(find_all_in(v, dynamic_suite.inject_vars(v, 'ctrl')))
-
-
-    def testScheduleJob(self):
-        """Should be able to create a job with the AFE."""
-        # Fake out getting the autoupdate control file contents.
-        cf_getter = self.mox.CreateMock(control_file_getter.ControlFileGetter)
-        cf_getter.get_control_file_contents_by_name('autoupdate').AndReturn('')
-        self.reimager._cf_getter = cf_getter
-        self._CONFIG.override_config_value('CROS',
-                                           'dev_server',
-                                           self._DEVSERVER_URL)
-        self._CONFIG.override_config_value('CROS',
-                                           'image_url_pattern',
-                                           self._URL)
-        self.afe.create_job(
-            control_file=mox.And(
-                mox.StrContains(self._BUILD),
-                mox.StrContains(self._URL % (self._DEVSERVER_URL,
-                                             self._BUILD))),
-            name=mox.StrContains(self._BUILD),
-            control_type='Server',
-            meta_hosts=[self._BOARD] * self._NUM,
-            dependencies=[],
-            priority='Low')
-        self.mox.ReplayAll()
-        self.reimager._schedule_reimage_job(self._BUILD, self._BOARD, None,
-                                            self._NUM)
-
-    def testPackageUrl(self):
-        """Should be able to get the package_url for any build."""
-        self._CONFIG.override_config_value('CROS',
-                                           'dev_server',
-                                           self._DEVSERVER_URL)
-        self._CONFIG.override_config_value('CROS',
-                                           'package_url_pattern',
-                                           self._URL)
-        self.mox.ReplayAll()
-        package_url = dynamic_suite.get_package_url(self._BUILD)
-        self.assertEqual(package_url, self._URL % (self._DEVSERVER_URL,
-                                                   self._BUILD))
-
-    def expect_attempt(self, canary_job, statuses, ex=None, check_hosts=True):
-        """Sets up |self.reimager| to expect an attempt() that returns |success|
-
-        Also stubs out Reimager._clear_build_state(), should the caller wish
-        to set an expectation there as well.
-
-        @param canary_job: a FakeJob representing the job we're expecting.
-        @param statuses: dict mapping a hostname to its job_status.Status.
-                         Will be returned by job_status.gather_per_host_results
-        @param ex: if not None, |ex| is raised by get_jobs()
-        @return a FakeJob configured with appropriate expectations
-        """
-        self.mox.StubOutWithMock(self.reimager, '_ensure_version_label')
-        self.mox.StubOutWithMock(self.reimager, '_schedule_reimage_job')
-        self.mox.StubOutWithMock(self.reimager, '_count_usable_hosts')
-        self.mox.StubOutWithMock(self.reimager, '_clear_build_state')
-
-        self.mox.StubOutWithMock(job_status, 'wait_for_jobs_to_start')
-        self.mox.StubOutWithMock(job_status, 'wait_for_and_lock_job_hosts')
-        self.mox.StubOutWithMock(job_status, 'gather_job_hostnames')
-        self.mox.StubOutWithMock(job_status, 'wait_for_jobs_to_finish')
-        self.mox.StubOutWithMock(job_status, 'gather_per_host_results')
-        self.mox.StubOutWithMock(job_status, 'record_and_report_results')
-
-        self.reimager._ensure_version_label(mox.StrContains(self._BUILD))
-        self.reimager._schedule_reimage_job(self._BUILD,
-                                            self._BOARD,
-                                            None,
-                                            self._NUM).AndReturn(canary_job)
-        if check_hosts:
-            self.reimager._count_usable_hosts(
-                mox.IgnoreArg()).AndReturn(self._NUM)
-
-        job_status.wait_for_jobs_to_start(self.afe, [canary_job])
-        job_status.wait_for_and_lock_job_hosts(
-            self.afe, [canary_job], self.manager).AndReturn(statuses.keys())
-
-        if ex:
-            job_status.wait_for_jobs_to_finish(self.afe,
-                                               [canary_job]).AndRaise(ex)
-        else:
-            job_status.wait_for_jobs_to_finish(self.afe, [canary_job])
-            job_status.gather_per_host_results(
-                    mox.IgnoreArg(), mox.IgnoreArg(), [canary_job],
-                    mox.StrContains(dynamic_suite.REIMAGE_JOB_NAME)).AndReturn(
-                            statuses)
-
-        if statuses:
-            ret_val = reduce(lambda v, s: v and s.is_good(),
-                             statuses.values(), True)
-            job_status.record_and_report_results(
-                statuses.values(), mox.IgnoreArg()).AndReturn(ret_val)
-
-
-    def testSuccessfulReimage(self):
-        """Should attempt a reimage and record success."""
-        canary = FakeJob()
-        statuses = {canary.hostnames[0]: job_status.Status('GOOD',
-                                                           canary.hostnames[0])}
-        self.expect_attempt(canary, statuses)
-
-        rjob = self.mox.CreateMock(base_job.base_job)
-        self.reimager._clear_build_state(mox.StrContains(canary.hostnames[0]))
-        self.mox.ReplayAll()
-        self.assertTrue(self.reimager.attempt(self._BUILD, self._BOARD, None,
-                                              rjob.record_entry, True,
-                                              self.manager))
-        self.reimager.clear_reimaged_host_state(self._BUILD)
-
-
-    def testFailedReimage(self):
-        """Should attempt a reimage and record failure."""
-        canary = FakeJob()
-        statuses = {canary.hostnames[0]: job_status.Status('FAIL',
-                                                           canary.hostnames[0])}
-        self.expect_attempt(canary, statuses)
-
-        rjob = self.mox.CreateMock(base_job.base_job)
-        self.reimager._clear_build_state(mox.StrContains(canary.hostnames[0]))
-        self.mox.ReplayAll()
-        self.assertFalse(self.reimager.attempt(self._BUILD, self._BOARD, None,
-                                               rjob.record_entry, True,
-                                               self.manager))
-        self.reimager.clear_reimaged_host_state(self._BUILD)
-
-
-    def testReimageThatNeverHappened(self):
-        """Should attempt a reimage and record that it didn't run."""
-        canary = FakeJob()
-        statuses = {'hostless': job_status.Status('ABORT', 'big_job_name')}
-        self.expect_attempt(canary, statuses)
-
-        rjob = self.mox.CreateMock(base_job.base_job)
-        self.mox.ReplayAll()
-        self.reimager.attempt(self._BUILD, self._BOARD, None,
-                              rjob.record_entry, True, self.manager)
-        self.reimager.clear_reimaged_host_state(self._BUILD)
-
-
-    def testReimageThatRaised(self):
-        """Should attempt a reimage that raises an exception and record that."""
-        canary = FakeJob()
-        ex_message = 'Oh no!'
-        self.expect_attempt(canary, statuses={}, ex=Exception(ex_message))
-
-        rjob = self.mox.CreateMock(base_job.base_job)
-        rjob.record_entry(StatusContains.CreateFromStrings('START'))
-        rjob.record_entry(StatusContains.CreateFromStrings('ERROR',
-                                                           reason=ex_message))
-        rjob.record_entry(StatusContains.CreateFromStrings('END ERROR'))
-        self.mox.ReplayAll()
-        self.reimager.attempt(self._BUILD, self._BOARD, None,
-                              rjob.record_entry, True, self.manager)
-        self.reimager.clear_reimaged_host_state(self._BUILD)
-
-
-    def testSuccessfulReimageThatCouldNotScheduleRightAway(self):
-        """Should attempt reimage, ignoring host availability; record success.
-        """
-        canary = FakeJob()
-        statuses = {canary.hostnames[0]: job_status.Status('GOOD',
-                                                           canary.hostnames[0])}
-        self.expect_attempt(canary, statuses, check_hosts=False)
-
-        rjob = self.mox.CreateMock(base_job.base_job)
-        self.reimager._clear_build_state(mox.StrContains(canary.hostnames[0]))
-        self.mox.ReplayAll()
-        self.assertTrue(self.reimager.attempt(self._BUILD, self._BOARD, None,
-                                              rjob.record_entry, False,
-                                              self.manager))
-        self.reimager.clear_reimaged_host_state(self._BUILD)
-
-
-    def testReimageThatCouldNotSchedule(self):
-        """Should attempt a reimage that can't be scheduled."""
-        self.mox.StubOutWithMock(self.reimager, '_ensure_version_label')
-        self.reimager._ensure_version_label(mox.StrContains(self._BUILD))
-
-        self.mox.StubOutWithMock(self.reimager, '_count_usable_hosts')
-        self.reimager._count_usable_hosts(mox.IgnoreArg()).AndReturn(1)
-
-        rjob = self.mox.CreateMock(base_job.base_job)
-        rjob.record_entry(StatusContains.CreateFromStrings('START'))
-        rjob.record_entry(
-            StatusContains.CreateFromStrings('WARN', reason='Too few hosts'))
-        rjob.record_entry(StatusContains.CreateFromStrings('END WARN'))
-        self.mox.ReplayAll()
-        self.reimager.attempt(self._BUILD, self._BOARD, None,
-                              rjob.record_entry, True, self.manager)
-        self.reimager.clear_reimaged_host_state(self._BUILD)
-
-
-    def testReimageWithNoAvailableHosts(self):
-        """Should attempt a reimage while all hosts are dead."""
-        self.mox.StubOutWithMock(self.reimager, '_ensure_version_label')
-        self.reimager._ensure_version_label(mox.StrContains(self._BUILD))
-
-        self.mox.StubOutWithMock(self.reimager, '_count_usable_hosts')
-        self.reimager._count_usable_hosts(mox.IgnoreArg()).AndReturn(0)
-
-        rjob = self.mox.CreateMock(base_job.base_job)
-        rjob.record_entry(StatusContains.CreateFromStrings('START'))
-        rjob.record_entry(StatusContains.CreateFromStrings('ERROR',
-                                                           reason='All hosts'))
-        rjob.record_entry(StatusContains.CreateFromStrings('END ERROR'))
-        self.mox.ReplayAll()
-        self.reimager.attempt(self._BUILD, self._BOARD, None,
-                              rjob.record_entry, True, self.manager)
-        self.reimager.clear_reimaged_host_state(self._BUILD)
-
-
-class SuiteTest(mox.MoxTestBase):
-    """Unit tests for dynamic_suite.Suite.
-
-    @var _BUILD: fake build
-    @var _TAG: fake suite tag
-    """
-
-    _BUILD = 'build'
-    _TAG = 'suite_tag'
-
-
-    def setUp(self):
-        super(SuiteTest, self).setUp()
-        self.afe = self.mox.CreateMock(frontend.AFE)
-        self.tko = self.mox.CreateMock(frontend.TKO)
-
-        self.tmpdir = tempfile.mkdtemp(suffix=type(self).__name__)
-
-        self.manager = self.mox.CreateMock(host_lock_manager.HostLockManager)
-        self.getter = self.mox.CreateMock(control_file_getter.ControlFileGetter)
-
-        self.files = {'one': FakeControlData(self._TAG, 'data_one', expr=True),
-                      'two': FakeControlData(self._TAG, 'data_two'),
-                      'three': FakeControlData(self._TAG, 'data_three')}
-
-        self.files_to_filter = {
-            'with/deps/...': FakeControlData(self._TAG, 'gets filtered'),
-            'with/profilers/...': FakeControlData(self._TAG, 'gets filtered')}
-
-
-    def tearDown(self):
-        super(SuiteTest, self).tearDown()
-        shutil.rmtree(self.tmpdir, ignore_errors=True)
-
-
-    def expect_control_file_parsing(self):
-        """Expect an attempt to parse the 'control files' in |self.files|."""
-        all_files = self.files.keys() + self.files_to_filter.keys()
-        self._set_control_file_parsing_expectations(False, all_files,
-                                                    self.files.iteritems())
-
-
-    def expect_control_file_reparsing(self):
-        """Expect re-parsing the 'control files' in |self.files|."""
-        all_files = self.files.keys() + self.files_to_filter.keys()
-        self._set_control_file_parsing_expectations(True, all_files,
-                                                    self.files.iteritems())
-
-
-    def expect_racy_control_file_reparsing(self, new_files):
-        """Expect re-fetching and parsing of control files to return extra.
-
-        @param new_files: extra control files that showed up during scheduling.
-        """
-        all_files = (self.files.keys() + self.files_to_filter.keys() +
-                     new_files.keys())
-        new_files.update(self.files)
-        self._set_control_file_parsing_expectations(True, all_files,
-                                                    new_files.iteritems())
-
-
-    def _set_control_file_parsing_expectations(self, already_stubbed,
-                                               file_list, files_to_parse):
-        """Expect an attempt to parse the 'control files' in |files|.
-
-        @param already_stubbed: parse_control_string already stubbed out.
-        @param file_list: the files the dev server returns
-        @param files_to_parse: the {'name': FakeControlData} dict of files we
-                               expect to get parsed.
-        """
-        if not already_stubbed:
-            self.mox.StubOutWithMock(control_data, 'parse_control_string')
-
-        self.getter.get_control_file_list().AndReturn(file_list)
-        for file, data in files_to_parse:
-            self.getter.get_control_file_contents(
-                file).InAnyOrder().AndReturn(data.string)
-            control_data.parse_control_string(
-                data.string, raise_warnings=True).InAnyOrder().AndReturn(data)
-
-
-    def testFindAndParseStableTests(self):
-        """Should find only non-experimental tests that match a predicate."""
-        self.expect_control_file_parsing()
-        self.mox.ReplayAll()
-
-        predicate = lambda d: d.text == self.files['two'].string
-        tests = dynamic_suite.Suite.find_and_parse_tests(self.getter, predicate)
-        self.assertEquals(len(tests), 1)
-        self.assertEquals(tests[0], self.files['two'])
-
-
-    def testFindAndParseTests(self):
-        """Should find all tests that match a predicate."""
-        self.expect_control_file_parsing()
-        self.mox.ReplayAll()
-
-        predicate = lambda d: d.text != self.files['two'].string
-        tests = dynamic_suite.Suite.find_and_parse_tests(self.getter,
-                                                         predicate,
-                                                         add_experimental=True)
-        self.assertEquals(len(tests), 2)
-        self.assertTrue(self.files['one'] in tests)
-        self.assertTrue(self.files['three'] in tests)
-
-
-    def mock_control_file_parsing(self):
-        """Fake out find_and_parse_tests(), returning content from |self.files|.
-        """
-        for test in self.files.values():
-            test.text = test.string  # mimic parsing.
-        self.mox.StubOutWithMock(dynamic_suite.Suite, 'find_and_parse_tests')
-        dynamic_suite.Suite.find_and_parse_tests(
-            mox.IgnoreArg(),
-            mox.IgnoreArg(),
-            add_experimental=True).AndReturn(self.files.values())
-
-
-    def testStableUnstableFilter(self):
-        """Should distinguish between experimental and stable tests."""
-        self.mock_control_file_parsing()
-        self.mox.ReplayAll()
-        suite = dynamic_suite.Suite.create_from_name(self._TAG, self.tmpdir,
-                                                     afe=self.afe, tko=self.tko)
-
-        self.assertTrue(self.files['one'] in suite.tests)
-        self.assertTrue(self.files['two'] in suite.tests)
-        self.assertTrue(self.files['one'] in suite.unstable_tests())
-        self.assertTrue(self.files['two'] in suite.stable_tests())
-        self.assertFalse(self.files['one'] in suite.stable_tests())
-        self.assertFalse(self.files['two'] in suite.unstable_tests())
-
-
-    def expect_job_scheduling(self, add_experimental):
-        """Expect jobs to be scheduled for 'tests' in |self.files|.
-
-        @param add_experimental: expect jobs for experimental tests as well.
-        """
-        for test in self.files.values():
-            if not add_experimental and test.experimental:
-                continue
-            self.afe.create_job(
-                control_file=test.text,
-                name=mox.And(mox.StrContains(self._BUILD),
-                             mox.StrContains(test.name)),
-                control_type=mox.IgnoreArg(),
-                meta_hosts=[dynamic_suite.VERSION_PREFIX + self._BUILD],
-                dependencies=[],
-                keyvals={'build': self._BUILD, 'suite': self._TAG}
-                ).AndReturn(FakeJob())
-
-
-    def testScheduleTests(self):
-        """Should schedule stable and experimental tests with the AFE."""
-        self.mock_control_file_parsing()
-        self.expect_job_scheduling(add_experimental=True)
-
-        self.mox.ReplayAll()
-        suite = dynamic_suite.Suite.create_from_name(self._TAG, self._BUILD,
-                                                     afe=self.afe, tko=self.tko)
-        suite.schedule()
-
-
-    def testScheduleTestsAndRecord(self):
-        """Should schedule stable and experimental tests with the AFE."""
-        self.mock_control_file_parsing()
-        self.mox.ReplayAll()
-        suite = dynamic_suite.Suite.create_from_name(self._TAG, self._BUILD,
-                                                     afe=self.afe, tko=self.tko,
-                                                     results_dir=self.tmpdir)
-        self.mox.ResetAll()
-        self.expect_job_scheduling(add_experimental=True)
-        self.mox.StubOutWithMock(suite, '_record_scheduled_jobs')
-        suite._record_scheduled_jobs()
-        self.mox.ReplayAll()
-        suite.schedule()
-        for job in  suite._jobs:
-          self.assertTrue(hasattr(job, 'test_name'))
-
-
-    def testScheduleStableTests(self):
-        """Should schedule only stable tests with the AFE."""
-        self.mock_control_file_parsing()
-        self.expect_job_scheduling(add_experimental=False)
-
-        self.mox.ReplayAll()
-        suite = dynamic_suite.Suite.create_from_name(self._TAG, self._BUILD,
-                                                     afe=self.afe, tko=self.tko)
-        suite.schedule(add_experimental=False)
-
-
-    def _createSuiteWithMockedTestsAndControlFiles(self):
-        """Create a Suite, using mocked tests and control file contents.
-
-        @return Suite object, after mocking out behavior needed to create it.
-        """
-        self.expect_control_file_parsing()
-        self.mox.ReplayAll()
-        suite = dynamic_suite.Suite.create_from_name(self._TAG, self._BUILD,
-                                                     self.getter, self.afe,
-                                                     self.tko)
-        self.mox.ResetAll()
-        return suite
-
-
-    def schedule_and_expect_these_results(self, suite, results, recorder):
-        self.mox.StubOutWithMock(suite, 'schedule')
-        suite.schedule(True)
-        self.manager.unlock()
-        for result in results:
-            status = result[0]
-            test_name = result[1]
-            recorder.record_entry(
-                StatusContains.CreateFromStrings('START', test_name))
-            recorder.record_entry(
-                StatusContains.CreateFromStrings(*result)).InAnyOrder('results')
-            recorder.record_entry(
-                StatusContains.CreateFromStrings('END %s' % status, test_name))
-        self.mox.StubOutWithMock(job_status, 'wait_for_results')
-        job_status.wait_for_results(self.afe, self.tko, suite._jobs).AndReturn(
-            map(lambda r: job_status.Status(*r), results))
-
-
-    def testRunAndWaitSuccess(self):
-        """Should record successful results."""
-        suite = self._createSuiteWithMockedTestsAndControlFiles()
-
-        recorder = self.mox.CreateMock(base_job.base_job)
-        recorder.record_entry(
-            StatusContains.CreateFromStrings('INFO', 'Start %s' % self._TAG))
-
-        results = [('GOOD', 'good'), ('FAIL', 'bad', 'reason')]
-        self.schedule_and_expect_these_results(suite, results, recorder)
-        self.expect_control_file_reparsing()
-        self.mox.ReplayAll()
-
-        suite.run_and_wait(recorder.record_entry, self.manager, True)
-
-
-    def testRunAndWaitFailure(self):
-        """Should record failure to gather results."""
-        suite = self._createSuiteWithMockedTestsAndControlFiles()
-
-        recorder = self.mox.CreateMock(base_job.base_job)
-        recorder.record_entry(
-            StatusContains.CreateFromStrings('INFO', 'Start %s' % self._TAG))
-        recorder.record_entry(
-            StatusContains.CreateFromStrings('FAIL', self._TAG, 'waiting'))
-
-        self.mox.StubOutWithMock(suite, 'schedule')
-        suite.schedule(True)
-        self.manager.unlock()
-        self.mox.StubOutWithMock(job_status, 'wait_for_results')
-        job_status.wait_for_results(mox.IgnoreArg(),
-                                    mox.IgnoreArg(),
-                                    mox.IgnoreArg()).AndRaise(
-                                            Exception('Expected during test.'))
-        self.expect_control_file_reparsing()
-        self.mox.ReplayAll()
-
-        suite.run_and_wait(recorder.record_entry, self.manager, True)
-
-
-    def testRunAndWaitScheduleFailure(self):
-        """Should record failure to schedule jobs."""
-        suite = self._createSuiteWithMockedTestsAndControlFiles()
-
-        recorder = self.mox.CreateMock(base_job.base_job)
-        recorder.record_entry(
-            StatusContains.CreateFromStrings('INFO', 'Start %s' % self._TAG))
-        recorder.record_entry(
-            StatusContains.CreateFromStrings('FAIL', self._TAG, 'scheduling'))
-
-        self.mox.StubOutWithMock(suite, 'schedule')
-        suite.schedule(True).AndRaise(Exception('Expected during test.'))
-        self.expect_control_file_reparsing()
-        self.mox.ReplayAll()
-
-        suite.run_and_wait(recorder.record_entry, self.manager, True)
-
-
-    def testRunAndWaitDevServerRacyFailure(self):
-        """Should record discovery of dev server races in listing files."""
-        suite = self._createSuiteWithMockedTestsAndControlFiles()
-
-        recorder = self.mox.CreateMock(base_job.base_job)
-        recorder.record_entry(
-            StatusContains.CreateFromStrings('INFO', 'Start %s' % self._TAG))
-
-        results = [('GOOD', 'good'), ('FAIL', 'bad', 'reason')]
-        self.schedule_and_expect_these_results(suite, results, recorder)
-
-        self.expect_racy_control_file_reparsing(
-            {'new': FakeControlData(self._TAG, '!')})
-
-        recorder.record_entry(
-            StatusContains.CreateFromStrings('FAIL', self._TAG, 'Dev Server'))
-        self.mox.ReplayAll()
-
-        suite.run_and_wait(recorder.record_entry, self.manager, True)
diff --git a/server/cros/dynamic_suite/reimager.py b/server/cros/dynamic_suite/reimager.py
new file mode 100644
index 0000000..9d135c0
--- /dev/null
+++ b/server/cros/dynamic_suite/reimager.py
@@ -0,0 +1,269 @@
+# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import compiler, datetime, hashlib, logging, os
+
+import common
+
+from autotest_lib.client.common_lib import control_data, global_config
+from autotest_lib.client.common_lib import error, utils
+from autotest_lib.client.common_lib.cros import dev_server
+from autotest_lib.server.cros.dynamic_suite import constants
+from autotest_lib.server.cros.dynamic_suite import control_file_getter
+from autotest_lib.server.cros.dynamic_suite import frontend_wrappers
+from autotest_lib.server.cros.dynamic_suite import host_lock_manager, job_status
+from autotest_lib.server.cros.dynamic_suite import tools
+from autotest_lib.server.cros.dynamic_suite.job_status import Status
+from autotest_lib.server import frontend
+from autotest_lib.frontend.afe.json_rpc import proxy
+
+
+class Reimager(object):
+    """
+    A class that can run jobs to reimage devices.
+
+    @var _afe: a frontend.AFE instance used to talk to autotest.
+    @var _tko: a frontend.TKO instance used to query the autotest results db.
+    @var _cf_getter: a ControlFileGetter used to get the AU control file.
+    """
+
+    JOB_NAME = 'try_new_image'
+
+
+    def __init__(self, autotest_dir, afe=None, tko=None, results_dir=None):
+        """
+        Constructor
+
+        @param autotest_dir: the place to find autotests.
+        @param afe: an instance of AFE as defined in server/frontend.py.
+        @param tko: an instance of TKO as defined in server/frontend.py.
+        @param results_dir: The directory where the job can write results to.
+                            This must be set if you want job_id of sub-jobs
+                            list in the job keyvals.
+        """
+        self._afe = afe or frontend_wrappers.RetryingAFE(timeout_min=30,
+                                                         delay_sec=10,
+                                                         debug=False)
+        self._tko = tko or frontend_wrappers.RetryingTKO(timeout_min=30,
+                                                         delay_sec=10,
+                                                         debug=False)
+        self._results_dir = results_dir
+        self._reimaged_hosts = {}
+        self._cf_getter = control_file_getter.FileSystemGetter(
+            [os.path.join(autotest_dir, 'server/site_tests')])
+
+
+    def attempt(self, build, board, pool, record, check_hosts,
+                manager, num=None):
+        """
+        Synchronously attempt to reimage some machines.
+
+        Fire off attempts to reimage |num| machines of type |board|, using
+        the image called |build|.  Wait for completion, polling every
+        10s, and log results with |record| upon completion.
+
+        @param build: the build to install e.g.
+                      x86-alex-release/R18-1655.0.0-a1-b1584.
+        @param board: which kind of devices to reimage.
+        @param pool: Specify the pool of machines to use for scheduling
+                purposes.
+        @param record: callable that records job status.
+               prototype:
+                 record(base_job.status_log_entry)
+        @param check_hosts: require appropriate hosts to be available now.
+        @param manager: an as-yet-unused HostLockManager instance to handle
+                        locking DUTs that we decide to reimage.
+        @param num: how many devices to reimage.
+        @return True if all reimaging jobs succeed, False otherwise.
+        """
+        if not num:
+            num = tools.sharding_factor()
+        logging.debug("scheduling reimaging across %d machines", num)
+        begin_time_str = datetime.datetime.now().strftime(job_status.TIME_FMT)
+        try:
+            self._ensure_version_label(constants.VERSION_PREFIX + build)
+
+            if check_hosts:
+                # TODO make DEPENDENCIES-aware
+                self._ensure_enough_hosts(board, pool, num)
+
+            # Schedule job and record job metadata.
+            # TODO make DEPENDENCIES-aware
+            canary_job = self._schedule_reimage_job(build, board, pool, num)
+            self._record_job_if_possible(Reimager.JOB_NAME, canary_job)
+            logging.info('Created re-imaging job: %d', canary_job.id)
+
+            job_status.wait_for_jobs_to_start(self._afe, [canary_job])
+            logging.debug('Re-imaging job running.')
+
+            hosts = job_status.wait_for_and_lock_job_hosts(self._afe,
+                                                           [canary_job],
+                                                           manager)
+            logging.info('%r locked for reimaging.', hosts)
+
+            job_status.wait_for_jobs_to_finish(self._afe, [canary_job])
+            logging.debug('Re-imaging job finished.')
+
+            # Gather job results.
+            results = self.get_results(canary_job)
+            self._reimaged_hosts[build] = results.keys()
+
+        except error.InadequateHostsException as e:
+            logging.warning(e)
+            Status('WARN', Reimager.JOB_NAME, str(e),
+                   begin_time_str=begin_time_str).record_all(record)
+            return False
+        except Exception as e:
+            # catch Exception so we record the job as terminated no matter what.
+            logging.error(e)
+            Status('ERROR', Reimager.JOB_NAME, str(e),
+                   begin_time_str=begin_time_str).record_all(record)
+            return False
+
+        return job_status.record_and_report_results(results.values(), record)
+
+
+    def get_results(self, canary_job):
+        """
+        Gather results for |canary_job|, in a map of Statuses indexed by host.
+
+        A host's results will be named Reimager.JOB_NAME-<host> in the map, e.g.
+          {'chromeos2-rack1': Status('GOOD', 'try_new_image-chromeos2-rack1')}
+
+        @param canary_job: a completed frontend.Job
+        @return a map of hostname: job_status.Status objects.
+        """
+        return job_status.gather_per_host_results(self._afe,
+                                                  self._tko,
+                                                  [canary_job],
+                                                  Reimager.JOB_NAME + '-')
+
+
+    def _ensure_enough_hosts(self, board, pool, num):
+        """
+        Determine if there are enough working hosts to run on.
+
+        Raises exception if there are not enough hosts.
+
+        @param board: which kind of devices to reimage.
+        @param pool: the pool of machines to use for scheduling purposes.
+        @param num: how many devices to reimage.
+        @raises NoHostsException: if no working hosts.
+        @raises InadequateHostsException: if too few working hosts.
+        """
+        labels = [l for l in [board, pool] if l is not None]
+        available = self._count_usable_hosts(labels)
+        if available == 0:
+            raise error.NoHostsException('All hosts with %r are dead!' % labels)
+        elif num > available:
+            raise error.InadequateHostsException(
+                'Too few hosts with %r' % labels)
+
+
+    def clear_reimaged_host_state(self, build):
+        """
+        Clear per-host state created in the autotest DB for this job.
+
+        After reimaging a host, we label it and set some host attributes on it
+        that are then used by the suite scheduling code.  This call cleans
+        that up.
+
+        @param build: the build whose hosts we want to clean up e.g.
+                      x86-alex-release/R18-1655.0.0-a1-b1584.
+        """
+        for host in self._reimaged_hosts.get(build, []):
+            if not host.startswith('hostless'):
+                self._clear_build_state(host)
+
+
+    def _clear_build_state(self, machine):
+        """
+        Clear all build-specific labels, attributes from the target.
+
+        @param machine: the host to clear labels, attributes from.
+        """
+        self._afe.set_host_attribute(constants.JOB_REPO_URL, None,
+                                     hostname=machine)
+
+
+    def _record_job_if_possible(self, test_name, job):
+        """
+        Record job id as keyval, if possible, so it can be referenced later.
+
+        If |self._results_dir| is None, then this is a NOOP.
+
+        @param test_name: the test to record id/owner for.
+        @param job: the job object to pull info from.
+        """
+        if self._results_dir:
+            job_id_owner = '%s-%s' % (job.id, job.owner)
+            utils.write_keyval(
+                self._results_dir,
+                {hashlib.md5(test_name).hexdigest(): job_id_owner})
+
+
+    def _count_usable_hosts(self, host_spec):
+        """
+        Given a set of host labels, count the live hosts that have them all.
+
+        @param host_spec: list of labels specifying a set of hosts.
+        @return the number of live hosts that satisfy |host_spec|.
+        """
+        count = 0
+        for h in self._afe.get_hosts(multiple_labels=host_spec):
+            if h.status not in ['Repair Failed', 'Repairing']:
+                count += 1
+        return count
+
+
+    def _ensure_version_label(self, name):
+        """
+        Ensure that a label called |name| exists in the autotest DB.
+
+        @param name: the label to check for/create.
+        """
+        try:
+            self._afe.create_label(name=name)
+        except proxy.ValidationError as ve:
+            if ('name' in ve.problem_keys and
+                'This value must be unique' in ve.problem_keys['name']):
+                logging.debug('Version label %s already exists', name)
+            else:
+                raise ve
+
+
+    def _schedule_reimage_job(self, build, board, pool, num_machines):
+        """
+        Schedules the reimaging of |num_machines| |board| devices with |build|.
+
+        Sends an RPC to the autotest frontend to enqueue reimaging jobs on
+        |num_machines| devices of type |board|
+
+        @param build: the build to install (must be unique).
+        @param board: which kind of devices to reimage.
+        @param pool: the pool of machines to use for scheduling purposes.
+        @param num_machines: how many devices to reimage.
+        @return a frontend.Job object for the reimaging job we scheduled.
+        """
+        image_url = tools.image_url_pattern() % (
+            dev_server.DevServer.devserver_url_for_build(build), build)
+        control_file = tools.inject_vars(
+            dict(image_url=image_url, image_name=build),
+            self._cf_getter.get_control_file_contents_by_name('autoupdate'))
+        job_deps = []
+        if pool:
+            meta_host = pool
+            board_label = board
+            job_deps.append(board_label)
+        else:
+            # No pool specified use board.
+            meta_host = board
+
+        return self._afe.create_job(control_file=control_file,
+                                    name=build + '-try',
+                                    control_type='Server',
+                                    priority='Low',
+                                    meta_hosts=[meta_host] * num_machines,
+                                    dependencies=job_deps)
diff --git a/server/cros/dynamic_suite/reimager_unittest.py b/server/cros/dynamic_suite/reimager_unittest.py
new file mode 100644
index 0000000..91afcfb
--- /dev/null
+++ b/server/cros/dynamic_suite/reimager_unittest.py
@@ -0,0 +1,308 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Unit tests for server/cros/dynamic_suite/reimager.py."""
+
+import logging
+import mox
+import unittest
+
+from autotest_lib.client.common_lib import base_job, control_data, error
+from autotest_lib.client.common_lib import global_config
+from autotest_lib.client.common_lib.cros import dev_server
+from autotest_lib.frontend.afe.json_rpc import proxy
+from autotest_lib.server.cros.dynamic_suite import constants
+from autotest_lib.server.cros.dynamic_suite import control_file_getter
+from autotest_lib.server.cros.dynamic_suite import host_lock_manager, job_status
+from autotest_lib.server.cros.dynamic_suite import tools
+from autotest_lib.server.cros.dynamic_suite.comparitors import StatusContains
+from autotest_lib.server.cros.dynamic_suite.reimager import Reimager
+from autotest_lib.server.cros.dynamic_suite.fakes import FakeHost, FakeJob
+from autotest_lib.server import frontend
+
+
+class ReimagerTest(mox.MoxTestBase):
+    """Unit tests for dynamic_suite Reimager class.
+
+    @var _URL: fake image url
+    @var _BUILD: fake build
+    @var _NUM: fake number of machines to run on
+    @var _BOARD: fake board to reimage
+    """
+
+
+    _DEVSERVER_URL = 'http://nothing:8082'
+    _URL = '%s/%s'
+    _BUILD = 'build'
+    _NUM = 4
+    _BOARD = 'board'
+    _CONFIG = global_config.global_config
+
+
+    def setUp(self):
+        super(ReimagerTest, self).setUp()
+        self.afe = self.mox.CreateMock(frontend.AFE)
+        self.tko = self.mox.CreateMock(frontend.TKO)
+        self.manager = self.mox.CreateMock(host_lock_manager.HostLockManager)
+        self.reimager = Reimager('', afe=self.afe, tko=self.tko)
+        self._CONFIG.override_config_value('CROS',
+                                           'sharding_factor',
+                                           "%d" % self._NUM)
+
+
+    def testEnsureVersionLabelAlreadyExists(self):
+        """Should tolerate a label that already exists."""
+        name = 'label'
+        error = proxy.ValidationError(
+            {'name': 'ValidationError',
+             'message': '{"name": "This value must be unique"}',
+             'traceback': ''},
+            'BAD')
+        self.afe.create_label(name=name).AndRaise(error)
+        self.mox.ReplayAll()
+        self.reimager._ensure_version_label(name)
+
+
+    def testEnsureVersionLabel(self):
+        """Should create a label if it doesn't already exist."""
+        name = 'label'
+        self.afe.create_label(name=name)
+        self.mox.ReplayAll()
+        self.reimager._ensure_version_label(name)
+
+
+    def testCountHostsByBoardAndPool(self):
+        """Should count available hosts by board and pool."""
+        spec = [self._BOARD, 'pool:bvt']
+        self.afe.get_hosts(multiple_labels=spec).AndReturn([FakeHost()])
+        self.mox.ReplayAll()
+        self.assertEquals(self.reimager._count_usable_hosts(spec), 1)
+
+
+    def testCountHostsByBoard(self):
+        """Should count available hosts by board."""
+        spec = [self._BOARD]
+        self.afe.get_hosts(multiple_labels=spec).AndReturn([FakeHost()] * 2)
+        self.mox.ReplayAll()
+        self.assertEquals(self.reimager._count_usable_hosts(spec), 2)
+
+
+    def testCountZeroHostsByBoard(self):
+        """Should count the available hosts, by board, getting zero."""
+        spec = [self._BOARD]
+        self.afe.get_hosts(multiple_labels=spec).AndReturn([])
+        self.mox.ReplayAll()
+        self.assertEquals(self.reimager._count_usable_hosts(spec), 0)
+
+
+    def testScheduleJob(self):
+        """Should be able to create a job with the AFE."""
+        # Fake out getting the autoupdate control file contents.
+        cf_getter = self.mox.CreateMock(control_file_getter.ControlFileGetter)
+        cf_getter.get_control_file_contents_by_name('autoupdate').AndReturn('')
+        self.reimager._cf_getter = cf_getter
+        self._CONFIG.override_config_value('CROS',
+                                           'dev_server',
+                                           self._DEVSERVER_URL)
+        self._CONFIG.override_config_value('CROS',
+                                           'image_url_pattern',
+                                           self._URL)
+        self.afe.create_job(
+            control_file=mox.And(
+                mox.StrContains(self._BUILD),
+                mox.StrContains(self._URL % (self._DEVSERVER_URL,
+                                             self._BUILD))),
+            name=mox.StrContains(self._BUILD),
+            control_type='Server',
+            meta_hosts=[self._BOARD] * self._NUM,
+            dependencies=[],
+            priority='Low')
+        self.mox.ReplayAll()
+        self.reimager._schedule_reimage_job(self._BUILD, self._BOARD, None,
+                                            self._NUM)
+
+    def testPackageUrl(self):
+        """Should be able to get the package_url for any build."""
+        self._CONFIG.override_config_value('CROS',
+                                           'dev_server',
+                                           self._DEVSERVER_URL)
+        self._CONFIG.override_config_value('CROS',
+                                           'package_url_pattern',
+                                           self._URL)
+        self.mox.ReplayAll()
+        package_url = tools.get_package_url(self._BUILD)
+        self.assertEqual(package_url, self._URL % (self._DEVSERVER_URL,
+                                                   self._BUILD))
+
+    def expect_attempt(self, canary_job, statuses, ex=None, check_hosts=True):
+        """Sets up |self.reimager| to expect an attempt() over |statuses|.
+
+        Also stubs out Reimager._clear_build_state(), should the caller wish
+        to set an expectation there as well.
+
+        @param canary_job: a FakeJob representing the job we're expecting.
+        @param statuses: dict mapping a hostname to its job_status.Status.
+                         Will be returned by job_status.gather_per_host_results
+        @param ex: if not None, |ex| is raised by wait_for_jobs_to_finish()
+        @return a FakeJob configured with appropriate expectations
+        """
+        self.mox.StubOutWithMock(self.reimager, '_ensure_version_label')
+        self.mox.StubOutWithMock(self.reimager, '_schedule_reimage_job')
+        self.mox.StubOutWithMock(self.reimager, '_count_usable_hosts')
+        self.mox.StubOutWithMock(self.reimager, '_clear_build_state')
+
+        self.mox.StubOutWithMock(job_status, 'wait_for_jobs_to_start')
+        self.mox.StubOutWithMock(job_status, 'wait_for_and_lock_job_hosts')
+        self.mox.StubOutWithMock(job_status, 'gather_job_hostnames')
+        self.mox.StubOutWithMock(job_status, 'wait_for_jobs_to_finish')
+        self.mox.StubOutWithMock(job_status, 'gather_per_host_results')
+        self.mox.StubOutWithMock(job_status, 'record_and_report_results')
+
+        self.reimager._ensure_version_label(mox.StrContains(self._BUILD))
+        self.reimager._schedule_reimage_job(self._BUILD,
+                                            self._BOARD,
+                                            None,
+                                            self._NUM).AndReturn(canary_job)
+        if check_hosts:
+            self.reimager._count_usable_hosts(
+                mox.IgnoreArg()).AndReturn(self._NUM)
+
+        job_status.wait_for_jobs_to_start(self.afe, [canary_job])
+        job_status.wait_for_and_lock_job_hosts(
+            self.afe, [canary_job], self.manager).AndReturn(statuses.keys())
+
+        if ex:
+            job_status.wait_for_jobs_to_finish(self.afe,
+                                               [canary_job]).AndRaise(ex)
+        else:
+            job_status.wait_for_jobs_to_finish(self.afe, [canary_job])
+            job_status.gather_per_host_results(
+                    mox.IgnoreArg(), mox.IgnoreArg(), [canary_job],
+                    mox.StrContains(Reimager.JOB_NAME)).AndReturn(
+                            statuses)
+
+        if statuses:
+            ret_val = reduce(lambda v, s: v and s.is_good(),
+                             statuses.values(), True)
+            job_status.record_and_report_results(
+                statuses.values(), mox.IgnoreArg()).AndReturn(ret_val)
+
+
+    def testSuccessfulReimage(self):
+        """Should attempt a reimage and record success."""
+        canary = FakeJob()
+        statuses = {canary.hostnames[0]: job_status.Status('GOOD',
+                                                           canary.hostnames[0])}
+        self.expect_attempt(canary, statuses)
+
+        rjob = self.mox.CreateMock(base_job.base_job)
+        self.reimager._clear_build_state(mox.StrContains(canary.hostnames[0]))
+        self.mox.ReplayAll()
+        self.assertTrue(self.reimager.attempt(self._BUILD, self._BOARD, None,
+                                              rjob.record_entry, True,
+                                              self.manager))
+        self.reimager.clear_reimaged_host_state(self._BUILD)
+
+
+    def testFailedReimage(self):
+        """Should attempt a reimage and record failure."""
+        canary = FakeJob()
+        statuses = {canary.hostnames[0]: job_status.Status('FAIL',
+                                                           canary.hostnames[0])}
+        self.expect_attempt(canary, statuses)
+
+        rjob = self.mox.CreateMock(base_job.base_job)
+        self.reimager._clear_build_state(mox.StrContains(canary.hostnames[0]))
+        self.mox.ReplayAll()
+        self.assertFalse(self.reimager.attempt(self._BUILD, self._BOARD, None,
+                                               rjob.record_entry, True,
+                                               self.manager))
+        self.reimager.clear_reimaged_host_state(self._BUILD)
+
+
+    def testReimageThatNeverHappened(self):
+        """Should attempt a reimage and record that it didn't run."""
+        canary = FakeJob()
+        statuses = {'hostless': job_status.Status('ABORT', 'big_job_name')}
+        self.expect_attempt(canary, statuses)
+
+        rjob = self.mox.CreateMock(base_job.base_job)
+        self.mox.ReplayAll()
+        self.reimager.attempt(self._BUILD, self._BOARD, None,
+                              rjob.record_entry, True, self.manager)
+        self.reimager.clear_reimaged_host_state(self._BUILD)
+
+
+    def testReimageThatRaised(self):
+        """Should attempt a reimage that raises an exception and record that."""
+        canary = FakeJob()
+        ex_message = 'Oh no!'
+        self.expect_attempt(canary, statuses={}, ex=Exception(ex_message))
+
+        rjob = self.mox.CreateMock(base_job.base_job)
+        rjob.record_entry(StatusContains.CreateFromStrings('START'))
+        rjob.record_entry(StatusContains.CreateFromStrings('ERROR',
+                                                           reason=ex_message))
+        rjob.record_entry(StatusContains.CreateFromStrings('END ERROR'))
+        self.mox.ReplayAll()
+        self.reimager.attempt(self._BUILD, self._BOARD, None,
+                              rjob.record_entry, True, self.manager)
+        self.reimager.clear_reimaged_host_state(self._BUILD)
+
+
+    def testSuccessfulReimageThatCouldNotScheduleRightAway(self):
+        """Should attempt reimage, ignoring host availability; record success.
+        """
+        canary = FakeJob()
+        statuses = {canary.hostnames[0]: job_status.Status('GOOD',
+                                                           canary.hostnames[0])}
+        self.expect_attempt(canary, statuses, check_hosts=False)
+
+        rjob = self.mox.CreateMock(base_job.base_job)
+        self.reimager._clear_build_state(mox.StrContains(canary.hostnames[0]))
+        self.mox.ReplayAll()
+        self.assertTrue(self.reimager.attempt(self._BUILD, self._BOARD, None,
+                                              rjob.record_entry, False,
+                                              self.manager))
+        self.reimager.clear_reimaged_host_state(self._BUILD)
+
+
+    def testReimageThatCouldNotSchedule(self):
+        """Should attempt a reimage that can't be scheduled."""
+        self.mox.StubOutWithMock(self.reimager, '_ensure_version_label')
+        self.reimager._ensure_version_label(mox.StrContains(self._BUILD))
+
+        self.mox.StubOutWithMock(self.reimager, '_count_usable_hosts')
+        self.reimager._count_usable_hosts(mox.IgnoreArg()).AndReturn(1)
+
+        rjob = self.mox.CreateMock(base_job.base_job)
+        rjob.record_entry(StatusContains.CreateFromStrings('START'))
+        rjob.record_entry(
+            StatusContains.CreateFromStrings('WARN', reason='Too few hosts'))
+        rjob.record_entry(StatusContains.CreateFromStrings('END WARN'))
+        self.mox.ReplayAll()
+        self.reimager.attempt(self._BUILD, self._BOARD, None,
+                              rjob.record_entry, True, self.manager)
+        self.reimager.clear_reimaged_host_state(self._BUILD)
+
+
+    def testReimageWithNoAvailableHosts(self):
+        """Should attempt a reimage while all hosts are dead."""
+        self.mox.StubOutWithMock(self.reimager, '_ensure_version_label')
+        self.reimager._ensure_version_label(mox.StrContains(self._BUILD))
+
+        self.mox.StubOutWithMock(self.reimager, '_count_usable_hosts')
+        self.reimager._count_usable_hosts(mox.IgnoreArg()).AndReturn(0)
+
+        rjob = self.mox.CreateMock(base_job.base_job)
+        rjob.record_entry(StatusContains.CreateFromStrings('START'))
+        rjob.record_entry(StatusContains.CreateFromStrings('ERROR',
+                                                           reason='All hosts'))
+        rjob.record_entry(StatusContains.CreateFromStrings('END ERROR'))
+        self.mox.ReplayAll()
+        self.reimager.attempt(self._BUILD, self._BOARD, None,
+                              rjob.record_entry, True, self.manager)
+        self.reimager.clear_reimaged_host_state(self._BUILD)
diff --git a/server/cros/dynamic_suite/suite.py b/server/cros/dynamic_suite/suite.py
new file mode 100644
index 0000000..3057762
--- /dev/null
+++ b/server/cros/dynamic_suite/suite.py
@@ -0,0 +1,345 @@
+# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import compiler, hashlib, logging, os, re, traceback, signal
+
+import common
+
+from autotest_lib.client.common_lib import base_job, control_data
+from autotest_lib.client.common_lib import utils
+from autotest_lib.client.common_lib.cros import dev_server
+from autotest_lib.server.cros.dynamic_suite import constants
+from autotest_lib.server.cros.dynamic_suite import control_file_getter
+from autotest_lib.server.cros.dynamic_suite import frontend_wrappers
+from autotest_lib.server.cros.dynamic_suite import host_lock_manager, job_status
+from autotest_lib.server.cros.dynamic_suite.job_status import Status
+from autotest_lib.server import frontend
+
+
+class Suite(object):
+    """
+    A suite of tests, defined by some predicate over control file variables.
+
+    Given a place to search for control files a predicate to match the desired
+    tests, can gather tests and fire off jobs to run them, and then wait for
+    results.
+
+    @var _predicate: a function that should return True when run over a
+         ControlData representation of a control file that should be in
+         this Suite.
+    @var _tag: a string with which to tag jobs run in this suite.
+    @var _build: the build on which we're running this suite.
+    @var _afe: an instance of AFE as defined in server/frontend.py.
+    @var _tko: an instance of TKO as defined in server/frontend.py.
+    @var _jobs: currently scheduled jobs, if any.
+    @var _cf_getter: a control_file_getter.ControlFileGetter
+    """
+
+
+    @staticmethod
+    def create_ds_getter(build):
+        """
+        @param build: the build on which we're running this suite.
+        @return a FileSystemGetter instance that looks under |autotest_dir|.
+        """
+        return control_file_getter.DevServerGetter(
+            build, dev_server.DevServer.create())
+
+
+    @staticmethod
+    def create_fs_getter(autotest_dir):
+        """
+        @param autotest_dir: the place to find autotests.
+        @return a FileSystemGetter instance that looks under |autotest_dir|.
+        """
+        # currently hard-coded places to look for tests.
+        subpaths = ['server/site_tests', 'client/site_tests',
+                    'server/tests', 'client/tests']
+        directories = [os.path.join(autotest_dir, p) for p in subpaths]
+        return control_file_getter.FileSystemGetter(directories)
+
+
+    @staticmethod
+    def parse_tag(tag):
+        """Splits a string on ',' optionally surrounded by whitespace."""
+        return map(lambda x: x.strip(), tag.split(','))
+
+
+    @staticmethod
+    def name_in_tag_predicate(name):
+        """Returns predicate that takes a control file and looks for |name|.
+
+        Builds a predicate that takes in a parsed control file (a ControlData)
+        and returns True if the SUITE tag is present and contains |name|.
+
+        @param name: the suite name to base the predicate on.
+        @return a callable that takes a ControlData and looks for |name| in that
+                ControlData object's suite member.
+        """
+        return lambda t: hasattr(t, 'suite') and \
+                         name in Suite.parse_tag(t.suite)
+
+
+    @staticmethod
+    def list_all_suites(build, cf_getter=None):
+        """
+        Parses all ControlData objects with a SUITE tag and extracts all
+        defined suite names.
+
+        @param cf_getter: control_file_getter.ControlFileGetter. Defaults to
+                          using DevServerGetter.
+
+        @return list of suites
+        """
+        if cf_getter is None:
+            cf_getter = Suite.create_ds_getter(build)
+
+        suites = set()
+        predicate = lambda t: hasattr(t, 'suite')
+        for test in Suite.find_and_parse_tests(cf_getter, predicate,
+                                               add_experimental=True):
+            suites.update(Suite.parse_tag(test.suite))
+        return list(suites)
+
+
+    @staticmethod
+    def create_from_name(name, build, cf_getter=None, afe=None, tko=None,
+                         pool=None, results_dir=None):
+        """
+        Create a Suite using a predicate based on the SUITE control file var.
+
+        Makes a predicate based on |name| and uses it to instantiate a Suite
+        that looks for tests in |autotest_dir| and will schedule them using
+        |afe|.  Pulls control files from the default dev server.
+        Results will be pulled from |tko| upon completion.
+
+        @param name: a value of the SUITE control file variable to search for.
+        @param build: the build on which we're running this suite.
+        @param cf_getter: a control_file_getter.ControlFileGetter.
+                          If None, default to using a DevServerGetter.
+        @param afe: an instance of AFE as defined in server/frontend.py.
+        @param tko: an instance of TKO as defined in server/frontend.py.
+        @param pool: Specify the pool of machines to use for scheduling
+                     purposes.
+        @param results_dir: The directory where the job can write results to.
+                            This must be set if you want job_id of sub-jobs
+                            list in the job keyvals.
+        @return a Suite instance.
+        """
+        if cf_getter is None:
+            cf_getter = Suite.create_ds_getter(build)
+        return Suite(Suite.name_in_tag_predicate(name),
+                     name, build, cf_getter, afe, tko, pool, results_dir)
+
+
+    def __init__(self, predicate, tag, build, cf_getter, afe=None, tko=None,
+                 pool=None, results_dir=None):
+        """
+        Constructor
+
+        @param predicate: a function that should return True when run over a
+               ControlData representation of a control file that should be in
+               this Suite.
+        @param tag: a string with which to tag jobs run in this suite.
+        @param build: the build on which we're running this suite.
+        @param cf_getter: a control_file_getter.ControlFileGetter
+        @param afe: an instance of AFE as defined in server/frontend.py.
+        @param tko: an instance of TKO as defined in server/frontend.py.
+        @param pool: Specify the pool of machines to use for scheduling
+                purposes.
+        @param results_dir: The directory where the job can write results to.
+                            This must be set if you want job_id of sub-jobs
+                            list in the job keyvals.
+        """
+        self._predicate = predicate
+        self._tag = tag
+        self._build = build
+        self._cf_getter = cf_getter
+        self._results_dir = results_dir
+        self._afe = afe or frontend_wrappers.RetryingAFE(timeout_min=30,
+                                                         delay_sec=10,
+                                                         debug=False)
+        self._tko = tko or frontend_wrappers.RetryingTKO(timeout_min=30,
+                                                         delay_sec=10,
+                                                         debug=False)
+        self._pool = pool
+        self._jobs = []
+        self._tests = Suite.find_and_parse_tests(self._cf_getter,
+                                                 self._predicate,
+                                                 add_experimental=True)
+
+
+    @property
+    def tests(self):
+        """
+        A list of ControlData objects in the suite, with added |text| attr.
+        """
+        return self._tests
+
+
+    def stable_tests(self):
+        """
+        |self.tests|, filtered for non-experimental tests.
+        """
+        return filter(lambda t: not t.experimental, self.tests)
+
+
+    def unstable_tests(self):
+        """
+        |self.tests|, filtered for experimental tests.
+        """
+        return filter(lambda t: t.experimental, self.tests)
+
+
+    def _create_job(self, test):
+        """
+        Thin wrapper around frontend.AFE.create_job().
+
+        @param test: ControlData object for a test to run.
+        @return a frontend.Job object with an added test_name member.
+                test_name is used to preserve the higher level TEST_NAME
+                name of the job.
+        """
+        job_deps = []  # TODO(cmasone): init from test.dependencies.
+        if self._pool:
+            meta_hosts = self._pool
+            cros_label = constants.VERSION_PREFIX + self._build
+            job_deps.append(cros_label)
+        else:
+            # No pool specified use any machines with the following label.
+            meta_hosts = constants.VERSION_PREFIX + self._build
+        test_obj = self._afe.create_job(
+            control_file=test.text,
+            name='/'.join([self._build, self._tag, test.name]),
+            control_type=test.test_type.capitalize(),
+            meta_hosts=[meta_hosts],
+            dependencies=job_deps,
+            keyvals={constants.JOB_BUILD_KEY: self._build,
+                     constants.JOB_SUITE_KEY: self._tag})
+
+        setattr(test_obj, 'test_name', test.name)
+
+        return test_obj
+
+
+    def run_and_wait(self, record, manager, add_experimental=True):
+        """
+        Synchronously run tests in |self.tests|.
+
+        Schedules tests against a device running image |self._build|, and
+        then polls for status, using |record| to print status when each
+        completes.
+
+        Tests returned by self.stable_tests() will always be run, while tests
+        in self.unstable_tests() will only be run if |add_experimental| is true.
+
+        @param record: callable that records job status.
+                 prototype:
+                   record(base_job.status_log_entry)
+        @param add_experimental: schedule experimental tests as well, or not.
+        """
+        logging.debug('Discovered %d stable tests.', len(self.stable_tests()))
+        logging.debug('Discovered %d unstable tests.',
+                      len(self.unstable_tests()))
+        try:
+            Status('INFO', 'Start %s' % self._tag).record_result(record)
+            self.schedule(add_experimental)
+            # Unlock all hosts, so test jobs can be run on them.
+            manager.unlock()
+            try:
+                for result in job_status.wait_for_results(self._afe,
+                                                          self._tko,
+                                                          self._jobs):
+                    result.record_all(record)
+
+            except Exception as e:
+                logging.error(traceback.format_exc())
+                Status('FAIL', self._tag,
+                       'Exception waiting for results').record_result(record)
+        except Exception as e:
+            logging.error(traceback.format_exc())
+            Status('FAIL', self._tag,
+                   'Exception while scheduling suite').record_result(record)
+        # Sanity check
+        tests_at_end = self.find_and_parse_tests(self._cf_getter,
+                                                 self._predicate,
+                                                 add_experimental=True)
+        if len(self.tests) != len(tests_at_end):
+            msg = 'Dev Server enumerated %d tests at start, %d at end.' % (
+                len(self.tests), len(tests_at_end))
+            Status('FAIL', self._tag, msg).record_result(record)
+
+
+    def schedule(self, add_experimental=True):
+        """
+        Schedule jobs using |self._afe|.
+
+        frontend.Job objects representing each scheduled job will be put in
+        |self._jobs|.
+
+        @param add_experimental: schedule experimental tests as well, or not.
+        """
+        for test in self.stable_tests():
+            logging.debug('Scheduling %s', test.name)
+            self._jobs.append(self._create_job(test))
+
+        if add_experimental:
+            for test in self.unstable_tests():
+                logging.debug('Scheduling experimental %s', test.name)
+                test.name = constants.EXPERIMENTAL_PREFIX + test.name
+                self._jobs.append(self._create_job(test))
+        if self._results_dir:
+            self._record_scheduled_jobs()
+
+
+    def _record_scheduled_jobs(self):
+        """
+        Record scheduled job ids as keyvals, so they can be referenced later.
+        """
+        for job in self._jobs:
+            job_id_owner = '%s-%s' % (job.id, job.owner)
+            utils.write_keyval(
+                self._results_dir,
+                {hashlib.md5(job.test_name).hexdigest(): job_id_owner})
+
+
+    @staticmethod
+    def find_and_parse_tests(cf_getter, predicate, add_experimental=False):
+        """
+        Function to scan through all tests and find eligible tests.
+
+        Looks at control files returned by _cf_getter.get_control_file_list()
+        for tests that pass self._predicate().
+
+        @param cf_getter: a control_file_getter.ControlFileGetter used to list
+               and fetch the content of control files
+        @param predicate: a function that should return True when run over a
+               ControlData representation of a control file that should be in
+               this Suite.
+        @param add_experimental: add tests with experimental attribute set.
+
+        @return list of ControlData objects that should be run, with control
+                file text added in |text| attribute.
+        """
+        tests = {}
+        files = cf_getter.get_control_file_list()
+        matcher = re.compile(r'[^/]+/(deps|profilers)/.+')
+        for file in filter(lambda f: not matcher.match(f), files):
+            logging.debug('Considering %s', file)
+            text = cf_getter.get_control_file_contents(file)
+            try:
+                found_test = control_data.parse_control_string(
+                        text, raise_warnings=True)
+                if not add_experimental and found_test.experimental:
+                    continue
+
+                found_test.text = text
+                found_test.path = file
+                tests[file] = found_test
+            except control_data.ControlVariableException, e:
+                logging.warn("Skipping %s\n%s", file, e)
+            except Exception, e:
+                logging.error("Bad %s\n%s", file, e)
+
+        return [test for test in tests.itervalues() if predicate(test)]
diff --git a/server/cros/dynamic_suite/suite_unittest.py b/server/cros/dynamic_suite/suite_unittest.py
new file mode 100644
index 0000000..cd28743
--- /dev/null
+++ b/server/cros/dynamic_suite/suite_unittest.py
@@ -0,0 +1,327 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+"""Unit tests for server/cros/dynamic_suite/suite.py."""
+
+import logging
+import mox
+import shutil
+import tempfile
+import unittest
+
+from autotest_lib.client.common_lib import base_job, control_data
+from autotest_lib.server.cros.dynamic_suite import constants
+from autotest_lib.server.cros.dynamic_suite import control_file_getter
+from autotest_lib.server.cros.dynamic_suite import host_lock_manager, job_status
+from autotest_lib.server.cros.dynamic_suite import tools
+from autotest_lib.server.cros.dynamic_suite.comparitors import StatusContains
+from autotest_lib.server.cros.dynamic_suite.suite import Suite
+from autotest_lib.server.cros.dynamic_suite.fakes import FakeControlData
+from autotest_lib.server.cros.dynamic_suite.fakes import FakeHost, FakeJob
+from autotest_lib.server.cros.dynamic_suite.fakes import FakeLabel
+from autotest_lib.server import frontend
+
+
+class SuiteTest(mox.MoxTestBase):
+    """Unit tests for dynamic_suite Suite class.
+
+    @var _BUILD: fake build
+    @var _TAG: fake suite tag
+    """
+
+    _BUILD = 'build'
+    _TAG = 'suite_tag'
+
+
+    def setUp(self):
+        super(SuiteTest, self).setUp()
+        self.afe = self.mox.CreateMock(frontend.AFE)
+        self.tko = self.mox.CreateMock(frontend.TKO)
+
+        self.tmpdir = tempfile.mkdtemp(suffix=type(self).__name__)
+
+        self.manager = self.mox.CreateMock(host_lock_manager.HostLockManager)
+        self.getter = self.mox.CreateMock(control_file_getter.ControlFileGetter)
+
+        self.files = {'one': FakeControlData(self._TAG, 'data_one', expr=True),
+                      'two': FakeControlData(self._TAG, 'data_two'),
+                      'three': FakeControlData(self._TAG, 'data_three')}
+
+        self.files_to_filter = {
+            'with/deps/...': FakeControlData(self._TAG, 'gets filtered'),
+            'with/profilers/...': FakeControlData(self._TAG, 'gets filtered')}
+
+
+    def tearDown(self):
+        super(SuiteTest, self).tearDown()
+        shutil.rmtree(self.tmpdir, ignore_errors=True)
+
+
+    def expect_control_file_parsing(self):
+        """Expect an attempt to parse the 'control files' in |self.files|."""
+        all_files = self.files.keys() + self.files_to_filter.keys()
+        self._set_control_file_parsing_expectations(False, all_files,
+                                                    self.files.iteritems())
+
+
+    def expect_control_file_reparsing(self):
+        """Expect re-parsing the 'control files' in |self.files|."""
+        all_files = self.files.keys() + self.files_to_filter.keys()
+        self._set_control_file_parsing_expectations(True, all_files,
+                                                    self.files.iteritems())
+
+
+    def expect_racy_control_file_reparsing(self, new_files):
+        """Expect re-fetching and parsing of control files to return extra.
+
+        @param new_files: extra control files that showed up during scheduling.
+        """
+        all_files = (self.files.keys() + self.files_to_filter.keys() +
+                     new_files.keys())
+        new_files.update(self.files)
+        self._set_control_file_parsing_expectations(True, all_files,
+                                                    new_files.iteritems())
+
+
+    def _set_control_file_parsing_expectations(self, already_stubbed,
+                                               file_list, files_to_parse):
+        """Expect an attempt to parse the 'control files' in |files|.
+
+        @param already_stubbed: parse_control_string already stubbed out.
+        @param file_list: the files the dev server returns
+        @param files_to_parse: the {'name': FakeControlData} dict of files we
+                               expect to get parsed.
+        """
+        if not already_stubbed:
+            self.mox.StubOutWithMock(control_data, 'parse_control_string')
+
+        self.getter.get_control_file_list().AndReturn(file_list)
+        for file, data in files_to_parse:
+            self.getter.get_control_file_contents(
+                file).InAnyOrder().AndReturn(data.string)
+            control_data.parse_control_string(
+                data.string, raise_warnings=True).InAnyOrder().AndReturn(data)
+
+
+    def testFindAndParseStableTests(self):
+        """Should find only non-experimental tests that match a predicate."""
+        self.expect_control_file_parsing()
+        self.mox.ReplayAll()
+
+        predicate = lambda d: d.text == self.files['two'].string
+        tests = Suite.find_and_parse_tests(self.getter, predicate)
+        self.assertEquals(len(tests), 1)
+        self.assertEquals(tests[0], self.files['two'])
+
+
+    def testFindAndParseTests(self):
+        """Should find all tests that match a predicate."""
+        self.expect_control_file_parsing()
+        self.mox.ReplayAll()
+
+        predicate = lambda d: d.text != self.files['two'].string
+        tests = Suite.find_and_parse_tests(self.getter,
+                                           predicate,
+                                           add_experimental=True)
+        self.assertEquals(len(tests), 2)
+        self.assertTrue(self.files['one'] in tests)
+        self.assertTrue(self.files['three'] in tests)
+
+
+    def mock_control_file_parsing(self):
+        """Fake out find_and_parse_tests(), returning content from |self.files|.
+        """
+        for test in self.files.values():
+            test.text = test.string  # mimic parsing.
+        self.mox.StubOutWithMock(Suite, 'find_and_parse_tests')
+        Suite.find_and_parse_tests(
+            mox.IgnoreArg(),
+            mox.IgnoreArg(),
+            add_experimental=True).AndReturn(self.files.values())
+
+
+    def testStableUnstableFilter(self):
+        """Should distinguish between experimental and stable tests."""
+        self.mock_control_file_parsing()
+        self.mox.ReplayAll()
+        suite = Suite.create_from_name(self._TAG, self.tmpdir,
+                                       afe=self.afe, tko=self.tko)
+
+        self.assertTrue(self.files['one'] in suite.tests)
+        self.assertTrue(self.files['two'] in suite.tests)
+        self.assertTrue(self.files['one'] in suite.unstable_tests())
+        self.assertTrue(self.files['two'] in suite.stable_tests())
+        self.assertFalse(self.files['one'] in suite.stable_tests())
+        self.assertFalse(self.files['two'] in suite.unstable_tests())
+
+
+    def expect_job_scheduling(self, add_experimental):
+        """Expect jobs to be scheduled for 'tests' in |self.files|.
+
+        @param add_experimental: expect jobs for experimental tests as well.
+        """
+        for test in self.files.values():
+            if not add_experimental and test.experimental:
+                continue
+            self.afe.create_job(
+                control_file=test.text,
+                name=mox.And(mox.StrContains(self._BUILD),
+                             mox.StrContains(test.name)),
+                control_type=mox.IgnoreArg(),
+                meta_hosts=[constants.VERSION_PREFIX + self._BUILD],
+                dependencies=[],
+                keyvals={'build': self._BUILD, 'suite': self._TAG}
+                ).AndReturn(FakeJob())
+
+
+    def testScheduleTests(self):
+        """Should schedule stable and experimental tests with the AFE."""
+        self.mock_control_file_parsing()
+        self.expect_job_scheduling(add_experimental=True)
+
+        self.mox.ReplayAll()
+        suite = Suite.create_from_name(self._TAG, self._BUILD,
+                                                     afe=self.afe, tko=self.tko)
+        suite.schedule()
+
+
+    def testScheduleTestsAndRecord(self):
+        """Should schedule stable and experimental tests with the AFE."""
+        self.mock_control_file_parsing()
+        self.mox.ReplayAll()
+        suite = Suite.create_from_name(self._TAG, self._BUILD,
+                                       afe=self.afe, tko=self.tko,
+                                       results_dir=self.tmpdir)
+        self.mox.ResetAll()
+        self.expect_job_scheduling(add_experimental=True)
+        self.mox.StubOutWithMock(suite, '_record_scheduled_jobs')
+        suite._record_scheduled_jobs()
+        self.mox.ReplayAll()
+        suite.schedule()
+        for job in  suite._jobs:
+          self.assertTrue(hasattr(job, 'test_name'))
+
+
+    def testScheduleStableTests(self):
+        """Should schedule only stable tests with the AFE."""
+        self.mock_control_file_parsing()
+        self.expect_job_scheduling(add_experimental=False)
+
+        self.mox.ReplayAll()
+        suite = Suite.create_from_name(self._TAG, self._BUILD,
+                                       afe=self.afe, tko=self.tko)
+        suite.schedule(add_experimental=False)
+
+
+    def _createSuiteWithMockedTestsAndControlFiles(self):
+        """Create a Suite, using mocked tests and control file contents.
+
+        @return Suite object, after mocking out behavior needed to create it.
+        """
+        self.expect_control_file_parsing()
+        self.mox.ReplayAll()
+        suite = Suite.create_from_name(self._TAG, self._BUILD, self.getter,
+                                       self.afe, self.tko)
+        self.mox.ResetAll()
+        return suite
+
+
+    def schedule_and_expect_these_results(self, suite, results, recorder):
+        self.mox.StubOutWithMock(suite, 'schedule')
+        suite.schedule(True)
+        self.manager.unlock()
+        for result in results:
+            status = result[0]
+            test_name = result[1]
+            recorder.record_entry(
+                StatusContains.CreateFromStrings('START', test_name))
+            recorder.record_entry(
+                StatusContains.CreateFromStrings(*result)).InAnyOrder('results')
+            recorder.record_entry(
+                StatusContains.CreateFromStrings('END %s' % status, test_name))
+        self.mox.StubOutWithMock(job_status, 'wait_for_results')
+        job_status.wait_for_results(self.afe, self.tko, suite._jobs).AndReturn(
+            map(lambda r: job_status.Status(*r), results))
+
+
+    def testRunAndWaitSuccess(self):
+        """Should record successful results."""
+        suite = self._createSuiteWithMockedTestsAndControlFiles()
+
+        recorder = self.mox.CreateMock(base_job.base_job)
+        recorder.record_entry(
+            StatusContains.CreateFromStrings('INFO', 'Start %s' % self._TAG))
+
+        results = [('GOOD', 'good'), ('FAIL', 'bad', 'reason')]
+        self.schedule_and_expect_these_results(suite, results, recorder)
+        self.expect_control_file_reparsing()
+        self.mox.ReplayAll()
+
+        suite.run_and_wait(recorder.record_entry, self.manager, True)
+
+
+    def testRunAndWaitFailure(self):
+        """Should record failure to gather results."""
+        suite = self._createSuiteWithMockedTestsAndControlFiles()
+
+        recorder = self.mox.CreateMock(base_job.base_job)
+        recorder.record_entry(
+            StatusContains.CreateFromStrings('INFO', 'Start %s' % self._TAG))
+        recorder.record_entry(
+            StatusContains.CreateFromStrings('FAIL', self._TAG, 'waiting'))
+
+        self.mox.StubOutWithMock(suite, 'schedule')
+        suite.schedule(True)
+        self.manager.unlock()
+        self.mox.StubOutWithMock(job_status, 'wait_for_results')
+        job_status.wait_for_results(mox.IgnoreArg(),
+                                    mox.IgnoreArg(),
+                                    mox.IgnoreArg()).AndRaise(
+                                            Exception('Expected during test.'))
+        self.expect_control_file_reparsing()
+        self.mox.ReplayAll()
+
+        suite.run_and_wait(recorder.record_entry, self.manager, True)
+
+
+    def testRunAndWaitScheduleFailure(self):
+        """Should record failure to schedule jobs."""
+        suite = self._createSuiteWithMockedTestsAndControlFiles()
+
+        recorder = self.mox.CreateMock(base_job.base_job)
+        recorder.record_entry(
+            StatusContains.CreateFromStrings('INFO', 'Start %s' % self._TAG))
+        recorder.record_entry(
+            StatusContains.CreateFromStrings('FAIL', self._TAG, 'scheduling'))
+
+        self.mox.StubOutWithMock(suite, 'schedule')
+        suite.schedule(True).AndRaise(Exception('Expected during test.'))
+        self.expect_control_file_reparsing()
+        self.mox.ReplayAll()
+
+        suite.run_and_wait(recorder.record_entry, self.manager, True)
+
+
+    def testRunAndWaitDevServerRacyFailure(self):
+        """Should record discovery of dev server races in listing files."""
+        suite = self._createSuiteWithMockedTestsAndControlFiles()
+
+        recorder = self.mox.CreateMock(base_job.base_job)
+        recorder.record_entry(
+            StatusContains.CreateFromStrings('INFO', 'Start %s' % self._TAG))
+
+        results = [('GOOD', 'good'), ('FAIL', 'bad', 'reason')]
+        self.schedule_and_expect_these_results(suite, results, recorder)
+
+        self.expect_racy_control_file_reparsing(
+            {'new': FakeControlData(self._TAG, '!')})
+
+        recorder.record_entry(
+            StatusContains.CreateFromStrings('FAIL', self._TAG, 'Dev Server'))
+        self.mox.ReplayAll()
+
+        suite.run_and_wait(recorder.record_entry, self.manager, True)
diff --git a/server/cros/dynamic_suite/tools.py b/server/cros/dynamic_suite/tools.py
new file mode 100644
index 0000000..08205ad
--- /dev/null
+++ b/server/cros/dynamic_suite/tools.py
@@ -0,0 +1,46 @@
+# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+from autotest_lib.client.common_lib import global_config
+from autotest_lib.client.common_lib.cros import dev_server
+
+
+_CONFIG = global_config.global_config
+
+
+def image_url_pattern():
+    """Returns the CROS 'image_url_pattern' value from the global config."""
+    return _CONFIG.get_config_value('CROS', 'image_url_pattern', type=str)
+
+
+def sharding_factor():
+    """Returns the CROS 'sharding_factor' value from the global config."""
+    return _CONFIG.get_config_value('CROS', 'sharding_factor', type=int)
+
+
+def package_url_pattern():
+    """Returns the CROS 'package_url_pattern' value from the global config."""
+    return _CONFIG.get_config_value('CROS', 'package_url_pattern', type=str)
+
+
+def get_package_url(build):
+    """Returns the package url for the given build."""
+    devserver_url = dev_server.DevServer.devserver_url_for_build(build)
+    return package_url_pattern() % (devserver_url, build)
+
+
+def inject_vars(vars, control_file_in):
+    """
+    Inject the contents of |vars| into |control_file_in|.
+
+    @param vars: a dict to shoehorn into the provided control file string.
+    @param control_file_in: the contents of a control file to munge.
+    @return the modified control file string.
+    """
+    control_file = ''
+    for key, value in vars.iteritems():
+        # None gets injected as 'None' without this check; same for digits.
+        if isinstance(value, str):
+            control_file += "%s='%s'\n" % (key, value)
+        else:
+            control_file += "%s=%r\n" % (key, value)
+    return control_file + control_file_in
diff --git a/server/cros/dynamic_suite/tools_unittest.py b/server/cros/dynamic_suite/tools_unittest.py
new file mode 100644
index 0000000..f1c5615
--- /dev/null
+++ b/server/cros/dynamic_suite/tools_unittest.py
@@ -0,0 +1,37 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Unit tests for server/cros/dynamic_suite/tools.py."""
+
+import logging
+import mox
+import unittest
+
+from autotest_lib.client.common_lib import global_config
+from autotest_lib.client.common_lib.cros import dev_server
+from autotest_lib.server.cros.dynamic_suite import tools
+
+
+class DynamicSuiteToolsTest(mox.MoxTestBase):
+    """Unit tests for dynamic_suite tools module methods."""
+
+
+    def testInjectVars(self):
+        """Should inject dict of variables into provided strings."""
+        def find_all_in(d, s):
+            """Returns true if all key-value pairs in |d| are printed in |s|."""
+            for k, v in d.iteritems():
+                if isinstance(v, str):
+                    if "%s='%s'\n" % (k, v) not in s:
+                        return False
+                else:
+                    if "%s=%r\n" % (k, v) not in s:
+                        return False
+            return True
+
+        # Mix of string, None, bool and int values to exercise both the
+        # quoted-string and repr injection paths of inject_vars().
+        v = {'v1': 'one', 'v2': 'two', 'v3': None, 'v4': False, 'v5': 5}
+        self.assertTrue(find_all_in(v, tools.inject_vars(v, '')))
+        self.assertTrue(find_all_in(v, tools.inject_vars(v, 'ctrl')))
diff --git a/server/site_autotest.py b/server/site_autotest.py
index 65ed056..939085c 100755
--- a/server/site_autotest.py
+++ b/server/site_autotest.py
@@ -6,11 +6,11 @@
 import os
 from autotest_lib.client.common_lib import error, global_config
 from autotest_lib.server import installable_object, autoserv_parser
-from autotest_lib.server.cros import dynamic_suite
+from autotest_lib.server.cros.dynamic_suite.constants import JOB_REPO_URL
 
 
-config = global_config.global_config
-parser = autoserv_parser.autoserv_parser
+_CONFIG = global_config.global_config
+_PARSER = autoserv_parser.autoserv_parser
 
 
 class SiteAutotest(installable_object.InstallableObject):
@@ -38,13 +38,11 @@
             if self.host:
                 afe = frontend.AFE(debug=False)
                 hosts = afe.get_hosts(hostname=self.host.hostname)
-                if hosts and dynamic_suite.JOB_REPO_URL in hosts[0].attributes:
-                    return hosts[0].attributes[dynamic_suite.JOB_REPO_URL]
-                logging.warning("No %s for %s", dynamic_suite.JOB_REPO_URL,
-                                self.host)
+                if hosts and JOB_REPO_URL in hosts[0].attributes:
+                    return hosts[0].attributes[JOB_REPO_URL]
+                logging.warning("No %s for %s", JOB_REPO_URL, self.host)
         except ImportError:
-            logging.warning('Not attempting to look for %s',
-                            dynamic_suite.JOB_REPO_URL)
+            logging.warning('Not attempting to look for %s', JOB_REPO_URL)
             pass
         return None
 
@@ -61,11 +59,11 @@
         """
         repos = super(SiteAutotest, self).get_fetch_location()
 
-        if parser.options.image:
+        if _PARSER.options.image:
             # The old way.
             # Add our new repo to the end, the package manager will later
             # reverse the list of repositories resulting in ours being first.
-            repos.append(parser.options.image.replace(
+            repos.append(_PARSER.options.image.replace(
                 'update', 'static/archive').rstrip('/') + '/autotest')
         else:
             # The new way.
@@ -101,7 +99,7 @@
         fetch_package_match = self.fetch_package_parser.search(line)
         if fetch_package_match:
             pkg_name, dest_path, fifo_path = fetch_package_match.groups()
-            serve_packages = global_config.global_config.get_config_value(
+            serve_packages = _CONFIG.get_config_value(
                 "PACKAGES", "serve_packages_from_autoserv", type=bool)
             if serve_packages and pkg_name == 'packages.checksum':
                 try:
diff --git a/server/site_crashcollect.py b/server/site_crashcollect.py
index ed1d6dd..d8869d2 100644
--- a/server/site_crashcollect.py
+++ b/server/site_crashcollect.py
@@ -8,7 +8,7 @@
 from autotest_lib.client.common_lib import utils as client_utils
 from autotest_lib.client.common_lib.cros import dev_server
 from autotest_lib.client.cros import constants
-from autotest_lib.server.cros import dynamic_suite
+from autotest_lib.server.cros.dynamic_suite.constants import JOB_BUILD_KEY
 from autotest_lib.server import utils
 
 def generate_minidump_stacktrace(minidump_path):
@@ -40,13 +40,13 @@
     # First, look up what build we tested.  If we can't find this, we can't
     # get the right debug symbols, so we might as well give up right now.
     keyvals = client_utils.read_keyval(resultdir)
-    if dynamic_suite.JOB_BUILD_KEY not in keyvals:
+    if JOB_BUILD_KEY not in keyvals:
         raise dev_server.DevServerException(
             'Cannot determine build being tested.')
 
     devserver = dev_server.DevServer.create()
     trace_text = devserver.symbolicate_dump(
-        minidump_path, keyvals[dynamic_suite.JOB_BUILD_KEY])
+        minidump_path, keyvals[JOB_BUILD_KEY])
     if not trace_text:
         raise dev_server.DevServerException('Unknown error!!')
     with open(minidump_path + '.txt', 'w') as trace_file:
diff --git a/server/site_tests/autoupdate/control b/server/site_tests/autoupdate/control
index 3d992a1..6bce18f 100644
--- a/server/site_tests/autoupdate/control
+++ b/server/site_tests/autoupdate/control
@@ -24,9 +24,10 @@
 vers = 'cros-version:'
 repo_url = None
 if 'image_name' in locals():
-    from autotest_lib.server.cros.dynamic_suite import dynamic_suite
-    vers = dynamic_suite.VERSION_PREFIX
-    repo_url = dynamic_suite.get_package_url(build=image_name)
+    from autotest_lib.server.cros.dynamic_suite import constants
+    from autotest_lib.server.cros.dynamic_suite import tools
+    vers = constants.VERSION_PREFIX
+    repo_url = tools.get_package_url(build=image_name)
 
 AFE = frontend_wrappers.RetryingAFE(timeout_min=30, delay_sec=10, debug=False)