cheets_CTS: move test to public repo.

CQ-DEPEND=CL:*291141, CL:390359
BUG=chromium:644471
TEST=test_that $DUT cheets_CTS.arm.android.core.tests.libcore.package.harmony_java_math

Change-Id: Iebc9021adaf1f4eb04902becc0a56b95d4ce04b4
Reviewed-on: https://chromium-review.googlesource.com/390325
Commit-Ready: Ilja H. Friedel <ihf@chromium.org>
Tested-by: Ilja H. Friedel <ihf@chromium.org>
Reviewed-by: Ilja H. Friedel <ihf@chromium.org>
diff --git a/client/site_tests/cheets_CTSHelper/cheets_CTSHelper.py b/client/site_tests/cheets_CTSHelper/cheets_CTSHelper.py
new file mode 100644
index 0000000..4a08f87
--- /dev/null
+++ b/client/site_tests/cheets_CTSHelper/cheets_CTSHelper.py
@@ -0,0 +1,21 @@
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from autotest_lib.client.common_lib.cros import arc, chrome
+from autotest_lib.client.bin import test
+
+
+class cheets_CTSHelper(test.test):
+    """Helper to run Android's CTS on autotest.
+
+    Android CTS needs a running Android instance, which in turn requires a
+    logged-in Chrome OS user. This helper class logs in to Chrome OS and waits
+    for the Android boot to complete.
+
+    We do not log out, and the machine will be rebooted after the test.
+    """
+    version = 1
+
+    def run_once(self):
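+        # Per the class docstring above, logging in with arc_mode enabled also
+        # waits for the Android container to finish booting. We keep the Chrome
+        # instance on |self| so the session stays logged in; the server side
+        # reboots the DUT once the CTS run is done.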
+        self.chrome = chrome.Chrome(arc_mode=arc.arc_common.ARC_MODE_ENABLED)
diff --git a/client/site_tests/cheets_CTSHelper/control b/client/site_tests/cheets_CTSHelper/control
new file mode 100644
index 0000000..836b7d2
--- /dev/null
+++ b/client/site_tests/cheets_CTSHelper/control
@@ -0,0 +1,19 @@
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+
+AUTHOR = "ARC Team"
+NAME = "cheets_CTSHelper"
+TIME = "SHORT"
+TEST_CATEGORY = "Functional"
+TEST_CLASS = "login"
+TEST_TYPE = "client"
+JOB_RETRIES = 2
+
+DOC = """
+This is a helper utility that logs in to Chrome for CTS.
+See also cheets_CTS for more details.
+"""
+
+job.run_test('cheets_CTSHelper')
diff --git a/server/cros/tradefed_test.py b/server/cros/tradefed_test.py
new file mode 100644
index 0000000..2e9821e
--- /dev/null
+++ b/server/cros/tradefed_test.py
@@ -0,0 +1,598 @@
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# repohooks/pre-upload.py currently does not run pylint. But for developers who
+# want to check their code manually we disable several harmless pylint warnings
+# which just distract from more serious remaining issues.
+#
+# The instance variables _host and _install_paths are not defined in __init__().
+# pylint: disable=attribute-defined-outside-init
+#
+# Many short variable names don't follow the naming convention.
+# pylint: disable=invalid-name
+#
+# _parse_result() and _dir_size() don't access self and could be functions.
+# pylint: disable=no-self-use
+#
+# _ChromeLogin and _TradefedLogCollector have no public methods.
+# pylint: disable=too-few-public-methods
+
+import contextlib
+import errno
+import hashlib
+import logging
+import os
+import pipes
+import random
+import re
+import shutil
+import stat
+import tempfile
+import urlparse
+
+from autotest_lib.client.bin import utils as client_utils
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.common_lib.cros import dev_server
+from autotest_lib.server import afe_utils
+from autotest_lib.server import autotest
+from autotest_lib.server import test
+from autotest_lib.server import utils
+from autotest_lib.site_utils import lxc
+
+try:
+    import lockfile
+except ImportError:
+    if utils.is_in_container():
+        # Ensure the container has the required packages installed.
+        lxc.install_packages(python_packages=['lockfile'])
+        import lockfile
+    else:
+        raise
+
+
+_SDK_TOOLS_DIR = ('gs://chromeos-arc-images/builds/'
+        'git_mnc-dr-arc-dev-linux-static_sdk_tools/3264272')
+_SDK_TOOLS_FILES = ['aapt']
+# To stabilize adb behavior, we use dynamically linked adb.
+_ADB_DIR = ('gs://chromeos-arc-images/builds/'
+        'git_mnc-dr-arc-dev-linux-cheets_arm-user/3264272')
+_ADB_FILES = ['adb']
+
+_ADB_POLLING_INTERVAL_SECONDS = 1
+_ADB_READY_TIMEOUT_SECONDS = 60
+_ANDROID_ADB_KEYS_PATH = '/data/misc/adb/adb_keys'
+
+_ARC_POLLING_INTERVAL_SECONDS = 1
+_ARC_READY_TIMEOUT_SECONDS = 60
+
+_TRADEFED_PREFIX = 'autotest-tradefed-install_'
+_TRADEFED_CACHE_LOCAL = '/tmp/autotest-tradefed-cache'
+_TRADEFED_CACHE_CONTAINER = '/usr/local/autotest/results/shared/cache'
+_TRADEFED_CACHE_CONTAINER_LOCK = '/usr/local/autotest/results/shared/lock'
+
+# According to dshi a drone has 500GB of disk space. It is ok for now to use
+# 10GB of disk space, as no more than 10 tests should run in parallel.
+# TODO(ihf): Investigate tighter cache size.
+_TRADEFED_CACHE_MAX_SIZE = (10 * 1024 * 1024 * 1024)
+
+
+class _ChromeLogin(object):
+    """Context manager to handle Chrome login state."""
+
+    def __init__(self, host):
+        self._host = host
+
+    def __enter__(self):
+        """Logs in to the Chrome."""
+        logging.info('Ensure Android is running...')
+        autotest.Autotest(self._host).run_test('cheets_CTSHelper',
+                                               check_client_result=True)
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        """On exit, to wipe out all the login state, reboot the machine.
+
+        @param exc_type: Exception type if an exception is raised from the
+                         with-block.
+        @param exc_value: Exception instance if an exception is raised from
+                          the with-block.
+        @param traceback: Stack trace info if an exception is raised from
+                          the with-block.
+        @return None, indicating not to ignore an exception from the with-block
+                if raised.
+        """
+        logging.info('Rebooting...')
+        try:
+            self._host.reboot()
+        except Exception:
+            if exc_type is None:
+                raise
+            # If an exception was raised from the with-block, just log the
+            # reboot failure so that the original exception is not masked.
+            logging.exception('Rebooting failed.')
+
+
+@contextlib.contextmanager
+def lock(filename):
+    """Prevents other autotest/tradefed instances from accessing cache."""
+    filelock = lockfile.FileLock(filename)
+    # It is tempting just to call filelock.acquire(3600). But the implementation
+    # has very poor temporal granularity (timeout/10), which is unsuitable for
+    # our needs. See /usr/lib64/python2.7/site-packages/lockfile/
+    while not filelock.i_am_locking():
+        try:
+            logging.info('Waiting for cache lock...')
+            filelock.acquire(random.randint(1, 5))
+        except (lockfile.AlreadyLocked, lockfile.LockTimeout):
+            pass
+        else:
+            logging.info('Acquired cache lock.')
+    try:
+        yield
+    finally:
+        filelock.release()
+        logging.info('Released cache lock.')
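+# Typical usage (see _install_bundle()/_install_files() below): access to the
+# shared download cache is serialized by taking this lock first, e.g.
+#
+#     with lock(self._tradefed_cache_lock):
+#         cache_path = self._download_to_cache(gs_uri)
+#         local = self._instance_copy(cache_path)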
+
+
+class TradefedTest(test.test):
+    """Base class to prepare DUT to run tests via tradefed."""
+    version = 1
+
+    def initialize(self, host=None):
+        """Sets up the tools and binary bundles for the test."""
+        logging.info('Hostname: %s', host.hostname)
+        self._host = host
+        self._install_paths = []
+        # Tests in the lab run within individual lxc container instances.
+        if utils.is_in_container():
+            # Ensure the container has the required packages installed.
+            lxc.install_packages(packages=['unzip', 'default-jre'])
+            cache_root = _TRADEFED_CACHE_CONTAINER
+        else:
+            cache_root = _TRADEFED_CACHE_LOCAL
+        # The content of the cache survives across jobs.
+        self._safe_makedirs(cache_root)
+        self._tradefed_cache = os.path.join(cache_root, 'cache')
+        self._tradefed_cache_lock = os.path.join(cache_root, 'lock')
+        # The content of the install location does not survive across jobs and
+        # is isolated (by using a unique path) against other autotest instances.
+        # This is not needed for the lab, but is useful if somebody wants to
+        # run multiple TradefedTest instances in parallel.
+        self._tradefed_install = tempfile.mkdtemp(prefix=_TRADEFED_PREFIX)
+        # Under lxc the cache is shared between multiple autotest/tradefed
+        # instances. We need to synchronize access to it. All binaries are
+        # installed through the (shared) cache into the local (unshared)
+        # lxc/autotest instance storage.
+        # If clearing the cache it must happen before all downloads.
+        self._clear_download_cache_if_needed()
+        # Set permissions (rwxr-xr-x) to the executable binaries.
+        permission = (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH
+                | stat.S_IXOTH)
+        self._install_files(_ADB_DIR, _ADB_FILES, permission)
+        self._install_files(_SDK_TOOLS_DIR, _SDK_TOOLS_FILES, permission)
+
+    def cleanup(self):
+        """Cleans up any dirtied state."""
+        # Kill any lingering adb servers.
+        self._run('adb', verbose=True, args=('kill-server',))
+        logging.info('Cleaning up %s.', self._tradefed_install)
+        shutil.rmtree(self._tradefed_install)
+
+    def _login_chrome(self):
+        """Returns Chrome log-in context manager.
+
+        Please see also cheets_CTSHelper for details about how this works.
+        """
+        return _ChromeLogin(self._host)
+
+    def _try_adb_connect(self):
+        """Attempts to connect to adb on the DUT.
+
+        @return boolean indicating if adb connected successfully.
+        """
+        # This may return failure due to a race condition in adb connect
+        # (b/29370989). If adb is already connected, this command will
+        # immediately return success.
+        hostport = '{}:{}'.format(self._host.hostname, self._host.port)
+        result = self._run(
+                'adb',
+                args=('connect', hostport),
+                verbose=True,
+                ignore_status=True)
+        logging.info('adb connect {}:\n{}'.format(hostport, result.stdout))
+        if result.exit_status != 0:
+            return False
+
+        result = self._run('adb', args=('devices',))
+        logging.info('adb devices:\n' + result.stdout)
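+        # A successfully connected DUT shows up in the 'adb devices' output as
+        # '<hostname>:<port>\tdevice' (or 'unauthorized' while the key exchange
+        # is still pending), which is what the regex below checks for.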
+        if not re.search(
+                r'{}\s+(device|unauthorized)'.format(re.escape(hostport)),
+                result.stdout):
+            return False
+
+        # Actually test the connection with an adb command as there can be
+        # a race between detecting the connected device and actually being
+        # able to run a command with authenticated adb.
+        result = self._run('adb', args=('shell', 'exit'), ignore_status=True)
+        return result.exit_status == 0
+
+    def _android_shell(self, command):
+        """Run a command remotely on the device in an android shell
+
+        This function is strictly for internal use only, as commands do not run
+        in a fully consistent Android environment. Prefer adb shell instead.
+        """
+        self._host.run('android-sh -c ' + pipes.quote(command))
+
+    def _write_android_file(self, filename, data):
+        """Writes a file to a location relative to the android container.
+
+        This is an internal function used to bootstrap adb.
+        Tests should use adb push to write files.
+        """
+        android_cmd = 'echo %s > %s' % (pipes.quote(data),
+                                        pipes.quote(filename))
+        self._android_shell(android_cmd)
+
+    def _connect_adb(self):
+        """Sets up ADB connection to the ARC container."""
+        logging.info('Setting up adb connection.')
+        # Generate and push keys for adb.
+        # TODO(elijahtaylor): Extract this code to arc_common and de-duplicate
+        # code in arc.py on the client side tests.
+        key_path = os.path.join(self.tmpdir, 'test_key')
+        pubkey_path = key_path + '.pub'
+        self._run('adb', verbose=True, args=('keygen', pipes.quote(key_path)))
+        with open(pubkey_path, 'r') as f:
+            self._write_android_file(_ANDROID_ADB_KEYS_PATH, f.read())
+        self._android_shell('restorecon ' + pipes.quote(_ANDROID_ADB_KEYS_PATH))
+        os.environ['ADB_VENDOR_KEYS'] = key_path
+
+        # Kill existing adb server to ensure that the env var is picked up.
+        self._run('adb', verbose=True, args=('kill-server',))
+
+        # This starts adbd.
+        self._android_shell('setprop sys.usb.config mtp,adb')
+
+        # adbd may take some time to come up. Repeatedly try to connect to adb.
+        utils.poll_for_condition(lambda: self._try_adb_connect(),
+                                 exception=error.TestError('Failed to set up '
+                                                           'adb connection'),
+                                 timeout=_ADB_READY_TIMEOUT_SECONDS,
+                                 sleep_interval=_ADB_POLLING_INTERVAL_SECONDS)
+
+        logging.info('Successfully set up adb connection.')
+
+    def _wait_for_arc_boot(self):
+        """Wait until ARC is fully booted.
+
+        Tests for the presence of the intent helper app to determine whether ARC
+        has finished booting.
+        """
+        def intent_helper_running():
+            result = self._run('adb', args=('shell', 'pgrep',
+                                            'org.chromium.arc.intent_helper'))
+            return bool(result.stdout)
+        utils.poll_for_condition(
+            intent_helper_running,
+            exception=error.TestError('Timed out waiting for intent helper.'),
+            timeout=_ARC_READY_TIMEOUT_SECONDS,
+            sleep_interval=_ARC_POLLING_INTERVAL_SECONDS)
+
+    def _disable_adb_install_dialog(self):
+        """Disables a dialog shown on adb install execution.
+
+        By default, when adb install is executed, the "Allow Google to
+        regularly check device activity ..." dialog is shown. It requires
+        manual user action, which would block tests at that point.
+        This method disables the dialog.
+        """
+        logging.info('Disabling the adb install dialog.')
+        result = self._run(
+                'adb',
+                verbose=True,
+                args=(
+                        'shell',
+                        'settings',
+                        'put',
+                        'global',
+                        'verifier_verify_adb_installs',
+                        '0'))
+        logging.info('Disable adb dialog: %s', result.stdout)
+
+    def _ready_arc(self):
+        """Ready ARC and adb for running tests via tradefed."""
+        self._connect_adb()
+        self._disable_adb_install_dialog()
+        self._wait_for_arc_boot()
+
+    def _safe_makedirs(self, path):
+        """Creates a directory at |path| and its ancestors.
+
+        Unlike os.makedirs(), this does not raise an error if the directories
+        already exist.
+        """
+        try:
+            os.makedirs(path)
+        except OSError as e:
+            if not (e.errno == errno.EEXIST and os.path.isdir(path)):
+                raise
+
+    def _unzip(self, filename):
+        """Unzip the file.
+
+        The destination directory name will be the stem of filename.
+        E.g., _unzip('foo/bar/baz.zip') will create the directory
+        'foo/bar/baz' and inflate the zip's contents under it.
+        If there is already a directory at the stem, that directory is reused.
+
+        @param filename: Path to the zip archive.
+        @return Path to the inflated directory.
+        """
+        destination = os.path.splitext(filename)[0]
+        if os.path.isdir(destination):
+            return destination
+        self._safe_makedirs(destination)
+        utils.run('unzip', args=('-d', destination, filename))
+        return destination
+
+    def _dir_size(self, directory):
+        """Compute recursive size in bytes of directory."""
+        size = 0
+        for root, _, files in os.walk(directory):
+            size += sum(os.path.getsize(os.path.join(root, name))
+                    for name in files)
+        return size
+
+    def _clear_download_cache_if_needed(self):
+        """Invalidates cache to prevent it from growing too large."""
+        # If the cache is large enough to hold a working set, we can simply
+        # delete everything without thrashing.
+        # TODO(ihf): Investigate strategies like LRU.
+        with lock(self._tradefed_cache_lock):
+            size = self._dir_size(self._tradefed_cache)
+            if size > _TRADEFED_CACHE_MAX_SIZE:
+                logging.info(
+                        'Current cache size=%d got too large. Clearing %s.',
+                        size, self._tradefed_cache)
+                shutil.rmtree(self._tradefed_cache)
+                self._safe_makedirs(self._tradefed_cache)
+            else:
+                logging.info('Current cache size=%d of %s.', size,
+                        self._tradefed_cache)
+
+    def _download_to_cache(self, uri):
+        """Downloads the uri from the storage server.
+
+        It always checks the cache for available binaries first and skips
+        download if binaries are already in cache.
+
+        The caller of this function is responsible for holding the cache lock.
+
+        @param uri: The Google Storage or dl.google.com uri.
+        @return Path to the downloaded file in the cache.
+        """
+        # Split uri into 3 pieces for use by gsutil and also by wget.
+        parsed = urlparse.urlparse(uri)
+        filename = os.path.basename(parsed.path)
+        # We are hashing the uri instead of the binary. This is acceptable, as
+        # the uris are supposed to contain version information and an object is
+        # not supposed to be changed once created.
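+        # The resulting cache layout is <cache_root>/<md5(uri)>/<basename(uri)>,
+        # so different uris never collide in the cache.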
+        output_dir = os.path.join(self._tradefed_cache,
+                                  hashlib.md5(uri).hexdigest())
+        output = os.path.join(output_dir, filename)
+        # Check for existence of file.
+        if os.path.exists(output):
+            logging.info('Skipping download of %s, reusing %s.', uri, output)
+            return output
+        self._safe_makedirs(output_dir)
+
+        if parsed.scheme not in ['gs', 'http', 'https']:
+            raise error.TestError('Unknown download scheme %s' % parsed.scheme)
+        if parsed.scheme in ['http', 'https']:
+            logging.info('Using wget to download %s to %s.', uri, output_dir)
+            # We are downloading 1 file at a time, hence using -O over -P.
+            # We also limit the rate to 20MBytes/s.
+            utils.run(
+                'wget',
+                args=(
+                    '--report-speed=bits',
+                    '--limit-rate=20M',
+                    '-O',
+                    output,
+                    uri),
+                verbose=True)
+            return output
+
+        if not client_utils.is_moblab():
+            # If the machine can access the storage server directly,
+            # defer to "gsutil" for downloading.
+            logging.info('Host %s not in lab. Downloading %s directly to %s.',
+                    self._host.hostname, uri, output)
+            # b/17445576: gsutil rsync of individual files is not implemented.
+            utils.run('gsutil', args=('cp', uri, output), verbose=True)
+            return output
+
+        # We are in the moblab. Because the machine cannot access the storage
+        # server directly, use dev server to proxy.
+        logging.info('Host %s is in lab. Downloading %s by staging to %s.',
+                self._host.hostname, uri, output)
+
+        dirname = os.path.dirname(parsed.path)
+        archive_url = '%s://%s%s' % (parsed.scheme, parsed.netloc, dirname)
+
+        # First, request the devserver to download files into the lab network.
+        # TODO(ihf): Switch stage_artifacts to honor rsync. Then we don't have
+        # to shuffle files inside of tarballs.
+        build = afe_utils.get_build(self._host)
+        ds = dev_server.ImageServer.resolve(build)
+        ds.stage_artifacts(build, files=[filename], archive_url=archive_url)
+
+        # Then download files from the dev server.
+        # TODO(ihf): use rsync instead of wget. Are there 3 machines involved?
+        # Itself, dev_server plus DUT? Or is there just no rsync in moblab?
+        ds_src = '/'.join([ds.url(), 'static', dirname, filename])
+        logging.info('dev_server URL: %s', ds_src)
+        # Pull the file from the dev_server into the cache via wget.
+        utils.run(
+                'wget',
+                args=(
+                        '--report-speed=bits',
+                        '--limit-rate=20M',
+                        '-O',
+                        output,
+                        ds_src),
+                verbose=True)
+        return output
+
+    def _instance_copy(self, cache_path):
+        """Makes a copy of a file from the (shared) cache to a wholy owned
+        local instance. Also copies one level of cache directoy (MD5 named).
+        """
+        filename = os.path.basename(cache_path)
+        dirname = os.path.basename(os.path.dirname(cache_path))
+        instance_dir = os.path.join(self._tradefed_install, dirname)
+        # Make sure destination directory is named the same.
+        self._safe_makedirs(instance_dir)
+        instance_path = os.path.join(instance_dir, filename)
+        shutil.copyfile(cache_path, instance_path)
+        return instance_path
+
+    def _install_bundle(self, gs_uri):
+        """Downloads a zip file, installs it and returns the local path."""
+        if not gs_uri.endswith('.zip'):
+            raise error.TestError('Not a .zip file %s.' % gs_uri)
+        # Atomic write through of file.
+        with lock(self._tradefed_cache_lock):
+            cache_path = self._download_to_cache(gs_uri)
+            local = self._instance_copy(cache_path)
+        return self._unzip(local)
+
+    def _install_files(self, gs_dir, files, permission):
+        """Installs binary tools."""
+        for filename in files:
+            gs_uri = os.path.join(gs_dir, filename)
+            # Atomic write through of file.
+            with lock(self._tradefed_cache_lock):
+                cache_path = self._download_to_cache(gs_uri)
+                local = self._instance_copy(cache_path)
+            os.chmod(local, permission)
+            # Keep track of PATH.
+            self._install_paths.append(os.path.dirname(local))
+
+    def _run(self, *args, **kwargs):
+        """Executes the given command line.
+
+        To support SDK tools, such as adb or aapt, this adds _install_paths
+        to the extra_paths. Before invoking this, ensure _install_files() has
+        been called.
+        """
+        kwargs['extra_paths'] = (
+                kwargs.get('extra_paths', []) + self._install_paths)
+        return utils.run(*args, **kwargs)
+
+    def _parse_tradefed_datetime(self, result, summary=None):
+        """Get the tradefed provided result ID consisting of a datetime stamp.
+
+        Unfortunately we are unable to tell tradefed where to store the results.
+        In the lab we have multiple instances of tradefed running in parallel
+        writing results and logs to the same base directory. This function
+        finds the identifier which tradefed used during the current run and
+        returns it for further processing of result files.
+
+        @param result: The result object from utils.run.
+        @param summary: Test result summary from runs so far.
+        @return datetime_id: The result ID chosen by tradefed.
+                             Example: '2016.07.14_00.34.50'.
+        """
+        # This string is shown for both 'run' and 'continue' after all tests.
+        match = re.search(r': XML test result file generated at (\S+). Passed',
+                result.stdout)
+        if not (match and match.group(1)):
+            # TODO(ihf): Find out if we ever recover something interesting in
+            # this case. Otherwise delete it.
+            # Try harder to find the remains. This string shows before all
+            # tests but only with 'run', not 'continue'.
+            logging.warning('XML test result file incomplete?')
+            match = re.search(r': Created result dir (\S+)', result.stdout)
+            if not (match and match.group(1)):
+                error_msg = 'Test did not complete due to Chrome or ARC crash.'
+                if summary:
+                    error_msg += (' Test summary from previous runs: %s'
+                            % summary)
+                raise error.TestError(error_msg)
+        datetime_id = match.group(1)
+        logging.info('Tradefed identified results and logs with %s.',
+                     datetime_id)
+        return datetime_id
+
+    def _parse_result(self, result):
+        """Check the result from the tradefed output.
+
+        This extracts the test pass/fail/executed list from the output of
+        tradefed. It is up to the caller to handle inconsistencies.
+
+        @param result: The result object from utils.run.
+        """
+        # Parse the stdout to extract test status. In particular step over
+        # similar output for each ABI and just look at the final summary.
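+        # An example of the summary line being matched below:
+        #   'XML test result file generated at 2016.07.14_00.34.50.
+        #    Passed 100, Failed 0, Not Executed 0'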
+        match = re.search(r'(XML test result file generated at (\S+). '
+                 r'Passed (\d+), Failed (\d+), Not Executed (\d+))',
+                 result.stdout)
+        if not match:
+            raise error.TestFail('Test log does not contain a summary.')
+
+        passed = int(match.group(3))
+        failed = int(match.group(4))
+        not_executed = int(match.group(5))
+        match = re.search(r'(Start test run of (\d+) packages, containing '
+                          r'(\d+(?:,\d+)?) tests)', result.stdout)
+        if match and match.group(3):
+            tests = int(match.group(3).replace(',', ''))
+        else:
+            # Unfortunately this happens. Assume it made no other mistakes.
+            logging.warning('Tradefed forgot to print number of tests.')
+            tests = passed + failed + not_executed
+        return (tests, passed, failed, not_executed)
+
+    def _collect_logs(self, repository, datetime, destination):
+        """Collects the tradefed logs.
+
+        It is legal to collect the same logs multiple times. This is normal
+        after 'tradefed continue' updates existing logs with new results.
+
+        @param repository: Full path to tradefed's output on disk.
+        @param datetime: The identifier which tradefed assigned to the run.
+                         Currently this looks like '2016.07.14_00.34.50'.
+        @param destination: Autotest result directory (destination of logs).
+        """
+        logging.info('Collecting tradefed testResult.xml and logs to %s.',
+                     destination)
+        repository_results = os.path.join(repository, 'results')
+        repository_logs = os.path.join(repository, 'logs')
+        # Because other tools rely on the currently chosen Google storage paths
+        # we need to keep destination_results in
+        # cheets_CTS.*/results/android-cts/2016.mm.dd_hh.mm.ss(/|.zip)
+        # and destination_logs in
+        # cheets_CTS.*/results/android-cts/logs/2016.mm.dd_hh.mm.ss/
+        destination_results = destination
+        destination_results_datetime = os.path.join(destination_results, datetime)
+        destination_results_datetime_zip = destination_results_datetime + '.zip'
+        destination_logs = os.path.join(destination, 'logs')
+        destination_logs_datetime = os.path.join(destination_logs, datetime)
+        # We may have collected the same logs before, clean old versions.
+        if os.path.exists(destination_results_datetime_zip):
+            os.remove(destination_results_datetime_zip)
+        if os.path.exists(destination_results_datetime):
+            shutil.rmtree(destination_results_datetime)
+        if os.path.exists(destination_logs_datetime):
+            shutil.rmtree(destination_logs_datetime)
+        shutil.copytree(
+                os.path.join(repository_results, datetime),
+                destination_results_datetime)
+        # Copying the zip file has to happen after the tree so the destination
+        # directory is available.
+        shutil.copy(
+                os.path.join(repository_results, datetime) + '.zip',
+                destination_results_datetime_zip)
+        shutil.copytree(
+                os.path.join(repository_logs, datetime),
+                destination_logs_datetime)
diff --git a/server/site_tests/cheets_CTS/cheets_CTS.py b/server/site_tests/cheets_CTS/cheets_CTS.py
new file mode 100644
index 0000000..6205097
--- /dev/null
+++ b/server/site_tests/cheets_CTS/cheets_CTS.py
@@ -0,0 +1,405 @@
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# repohooks/pre-upload.py currently does not run pylint. But for developers who
+# want to check their code manually we disable several harmless pylint warnings
+# which just distract from more serious remaining issues.
+#
+# The instance variable _android_cts is not defined in __init__().
+# pylint: disable=attribute-defined-outside-init
+#
+# Many short variable names don't follow the naming convention.
+# pylint: disable=invalid-name
+
+import contextlib
+import logging
+import os
+import shutil
+
+from autotest_lib.client.common_lib import error
+from autotest_lib.server import utils
+from autotest_lib.server.cros import tradefed_test
+
+# Notice that, if there are only a few failures, each RETRY step currently
+# (08/01/2016) takes a bit more than 6 minutes (mostly for reboot, login and
+# starting ARC). In other words a RETRY of 10 takes about 1h, which is well
+# within the limit of the 4h TIMEOUT. Nevertheless RETRY steps will cause the
+# test to end with a test warning and should be treated as serious bugs. As
+# bugs are more likely on dev and beta channels (and we test more builds), we
+# won't retry too hard on these channels.
+# Finally, if we have a true test hang or even a reboot, tradefed currently
+# will likely hang until the TIMEOUT hits and no RETRY steps will happen.
+_CTS_MAX_RETRY = {'dev': 3, 'beta': 5, 'stable': 7}
+# Maximum default time allowed for each individual CTS package.
+_CTS_TIMEOUT_SECONDS = (4 * 3600)
+
+# Public download locations for android cts bundles.
+_DL_CTS = 'https://dl.google.com/dl/android/cts/'
+_CTS_URI = {
+    'arm' : _DL_CTS + 'android-cts-6.0_r9-linux_x86-arm.zip',
+    'x86' : _DL_CTS + 'android-cts-6.0_r9-linux_x86-x86.zip',
+    'media' : _DL_CTS + 'android-cts-media-1.1.zip'
+}
+
+
+@contextlib.contextmanager
+def pushd(d):
+    """Defines pushd."""
+    current = os.getcwd()
+    os.chdir(d)
+    try:
+        yield
+    finally:
+        os.chdir(current)
+
+
+class cheets_CTS(tradefed_test.TradefedTest):
+    """Sets up tradefed to run CTS tests."""
+    version = 1
+
+    def setup(self, bundle=None, uri=None):
+        """Download and install a zipfile bundle from Google Storage.
+
+        @param bundle: bundle name, which needs to be a key of the _CTS_URI
+                       dictionary. Can be 'arm', 'x86' or undefined.
+        @param uri: URI of CTS bundle. Required if |bundle| is undefined.
+        """
+        if bundle in _CTS_URI:
+            self._android_cts = self._install_bundle(_CTS_URI[bundle])
+        else:
+            self._android_cts = self._install_bundle(uri)
+
+        self._cts_tradefed = os.path.join(
+                self._android_cts,
+                'android-cts',
+                'tools',
+                'cts-tradefed')
+        logging.info('CTS-tradefed path: %s', self._cts_tradefed)
+        self._needs_push_media = False
+
+    def _clean_repository(self):
+        """Ensures all old logs, results and plans are deleted.
+
+        This function should be called at the start of each autotest iteration.
+        """
+        logging.info('Cleaning up repository.')
+        repository = os.path.join(self._android_cts, 'android-cts',
+                'repository')
+        for directory in ['logs', 'plans', 'results']:
+            path = os.path.join(repository, directory)
+            if os.path.exists(path):
+                shutil.rmtree(path)
+            self._safe_makedirs(path)
+
+    def _push_media(self):
+        """Downloads, caches and pushed media files to DUT."""
+        media = self._install_bundle(_CTS_URI['media'])
+        base = os.path.splitext(os.path.basename(_CTS_URI['media']))[0]
+        cts_media = os.path.join(media, base)
+        copy_media = os.path.join(cts_media, 'copy_media.sh')
+        with pushd(cts_media):
+            self._run(
+                'source',
+                args=(copy_media, 'all'),
+                timeout=7200,  # Wait at most 2h for download of media files.
+                verbose=True,
+                stdout_tee=utils.TEE_TO_LOGS,
+                stderr_tee=utils.TEE_TO_LOGS)
+
+    def _tradefed_run_command(self,
+                              package=None,
+                              derivedplan=None,
+                              session_id=None):
+        """Builds the CTS tradefed 'run' command line.
+
+        There should be exactly one parameter which is not None:
+        @param package: the name of test package to be run.
+        @param derivedplan: name of derived plan to retry.
+        @param session_id: tradefed session id to continue.
+        @return: list of command tokens for the 'run' command.
+        """
+        if package is not None:
+            cmd = ['run', 'cts', '--package', package]
+        elif derivedplan is not None:
+            cmd = ['run', 'cts', '--plan', derivedplan]
+        elif session_id is not None:
+            cmd = ['run', 'cts', '--continue-session', '%d' % session_id]
+        else:
+            raise error.TestError('Need to provide an argument.')
+        # Automated media download is broken, so disable it. Instead we handle
+        # this explicitly via _push_media(). This has the benefit of being
+        # cached on the dev server. b/27245577
+        cmd.append('--skip-media-download')
+        # Only push media for tests that need it. b/29371037
+        if self._needs_push_media:
+            self._push_media()
+            # copy_media.sh is not lazy, but we try to be.
+            self._needs_push_media = False
+
+        # If we are running outside of the lab we can collect more data.
+        if not utils.is_in_container():
+            logging.info('Running outside of lab, adding extra debug options.')
+            cmd.append('--log-level-display=DEBUG')
+            cmd.append('--screenshot-on-failure')
+            cmd.append('--collect-deqp-logs')
+        # At early stage, cts-tradefed tries to reboot the device by
+        # "adb reboot" command. In a real Android device case, when the
+        # rebooting is completed, adb connection is re-established
+        # automatically, and cts-tradefed expects that behavior.
+        # However, in ARC, it doesn't work, so the whole test process
+        # is just stuck. Here, disable the feature.
+        cmd.append('--disable-reboot')
+        # Create a logcat file for each individual failure.
+        cmd.append('--logcat-on-failure')
+        return cmd
+
+    def _run_cts_tradefed(self, commands, datetime_id=None):
+        """Runs tradefed, collects logs and returns the result counts.
+
+        Assumes that only last entry of |commands| actually runs tests and has
+        interesting output (results, logs) for collection. Ignores all other
+        commands for this purpose.
+
+        @param commands: List of lists of command tokens.
+        @param datetime_id: For 'continue' the datetime of the previous run is
+                            known. Knowing it makes collecting logs more robust.
+        @return: tuple of (tests, pass, fail, notexecuted) counts.
+        """
+        for command in commands:
+            # Assume only last command actually runs tests and has interesting
+            # output (results, logs) for collection.
+            logging.info('RUN: ./cts-tradefed %s', ' '.join(command))
+            output = self._run(
+                self._cts_tradefed,
+                args=tuple(command),
+                timeout=self._timeout,
+                verbose=True,
+                # Make sure to tee tradefed stdout/stderr to autotest logs
+                # continuously during the test run.
+                stdout_tee=utils.TEE_TO_LOGS,
+                stderr_tee=utils.TEE_TO_LOGS)
+            logging.info('END: ./cts-tradefed %s\n', ' '.join(command))
+        if not datetime_id:
+            # Parse stdout to obtain datetime of the session. This is needed to
+            # locate result xml files and logs.
+            datetime_id = self._parse_tradefed_datetime(output, self.summary)
+        # Collect tradefed logs for autotest.
+        tradefed = os.path.join(self._android_cts, 'android-cts', 'repository')
+        autotest = os.path.join(self.resultsdir, 'android-cts')
+        self._collect_logs(tradefed, datetime_id, autotest)
+        return self._parse_result(output)
+
+    def _tradefed_run(self, package):
+        """Executes 'tradefed run |package|' command.
+
+        @param package: the name of test package to be run.
+        @return: tuple of (tests, pass, fail, notexecuted) counts.
+        """
+        # The list command is not required. It allows the reader to inspect the
+        # tradefed state when examining the autotest logs.
+        commands = [
+                ['list', 'results'],
+                self._tradefed_run_command(package=package)]
+        return self._run_cts_tradefed(commands)
+
+    def _tradefed_continue(self, session_id, datetime_id=None):
+        """Continues a previously started session.
+
+        Attempts to run all 'notexecuted' tests.
+        @param session_id: tradefed session id to continue.
+        @param datetime_id: datetime of run to continue.
+        @return: tuple of (tests, pass, fail, notexecuted) counts.
+        """
+        # The list command is not required. It allows the reader to inspect the
+        # tradefed state when examining the autotest logs.
+        commands = [
+                ['list', 'results'],
+                self._tradefed_run_command(session_id=session_id)]
+        return self._run_cts_tradefed(commands, datetime_id)
+
+    def _tradefed_retry(self, package, session_id):
+        """Retries failing tests in session.
+
+        It is assumed that there are no notexecuted tests of session_id,
+        otherwise some tests will be missed and never run.
+
+        @param package: the name of test package to be run.
+        @param session_id: tradefed session id to retry.
+        @return: tuple of (new session_id, tests, pass, fail, notexecuted).
+        """
+        # Creating new test plan for retry.
+        derivedplan = 'retry.%s.%s' % (package, session_id)
+        logging.info('Retrying failures using derived plan %s.', derivedplan)
+        # The list commands are not required. They allow the reader to inspect
+        # the tradefed state when examining the autotest logs.
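+        # The 'add derivedplan ... -r fail' command below asks tradefed to
+        # create a plan containing only the results that failed in
+        # |session_id|; running that plan re-executes just those tests.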
+        commands = [
+                ['list', 'plans'],
+                ['add', 'derivedplan', '--plan', derivedplan, '--session', '%d'
+                        % session_id, '-r', 'fail'],
+                ['list', 'plans'],
+                ['list', 'results'],
+                self._tradefed_run_command(derivedplan=derivedplan)]
+        tests, passed, failed, notexecuted = self._run_cts_tradefed(commands)
+        # TODO(ihf): Consider if diffing/parsing output of "list results" for
+        # new session_id might be more reliable. For now just assume simple
+        # increment. This works if only one tradefed instance is active and
+        # only a single run command is executing at any moment.
+        session_id += 1
+        return session_id, tests, passed, failed, notexecuted
+
+    def _get_release_channel(self):
+        """Returns the DUT channel of the image ('dev', 'beta', 'stable')."""
+        # TODO(ihf): check CHROMEOS_RELEASE_DESCRIPTION and return channel.
+        return 'dev'
+
+    def _get_channel_retry(self):
+        """Returns the maximum number of retries for DUT image channel."""
+        channel = self._get_release_channel()
+        if channel in _CTS_MAX_RETRY:
+            return _CTS_MAX_RETRY[channel]
+        retry = _CTS_MAX_RETRY['dev']
+        logging.warning('Could not establish channel. Using retry=%d.', retry)
+        return retry
+
+    def run_once(self,
+                 target_package,
+                 max_retry=None,
+                 timeout=_CTS_TIMEOUT_SECONDS):
+        """Runs CTS |target_package| once, but with several retries.
+
+        @param target_package: the name of test package to run.
+        @param max_retry: number of retry steps before reporting results.
+        @param timeout: time after which tradefed can be interrupted.
+        """
+        # Timeouts are sharp on dev and beta channels, but lenient on stable.
+        self._timeout = timeout
+        if self._get_release_channel() == 'stable':
+            self._timeout += 3600
+        # Retries depend on target_package and channel.
+        self._max_retry = max_retry
+        if not self._max_retry:
+            self._max_retry = self._get_channel_retry()
+        self.summary = ''
+        session_id = 0
+        # Don't download media for tests that don't need it. b/29371037
+        if target_package.startswith('android.mediastress'):
+            self._needs_push_media = True
+        # Unconditionally run CTS package.
+        with self._login_chrome():
+            self._ready_arc()
+            # Start each iteration with a clean repository. This allows us to
+            # track session_id blindly.
+            self._clean_repository()
+            logging.info('Running %s:', target_package)
+            tests, passed, failed, notexecuted = self._tradefed_run(
+                    target_package)
+            logging.info('RESULT: tests=%d, passed=%d, failed=%d, notexecuted='
+                    '%d', tests, passed, failed, notexecuted)
+            self.summary = ('run(t=%d, p=%d, f=%d, ne=%d)' %
+                    (tests, passed, failed, notexecuted))
+            # An internal self-check. We really should never hit this.
+            if tests != passed + failed + notexecuted:
+                raise error.TestError(
+                        'Test count inconsistent. %s' % self.summary)
+            # Keep track of global counts as each step works on local failures.
+            total_tests = tests
+            total_passed = passed
+        # The DUT has rebooted at this point and is in a clean state.
+
+        # If the results were not completed or were failing then continue or
+        # retry them iteratively MAX_RETRY times.
+        steps = 0
+        while steps < self._max_retry and (notexecuted > 0 or failed > 0):
+            # First, continue the session until no tests are left unexecuted.
+            while notexecuted > 0 and steps < self._max_retry:
+                with self._login_chrome():
+                    steps += 1
+                    self._ready_arc()
+                    logging.info('Continuing session %d:', session_id)
+                    # 'Continue' reports as passed all passing results in the
+                    # current session (including all tests passing before
+                    # continue). Hence first subtract the old count before
+                    # adding the new count. (Same for failed.)
+                    previously_passed = passed
+                    previously_failed = failed
+                    previously_notexecuted = notexecuted
+                    # TODO(ihf): For increased robustness pass in datetime_id of
+                    # session we are continuing.
+                    tests, passed, failed, notexecuted = self._tradefed_continue(
+                            session_id)
+                    # Unfortunately tradefed sometimes encounters an error
+                    # running the tests for instance timing out on downloading
+                    # the media files. Check for this condition and give it one
+                    # extra chance.
+                    if not (tests == previously_notexecuted and
+                            tests == passed + failed + notexecuted):
+                        logging.warning('Tradefed inconsistency - retrying.')
+                        tests, passed, failed, notexecuted = self._tradefed_continue(
+                                session_id)
+                    newly_passed = passed - previously_passed
+                    newly_failed = failed - previously_failed
+                    total_passed += newly_passed
+                    logging.info('RESULT: total_tests=%d, total_passed=%d, step'
+                            '(tests=%d, passed=%d, failed=%d, notexecuted=%d)',
+                            total_tests, total_passed, tests, newly_passed,
+                            newly_failed, notexecuted)
+                    self.summary += ' cont(t=%d, p=%d, f=%d, ne=%d)' % (tests,
+                            newly_passed, newly_failed, notexecuted)
+                    # An internal self-check. We really should never hit this.
+                    if not (tests == previously_notexecuted and
+                            tests == newly_passed + newly_failed + notexecuted):
+                        logging.warning('Test count inconsistent. %s',
+                                self.summary)
+                # The DUT has rebooted at this point and is in a clean state.
+
+            if notexecuted > 0:
+                # This likely means there were too many crashes/reboots to
+                # attempt running all tests. Don't attempt to retry as it is
+                # impossible to pass at this stage (and also inconsistent).
+                raise error.TestFail('Fail: Ran out of steps with %d total '
+                        'passed and %d remaining not executed tests. %s' %
+                        (total_passed, notexecuted, self.summary))
+
+            # Managed to reduce notexecuted to zero. Now create a new test plan
+            # to rerun only the failures we did encounter.
+            if failed > 0:
+                with self._login_chrome():
+                    steps += 1
+                    self._ready_arc()
+                    logging.info('Retrying failures of %s with session_id %d:',
+                            target_package, session_id)
+                    previously_failed = failed
+                    session_id, tests, passed, failed, notexecuted = self._tradefed_retry(
+                            target_package, session_id)
+                    # Unfortunately tradefed sometimes encounters an error
+                    # running the tests for instance timing out on downloading
+                    # the media files. Check for this condition and give it one
+                    # extra chance.
+                    if not (tests == previously_failed and
+                            tests == passed + failed + notexecuted):
+                        logging.warning('Tradefed inconsistency - retrying.')
+                        session_id, tests, passed, failed, notexecuted = self._tradefed_retry(
+                                target_package, session_id)
+                    total_passed += passed
+                    logging.info('RESULT: total_tests=%d, total_passed=%d, step'
+                            '(tests=%d, passed=%d, failed=%d, notexecuted=%d)',
+                            total_tests, total_passed, tests, passed, failed,
+                            notexecuted)
+                    self.summary += ' retry(t=%d, p=%d, f=%d, ne=%d)' % (tests,
+                            passed, failed, notexecuted)
+                    # An internal self-check. We really should never hit this.
+                    if not (previously_failed == tests and
+                            tests == passed + failed + notexecuted):
+                        logging.warning('Test count inconsistent. %s',
+                                self.summary)
+                # The DUT has rebooted at this point and is in a clean state.
+
+        # Final classification of test results.
+        if notexecuted > 0 or failed > 0:
+            raise error.TestFail('Failed: after %d retries giving up. '
+                    'total_passed=%d, failed=%d, notexecuted=%d. %s' % (steps,
+                    total_passed, failed, notexecuted, self.summary))
+        if steps > 0:
+            raise error.TestWarn('Passed: after %d retries passing %d tests. %s'
+                    % (steps, total_passed, self.summary))