Telemetry on Autotest: Telemetry runner code and simple test

This change adds the code that allows the drones and server-side tests
to execute the telemetry code installed on the devserver.

The telemetry runner determines the correct devserver for the DUT's
specific build and tells the devserver to set up telemetry. Given a
benchmark and page_set, it executes the telemetry benchmark and returns
a TelemetryResult object representing the results of telemetry's
execution.
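
For illustration, a server-side test can drive the runner roughly as the
telemetry_ScrollingBenchmark test added in this change does (sketch only;
host and self come from the test's run_once()):

    from autotest_lib.server.cros import telemetry_runner

    telemetry = telemetry_runner.TelemetryRunner(host)
    result = telemetry.run_telemetry_benchmark('scrolling_benchmark',
                                               'tough_scrolling_cases.json',
                                               keyval_writer=self)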

The stdout of the telemetry run is also parsed to generate perf
key-value pairs that are uploaded into the autotest results db.
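
For example (taken from the unit tests in this change), the benchmark stdout
lines

    url,load_time (ms)
    http://www.google.com,5

are parsed into the perf keyval 'load_time_ms-http___www.google.com' = '5':
the units are folded into the value name, and non-alphanumeric characters in
the URL (other than '.', '_' and '-') are replaced with '_'.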

BUG=chromium-os:37412, chromium-os:38997
TEST=tested the telemetry side with my local AFE and devserver and
tested the pyauto proxy portion in the actual lab.

Change-Id: Ia8da3f991b498e1ee63fbfffbfaaa628e3785bf3
Reviewed-on: https://gerrit.chromium.org/gerrit/43679
Commit-Queue: Simran Basi <sbasi@chromium.org>
Reviewed-by: Simran Basi <sbasi@chromium.org>
Tested-by: Simran Basi <sbasi@chromium.org>
diff --git a/client/common_lib/cros/dev_server.py b/client/common_lib/cros/dev_server.py
index 95052c4..ee23b57 100644
--- a/client/common_lib/cros/dev_server.py
+++ b/client/common_lib/cros/dev_server.py
@@ -336,6 +336,24 @@
 
 
     @remote_devserver_call()
+    def setup_telemetry(self, build):
+        """Tell the devserver to setup telemetry for this build.
+
+        The devserver will stage autotest and then extract the required files
+        for telemetry.
+
+        @param build: the build to setup telemetry for.
+
+        @returns path on the devserver that telemetry is installed to.
+        """
+        call = self.build_call(
+                'setup_telemetry',
+                archive_url=_get_image_storage_server() + build)
+        response = urllib2.urlopen(call)
+        return response.read()
+
+
+    @remote_devserver_call()
     def finish_download(self, image):
         """Tell the devserver to finish staging |image|.
 
diff --git a/server/cros/dynamic_suite/constants.py b/server/cros/dynamic_suite/constants.py
index d8fda28..b1e94c1 100644
--- a/server/cros/dynamic_suite/constants.py
+++ b/server/cros/dynamic_suite/constants.py
@@ -12,6 +12,7 @@
 FW_VERSION_PREFIX = 'fw-version:'
 JOB_REPO_URL = 'job_repo_url'
 VERSION_PREFIX = 'cros-version:'
+BOARD_PREFIX = 'board:'
 
 # Timings
 ARTIFACT_FINISHED_TIME = 'artifact_finished_time'
diff --git a/server/cros/telemetry_runner.py b/server/cros/telemetry_runner.py
new file mode 100644
index 0000000..7262a13
--- /dev/null
+++ b/server/cros/telemetry_runner.py
@@ -0,0 +1,301 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import re
+import StringIO
+
+import common
+from autotest_lib.client.common_lib import error, utils
+from autotest_lib.client.common_lib.cros import dev_server
+
+
+TELEMETRY_RUN_BENCHMARKS_SCRIPT = 'tools/perf/run_multipage_benchmarks'
+TELEMETRY_RUN_TESTS_SCRIPT = 'tools/telemetry/run_tests'
+TELEMETRY_TIMEOUT_MINS = 15
+
+# Result Statuses
+SUCCESS_STATUS = 'SUCCESS'
+WARNING_STATUS = 'WARNING'
+FAILED_STATUS = 'FAILED'
+
+
+class TelemetryResult(object):
+    """Class to represent the results of a telemetry run.
+
+    This class represents the results of a telemetry run, whether it ran
+    successfully, failed, or had warnings.
+    """
+
+
+    def __init__(self, exit_code=0, stdout='', stderr=''):
+        """Initializes this TelemetryResultObject instance.
+
+        @param status: Status of the telemtry run.
+        @param stdout: Stdout of the telemetry run.
+        @param stderr: Stderr of the telemetry run.
+        """
+        if exit_code == 0:
+            self.status = SUCCESS_STATUS
+        else:
+            self.status = FAILED_STATUS
+
+        self.perf_keyvals = {}
+        self._stdout = stdout
+        self._stderr = stderr
+        self.output = '\n'.join([stdout, stderr])
+
+
+    def _cleanup_value(self, value):
+        """Cleanup a value string.
+
+        Given a string representing a value clean it up by removing the space
+        and parenthesis around the units, and either append the units or get
+        rid of them.
+
+        Examples:
+            loadtime (ms) -> loadtime_ms
+            image_count () -> image_count
+            image_count (count) -> image_count
+            CodeLoad (score (bigger is better)) -> CodeLoad_score
+            load_percent (%) -> load_percent
+            score (runs/s) -> score_runs_per_s
+
+        @param value: Value we are cleaning up.
+
+        @returns A string representing the cleaned-up value name.
+        """
+        value_sections = value.split(' (')
+        value_name = value_sections[0]
+        # There can be sub-parens in the units -> if so remove them.
+        units = value_sections[1].split('(')[0]
+        units = units.split(')')[0]
+        if units == '%':
+            units = 'percent'
+        if '/' in units:
+            units = units.replace('/', '_per_')
+        if not units:
+            return value_name
+        if value_name.endswith(units):
+            return value_name
+        return '_'.join([value_name, units])
+
+
+    def parse_benchmark_results(self):
+        """Parse the results of a telemetry benchmark run.
+
+        Stdout has the format of CSV at the top and then the output repeated
+        in RESULT block format below.
+
+        We will parse the CSV part to get the perf key-value pairs we are
+        interested in.
+
+        Example stdout:
+        url,average_commit_time (ms),average_image_gathering_time (ms)
+        file:///tough_scrolling_cases/cust_scrollbar.html,1.3644,0
+        RESULT average_commit_time: <URL>= <SCORE> score
+        RESULT average_image_gathering_time: <URL>= <SCORE> score
+
+        We want to generate perf keys in the format of value-url, e.g.:
+        average_commit_time-http___www.google.com
+        where non-alphanumeric characters other than '.', '_' and '-' are
+        replaced with '_'.
+
+        Stderr contains warnings and tracebacks. There is always a default
+        warning about the display environment setting, followed by warnings
+        about page timeouts or a traceback.
+
+        If there are any other warnings we flag the test as a warning. If
+        there is a traceback we consider this test a failure.
+
+        Updates self.perf_keyvals with the parsed perf key-value pairs and
+        sets self.status accordingly.
+        """
+        # The output will be in CSV format.
+        if not self._stdout:
+            # Nothing in stdout implies a test failure.
+            logging.error('No stdout, test failed.')
+            self.status = FAILED_STATUS
+            return
+
+        stdout_lines = self._stdout.splitlines()
+        value_names = None
+        for line in stdout_lines:
+            if not line:
+                continue
+            if not value_names and line.startswith('url,'):
+                # This line lists out all the values we care about and we drop
+                # the first one as it is the url name.
+                value_names = line.split(',')[1:]
+                # Clean up each value name.
+                value_names = [self._cleanup_value(v) for v in value_names]
+                logging.debug('Value_names: %s', value_names)
+            if not value_names:
+                continue
+            if ' ' in line:
+                # We are in a non-CSV part of the output, ignore this line.
+                continue
+            # We now have a CSV line we care about; parse it accordingly.
+            line_values = line.split(',')
+            # Grab the URL
+            url = line_values[0]
+            # We want the perf keys to be of the format value-url. Example:
+            # load_time-http___www.google.com
+            # Replace all non-alphanumeric characters except
+            # '-', '.' and '_' with '_'.
+            url_values_names = [re.sub(r'[^\w.-]', '_', '-'.join([v, url]))
+                    for v in value_names]
+            self.perf_keyvals.update(dict(zip(url_values_names,
+                                              line_values[1:])))
+        logging.debug('Perf Keyvals: %s', self.perf_keyvals)
+
+        if self.status == SUCCESS_STATUS:
+            return
+
+        # Otherwise check if simply a Warning occurred or a Failure,
+        # i.e. a Traceback is listed.
+        self.status = WARNING_STATUS
+        for line in self._stderr.splitlines():
+            if line.startswith('Traceback'):
+                self.status = FAILED_STATUS
+
+
+class TelemetryRunner(object):
+    """Class responsible for telemetry for a given build.
+
+    This class will extract and install telemetry on the devserver and is
+    responsible for executing the telemetry benchmarks and returning their
+    output to the caller.
+    """
+
+    def __init__(self, host):
+        """Initializes this telemetry runner instance.
+
+        If telemetry is not installed for this build, it will be.
+        """
+        self._host = host
+        logging.debug('Grabbing build from AFE.')
+
+        build = host.get_build()
+        if not build:
+            logging.error('Unable to locate build label for host: %s.',
+                          self._host.hostname)
+            raise error.AutotestError('Failed to grab build for host %s.' %
+                                      self._host.hostname)
+
+        logging.debug('Setting up telemetry for build: %s', build)
+
+        self._devserver = dev_server.ImageServer.resolve(build)
+        self._telemetry_path = self._devserver.setup_telemetry(build=build)
+        logging.debug('Telemetry Path: %s', self._telemetry_path)
+
+
+    def _run_telemetry(self, script, test_or_benchmark):
+        """Runs telemetry on a dut.
+
+        @param script: Telemetry script we want to run. For example:
+                       [path_to_telemetry_src]/src/tools/telemetry/run_tests
+        @param test_or_benchmark: Name of the test or benchmark we want to run,
+                                 with the page_set (if required) as part of the
+                                 string.
+
+        @returns A TelemetryResult Instance with the results of this telemetry
+                 execution.
+        """
+        devserver_hostname = self._devserver.url().split(
+                'http://')[1].split(':')[0]
+        telemetry_args = ['ssh',
+                          devserver_hostname,
+                          'python',
+                          script,
+                          '--browser=cros-chrome',
+                          '--remote=%s' % self._host.hostname,
+                          test_or_benchmark]
+
+        logging.debug('Running Telemetry: %s', ' '.join(telemetry_args))
+        output = StringIO.StringIO()
+        error_output = StringIO.StringIO()
+        exit_code = 0
+        try:
+            result = utils.run(' '.join(telemetry_args), stdout_tee=output,
+                               stderr_tee=error_output,
+                               timeout=TELEMETRY_TIMEOUT_MINS*60)
+            exit_code = result.exit_status
+        except error.CmdError as e:
+            # Telemetry returned a non-zero exit code; for benchmarks this
+            # can be due to a timeout on one of the pages of the page set and
+            # we may still have data on the rest. For a test, however, this
+            # indicates failure.
+            logging.debug('Error occurred executing telemetry.')
+            exit_code = e.result_obj.exit_status
+
+        stdout = output.getvalue()
+        stderr = error_output.getvalue()
+        logging.debug('Telemetry completed with exit code: %d.\nstdout:%s\n'
+                      'stderr:%s', exit_code, stdout, stderr)
+
+        return TelemetryResult(exit_code=exit_code, stdout=stdout,
+                               stderr=stderr)
+
+
+    def run_telemetry_test(self, test):
+        """Runs a telemetry test on a dut.
+
+        @param test: Telemetry test we want to run.
+
+        @returns A TelemetryResult Instance with the results of this telemetry
+                 execution.
+        """
+        logging.debug('Running telemetry test: %s', test)
+        telemetry_script = os.path.join(self._telemetry_path,
+                                        TELEMETRY_RUN_TESTS_SCRIPT)
+        result = self._run_telemetry(telemetry_script, test)
+        if result.status == FAILED_STATUS:
+            raise error.TestFail('Telemetry test: %s failed.' % test)
+        return result
+
+
+    def run_telemetry_benchmark(self, benchmark, page_set, keyval_writer=None):
+        """Runs a telemetry benchmark on a dut.
+
+        @param benchmark: Benchmark we want to run.
+        @param page_set: Page set we want to use.
+        @param keyval_writer: Should be an instance with the function
+                              write_perf_keyval(); if None, no keyvals will be
+                              written. Typically this will be the test or job
+                              object from an autotest test.
+
+        @returns A TelemetryResult Instance with the results of this telemetry
+                 execution.
+        """
+        logging.debug('Running telemetry benchmark: %s with page set: %s.',
+                      benchmark, page_set)
+        telemetry_script = os.path.join(self._telemetry_path,
+                                        TELEMETRY_RUN_BENCHMARKS_SCRIPT)
+        page_set_path = os.path.join(self._telemetry_path,
+                                     'tools/perf/page_sets/%s' % page_set)
+        benchmark_with_pageset = ' '.join([benchmark, page_set_path])
+        result = self._run_telemetry(telemetry_script, benchmark_with_pageset)
+        result.parse_benchmark_results()
+
+        if keyval_writer:
+            keyval_writer.write_perf_keyval(result.perf_keyvals)
+
+        if result.status == WARNING_STATUS:
+            raise error.TestWarn('Telemetry Benchmark: %s with page set: %s'
+                                 ' exited with Warnings.' % (benchmark,
+                                                             page_set))
+        if result.status == FAILED_STATUS:
+            raise error.TestFail('Telemetry Benchmark: %s with page set: %s'
+                                 ' failed to run.' % (benchmark,
+                                                      page_set))
+
+        return result
diff --git a/server/cros/telemetry_runner_unittest.py b/server/cros/telemetry_runner_unittest.py
new file mode 100644
index 0000000..3051131
--- /dev/null
+++ b/server/cros/telemetry_runner_unittest.py
@@ -0,0 +1,193 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Unit tests for server/cros/dynamic_suite/telemetry_runner.py."""
+import mox
+
+import common
+from autotest_lib.server.cros import telemetry_runner
+
+
+class TelemetryResultTest(mox.MoxTestBase):
+    """Unit tests for telemetry_runner.TelemetryResult."""
+
+
+    def testEmptyStdout(self):
+        """Test when the test exits with 0 but there is no output."""
+        result = telemetry_runner.TelemetryResult()
+        result.parse_benchmark_results()
+        self.assertEquals(result.status, telemetry_runner.FAILED_STATUS)
+
+
+    def testOnlyCSV(self):
+        """Test when the stdout is only CSV format."""
+        stdout = ('url,load_time (ms),image_decode_time (ms),image_count '
+                  '(count)\n'
+                  'http://www.google.com,5,100,10\n')
+        expected_keyvals = {
+                'load_time_ms-http___www.google.com': '5',
+                'image_decode_time_ms-http___www.google.com': '100',
+                'image_count-http___www.google.com':'10'}
+
+        result = telemetry_runner.TelemetryResult(exit_code=0, stdout=stdout)
+        result.parse_benchmark_results()
+        self.assertEquals(result.status, telemetry_runner.SUCCESS_STATUS)
+        self.assertEquals(expected_keyvals, result.perf_keyvals)
+
+
+    def testOnlyCSVWithWarnings(self):
+        """Test when the stderr has Warnings."""
+        stdout = ('url,load_time (ms),image_decode_time (ms),image_count '
+                  '(count)\n'
+                  'http://www.google.com,5,100,10\n')
+        stderr = ('WARNING: Page failed to load http://www.facebook.com\n'
+                  'WARNING: Page failed to load http://www.yahoo.com\n')
+        expected_keyvals = {
+                'load_time_ms-http___www.google.com': '5',
+                'image_decode_time_ms-http___www.google.com': '100',
+                'image_count-http___www.google.com':'10'}
+
+        result = telemetry_runner.TelemetryResult(exit_code=2, stdout=stdout,
+                                                  stderr=stderr)
+        result.parse_benchmark_results()
+        self.assertEquals(result.status, telemetry_runner.WARNING_STATUS)
+        self.assertEquals(expected_keyvals, result.perf_keyvals)
+
+
+    def testOnlyCSVWithWarningsAndTraceback(self):
+        """Test when the stderr has Warnings and Traceback."""
+        stdout = ('url,load_time (ms),image_decode_time (ms),image_count '
+                  '(count)\n'
+                  'http://www.google.com,5,100,10\n')
+        stderr = ('WARNING: Page failed to load http://www.facebook.com\n'
+                  'WARNING: Page failed to load http://www.yahoo.com\n'
+                  'Traceback (most recent call last):\n'
+                  'File "../../utils/unittest_suite.py", line 238, in '
+                  '<module>\n'
+                  'main()')
+        expected_keyvals = {
+                'load_time_ms-http___www.google.com': '5',
+                'image_decode_time_ms-http___www.google.com': '100',
+                'image_count-http___www.google.com':'10'}
+
+        result = telemetry_runner.TelemetryResult(exit_code=2, stdout=stdout,
+                                                  stderr=stderr)
+        result.parse_benchmark_results()
+        self.assertEquals(result.status, telemetry_runner.FAILED_STATUS)
+        self.assertEquals(expected_keyvals, result.perf_keyvals)
+
+
+    def testInfoBeforeCSV(self):
+        """Test when there is info before the CSV format."""
+        stdout = ('Pages: [http://www.google.com, http://www.facebook.com]\n'
+                  'url,load_time (ms),image_decode_time (ms),image_count '
+                  '(count)\n'
+                  'http://www.google.com,5,100,10\n')
+        stderr = 'WARNING: Page failed to load http://www.facebook.com\n'
+        expected_keyvals = {
+                'load_time_ms-http___www.google.com': '5',
+                'image_decode_time_ms-http___www.google.com': '100',
+                'image_count-http___www.google.com':'10'}
+
+        result = telemetry_runner.TelemetryResult(exit_code=1, stdout=stdout,
+                                                  stderr=stderr)
+        result.parse_benchmark_results()
+        self.assertEquals(result.status, telemetry_runner.WARNING_STATUS)
+        self.assertEquals(expected_keyvals, result.perf_keyvals)
+
+
+    def testInfoAfterCSV(self):
+        """Test when there is info after the CSV format."""
+        stdout = ('url,load_time (ms),image_decode_time (ms),image_count '
+                  '(count)\n'
+                  'http://www.google.com,5,100,10\n'
+                  'RESULT load_time for http://www.google.com = 5\n'
+                  'RESULT image_decode_time for http://www.google.com = 100\n'
+                  'RESULT image_count for http://www.google.com = 10\n')
+        expected_keyvals = {
+                'load_time_ms-http___www.google.com': '5',
+                'image_decode_time_ms-http___www.google.com': '100',
+                'image_count-http___www.google.com':'10'}
+
+        result = telemetry_runner.TelemetryResult(exit_code=0, stdout=stdout,
+                                                  stderr='')
+        result.parse_benchmark_results()
+        self.assertEquals(result.status, telemetry_runner.SUCCESS_STATUS)
+        self.assertEquals(expected_keyvals, result.perf_keyvals)
+
+
+    def testInfoBeforeAndAfterCSV(self):
+        """Test when there is info before and after CSV format."""
+        stdout = ('Pages: [http://www.google.com]\n'
+                  'url,load_time (ms),image_decode_time (ms),image_count '
+                  '(count)\n'
+                  'http://www.google.com,5,100,10\n'
+                  'RESULT load_time for http://www.google.com = 5\n'
+                  'RESULT image_decode_time for http://www.google.com = 100\n'
+                  'RESULT image_count for http://www.google.com = 10\n')
+        expected_keyvals = {
+                'load_time_ms-http___www.google.com': '5',
+                'image_decode_time_ms-http___www.google.com': '100',
+                'image_count-http___www.google.com':'10'}
+
+        result = telemetry_runner.TelemetryResult(exit_code=0, stdout=stdout,
+                                                  stderr='')
+        result.parse_benchmark_results()
+        self.assertEquals(result.status, telemetry_runner.SUCCESS_STATUS)
+        self.assertEquals(expected_keyvals, result.perf_keyvals)
+
+
+    def testNoCSV(self):
+        """Test when CSV format is missing from stdout."""
+        stdout = ('Pages: [http://www.google.com]\n'
+                  'RESULT load_time for http://www.google.com = 5\n'
+                  'RESULT image_decode_time for http://www.google.com = 100\n'
+                  'RESULT image_count for http://www.google.com = 10)\n')
+        expected_keyvals = {}
+
+        result = telemetry_runner.TelemetryResult(exit_code=0, stdout=stdout,
+                                                  stderr='')
+        result.parse_benchmark_results()
+        self.assertEquals(result.status, telemetry_runner.SUCCESS_STATUS)
+        self.assertEquals(expected_keyvals, result.perf_keyvals)
+
+
+    def testBadCharactersInUrlAndValues(self):
+        """Test that bad characters are cleaned up in value names and urls."""
+        stdout = ('url,load_time (ms),image_decode_time?=% (ms),image_count '
+                  '(count)\n'
+                  'http://www.google.com?search=&^@$You,5,100,10\n')
+        expected_keyvals = {
+                'load_time_ms-http___www.google.com_search_____You': '5',
+                'image_decode_time____ms-http___www.google.com_search_____You':
+                '100',
+                'image_count-http___www.google.com_search_____You':'10'}
+
+        result = telemetry_runner.TelemetryResult(exit_code=0, stdout=stdout,
+                                                  stderr='')
+        result.parse_benchmark_results()
+        self.assertEquals(result.status, telemetry_runner.SUCCESS_STATUS)
+        self.assertEquals(expected_keyvals, result.perf_keyvals)
+
+
+    def testCleanupUnits(self):
+        """Test that weird units are cleaned up."""
+        result = telemetry_runner.TelemetryResult()
+        self.assertEquals(result._cleanup_value('loadtime (ms)'),
+                                                'loadtime_ms')
+        self.assertEquals(result._cleanup_value('image_count ()'),
+                                                'image_count')
+        self.assertEquals(result._cleanup_value('image_count (count)'),
+                                                'image_count')
+        self.assertEquals(result._cleanup_value(
+                'CodeLoad (score (bigger is better))'),
+                'CodeLoad_score')
+        self.assertEquals(result._cleanup_value('load (%)'),
+                                                'load_percent')
+        self.assertEquals(result._cleanup_value('load_percent (%)'),
+                                                'load_percent')
+        self.assertEquals(result._cleanup_value('score (runs/s)'),
+                                                'score_runs_per_s')
\ No newline at end of file
diff --git a/server/hosts/site_host.py b/server/hosts/site_host.py
index ecf1b84..45f4bac 100644
--- a/server/hosts/site_host.py
+++ b/server/hosts/site_host.py
@@ -502,6 +502,22 @@
             self.run('rm -rf ' + path)
 
 
+    def _get_label_from_afe(self, label_prefix):
+        """Retrieve a host's specific label from the AFE.
+
+        Looks for a host label that has the form <label_prefix>:<value>
+        and returns the "<value>" part of the label. None is returned
+        if there is not a label matching the pattern
+
+        @returns the label that matches the prefix or 'None'
+        """
+        host_model = models.Host.objects.get(hostname=self.hostname)
+        host_label = host_model.labels.get(name__startswith=label_prefix)
+        if not host_label:
+            return None
+        return host_label.name.split(label_prefix, 1)[1]
+
+
     def _get_board_from_afe(self):
         """Retrieve this host's board from its labels in the AFE.
 
@@ -511,19 +527,18 @@
 
         @returns board from label, or `None`.
         """
-        host_model = models.Host.objects.get(hostname=self.hostname)
-        board_labels = filter(lambda l: l.name.startswith('board:'),
-                              host_model.labels.all())
-        board_name = None
-        if len(board_labels) == 1:
-            board_name = board_labels[0].name.split(':', 1)[1]
-        elif len(board_labels) == 0:
-            logging.error('Host %s does not have a board label.',
-                          self.hostname)
-        else:
-            logging.error('Host %s has multiple board labels.',
-                          self.hostname)
-        return board_name
+        return self._get_label_from_afe(ds_constants.BOARD_PREFIX)
+
+
+    def get_build(self):
+        """Retrieve the current build for this Host from the AFE.
+
+        Looks through this host's labels in the AFE to determine its build.
+
+        @returns The current build, or None if no build label is assigned to
+                 this host.
+        """
+        return self._get_label_from_afe(ds_constants.VERSION_PREFIX)
 
 
     def _install_repair(self):
@@ -1162,9 +1177,10 @@
         # Devices in the lab generally have the correct board name but our own
         # development devices have {board_name}-signed-{key_type}. The board
         # name may also begin with 'x86-' which we need to keep.
+        board_format_string = ds_constants.BOARD_PREFIX + '%s'
         if 'x86' not in board:
-            return 'board:%s' % board.split('-')[0]
-        return 'board:%s' % '-'.join(board.split('-')[0:2])
+            return board_format_string % board.split('-')[0]
+        return board_format_string % '-'.join(board.split('-')[0:2])
 
 
     @label_decorator('lightsensor')
diff --git a/server/site_tests/telemetry_ScrollingActionTests/control b/server/site_tests/telemetry_ScrollingActionTests/control
new file mode 100644
index 0000000..da83f1d
--- /dev/null
+++ b/server/site_tests/telemetry_ScrollingActionTests/control
@@ -0,0 +1,21 @@
+# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+AUTHOR = "chromeos-perf"
+NAME = "Telemetry Scrolling Action Test"
+TIME = "LONG"
+TEST_CATEGORY = "Functional"
+TEST_TYPE = "server"
+
+DOC = """
+This server side test executes the Telemetry Scrolling Action tests.
+This is part of our effort to support Chrome for Chrome OS performance testing.
+"""
+
+def run_test(machine):
+    host = hosts.create_host(machine)
+    job.run_test("telemetry_ScrollingActionTests", host=host)
+
+
+parallel_simple(run_test, machines)
diff --git a/server/site_tests/telemetry_ScrollingActionTests/telemetry_ScrollingActionTests.py b/server/site_tests/telemetry_ScrollingActionTests/telemetry_ScrollingActionTests.py
new file mode 100644
index 0000000..3756dee
--- /dev/null
+++ b/server/site_tests/telemetry_ScrollingActionTests/telemetry_ScrollingActionTests.py
@@ -0,0 +1,24 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+
+from autotest_lib.server import test
+from autotest_lib.server.cros import telemetry_runner
+
+
+class telemetry_ScrollingActionTests(test.test):
+    """Run the telemetry scrolling action tests."""
+    version = 1
+
+
+    def run_once(self, host=None):
+        """Run the telemetry scrolling action tests.
+
+        @param host: host we are running telemetry on.
+        """
+        telemetry = telemetry_runner.TelemetryRunner(host)
+        result = telemetry.run_telemetry_test('ScrollingActionTest')
+        logging.debug('Telemetry completed with a status of: %s with output:'
+                      ' %s', result.status, result.output)
\ No newline at end of file
diff --git a/server/site_tests/telemetry_ScrollingBenchmark/control b/server/site_tests/telemetry_ScrollingBenchmark/control
new file mode 100644
index 0000000..491aa04
--- /dev/null
+++ b/server/site_tests/telemetry_ScrollingBenchmark/control
@@ -0,0 +1,23 @@
+# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+AUTHOR = "chromeos-perf"
+NAME = "Telemetry Scrolling Benchmark"
+SUITE = "telemetry_benchmarks"
+TIME = "LONG"
+TEST_CATEGORY = "Functional"
+TEST_CLASS = "suite"
+TEST_TYPE = "server"
+
+DOC = """
+This server side test executes the Telemetry Scrolling Benchmark.
+This is part of our effort to support Chrome for Chrome OS performance testing.
+"""
+
+def run_benchmark(machine):
+    host = hosts.create_host(machine)
+    job.run_test("telemetry_ScrollingBenchmark", host=host)
+
+
+parallel_simple(run_benchmark, machines)
diff --git a/server/site_tests/telemetry_ScrollingBenchmark/telemetry_ScrollingBenchmark.py b/server/site_tests/telemetry_ScrollingBenchmark/telemetry_ScrollingBenchmark.py
new file mode 100644
index 0000000..ee6895c
--- /dev/null
+++ b/server/site_tests/telemetry_ScrollingBenchmark/telemetry_ScrollingBenchmark.py
@@ -0,0 +1,22 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from autotest_lib.server import test
+from autotest_lib.server.cros import telemetry_runner
+
+
+class telemetry_ScrollingBenchmark(test.test):
+    """Run the telemetry scrolling benchmark."""
+    version = 1
+
+
+    def run_once(self, host=None):
+        """Run the telemetry scrolling benchmark.
+
+        @param host: host we are running telemetry on.
+        """
+        telemetry = telemetry_runner.TelemetryRunner(host)
+        telemetry.run_telemetry_benchmark('scrolling_benchmark',
+                                          'tough_scrolling_cases.json',
+                                          keyval_writer=self)
diff --git a/test_suites/control.telemetry_benchmarks b/test_suites/control.telemetry_benchmarks
new file mode 100644
index 0000000..fbf9874
--- /dev/null
+++ b/test_suites/control.telemetry_benchmarks
@@ -0,0 +1,33 @@
+# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+AUTHOR = "Chrome OS Team"
+NAME = "telemetry_benchmarks"
+PURPOSE = "Test basic, required functionality."
+CRITERIA = "All tests with SUITE=telemetry_benchamrks must pass."
+
+TIME = "LONG"
+TEST_CATEGORY = "General"
+TEST_CLASS = "suite"
+TEST_TYPE = "Server"
+
+DOC = """
+Telemetry performance test suite.
+
+@param build: The name of the image to test.
+              Ex: x86-mario-release/R17-1412.33.0-a1-b29
+@param board: The board to test on. Ex: x86-mario
+@param pool: The pool of machines to utilize for scheduling. If pool=None
+             board is used.
+@param check_hosts: require appropriate live hosts to exist in the lab.
+@param SKIP_IMAGE: (optional) If present and True, don't re-image devices.
+"""
+
+import common
+from autotest_lib.server.cros.dynamic_suite import dynamic_suite
+
+dynamic_suite.reimage_and_run(
+    build=build, board=board, name='telemetry_benchmarks', job=job, pool=pool,
+    check_hosts=check_hosts, add_experimental=True, num=num,
+    file_bugs=file_bugs, skip_reimage=dynamic_suite.skip_reimage(globals()))