Autotest TKO parser now sends measured perf data to Chrome's performance dashboard.

Any performance autotest that invokes self.output_perf_value() will now
have those output results uploaded to Chrome's performance dashboard as
part of the autotest TKO parsing stage.  For now, a message is logged to
the .parse.log output file indicating whether the upload succeeded or
failed.  In the future, failed uploads will need to be retried, and
better insight will need to be provided into how well the perf_uploader
module is performing.
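
As a rough sketch (the argument names below are illustrative, mirroring
the fields consumed by the perf_uploader module: description, value,
units, higher_is_better), a test emits a perf value along these lines:

    self.output_perf_value(description='scroll_fps', value=58.3,
                           units='fps', higher_is_better=True)

During parsing, perf_uploader.upload_test() is then invoked once per test
in the job, aggregating values across iterations and uploading them.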

BUG=chromium:258232
TEST=Invoked platform_GesturesRegressionTest using a local instance of
the autotest frontend and a local instance of the Chrome team's perf
dashboard, then verified that the perf data output by the test appeared
on the perf dashboard.  Also verified that perf_uploader_unittest.py passes.

Change-Id: Id685deda129056465350a5be8effc9aa97fa9bb4
Reviewed-on: https://gerrit.chromium.org/gerrit/64672
Commit-Queue: Dennis Jeffrey <dennisjeffrey@chromium.org>
Reviewed-by: Dennis Jeffrey <dennisjeffrey@chromium.org>
Tested-by: Dennis Jeffrey <dennisjeffrey@chromium.org>
diff --git a/tko/parse.py b/tko/parse.py
index 1bf0217..ef7971a 100755
--- a/tko/parse.py
+++ b/tko/parse.py
@@ -4,7 +4,9 @@
 
 import common
 from autotest_lib.client.common_lib import mail, pidfile
-from autotest_lib.tko import db as tko_db, utils as tko_utils, status_lib, models
+from autotest_lib.tko import db as tko_db, utils as tko_utils
+from autotest_lib.tko import models, status_lib
+from autotest_lib.tko.perf_upload import perf_uploader
 from autotest_lib.client.common_lib import utils
 
 
@@ -159,6 +161,10 @@
     # write the job into the database
     db.insert_job(jobname, job)
 
+    # Upload perf values to the perf dashboard, if applicable.
+    for test in job.tests:
+        perf_uploader.upload_test(job, test)
+
     # Serializing job into a binary file
     try:
         from autotest_lib.tko import tko_pb2
diff --git a/tko/perf_upload/__init__.py b/tko/perf_upload/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tko/perf_upload/__init__.py
diff --git a/tko/perf_upload/common.py b/tko/perf_upload/common.py
new file mode 100644
index 0000000..440734f
--- /dev/null
+++ b/tko/perf_upload/common.py
@@ -0,0 +1,8 @@
+import os, sys
+dirname = os.path.dirname(sys.modules[__name__].__file__)
+autotest_dir = os.path.abspath(os.path.join(dirname, '..', '..'))
+client_dir = os.path.join(autotest_dir, 'client')
+sys.path.insert(0, client_dir)
+import setup_modules
+sys.path.pop(0)
+setup_modules.setup(base_path=autotest_dir, root_module_name='autotest_lib')
diff --git a/tko/perf_upload/perf_dashboard_config.json b/tko/perf_upload/perf_dashboard_config.json
new file mode 100644
index 0000000..8abfbee
--- /dev/null
+++ b/tko/perf_upload/perf_dashboard_config.json
@@ -0,0 +1,6 @@
+[
+  {
+    "autotest_name": "platform_GesturesRegressionTest",
+    "master_name": "ChromeOSTouchpad"
+  }
+]
diff --git a/tko/perf_upload/perf_uploader.py b/tko/perf_upload/perf_uploader.py
new file mode 100644
index 0000000..ac37f6e
--- /dev/null
+++ b/tko/perf_upload/perf_uploader.py
@@ -0,0 +1,276 @@
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Uploads performance data to the performance dashboard.
+
+Performance tests may output data that needs to be displayed on the performance
+dashboard.  The autotest TKO parser invokes this module with each test
+associated with a job.  If a test has performance data associated with it, it
+is uploaded to the performance dashboard.  The performance dashboard is owned
+by the Chrome team and is available at https://chromeperf.appspot.com/.  Users
+must be logged in with an @google.com account to view Chrome OS perf data
+there.
+
+"""
+
+import httplib, json, math, os, urllib, urllib2
+
+import common
+from autotest_lib.tko import utils as tko_utils
+
+_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
+_PRESENTATION_CONFIG_FILE = os.path.join(
+        _ROOT_DIR, 'perf_dashboard_config.json')
+_DEFAULT_MASTER_NAME = 'ChromeOSPerf'
+_DASHBOARD_UPLOAD_URL = 'https://chromeperf.appspot.com/add_point'
+
+
+def _aggregate_iterations(perf_values):
+    """Aggregate same measurements from multiple iterations.
+
+    Each perf measurement may exist multiple times across multiple iterations
+    of a test.  Here, the results for each unique measured perf metric are
+    aggregated across multiple iterations.
+
+    @param perf_values: A list of tko.models.perf_value_iteration objects.
+
+    @return A dictionary mapping each unique measured perf metric (keyed by
+        its description) to information about that metric.  In particular, the
+        'value' entry is a list holding the value measured in each iteration.
+
+    """
+    perf_data = {}
+    for perf_iteration in perf_values:
+        for perf_dict in perf_iteration.perf_measurements:
+            if perf_dict['description'] not in perf_data:
+                perf_data[perf_dict['description']] = {
+                    'units': perf_dict['units'],
+                    'higher_is_better': perf_dict['higher_is_better'],
+                    'value': [perf_dict['value']],   # Note: a list of values.
+                    'stddev': perf_dict['stddev']
+                }
+            else:
+                perf_data[perf_dict['description']]['value'].append(
+                        perf_dict['value'])
+                # Note: the stddev will be recomputed later when the results
+                # from each of the multiple iterations are averaged together.
+    return perf_data
+
+
+def _mean_and_stddev(data, precision=4):
+    """Computes mean and standard deviation from a list of numbers.
+
+    Assumes that the list contains at least 2 numbers.
+
+    @param data: A list of numeric values.
+    @param precision: The integer number of decimal places to which to
+        round the results.
+
+    @return A 2-tuple (mean, standard_deviation), in which each value is
+        rounded to |precision| decimal places.
+
+    """
+    n = len(data)
+    mean = float(sum(data)) / n
+    # Divide by n-1 to compute "sample standard deviation".
+    variance = sum([(elem - mean) ** 2 for elem in data]) / (n - 1)
+    return round(mean, precision), round(math.sqrt(variance), precision)
+
+
+def _compute_avg_stddev(perf_data):
+    """Compute average and standard deviations as needed for perf measurements.
+
+    For any perf measurement that exists in multiple iterations (has more than
+    one measured value), compute the average and standard deviation for it and
+    then store the updated information in the dictionary.
+
+    @param perf_data: A dictionary of measured perf data as computed by
+        _aggregate_iterations().  The dictionary is modified in place: after
+        this call, each 'value' entry is a single number rather than a list of
+        per-iteration values.
+
+    """
+    for perf_dict in perf_data.itervalues():
+        if len(perf_dict['value']) > 1:
+            perf_dict['value'], perf_dict['stddev'] = (
+                    _mean_and_stddev(map(float, perf_dict['value'])))
+        else:
+            perf_dict['value'] = perf_dict['value'][0]  # Take out of list.
+
+
+def _parse_config_file():
+    """Parses a presentation config file and stores the info into a dict.
+
+    The config file contains information about how to present the perf data
+    on the perf dashboard.  This is required if the default presentation
+    settings aren't desired for certain tests.
+
+    @return A dictionary mapping each unique autotest name to a dictionary
+        of presentation config information.
+
+    """
+    json_obj = []
+    if os.path.exists(_PRESENTATION_CONFIG_FILE):
+        with open(_PRESENTATION_CONFIG_FILE, 'r') as fp:
+            json_obj = json.load(fp)
+    config_dict = {}
+    for entry in json_obj:
+        config_dict[entry['autotest_name']] = entry
+    return config_dict
+
+
+def _gather_presentation_info(config_data, test_name):
+    """Gathers presentation info from config data for the given test name.
+
+    @param config_data: A dictionary of dashboard presentation info for all
+        tests, as returned by _parse_config_file().  Info is keyed by autotest
+        name.
+    @param test_name: The name of an autotest.
+
+    @return A dictionary containing presentation information extracted from
+        |config_data| for the given autotest name.
+    """
+    master_name = _DEFAULT_MASTER_NAME
+    description_to_graph_name = {}  # Multiple values can be on the same graph.
+    if test_name in config_data:
+        presentation_dict = config_data[test_name]
+        if 'master_name' in presentation_dict:
+            master_name = presentation_dict['master_name']
+        if 'dashboard_test_name' in presentation_dict:
+            test_name = presentation_dict['dashboard_test_name']
+        if 'graphs' in presentation_dict:
+            for graph in presentation_dict['graphs']:
+                graph_name = graph['graph_name']
+                for desc in graph['descriptions']:
+                    description_to_graph_name[desc] = graph_name
+
+    return {
+        'master_name': master_name,
+        'desc_to_graph_name': description_to_graph_name,
+        'test_name': test_name,
+    }
+
+
+def _format_for_upload(platform_name, cros_version, chrome_version, perf_data,
+                       presentation_info):
+    """Formats perf data suitably to upload to the perf dashboard.
+
+    The perf dashboard expects perf data to be uploaded as a
+    specially-formatted JSON string.  In particular, the JSON object must be a
+    dictionary with key "data", and value being a list of dictionaries where
+    each dictionary contains all the information associated with a single
+    measured perf value: master name, bot name, test name, perf value, error
+    value, units, and build version numbers.
+
+    @param platform_name: The string name of the platform.
+    @param cros_version: The string Chrome OS version number.
+    @param chrome_version: The string Chrome version number.
+    @param perf_data: A dictionary of measured perf data as computed by
+        _compute_avg_stddev().
+    @param presentation_info: A dictionary of dashboard presentation info for
+        the given test, as identified by _gather_presentation_info().
+
+    @return A dictionary containing the formatted information ready to upload
+        to the performance dashboard.
+
+    """
+    dash_entries = []
+    for desc, data in perf_data.iteritems():
+        # Each perf metric is named by a path that encodes the test name,
+        # a graph name (if specified), and a description.  This must be defined
+        # according to rules set by the Chrome team, as implemented in:
+        # chromium/tools/build/scripts/slave/results_dashboard.py.
+        graph_name = presentation_info['desc_to_graph_name'].get(desc)
+        # Normalize the description for use in the test path.  |data| was
+        # bound above from the original description, so renaming |desc| here
+        # does not break the value lookup below.
+        if desc.endswith('_ref'):
+            desc = 'ref'
+        desc = desc.replace('_by_url', '')
+        desc = desc.replace('/', '_')
+        if graph_name:
+            test_path = '%s/%s/%s' % (presentation_info['test_name'],
+                                      graph_name, desc)
+        else:
+            test_path = '%s/%s' % (presentation_info['test_name'], desc)
+
+        new_dash_entry = {
+            'master': presentation_info['master_name'],
+            'bot': 'cros-' + platform_name,  # Prefix to indicate Chrome OS.
+            'test': test_path,
+            'value': data['value'],
+            'error': data['stddev'],
+            'units': data['units'],
+            'supplemental_columns': {
+                'r_cros_version': cros_version,
+                'r_chrome_version': chrome_version,
+            }
+        }
+
+        dash_entries.append(new_dash_entry)
+
+    json_string = json.dumps(dash_entries)
+    return {'data': json_string}
+
+
+def _send_to_dashboard(data_obj):
+    """Sends formatted perf data to the perf dashboard.
+
+    @param data_obj: A formatted data object as returned by
+        _format_for_upload().
+
+    @return None, if the data was uploaded without an exception, or a string
+        error message if an exception was raised when uploading.
+
+    """
+    encoded = urllib.urlencode(data_obj)
+    req = urllib2.Request(_DASHBOARD_UPLOAD_URL, encoded)
+    try:
+        urllib2.urlopen(req)
+    except urllib2.HTTPError, e:
+        return 'HTTPError: %d for JSON %s\n' % (e.code, data_obj['data'])
+    except urllib2.URLError, e:
+        return 'URLError: %s for JSON %s\n' % (str(e.reason), data_obj['data'])
+    except httplib.HTTPException:
+        return 'HTTPException for JSON %s\n' % data_obj['data']
+
+
+def upload_test(job, test):
+    """Uploads any perf data associated with a test to the perf dashboard.
+
+    @param job: An autotest tko.models.job object that is associated with the
+        given |test|.
+    @param test: An autotest tko.models.test object that may or may not be
+        associated with measured perf data.
+
+    """
+    if not test.perf_values:
+        return
+
+    # Aggregate values from multiple iterations together.
+    perf_data = _aggregate_iterations(test.perf_values)
+
+    # Compute averages and standard deviations as needed for measured perf
+    # values that exist in multiple iterations.  Ultimately, we only upload a
+    # single measurement (with standard deviation) for every unique measured
+    # perf metric.
+    _compute_avg_stddev(perf_data)
+
+    # Format the perf data for the upload, then upload it.
+    test_name = test.testname
+    platform_name = job.machine_group
+    cros_version = test.attributes.get('CHROMEOS_RELEASE_VERSION', '')
+    chrome_version = test.attributes.get('CHROME_VERSION', '')
+    # Prefix the Chrome OS version number with the Chrome milestone.
+    # TODO(dennisjeffrey): Modify the dashboard to accept the Chrome OS
+    # version number *without* the milestone attached.
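+    # For example, chrome_version '25.10.0.0' and cros_version '1200.0.0'
+    # yield '25.1200.0.0' (version strings here are illustrative, matching
+    # the unit tests).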
+    cros_version = chrome_version[:chrome_version.find('.') + 1] + cros_version
+    config_data = _parse_config_file()
+    presentation_info = _gather_presentation_info(config_data, test_name)
+    formatted_data = _format_for_upload(
+            platform_name, cros_version, chrome_version, perf_data,
+            presentation_info)
+    error = _send_to_dashboard(formatted_data)
+
+    if error:
+        tko_utils.dprint('Error when uploading perf data to the perf '
+                         'dashboard for test %s: %s' % (test_name, error))
+    else:
+        tko_utils.dprint('Successfully uploaded perf data to the perf '
+                         'dashboard for test %s.' % test_name)
diff --git a/tko/perf_upload/perf_uploader_unittest.py b/tko/perf_upload/perf_uploader_unittest.py
new file mode 100644
index 0000000..189ab5b
--- /dev/null
+++ b/tko/perf_upload/perf_uploader_unittest.py
@@ -0,0 +1,350 @@
+#!/usr/bin/python
+
+"""Unit tests for the perf_uploader.py module.
+
+"""
+
+import json, unittest
+
+import common
+from autotest_lib.tko import models as tko_models
+from autotest_lib.tko.perf_upload import perf_uploader
+
+
+class test_aggregate_iterations(unittest.TestCase):
+    """Tests for the aggregate_iterations function."""
+
+    _PERF_ITERATION_DATA = {
+        '1': [
+            {
+                'description': 'metric1',
+                'value': 1,
+                'stddev': 0.0,
+                'units': 'units1',
+                'higher_is_better': True
+            },
+            {
+                'description': 'metric2',
+                'value': 10,
+                'stddev': 0.0,
+                'units': 'units2',
+                'higher_is_better': True
+            },
+            {
+                'description': 'metric3',
+                'value': 100,
+                'stddev': 1.7,
+                'units': 'units3',
+                'higher_is_better': False
+            }
+        ],
+        '2': [
+            {
+                'description': 'metric1',
+                'value': 2,
+                'stddev': 0.0,
+                'units': 'units1',
+                'higher_is_better': True
+            },
+            {
+                'description': 'metric2',
+                'value': 20,
+                'stddev': 0.0,
+                'units': 'units2',
+                'higher_is_better': True
+            },
+            {
+                'description': 'metric3',
+                'value': 200,
+                'stddev': 21.2,
+                'units': 'units3',
+                'higher_is_better': False
+            }
+        ],
+    }
+
+
+    def setUp(self):
+        """Sets up for each test case."""
+        self._perf_values = []
+        for iter_num, iter_data in self._PERF_ITERATION_DATA.iteritems():
+            self._perf_values.append(
+                    tko_models.perf_value_iteration(iter_num, iter_data))
+
+
+    def test_one_iteration(self):
+        """Tests that data for 1 iteration is aggregated properly."""
+        result = perf_uploader._aggregate_iterations([self._perf_values[0]])
+        self.assertEqual(len(result), 3, msg='Expected results for 3 metrics.')
+        self.assertTrue(
+            all([x in result for x in ['metric1', 'metric2', 'metric3']]),
+            msg='Parsed metrics not as expected.')
+        msg = 'Perf values for metric not aggregated properly.'
+        self.assertEqual(result['metric1']['value'], [1], msg=msg)
+        self.assertEqual(result['metric2']['value'], [10], msg=msg)
+        self.assertEqual(result['metric3']['value'], [100], msg=msg)
+        msg = 'Standard deviation values not retained properly.'
+        self.assertEqual(result['metric1']['stddev'], 0.0, msg=msg)
+        self.assertEqual(result['metric2']['stddev'], 0.0, msg=msg)
+        self.assertEqual(result['metric3']['stddev'], 1.7, msg=msg)
+
+
+    def test_two_iterations(self):
+        """Tests that data for 2 iterations is aggregated properly."""
+        result = perf_uploader._aggregate_iterations(self._perf_values)
+        self.assertEqual(len(result), 3, msg='Expected results for 3 metrics.')
+        self.assertTrue(
+            all([x in result for x in ['metric1', 'metric2', 'metric3']]),
+            msg='Parsed metrics not as expected.')
+        msg = 'Perf values for metric not aggregated properly.'
+        self.assertEqual(result['metric1']['value'], [1, 2], msg=msg)
+        self.assertEqual(result['metric2']['value'], [10, 20], msg=msg)
+        self.assertEqual(result['metric3']['value'], [100, 200], msg=msg)
+
+
+class test_compute_avg_stddev(unittest.TestCase):
+    """Tests for the compute_avg_stddev function."""
+
+    def setUp(self):
+        """Sets up for each test case."""
+        self._perf_values = {
+            'metric1': {'value': [10, 20, 30], 'stddev': 0.0},
+            'metric2': {'value': [2.0, 3.0, 4.0], 'stddev': 0.0},
+            'metric3': {'value': [1], 'stddev': 1.7},
+        }
+
+
+    def test_avg_stddev(self):
+        """Tests that averages and standard deviations are computed properly."""
+        perf_uploader._compute_avg_stddev(self._perf_values)
+        result = self._perf_values  # The input dictionary itself is modified.
+        self.assertEqual(len(result), 3, msg='Expected results for 3 metrics.')
+        self.assertTrue(
+            all([x in result for x in ['metric1', 'metric2', 'metric3']]),
+            msg='Parsed metrics not as expected.')
+        msg = 'Average value not computed properly.'
+        self.assertEqual(result['metric1']['value'], 20, msg=msg)
+        self.assertEqual(result['metric2']['value'], 3.0, msg=msg)
+        self.assertEqual(result['metric3']['value'], 1, msg=msg)
+        msg = 'Standard deviation value not computed properly.'
+        self.assertEqual(result['metric1']['stddev'], 10.0, msg=msg)
+        self.assertEqual(result['metric2']['stddev'], 1.0, msg=msg)
+        self.assertEqual(result['metric3']['stddev'], 1.7, msg=msg)
+
+
+class test_json_config_file_sanity(unittest.TestCase):
+    """Sanity tests for the JSON-formatted presentation config file."""
+
+    def test_proper_json(self):
+        """Verifies the file can be parsed as proper JSON."""
+        try:
+            with open(perf_uploader._PRESENTATION_CONFIG_FILE, 'r') as fp:
+                json.load(fp)
+        except (IOError, ValueError):
+            self.fail('Presentation config file could not be parsed as JSON.')
+
+
+    def test_unique_test_names(self):
+        """Verifies that each test name appears only once in the JSON file."""
+        json_obj = []
+        try:
+            with open(perf_uploader._PRESENTATION_CONFIG_FILE, 'r') as fp:
+                json_obj = json.load(fp)
+        except (IOError, ValueError):
+            self.fail('Presentation config file could not be parsed as JSON.')
+
+        name_set = set([x['autotest_name'] for x in json_obj])
+        self.assertEqual(len(name_set), len(json_obj),
+                         msg='Autotest names not unique in the JSON file.')
+
+
+class test_gather_presentation_info(unittest.TestCase):
+    """Tests for the gather_presentation_info function."""
+
+    _PRESENT_INFO = {
+        'test_name': {
+            'master_name': 'new_master_name',
+            'dashboard_test_name': 'new_test_name',
+            'graphs': [
+                {
+                    'graph_name': 'graph_name',
+                    'descriptions': ['metric1', 'metric2'],
+                },
+            ]
+        }
+    }
+
+
+    def test_test_name_specified(self):
+        """Verifies gathers presentation info correctly."""
+        result = perf_uploader._gather_presentation_info(
+                self._PRESENT_INFO, 'test_name')
+        self.assertTrue(
+                all([key in result for key in
+                     ['test_name', 'master_name', 'desc_to_graph_name']]),
+                msg='Unexpected keys in resulting dictionary: %s' % result)
+        self.assertEqual(len(result['desc_to_graph_name']), 2,
+                         msg='Unexpected "desc_to_graph_name" value: %s' %
+                             result['desc_to_graph_name'])
+        self.assertTrue(
+                all([x in result['desc_to_graph_name'] for x in
+                     ['metric1', 'metric2']]),
+                msg='Unexpected "desc_to_graph_name" value: %s' %
+                    result['desc_to_graph_name'])
+        self.assertEqual(result['desc_to_graph_name']['metric1'], 'graph_name',
+                         msg='Unexpected "desc_to_graph_name" value: %s' %
+                             result['desc_to_graph_name'])
+        self.assertEqual(result['desc_to_graph_name']['metric2'], 'graph_name',
+                         msg='Unexpected "desc_to_graph_name" value: %s' %
+                             result['desc_to_graph_name'])
+        self.assertEqual(result['master_name'], 'new_master_name',
+                         msg='Unexpected "master_name" value: %s' %
+                             result['master_name'])
+        self.assertEqual(result['test_name'], 'new_test_name',
+                         msg='Unexpected "test_name" value: %s' %
+                             result['test_name'])
+
+
+    def test_test_name_not_specified(self):
+        """Verifies gathers default presentation info if test is not there."""
+        result = perf_uploader._gather_presentation_info(
+                self._PRESENT_INFO, 'other_test_name')
+        self.assertTrue(
+                all([key in result for key in
+                     ['test_name', 'master_name', 'desc_to_graph_name']]),
+                msg='Unexpected keys in resulting dictionary: %s' % result)
+        self.assertEqual(len(result['desc_to_graph_name']), 0,
+                         msg='Unexpected "desc_to_graph_name" value: %s' %
+                             result['desc_to_graph_name'])
+        self.assertEqual(result['master_name'],
+                         perf_uploader._DEFAULT_MASTER_NAME,
+                         msg='Unexpected "master_name" value: %s' %
+                             result['master_name'])
+        self.assertEqual(result['test_name'], 'other_test_name',
+                         msg='Unexpected "test_name" value: %s' %
+                             result['test_name'])
+
+
+class test_format_for_upload(unittest.TestCase):
+    """Tests for the format_for_upload function."""
+
+    _PERF_DATA = {
+        'metric1': {
+            'value': 2.7,
+            'stddev': 0.2,
+            'units': 'msec',
+        },
+        'metric2': {
+            'value': 101.35,
+            'stddev': 5.78,
+            'units': 'frames_per_sec',
+        },
+    }
+
+    _PRESENT_INFO_DEFAULT = {
+        'master_name': perf_uploader._DEFAULT_MASTER_NAME,
+        'test_name': 'test_name',
+        'desc_to_graph_name': {}
+    }
+
+    _PRESENT_INFO = {
+        'master_name': 'new_master_name',
+        'test_name': 'new_test_name',
+        'desc_to_graph_name': {
+            'metric1': 'graph_name',
+            'metric2': 'graph_name'
+        }
+    }
+
+
+    def setUp(self):
+        self._perf_data = self._PERF_DATA
+
+
+    def _verify_result_string(self, actual_result, expected_result):
+        """Verifies a JSON string matches the expected result.
+
+        This function compares JSON objects rather than strings, because of
+        possible floating-point values that need to be compared using
+        assertAlmostEqual().
+
+        @param actual_result: The candidate JSON string.
+        @param expected_result: The reference JSON string that the candidate
+            must match.
+
+        """
+        actual = json.loads(actual_result)
+        expected = json.loads(expected_result)
+
+        fail_msg = 'Unexpected result string: %s' % actual_result
+        self.assertEqual(len(actual), len(expected), msg=fail_msg)
+        for idx in xrange(len(actual)):
+            keys_actual = set(actual[idx].keys())
+            keys_expected = set(expected[idx].keys())
+            self.assertEqual(len(keys_actual), len(keys_expected),
+                             msg=fail_msg)
+            self.assertTrue(all([key in keys_actual for key in keys_expected]),
+                            msg=fail_msg)
+
+            self.assertEqual(
+                    actual[idx]['supplemental_columns']['r_cros_version'],
+                    expected[idx]['supplemental_columns']['r_cros_version'],
+                    msg=fail_msg)
+            self.assertEqual(
+                    actual[idx]['supplemental_columns']['r_chrome_version'],
+                    expected[idx]['supplemental_columns']['r_chrome_version'],
+                    msg=fail_msg)
+            self.assertEqual(
+                    actual[idx]['bot'], expected[idx]['bot'], msg=fail_msg)
+            self.assertAlmostEqual(
+                    actual[idx]['value'], expected[idx]['value'], 4,
+                    msg=fail_msg)
+            self.assertEqual(
+                    actual[idx]['units'], expected[idx]['units'], msg=fail_msg)
+            self.assertEqual(
+                    actual[idx]['master'], expected[idx]['master'],
+                    msg=fail_msg)
+            self.assertAlmostEqual(
+                    actual[idx]['error'], expected[idx]['error'], 4,
+                    msg=fail_msg)
+            self.assertEqual(
+                    actual[idx]['test'], expected[idx]['test'], msg=fail_msg)
+
+
+    def test_default_presentation(self):
+        """Verifies default presentation settings with empty config info."""
+        result = perf_uploader._format_for_upload(
+                'platform', '1200.0.0', '25.10.0.0', self._perf_data,
+                self._PRESENT_INFO_DEFAULT)
+        expected_result_string = (
+                '[{"supplemental_columns": {"r_cros_version": "1200.0.0", '
+                '"r_chrome_version": "25.10.0.0"}, "bot": "cros-platform", '
+                '"value": 101.35, "units": "frames_per_sec", "master": '
+                '"ChromeOSPerf", "error": 5.78, "test": "test_name/metric2"}, '
+                '{"supplemental_columns": {"r_cros_version": "1200.0.0", '
+                '"r_chrome_version": "25.10.0.0"}, "bot": "cros-platform", '
+                '"value": 2.7, "units": "msec", "master": "ChromeOSPerf", '
+                '"error": 0.2, "test": "test_name/metric1"}]')
+        self._verify_result_string(result['data'], expected_result_string)
+
+
+    def test_overridden_presentation(self):
+        """Verifies presentation settings can override defaults properly."""
+        result = perf_uploader._format_for_upload(
+                'platform', '1200.0.0', '25.10.0.0',
+                self._perf_data, self._PRESENT_INFO)
+        expected_result_string = (
+                '[{"supplemental_columns": {"r_cros_version": "1200.0.0", '
+                '"r_chrome_version": "25.10.0.0"}, "bot": "cros-platform", '
+                '"value": 101.35, "units": "frames_per_sec", "master": '
+                '"new_master_name", "error": 5.78, "test": '
+                '"new_test_name/graph_name/metric2"}, '
+                '{"supplemental_columns": {"r_cros_version": "1200.0.0", '
+                '"r_chrome_version": "25.10.0.0"}, "bot": "cros-platform", '
+                '"value": 2.7, "units": "msec", "master": "new_master_name", '
+                '"error": 0.2, "test": "new_test_name/graph_name/metric1"}]')
+        self._verify_result_string(result['data'], expected_result_string)
+
+
+if __name__ == '__main__':
+    unittest.main()