[autotest] telemetry_Benchmarks upload perf data using output_perf_value

telemetry_Benchmarks now uploads perf data using output_perf_value.
It will no longer write to the keyval file.

After this change lands, we should stop running the scripts
extract_perf.py and generate_perf_graphs, which are currently
kicked off by cron to upload the perf data.

Also makes output_perf_value accept empty unit.

CQ-DEPEND=I0b746f4ab3162eb7ba4aefa12294b7a60e326513
TEST=1) Locally kicked off the tests from the AFE and
confirmed that the perf data was uploaded to the testing
perf dashboard.
2) Ran test_unittest.py and telemetry_runner_unittest.py.
BUG=chromium:280634

Change-Id: Icf6dca08cab0268b41f8359eb73084dfee3e4b9d
Reviewed-on: https://chromium-review.googlesource.com/176746
Reviewed-by: Fang Deng <fdeng@chromium.org>
Tested-by: Fang Deng <fdeng@chromium.org>
Commit-Queue: Fang Deng <fdeng@chromium.org>
diff --git a/server/cros/telemetry_runner.py b/server/cros/telemetry_runner.py
index ddffd80..82429af 100644
--- a/server/cros/telemetry_runner.py
+++ b/server/cros/telemetry_runner.py
@@ -30,10 +30,6 @@
                              '(?P<VALUE>[\{\[]?[-\d\., ]+[\}\]]?)('
                              ' ?(?P<UNITS>.+))?')
 
-# Constants pertaining to perf keys generated from Telemetry test results.
-PERF_KEY_TELEMETRY_PREFIX = 'TELEMETRY'
-PERF_KEY_DELIMITER = '--'
-
 
 class TelemetryResult(object):
     """Class to represent the results of a telemetry run.
@@ -55,7 +51,10 @@
         else:
             self.status = FAILED_STATUS
 
-        self.perf_keyvals = {}
+        # A list of perf values, e.g.
+        # [{'graph': 'graphA', 'trace': 'page_load_time',
+        #   'units': 'secs', 'value':0.5}, ...]
+        self.perf_data = []
         self._stdout = stdout
         self._stderr = stderr
         self.output = '\n'.join([stdout, stderr])
@@ -172,13 +171,16 @@
                 # In this example, we'd get 34.2.
                 value_list = [float(x) for x in value.strip('{},').split(',')]
                 value = value_list[0]  # Position 0 is the value.
+            elif re.search('^\d+$', value):
+                value = int(value)
+            else:
+                value = float(value)
 
-            perf_key = PERF_KEY_DELIMITER.join(
-                    [PERF_KEY_TELEMETRY_PREFIX, graph_name, trace_name, units])
-            self.perf_keyvals[perf_key] = str(value)
+            self.perf_data.append({'graph':graph_name, 'trace': trace_name,
+                                   'units': units, 'value': value})
 
         pp = pprint.PrettyPrinter(indent=2)
-        logging.debug('Perf Keyvals: %s', pp.pformat(self.perf_keyvals))
+        logging.debug('Perf values: %s', pp.pformat(self.perf_data))
 
         if self.status is SUCCESS_STATUS:
             return
@@ -312,14 +314,38 @@
         return self._run_test(TELEMETRY_RUN_CROS_TESTS_SCRIPT, test)
 
 
-    def run_telemetry_benchmark(self, benchmark, keyval_writer=None):
+    @staticmethod
+    def _output_perf_value(perf_value_writer, perf_data):
+        """Output perf values to result dir.
+
+        The perf values will be output to the result dir and
+        be subsequently uploaded to perf dashboard.
+
+        @param perf_value_writer: Should be an instance with the function
+                                  output_perf_value(), if None, no perf value
+                                  will be written. Typically this will be the
+                                  job object from an autotest test.
+        @param perf_data: A list of perf values, each value is
+                          a dictionary that looks like
+                          {'graph':'GraphA', 'trace':'metric1',
+                           'units':'secs', 'value':0.5}
+        """
+        for perf_value in perf_data:
+            perf_value_writer.output_perf_value(
+                    description=perf_value['trace'],
+                    value=perf_value['value'],
+                    units=perf_value['units'],
+                    graph=perf_value['graph'])
+
+
+    def run_telemetry_benchmark(self, benchmark, perf_value_writer=None):
         """Runs a telemetry benchmark on a dut.
 
         @param benchmark: Benchmark we want to run.
-        @param keyval_writer: Should be a instance with the function
-                              write_perf_keyval(), if None, no keyvals will be
-                              written. Typically this will be the job object
-                              from a autotest test.
+        @param perf_value_writer: Should be an instance with the function
+                                  output_perf_value(), if None, no perf value
+                                  will be written. Typically this will be the
+                                  job object from an autotest test.
 
         @returns A TelemetryResult Instance with the results of this telemetry
                  execution.
@@ -330,8 +356,8 @@
         result = self._run_telemetry(telemetry_script, benchmark)
         result.parse_benchmark_results()
 
-        if keyval_writer:
-            keyval_writer.write_perf_keyval(result.perf_keyvals)
+        if perf_value_writer:
+            self._output_perf_value(perf_value_writer, result.perf_data)
 
         if result.status is WARNING_STATUS:
             raise error.TestWarn('Telemetry Benchmark: %s'