Add average sensitivity metric to conducted sensitivity tests.

This CL defines a new average sensitivity metric to surface on wifi
performance dashboards. The metric averages the sensitivities of all
rates that share the same mode, bandwidth, number of streams, and chain
mask. This gives an average readout of a DUT's receiver performance per
chain and mode, and is more compact than per-rate sensitivity, i.e.,
better suited to dashboards.
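
The grouping and averaging is roughly as sketched below; this is a
minimal illustration only, assuming per-rate results carried as plain
dicts. The helper name average_sensitivity and the per_rate_results
structure are hypothetical and not part of this CL, which instead
builds on extract_test_id() and the existing testclass results
dictionary.

    import collections

    import numpy

    def average_sensitivity(per_rate_results, id_fields):
        # Group per-rate sensitivities by the id_fields parameters,
        # then average each group, ignoring NaN entries.
        grouped = collections.OrderedDict()
        for result in per_rate_results:
            group_id = tuple(
                (field, result[field]) for field in id_fields)
            grouped.setdefault(group_id, []).append(result['sensitivity'])
        return collections.OrderedDict(
            (group_id, numpy.nanmean(values))
            for group_id, values in grouped.items())

    # Example: two rates in the same (channel, mode, nss, chain) group
    # average to -91.5 dBm.
    per_rate_results = [
        {'channel': 6, 'mode': 'VHT20', 'num_streams': 1,
         'chain_mask': '0', 'sensitivity': -92},
        {'channel': 6, 'mode': 'VHT20', 'num_streams': 1,
         'chain_mask': '0', 'sensitivity': -91},
    ]
    print(average_sensitivity(
        per_rate_results,
        ['channel', 'mode', 'num_streams', 'chain_mask']))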

Test: Done
Bug: None

Change-Id: I2517ffe8f93333df7a1574ca4f990b255c3d7349
Signed-off-by: Omar El Ayach <oelayach@google.com>
diff --git a/acts/tests/google/wifi/WifiSensitivityTest.py b/acts/tests/google/wifi/WifiSensitivityTest.py
index 2eea843..4eecd38 100644
--- a/acts/tests/google/wifi/WifiSensitivityTest.py
+++ b/acts/tests/google/wifi/WifiSensitivityTest.py
@@ -227,9 +227,7 @@
         channels_tested = []
         for result in self.testclass_results:
             testcase_params = result['testcase_params']
-            test_id = collections.OrderedDict(
-                (key, value) for key, value in testcase_params.items()
-                if key in id_fields)
+            test_id = self.extract_test_id(testcase_params, id_fields)
             test_id = tuple(test_id.items())
             if test_id not in testclass_results_dict:
                 testclass_results_dict[test_id] = collections.OrderedDict()
@@ -242,6 +240,27 @@
             else:
                 testclass_results_dict[test_id][channel] = ''
 
+        # calculate average metrics
+        metrics_dict = collections.OrderedDict()
+        id_fields = ['channel', 'mode', 'num_streams', 'chain_mask']
+        for test_id, channel in itertools.product(
+                testclass_results_dict.keys(), channels_tested):
+            metric_tag = collections.OrderedDict(test_id, channel=channel)
+            metric_tag = self.extract_test_id(metric_tag, id_fields)
+            metric_tag = tuple(metric_tag.items())
+            metrics_dict.setdefault(metric_tag, [])
+            sensitivity_result = testclass_results_dict[test_id][channel]
+            if sensitivity_result != '':
+                metrics_dict[metric_tag].append(sensitivity_result)
+        for metric_tag_tuple, metric_data in metrics_dict.items():
+            metric_tag_dict = collections.OrderedDict(metric_tag_tuple)
+            metric_tag = 'ch{}_{}_nss{}_chain{}'.format(
+                metric_tag_dict['channel'], metric_tag_dict['mode'],
+                metric_tag_dict['num_streams'], metric_tag_dict['chain_mask'])
+            metric_key = "{}.avg_sensitivity".format(metric_tag)
+            metric_value = numpy.nanmean(metric_data)
+            self.testclass_metric_logger.add_metric(metric_key, metric_value)
+
         # write csv
         csv_header = ['Mode', 'MCS', 'Streams', 'Chain', 'Rate (Mbps)']
         for channel in channels_tested: