Mute test case metrics in OTA test suites
This CL mutes test case metrics in OTA test suites, wherein each test
case is defined at a single orientation. In these suites, we are only
interested in test class metrics, which, for example, average results
across all orientations. This helps keep the number of metrics uploaded
per metric key manageable.
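
The pattern applied in each suite below is roughly the following
(a minimal sketch with hypothetical class names; MetricLogger is an
illustrative stand-in for wputils.BlackboxMappedMetricLogger, whose
real for_test_case() and for_test_class() factories appear in the
diff):

    class MetricLogger(object):
        # Illustrative stand-in: collects (name, value) pairs that
        # would be uploaded to Blackbox under a metric key.
        def __init__(self):
            self.metrics = {}

        def add_metric(self, name, value):
            self.metrics[name] = value

    class PerfTest(object):
        def __init__(self):
            self.testcase_metric_logger = MetricLogger()
            self.testclass_metric_logger = MetricLogger()
            # Regular suites publish per-test-case metrics.
            self.publish_testcase_metrics = True

        def process_test_results(self, rtts):
            # Per-test-case metrics are gated on the flag ...
            if self.publish_testcase_metrics:
                self.testcase_metric_logger.add_metric(
                    'ping_rtt', max(rtts))

        def process_testclass_results(self, ranges):
            # ... while class-level aggregates always publish.
            avg_range = sum(ranges) / len(ranges)
            self.testclass_metric_logger.add_metric(
                'avg_range', avg_range)

    class OtaPerfTest(PerfTest):
        def __init__(self):
            super().__init__()
            # OTA suites define each test case at a single
            # orientation, so per-test-case metrics are muted and
            # only class-level aggregates (e.g., averages across
            # orientations) are uploaded.
            self.publish_testcase_metrics = False
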
Test: Done
Bug: None
Change-Id: Ib58280215fc4077ed7d8ea4b5182d13dfcf1b583
Signed-off-by: Omar El Ayach <oelayach@google.com>
diff --git a/acts/tests/google/wifi/WifiPingTest.py b/acts/tests/google/wifi/WifiPingTest.py
index 9cd2002..551719d 100644
--- a/acts/tests/google/wifi/WifiPingTest.py
+++ b/acts/tests/google/wifi/WifiPingTest.py
@@ -25,7 +25,6 @@
from acts import base_test
from acts import utils
from acts.controllers.utils_lib import ssh
-from acts.metrics.loggers.blackbox import BlackboxMetricLogger
from acts.test_utils.wifi import ota_chamber
from acts.test_utils.wifi import wifi_performance_test_utils as wputils
from acts.test_utils.wifi import wifi_retail_ap as retail_ap
@@ -57,10 +56,12 @@
def __init__(self, controllers):
base_test.BaseTestClass.__init__(self, controllers)
- self.ping_range_metric = BlackboxMetricLogger.for_test_case(
- metric_name='ping_range')
- self.ping_rtt_metric = BlackboxMetricLogger.for_test_case(
- metric_name='ping_rtt')
+ self.testcase_metric_logger = (
+ wputils.BlackboxMappedMetricLogger.for_test_case())
+ self.testclass_metric_logger = (
+ wputils.BlackboxMappedMetricLogger.for_test_class())
+ self.publish_testcase_metrics = True
+
self.tests = self.generate_test_cases(
ap_power='standard',
channels=[1, 6, 11, 36, 40, 44, 48, 149, 153, 157, 161],
@@ -153,7 +154,9 @@
len(x))] for x in sorted_rtt
]
# Set blackbox metric
- self.ping_rtt_metric.metric_value = max(rtt_at_test_percentile)
+ if self.publish_testcase_metrics:
+ self.testcase_metric_logger.add_metric('ping_rtt',
+ max(rtt_at_test_percentile))
# Evaluate test pass/fail
test_failed = False
for idx, rtt in enumerate(rtt_at_test_percentile):
@@ -183,7 +186,9 @@
# Get target range
rvr_range = self.get_range_from_rvr()
# Set Blackbox metric
- self.ping_range_metric.metric_value = result['range']
+ if self.publish_testcase_metrics:
+ self.testcase_metric_logger.add_metric('ping_range',
+ result['range'])
# Evaluate test pass/fail
if result['range'] - rvr_range < -self.testclass_params[
'range_gap_threshold']:
@@ -516,11 +521,7 @@
class WifiPing_LowPowerAP_Test(WifiPingTest):
def __init__(self, controllers):
- base_test.BaseTestClass.__init__(self, controllers)
- self.ping_range_metric = BlackboxMetricLogger.for_test_case(
- metric_name='ping_range')
- self.ping_rtt_metric = BlackboxMetricLogger.for_test_case(
- metric_name='ping_rtt')
+ super().__init__(controllers)
self.tests = self.generate_test_cases(
ap_power='low_power',
channels=[1, 6, 11, 36, 40, 44, 48, 149, 153, 157, 161],
@@ -539,12 +540,11 @@
def __init__(self, controllers):
base_test.BaseTestClass.__init__(self, controllers)
- self.ping_range_metric = BlackboxMetricLogger.for_test_case(
- metric_name='ping_range')
- self.ping_rtt_metric = BlackboxMetricLogger.for_test_case(
- metric_name='ping_rtt')
- self.bb_metric_logger = (
+ self.testcase_metric_logger = (
+ wputils.BlackboxMappedMetricLogger.for_test_case())
+ self.testclass_metric_logger = (
wputils.BlackboxMappedMetricLogger.for_test_class())
+ self.publish_testcase_metrics = False
def setup_class(self):
WifiPingTest.setup_class(self)
@@ -591,7 +591,7 @@
self.log.info('Average range for Channel {} is: {}dB'.format(
channel, average_range))
metric_name = 'ota_summary_ch{}.avg_range'.format(channel)
- self.bb_metric_logger.add_metric(metric_name, average_range)
+ self.testclass_metric_logger.add_metric(metric_name, average_range)
current_context = context.get_current_context().get_full_output_path()
plot_file_path = os.path.join(current_context, 'results.html')
figure.generate_figure(plot_file_path)
diff --git a/acts/tests/google/wifi/WifiRssiTest.py b/acts/tests/google/wifi/WifiRssiTest.py
index d17aee8..7b1e68e 100644
--- a/acts/tests/google/wifi/WifiRssiTest.py
+++ b/acts/tests/google/wifi/WifiRssiTest.py
@@ -58,6 +58,7 @@
wputils.BlackboxMappedMetricLogger.for_test_case())
self.testclass_metric_logger = (
wputils.BlackboxMappedMetricLogger.for_test_class())
+ self.publish_testcase_metrics = True
def setup_class(self):
self.dut = self.android_devices[0]
@@ -109,15 +110,16 @@
postprocessed_results: compiled arrays of RSSI measurements
"""
# Set Blackbox metric values
- self.testcase_metric_logger.add_metric(
- 'signal_poll_rssi_stdev',
- max(postprocessed_results['signal_poll_rssi']['stdev']))
- self.testcase_metric_logger.add_metric(
- 'chain_0_rssi_stdev',
- max(postprocessed_results['chain_0_rssi']['stdev']))
- self.testcase_metric_logger.add_metric(
- 'chain_1_rssi_stdev',
- max(postprocessed_results['chain_1_rssi']['stdev']))
+ if self.publish_testcase_metrics:
+ self.testcase_metric_logger.add_metric(
+ 'signal_poll_rssi_stdev',
+ max(postprocessed_results['signal_poll_rssi']['stdev']))
+ self.testcase_metric_logger.add_metric(
+ 'chain_0_rssi_stdev',
+ max(postprocessed_results['chain_0_rssi']['stdev']))
+ self.testcase_metric_logger.add_metric(
+ 'chain_1_rssi_stdev',
+ max(postprocessed_results['chain_1_rssi']['stdev']))
# Evaluate test pass/fail
test_failed = any([
@@ -183,10 +185,11 @@
avg_error = RSSI_ERROR_VAL
avg_shift = RSSI_ERROR_VAL
# Set Blackbox metric values
- self.testcase_metric_logger.add_metric('{}_error'.format(key),
- avg_error)
- self.testcase_metric_logger.add_metric('{}_shift'.format(key),
- avg_shift)
+ if self.publish_testcase_metrics:
+ self.testcase_metric_logger.add_metric(
+ '{}_error'.format(key), avg_error)
+ self.testcase_metric_logger.add_metric(
+ '{}_shift'.format(key), avg_shift)
# Evaluate test pass/fail
rssi_failure = (avg_error >
self.testclass_params['abs_tolerance']
@@ -870,6 +873,14 @@
performance in varying channel conditions
"""
+ def __init__(self, controllers):
+ base_test.BaseTestClass.__init__(self, controllers)
+ self.testcase_metric_logger = (
+ wputils.BlackboxMappedMetricLogger.for_test_case())
+ self.testclass_metric_logger = (
+ wputils.BlackboxMappedMetricLogger.for_test_class())
+ self.publish_testcase_metrics = False
+
def setup_class(self):
WifiRssiTest.setup_class(self)
self.ota_chamber = ota_chamber.create(
diff --git a/acts/tests/google/wifi/WifiRvrTest.py b/acts/tests/google/wifi/WifiRvrTest.py
index e0fea3b..7d72c16 100644
--- a/acts/tests/google/wifi/WifiRvrTest.py
+++ b/acts/tests/google/wifi/WifiRvrTest.py
@@ -18,7 +18,6 @@
import itertools
import json
import logging
-import math
import numpy
import os
from acts import asserts
@@ -54,6 +53,7 @@
wputils.BlackboxMappedMetricLogger.for_test_case())
self.testclass_metric_logger = (
wputils.BlackboxMappedMetricLogger.for_test_class())
+ self.publish_testcase_metrics = True
def setup_class(self):
"""Initializes common test hardware and parameters.
@@ -163,7 +163,9 @@
# Set test metrics
rvr_result['metrics']['failure_count'] = failure_count
- self.testcase_metric_logger.add_metric('failure_count', failure_count)
+ if self.publish_testcase_metrics:
+ self.testcase_metric_logger.add_metric('failure_count',
+ failure_count)
# Assert pass or fail
if failure_count >= self.testclass_params['failure_count_tolerance']:
@@ -301,8 +303,9 @@
rvr_result['metrics'] = {}
rvr_result['metrics']['peak_tput'] = max(
rvr_result['throughput_receive'])
- self.testcase_metric_logger.add_metric(
- 'peak_tput', rvr_result['metrics']['peak_tput'])
+ if self.publish_testcase_metrics:
+ self.testcase_metric_logger.add_metric(
+ 'peak_tput', rvr_result['metrics']['peak_tput'])
tput_below_limit = [
tput < self.testclass_params['tput_metric_targets'][
@@ -316,8 +319,9 @@
break
else:
rvr_result['metrics']['high_tput_range'] = -1
- self.testcase_metric_logger.add_metric(
- 'high_tput_range', rvr_result['metrics']['high_tput_range'])
+ if self.publish_testcase_metrics:
+ self.testcase_metric_logger.add_metric(
+ 'high_tput_range', rvr_result['metrics']['high_tput_range'])
tput_below_limit = [
tput < self.testclass_params['tput_metric_targets'][
@@ -331,8 +335,9 @@
break
else:
rvr_result['metrics']['low_tput_range'] = -1
- self.testcase_metric_logger.add_metric(
- 'low_tput_range', rvr_result['metrics']['low_tput_range'])
+ if self.publish_testcase_metrics:
+ self.testcase_metric_logger.add_metric(
+ 'low_tput_range', rvr_result['metrics']['low_tput_range'])
def run_rvr_test(self, testcase_params):
"""Test function to run RvR.
@@ -680,6 +685,7 @@
wputils.BlackboxMappedMetricLogger.for_test_case())
self.testclass_metric_logger = (
wputils.BlackboxMappedMetricLogger.for_test_class())
+ self.publish_testcase_metrics = False
def setup_class(self):
WifiRvrTest.setup_class(self)
@@ -706,10 +712,6 @@
result['testcase_params'],
['channel', 'mode', 'traffic_type', 'traffic_direction'
]).items())
- #test_id = (result['testcase_params']['channel'],
- # result['testcase_params']['mode'],
- # result['testcase_params']['traffic_type'],
- # result['testcase_params']['traffic_direction'])
if test_id not in plots:
# Initialize test id data when not present
compiled_data[test_id] = {'throughput': [], 'metrics': {}}
diff --git a/acts/tests/google/wifi/WifiSensitivityTest.py b/acts/tests/google/wifi/WifiSensitivityTest.py
index 7220f92..01cdbd2 100644
--- a/acts/tests/google/wifi/WifiSensitivityTest.py
+++ b/acts/tests/google/wifi/WifiSensitivityTest.py
@@ -27,7 +27,6 @@
from acts import utils
from acts.controllers import iperf_client
from acts.controllers.utils_lib import ssh
-from acts.metrics.loggers.blackbox import BlackboxMetricLogger
from acts.test_utils.wifi import ota_chamber
from acts.test_utils.wifi import wifi_performance_test_utils as wputils
from acts.test_utils.wifi import wifi_test_utils as wutils
@@ -126,8 +125,11 @@
def __init__(self, controllers):
base_test.BaseTestClass.__init__(self, controllers)
- self.failure_count_metric = BlackboxMetricLogger.for_test_case(
- metric_name='sensitivity')
+ self.testcase_metric_logger = (
+ wputils.BlackboxMappedMetricLogger.for_test_case())
+ self.testclass_metric_logger = (
+ wputils.BlackboxMappedMetricLogger.for_test_class())
+ self.publish_testcase_metrics = True
def setup_class(self):
"""Initializes common test hardware and parameters.
@@ -558,7 +560,7 @@
class WifiSensitivity_AllChannels_Test(WifiSensitivityTest):
def __init__(self, controllers):
- base_test.BaseTestClass.__init__(self, controllers)
+ super().__init__(controllers)
self.tests = self.generate_test_cases(
[6, 36, 40, 44, 48, 149, 153, 157, 161],
['VHT20', 'VHT40', 'VHT80'], ['0', '1', '2x2'])
@@ -566,21 +568,21 @@
class WifiSensitivity_SampleChannels_Test(WifiSensitivityTest):
def __init__(self, controllers):
- base_test.BaseTestClass.__init__(self, controllers)
+ super().__init__(controllers)
self.tests = self.generate_test_cases(
[6, 36, 149], ['VHT20', 'VHT40', 'VHT80'], ['0', '1', '2x2'])
class WifiSensitivity_2GHz_Test(WifiSensitivityTest):
def __init__(self, controllers):
- base_test.BaseTestClass.__init__(self, controllers)
+ super().__init__(controllers)
self.tests = self.generate_test_cases([1, 2, 6, 10, 11], ['VHT20'],
['0', '1', '2x2'])
class WifiSensitivity_5GHz_Test(WifiSensitivityTest):
def __init__(self, controllers):
- base_test.BaseTestClass.__init__(self, controllers)
+ super().__init__(controllers)
self.tests = self.generate_test_cases(
[36, 40, 44, 48, 149, 153, 157, 161], ['VHT20', 'VHT40', 'VHT80'],
['0', '1', '2x2'])
@@ -588,14 +590,14 @@
class WifiSensitivity_UNII1_Test(WifiSensitivityTest):
def __init__(self, controllers):
- base_test.BaseTestClass.__init__(self, controllers)
+ super().__init__(controllers)
self.tests = self.generate_test_cases(
[36, 40, 44, 48], ['VHT20', 'VHT40', 'VHT80'], ['0', '1', '2x2'])
class WifiSensitivity_UNII3_Test(WifiSensitivityTest):
def __init__(self, controllers):
- base_test.BaseTestClass.__init__(self, controllers)
+ super().__init__(controllers)
self.tests = self.generate_test_cases([149, 153, 157, 161],
['VHT20', 'VHT40', 'VHT80'],
['0', '1', '2x2'])
@@ -611,9 +613,12 @@
"""
def __init__(self, controllers):
- WifiSensitivityTest.__init__(self, controllers)
- self.bb_metric_logger = (
+ base_test.BaseTestClass.__init__(self, controllers)
+ self.testcase_metric_logger = (
+ wputils.BlackboxMappedMetricLogger.for_test_case())
+ self.testclass_metric_logger = (
wputils.BlackboxMappedMetricLogger.for_test_class())
+ self.publish_testcase_metrics = False
def setup_class(self):
WifiSensitivityTest.setup_class(self)
@@ -686,7 +691,8 @@
channel, metric_test_config)
metric_name = metric_tag + '.avg_sensitivity'
metric_value = numpy.nanmean(channel_results['sensitivity'])
- self.bb_metric_logger.add_metric(metric_name, metric_value)
+ self.testclass_metric_logger.add_metric(
+ metric_name, metric_value)
self.log.info(("Average Sensitivity for {}: {:.2f}").format(
metric_tag, metric_value))
current_context = (
diff --git a/acts/tests/google/wifi/WifiSoftApPerformanceTest.py b/acts/tests/google/wifi/WifiSoftApPerformanceTest.py
index ff4ad4f..f764b9f 100644
--- a/acts/tests/google/wifi/WifiSoftApPerformanceTest.py
+++ b/acts/tests/google/wifi/WifiSoftApPerformanceTest.py
@@ -39,6 +39,7 @@
wputils.BlackboxMappedMetricLogger.for_test_case())
self.testclass_metric_logger = (
wputils.BlackboxMappedMetricLogger.for_test_class())
+ self.publish_testcase_metrics = True
def setup_class(self):
"""Initializes common test hardware and parameters.
diff --git a/acts/tests/google/wifi/WifiThroughputStabilityTest.py b/acts/tests/google/wifi/WifiThroughputStabilityTest.py
index fd7c95f..fd51961 100644
--- a/acts/tests/google/wifi/WifiThroughputStabilityTest.py
+++ b/acts/tests/google/wifi/WifiThroughputStabilityTest.py
@@ -19,6 +19,7 @@
import json
import logging
import math
+import numpy
import os
from acts import asserts
from acts import base_test
@@ -26,7 +27,6 @@
from acts import utils
from acts.controllers import iperf_server as ipf
from acts.controllers.utils_lib import ssh
-from acts.metrics.loggers.blackbox import BlackboxMetricLogger
from acts.test_utils.wifi import ota_chamber
from acts.test_utils.wifi import wifi_performance_test_utils as wputils
from acts.test_utils.wifi import wifi_retail_ap as retail_ap
@@ -51,12 +51,11 @@
def __init__(self, controllers):
base_test.BaseTestClass.__init__(self, controllers)
# Define metrics to be uploaded to BlackBox
- self.min_throughput_metric = BlackboxMetricLogger.for_test_case(
- metric_name='min_throughput')
- self.avg_throughput_metric = BlackboxMetricLogger.for_test_case(
- metric_name='avg_throughput')
- self.std_dev_percent_metric = BlackboxMetricLogger.for_test_case(
- metric_name='std_dev_percent')
+ self.testcase_metric_logger = (
+ wputils.BlackboxMappedMetricLogger.for_test_case())
+ self.testclass_metric_logger = (
+ wputils.BlackboxMappedMetricLogger.for_test_class())
+ self.publish_testcase_metrics = True
# Generate test cases
self.tests = self.generate_test_cases(
[6, 36, 149], ['VHT20', 'VHT40', 'VHT80'], ['TCP', 'UDP'],
@@ -154,9 +153,13 @@
test_result_dict['iperf_results']['std_deviation'] /
test_result_dict['iperf_results']['avg_throughput']) * 100
# Set blackbox metrics
- self.avg_throughput_metric.metric_value = avg_throughput
- self.min_throughput_metric.metric_value = min_throughput
- self.std_dev_percent_metric.metric_value = std_dev_percent
+ if self.publish_testcase_metrics:
+ self.testcase_metric_logger.add_metric('avg_throughput',
+ avg_throughput)
+ self.testcase_metric_logger.add_metric('min_throughput',
+ min_throughput)
+ self.testcase_metric_logger.add_metric('std_dev_percent',
+ std_dev_percent)
# Evaluate pass/fail
min_throughput_check = (
(min_throughput / avg_throughput) *
@@ -207,8 +210,7 @@
'instantaneous_rates':
instantaneous_rates_Mbps,
'avg_throughput':
- math.fsum(instantaneous_rates_Mbps) /
- len(instantaneous_rates_Mbps),
+ numpy.mean(instantaneous_rates_Mbps),
'std_deviation':
test_result['iperf_result'].get_std_deviation(
self.testclass_params['iperf_ignored_interval']) * 8,
@@ -445,14 +447,11 @@
def __init__(self, controllers):
base_test.BaseTestClass.__init__(self, controllers)
# Define metrics to be uploaded to BlackBox
- self.min_throughput_metric = BlackboxMetricLogger.for_test_case(
- metric_name='min_throughput')
- self.avg_throughput_metric = BlackboxMetricLogger.for_test_case(
- metric_name='avg_throughput')
- self.std_dev_percent_metric = BlackboxMetricLogger.for_test_case(
- metric_name='std_dev_percent')
- self.bb_metric_logger = (
+ self.testcase_metric_logger = (
+ wputils.BlackboxMappedMetricLogger.for_test_case())
+ self.testclass_metric_logger = (
wputils.BlackboxMappedMetricLogger.for_test_class())
+ self.publish_testcase_metrics = False
def setup_class(self):
WifiThroughputStabilityTest.setup_class(self)
@@ -481,12 +480,10 @@
]).items())
test_data = channel_data.setdefault(
test_id, collections.OrderedDict(position=[], throughput=[]))
- current_throughput = (
- math.fsum(test['iperf_result'].instantaneous_rates[
- self.testclass_params['iperf_ignored_interval']:-1]) /
- len(test['iperf_result'].instantaneous_rates[
+ current_throughput = (numpy.mean(
+ test['iperf_result'].instantaneous_rates[
self.testclass_params['iperf_ignored_interval']:-1])
- ) * 8 * (1.024**2)
+ ) * 8 * (1.024**2)
test_data['position'].append(current_params['position'])
test_data['throughput'].append(current_throughput)
@@ -506,12 +503,13 @@
test_id_dict['traffic_direction'], channel,
test_id_dict['mode'])
metric_name = metric_tag + '.avg_throughput'
- metric_value = math.fsum(test_data['throughput']) / len(
- test_data['throughput'])
- self.bb_metric_logger.add_metric(metric_name, metric_value)
+ metric_value = numpy.mean(test_data['throughput'])
+ self.testclass_metric_logger.add_metric(
+ metric_name, metric_value)
metric_name = metric_tag + '.min_throughput'
metric_value = min(test_data['throughput'])
- self.bb_metric_logger.add_metric(metric_name, metric_value)
+ self.testclass_metric_logger.add_metric(
+ metric_name, metric_value)
# Plot test class results
plots = []