Merge "Merge both CEP and non-CEP cases into merge functions"
diff --git a/acts/framework/acts/controllers/access_point.py b/acts/framework/acts/controllers/access_point.py
index 4b89de6..38db6a7 100755
--- a/acts/framework/acts/controllers/access_point.py
+++ b/acts/framework/acts/controllers/access_point.py
@@ -150,6 +150,7 @@
self.lan = self.interfaces.get_lan_interface()
self.__initial_ap()
self.scapy_install_path = None
+ self.setup_bridge = False
def __initial_ap(self):
"""Initial AP interfaces.
@@ -179,7 +180,10 @@
self.ssh.run(BRIDGE_DOWN)
self.ssh.run(BRIDGE_DEL)
- def start_ap(self, hostapd_config, additional_parameters=None):
+ def start_ap(self,
+ hostapd_config,
+ setup_bridge=False,
+ additional_parameters=None):
"""Starts as an ap using a set of configurations.
This will start an ap on this host. To start an ap the controller
@@ -190,11 +194,13 @@
Args:
hostapd_config: hostapd_config.HostapdConfig, The configurations
- to use when starting up the ap.
+ to use when starting up the ap.
+            setup_bridge: Whether to bridge the LAN interface and WLAN interface.
+ Only one WLAN interface can be bridged with the LAN interface
+ and none of the guest networks can be bridged.
additional_parameters: A dictionary of parameters that can sent
- directly into the hostapd config file. This
- can be used for debugging and or adding one
- off parameters into the config.
+ directly into the hostapd config file. This can be used for
+ debugging and or adding one off parameters into the config.
Returns:
An identifier for each ssid being started. These identifiers can be
@@ -203,7 +209,6 @@
Raises:
Error: When the ap can't be brought up.
"""
-
if hostapd_config.frequency < 5000:
interface = self.wlan_2g
subnet = self._AP_2G_SUBNET
@@ -275,7 +280,12 @@
# the server will come up.
interface_ip = ipaddress.ip_interface(
'%s/%s' % (subnet.router, subnet.network.netmask))
- self._ip_cmd.set_ipv4_address(interface, interface_ip)
+ if setup_bridge is True:
+ bridge_interface_name = 'br_lan'
+ self.create_bridge(bridge_interface_name, [interface, self.lan])
+ self._ip_cmd.set_ipv4_address(bridge_interface_name, interface_ip)
+ else:
+ self._ip_cmd.set_ipv4_address(interface, interface_ip)
if hostapd_config.bss_lookup:
# This loop goes through each interface that was setup for
# hostapd and assigns the DHCP scopes that were defined but
@@ -442,6 +452,13 @@
del self._aps[identifier]
if configured_subnets:
self.start_dhcp(subnets=configured_subnets)
+ bridge_interfaces = self.interfaces.get_bridge_interface()
+ if bridge_interfaces:
+ for iface in bridge_interfaces:
+ BRIDGE_DOWN = 'ifconfig {} down'.format(iface)
+ BRIDGE_DEL = 'brctl delbr {}'.format(iface)
+ self.ssh.run(BRIDGE_DOWN)
+ self.ssh.run(BRIDGE_DEL)
def stop_all_aps(self):
"""Stops all running aps on this device."""
diff --git a/acts/framework/acts/controllers/attenuator.py b/acts/framework/acts/controllers/attenuator.py
index aa84801..9d99c2d 100644
--- a/acts/framework/acts/controllers/attenuator.py
+++ b/acts/framework/acts/controllers/attenuator.py
@@ -48,17 +48,16 @@
logging.error('Attempt %s to open connection to attenuator '
'failed: %s' % (attempt_number, e))
if attempt_number == _ATTENUATOR_OPEN_RETRIES:
- ping_output = job.run(
- 'ping %s -c 1 -w 1' % ip_address, ignore_status=True)
+ ping_output = job.run('ping %s -c 1 -w 1' % ip_address,
+ ignore_status=True)
if ping_output.exit_status == 1:
- logging.error(
- 'Unable to ping attenuator at %s' % ip_address)
+ logging.error('Unable to ping attenuator at %s' %
+ ip_address)
else:
- logging.error(
- 'Able to ping attenuator at %s' % ip_address)
- job.run(
- 'echo "q" | telnet %s %s' % (ip_address, port),
- ignore_status=True)
+ logging.error('Able to ping attenuator at %s' %
+ ip_address)
+ job.run('echo "q" | telnet %s %s' % (ip_address, port),
+ ignore_status=True)
raise
for i in range(inst_cnt):
attn = Attenuator(attn_inst, idx=i)
@@ -72,13 +71,31 @@
return objs
+def get_info(attenuators):
+ """Get information on a list of Attenuator objects.
+
+ Args:
+ attenuators: A list of Attenuator objects.
+
+ Returns:
+ A list of dict, each representing info for Attenuator objects.
+ """
+ device_info = []
+ for attenuator in attenuators:
+ info = {
+ "Address": attenuator.instrument.address,
+ "Attenuator_Port": attenuator.idx
+ }
+ device_info.append(info)
+ return device_info
+
+
def destroy(objs):
for attn in objs:
attn.instrument.close()
-def get_attenuators_for_device(device_attenuator_configs,
- attenuators,
+def get_attenuators_for_device(device_attenuator_configs, attenuators,
attenuator_key):
"""Gets the list of attenuators associated to a specified device and builds
a list of the attenuator objects associated to the ip address in the
@@ -139,11 +156,12 @@
for attenuator_port in device_attenuator_config[attenuator_key]:
for attenuator in attenuators:
if (attenuator.instrument.address ==
- device_attenuator_config['Address'] and
- attenuator.idx is attenuator_port):
+ device_attenuator_config['Address']
+ and attenuator.idx is attenuator_port):
attenuator_list.append(attenuator)
return attenuator_list
+
"""Classes for accessing, managing, and manipulating attenuators.
Users will instantiate a specific child class, but almost all operation should
@@ -244,7 +262,6 @@
the physical implementation and allows the user to think only of attenuators
regardless of their location.
"""
-
def __init__(self, instrument, idx=0, offset=0):
"""This is the constructor for Attenuator
@@ -313,7 +330,6 @@
convenience to the user and avoid re-implementation of helper functions and
small loops scattered throughout user code.
"""
-
def __init__(self, name=''):
"""This constructor for AttenuatorGroup
diff --git a/acts/framework/acts/controllers/fuchsia_device.py b/acts/framework/acts/controllers/fuchsia_device.py
index fd4aecd..998927d 100644
--- a/acts/framework/acts/controllers/fuchsia_device.py
+++ b/acts/framework/acts/controllers/fuchsia_device.py
@@ -14,6 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import backoff
import json
import logging
import platform
@@ -22,6 +23,7 @@
import re
import requests
import subprocess
+import socket
import time
from acts import context
@@ -146,7 +148,6 @@
log: A logger object.
port: The TCP port number of the Fuchsia device.
"""
-
def __init__(self, fd_conf_data):
"""
Args:
@@ -230,13 +231,13 @@
# Init server
self.init_server_connection()
- def init_server_connection(self, retry_count=3):
- """Initializes HTTP connection with SL4F server.
-
- Args:
- retry_count: How many time to retry connecting assuming a
- known error.
- """
+ @backoff.on_exception(
+ backoff.constant,
+ (ConnectionRefusedError, requests.exceptions.ConnectionError),
+ interval=1.5,
+ max_tries=4)
+ def init_server_connection(self):
+ """Initializes HTTP connection with SL4F server."""
self.log.debug("Initialziing server connection")
init_data = json.dumps({
"jsonrpc": "2.0",
@@ -246,28 +247,8 @@
"client_id": self.client_id
}
})
- retry_counter = 0
- while retry_counter < retry_count:
- try:
- requests.get(url=self.init_address, data=init_data)
- retry_counter = retry_count + 1
- except ConnectionRefusedError:
- self.log.info('Connection Refused Error. '
- 'Retrying in 1 second.')
- e = ConnectionRefusedError('Connection Refused Error.')
- retry_counter += 1
- time.sleep(1)
- except requests.exceptions.ConnectionError:
- self.log.info('Requests ConnectionError. '
- 'Retrying in 1 second.')
- e = requests.exceptions.ConnectionError('Requests '
- 'ConnectionError')
- retry_counter += 1
- time.sleep(1)
- except Exception as e:
- raise e
- if retry_counter is retry_count:
- raise e
+
+ requests.get(url=self.init_address, data=init_data)
self.test_counter += 1
def build_id(self, test_id):
@@ -315,7 +296,8 @@
elif os_type == 'Linux':
timeout_flag = '-W'
else:
- raise ValueError('Invalid OS. Only Linux and MacOS are supported.')
+ raise ValueError(
+ 'Invalid OS. Only Linux and MacOS are supported.')
ping_command = ['ping', '%s' % timeout_flag, '1', '-c', '1', self.ip]
self.clean_up()
self.log.info('Rebooting FuchsiaDevice %s' % self.ip)
@@ -331,7 +313,7 @@
self.log.info('Waiting for FuchsiaDevice %s to come back up.' %
self.ip)
self.log.debug('Waiting for FuchsiaDevice %s to stop responding'
- ' to pings.' % self.ip)
+ ' to pings.' % self.ip)
while True:
initial_ping_status_code = subprocess.call(
ping_command,
@@ -340,28 +322,26 @@
if initial_ping_status_code != 1:
break
else:
- initial_ping_elapsed_time = (
- time.time() - initial_ping_start_time)
+ initial_ping_elapsed_time = (time.time() -
+ initial_ping_start_time)
if initial_ping_elapsed_time > timeout:
try:
uptime = (int(
self.send_command_ssh(
'clock --monotonic',
- timeout=
- FUCHSIA_RECONNECT_AFTER_REBOOT_TIME).stdout)
- / FUCHSIA_TIME_IN_NANOSECONDS)
+ timeout=FUCHSIA_RECONNECT_AFTER_REBOOT_TIME).
+ stdout) / FUCHSIA_TIME_IN_NANOSECONDS)
except Exception as e:
- self.log.debug('Unable to retrieve uptime from device.')
+ self.log.info('Unable to retrieve uptime from device.')
# Device failed to restart within the specified period.
# Restart the services so other tests can continue.
self.start_services()
self.init_server_connection()
- raise TimeoutError('Waited %s seconds, and FuchsiaDevice %s'
- ' never stopped responding to pings.'
- ' Uptime reported as %s' %
- (initial_ping_elapsed_time,
- self.ip,
- str(uptime)))
+ raise TimeoutError(
+ 'Waited %s seconds, and FuchsiaDevice %s'
+ ' never stopped responding to pings.'
+ ' Uptime reported as %s' %
+ (initial_ping_elapsed_time, self.ip, str(uptime)))
start_time = time.time()
self.log.debug('Waiting for FuchsiaDevice %s to start responding '
@@ -377,8 +357,8 @@
raise TimeoutError('Waited %s seconds, and FuchsiaDevice %s'
'did not repond to a ping.' %
(elapsed_time, self.ip))
- self.log.debug('Received a ping back in %s seconds.'
- % str(time.time() - start_time))
+ self.log.debug('Received a ping back in %s seconds.' %
+ str(time.time() - start_time))
# Wait 5 seconds after receiving a ping packet to just to let
# the OS get everything up and running.
time.sleep(10)
@@ -656,15 +636,17 @@
disconnect_response.get("error"))
return False
- def start_services(self, skip_sl4f=False, retry_count=3):
+ @backoff.on_exception(backoff.constant,
+ (FuchsiaSyslogError, socket.timeout),
+ interval=1.5,
+ max_tries=4)
+ def start_services(self, skip_sl4f=False):
"""Starts long running services on the Fuchsia device.
1. Start SL4F if not skipped.
Args:
skip_sl4f: Does not attempt to start SL4F if True.
- retry_count: How many time to retry connecting assuming a
- known error.
"""
self.log.debug("Attempting to start Fuchsia device services on %s." %
self.ip)
@@ -672,24 +654,9 @@
self.log_process = start_syslog(self.serial, self.log_path,
self.ip, self.ssh_username,
self.ssh_config)
- retry_counter = 0
- while retry_counter < retry_count:
- if ENABLE_LOG_LISTENER:
- try:
- self.log_process.start()
- retry_counter = retry_count + 1
- except FuchsiaSyslogError:
- self.log.info('Fuchsia Syslog Error. '
- 'Retrying in 1 second.')
- e = FuchsiaSyslogError('Fuchsia Syslog Error')
- retry_counter += 1
- time.sleep(1)
- except Exception as e:
- raise e
- else:
- retry_counter = retry_count + 1
- if retry_counter is retry_count:
- raise e
+
+ if ENABLE_LOG_LISTENER:
+ self.log_process.start()
if not skip_sl4f:
self.control_daemon("sl4f.cmx", "start")
diff --git a/acts/framework/acts/controllers/fuchsia_lib/utils_lib.py b/acts/framework/acts/controllers/fuchsia_lib/utils_lib.py
index 6456a08..56fd4c2 100644
--- a/acts/framework/acts/controllers/fuchsia_lib/utils_lib.py
+++ b/acts/framework/acts/controllers/fuchsia_lib/utils_lib.py
@@ -14,9 +14,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import backoff
import os
import logging
import paramiko
+import socket
import time
logging.getLogger("paramiko").setLevel(logging.WARNING)
@@ -51,11 +53,17 @@
raise Exception('No valid ssh key type found', exceptions)
+@backoff.on_exception(
+ backoff.constant,
+ (paramiko.ssh_exception.SSHException,
+ paramiko.ssh_exception.AuthenticationException, socket.timeout,
+ socket.error, ConnectionRefusedError, ConnectionResetError),
+ interval=1.5,
+ max_tries=4)
def create_ssh_connection(ip_address,
ssh_username,
ssh_config,
- connect_timeout=30,
- retry_count=3):
+ connect_timeout=30):
"""Creates and ssh connection to a Fuchsia device
Args:
@@ -63,8 +71,6 @@
ssh_username: Username for ssh server.
ssh_config: ssh_config location for the ssh server.
connect_timeout: Timeout value for connecting to ssh_server.
- retry_count: How many time to retry connecting assuming a
- known error.
Returns:
A paramiko ssh object
@@ -72,31 +78,12 @@
ssh_key = get_private_key(ip_address=ip_address, ssh_config=ssh_config)
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
- retry_counter = 0
- while retry_counter < retry_count:
- try:
- ssh_client.connect(hostname=ip_address,
- username=ssh_username,
- allow_agent=False,
- pkey=ssh_key,
- timeout=connect_timeout,
- banner_timeout=200)
- retry_counter = retry_count + 1
- except paramiko.ssh_exception.SSHException:
- logging.info('Paramiko SSHException. Retrying in 1 second.')
- e = paramiko.ssh_exception.SSHException('Paramiko SSHException')
- time.sleep(1)
- retry_counter =+ 1
- except ConnectionRefusedError:
- logging.info('Connection Refused Error. Retrying in 1 second.')
- e = ConnectionRefusedError('Connection Refused Error')
- time.sleep(1)
- retry_counter =+ 1
- except Exception as e:
- raise e
- if retry_counter is retry_count:
- raise e
-
+ ssh_client.connect(hostname=ip_address,
+ username=ssh_username,
+ allow_agent=False,
+ pkey=ssh_key,
+ timeout=connect_timeout,
+ banner_timeout=200)
return ssh_client
diff --git a/acts/framework/acts/controllers/iperf_client.py b/acts/framework/acts/controllers/iperf_client.py
index 40c6993..80a8410 100644
--- a/acts/framework/acts/controllers/iperf_client.py
+++ b/acts/framework/acts/controllers/iperf_client.py
@@ -23,6 +23,8 @@
from acts import utils
from acts.controllers.android_device import AndroidDevice
from acts.controllers.iperf_server import _AndroidDeviceBridge
+from acts.controllers.fuchsia_lib.utils_lib import create_ssh_connection
+from acts.controllers.fuchsia_lib.utils_lib import SshResults
from acts.controllers.utils_lib.ssh import connection
from acts.controllers.utils_lib.ssh import settings
from acts.event import event_bus
@@ -51,12 +53,24 @@
if type(c) is dict and 'AndroidDevice' in c:
results.append(IPerfClientOverAdb(c['AndroidDevice']))
elif type(c) is dict and 'ssh_config' in c:
- results.append(IPerfClientOverSsh(c['ssh_config']))
+ results.append(
+ IPerfClientOverSsh(c['ssh_config'],
+ use_paramiko=c.get('use_paramiko'),
+ test_interface=c.get('test_interface')))
else:
results.append(IPerfClient())
return results
+def get_info(iperf_clients):
+ """Placeholder for info about iperf clients
+
+ Returns:
+ None
+ """
+ return None
+
+
def destroy(_):
# No cleanup needed.
pass
@@ -98,7 +112,7 @@
return os.path.join(full_out_dir, out_file_name)
- def start(self, ip, iperf_args, tag, timeout=3600):
+ def start(self, ip, iperf_args, tag, timeout=3600, iperf_binary=None):
"""Starts iperf client, and waits for completion.
Args:
@@ -107,6 +121,8 @@
client. Eg: iperf_args = "-t 10 -p 5001 -w 512k/-u -b 200M -J".
tag: A string to further identify iperf results file
timeout: the maximum amount of time the iperf client can run.
+            iperf_binary: Location of iperf3 binary. If none, it is assumed
+ the binary is in the path.
Returns:
full_out_path: iperf result path.
@@ -116,8 +132,7 @@
class IPerfClient(IPerfClientBase):
"""Class that handles iperf3 client operations."""
-
- def start(self, ip, iperf_args, tag, timeout=3600):
+ def start(self, ip, iperf_args, tag, timeout=3600, iperf_binary=None):
"""Starts iperf client, and waits for completion.
Args:
@@ -126,11 +141,19 @@
client. Eg: iperf_args = "-t 10 -p 5001 -w 512k/-u -b 200M -J".
tag: tag to further identify iperf results file
timeout: unused.
+            iperf_binary: Location of iperf3 binary. If none, it is assumed
+ the binary is in the path.
Returns:
full_out_path: iperf result path.
"""
- iperf_cmd = ['iperf3', '-c', ip] + iperf_args.split(' ')
+ if not iperf_binary:
+ logging.debug('No iperf3 binary specified. '
+ 'Assuming iperf3 is in the path.')
+ iperf_binary = 'iperf3'
+ else:
+ logging.debug('Using iperf3 binary located at %s' % iperf_binary)
+ iperf_cmd = [str(iperf_binary), '-c', ip] + iperf_args.split(' ')
full_out_path = self._get_full_file_path(tag)
with open(full_out_path, 'w') as out_file:
@@ -141,12 +164,21 @@
class IPerfClientOverSsh(IPerfClientBase):
"""Class that handles iperf3 client operations on remote machines."""
-
- def __init__(self, ssh_config):
+ def __init__(self, ssh_config, use_paramiko=False, test_interface=None):
self._ssh_settings = settings.from_config(ssh_config)
- self._ssh_session = connection.SshConnection(self._ssh_settings)
+ self._use_paramiko = use_paramiko
+ if str(self._use_paramiko) == 'True':
+ self._ssh_session = create_ssh_connection(
+ ip_address=ssh_config['host'],
+ ssh_username=ssh_config['user'],
+ ssh_config=ssh_config['ssh_config'])
+ else:
+ self._ssh_session = connection.SshConnection(self._ssh_settings)
- def start(self, ip, iperf_args, tag, timeout=3600):
+ self.hostname = self._ssh_settings.hostname
+ self.test_interface = test_interface
+
+ def start(self, ip, iperf_args, tag, timeout=3600, iperf_binary=None):
"""Starts iperf client, and waits for completion.
Args:
@@ -155,15 +187,33 @@
client. Eg: iperf_args = "-t 10 -p 5001 -w 512k/-u -b 200M -J".
tag: tag to further identify iperf results file
timeout: the maximum amount of time to allow the iperf client to run
+            iperf_binary: Location of iperf3 binary. If none, it is assumed
+ the binary is in the path.
Returns:
full_out_path: iperf result path.
"""
- iperf_cmd = 'iperf3 -c {} {}'.format(ip, iperf_args)
+ if not iperf_binary:
+ logging.debug('No iperf3 binary specified. '
+ 'Assuming iperf3 is in the path.')
+ iperf_binary = 'iperf3'
+ else:
+ logging.debug('Using iperf3 binary located at %s' % iperf_binary)
+ iperf_cmd = '{} -c {} {}'.format(iperf_binary, ip, iperf_args)
full_out_path = self._get_full_file_path(tag)
try:
- iperf_process = self._ssh_session.run(iperf_cmd, timeout=timeout)
+ if self._use_paramiko:
+ cmd_result_stdin, cmd_result_stdout, cmd_result_stderr = (
+ self._ssh_session.exec_command(iperf_cmd, timeout=timeout))
+ cmd_result_exit_status = (
+ cmd_result_stdout.channel.recv_exit_status())
+ iperf_process = SshResults(cmd_result_stdin, cmd_result_stdout,
+ cmd_result_stderr,
+ cmd_result_exit_status)
+ else:
+ iperf_process = self._ssh_session.run(iperf_cmd,
+ timeout=timeout)
iperf_output = iperf_process.stdout
with open(full_out_path, 'w') as out_file:
out_file.write(iperf_output)
@@ -175,7 +225,6 @@
class IPerfClientOverAdb(IPerfClientBase):
"""Class that handles iperf3 operations over ADB devices."""
-
def __init__(self, android_device_or_serial):
"""Creates a new IPerfClientOverAdb object.
@@ -195,7 +244,7 @@
return _AndroidDeviceBridge.android_devices()[
self._android_device_or_serial]
- def start(self, ip, iperf_args, tag, timeout=3600):
+ def start(self, ip, iperf_args, tag, timeout=3600, iperf_binary=None):
"""Starts iperf client, and waits for completion.
Args:
@@ -204,14 +253,27 @@
client. Eg: iperf_args = "-t 10 -p 5001 -w 512k/-u -b 200M -J".
tag: tag to further identify iperf results file
timeout: the maximum amount of time to allow the iperf client to run
+            iperf_binary: Location of iperf3 binary. If none, it is assumed
+ the binary is in the path.
Returns:
The iperf result file path.
"""
iperf_output = ''
try:
- iperf_status, iperf_output = self._android_device.run_iperf_client(
- ip, iperf_args, timeout=timeout)
+ if not iperf_binary:
+ logging.debug('No iperf3 binary specified. '
+ 'Assuming iperf3 is in the path.')
+ iperf_binary = 'iperf3'
+ else:
+ logging.debug('Using iperf3 binary located at %s' %
+ iperf_binary)
+ iperf_cmd = '{} -c {} {}'.format(iperf_binary, ip, iperf_args)
+ out = self._android_device.adb.shell(str(iperf_cmd),
+ timeout=timeout)
+ clean_out = out.split('\n')
+ if "error" in clean_out[0].lower():
+ raise Exception('clean_out')
except job.TimeoutError:
logging.warning('TimeoutError: Iperf measurement timed out.')
diff --git a/acts/framework/acts/controllers/iperf_server.py b/acts/framework/acts/controllers/iperf_server.py
index 039f143..f8048b9 100755
--- a/acts/framework/acts/controllers/iperf_server.py
+++ b/acts/framework/acts/controllers/iperf_server.py
@@ -17,6 +17,7 @@
import json
import logging
import math
+import IPy
import os
import shlex
import subprocess
@@ -36,6 +37,10 @@
ACTS_CONTROLLER_CONFIG_NAME = 'IPerfServer'
ACTS_CONTROLLER_REFERENCE_NAME = 'iperf_servers'
+KILOBITS = 1024
+MEGABITS = KILOBITS * 1024
+GIGABITS = MEGABITS * 1024
+BITS_IN_BYTE = 8
def create(configs):
@@ -56,7 +61,10 @@
elif type(c) is dict and 'AndroidDevice' in c and 'port' in c:
results.append(IPerfServerOverAdb(c['AndroidDevice'], c['port']))
elif type(c) is dict and 'ssh_config' in c and 'port' in c:
- results.append(IPerfServerOverSsh(c['ssh_config'], c['port']))
+ results.append(
+ IPerfServerOverSsh(c['ssh_config'],
+ c['port'],
+ test_interface=c.get('test_interface')))
else:
raise ValueError(
'Config entry %s in %s is not a valid IPerfServer '
@@ -64,6 +72,15 @@
return results
+def get_info(iperf_servers):
+ """Placeholder for info about iperf servers
+
+ Returns:
+ None
+ """
+ return None
+
+
def destroy(iperf_server_list):
for iperf_server in iperf_server_list:
try:
@@ -73,7 +90,7 @@
class IPerfResult(object):
- def __init__(self, result_path):
+ def __init__(self, result_path, reporting_speed_units='Mbytes'):
"""Loads iperf result from file.
Loads iperf result from JSON formatted server log. File can be accessed
@@ -82,6 +99,7 @@
containing multiple iperf client runs.
"""
# if result_path isn't a path, treat it as JSON
+ self.reporting_speed_units = reporting_speed_units
if not os.path.exists(result_path):
self.result = json.loads(result_path)
else:
@@ -89,8 +107,8 @@
with open(result_path, 'r') as f:
iperf_output = f.readlines()
if '}\n' in iperf_output:
- iperf_output = iperf_output[:iperf_output.index('}\n')
- + 1]
+ iperf_output = iperf_output[:iperf_output.index('}\n'
+ ) + 1]
iperf_string = ''.join(iperf_output)
iperf_string = iperf_string.replace('nan', '0')
self.result = json.loads(iperf_string)
@@ -110,6 +128,33 @@
return ('end' in self.result) and ('sum_received' in self.result['end']
or 'sum' in self.result['end'])
+ def _get_reporting_speed(self, network_speed_in_bits_per_second):
+ """Sets the units for the network speed reporting based on how the
+ object was initiated. Defaults to Megabytes per second. Currently
+ supported, bits per second (bits), kilobits per second (kbits), megabits
+ per second (mbits), gigabits per second (gbits), bytes per second
+    (bytes), kilobytes per second (kbytes), megabytes per second (mbytes),
+ gigabytes per second (gbytes).
+
+ Args:
+ network_speed_in_bits_per_second: The network speed from iperf in
+ bits per second.
+
+ Returns:
+ The value of the throughput in the appropriate units.
+ """
+ speed_divisor = 1
+ print(self.reporting_speed_units)
+ if self.reporting_speed_units[1:].lower() == 'bytes':
+ speed_divisor = speed_divisor * BITS_IN_BYTE
+ if self.reporting_speed_units[0:1].lower() == 'k':
+ speed_divisor = speed_divisor * KILOBITS
+ if self.reporting_speed_units[0:1].lower() == 'm':
+ speed_divisor = speed_divisor * MEGABITS
+ if self.reporting_speed_units[0:1].lower() == 'g':
+ speed_divisor = speed_divisor * GIGABITS
+ return network_speed_in_bits_per_second / speed_divisor
+
def get_json(self):
"""Returns the raw json output from iPerf."""
return self.result
@@ -131,7 +176,7 @@
if not self._has_data() or 'sum' not in self.result['end']:
return None
bps = self.result['end']['sum']['bits_per_second']
- return bps / 8 / 1024 / 1024
+ return self._get_reporting_speed(bps)
@property
def avg_receive_rate(self):
@@ -143,7 +188,7 @@
if not self._has_data() or 'sum_received' not in self.result['end']:
return None
bps = self.result['end']['sum_received']['bits_per_second']
- return bps / 8 / 1024 / 1024
+ return self._get_reporting_speed(bps)
@property
def avg_send_rate(self):
@@ -155,7 +200,7 @@
if not self._has_data() or 'sum_sent' not in self.result['end']:
return None
bps = self.result['end']['sum_sent']['bits_per_second']
- return bps / 8 / 1024 / 1024
+ return self._get_reporting_speed(bps)
@property
def instantaneous_rates(self):
@@ -167,7 +212,7 @@
if not self._has_data():
return None
intervals = [
- interval['sum']['bits_per_second'] / 8 / 1024 / 1024
+ self._get_reporting_speed(interval['sum']['bits_per_second'])
for interval in self.result['intervals']
]
return intervals
@@ -199,10 +244,11 @@
"""
if not self._has_data():
return None
- instantaneous_rates = self.instantaneous_rates[iperf_ignored_interval:
- -1]
+ instantaneous_rates = self.instantaneous_rates[
+ iperf_ignored_interval:-1]
avg_rate = math.fsum(instantaneous_rates) / len(instantaneous_rates)
- sqd_deviations = [(rate - avg_rate) ** 2 for rate in instantaneous_rates]
+ sqd_deviations = ([(rate - avg_rate)**2
+ for rate in instantaneous_rates])
std_dev = math.sqrt(
math.fsum(sqd_deviations) / (len(sqd_deviations) - 1))
return std_dev
@@ -256,7 +302,7 @@
Note: If the directory for the file path does not exist, it will be
created.
- Args:
+    Args:
tag: The tag passed in to the server run.
"""
out_dir = self.log_path
@@ -297,7 +343,6 @@
class IPerfServer(IPerfServerBase):
"""Class that handles iperf server commands on localhost."""
-
def __init__(self, port=5201):
super().__init__(port)
self._hinted_port = port
@@ -335,8 +380,9 @@
if self._last_opened_file:
self._last_opened_file.close()
self._last_opened_file = open(self._current_log_file, 'w')
- self._iperf_process = subprocess.Popen(
- command, stdout=self._last_opened_file, stderr=subprocess.DEVNULL)
+ self._iperf_process = subprocess.Popen(command,
+ stdout=self._last_opened_file,
+ stderr=subprocess.DEVNULL)
for attempts_left in reversed(range(3)):
try:
self._port = int(
@@ -374,15 +420,22 @@
class IPerfServerOverSsh(IPerfServerBase):
"""Class that handles iperf3 operations on remote machines."""
-
- def __init__(self, ssh_config, port):
+ def __init__(self, ssh_config, port, test_interface=None):
super().__init__(port)
ssh_settings = settings.from_config(ssh_config)
self._ssh_session = connection.SshConnection(ssh_settings)
- self._iperf_command = 'iperf3 -s -J -p {}'.format(self.port)
self._iperf_pid = None
self._current_tag = None
+ self.hostname = ssh_settings.hostname
+ try:
+ # A test interface can only be found if an ip address is specified.
+ # A fully qualified hostname will return None for the
+ # test_interface.
+ self.test_interface = self._get_test_interface_based_on_ip(
+ test_interface)
+ except Exception:
+ self.test_interface = None
@property
def port(self):
@@ -395,7 +448,46 @@
def _get_remote_log_path(self):
return 'iperf_server_port%s.log' % self.port
- def start(self, extra_args='', tag=''):
+ def _get_test_interface_based_on_ip(self, test_interface):
+ """Gets the test interface for a particular IP if the test interface
+ passed in test_interface is None
+
+ Args:
+        test_interface: Either an interface name, ie eth0, or None
+
+ Returns:
+ The name of the test interface.
+ """
+ if test_interface:
+ return test_interface
+ return utils.get_interface_based_on_ip(self._ssh_session,
+ self.hostname)
+
+ def get_interface_ip_addresses(self, interface):
+ """Gets all of the ip addresses, ipv4 and ipv6, associated with a
+ particular interface name.
+
+ Args:
+ interface: The interface name on the device, ie eth0
+
+ Returns:
+        A list of dictionaries of the various IP addresses:
+ ipv4_private_local_addresses: Any 192.168, 172.16, or 10
+ addresses
+ ipv4_public_addresses: Any IPv4 public addresses
+ ipv6_link_local_addresses: Any fe80:: addresses
+ ipv6_private_local_addresses: Any fd00:: addresses
+ ipv6_public_addresses: Any publicly routable addresses
+ """
+ return utils.get_interface_ip_addresses(self._ssh_session, interface)
+
+ def renew_test_interface_ip_address(self):
+ """Renews the test interface's IP address. Necessary for changing
+ DHCP scopes during a test.
+ """
+ utils.renew_linux_ip_address(self._ssh_session, self.test_interface)
+
+ def start(self, extra_args='', tag='', iperf_binary=None):
"""Starts iperf server on specified machine and port.
Args:
@@ -403,12 +495,22 @@
server with.
tag: Appended to log file name to identify logs from different
iperf runs.
+            iperf_binary: Location of iperf3 binary. If none, it is assumed
+ the binary is in the path.
"""
if self.started:
return
+ if not iperf_binary:
+ logging.debug('No iperf3 binary specified. '
+ 'Assuming iperf3 is in the path.')
+ iperf_binary = 'iperf3'
+ else:
+ logging.debug('Using iperf3 binary located at %s' % iperf_binary)
+ iperf_command = '{} -s -J -p {}'.format(iperf_binary, self.port)
+
cmd = '{cmd} {extra_flags} > {log_file}'.format(
- cmd=self._iperf_command,
+ cmd=iperf_command,
extra_flags=extra_args,
log_file=self._get_remote_log_path())
@@ -463,8 +565,10 @@
"""
if not _AndroidDeviceBridge._test_class:
return {}
- return {device.serial: device
- for device in _AndroidDeviceBridge._test_class.android_devices}
+ return {
+ device.serial: device
+ for device in _AndroidDeviceBridge._test_class.android_devices
+ }
event_bus.register_subscription(
@@ -474,7 +578,6 @@
class IPerfServerOverAdb(IPerfServerBase):
"""Class that handles iperf3 operations over ADB devices."""
-
def __init__(self, android_device_or_serial, port):
"""Creates a new IPerfServerOverAdb object.
@@ -488,7 +591,6 @@
super().__init__(port)
self._android_device_or_serial = android_device_or_serial
- self._iperf_command = 'iperf3 -s -J -p {}'.format(self.port)
self._iperf_process = None
self._current_tag = ''
@@ -511,7 +613,7 @@
def _get_device_log_path(self):
return '~/data/iperf_server_port%s.log' % self.port
- def start(self, extra_args='', tag=''):
+ def start(self, extra_args='', tag='', iperf_binary=None):
"""Starts iperf server on an ADB device.
Args:
@@ -519,13 +621,23 @@
server with.
tag: Appended to log file name to identify logs from different
iperf runs.
+            iperf_binary: Location of iperf3 binary. If none, it is assumed
+ the binary is in the path.
"""
if self._iperf_process is not None:
return
+ if not iperf_binary:
+ logging.debug('No iperf3 binary specified. '
+ 'Assuming iperf3 is in the path.')
+ iperf_binary = 'iperf3'
+ else:
+ logging.debug('Using iperf3 binary located at %s' % iperf_binary)
+ iperf_command = '{} -s -J -p {}'.format(iperf_binary, self.port)
+
self._iperf_process = self._android_device.adb.shell_nb(
'{cmd} {extra_flags} > {log_file}'.format(
- cmd=self._iperf_command,
+ cmd=iperf_command,
extra_flags=extra_args,
log_file=self._get_device_log_path()))
self._iperf_process_adb_pid = ''
diff --git a/acts/framework/acts/test_utils/abstract_devices/utils_lib/wlan_utils.py b/acts/framework/acts/test_utils/abstract_devices/utils_lib/wlan_utils.py
index 745d35f..d4af3c3 100644
--- a/acts/framework/acts/test_utils/abstract_devices/utils_lib/wlan_utils.py
+++ b/acts/framework/acts/test_utils/abstract_devices/utils_lib/wlan_utils.py
@@ -25,8 +25,8 @@
Args: Args match setup_ap_and_associate
"""
- asserts.assert_true(
- setup_ap_and_associate(*args, **kwargs), 'Failed to associate.')
+ asserts.assert_true(setup_ap_and_associate(*args, **kwargs),
+ 'Failed to associate.')
asserts.explicit_pass('Successfully associated.')
@@ -49,7 +49,8 @@
check_connectivity=False,
n_capabilities=None,
ac_capabilities=None,
- vht_bandwidth=None):
+ vht_bandwidth=None,
+ setup_bridge=False):
"""Sets up the AP and associates a client.
Args:
@@ -73,14 +74,13 @@
beacon_interval, dtim_period, frag_threshold, rts_threshold,
force_wmm, hidden, security, additional_ap_parameters, password,
check_connectivity, n_capabilities, ac_capabilities,
- vht_bandwidth)
+ vht_bandwidth, setup_bridge)
- return associate(
- client,
- ssid,
- password,
- check_connectivity=check_connectivity,
- hidden=hidden)
+ return associate(client,
+ ssid,
+ password,
+ check_connectivity=check_connectivity,
+ hidden=hidden)
def setup_ap(access_point,
@@ -101,7 +101,8 @@
check_connectivity=False,
n_capabilities=None,
ac_capabilities=None,
- vht_bandwidth=None):
+ vht_bandwidth=None,
+ setup_bridge=False):
"""Sets up the AP.
Args:
@@ -120,27 +121,27 @@
password: Password to connect to WLAN if necessary.
check_connectivity: Whether to check for internet connectivity.
"""
- ap = hostapd_ap_preset.create_ap_preset(
- profile_name=profile_name,
- iface_wlan_2g=access_point.wlan_2g,
- iface_wlan_5g=access_point.wlan_5g,
- channel=channel,
- ssid=ssid,
- mode=mode,
- short_preamble=preamble,
- beacon_interval=beacon_interval,
- dtim_period=dtim_period,
- frag_threshold=frag_threshold,
- rts_threshold=rts_threshold,
- force_wmm=force_wmm,
- hidden=hidden,
- bss_settings=[],
- security=security,
- n_capabilities=n_capabilities,
- ac_capabilities=ac_capabilities,
- vht_bandwidth=vht_bandwidth)
- access_point.start_ap(
- hostapd_config=ap, additional_parameters=additional_ap_parameters)
+ ap = hostapd_ap_preset.create_ap_preset(profile_name=profile_name,
+ iface_wlan_2g=access_point.wlan_2g,
+ iface_wlan_5g=access_point.wlan_5g,
+ channel=channel,
+ ssid=ssid,
+ mode=mode,
+ short_preamble=preamble,
+ beacon_interval=beacon_interval,
+ dtim_period=dtim_period,
+ frag_threshold=frag_threshold,
+ rts_threshold=rts_threshold,
+ force_wmm=force_wmm,
+ hidden=hidden,
+ bss_settings=[],
+ security=security,
+ n_capabilities=n_capabilities,
+ ac_capabilities=ac_capabilities,
+ vht_bandwidth=vht_bandwidth)
+ access_point.start_ap(hostapd_config=ap,
+ setup_bridge=setup_bridge,
+ additional_parameters=additional_ap_parameters)
def associate(client,
@@ -157,8 +158,10 @@
check_connectivity: Whether to check internet connectivity.
hidden: If the WLAN is hidden or not.
"""
- return client.associate(
- ssid, password, check_connectivity=check_connectivity, hidden=hidden)
+ return client.associate(ssid,
+ password,
+ check_connectivity=check_connectivity,
+ hidden=hidden)
def status(client):
diff --git a/acts/framework/acts/utils.py b/acts/framework/acts/utils.py
index 99c961a..b42ea21 100755
--- a/acts/framework/acts/utils.py
+++ b/acts/framework/acts/utils.py
@@ -19,6 +19,7 @@
import copy
import datetime
import functools
+import IPy
import json
import logging
import os
@@ -1381,3 +1382,97 @@
def ascii_string(uc_string):
"""Converts unicode string to ascii"""
return str(uc_string).encode('ASCII')
+
+
+def get_interface_ip_addresses(comm_channel, interface):
+ """Gets all of the ip addresses, ipv4 and ipv6, associated with a
+ particular interface name.
+
+ Args:
+ comm_channel: How to send commands to a device. Can be ssh, adb serial,
+ etc. Must have the run function implemented.
+ interface: The interface name on the device, ie eth0
+
+ Returns:
+        A dictionary mapping address categories to lists of addresses:
+            ipv4_private: Any 192.168, 172.16, or 10
+                addresses
+            ipv4_public: Any IPv4 public addresses
+            ipv6_link_local: Any fe80:: addresses
+            ipv6_private_local: Any fd00:: addresses
+            ipv6_public: Any publicly routable addresses
+ """
+ ipv4_private_local_addresses = []
+ ipv4_public_addresses = []
+ ipv6_link_local_addresses = []
+ ipv6_private_local_addresses = []
+ ipv6_public_addresses = []
+ all_interfaces_and_addresses = comm_channel.run(
+ 'ip -o addr | awk \'!/^[0-9]*: ?lo|link\/ether/ {gsub("/", " "); '
+ 'print $2" "$4}\'').stdout
+ ifconfig_output = comm_channel.run('ifconfig %s' % interface).stdout
+ for interface_line in all_interfaces_and_addresses.split('\n'):
+ if interface != interface_line.split()[0]:
+ continue
+ on_device_ip = IPy.IP(interface_line.split()[1])
+ if on_device_ip.version() is 4:
+ if on_device_ip.iptype() == 'PRIVATE':
+ if str(on_device_ip) in ifconfig_output:
+ ipv4_private_local_addresses.append(
+ on_device_ip.strNormal())
+ elif on_device_ip.iptype() == 'PUBLIC':
+ if str(on_device_ip) in ifconfig_output:
+ ipv4_public_addresses.append(on_device_ip.strNormal())
+ elif on_device_ip.version() is 6:
+ if on_device_ip.iptype() == 'LINKLOCAL':
+ if str(on_device_ip) in ifconfig_output:
+ ipv6_link_local_addresses.append(on_device_ip.strNormal())
+ elif on_device_ip.iptype() == 'ULA':
+ if str(on_device_ip) in ifconfig_output:
+ ipv6_private_local_addresses.append(
+ on_device_ip.strNormal())
+ elif 'ALLOCATED' in on_device_ip.iptype():
+ if str(on_device_ip) in ifconfig_output:
+ ipv6_public_addresses.append(on_device_ip.strNormal())
+ return {
+ 'ipv4_private': ipv4_private_local_addresses,
+ 'ipv4_public': ipv4_public_addresses,
+ 'ipv6_link_local': ipv6_link_local_addresses,
+ 'ipv6_private_local': ipv6_private_local_addresses,
+ 'ipv6_public': ipv6_public_addresses
+ }
+
+
+def get_interface_based_on_ip(comm_channel, desired_ip_address):
+ """Gets the interface for a particular IP
+
+ Args:
+ comm_channel: How to send commands to a device. Can be ssh, adb serial,
+ etc. Must have the run function implemented.
+ desired_ip_address: The IP address that is being looked for on a device.
+
+ Returns:
+ The name of the test interface.
+ """
+
+ desired_ip_address = desired_ip_address.split('%', 1)[0]
+ all_ips_and_interfaces = comm_channel.run(
+ '(ip -o -4 addr show; ip -o -6 addr show) | '
+ 'awk \'{print $2" "$4}\'').stdout
+ #ipv4_addresses = comm_channel.run(
+ # 'ip -o -4 addr show| awk \'{print $2": "$4}\'').stdout
+ #ipv6_addresses = comm_channel._ssh_session.run(
+ # 'ip -o -6 addr show| awk \'{print $2": "$4}\'').stdout
+ #if desired_ip_address in ipv4_addresses:
+ # ip_addresses_to_search = ipv4_addresses
+ #elif desired_ip_address in ipv6_addresses:
+ # ip_addresses_to_search = ipv6_addresses
+ for ip_address_and_interface in all_ips_and_interfaces.split('\n'):
+ if desired_ip_address in ip_address_and_interface:
+ return ip_address_and_interface.split()[1][:-1]
+ return None
+
+
+def renew_linux_ip_address(comm_channel, interface):
+ comm_channel.run('sudo dhclient -r %s' % interface)
+ comm_channel.run('sudo dhclient %s' % interface)
diff --git a/acts/framework/setup.py b/acts/framework/setup.py
index 32b8a93..59210f9 100755
--- a/acts/framework/setup.py
+++ b/acts/framework/setup.py
@@ -23,6 +23,7 @@
import sys
install_requires = [
+ 'backoff',
# Future needs to have a newer version that contains urllib.
'future>=0.16.0',
'mock',
@@ -39,6 +40,7 @@
'xlsxwriter',
'mobly',
'grpcio',
+ 'IPy',
'Monsoon',
# paramiko-ng is needed vs paramiko as currently paramiko does not support
# ed25519 ssh keys, which is what Fuchsia uses.
@@ -58,7 +60,6 @@
"""Class used to execute unit tests using PyTest. This allows us to execute
unit tests without having to install the package.
"""
-
def finalize_options(self):
test.test.finalize_options(self)
self.test_args = ['-x', "tests"]
@@ -143,9 +144,8 @@
try:
import acts as acts_module
except ImportError:
- self.announce(
- 'Acts is not installed, nothing to uninstall.',
- level=log.ERROR)
+ self.announce('Acts is not installed, nothing to uninstall.',
+ level=log.ERROR)
return
while acts_module:
@@ -166,22 +166,21 @@
os.path.join(framework_dir, 'acts', 'bin', 'monsoon.py')
]
- setuptools.setup(
- name='acts',
- version='0.9',
- description='Android Comms Test Suite',
- license='Apache2.0',
- packages=setuptools.find_packages(),
- include_package_data=False,
- tests_require=['pytest'],
- install_requires=install_requires,
- scripts=scripts,
- cmdclass={
- 'test': PyTest,
- 'install_deps': ActsInstallDependencies,
- 'uninstall': ActsUninstall
- },
- url="http://www.android.com/")
+ setuptools.setup(name='acts',
+ version='0.9',
+ description='Android Comms Test Suite',
+ license='Apache2.0',
+ packages=setuptools.find_packages(),
+ include_package_data=False,
+ tests_require=['pytest'],
+ install_requires=install_requires,
+ scripts=scripts,
+ cmdclass={
+ 'test': PyTest,
+ 'install_deps': ActsInstallDependencies,
+ 'uninstall': ActsUninstall
+ },
+ url="http://www.android.com/")
if {'-u', '--uninstall', 'uninstall'}.intersection(sys.argv):
installed_scripts = [
diff --git a/acts/tests/google/coex/performance_tests/CoexBasicPerformanceTest.py b/acts/tests/google/coex/performance_tests/CoexBasicPerformanceTest.py
index 00cb735..c5c1879 100644
--- a/acts/tests/google/coex/performance_tests/CoexBasicPerformanceTest.py
+++ b/acts/tests/google/coex/performance_tests/CoexBasicPerformanceTest.py
@@ -14,153 +14,60 @@
# License for the specific language governing permissions and limitations under
# the License.
+import itertools
+
+from acts.test_utils.bt.bt_test_utils import enable_bluetooth
from acts.test_utils.coex.CoexPerformanceBaseTest import CoexPerformanceBaseTest
from acts.test_utils.coex.coex_test_utils import perform_classic_discovery
class CoexBasicPerformanceTest(CoexPerformanceBaseTest):
- def setup_class(self):
- super().setup_class()
+ def __init__(self, controllers):
+ super().__init__(controllers)
+ req_params = [
+ # A dict containing:
+ # protocol: A list containing TCP/UDP. Ex: protocol: ['tcp'].
+ # stream: A list containing ul/dl. Ex: stream: ['ul']
+ 'standalone_params'
+ ]
+ self.unpack_userparams(req_params)
+ self.tests = self.generated_test_cases(['bt_on', 'perform_discovery'])
- def run_iperf_and_perform_discovery(self):
- """Starts iperf client on host machine and bluetooth discovery
+ def perform_discovery(self):
+ """ Starts iperf client on host machine and bluetooth discovery
simultaneously.
Returns:
True if successful, False otherwise.
"""
tasks = [(perform_classic_discovery,
- (self.pri_ad, self.iperf["duration"], self.json_file,
- self.dev_list)), (self.run_iperf_and_get_result, ())]
- if not self.set_attenuation_and_run_iperf(tasks):
- return False
- return self.teardown_result()
+ (self.pri_ad, self.iperf['duration'], self.json_file,
+ self.dev_list)),
+ (self.run_iperf_and_get_result, ())]
+ return self.set_attenuation_and_run_iperf(tasks)
- def test_performance_with_bt_on_tcp_ul(self):
- """Check throughput when bluetooth on.
-
- This test is to start TCP-Uplink traffic between host machine and
- android device and check the throughput when bluetooth is on.
-
- Steps:
- 1. Start TCP-uplink traffic when bluetooth is on.
-
- Test Id: Bt_CoEx_kpi_005
- """
- self.set_attenuation_and_run_iperf()
- return self.teardown_result()
-
- def test_performance_with_bt_on_tcp_dl(self):
- """Check throughput when bluetooth on.
-
- This test is to start TCP-downlink traffic between host machine and
- android device and check the throughput when bluetooth is on.
-
- Steps:
- 1. Start TCP-downlink traffic when bluetooth is on.
-
- Test Id: Bt_CoEx_kpi_006
- """
- self.set_attenuation_and_run_iperf()
- return self.teardown_result()
-
- def test_performance_with_bt_on_udp_ul(self):
- """Check throughput when bluetooth on.
-
- This test is to start UDP-uplink traffic between host machine and
- android device and check the throughput when bluetooth is on.
-
- Steps:
- 1. Start UDP-uplink traffic when bluetooth is on.
-
- Test Id: Bt_CoEx_kpi_007
- """
- self.set_attenuation_and_run_iperf()
- return self.teardown_result()
-
- def test_performance_with_bt_on_udp_dl(self):
- """Check throughput when bluetooth on.
-
- This test is to start UDP-downlink traffic between host machine and
- android device and check the throughput when bluetooth is on.
-
- Steps:
- 1. Start UDP-downlink traffic when bluetooth is on.
-
- Test Id: Bt_CoEx_kpi_008
- """
- self.set_attenuation_and_run_iperf()
- return self.teardown_result()
-
- def test_performance_with_bluetooth_discovery_tcp_ul(self):
- """Check throughput when bluetooth discovery is ongoing.
-
- This test is to start TCP-uplink traffic between host machine and
- android device and bluetooth discovery and checks throughput.
-
- Steps:
- 1. Start TCP-uplink traffic and bluetooth discovery parallelly.
+ def bt_on(self):
+ """ Turns on bluetooth and runs iperf.
Returns:
- True if successful, False otherwise.
-
- Test Id: Bt_CoEx_kpi_009
+ True on success, False otherwise.
"""
- if not self.run_iperf_and_perform_discovery():
+ if not enable_bluetooth(self.pri_ad.droid, self.pri_ad.ed):
return False
- return True
+ return self.set_attenuation_and_run_iperf()
- def test_performance_with_bluetooth_discovery_tcp_dl(self):
- """Check throughput when bluetooth discovery is ongoing.
+ def generated_test_cases(self, test_types):
+ """ Auto generates tests for basic coex tests. """
+ test_cases = []
+ for protocol, stream, test_type in itertools.product(
+ self.standalone_params['protocol'],
+ self.standalone_params['stream'], test_types):
- This test is to start TCP-downlink traffic between host machine and
- android device and bluetooth discovery and checks throughput.
+ test_name = 'test_performance_with_{}_{}_{}'.format(
+ test_type, protocol, stream)
- Steps:
- 1. Start TCP-downlink traffic and bluetooth discovery parallelly.
-
- Returns:
- True if successful, False otherwise.
-
- Test Id: Bt_CoEx_kpi_010
- """
- if not self.run_iperf_and_perform_discovery():
- return False
- return True
-
- def test_performance_with_bluetooth_discovery_udp_ul(self):
- """Check throughput when bluetooth discovery is ongoing.
-
- This test is to start UDP-uplink traffic between host machine and
- android device and bluetooth discovery and checks throughput.
-
- Steps:
- 1. Start UDP-uplink traffic and bluetooth discovery parallelly.
-
- Returns:
- True if successful, False otherwise.
-
- Test Id: Bt_CoEx_kpi_011
- """
- if not self.run_iperf_and_perform_discovery():
- return False
- return True
-
- def test_performance_with_bluetooth_discovery_udp_dl(self):
- """Check throughput when bluetooth discovery is ongoing.
-
- This test is to start UDP-downlink traffic between host machine and
- android device and bluetooth discovery and checks throughput.
-
- Steps:
- 1. Start UDP-downlink traffic and bluetooth discovery parallelly.
-
- Returns:
- True if successful, False otherwise.
-
- Test Id: Bt_CoEx_kpi_012
- """
- if not self.run_iperf_and_perform_discovery():
- return False
- return True
+ test_function = getattr(self, test_type)
+ setattr(self, test_name, test_function)
+ test_cases.append(test_name)
+ return test_cases
diff --git a/acts/tests/google/net/DataCostTest.py b/acts/tests/google/net/DataCostTest.py
index 88ededa..617626f 100644
--- a/acts/tests/google/net/DataCostTest.py
+++ b/acts/tests/google/net/DataCostTest.py
@@ -77,7 +77,7 @@
"Fail to clear netstats.")
ad.reboot()
time.sleep(10)
- self.check_multipath_preference_from_dumpsys(ad)
+ self._check_multipath_preference_from_dumpsys(ad)
def _check_multipath_preference_from_dumpsys(self, ad):
""" Check cell multipath_preference from dumpsys