Merge remote branch 'cros/upstream' into autotest-rebase
Merged to upstream trunk@5066, from trunk@4749.
Individual upstream CLs are not enumerated here, since listing them all would overflow the changelist description field.
BUG=
TEST=
Patched this CL into a freshly cut client to avoid any side effects.
Ran run_remote_test bvt from both the emerged location and third_party/autotest/file.
Both tests passed.
We should also keep an eye on how this change propagates to the cautotest server.
TBR=dalecurtis
Change-Id: I72f2bc7a9de530178484aea1bfb5ace68bcad029
diff --git a/client/common_lib/base_barrier.py b/client/common_lib/base_barrier.py
index e4de635..e1063a9 100644
--- a/client/common_lib/base_barrier.py
+++ b/client/common_lib/base_barrier.py
@@ -5,6 +5,16 @@
# default barrier port
_DEFAULT_PORT = 11922
+def get_host_from_id(hostid):
+ # Remove any trailing local identifier following a #.
+ # This allows multiple members per host which is particularly
+ # helpful in testing.
+ if not hostid.startswith('#'):
+ return hostid.split('#')[0]
+ else:
+ raise error.BarrierError(
+ "Invalid Host id: Host Address should be specified")
+
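+# A minimal usage sketch of the helper above (hypothetical member ids):
+#
+# >>> get_host_from_id('myhost#0')
+# 'myhost'
+# >>> get_host_from_id('myhost')
+# 'myhost'
+# >>> get_host_from_id('#0') # no host address: raises error.BarrierError
+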
class BarrierAbortError(error.BarrierError):
"""Special BarrierError raised when an explicit abort is requested."""
@@ -159,17 +169,6 @@
self._waiting = {} # Maps from hostname -> (client, addr) tuples.
- def _get_host_from_id(self, hostid):
- # Remove any trailing local identifier following a #.
- # This allows multiple members per host which is particularly
- # helpful in testing.
- if not hostid.startswith('#'):
- return hostid.split('#')[0]
- else:
- raise error.BarrierError(
- "Invalid Host id: Host Address should be specified")
-
-
def _update_timeout(self, timeout):
if timeout is not None and self._start_time is not None:
self._timeout_secs = (time() - self._start_time) + timeout
@@ -397,14 +396,14 @@
remote.settimeout(30)
if is_master:
# Connect to all slaves.
- host = self._get_host_from_id(self._members[self._seen])
+ host = get_host_from_id(self._members[self._seen])
logging.info("calling slave: %s", host)
connection = (remote, (host, self._port))
remote.connect(connection[1])
self._master_welcome(connection)
else:
# Just connect to the master.
- host = self._get_host_from_id(self._masterid)
+ host = get_host_from_id(self._masterid)
logging.info("calling master")
connection = (remote, (host, self._port))
remote.connect(connection[1])
diff --git a/client/common_lib/base_barrier_unittest.py b/client/common_lib/base_barrier_unittest.py
index 52d8e17..71ea538 100755
--- a/client/common_lib/base_barrier_unittest.py
+++ b/client/common_lib/base_barrier_unittest.py
@@ -5,7 +5,7 @@
import os, sys, socket, errno, unittest, threading
from time import time, sleep
import common
-from autotest_lib.client.common_lib import error, barrier
+from autotest_lib.client.common_lib import error, barrier, base_barrier
from autotest_lib.client.common_lib.test_utils import mock
@@ -46,15 +46,14 @@
def test_get_host_from_id(self):
- b = barrier.barrier('127.0.0.1#', 'testgethost', 100)
-
- hostname = b._get_host_from_id('my_host')
+ hostname = base_barrier.get_host_from_id('my_host')
self.assertEqual(hostname, 'my_host')
- hostname = b._get_host_from_id('my_host#')
+ hostname = base_barrier.get_host_from_id('my_host#')
self.assertEqual(hostname, 'my_host')
- self.assertRaises(error.BarrierError, b._get_host_from_id, '#my_host')
+ self.assertRaises(error.BarrierError,
+ base_barrier.get_host_from_id, '#my_host')
def test_update_timeout(self):
diff --git a/client/common_lib/base_job.py b/client/common_lib/base_job.py
index 3c77d38..4a2271c 100644
--- a/client/common_lib/base_job.py
+++ b/client/common_lib/base_job.py
@@ -422,6 +422,9 @@
TIMESTAMP_FIELD = 'timestamp'
LOCALTIME_FIELD = 'localtime'
+ # non-space whitespace is forbidden in any fields
+ BAD_CHAR_REGEX = re.compile(r'[\t\n\r\v\f]')
+
def __init__(self, status_code, subdir, operation, message, fields,
timestamp=None):
"""Construct a status.log entry.
@@ -439,18 +442,16 @@
@raise ValueError: if any of the parameters are invalid
"""
- # non-space whitespace is forbidden in any fields
- bad_char_regex = r'[\t\n\r\v\f]'
if not log.is_valid_status(status_code):
raise ValueError('status code %r is not valid' % status_code)
self.status_code = status_code
- if subdir and re.search(bad_char_regex, subdir):
+ if subdir and self.BAD_CHAR_REGEX.search(subdir):
raise ValueError('Invalid character in subdir string')
self.subdir = subdir
- if operation and re.search(bad_char_regex, operation):
+ if operation and self.BAD_CHAR_REGEX.search(operation):
raise ValueError('Invalid character in operation string')
self.operation = operation
@@ -460,7 +461,7 @@
message_lines = message.split('\n')
self.message = message_lines[0].replace('\t', ' ' * 8)
self.extra_message_lines = message_lines[1:]
- if re.search(bad_char_regex, self.message):
+ if self.BAD_CHAR_REGEX.search(self.message):
raise ValueError('Invalid character in message %r' % self.message)
if not fields:
@@ -468,7 +469,7 @@
else:
self.fields = fields.copy()
for key, value in self.fields.iteritems():
- if re.search(bad_char_regex, key + value):
+ if self.BAD_CHAR_REGEX.search(key + value):
raise ValueError('Invalid character in %r=%r field'
% (key, value))
diff --git a/client/common_lib/base_utils.py b/client/common_lib/base_utils.py
new file mode 100644
index 0000000..101599b
--- /dev/null
+++ b/client/common_lib/base_utils.py
@@ -0,0 +1,1715 @@
+#
+# Copyright 2008 Google Inc. Released under the GPL v2
+
+import os, pickle, random, re, resource, select, shutil, signal, StringIO
+import socket, struct, subprocess, sys, time, textwrap, urlparse
+import warnings, smtplib, logging, urllib2
+from threading import Thread, Event
+try:
+ import hashlib
+except ImportError:
+ import md5, sha
+from autotest_lib.client.common_lib import error, logging_manager
+
+def deprecated(func):
+ """This is a decorator which can be used to mark functions as deprecated.
+ It will result in a warning being emitted when the function is used."""
+ def new_func(*args, **dargs):
+ warnings.warn("Call to deprecated function %s." % func.__name__,
+ category=DeprecationWarning)
+ return func(*args, **dargs)
+ new_func.__name__ = func.__name__
+ new_func.__doc__ = func.__doc__
+ new_func.__dict__.update(func.__dict__)
+ return new_func
+
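+# A minimal usage sketch of the decorator above (hypothetical function):
+#
+# >>> @deprecated
+# ... def old_api():
+# ... return 42
+# >>> old_api() # emits a DeprecationWarning, then returns 42
+# 42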
+
+class _NullStream(object):
+ def write(self, data):
+ pass
+
+
+ def flush(self):
+ pass
+
+
+TEE_TO_LOGS = object()
+_the_null_stream = _NullStream()
+
+DEFAULT_STDOUT_LEVEL = logging.DEBUG
+DEFAULT_STDERR_LEVEL = logging.ERROR
+
+# prefixes for logging stdout/stderr of commands
+STDOUT_PREFIX = '[stdout] '
+STDERR_PREFIX = '[stderr] '
+
+
+def get_stream_tee_file(stream, level, prefix=''):
+ if stream is None:
+ return _the_null_stream
+ if stream is TEE_TO_LOGS:
+ return logging_manager.LoggingFile(level=level, prefix=prefix)
+ return stream
+
+
+class BgJob(object):
+ def __init__(self, command, stdout_tee=None, stderr_tee=None, verbose=True,
+ stdin=None, stderr_level=DEFAULT_STDERR_LEVEL):
+ self.command = command
+ self.stdout_tee = get_stream_tee_file(stdout_tee, DEFAULT_STDOUT_LEVEL,
+ prefix=STDOUT_PREFIX)
+ self.stderr_tee = get_stream_tee_file(stderr_tee, stderr_level,
+ prefix=STDERR_PREFIX)
+ self.result = CmdResult(command)
+
+ # allow for easy stdin input by string, we'll let subprocess create
+ # a pipe for stdin input and we'll write to it in the wait loop
+ if isinstance(stdin, basestring):
+ self.string_stdin = stdin
+ stdin = subprocess.PIPE
+ else:
+ self.string_stdin = None
+
+ if verbose:
+ logging.debug("Running '%s'" % command)
+ self.sp = subprocess.Popen(command, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ preexec_fn=self._reset_sigpipe, shell=True,
+
+ # Default shell in ChromeOS test image is
+ # already bash. We're seeing shell-init
+ # errors if this value is set.
+
+ #executable="/bin/bash",
+ stdin=stdin)
+
+
+ def output_prepare(self, stdout_file=None, stderr_file=None):
+ self.stdout_file = stdout_file
+ self.stderr_file = stderr_file
+
+
+ def process_output(self, stdout=True, final_read=False):
+ """output_prepare must be called prior to calling this"""
+ if stdout:
+ pipe, buf, tee = self.sp.stdout, self.stdout_file, self.stdout_tee
+ else:
+ pipe, buf, tee = self.sp.stderr, self.stderr_file, self.stderr_tee
+
+ if final_read:
+ # read in all the data we can from pipe and then stop
+ data = []
+ while select.select([pipe], [], [], 0)[0]:
+ data.append(os.read(pipe.fileno(), 1024))
+ if len(data[-1]) == 0:
+ break
+ data = "".join(data)
+ else:
+ # perform a single read
+ data = os.read(pipe.fileno(), 1024)
+ buf.write(data)
+ tee.write(data)
+
+
+ def cleanup(self):
+ self.stdout_tee.flush()
+ self.stderr_tee.flush()
+ self.sp.stdout.close()
+ self.sp.stderr.close()
+ self.result.stdout = self.stdout_file.getvalue()
+ self.result.stderr = self.stderr_file.getvalue()
+
+
+ def _reset_sigpipe(self):
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+
+def ip_to_long(ip):
+ # !L is a long in network byte order
+ return struct.unpack('!L', socket.inet_aton(ip))[0]
+
+
+def long_to_ip(number):
+ # See above comment.
+ return socket.inet_ntoa(struct.pack('!L', number))
+
+
+def create_subnet_mask(bits):
+ return (1 << 32) - (1 << 32-bits)
+
+
+def format_ip_with_mask(ip, mask_bits):
+ masked_ip = ip_to_long(ip) & create_subnet_mask(mask_bits)
+ return "%s/%s" % (long_to_ip(masked_ip), mask_bits)
+
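+# A short worked example of the IP helpers above; the values follow from
+# the bit arithmetic (192.168.0.1 == 0xC0A80001 == 3232235521):
+#
+# ip_to_long('192.168.0.1') == 3232235521
+# create_subnet_mask(26) == 4294967232 # top 26 bits set
+# format_ip_with_mask('192.168.0.255', 26) == '192.168.0.192/26'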
+
+def normalize_hostname(alias):
+ ip = socket.gethostbyname(alias)
+ return socket.gethostbyaddr(ip)[0]
+
+
+def get_ip_local_port_range():
+ match = re.match(r'\s*(\d+)\s*(\d+)\s*$',
+ read_one_line('/proc/sys/net/ipv4/ip_local_port_range'))
+ return (int(match.group(1)), int(match.group(2)))
+
+
+def set_ip_local_port_range(lower, upper):
+ write_one_line('/proc/sys/net/ipv4/ip_local_port_range',
+ '%d %d\n' % (lower, upper))
+
+
+
+def send_email(mail_from, mail_to, subject, body):
+ """
+ Sends an email via SMTP.
+
+ mail_from: string with email address of sender
+ mail_to: string or list with email address(es) of recipients
+ subject: string with subject of email
+ body: (multi-line) string with body of email
+ """
+ if isinstance(mail_to, str):
+ mail_to = [mail_to]
+ msg = "From: %s\nTo: %s\nSubject: %s\n\n%s" % (mail_from, ','.join(mail_to),
+ subject, body)
+ try:
+ mailer = smtplib.SMTP('localhost')
+ try:
+ mailer.sendmail(mail_from, mail_to, msg)
+ finally:
+ mailer.quit()
+ except Exception, e:
+ # Email failures are non-critical; report them, but don't raise.
+ print "Sending email failed. Reason: %s" % repr(e)
+
+
+def read_one_line(filename):
+ return open(filename, 'r').readline().rstrip('\n')
+
+
+def read_file(filename):
+ f = open(filename)
+ try:
+ return f.read()
+ finally:
+ f.close()
+
+
+def get_field(data, param, linestart="", sep=" "):
+ """
+ Parse data from string.
+ @param data: Data to parse.
+ example:
+ data:
+ cpu 324 345 34 5 345
+ cpu0 34 11 34 34 33
+ ^^^^
+ start of line
+ params 0 1 2 3 4
+ @param param: Position of parameter after linestart marker.
+ @param linestart: String that the parameter line starts with.
+ @param sep: Regular expression used as the separator between parameters.
+ """
+ search = re.compile(r"(?<=^%s)\s*(.*)" % linestart, re.MULTILINE)
+ find = search.search(data)
+ if find != None:
+ return re.split("%s" % sep, find.group(1))[param]
+ else:
+ print "There is no line which starts with %s in data." % linestart
+ return None
+
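+# A minimal sketch of get_field() on a /proc/stat-style line (illustrative
+# data): field 1 after the "cpu" marker is the second number.
+#
+# get_field("cpu 324 345 34 5 345", 1, linestart="cpu") == '345'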
+
+def write_one_line(filename, line):
+ open_write_close(filename, line.rstrip('\n') + '\n')
+
+
+def open_write_close(filename, data):
+ f = open(filename, 'w')
+ try:
+ f.write(data)
+ finally:
+ f.close()
+
+
+def matrix_to_string(matrix, header=None):
+ """
+ Return a pretty, aligned string representation of a nxm matrix.
+
+ This representation can be used to print any tabular data, such as
+ database results. It works by scanning the lengths of each element
+ in each column, and determining the format string dynamically.
+
+ @param matrix: Matrix representation (list with n rows of m elements).
+ @param header: Optional tuple or list with header elements to be displayed.
+ """
+ if type(header) is list:
+ header = tuple(header)
+ lengths = []
+ if header:
+ for column in header:
+ lengths.append(len(column))
+ for row in matrix:
+ for column in row:
+ i = row.index(column)
+ cl = len(column)
+ try:
+ ml = lengths[i]
+ if cl > ml:
+ lengths[i] = cl
+ except IndexError:
+ lengths.append(cl)
+
+ lengths = tuple(lengths)
+ format_string = ""
+ for length in lengths:
+ format_string += "%-" + str(length) + "s "
+ format_string += "\n"
+
+ matrix_str = ""
+ if header:
+ matrix_str += format_string % header
+ for row in matrix:
+ matrix_str += format_string % tuple(row)
+
+ return matrix_str
+
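+# A minimal sketch of matrix_to_string() with hypothetical data; every
+# column is left-aligned and padded to the width of its widest element:
+#
+# print matrix_to_string([['1', 'two'], ['333', '4']], header=('A', 'B'))
+# # prints "A B", "1 two", "333 4" as three rows with aligned columns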
+
+def read_keyval(path):
+ """
+ Read a key-value pair format file into a dictionary, and return it.
+ Takes either a filename or directory name as input. If it's a
+ directory name, we assume you want the file to be called keyval.
+ """
+ if os.path.isdir(path):
+ path = os.path.join(path, 'keyval')
+ keyval = {}
+ if os.path.exists(path):
+ for line in open(path):
+ line = re.sub('#.*', '', line).rstrip()
+ if not re.search(r'^[-\.\w]+=', line):
+ raise ValueError('Invalid format line: %s' % line)
+ key, value = line.split('=', 1)
+ if re.search('^\d+$', value):
+ value = int(value)
+ elif re.search('^(\d+\.)?\d+$', value):
+ value = float(value)
+ keyval[key] = value
+ return keyval
+
+
+def write_keyval(path, dictionary, type_tag=None):
+ """
+ Write a key-value pair format file out to a file. This uses append
+ mode to open the file, so existing text will not be overwritten or
+ reparsed.
+
+ If type_tag is None, then the key must be composed of alphanumeric
+ characters (or dashes+underscores). However, if type_tag is not
+ None then the keys must also have "{type_tag}" as a suffix. At
+ the moment the only valid values of type_tag are "attr" and "perf".
+ """
+ if os.path.isdir(path):
+ path = os.path.join(path, 'keyval')
+ keyval = open(path, 'a')
+
+ if type_tag is None:
+ key_regex = re.compile(r'^[-\.\w]+$')
+ else:
+ if type_tag not in ('attr', 'perf'):
+ raise ValueError('Invalid type tag: %s' % type_tag)
+ escaped_tag = re.escape(type_tag)
+ key_regex = re.compile(r'^[-\.\w]+\{%s\}$' % escaped_tag)
+ try:
+ for key in sorted(dictionary.keys()):
+ if not key_regex.search(key):
+ raise ValueError('Invalid key: %s' % key)
+ keyval.write('%s=%s\n' % (key, dictionary[key]))
+ finally:
+ keyval.close()
+
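+# A round-trip sketch for the keyval helpers above (hypothetical path):
+#
+# write_keyval('/tmp/results', {'iterations': 3, 'score': 1.5})
+# # appends "iterations=3\nscore=1.5\n" to /tmp/results/keyval
+# read_keyval('/tmp/results') == {'iterations': 3, 'score': 1.5}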
+
+class FileFieldMonitor(object):
+ """
+ Monitors information from a file and reports its values.
+
+ It gathers the information at the start and stop of the measurement, or
+ continuously during the measurement.
+ """
+ class Monitor(Thread):
+ """
+ Internal monitor class to ensure continuous monitor of monitored file.
+ """
+ def __init__(self, master):
+ """
+ @param master: Master class which control Monitor
+ """
+ Thread.__init__(self)
+ self.master = master
+
+ def run(self):
+ """
+ Start monitor in thread mode
+ """
+ while not self.master.end_event.isSet():
+ self.master._get_value(self.master.logging)
+ time.sleep(self.master.time_step)
+
+
+ def __init__(self, status_file, data_to_read, mode_diff, continuously=False,
+ contlogging=False, separator=" +", time_step=0.1):
+ """
+ Initialize variables.
+ @param status_file: File containing the status.
+ @param data_to_read: List of tuples with data position.
+ format: [(start_of_line,position in params)]
+ example:
+ data:
+ cpu 324 345 34 5 345
+ cpu0 34 11 34 34 33
+ ^^^^
+ start of line
+ params 0 1 2 3 4
+ @param mode_diff: If True, subtract the old value from the new value;
+ if False, average the values.
+ @param continuously: If True, start the monitoring thread, using
+ time_step as the measurement period.
+ @param contlogging: Log data during a continuous run.
+ @param separator: Regular expression used as the separator.
+ @param time_step: Time period between measurements.
+ """
+ self.end_event = Event()
+ self.start_time = 0
+ self.end_time = 0
+ self.test_time = 0
+
+ self.status_file = status_file
+ self.separator = separator
+ self.data_to_read = data_to_read
+ self.num_of_params = len(self.data_to_read)
+ self.mode_diff = mode_diff
+ self.continuously = continuously
+ self.time_step = time_step
+
+ self.value = [0 for i in range(self.num_of_params)]
+ self.old_value = [0 for i in range(self.num_of_params)]
+ self.log = []
+ self.logging = contlogging
+
+ self.started = False
+ self.num_of_get_value = 0
+ self.monitor = None
+
+
+ def _get_value(self, logging=True):
+ """
+ Return current values.
+ @param logging: If True, log the values in memory. This can be a
+ problem for long runs.
+ """
+ data = read_file(self.status_file)
+ value = []
+ for i in range(self.num_of_params):
+ value.append(int(get_field(data,
+ self.data_to_read[i][1],
+ self.data_to_read[i][0],
+ self.separator)))
+
+ if logging:
+ self.log.append(value)
+ if not self.mode_diff:
+ value = map(lambda x, y: x + y, value, self.old_value)
+
+ self.old_value = value
+ self.num_of_get_value += 1
+ return value
+
+
+ def start(self):
+ """
+ Start value monitor.
+ """
+ if self.started:
+ self.stop()
+ self.old_value = [0 for i in range(self.num_of_params)]
+ self.num_of_get_value = 0
+ self.log = []
+ self.end_event.clear()
+ self.start_time = time.time()
+ self._get_value()
+ self.started = True
+ if (self.continuously):
+ self.monitor = FileFieldMonitor.Monitor(self)
+ self.monitor.start()
+
+
+ def stop(self):
+ """
+ Stop value monitor.
+ """
+ if self.started:
+ self.started = False
+ self.end_time = time.time()
+ self.test_time = self.end_time - self.start_time
+ self.value = self._get_value()
+ if (self.continuously):
+ self.end_event.set()
+ self.monitor.join()
+ if (self.mode_diff):
+ self.value = map(lambda x, y: x - y, self.log[-1], self.log[0])
+ else:
+ self.value = map(lambda x: x / self.num_of_get_value,
+ self.value)
+
+
+ def get_status(self):
+ """
+ @return: Tuple of the averaged values of the monitored process, the
+ test time, the array of monitored values, and the time step of
+ the continuous run.
+ """
+ if self.started:
+ self.stop()
+ if self.mode_diff:
+ for i in range(len(self.log) - 1):
+ self.log[i] = (map(lambda x, y: x - y,
+ self.log[i + 1], self.log[i]))
+ self.log.pop()
+ return (self.value, self.test_time, self.log, self.time_step)
+
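+# A minimal usage sketch of FileFieldMonitor: sample the cpu user/system
+# time fields from /proc/stat in diff mode (values are machine-dependent):
+#
+# mon = FileFieldMonitor("/proc/stat", [("cpu", 0), ("cpu", 2)],
+# mode_diff=True)
+# mon.start()
+# ... # run the workload under measurement
+# mon.stop()
+# values, test_time, log, step = mon.get_status()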
+
+def is_url(path):
+ """Return true if path looks like a URL"""
+ # for now, just handle http and ftp
+ url_parts = urlparse.urlparse(path)
+ return (url_parts[0] in ('http', 'ftp'))
+
+
+def urlopen(url, data=None, timeout=5):
+ """Wrapper to urllib2.urlopen with timeout addition."""
+
+ # Save old timeout
+ old_timeout = socket.getdefaulttimeout()
+ socket.setdefaulttimeout(timeout)
+ try:
+ return urllib2.urlopen(url, data=data)
+ finally:
+ socket.setdefaulttimeout(old_timeout)
+
+
+def urlretrieve(url, filename, data=None, timeout=300):
+ """Retrieve a file from given url."""
+ logging.debug('Fetching %s -> %s', url, filename)
+
+ src_file = urlopen(url, data=data, timeout=timeout)
+ try:
+ dest_file = open(filename, 'wb')
+ try:
+ shutil.copyfileobj(src_file, dest_file)
+ finally:
+ dest_file.close()
+ finally:
+ src_file.close()
+
+
+def hash(type, input=None):
+ """
+ Returns a hash object of type md5 or sha1. This function is implemented in
+ order to encapsulate hash objects in a way that is compatible with python
+ 2.4 and python 2.6 without warnings.
+
+ Note that even though python 2.6 hashlib supports hash types other than
+ md5 and sha1, we artificially limit the input values in order to
+ make the function behave exactly the same under both python
+ implementations.
+
+ @param input: Optional input string that will be used to update the hash.
+ """
+ if type not in ['md5', 'sha1']:
+ raise ValueError("Unsupported hash type: %s" % type)
+
+ try:
+ hash = hashlib.new(type)
+ except NameError:
+ if type == 'md5':
+ hash = md5.new()
+ elif type == 'sha1':
+ hash = sha.new()
+
+ if input:
+ hash.update(input)
+
+ return hash
+
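+# A minimal sketch of the hash() wrapper above; md5('foo') is the
+# well-known digest below:
+#
+# hash('md5', 'foo').hexdigest() == 'acbd18db4cc2f85cedef654fccc4a4d8'
+# hash('sha256') # raises ValueError: unsupported hash type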
+
+def get_file(src, dest, permissions=None):
+ """Get a file from src, which can be local or a remote URL"""
+ if src == dest:
+ return
+
+ if is_url(src):
+ urlretrieve(src, dest)
+ else:
+ shutil.copyfile(src, dest)
+
+ if permissions:
+ os.chmod(dest, permissions)
+ return dest
+
+
+def unmap_url(srcdir, src, destdir='.'):
+ """
+ Receives either a path to a local file or a URL.
+ returns either the path to the local file, or the fetched URL
+
+ unmap_url('/usr/src', 'foo.tar', '/tmp')
+ = '/usr/src/foo.tar'
+ unmap_url('/usr/src', 'http://site/file', '/tmp')
+ = '/tmp/file'
+ (after retrieving it)
+ """
+ if is_url(src):
+ url_parts = urlparse.urlparse(src)
+ filename = os.path.basename(url_parts[2])
+ dest = os.path.join(destdir, filename)
+ return get_file(src, dest)
+ else:
+ return os.path.join(srcdir, src)
+
+
+def update_version(srcdir, preserve_srcdir, new_version, install,
+ *args, **dargs):
+ """
+ Make sure srcdir is version new_version
+
+ If not, delete it and install() the new version.
+
+ In the preserve_srcdir case, we just check it's up to date,
+ and if not, we rerun install, without removing srcdir
+ """
+ versionfile = os.path.join(srcdir, '.version')
+ install_needed = True
+
+ if os.path.exists(versionfile):
+ old_version = pickle.load(open(versionfile))
+ if old_version == new_version:
+ install_needed = False
+
+ if install_needed:
+ if not preserve_srcdir and os.path.exists(srcdir):
+ shutil.rmtree(srcdir)
+ install(*args, **dargs)
+ if os.path.exists(srcdir):
+ pickle.dump(new_version, open(versionfile, 'w'))
+
+
+def get_stderr_level(stderr_is_expected):
+ if stderr_is_expected:
+ return DEFAULT_STDOUT_LEVEL
+ return DEFAULT_STDERR_LEVEL
+
+
+def run(command, timeout=None, ignore_status=False,
+ stdout_tee=None, stderr_tee=None, verbose=True, stdin=None,
+ stderr_is_expected=None, args=()):
+ """
+ Run a command on the host.
+
+ @param command: the command line string.
+ @param timeout: time limit in seconds before attempting to kill the
+ running process. The run() function will take a few seconds
+ longer than 'timeout' to complete if it has to kill the process.
+ @param ignore_status: do not raise an exception, no matter what the exit
+ code of the command is.
+ @param stdout_tee: optional file-like object to which stdout data
+ will be written as it is generated (data will still be stored
+ in result.stdout).
+ @param stderr_tee: likewise for stderr.
+ @param verbose: if True, log the command being run.
+ @param stdin: stdin to pass to the executed process (can be a file
+ descriptor, a file object of a real file or a string).
+ @param args: sequence of strings of arguments to be given to the command
+ inside " quotes after they have been escaped for that; each
+ element in the sequence will be given as a separate command
+ argument
+
+ @return a CmdResult object
+
+ @raise CmdError: the exit code of the command execution was not 0
+ """
+ if isinstance(args, basestring):
+ raise TypeError('Got a string for the "args" keyword argument, '
+ 'need a sequence.')
+
+ for arg in args:
+ command += ' "%s"' % sh_escape(arg)
+ if stderr_is_expected is None:
+ stderr_is_expected = ignore_status
+
+ bg_job = join_bg_jobs(
+ (BgJob(command, stdout_tee, stderr_tee, verbose, stdin=stdin,
+ stderr_level=get_stderr_level(stderr_is_expected)),),
+ timeout)[0]
+ if not ignore_status and bg_job.result.exit_status:
+ raise error.CmdError(command, bg_job.result,
+ "Command returned non-zero exit status")
+
+ return bg_job.result
+
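+# A minimal sketch of run(); args elements are escaped and appended in
+# double quotes (see sh_escape below):
+#
+# result = run('echo', args=('hello world',)) # runs: echo "hello world"
+# result.stdout == 'hello world\n'
+# run('false', ignore_status=True).exit_status == 1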
+
+def run_parallel(commands, timeout=None, ignore_status=False,
+ stdout_tee=None, stderr_tee=None):
+ """
+ Behaves the same as run() with the following exceptions:
+
+ - commands is a list of commands to run in parallel.
+ - ignore_status toggles whether or not an exception should be raised
+ on any error.
+
+ @return: a list of CmdResult objects
+ """
+ bg_jobs = []
+ for command in commands:
+ bg_jobs.append(BgJob(command, stdout_tee, stderr_tee,
+ stderr_level=get_stderr_level(ignore_status)))
+
+ # Updates objects in bg_jobs list with their process information
+ join_bg_jobs(bg_jobs, timeout)
+
+ for bg_job in bg_jobs:
+ if not ignore_status and bg_job.result.exit_status:
+ raise error.CmdError(command, bg_job.result,
+ "Command returned non-zero exit status")
+
+ return [bg_job.result for bg_job in bg_jobs]
+
+
+@deprecated
+def run_bg(command):
+ """Function deprecated. Please use BgJob class instead."""
+ bg_job = BgJob(command)
+ return bg_job.sp, bg_job.result
+
+
+def join_bg_jobs(bg_jobs, timeout=None):
+ """Joins the bg_jobs with the current thread.
+
+ Returns the same list of bg_jobs objects that was passed in.
+ """
+ ret, timeout_error = 0, False
+ for bg_job in bg_jobs:
+ bg_job.output_prepare(StringIO.StringIO(), StringIO.StringIO())
+
+ try:
+ # We are holding ends of the stdin, stdout pipes,
+ # hence we need to be sure to close those fds no matter what
+ start_time = time.time()
+ timeout_error = _wait_for_commands(bg_jobs, start_time, timeout)
+
+ for bg_job in bg_jobs:
+ # Process stdout and stderr
+ bg_job.process_output(stdout=True,final_read=True)
+ bg_job.process_output(stdout=False,final_read=True)
+ finally:
+ # close our ends of the pipes to the sp no matter what
+ for bg_job in bg_jobs:
+ bg_job.cleanup()
+
+ if timeout_error:
+ # TODO: This needs to be fixed to better represent what happens when
+ # running in parallel. However this is backwards compatible, so it will
+ # do for the time being.
+ raise error.CmdError(bg_jobs[0].command, bg_jobs[0].result,
+ "Command(s) did not complete within %d seconds"
+ % timeout)
+
+
+ return bg_jobs
+
+
+def _wait_for_commands(bg_jobs, start_time, timeout):
+ # This returns True if it must return due to a timeout, otherwise False.
+
+ # To check for processes which terminate without producing any output
+ # a 1 second timeout is used in select.
+ SELECT_TIMEOUT = 1
+
+ read_list = []
+ write_list = []
+ reverse_dict = {}
+
+ for bg_job in bg_jobs:
+ read_list.append(bg_job.sp.stdout)
+ read_list.append(bg_job.sp.stderr)
+ reverse_dict[bg_job.sp.stdout] = (bg_job, True)
+ reverse_dict[bg_job.sp.stderr] = (bg_job, False)
+ if bg_job.string_stdin is not None:
+ write_list.append(bg_job.sp.stdin)
+ reverse_dict[bg_job.sp.stdin] = bg_job
+
+ if timeout:
+ stop_time = start_time + timeout
+ time_left = stop_time - time.time()
+ else:
+ time_left = None # so that select never times out
+
+ while not timeout or time_left > 0:
+ # select will return when we may write to stdin or when there is
+ # stdout/stderr output we can read (including when it is
+ # EOF, that is the process has terminated).
+ read_ready, write_ready, _ = select.select(read_list, write_list, [],
+ SELECT_TIMEOUT)
+
+ # os.read() has to be used instead of
+ # subproc.stdout.read() which will otherwise block
+ for file_obj in read_ready:
+ bg_job, is_stdout = reverse_dict[file_obj]
+ bg_job.process_output(is_stdout)
+
+ for file_obj in write_ready:
+ # we can write PIPE_BUF bytes without blocking
+ # POSIX requires PIPE_BUF is >= 512
+ bg_job = reverse_dict[file_obj]
+ file_obj.write(bg_job.string_stdin[:512])
+ bg_job.string_stdin = bg_job.string_stdin[512:]
+ # no more input data, close stdin, remove it from the select set
+ if not bg_job.string_stdin:
+ file_obj.close()
+ write_list.remove(file_obj)
+ del reverse_dict[file_obj]
+
+ all_jobs_finished = True
+ for bg_job in bg_jobs:
+ if bg_job.result.exit_status is not None:
+ continue
+
+ bg_job.result.exit_status = bg_job.sp.poll()
+ if bg_job.result.exit_status is not None:
+ # process exited, remove its stdout/stdin from the select set
+ bg_job.result.duration = time.time() - start_time
+ read_list.remove(bg_job.sp.stdout)
+ read_list.remove(bg_job.sp.stderr)
+ del reverse_dict[bg_job.sp.stdout]
+ del reverse_dict[bg_job.sp.stderr]
+ else:
+ all_jobs_finished = False
+
+ if all_jobs_finished:
+ return False
+
+ if timeout:
+ time_left = stop_time - time.time()
+
+ # Kill all processes which did not complete prior to timeout
+ for bg_job in bg_jobs:
+ if bg_job.result.exit_status is not None:
+ continue
+
+ logging.warn('run process timeout (%s) fired on: %s', timeout,
+ bg_job.command)
+ nuke_subprocess(bg_job.sp)
+ bg_job.result.exit_status = bg_job.sp.poll()
+ bg_job.result.duration = time.time() - start_time
+
+ return True
+
+
+def pid_is_alive(pid):
+ """
+ True if process pid exists and is not yet stuck in Zombie state.
+ Zombies are impossible to move between cgroups, etc.
+ pid can be integer, or text of integer.
+ """
+ path = '/proc/%s/stat' % pid
+
+ try:
+ stat = read_one_line(path)
+ except IOError:
+ if not os.path.exists(path):
+ # file went away
+ return False
+ raise
+
+ return stat.split()[2] != 'Z'
+
+
+def signal_pid(pid, sig):
+ """
+ Sends a signal to a process id. Returns True if the process terminated
+ successfully, False otherwise.
+ """
+ try:
+ os.kill(pid, sig)
+ except OSError:
+ # The process may have died before we could kill it.
+ pass
+
+ for i in range(5):
+ if not pid_is_alive(pid):
+ return True
+ time.sleep(1)
+
+ # The process is still alive
+ return False
+
+
+def nuke_subprocess(subproc):
+ # check if the subprocess is still alive, first
+ if subproc.poll() is not None:
+ return subproc.poll()
+
+ # the process has not terminated within timeout,
+ # kill it via an escalating series of signals.
+ signal_queue = [signal.SIGTERM, signal.SIGKILL]
+ for sig in signal_queue:
+ signal_pid(subproc.pid, sig)
+ if subproc.poll() is not None:
+ return subproc.poll()
+
+
+def nuke_pid(pid, signal_queue=(signal.SIGTERM, signal.SIGKILL)):
+ # the process has not terminated within timeout,
+ # kill it via an escalating series of signals.
+ for sig in signal_queue:
+ if signal_pid(pid, sig):
+ return
+
+ # no signal successfully terminated the process
+ raise error.AutoservRunError('Could not kill %d' % pid, None)
+
+
+def system(command, timeout=None, ignore_status=False):
+ """
+ Run a command
+
+ @param timeout: timeout in seconds
+ @param ignore_status: if ignore_status=False, throw an exception if the
+ command's exit code is non-zero
+ if ignore_status=True, return the exit code.
+
+ @return exit status of command
+ (note, this will always be zero unless ignore_status=True)
+ """
+ return run(command, timeout=timeout, ignore_status=ignore_status,
+ stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS).exit_status
+
+
+def system_parallel(commands, timeout=None, ignore_status=False):
+ """This function returns a list of exit statuses for the respective
+ list of commands."""
+ return [bg_jobs.exit_status for bg_jobs in
+ run_parallel(commands, timeout=timeout, ignore_status=ignore_status,
+ stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)]
+
+
+def system_output(command, timeout=None, ignore_status=False,
+ retain_output=False, args=()):
+ """
+ Run a command and return the stdout output.
+
+ @param command: command string to execute.
+ @param timeout: time limit in seconds before attempting to kill the
+ running process. The function will take a few seconds longer
+ than 'timeout' to complete if it has to kill the process.
+ @param ignore_status: do not raise an exception, no matter what the exit
+ code of the command is.
+ @param retain_output: set to True to make stdout/stderr of the command
+ output to be also sent to the logging system
+ @param args: sequence of strings of arguments to be given to the command
+ inside " quotes after they have been escaped for that; each
+ element in the sequence will be given as a separate command
+ argument
+
+ @return a string with the stdout output of the command.
+ """
+ if retain_output:
+ out = run(command, timeout=timeout, ignore_status=ignore_status,
+ stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS,
+ args=args).stdout
+ else:
+ out = run(command, timeout=timeout, ignore_status=ignore_status,
+ args=args).stdout
+ if out[-1:] == '\n':
+ out = out[:-1]
+ return out
+
+
+def system_output_parallel(commands, timeout=None, ignore_status=False,
+ retain_output=False):
+ if retain_output:
+ out = [bg_job.stdout for bg_job
+ in run_parallel(commands, timeout=timeout,
+ ignore_status=ignore_status,
+ stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)]
+ else:
+ out = [bg_job.stdout for bg_job in run_parallel(commands,
+ timeout=timeout, ignore_status=ignore_status)]
+ # strip one trailing newline from each command's output, if present
+ for i, x in enumerate(out):
+ if x[-1:] == '\n':
+ out[i] = x[:-1]
+ return out
+
+
+def strip_unicode(input):
+ if type(input) == list:
+ return [strip_unicode(i) for i in input]
+ elif type(input) == dict:
+ output = {}
+ for key in input.keys():
+ output[str(key)] = strip_unicode(input[key])
+ return output
+ elif type(input) == unicode:
+ return str(input)
+ else:
+ return input
+
+
+def get_cpu_percentage(function, *args, **dargs):
+ """Returns a tuple containing the CPU% and return value from function call.
+
+ This function calculates the usage time by taking the difference of
+ the user and system times both before and after the function call.
+ """
+ child_pre = resource.getrusage(resource.RUSAGE_CHILDREN)
+ self_pre = resource.getrusage(resource.RUSAGE_SELF)
+ start = time.time()
+ to_return = function(*args, **dargs)
+ elapsed = time.time() - start
+ self_post = resource.getrusage(resource.RUSAGE_SELF)
+ child_post = resource.getrusage(resource.RUSAGE_CHILDREN)
+
+ # Calculate CPU Percentage
+ s_user, s_system = [a - b for a, b in zip(self_post, self_pre)[:2]]
+ c_user, c_system = [a - b for a, b in zip(child_post, child_pre)[:2]]
+ cpu_percent = (s_user + c_user + s_system + c_system) / elapsed
+
+ return cpu_percent, to_return
+
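+# A minimal sketch of get_cpu_percentage(); sleeping burns almost no CPU,
+# so the fraction is near zero (the exact value is timing-dependent):
+#
+# cpu_frac, ret = get_cpu_percentage(time.sleep, 0.5)
+# # cpu_frac ~ 0.0, ret is None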
+
+class SystemLoad(object):
+ """
+ Get system and/or process values and return average value of load.
+ """
+ def __init__(self, pids, advanced=False, time_step=0.1, cpu_cont=False,
+ use_log=False):
+ """
+ @param pids: List of pids to be monitored. If pid == 0, the whole
+ system will be monitored.
+ @param advanced: If True, also monitor the system IRQ and soft IRQ
+ counts, and the process minor and major page faults.
+ @param time_step: Time step for continuous monitoring.
+ @param cpu_cont: If True monitor CPU load continuously.
+ @param use_log: If True, every measurement is logged for later dump.
+ """
+ self.pids = []
+ self.stats = {}
+ for pid in pids:
+ if pid == 0:
+ cpu = FileFieldMonitor("/proc/stat",
+ [("cpu", 0), # User Time
+ ("cpu", 2), # System Time
+ ("intr", 0), # IRQ Count
+ ("softirq", 0)], # Soft IRQ Count
+ True,
+ cpu_cont,
+ use_log,
+ " +",
+ time_step)
+ mem = FileFieldMonitor("/proc/meminfo",
+ [("MemTotal:", 0), # Mem Total
+ ("MemFree:", 0), # Mem Free
+ ("Buffers:", 0), # Buffers
+ ("Cached:", 0)], # Cached
+ False,
+ True,
+ use_log,
+ " +",
+ time_step)
+ self.stats[pid] = ["TOTAL", cpu, mem]
+ self.pids.append(pid)
+ else:
+ name = ""
+ if (type(pid) is int):
+ self.pids.append(pid)
+ name = get_process_name(pid)
+ else:
+ self.pids.append(pid[0])
+ name = pid[1]
+
+ cpu = FileFieldMonitor("/proc/%d/stat" %
+ self.pids[-1],
+ [("", 13), # User Time
+ ("", 14), # System Time
+ ("", 9), # Minority Page Fault
+ ("", 11)], # Majority Page Fault
+ True,
+ cpu_cont,
+ use_log,
+ " +",
+ time_step)
+ mem = FileFieldMonitor("/proc/%d/status" %
+ self.pids[-1],
+ [("VmSize:", 0), # Virtual Memory Size
+ ("VmRSS:", 0), # Resident Set Size
+ ("VmPeak:", 0), # Peak VM Size
+ ("VmSwap:", 0)], # VM in Swap
+ False,
+ True,
+ use_log,
+ " +",
+ time_step)
+ self.stats[self.pids[-1]] = [name, cpu, mem]
+
+ self.advanced = advanced
+
+
+ def __str__(self):
+ """
+ Define how the monitored status is formatted for printing.
+ """
+ out = ""
+ for pid in self.pids:
+ for stat in self.stats[pid][1:]:
+ out += str(stat.get_status()) + "\n"
+ return out
+
+
+ def start(self, pids=[]):
+ """
+ Start monitoring of the process system usage.
+ @param pids: List of PIDs you intend to control. Use pids=[] to control
+ all defined PIDs.
+ """
+ if pids == []:
+ pids = self.pids
+
+ for pid in pids:
+ for stat in self.stats[pid][1:]:
+ stat.start()
+
+
+ def stop(self, pids=[]):
+ """
+ Stop monitoring of the process system usage.
+ @param pids: List of PIDs you intend to control. Use pids=[] to control
+ all defined PIDs.
+ """
+ if pids == []:
+ pids = self.pids
+
+ for pid in pids:
+ for stat in self.stats[pid][1:]:
+ stat.stop()
+
+
+ def dump(self, pids=[]):
+ """
+ Get the status of monitoring.
+ @param pids: List of PIDs you intend to control. Use pids=[] to control
+ all defined PIDs.
+ @return:
+ tuple([cpu load], [memory load]):
+ ([(PID1, (PID1_cpu_meas)), (PID2, (PID2_cpu_meas)), ...],
+ [(PID1, (PID1_mem_meas)), (PID2, (PID2_mem_meas)), ...])
+
+ PID1_cpu_meas:
+ average_values[], test_time, cont_meas_values[[]], time_step
+ PID1_mem_meas:
+ average_values[], test_time, cont_meas_values[[]], time_step
+ where average_values[] are the measured values (mem_free,swap,...)
+ which are described in SystemLoad.__init__()-FileFieldMonitor.
+ cont_meas_values[[]] is a list of average_values in the sampling
+ times.
+ """
+ if pids == []:
+ pids = self.pids
+
+ cpus = []
+ memory = []
+ for pid in pids:
+ stat = (pid, self.stats[pid][1].get_status())
+ cpus.append(stat)
+ for pid in pids:
+ stat = (pid, self.stats[pid][2].get_status())
+ memory.append(stat)
+
+ return (cpus, memory)
+
+
+ def get_cpu_status_string(self, pids=[]):
+ """
+ Convert status to string array.
+ @param pids: List of PIDs you intend to control. Use pids=[] to control
+ all defined PIDs.
+ @return: Status formatted as a table string.
+ """
+ if pids == []:
+ pids = self.pids
+
+ headers = ["NAME",
+ ("%7s") % "PID",
+ ("%5s") % "USER",
+ ("%5s") % "SYS",
+ ("%5s") % "SUM"]
+ if self.advanced:
+ headers.extend(["MINFLT/IRQC",
+ "MAJFLT/SOFTIRQ"])
+ headers.append(("%11s") % "TIME")
+ textstatus = []
+ for pid in pids:
+ stat = self.stats[pid][1].get_status()
+ time = stat[1]
+ stat = stat[0]
+ textstatus.append(["%s" % self.stats[pid][0],
+ "%7s" % pid,
+ "%4.0f%%" % (stat[0] / time),
+ "%4.0f%%" % (stat[1] / time),
+ "%4.0f%%" % ((stat[0] + stat[1]) / time),
+ "%10.3fs" % time])
+ if self.advanced:
+ textstatus[-1].insert(-1, "%11d" % stat[2])
+ textstatus[-1].insert(-1, "%14d" % stat[3])
+
+ return matrix_to_string(textstatus, tuple(headers))
+
+
+ def get_mem_status_string(self, pids=[]):
+ """
+ Convert status to string array.
+ @param pids: List of PIDs you intend to control. Use pids=[] to control
+ all defined PIDs.
+ @return: Status formatted as a table string.
+ """
+ if pids == []:
+ pids = self.pids
+
+ headers = ["NAME",
+ ("%7s") % "PID",
+ ("%8s") % "TOTAL/VMSIZE",
+ ("%8s") % "FREE/VMRSS",
+ ("%8s") % "BUFFERS/VMPEAK",
+ ("%8s") % "CACHED/VMSWAP",
+ ("%11s") % "TIME"]
+ textstatus = []
+ for pid in pids:
+ stat = self.stats[pid][2].get_status()
+ time = stat[1]
+ stat = stat[0]
+ textstatus.append(["%s" % self.stats[pid][0],
+ "%7s" % pid,
+ "%10dMB" % (stat[0] / 1024),
+ "%8dMB" % (stat[1] / 1024),
+ "%12dMB" % (stat[2] / 1024),
+ "%11dMB" % (stat[3] / 1024),
+ "%10.3fs" % time])
+
+ return matrix_to_string(textstatus, tuple(headers))
+
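+# A minimal usage sketch of SystemLoad: monitor the whole system (pid 0)
+# plus one hypothetical pid, then print the formatted tables:
+#
+# load = SystemLoad([0, 1234])
+# load.start()
+# ... # run the workload under measurement
+# load.stop()
+# print load.get_cpu_status_string()
+# print load.get_mem_status_string()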
+
+def get_arch(run_function=run):
+ """
+ Get the hardware architecture of the machine.
+ run_function is used to execute the commands. It defaults to
+ utils.run() but a custom method (if provided) should be of the
+ same schema as utils.run. It should return a CmdResult object and
+ throw a CmdError exception.
+ """
+ arch = run_function('/bin/uname -m').stdout.rstrip()
+ if re.match(r'i\d86$', arch):
+ arch = 'i386'
+ return arch
+
+
+def get_num_logical_cpus_per_socket(run_function=run):
+ """
+ Get the number of logical CPUs (including hyperthreading) per socket.
+ run_function is used to execute the commands. It defaults to
+ utils.run() but a custom method (if provided) should be of the
+ same schema as utils.run. It should return a CmdResult object and
+ throw a CmdError exception.
+ """
+ siblings = run_function('grep "^siblings" /proc/cpuinfo').stdout.rstrip()
+ num_siblings = map(int,
+ re.findall(r'^siblings\s*:\s*(\d+)\s*$',
+ siblings, re.M))
+ if len(num_siblings) == 0:
+ raise error.TestError('Unable to find siblings info in /proc/cpuinfo')
+ if min(num_siblings) != max(num_siblings):
+ raise error.TestError('Number of siblings differ %r' %
+ num_siblings)
+ return num_siblings[0]
+
+
+def merge_trees(src, dest):
+ """
+ Merges a source directory tree at 'src' into a destination tree at
+ 'dest'. If a path is a file in both trees then the file in the source
+ tree is APPENDED to the one in the destination tree. If a path is
+ a directory in both trees then the directories are recursively merged
+ with this function. In any other case, the function will skip the
+ paths that cannot be merged (instead of failing).
+ """
+ if not os.path.exists(src):
+ return # exists only in dest
+ elif not os.path.exists(dest):
+ if os.path.isfile(src):
+ shutil.copy2(src, dest) # file only in src
+ else:
+ shutil.copytree(src, dest, symlinks=True) # dir only in src
+ return
+ elif os.path.isfile(src) and os.path.isfile(dest):
+ # src & dest are files in both trees, append src to dest
+ destfile = open(dest, "a")
+ try:
+ srcfile = open(src)
+ try:
+ destfile.write(srcfile.read())
+ finally:
+ srcfile.close()
+ finally:
+ destfile.close()
+ elif os.path.isdir(src) and os.path.isdir(dest):
+ # src & dest are directories in both trees, so recursively merge
+ for name in os.listdir(src):
+ merge_trees(os.path.join(src, name), os.path.join(dest, name))
+ else:
+ # src & dest both exist, but are incompatible
+ return
+
+
+class CmdResult(object):
+ """
+ Command execution result.
+
+ command: String containing the command line itself
+ exit_status: Integer exit code of the process
+ stdout: String containing stdout of the process
+ stderr: String containing stderr of the process
+ duration: Elapsed wall clock time running the process
+ """
+
+
+ def __init__(self, command="", stdout="", stderr="",
+ exit_status=None, duration=0):
+ self.command = command
+ self.exit_status = exit_status
+ self.stdout = stdout
+ self.stderr = stderr
+ self.duration = duration
+
+
+ def __repr__(self):
+ wrapper = textwrap.TextWrapper(width = 78,
+ initial_indent="\n ",
+ subsequent_indent=" ")
+
+ stdout = self.stdout.rstrip()
+ if stdout:
+ stdout = "\nstdout:\n%s" % stdout
+
+ stderr = self.stderr.rstrip()
+ if stderr:
+ stderr = "\nstderr:\n%s" % stderr
+
+ return ("* Command: %s\n"
+ "Exit status: %s\n"
+ "Duration: %s\n"
+ "%s"
+ "%s"
+ % (wrapper.fill(self.command), self.exit_status,
+ self.duration, stdout, stderr))
+
+
+class run_randomly:
+ def __init__(self, run_sequentially=False):
+ # Run sequentially is for debugging control files
+ self.test_list = []
+ self.run_sequentially = run_sequentially
+
+
+ def add(self, *args, **dargs):
+ test = (args, dargs)
+ self.test_list.append(test)
+
+
+ def run(self, fn):
+ while self.test_list:
+ test_index = random.randint(0, len(self.test_list)-1)
+ if self.run_sequentially:
+ test_index = 0
+ (args, dargs) = self.test_list.pop(test_index)
+ fn(*args, **dargs)
+
+
+def import_site_module(path, module, dummy=None, modulefile=None):
+ """
+ Try to import the site specific module if it exists.
+
+ @param path full filename of the source file calling this (ie __file__)
+ @param module full module name
+ @param dummy dummy value to return in case there is no symbol to import
+ @param modulefile module filename
+
+ @return site specific module or dummy
+
+ @raises ImportError if the site file exists but imports fails
+ """
+ short_module = module[module.rfind(".") + 1:]
+
+ if not modulefile:
+ modulefile = short_module + ".py"
+
+ if os.path.exists(os.path.join(os.path.dirname(path), modulefile)):
+ return __import__(module, {}, {}, [short_module])
+ return dummy
+
+
+def import_site_symbol(path, module, name, dummy=None, modulefile=None):
+ """
+ Try to import site specific symbol from site specific file if it exists
+
+ @param path full filename of the source file calling this (ie __file__)
+ @param module full module name
+ @param name symbol name to be imported from the site file
+ @param dummy dummy value to return in case there is no symbol to import
+ @param modulefile module filename
+
+ @return site specific symbol or dummy
+
+ @raises ImportError if the site file exists but imports fails
+ """
+ module = import_site_module(path, module, modulefile=modulefile)
+ if not module:
+ return dummy
+
+ # special unique value to tell us if the symbol can't be imported
+ cant_import = object()
+
+ obj = getattr(module, name, cant_import)
+ if obj is cant_import:
+ logging.debug("unable to import site symbol '%s', using non-site "
+ "implementation", name)
+ return dummy
+
+ return obj
+
+
+def import_site_class(path, module, classname, baseclass, modulefile=None):
+ """
+ Try to import site specific class from site specific file if it exists
+
+ Args:
+ path: full filename of the source file calling this (ie __file__)
+ module: full module name
+ classname: class name to be loaded from site file
+ baseclass: base class object to return when no site file present or
+ to mixin when site class exists but is not inherited from baseclass
+ modulefile: module filename
+
+ Returns: baseclass if site specific class does not exist, the site specific
+ class if it exists and is inherited from baseclass or a mixin of the
+ site specific class and baseclass when the site specific class exists
+ and is not inherited from baseclass
+
+ Raises: ImportError if the site file exists but imports fails
+ """
+
+ res = import_site_symbol(path, module, classname, None, modulefile)
+ if res:
+ if not issubclass(res, baseclass):
+ # if not a subclass of baseclass then mix in baseclass with the
+ # site specific class object and return the result
+ res = type(classname, (res, baseclass), {})
+ else:
+ res = baseclass
+
+ return res
+
+
+def import_site_function(path, module, funcname, dummy, modulefile=None):
+ """
+ Try to import site specific function from site specific file if it exists
+
+ Args:
+ path: full filename of the source file calling this (ie __file__)
+ module: full module name
+ funcname: function name to be imported from site file
+ dummy: dummy function to return in case there is no function to import
+ modulefile: module filename
+
+ Returns: site specific function object or dummy
+
+ Raises: ImportError if the site file exists but imports fails
+ """
+
+ return import_site_symbol(path, module, funcname, dummy, modulefile)
+
+
+def _get_pid_path(program_name):
+ my_path = os.path.dirname(__file__)
+ return os.path.abspath(os.path.join(my_path, "..", "..",
+ "%s.pid" % program_name))
+
+
+def write_pid(program_name):
+ """
+ Try to drop <program_name>.pid in the main autotest directory.
+
+ Args:
+ program_name: prefix for file name
+ """
+ pidfile = open(_get_pid_path(program_name), "w")
+ try:
+ pidfile.write("%s\n" % os.getpid())
+ finally:
+ pidfile.close()
+
+
+def delete_pid_file_if_exists(program_name):
+ """
+ Tries to remove <program_name>.pid from the main autotest directory.
+ """
+ pidfile_path = _get_pid_path(program_name)
+
+ try:
+ os.remove(pidfile_path)
+ except OSError:
+ if not os.path.exists(pidfile_path):
+ return
+ raise
+
+
+def get_pid_from_file(program_name):
+ """
+ Reads the pid from <program_name>.pid in the autotest directory.
+
+ @param program_name the name of the program
+ @return the pid if the file exists, None otherwise.
+ """
+ pidfile_path = _get_pid_path(program_name)
+ if not os.path.exists(pidfile_path):
+ return None
+
+ pidfile = open(_get_pid_path(program_name), 'r')
+
+ try:
+ try:
+ pid = int(pidfile.readline())
+ except IOError:
+ if not os.path.exists(pidfile_path):
+ return None
+ raise
+ finally:
+ pidfile.close()
+
+ return pid
+
+
+def get_process_name(pid):
+ """
+ Get process name from PID.
+ @param pid: PID of process.
+ """
+ return get_field(read_file("/proc/%d/stat" % pid), 1)[1:-1]
+
+
+def program_is_alive(program_name):
+ """
+ Checks if the process is alive and not in Zombie state.
+
+ @param program_name the name of the program
+ @return True if still alive, False otherwise
+ """
+ pid = get_pid_from_file(program_name)
+ if pid is None:
+ return False
+ return pid_is_alive(pid)
+
+
+def signal_program(program_name, sig=signal.SIGTERM):
+ """
+ Sends a signal to the process listed in <program_name>.pid
+
+ @param program_name the name of the program
+ @param sig signal to send
+ """
+ pid = get_pid_from_file(program_name)
+ if pid:
+ signal_pid(pid, sig)
+
+
+def get_relative_path(path, reference):
+ """Given 2 absolute paths "path" and "reference", compute the path of
+ "path" as relative to the directory "reference".
+
+ @param path the absolute path to convert to a relative path
+ @param reference an absolute directory path to which the relative
+ path will be computed
+ """
+ # normalize the paths (remove double slashes, etc)
+ assert(os.path.isabs(path))
+ assert(os.path.isabs(reference))
+
+ path = os.path.normpath(path)
+ reference = os.path.normpath(reference)
+
+ # we could use os.path.split() but it splits from the end
+ path_list = path.split(os.path.sep)[1:]
+ ref_list = reference.split(os.path.sep)[1:]
+
+ # find the longest leading common path
+ for i in xrange(min(len(path_list), len(ref_list))):
+ if path_list[i] != ref_list[i]:
+ # decrement i so when exiting this loop either by no match or by
+ # end of range we are one step behind
+ i -= 1
+ break
+ i += 1
+ # drop the common part of the paths, not interested in that anymore
+ del path_list[:i]
+
+ # for each uncommon component in the reference prepend a ".."
+ path_list[:0] = ['..'] * (len(ref_list) - i)
+
+ return os.path.join(*path_list)
+
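+# A short worked example of get_relative_path(): 'a' is the common
+# prefix, so one ".." leads out of '/a/d' before descending into b/c:
+#
+# get_relative_path('/a/b/c', '/a/d') == '../b/c'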
+
+def sh_escape(command):
+ """
+ Escape special characters from a command so that it can be passed
+ as a double quoted (" ") string in a (ba)sh command.
+
+ Args:
+ command: the command string to escape.
+
+ Returns:
+ The escaped command string. The required enclosing double
+ quotes are NOT added and so should be added at some point by
+ the caller.
+
+ See also: http://www.tldp.org/LDP/abs/html/escapingsection.html
+ """
+ command = command.replace("\\", "\\\\")
+ command = command.replace("$", r'\$')
+ command = command.replace('"', r'\"')
+ command = command.replace('`', r'\`')
+ return command
+
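+# A minimal sketch of sh_escape(); backslashes are doubled first, then
+# $, " and ` are escaped so the result is safe inside double quotes:
+#
+# sh_escape('say "$1"') == r'say \"\$1\"'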
+
+def configure(extra=None, configure='./configure'):
+ """
+ Run configure passing in the correct host, build, and target options.
+
+ @param extra: extra command line arguments to pass to configure
+ @param configure: which configure script to use
+ """
+ args = []
+ if 'CHOST' in os.environ:
+ args.append('--host=' + os.environ['CHOST'])
+ if 'CBUILD' in os.environ:
+ args.append('--build=' + os.environ['CBUILD'])
+ if 'CTARGET' in os.environ:
+ args.append('--target=' + os.environ['CTARGET'])
+ if extra:
+ args.append(extra)
+
+ system('%s %s' % (configure, ' '.join(args)))
+
+
+def make(extra='', make='make', timeout=None, ignore_status=False):
+ """
+ Run make, adding MAKEOPTS to the list of options.
+
+ @param extra: extra command line arguments to pass to make.
+ """
+ cmd = '%s %s %s' % (make, os.environ.get('MAKEOPTS', ''), extra)
+ return system(cmd, timeout=timeout, ignore_status=ignore_status)
+
+
+def compare_versions(ver1, ver2):
+ """Version number comparison between ver1 and ver2 strings.
+
+ >>> compare_versions("1", "2")
+ -1
+ >>> compare_versions("foo-1.1", "foo-1.2")
+ -1
+ >>> compare_versions("1.2", "1.2a")
+ -1
+ >>> compare_versions("1.2b", "1.2a")
+ 1
+ >>> compare_versions("1.3.5.3a", "1.3.5.3b")
+ -1
+
+ Args:
+ ver1: version string
+ ver2: version string
+
+ Returns:
+ int: 1 if ver1 > ver2
+ 0 if ver1 == ver2
+ -1 if ver1 < ver2
+ """
+ ax = re.split('[.-]', ver1)
+ ay = re.split('[.-]', ver2)
+ while len(ax) > 0 and len(ay) > 0:
+ cx = ax.pop(0)
+ cy = ay.pop(0)
+ maxlen = max(len(cx), len(cy))
+ c = cmp(cx.zfill(maxlen), cy.zfill(maxlen))
+ if c != 0:
+ return c
+ return cmp(len(ax), len(ay))
+
+
+def args_to_dict(args):
+ """Convert autoserv extra arguments in the form of key=val or key:val to a
+ dictionary. Each argument key is lowercased in the resulting dictionary.
+
+ Args:
+ args - list of autoserv extra arguments.
+
+ Returns:
+ dictionary
+ """
+ arg_re = re.compile(r'(\w+)[:=](.*)$')
+ dict = {}
+ for arg in args:
+ match = arg_re.match(arg)
+ if match:
+ dict[match.group(1).lower()] = match.group(2)
+ else:
+ logging.warning("args_to_dict: argument '%s' doesn't match "
+ "'%s' pattern. Ignored." % (arg, arg_re.pattern))
+ return dict
+
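+# A minimal sketch of args_to_dict(); both key=val and key:val forms are
+# accepted and keys are lowercased:
+#
+# args_to_dict(['BOARD=x86-generic', 'count:3', 'bogus'])
+# # == {'board': 'x86-generic', 'count': '3'} (a warning is logged
+# # for 'bogus', which matches neither form)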
+
+def get_unused_port():
+ """
+ Finds a semi-random available port. A race condition is still
+ possible after the port number is returned, if another process
+ happens to bind it.
+
+ Returns:
+ A port number that is unused on both TCP and UDP.
+ """
+
+ def try_bind(port, socket_type, socket_proto):
+ s = socket.socket(socket.AF_INET, socket_type, socket_proto)
+ try:
+ try:
+ s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ s.bind(('', port))
+ return s.getsockname()[1]
+ except socket.error:
+ return None
+ finally:
+ s.close()
+
+ # On the 2.6 kernel, calling try_bind() on UDP socket returns the
+ # same port over and over. So always try TCP first.
+ while True:
+ # Ask the OS for an unused port.
+ port = try_bind(0, socket.SOCK_STREAM, socket.IPPROTO_TCP)
+ # Check if this port is unused on the other protocol.
+ if port and try_bind(port, socket.SOCK_DGRAM, socket.IPPROTO_UDP):
+ return port
diff --git a/client/common_lib/base_utils_unittest.py b/client/common_lib/base_utils_unittest.py
new file mode 100755
index 0000000..de5983c
--- /dev/null
+++ b/client/common_lib/base_utils_unittest.py
@@ -0,0 +1,799 @@
+#!/usr/bin/python
+
+import os, unittest, StringIO, socket, urllib2, shutil, subprocess, logging
+
+import common
+from autotest_lib.client.common_lib import base_utils, autotemp
+from autotest_lib.client.common_lib.test_utils import mock
+
+
+class test_read_one_line(unittest.TestCase):
+ def setUp(self):
+ self.god = mock.mock_god()
+ self.god.stub_function(base_utils, "open")
+
+
+ def tearDown(self):
+ self.god.unstub_all()
+
+
+ def test_ip_to_long(self):
+ self.assertEqual(base_utils.ip_to_long('0.0.0.0'), 0)
+ self.assertEqual(base_utils.ip_to_long('255.255.255.255'), 4294967295)
+ self.assertEqual(base_utils.ip_to_long('192.168.0.1'), 3232235521)
+ self.assertEqual(base_utils.ip_to_long('1.2.4.8'), 16909320)
+
+
+ def test_long_to_ip(self):
+ self.assertEqual(base_utils.long_to_ip(0), '0.0.0.0')
+ self.assertEqual(base_utils.long_to_ip(4294967295), '255.255.255.255')
+ self.assertEqual(base_utils.long_to_ip(3232235521), '192.168.0.1')
+ self.assertEqual(base_utils.long_to_ip(16909320), '1.2.4.8')
+
+
+ def test_create_subnet_mask(self):
+ self.assertEqual(base_utils.create_subnet_mask(0), 0)
+ self.assertEqual(base_utils.create_subnet_mask(32), 4294967295)
+ self.assertEqual(base_utils.create_subnet_mask(25), 4294967168)
+
+
+ def test_format_ip_with_mask(self):
+ self.assertEqual(base_utils.format_ip_with_mask('192.168.0.1', 0),
+ '0.0.0.0/0')
+ self.assertEqual(base_utils.format_ip_with_mask('192.168.0.1', 32),
+ '192.168.0.1/32')
+ self.assertEqual(base_utils.format_ip_with_mask('192.168.0.1', 26),
+ '192.168.0.0/26')
+ self.assertEqual(base_utils.format_ip_with_mask('192.168.0.255', 26),
+ '192.168.0.192/26')
+
+
+ def create_test_file(self, contents):
+ test_file = StringIO.StringIO(contents)
+ base_utils.open.expect_call("filename", "r").and_return(test_file)
+
+
+ def test_reads_one_line_file(self):
+ self.create_test_file("abc\n")
+ self.assertEqual("abc", base_utils.read_one_line("filename"))
+ self.god.check_playback()
+
+
+ def test_strips_read_lines(self):
+ self.create_test_file("abc \n")
+ self.assertEqual("abc ", base_utils.read_one_line("filename"))
+ self.god.check_playback()
+
+
+ def test_drops_extra_lines(self):
+ self.create_test_file("line 1\nline 2\nline 3\n")
+ self.assertEqual("line 1", base_utils.read_one_line("filename"))
+ self.god.check_playback()
+
+
+ def test_works_on_empty_file(self):
+ self.create_test_file("")
+ self.assertEqual("", base_utils.read_one_line("filename"))
+ self.god.check_playback()
+
+
+ def test_works_on_file_with_no_newlines(self):
+ self.create_test_file("line but no newline")
+ self.assertEqual("line but no newline",
+ base_utils.read_one_line("filename"))
+ self.god.check_playback()
+
+
+ def test_preserves_leading_whitespace(self):
+ self.create_test_file(" has leading whitespace")
+ self.assertEqual(" has leading whitespace",
+ base_utils.read_one_line("filename"))
+
+
+class test_write_one_line(unittest.TestCase):
+ def setUp(self):
+ self.god = mock.mock_god()
+ self.god.stub_function(base_utils, "open")
+
+
+ def tearDown(self):
+ self.god.unstub_all()
+
+
+ def get_write_one_line_output(self, content):
+ test_file = mock.SaveDataAfterCloseStringIO()
+ base_utils.open.expect_call("filename", "w").and_return(test_file)
+ base_utils.write_one_line("filename", content)
+ self.god.check_playback()
+ return test_file.final_data
+
+
+ def test_writes_one_line_file(self):
+ self.assertEqual("abc\n", self.get_write_one_line_output("abc"))
+
+
+ def test_preserves_existing_newline(self):
+ self.assertEqual("abc\n", self.get_write_one_line_output("abc\n"))
+
+
+ def test_preserves_leading_whitespace(self):
+ self.assertEqual(" abc\n", self.get_write_one_line_output(" abc"))
+
+
+ def test_preserves_trailing_whitespace(self):
+ self.assertEqual("abc \n", self.get_write_one_line_output("abc "))
+
+
+ def test_handles_empty_input(self):
+ self.assertEqual("\n", self.get_write_one_line_output(""))
+
+
+class test_open_write_close(unittest.TestCase):
+ def setUp(self):
+ self.god = mock.mock_god()
+ self.god.stub_function(base_utils, "open")
+
+
+ def tearDown(self):
+ self.god.unstub_all()
+
+
+ def test_simple_functionality(self):
+ data = "\n\nwhee\n"
+ test_file = mock.SaveDataAfterCloseStringIO()
+ base_utils.open.expect_call("filename", "w").and_return(test_file)
+ base_utils.open_write_close("filename", data)
+ self.god.check_playback()
+ self.assertEqual(data, test_file.final_data)
+
+
+class test_read_keyval(unittest.TestCase):
+ def setUp(self):
+ self.god = mock.mock_god()
+ self.god.stub_function(base_utils, "open")
+ self.god.stub_function(os.path, "isdir")
+ self.god.stub_function(os.path, "exists")
+
+
+ def tearDown(self):
+ self.god.unstub_all()
+
+
+ def create_test_file(self, filename, contents):
+ test_file = StringIO.StringIO(contents)
+ os.path.exists.expect_call(filename).and_return(True)
+ base_utils.open.expect_call(filename).and_return(test_file)
+
+
+ def read_keyval(self, contents):
+ os.path.isdir.expect_call("file").and_return(False)
+ self.create_test_file("file", contents)
+ keyval = base_utils.read_keyval("file")
+ self.god.check_playback()
+ return keyval
+
+
+ def test_returns_empty_when_file_doesnt_exist(self):
+ os.path.isdir.expect_call("file").and_return(False)
+ os.path.exists.expect_call("file").and_return(False)
+ self.assertEqual({}, base_utils.read_keyval("file"))
+ self.god.check_playback()
+
+
+ def test_accesses_files_directly(self):
+ os.path.isdir.expect_call("file").and_return(False)
+ self.create_test_file("file", "")
+ base_utils.read_keyval("file")
+ self.god.check_playback()
+
+
+ def test_accesses_directories_through_keyval_file(self):
+ os.path.isdir.expect_call("dir").and_return(True)
+ self.create_test_file("dir/keyval", "")
+ base_utils.read_keyval("dir")
+ self.god.check_playback()
+
+
+ def test_values_are_rstripped(self):
+ keyval = self.read_keyval("a=b \n")
+ self.assertEquals(keyval, {"a": "b"})
+
+
+ def test_comments_are_ignored(self):
+ keyval = self.read_keyval("a=b # a comment\n")
+ self.assertEquals(keyval, {"a": "b"})
+
+
+ def test_integers_become_ints(self):
+ keyval = self.read_keyval("a=1\n")
+ self.assertEquals(keyval, {"a": 1})
+ self.assertEquals(int, type(keyval["a"]))
+
+
+ def test_float_values_become_floats(self):
+ keyval = self.read_keyval("a=1.5\n")
+ self.assertEquals(keyval, {"a": 1.5})
+ self.assertEquals(float, type(keyval["a"]))
+
+
+ def test_multiple_lines(self):
+ keyval = self.read_keyval("a=one\nb=two\n")
+ self.assertEquals(keyval, {"a": "one", "b": "two"})
+
+
+ def test_the_last_duplicate_line_is_used(self):
+ keyval = self.read_keyval("a=one\nb=two\na=three\n")
+ self.assertEquals(keyval, {"a": "three", "b": "two"})
+
+
+ def test_extra_equals_are_included_in_values(self):
+ keyval = self.read_keyval("a=b=c\n")
+ self.assertEquals(keyval, {"a": "b=c"})
+
+
+ def test_non_alphanumeric_keynames_are_rejected(self):
+ self.assertRaises(ValueError, self.read_keyval, "a$=one\n")
+
+
+ def test_underscores_are_allowed_in_key_names(self):
+ keyval = self.read_keyval("a_b=value\n")
+ self.assertEquals(keyval, {"a_b": "value"})
+
+
+ def test_dashes_are_allowed_in_key_names(self):
+ keyval = self.read_keyval("a-b=value\n")
+ self.assertEquals(keyval, {"a-b": "value"})
+
+
+class test_write_keyval(unittest.TestCase):
+ def setUp(self):
+ self.god = mock.mock_god()
+ self.god.stub_function(base_utils, "open")
+ self.god.stub_function(os.path, "isdir")
+
+
+ def tearDown(self):
+ self.god.unstub_all()
+
+
+ def assertHasLines(self, value, lines):
+ vlines = value.splitlines()
+ vlines.sort()
+ self.assertEquals(vlines, sorted(lines))
+
+
+ def write_keyval(self, filename, dictionary, expected_filename=None,
+ type_tag=None):
+ if expected_filename is None:
+ expected_filename = filename
+ test_file = StringIO.StringIO()
+ self.god.stub_function(test_file, "close")
+ base_utils.open.expect_call(expected_filename,
+ "a").and_return(test_file)
+ test_file.close.expect_call()
+ if type_tag is None:
+ base_utils.write_keyval(filename, dictionary)
+ else:
+ base_utils.write_keyval(filename, dictionary, type_tag)
+ return test_file.getvalue()
+
+
+ def write_keyval_file(self, dictionary, type_tag=None):
+ os.path.isdir.expect_call("file").and_return(False)
+ return self.write_keyval("file", dictionary, type_tag=type_tag)
+
+
+ def test_accesses_files_directly(self):
+ os.path.isdir.expect_call("file").and_return(False)
+ result = self.write_keyval("file", {"a": "1"})
+ self.assertEquals(result, "a=1\n")
+
+
+ def test_accesses_directories_through_keyval_file(self):
+ os.path.isdir.expect_call("dir").and_return(True)
+ result = self.write_keyval("dir", {"b": "2"}, "dir/keyval")
+ self.assertEquals(result, "b=2\n")
+
+
+ def test_numbers_are_stringified(self):
+ result = self.write_keyval_file({"c": 3})
+ self.assertEquals(result, "c=3\n")
+
+
+ def test_type_tags_are_excluded_by_default(self):
+ result = self.write_keyval_file({"d": "a string"})
+ self.assertEquals(result, "d=a string\n")
+ self.assertRaises(ValueError, self.write_keyval_file,
+ {"d{perf}": "a string"})
+
+
+ def test_perf_tags_are_allowed(self):
+ result = self.write_keyval_file({"a{perf}": 1, "b{perf}": 2},
+ type_tag="perf")
+ self.assertHasLines(result, ["a{perf}=1", "b{perf}=2"])
+ self.assertRaises(ValueError, self.write_keyval_file,
+ {"a": 1, "b": 2}, type_tag="perf")
+
+
+ def test_non_alphanumeric_keynames_are_rejected(self):
+ self.assertRaises(ValueError, self.write_keyval_file, {"x$": 0})
+
+
+ def test_underscores_are_allowed_in_key_names(self):
+ result = self.write_keyval_file({"a_b": "value"})
+ self.assertEquals(result, "a_b=value\n")
+
+
+ def test_dashes_are_allowed_in_key_names(self):
+ result = self.write_keyval_file({"a-b": "value"})
+ self.assertEquals(result, "a-b=value\n")
+
+
+class test_is_url(unittest.TestCase):
+ def test_accepts_http(self):
+ self.assertTrue(base_utils.is_url("http://example.com"))
+
+
+ def test_accepts_ftp(self):
+ self.assertTrue(base_utils.is_url("ftp://ftp.example.com"))
+
+
+ def test_rejects_local_path(self):
+ self.assertFalse(base_utils.is_url("/home/username/file"))
+
+
+ def test_rejects_local_filename(self):
+ self.assertFalse(base_utils.is_url("filename"))
+
+
+ def test_rejects_relative_local_path(self):
+ self.assertFalse(base_utils.is_url("somedir/somesubdir/file"))
+
+
+ def test_rejects_local_path_containing_url(self):
+ self.assertFalse(base_utils.is_url("somedir/http://path/file"))
+
+
+class test_urlopen(unittest.TestCase):
+ def setUp(self):
+ self.god = mock.mock_god()
+
+
+ def tearDown(self):
+ self.god.unstub_all()
+
+
+ def stub_urlopen_with_timeout_comparison(self, test_func, expected_return,
+ *expected_args):
+ expected_args += (None,) * (2 - len(expected_args))
+ def urlopen(url, data=None):
+ self.assertEquals(expected_args, (url,data))
+ test_func(socket.getdefaulttimeout())
+ return expected_return
+ self.god.stub_with(urllib2, "urlopen", urlopen)
+
+
+ def stub_urlopen_with_timeout_check(self, expected_timeout,
+ expected_return, *expected_args):
+ def test_func(timeout):
+ self.assertEquals(timeout, expected_timeout)
+ self.stub_urlopen_with_timeout_comparison(test_func, expected_return,
+ *expected_args)
+
+
+ def test_timeout_set_during_call(self):
+ self.stub_urlopen_with_timeout_check(30, "retval", "url")
+ retval = base_utils.urlopen("url", timeout=30)
+ self.assertEquals(retval, "retval")
+
+
+ def test_timeout_reset_after_call(self):
+ old_timeout = socket.getdefaulttimeout()
+ self.stub_urlopen_with_timeout_check(30, None, "url")
+ try:
+ socket.setdefaulttimeout(1234)
+ base_utils.urlopen("url", timeout=30)
+ self.assertEquals(1234, socket.getdefaulttimeout())
+ finally:
+ socket.setdefaulttimeout(old_timeout)
+
+
+ def test_timeout_set_by_default(self):
+ def test_func(timeout):
+ self.assertTrue(timeout is not None)
+ self.stub_urlopen_with_timeout_comparison(test_func, None, "url")
+ base_utils.urlopen("url")
+
+
+ def test_args_are_untouched(self):
+ self.stub_urlopen_with_timeout_check(30, None, "http://url",
+ "POST data")
+ base_utils.urlopen("http://url", timeout=30, data="POST data")
+
+
+class test_urlretrieve(unittest.TestCase):
+ def setUp(self):
+ self.god = mock.mock_god()
+
+
+ def tearDown(self):
+ self.god.unstub_all()
+
+
+ def test_urlopen_passed_arguments(self):
+ self.god.stub_function(base_utils, "urlopen")
+ self.god.stub_function(base_utils.shutil, "copyfileobj")
+ self.god.stub_function(base_utils, "open")
+
+ url = "url"
+ dest = "somefile"
+ data = object()
+ timeout = 10
+
+ src_file = self.god.create_mock_class(file, "file")
+ dest_file = self.god.create_mock_class(file, "file")
+
+ (base_utils.urlopen.expect_call(url, data=data, timeout=timeout)
+ .and_return(src_file))
+ base_utils.open.expect_call(dest, "wb").and_return(dest_file)
+ base_utils.shutil.copyfileobj.expect_call(src_file, dest_file)
+ dest_file.close.expect_call()
+ src_file.close.expect_call()
+
+ base_utils.urlretrieve(url, dest, data=data, timeout=timeout)
+ self.god.check_playback()
+
+
+class test_merge_trees(unittest.TestCase):
+ # some path-handling helper functions
+ def src(self, *path_segments):
+ return os.path.join(self.src_tree.name, *path_segments)
+
+
+ def dest(self, *path_segments):
+ return os.path.join(self.dest_tree.name, *path_segments)
+
+
+ def paths(self, *path_segments):
+ return self.src(*path_segments), self.dest(*path_segments)
+
+
+ def assertFileEqual(self, *path_segments):
+ src, dest = self.paths(*path_segments)
+ self.assertEqual(True, os.path.isfile(src))
+ self.assertEqual(True, os.path.isfile(dest))
+ self.assertEqual(os.path.getsize(src), os.path.getsize(dest))
+ self.assertEqual(open(src).read(), open(dest).read())
+
+
+ def assertFileContents(self, contents, *path_segments):
+ dest = self.dest(*path_segments)
+ self.assertEqual(True, os.path.isfile(dest))
+ self.assertEqual(os.path.getsize(dest), len(contents))
+ self.assertEqual(contents, open(dest).read())
+
+
+ def setUp(self):
+ self.src_tree = autotemp.tempdir(unique_id='utilsrc')
+ self.dest_tree = autotemp.tempdir(unique_id='utilsdest')
+
+ # empty subdirs
+ os.mkdir(self.src("empty"))
+ os.mkdir(self.dest("empty"))
+
+
+ def tearDown(self):
+ self.src_tree.clean()
+ self.dest_tree.clean()
+
+
+ def test_both_dont_exist(self):
+ base_utils.merge_trees(*self.paths("empty"))
+
+
+ def test_file_only_at_src(self):
+ print >> open(self.src("src_only"), "w"), "line 1"
+ base_utils.merge_trees(*self.paths("src_only"))
+ self.assertFileEqual("src_only")
+
+
+ def test_file_only_at_dest(self):
+ print >> open(self.dest("dest_only"), "w"), "line 1"
+ base_utils.merge_trees(*self.paths("dest_only"))
+ self.assertEqual(False, os.path.exists(self.src("dest_only")))
+ self.assertFileContents("line 1\n", "dest_only")
+
+
+ def test_file_at_both(self):
+ print >> open(self.dest("in_both"), "w"), "line 1"
+ print >> open(self.src("in_both"), "w"), "line 2"
+ base_utils.merge_trees(*self.paths("in_both"))
+ self.assertFileContents("line 1\nline 2\n", "in_both")
+
+
+ def test_directory_with_files_in_both(self):
+ print >> open(self.dest("in_both"), "w"), "line 1"
+ print >> open(self.src("in_both"), "w"), "line 3"
+ base_utils.merge_trees(*self.paths())
+ self.assertFileContents("line 1\nline 3\n", "in_both")
+
+
+ def test_directory_with_mix_of_files(self):
+ print >> open(self.dest("in_dest"), "w"), "dest line"
+ print >> open(self.src("in_src"), "w"), "src line"
+ base_utils.merge_trees(*self.paths())
+ self.assertFileContents("dest line\n", "in_dest")
+ self.assertFileContents("src line\n", "in_src")
+
+
+ def test_directory_with_subdirectories(self):
+ os.mkdir(self.src("src_subdir"))
+ print >> open(self.src("src_subdir", "subfile"), "w"), "subdir line"
+ os.mkdir(self.src("both_subdir"))
+ os.mkdir(self.dest("both_subdir"))
+ print >> open(self.src("both_subdir", "subfile"), "w"), "src line"
+ print >> open(self.dest("both_subdir", "subfile"), "w"), "dest line"
+ base_utils.merge_trees(*self.paths())
+ self.assertFileContents("subdir line\n", "src_subdir", "subfile")
+ self.assertFileContents("dest line\nsrc line\n", "both_subdir",
+ "subfile")
+
+
+class test_get_relative_path(unittest.TestCase):
+ def test_not_absolute(self):
+ self.assertRaises(AssertionError,
+ base_utils.get_relative_path, "a", "b")
+
+ def test_same_dir(self):
+ self.assertEqual(base_utils.get_relative_path("/a/b/c", "/a/b"), "c")
+
+ def test_forward_dir(self):
+ self.assertEqual(base_utils.get_relative_path("/a/b/c/d", "/a/b"),
+ "c/d")
+
+ def test_previous_dir(self):
+ self.assertEqual(base_utils.get_relative_path("/a/b", "/a/b/c/d"),
+ "../..")
+
+ def test_parallel_dir(self):
+ self.assertEqual(base_utils.get_relative_path("/a/c/d", "/a/b/c/d"),
+ "../../../c/d")
+
+
+class test_sh_escape(unittest.TestCase):
+ def _test_in_shell(self, text):
+ escaped_text = base_utils.sh_escape(text)
+ proc = subprocess.Popen('echo "%s"' % escaped_text, shell=True,
+ stdin=open(os.devnull, 'r'),
+ stdout=subprocess.PIPE,
+ stderr=open(os.devnull, 'w'))
+ stdout, _ = proc.communicate()
+ self.assertEqual(proc.returncode, 0)
+ self.assertEqual(stdout[:-1], text)
+
+
+ def test_normal_string(self):
+ self._test_in_shell('abcd')
+
+
+ def test_spaced_string(self):
+ self._test_in_shell('abcd efgh')
+
+
+ def test_dollar(self):
+ self._test_in_shell('$')
+
+
+ def test_single_quote(self):
+ self._test_in_shell('\'')
+
+
+ def test_single_quoted_string(self):
+ self._test_in_shell('\'efgh\'')
+
+
+ def test_double_quote(self):
+ self._test_in_shell('"')
+
+
+ def test_double_quoted_string(self):
+ self._test_in_shell('"abcd"')
+
+
+ def test_backtick(self):
+ self._test_in_shell('`')
+
+
+ def test_backticked_string(self):
+ self._test_in_shell('`jklm`')
+
+
+ def test_backslash(self):
+ self._test_in_shell('\\')
+
+
+ def test_backslashed_special_characters(self):
+ self._test_in_shell('\\$')
+ self._test_in_shell('\\"')
+ self._test_in_shell('\\\'')
+ self._test_in_shell('\\`')
+
+
+ def test_backslash_codes(self):
+ self._test_in_shell('\\n')
+ self._test_in_shell('\\r')
+ self._test_in_shell('\\t')
+ self._test_in_shell('\\v')
+ self._test_in_shell('\\b')
+ self._test_in_shell('\\a')
+ self._test_in_shell('\\000')
+
+
+class test_run(unittest.TestCase):
+ """
+ Test the base_utils.run() function.
+
+ Note: This test runs simple external commands to test the base_utils.run()
+ API without assuming implementation details.
+ """
+ def setUp(self):
+ self.god = mock.mock_god()
+ self.god.stub_function(base_utils.logging, 'warn')
+ self.god.stub_function(base_utils.logging, 'debug')
+
+
+ def tearDown(self):
+ self.god.unstub_all()
+
+
+ def __check_result(self, result, command, exit_status=0, stdout='',
+ stderr=''):
+ self.assertEquals(result.command, command)
+ self.assertEquals(result.exit_status, exit_status)
+ self.assertEquals(result.stdout, stdout)
+ self.assertEquals(result.stderr, stderr)
+
+
+ def test_default_simple(self):
+ cmd = 'echo "hello world"'
+ # expect some kind of logging.debug() call but don't care about args
+ base_utils.logging.debug.expect_any_call()
+ self.__check_result(base_utils.run(cmd), cmd, stdout='hello world\n')
+
+
+ def test_default_failure(self):
+ cmd = 'exit 11'
+ try:
+ base_utils.run(cmd, verbose=False)
+ except base_utils.error.CmdError, err:
+ self.__check_result(err.result_obj, cmd, exit_status=11)
+ else:
+ self.fail('run() did not raise CmdError for a failing command')
+
+
+ def test_ignore_status(self):
+ cmd = 'echo error >&2 && exit 11'
+ self.__check_result(base_utils.run(cmd, ignore_status=True,
+ verbose=False),
+ cmd, exit_status=11, stderr='error\n')
+
+
+ def test_timeout(self):
+ # we expect a logging.warn() message, don't care about the contents
+ base_utils.logging.warn.expect_any_call()
+ try:
+ base_utils.run('echo -n output && sleep 10',
+ timeout=1, verbose=False)
+ except base_utils.error.CmdError, err:
+ self.assertEquals(err.result_obj.stdout, 'output')
+ else:
+ self.fail('run() did not raise CmdError on timeout')
+
+
+ def test_stdout_stderr_tee(self):
+ cmd = 'echo output && echo error >&2'
+ stdout_tee = StringIO.StringIO()
+ stderr_tee = StringIO.StringIO()
+
+ self.__check_result(base_utils.run(
+ cmd, stdout_tee=stdout_tee, stderr_tee=stderr_tee,
+ verbose=False), cmd, stdout='output\n', stderr='error\n')
+ self.assertEqual(stdout_tee.getvalue(), 'output\n')
+ self.assertEqual(stderr_tee.getvalue(), 'error\n')
+
+
+ def test_stdin_string(self):
+ cmd = 'cat'
+ self.__check_result(base_utils.run(cmd, verbose=False, stdin='hi!\n'),
+ cmd, stdout='hi!\n')
+
+
+ def test_safe_args(self):
+ cmd = 'echo "hello \\"world" "again"'
+ self.__check_result(base_utils.run(
+ 'echo', verbose=False, args=('hello "world', 'again')), cmd,
+ stdout='hello "world again\n')
+
+
+ def test_safe_args_given_string(self):
+ self.assertRaises(TypeError, base_utils.run, 'echo', args='hello')
+
+
+class test_compare_versions(unittest.TestCase):
+ def test_zerofill(self):
+ self.assertEqual(base_utils.compare_versions('1.7', '1.10'), -1)
+ self.assertEqual(base_utils.compare_versions('1.222', '1.3'), 1)
+ self.assertEqual(base_utils.compare_versions('1.03', '1.3'), 0)
+
+
+ def test_unequal_len(self):
+ self.assertEqual(base_utils.compare_versions('1.3', '1.3.4'), -1)
+ self.assertEqual(base_utils.compare_versions('1.3.1', '1.3'), 1)
+
+
+ def test_dash_delimited(self):
+ self.assertEqual(base_utils.compare_versions('1-2-3', '1-5-1'), -1)
+ self.assertEqual(base_utils.compare_versions('1-2-1', '1-1-1'), 1)
+ self.assertEqual(base_utils.compare_versions('1-2-4', '1-2-4'), 0)
+
+
+ def test_alphabets(self):
+ self.assertEqual(base_utils.compare_versions('m.l.b', 'n.b.a'), -1)
+ self.assertEqual(base_utils.compare_versions('n.b.a', 'm.l.b'), 1)
+ self.assertEqual(base_utils.compare_versions('abc.e', 'abc.e'), 0)
+
+
+ def test_mix_symbols(self):
+ self.assertEqual(base_utils.compare_versions('k-320.1', 'k-320.3'), -1)
+ self.assertEqual(base_utils.compare_versions('k-231.5', 'k-231.1'), 1)
+ self.assertEqual(base_utils.compare_versions('k-231.1', 'k-231.1'), 0)
+
+ self.assertEqual(base_utils.compare_versions('k.320-1', 'k.320-3'), -1)
+ self.assertEqual(base_utils.compare_versions('k.231-5', 'k.231-1'), 1)
+ self.assertEqual(base_utils.compare_versions('k.231-1', 'k.231-1'), 0)
+
+
+class test_args_to_dict(unittest.TestCase):
+ def test_no_args(self):
+ result = base_utils.args_to_dict([])
+ self.assertEqual({}, result)
+
+
+ def test_matches(self):
+ result = base_utils.args_to_dict(['aBc:DeF', 'SyS=DEf', 'XY_Z:',
+ 'F__o0O=', 'B8r:=:=', '_bAZ_=:=:'])
+ self.assertEqual(result, {'abc':'DeF', 'sys':'DEf', 'xy_z':'',
+ 'f__o0o':'', 'b8r':'=:=', '_baz_':':=:'})
+
+
+ def test_unmatches(self):
+ # Temporarily silence warning messages from args_to_dict() when an
+ # argument does not match its pattern.
+ logger = logging.getLogger()
+ saved_level = logger.level
+ logger.setLevel(logging.ERROR)
+
+ try:
+ result = base_utils.args_to_dict(['ab-c:DeF', '--SyS=DEf', 'a*=b',
+ 'a*b', ':VAL', '=VVV', 'WORD'])
+ self.assertEqual({}, result)
+ finally:
+ # Restore level.
+ logger.setLevel(saved_level)
+
+
+class test_get_random_port(unittest.TestCase):
+ def do_bind(self, port, socket_type, socket_proto):
+ s = socket.socket(socket.AF_INET, socket_type, socket_proto)
+ s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ s.bind(('', port))
+ return s
+
+
+ def test_get_port(self):
+ for _ in xrange(100):
+ p = base_utils.get_unused_port()
+ s = self.do_bind(p, socket.SOCK_STREAM, socket.IPPROTO_TCP)
+ self.assert_(s.getsockname())
+ s = self.do_bind(p, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
+ self.assert_(s.getsockname())
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/client/common_lib/control_data.py b/client/common_lib/control_data.py
index 5b8caf2..dcc49cd 100644
--- a/client/common_lib/control_data.py
+++ b/client/common_lib/control_data.py
@@ -19,6 +19,7 @@
self.experimental = False
self.run_verify = True
self.sync_count = 1
+ self.test_parameters = set()
diff = REQUIRED_VARS - set(vars)
if len(diff) > 0:
@@ -134,6 +135,7 @@
def set_test_type(self, val):
self._set_option('test_type', val, ['client', 'server'])
+
def set_test_parameters(self, val):
self._set_set('test_parameters', val)
diff --git a/client/common_lib/logging_manager.py b/client/common_lib/logging_manager.py
index e34e9c9..96f718a 100644
--- a/client/common_lib/logging_manager.py
+++ b/client/common_lib/logging_manager.py
@@ -79,7 +79,7 @@
return rv
-if sys.version_info[:2] > (2, 6):
+if sys.version_info[:2] > (2, 7):
warnings.warn('This module has not been reviewed for Python %s' %
sys.version)
diff --git a/client/common_lib/magic.py b/client/common_lib/magic.py
old mode 100644
new mode 100755
diff --git a/client/common_lib/software_manager.py b/client/common_lib/software_manager.py
new file mode 100755
index 0000000..f67f667
--- /dev/null
+++ b/client/common_lib/software_manager.py
@@ -0,0 +1,788 @@
+#!/usr/bin/python
+"""
+Software package management library.
+
+This is an abstraction layer on top of the existing distributions' high level
+package managers. It supports package operations useful for testing purposes,
+and multiple high level package managers (here called backends). If you want
+this library to support your particular package manager/distro, please
+implement the appropriate backend class.
+
+@author: Higor Vieira Alves (halves@br.ibm.com)
+@author: Lucas Meneghel Rodrigues (lmr@redhat.com)
+@author: Ramon de Carvalho Valle (rcvalle@br.ibm.com)
+
+@copyright: IBM 2008-2009
+@copyright: Red Hat 2009-2010
+"""
+import os, re, logging, ConfigParser, optparse, random, string
+try:
+ import yum
+except ImportError:
+ pass
+import common
+from autotest_lib.client.bin import os_dep, utils
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.common_lib import logging_config, logging_manager
+
+
+def generate_random_string(length):
+ """
+ Return a random string using alphanumeric characters.
+
+ @param length: Length of the string that will be generated.
+ """
+ r = random.SystemRandom()
+ chars = string.letters + string.digits
+ return "".join(r.choice(chars) for _ in xrange(length))
+
+
+class SoftwareManagerLoggingConfig(logging_config.LoggingConfig):
+ """
+ Used with the sole purpose of providing a convenient logging setup
+ when this module is run as a standalone program.
+ """
+ def configure_logging(self, results_dir=None, verbose=False):
+ super(SoftwareManagerLoggingConfig, self).configure_logging(
+ use_console=True,
+ verbose=verbose)
+
+
+class SystemInspector(object):
+ """
+ System inspector class.
+
+ This may grow to include more complete reports of operating system and
+ machine properties.
+ """
+ def __init__(self):
+ """
+ Probe system, and save information for future reference.
+ """
+ self.distro = utils.get_os_vendor()
+ self.high_level_pms = ['apt-get', 'yum', 'zypper']
+
+
+ def get_package_management(self):
+ """
+ Determine the supported package management systems present on the
+ system. If more than one package management system is installed,
+ try to find the best supported one.
+ """
+ list_supported = []
+ for high_level_pm in self.high_level_pms:
+ try:
+ os_dep.command(high_level_pm)
+ list_supported.append(high_level_pm)
+ except ValueError:
+ # os_dep.command() raises ValueError for a missing command
+ pass
+
+ pm_supported = None
+ # pm_supported stays None when no package manager was found
+ if len(list_supported) == 1:
+ pm_supported = list_supported[0]
+ elif len(list_supported) > 1:
+ if 'apt-get' in list_supported and self.distro in ['Debian', 'Ubuntu']:
+ pm_supported = 'apt-get'
+ elif 'yum' in list_supported and self.distro == 'Fedora':
+ pm_supported = 'yum'
+ else:
+ pm_supported = list_supported[0]
+
+ logging.debug('Package Manager backend: %s' % pm_supported)
+ return pm_supported
+
+
+class SoftwareManager(object):
+ """
+ Package management abstraction layer.
+
+ It supports a set of common package operations for testing purposes, and it
+ uses the concept of a backend, a helper class that implements the set of
+ operations of a given package management tool.
+ """
+ def __init__(self):
+ """
+ Class constructor.
+
+ Determines the best supported package management system for the
+ running operating system and initializes the appropriate backend.
+ """
+ inspector = SystemInspector()
+ backend_type = inspector.get_package_management()
+ if backend_type == 'yum':
+ self.backend = YumBackend()
+ elif backend_type == 'zypper':
+ self.backend = ZypperBackend()
+ elif backend_type == 'apt-get':
+ self.backend = AptBackend()
+ else:
+ raise NotImplementedError('Unimplemented package management '
+ 'system: %s.' % backend_type)
+
+
+ def check_installed(self, name, version=None, arch=None):
+ """
+ Check whether a package is installed on this system.
+
+ @param name: Package name.
+ @param version: Package version.
+ @param arch: Package architecture.
+ """
+ return self.backend.check_installed(name, version, arch)
+
+
+ def list_all(self):
+ """
+ List all installed packages.
+ """
+ return self.backend.list_all()
+
+
+ def list_files(self, name):
+ """
+ Get a list of all files installed by package [name].
+
+ @param name: Package name.
+ """
+ return self.backend.list_files(name)
+
+
+ def install(self, name):
+ """
+ Install package [name].
+
+ @param name: Package name.
+ """
+ return self.backend.install(name)
+
+
+ def remove(self, name):
+ """
+ Remove package [name].
+
+ @param name: Package name.
+ """
+ return self.backend.remove(name)
+
+
+ def add_repo(self, url):
+ """
+ Add package repo described by [url].
+
+ @param url: URL of the package repo.
+ """
+ return self.backend.add_repo(url)
+
+
+ def remove_repo(self, url):
+ """
+ Remove package repo described by [url].
+
+ @param url: URL of the package repo.
+ """
+ return self.backend.remove_repo(url)
+
+
+ def upgrade(self):
+ """
+ Upgrade all packages available.
+ """
+ return self.backend.upgrade()
+
+
+ def provides(self, file):
+ """
+ Return the name of a package that provides a given capability to the
+ system (be it a binary or a library).
+
+ @param file: Path to the file.
+ """
+ return self.backend.provides(file)
+
+
+ def install_what_provides(self, file):
+ """
+ Installs package that provides [file].
+
+ @param file: Path to file.
+ """
+ provides = self.provides(file)
+ if provides is not None:
+ self.install(provides)
+ else:
+ logging.warning('No package seems to provide %s', file)
+
+
+class RpmBackend(object):
+ """
+ This class implements operations executed with the rpm package manager.
+
+ rpm is a lower level package manager, used by higher level managers such
+ as yum and zypper.
+ """
+ def __init__(self):
+ self.lowlevel_base_cmd = os_dep.command('rpm')
+
+
+ def _check_installed_version(self, name, version):
+ """
+ Helper for the check_installed public method.
+
+ @param name: Package name.
+ @param version: Package version.
+ """
+ cmd = (self.lowlevel_base_cmd + ' -q --qf %{VERSION} ' + name +
+ ' 2> /dev/null')
+ inst_version = utils.system_output(cmd)
+
+ # Note: this is a plain string comparison, not a full rpm version
+ # comparison.
+ return inst_version >= version
+
+
+ def check_installed(self, name, version=None, arch=None):
+ """
+ Check if package [name] is installed.
+
+ @param name: Package name.
+ @param version: Package version.
+ @param arch: Package architecture.
+ """
+ if arch:
+ cmd = (self.lowlevel_base_cmd + ' -q --qf %{ARCH} ' + name +
+ ' 2> /dev/null')
+ inst_archs = utils.system_output(cmd)
+ inst_archs = inst_archs.split('\n')
+
+ for inst_arch in inst_archs:
+ if inst_arch == arch:
+ return self._check_installed_version(name, version)
+ return False
+
+ elif version:
+ return self._check_installed_version(name, version)
+ else:
+ cmd = 'rpm -q ' + name + ' 2> /dev/null'
+ return (os.system(cmd) == 0)
+
+
+ def list_all(self):
+ """
+ List all installed packages.
+ """
+ installed_packages = utils.system_output('rpm -qa').splitlines()
+ return installed_packages
+
+
+ def list_files(self, name):
+ """
+ List files installed on the system by package [name].
+
+ @param name: Package name.
+ """
+ path = os.path.abspath(name)
+ if os.path.isfile(path):
+ option = '-qlp'
+ name = path
+ else:
+ option = '-ql'
+
+ l_cmd = 'rpm' + ' ' + option + ' ' + name + ' 2> /dev/null'
+
+ try:
+ result = utils.system_output(l_cmd)
+ list_files = result.split('\n')
+ return list_files
+ except error.CmdError:
+ return []
+
+
+class DpkgBackend(object):
+ """
+ This class implements operations executed with the dpkg package manager.
+
+ dpkg is a lower level package manager, used by higher level managers such
+ as apt and aptitude.
+ """
+ def __init__(self):
+ self.lowlevel_base_cmd = os_dep.command('dpkg')
+
+
+ def check_installed(self, name):
+ if os.path.isfile(name):
+ n_cmd = (self.lowlevel_base_cmd + ' -f ' + name +
+ ' Package 2>/dev/null')
+ name = utils.system_output(n_cmd)
+ i_cmd = self.lowlevel_base_cmd + ' -s ' + name + ' 2>/dev/null'
+ # Checking if package is installed
+ package_status = utils.system_output(i_cmd, ignore_status=True)
+ not_inst_pattern = re.compile('not-installed', re.IGNORECASE)
+ dpkg_not_installed = re.search(not_inst_pattern, package_status)
+ if dpkg_not_installed:
+ return False
+ return True
+
+
+ def list_all(self):
+ """
+ List all packages installed on the system.
+ """
+ installed_packages = []
+ raw_list = utils.system_output('dpkg -l').splitlines()[5:]
+ for line in raw_list:
+ parts = line.split()
+ if parts[0] == "ii": # only grab "installed" packages
+ installed_packages.append("%s-%s" % (parts[1], parts[2]))
+ return installed_packages
+
+
+ def list_files(self, package):
+ """
+ List files installed by package [package].
+
+ @param package: Package name.
+ @return: List of paths installed by package.
+ """
+ if os.path.isfile(package):
+ l_cmd = self.lowlevel_base_cmd + ' -c ' + package
+ else:
+ l_cmd = self.lowlevel_base_cmd + ' -l ' + package
+ return utils.system_output(l_cmd).split('\n')
+
+
+class YumBackend(RpmBackend):
+ """
+ Implements the yum backend for software manager.
+
+ Set of operations for the yum package manager, commonly found on Yellow Dog
+ Linux and Red Hat based distributions, such as Fedora and Red Hat
+ Enterprise Linux.
+ """
+ def __init__(self):
+ """
+ Initializes the base command and the yum package repository.
+ """
+ super(YumBackend, self).__init__()
+ executable = os_dep.command('yum')
+ base_arguments = '-y'
+ self.base_command = executable + ' ' + base_arguments
+ self.repo_file_path = '/etc/yum.repos.d/autotest.repo'
+ self.cfgparser = ConfigParser.ConfigParser()
+ self.cfgparser.read(self.repo_file_path)
+ y_cmd = executable + ' --version | head -1'
+ self.yum_version = utils.system_output(y_cmd, ignore_status=True)
+ logging.debug('Yum backend initialized')
+ logging.debug('Yum version: %s' % self.yum_version)
+ self.yum_base = yum.YumBase()
+
+
+ def _cleanup(self):
+ """
+ Clean up the yum cache so new package information can be downloaded.
+ """
+ utils.system("yum clean all")
+
+
+ def install(self, name):
+ """
+ Installs package [name]. Handles local installs.
+ """
+ if os.path.isfile(name):
+ name = os.path.abspath(name)
+ command = 'localinstall'
+ else:
+ command = 'install'
+
+ i_cmd = self.base_command + ' ' + command + ' ' + name
+
+ try:
+ utils.system(i_cmd)
+ return True
+ except error.CmdError:
+ return False
+
+
+ def remove(self, name):
+ """
+ Removes package [name].
+
+ @param name: Package name (eg. 'ipython').
+ """
+ r_cmd = self.base_command + ' ' + 'erase' + ' ' + name
+ try:
+ utils.system(r_cmd)
+ return True
+ except error.CmdError:
+ return False
+
+
+ def add_repo(self, url):
+ """
+ Adds package repository located on [url].
+
+ @param url: Universal Resource Locator of the repository.
+ """
+ # Check if the URL is already set
+ for section in self.cfgparser.sections():
+ for option, value in self.cfgparser.items(section):
+ if option == 'url' and value == url:
+ return True
+
+ # Didn't find it, let's set it up
+ while True:
+ section_name = 'software_manager' + '_' + generate_random_string(4)
+ if not self.cfgparser.has_section(section_name):
+ break
+ self.cfgparser.add_section(section_name)
+ self.cfgparser.set(section_name, 'name',
+ 'Repository added by the autotest software manager.')
+ self.cfgparser.set(section_name, 'url', url)
+ self.cfgparser.set(section_name, 'enabled', 1)
+ self.cfgparser.set(section_name, 'gpgcheck', 0)
+ repo_file = open(self.repo_file_path, 'w')
+ self.cfgparser.write(repo_file)
+ repo_file.close()
+
+
+ def remove_repo(self, url):
+ """
+ Removes package repository located on [url].
+
+ @param url: Universal Resource Locator of the repository.
+ """
+ for section in self.cfgparser.sections():
+ for option, value in self.cfgparser.items(section):
+ if option == 'url' and value == url:
+ self.cfgparser.remove_section(section)
+ repo_file = open(self.repo_file_path, 'w')
+ self.cfgparser.write(repo_file)
+ repo_file.close()
+
+
+ def upgrade(self):
+ """
+ Upgrade all available packages.
+ """
+ r_cmd = self.base_command + ' ' + 'update'
+ try:
+ utils.system(r_cmd)
+ return True
+ except error.CmdError:
+ return False
+
+
+ def provides(self, name):
+ """
+ Returns a list of packages that provides a given capability.
+
+ @param name: Capability name (eg, 'foo').
+ """
+ d_provides = self.yum_base.searchPackageProvides(args=[name])
+ provides_list = [key for key in d_provides]
+ if provides_list:
+ logging.info("Package %s provides %s", provides_list[0], name)
+ return str(provides_list[0])
+ else:
+ return None
+
+
+class ZypperBackend(RpmBackend):
+ """
+ Implements the zypper backend for software manager.
+
+ Set of operations for the zypper package manager, found on SUSE Linux.
+ """
+ def __init__(self):
+ """
+ Initializes the base command and stores the zypper version.
+ """
+ super(ZypperBackend, self).__init__()
+ self.base_command = os_dep.command('zypper') + ' -n'
+ z_cmd = self.base_command + ' --version'
+ self.zypper_version = utils.system_output(z_cmd, ignore_status=True)
+ logging.debug('Zypper backend initialized')
+ logging.debug('Zypper version: %s' % self.zypper_version)
+
+
+ def install(self, name):
+ """
+ Installs package [name]. Handles local installs.
+
+ @param name: Package Name.
+ """
+ if os.path.isfile(name):
+ name = os.path.abspath(name)
+ i_cmd = self.base_command + ' install -l ' + name
+ try:
+ utils.system(i_cmd)
+ return True
+ except error.CmdError:
+ return False
+
+
+ def add_repo(self, url):
+ """
+ Adds repository [url].
+
+ @param url: URL for the package repository.
+ """
+ ar_cmd = self.base_command + ' addrepo ' + url
+ try:
+ utils.system(ar_cmd)
+ return True
+ except error.CmdError:
+ return False
+
+
+ def remove_repo(self, url):
+ """
+ Removes repository [url].
+
+ @param url: URL for the package repository.
+ """
+ rr_cmd = self.base_command + ' removerepo ' + url
+ try:
+ utils.system(rr_cmd)
+ return True
+ except error.CmdError:
+ return False
+
+
+ def remove(self, name):
+ """
+ Removes package [name].
+ """
+ r_cmd = self.base_command + ' ' + 'erase' + ' ' + name
+
+ try:
+ utils.system(r_cmd)
+ return True
+ except error.CmdError:
+ return False
+
+
+ def upgrade(self):
+ """
+ Upgrades all packages of the system.
+ """
+ u_cmd = self.base_command + ' update -l'
+
+ try:
+ utils.system(u_cmd)
+ return True
+ except error.CmdError:
+ return False
+
+
+ def provides(self, name):
+ """
+ Searches for what provides a given file.
+
+ @param name: File path.
+ """
+ p_cmd = self.base_command + ' what-provides ' + name
+ list_provides = []
+ try:
+ p_output = utils.system_output(p_cmd).split('\n')[4:]
+ for line in p_output:
+ line = [a.strip() for a in line.split('|')]
+ try:
+ state, pname, ptype, version, arch, repository = line
+ if pname not in list_provides:
+ list_provides.append(pname)
+ except ValueError:
+ # Malformed lines fail tuple unpacking with ValueError.
+ pass
+ if len(list_provides) > 1:
+ logging.warning('More than one package found, '
+ 'using the first query result')
+ if list_provides:
+ logging.info("Package %s provides %s", list_provides[0], name)
+ return list_provides[0]
+ return None
+ except error.CmdError:
+ return None
+
+
+class AptBackend(DpkgBackend):
+ """
+ Implements the apt backend for software manager.
+
+ Set of operations for the apt package manager, commonly found on Debian and
+ Debian based distributions, such as Ubuntu Linux.
+ """
+ def __init__(self):
+ """
+ Initializes the base command and the debian package repository.
+ """
+ super(AptBackend, self).__init__()
+ executable = os_dep.command('apt-get')
+ self.base_command = executable + ' -y'
+ self.repo_file_path = '/etc/apt/sources.list.d/autotest'
+ self.apt_version = utils.system_output('apt-get -v | head -1',
+ ignore_status=True)
+ logging.debug('Apt backend initialized')
+ logging.debug('apt version: %s' % self.apt_version)
+
+
+ def install(self, name):
+ """
+ Installs package [name].
+
+ @param name: Package name.
+ """
+ command = 'install'
+ i_cmd = self.base_command + ' ' + command + ' ' + name
+
+ try:
+ utils.system(i_cmd)
+ return True
+ except error.CmdError:
+ return False
+
+
+ def remove(self, name):
+ """
+ Remove package [name].
+
+ @param name: Package name.
+ """
+ command = 'remove'
+ flag = '--purge'
+ r_cmd = self.base_command + ' ' + command + ' ' + flag + ' ' + name
+
+ try:
+ utils.system(r_cmd)
+ return True
+ except error.CmdError:
+ return False
+
+
+ def add_repo(self, repo):
+ """
+ Add an apt repository.
+
+ @param repo: Repository string. Example:
+ 'deb http://archive.ubuntu.com/ubuntu/ maverick universe'
+ """
+ # Open 'a+' so existing contents can be read before appending.
+ repo_file = open(self.repo_file_path, 'a+')
+ repo_file_contents = repo_file.read()
+ if repo not in repo_file_contents:
+ repo_file.write(repo + '\n')
+ repo_file.close()
+
+
+ def remove_repo(self, repo):
+ """
+ Remove an apt repository.
+
+ @param repo: Repository string. Example:
+ 'deb http://archive.ubuntu.com/ubuntu/ maverick universe'
+ """
+ repo_file = open(self.repo_file_path, 'r')
+ new_file_contents = []
+ for line in repo_file.readlines():
+ if line.rstrip('\n') != repo:
+ new_file_contents.append(line.rstrip('\n'))
+ repo_file.close()
+ new_file_contents = "\n".join(new_file_contents)
+ repo_file = open(self.repo_file_path, 'w')
+ repo_file.write(new_file_contents)
+ repo_file.close()
+
+
+ def upgrade(self):
+ """
+ Upgrade all packages of the system with eventual new versions.
+ """
+ ud_command = 'update'
+ ud_cmd = self.base_command + ' ' + ud_command
+ try:
+ utils.system(ud_cmd)
+ except error.CmdError:
+ logging.error("Apt package update failed")
+ up_command = 'upgrade'
+ up_cmd = self.base_command + ' ' + up_command
+ try:
+ utils.system(up_cmd)
+ return True
+ except error.CmdError:
+ return False
+
+
+ def provides(self, file):
+ """
+ Return a list of packages that provide [file].
+
+ @param file: File path.
+ """
+ if not self.check_installed('apt-file'):
+ self.install('apt-file')
+ command = os_dep.command('apt-file')
+ cache_update_cmd = command + ' update'
+ try:
+ utils.system(cache_update_cmd, ignore_status=True)
+ except error.CmdError:
+ logging.error("Apt file cache update failed")
+ fu_cmd = command + ' search ' + file
+ try:
+ provides = utils.system_output(fu_cmd).split('\n')
+ list_provides = []
+ for line in provides:
+ if line:
+ try:
+ line = line.split(':')
+ package = line[0].strip()
+ path = line[1].strip()
+ if path == file and package not in list_provides:
+ list_provides.append(package)
+ except IndexError:
+ pass
+ if len(list_provides) > 1:
+ logging.warning('More than one package found, '
+ 'using the first query result')
+ if list_provides:
+ logging.info("Package %s provides %s", list_provides[0], file)
+ return list_provides[0]
+ return None
+ except error.CmdError:
+ return None
+
+
+if __name__ == '__main__':
+ parser = optparse.OptionParser(
+ "usage: %prog [install|remove|list-all|list-files|add-repo|remove-repo|"
+ "upgrade|what-provides|install-what-provides] arguments")
+ parser.add_option('--verbose', dest="debug", action='store_true',
+ help='include debug messages in console output')
+
+ options, args = parser.parse_args()
+ debug = options.debug
+ logging_manager.configure_logging(SoftwareManagerLoggingConfig(),
+ verbose=debug)
+ software_manager = SoftwareManager()
+ if args:
+ action = args[0]
+ args = " ".join(args[1:])
+ else:
+ action = 'show-help'
+
+ if action == 'install':
+ software_manager.install(args)
+ elif action == 'remove':
+ software_manager.remove(args)
+ elif action == 'list-all':
+ software_manager.list_all()
+ elif action == 'list-files':
+ software_manager.list_files(args)
+ elif action == 'add-repo':
+ software_manager.add_repo(args)
+ elif action == 'remove-repo':
+ software_manager.remove_repo(args)
+ elif action == 'upgrade':
+ software_manager.upgrade()
+ elif action == 'what-provides':
+ software_manager.provides(args)
+ elif action == 'install-what-provides':
+ software_manager.install_what_provides(args)
+ elif action == 'show-help':
+ parser.print_help()
diff --git a/client/common_lib/utils.py b/client/common_lib/utils.py
index 101599b..382f79d 100644
--- a/client/common_lib/utils.py
+++ b/client/common_lib/utils.py
@@ -1,1715 +1,13 @@
-#
-# Copyright 2008 Google Inc. Released under the GPL v2
+"""
+Convenience functions for use by tests or whomever.
-import os, pickle, random, re, resource, select, shutil, signal, StringIO
-import socket, struct, subprocess, sys, time, textwrap, urlparse
-import warnings, smtplib, logging, urllib2
-from threading import Thread, Event
-try:
- import hashlib
-except ImportError:
- import md5, sha
-from autotest_lib.client.common_lib import error, logging_manager
+NOTE: this is a mixin library that pulls in functions from several places.
+Note carefully what the precedence order is.
-def deprecated(func):
- """This is a decorator which can be used to mark functions as deprecated.
- It will result in a warning being emmitted when the function is used."""
- def new_func(*args, **dargs):
- warnings.warn("Call to deprecated function %s." % func.__name__,
- category=DeprecationWarning)
- return func(*args, **dargs)
- new_func.__name__ = func.__name__
- new_func.__doc__ = func.__doc__
- new_func.__dict__.update(func.__dict__)
- return new_func
+There's no really good way to do this, as this isn't a class we can do
+inheritance with, just a collection of static methods.
+"""
-
-class _NullStream(object):
- def write(self, data):
- pass
-
-
- def flush(self):
- pass
-
-
-TEE_TO_LOGS = object()
-_the_null_stream = _NullStream()
-
-DEFAULT_STDOUT_LEVEL = logging.DEBUG
-DEFAULT_STDERR_LEVEL = logging.ERROR
-
-# prefixes for logging stdout/stderr of commands
-STDOUT_PREFIX = '[stdout] '
-STDERR_PREFIX = '[stderr] '
-
-
-def get_stream_tee_file(stream, level, prefix=''):
- if stream is None:
- return _the_null_stream
- if stream is TEE_TO_LOGS:
- return logging_manager.LoggingFile(level=level, prefix=prefix)
- return stream
-
-
-class BgJob(object):
- def __init__(self, command, stdout_tee=None, stderr_tee=None, verbose=True,
- stdin=None, stderr_level=DEFAULT_STDERR_LEVEL):
- self.command = command
- self.stdout_tee = get_stream_tee_file(stdout_tee, DEFAULT_STDOUT_LEVEL,
- prefix=STDOUT_PREFIX)
- self.stderr_tee = get_stream_tee_file(stderr_tee, stderr_level,
- prefix=STDERR_PREFIX)
- self.result = CmdResult(command)
-
- # allow for easy stdin input by string, we'll let subprocess create
- # a pipe for stdin input and we'll write to it in the wait loop
- if isinstance(stdin, basestring):
- self.string_stdin = stdin
- stdin = subprocess.PIPE
- else:
- self.string_stdin = None
-
- if verbose:
- logging.debug("Running '%s'" % command)
- self.sp = subprocess.Popen(command, stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- preexec_fn=self._reset_sigpipe, shell=True,
-
- # Default shell in ChromeOS test image is
- # already bash. We're seeing shell-init
- # errors if this value is set.
-
- #executable="/bin/bash",
- stdin=stdin)
-
-
- def output_prepare(self, stdout_file=None, stderr_file=None):
- self.stdout_file = stdout_file
- self.stderr_file = stderr_file
-
-
- def process_output(self, stdout=True, final_read=False):
- """output_prepare must be called prior to calling this"""
- if stdout:
- pipe, buf, tee = self.sp.stdout, self.stdout_file, self.stdout_tee
- else:
- pipe, buf, tee = self.sp.stderr, self.stderr_file, self.stderr_tee
-
- if final_read:
- # read in all the data we can from pipe and then stop
- data = []
- while select.select([pipe], [], [], 0)[0]:
- data.append(os.read(pipe.fileno(), 1024))
- if len(data[-1]) == 0:
- break
- data = "".join(data)
- else:
- # perform a single read
- data = os.read(pipe.fileno(), 1024)
- buf.write(data)
- tee.write(data)
-
-
- def cleanup(self):
- self.stdout_tee.flush()
- self.stderr_tee.flush()
- self.sp.stdout.close()
- self.sp.stderr.close()
- self.result.stdout = self.stdout_file.getvalue()
- self.result.stderr = self.stderr_file.getvalue()
-
-
- def _reset_sigpipe(self):
- signal.signal(signal.SIGPIPE, signal.SIG_DFL)
-
-
-def ip_to_long(ip):
- # !L is a long in network byte order
- return struct.unpack('!L', socket.inet_aton(ip))[0]
-
-
-def long_to_ip(number):
- # See above comment.
- return socket.inet_ntoa(struct.pack('!L', number))
-
-
-def create_subnet_mask(bits):
- return (1 << 32) - (1 << 32-bits)
-
-
-def format_ip_with_mask(ip, mask_bits):
- masked_ip = ip_to_long(ip) & create_subnet_mask(mask_bits)
- return "%s/%s" % (long_to_ip(masked_ip), mask_bits)
-
-
-def normalize_hostname(alias):
- ip = socket.gethostbyname(alias)
- return socket.gethostbyaddr(ip)[0]
-
-
-def get_ip_local_port_range():
- match = re.match(r'\s*(\d+)\s*(\d+)\s*$',
- read_one_line('/proc/sys/net/ipv4/ip_local_port_range'))
- return (int(match.group(1)), int(match.group(2)))
-
-
-def set_ip_local_port_range(lower, upper):
- write_one_line('/proc/sys/net/ipv4/ip_local_port_range',
- '%d %d\n' % (lower, upper))
-
-
-
-def send_email(mail_from, mail_to, subject, body):
- """
- Sends an email via smtp
-
- mail_from: string with email address of sender
- mail_to: string or list with email address(es) of recipients
- subject: string with subject of email
- body: (multi-line) string with body of email
- """
- if isinstance(mail_to, str):
- mail_to = [mail_to]
- msg = "From: %s\nTo: %s\nSubject: %s\n\n%s" % (mail_from, ','.join(mail_to),
- subject, body)
- try:
- mailer = smtplib.SMTP('localhost')
- try:
- mailer.sendmail(mail_from, mail_to, msg)
- finally:
- mailer.quit()
- except Exception, e:
- # Emails are non-critical, not errors, but don't raise them
- print "Sending email failed. Reason: %s" % repr(e)
-
-
-def read_one_line(filename):
- return open(filename, 'r').readline().rstrip('\n')
-
-
-def read_file(filename):
- f = open(filename)
- try:
- return f.read()
- finally:
- f.close()
-
-
-def get_field(data, param, linestart="", sep=" "):
- """
- Parse data from string.
- @param data: Data to parse.
- example:
- data:
- cpu 324 345 34 5 345
- cpu0 34 11 34 34 33
- ^^^^
- start of line
- params 0 1 2 3 4
- @param param: Position of parameter after linestart marker.
- @param linestart: String to which start line with parameters.
- @param sep: Separator between parameters regular expression.
- """
- search = re.compile(r"(?<=^%s)\s*(.*)" % linestart, re.MULTILINE)
- find = search.search(data)
- if find != None:
- return re.split("%s" % sep, find.group(1))[param]
- else:
- print "There is no line which starts with %s in data." % linestart
- return None
-
-
-def write_one_line(filename, line):
- open_write_close(filename, line.rstrip('\n') + '\n')
-
-
-def open_write_close(filename, data):
- f = open(filename, 'w')
- try:
- f.write(data)
- finally:
- f.close()
-
-
-def matrix_to_string(matrix, header=None):
- """
- Return a pretty, aligned string representation of a nxm matrix.
-
- This representation can be used to print any tabular data, such as
- database results. It works by scanning the lengths of each element
- in each column, and determining the format string dynamically.
-
- @param matrix: Matrix representation (list with n rows of m elements).
- @param header: Optional tuple or list with header elements to be displayed.
- """
- if type(header) is list:
- header = tuple(header)
- lengths = []
- if header:
- for column in header:
- lengths.append(len(column))
- for row in matrix:
- for column in row:
- i = row.index(column)
- cl = len(column)
- try:
- ml = lengths[i]
- if cl > ml:
- lengths[i] = cl
- except IndexError:
- lengths.append(cl)
-
- lengths = tuple(lengths)
- format_string = ""
- for length in lengths:
- format_string += "%-" + str(length) + "s "
- format_string += "\n"
-
- matrix_str = ""
- if header:
- matrix_str += format_string % header
- for row in matrix:
- matrix_str += format_string % tuple(row)
-
- return matrix_str
-
-
-def read_keyval(path):
- """
- Read a key-value pair format file into a dictionary, and return it.
- Takes either a filename or directory name as input. If it's a
- directory name, we assume you want the file to be called keyval.
- """
- if os.path.isdir(path):
- path = os.path.join(path, 'keyval')
- keyval = {}
- if os.path.exists(path):
- for line in open(path):
- line = re.sub('#.*', '', line).rstrip()
- if not re.search(r'^[-\.\w]+=', line):
- raise ValueError('Invalid format line: %s' % line)
- key, value = line.split('=', 1)
- if re.search('^\d+$', value):
- value = int(value)
- elif re.search('^(\d+\.)?\d+$', value):
- value = float(value)
- keyval[key] = value
- return keyval
-
-
-def write_keyval(path, dictionary, type_tag=None):
- """
- Write a key-value pair format file out to a file. This uses append
- mode to open the file, so existing text will not be overwritten or
- reparsed.
-
- If type_tag is None, then the key must be composed of alphanumeric
- characters (or dashes+underscores). However, if type-tag is not
- null then the keys must also have "{type_tag}" as a suffix. At
- the moment the only valid values of type_tag are "attr" and "perf".
- """
- if os.path.isdir(path):
- path = os.path.join(path, 'keyval')
- keyval = open(path, 'a')
-
- if type_tag is None:
- key_regex = re.compile(r'^[-\.\w]+$')
- else:
- if type_tag not in ('attr', 'perf'):
- raise ValueError('Invalid type tag: %s' % type_tag)
- escaped_tag = re.escape(type_tag)
- key_regex = re.compile(r'^[-\.\w]+\{%s\}$' % escaped_tag)
- try:
- for key in sorted(dictionary.keys()):
- if not key_regex.search(key):
- raise ValueError('Invalid key: %s' % key)
- keyval.write('%s=%s\n' % (key, dictionary[key]))
- finally:
- keyval.close()
-
-
-class FileFieldMonitor(object):
- """
- Monitors the information from the file and reports it's values.
-
- It gather the information at start and stop of the measurement or
- continuously during the measurement.
- """
- class Monitor(Thread):
- """
- Internal monitor class to ensure continuous monitor of monitored file.
- """
- def __init__(self, master):
- """
- @param master: Master class which control Monitor
- """
- Thread.__init__(self)
- self.master = master
-
- def run(self):
- """
- Start monitor in thread mode
- """
- while not self.master.end_event.isSet():
- self.master._get_value(self.master.logging)
- time.sleep(self.master.time_step)
-
-
- def __init__(self, status_file, data_to_read, mode_diff, continuously=False,
- contlogging=False, separator=" +", time_step=0.1):
- """
- Initialize variables.
- @param status_file: File contain status.
- @param mode_diff: If True make a difference of value, else average.
- @param data_to_read: List of tuples with data position.
- format: [(start_of_line,position in params)]
- example:
- data:
- cpu 324 345 34 5 345
- cpu0 34 11 34 34 33
- ^^^^
- start of line
- params 0 1 2 3 4
- @param mode_diff: True to subtract old value from new value,
- False make average of the values.
- @parma continuously: Start the monitoring thread using the time_step
- as the measurement period.
- @param contlogging: Log data in continuous run.
- @param separator: Regular expression of separator.
- @param time_step: Time period of the monitoring value.
- """
- self.end_event = Event()
- self.start_time = 0
- self.end_time = 0
- self.test_time = 0
-
- self.status_file = status_file
- self.separator = separator
- self.data_to_read = data_to_read
- self.num_of_params = len(self.data_to_read)
- self.mode_diff = mode_diff
- self.continuously = continuously
- self.time_step = time_step
-
- self.value = [0 for i in range(self.num_of_params)]
- self.old_value = [0 for i in range(self.num_of_params)]
- self.log = []
- self.logging = contlogging
-
- self.started = False
- self.num_of_get_value = 0
- self.monitor = None
-
-
- def _get_value(self, logging=True):
- """
- Return current values.
- @param logging: If true log value in memory. There can be problem
- with long run.
- """
- data = read_file(self.status_file)
- value = []
- for i in range(self.num_of_params):
- value.append(int(get_field(data,
- self.data_to_read[i][1],
- self.data_to_read[i][0],
- self.separator)))
-
- if logging:
- self.log.append(value)
- if not self.mode_diff:
- value = map(lambda x, y: x + y, value, self.old_value)
-
- self.old_value = value
- self.num_of_get_value += 1
- return value
-
-
- def start(self):
- """
- Start value monitor.
- """
- if self.started:
- self.stop()
- self.old_value = [0 for i in range(self.num_of_params)]
- self.num_of_get_value = 0
- self.log = []
- self.end_event.clear()
- self.start_time = time.time()
- self._get_value()
- self.started = True
- if (self.continuously):
- self.monitor = FileFieldMonitor.Monitor(self)
- self.monitor.start()
-
-
- def stop(self):
- """
- Stop value monitor.
- """
- if self.started:
- self.started = False
- self.end_time = time.time()
- self.test_time = self.end_time - self.start_time
- self.value = self._get_value()
- if (self.continuously):
- self.end_event.set()
- self.monitor.join()
- if (self.mode_diff):
- self.value = map(lambda x, y: x - y, self.log[-1], self.log[0])
- else:
- self.value = map(lambda x: x / self.num_of_get_value,
- self.value)
-
-
- def get_status(self):
- """
- @return: Status of monitored process average value,
- time of test and array of monitored values and time step of
- continuous run.
- """
- if self.started:
- self.stop()
- if self.mode_diff:
- for i in range(len(self.log) - 1):
- self.log[i] = (map(lambda x, y: x - y,
- self.log[i + 1], self.log[i]))
- self.log.pop()
- return (self.value, self.test_time, self.log, self.time_step)
-
-
-def is_url(path):
- """Return true if path looks like a URL"""
- # for now, just handle http and ftp
- url_parts = urlparse.urlparse(path)
- return (url_parts[0] in ('http', 'ftp'))
-
-
-def urlopen(url, data=None, timeout=5):
- """Wrapper to urllib2.urlopen with timeout addition."""
-
- # Save old timeout
- old_timeout = socket.getdefaulttimeout()
- socket.setdefaulttimeout(timeout)
- try:
- return urllib2.urlopen(url, data=data)
- finally:
- socket.setdefaulttimeout(old_timeout)
-
-
-def urlretrieve(url, filename, data=None, timeout=300):
- """Retrieve a file from given url."""
- logging.debug('Fetching %s -> %s', url, filename)
-
- src_file = urlopen(url, data=data, timeout=timeout)
- try:
- dest_file = open(filename, 'wb')
- try:
- shutil.copyfileobj(src_file, dest_file)
- finally:
- dest_file.close()
- finally:
- src_file.close()
-
-
-def hash(type, input=None):
- """
- Returns an hash object of type md5 or sha1. This function is implemented in
- order to encapsulate hash objects in a way that is compatible with python
- 2.4 and python 2.6 without warnings.
-
- Note that even though python 2.6 hashlib supports hash types other than
- md5 and sha1, we are artificially limiting the input values in order to
- make the function to behave exactly the same among both python
- implementations.
-
- @param input: Optional input string that will be used to update the hash.
- """
- if type not in ['md5', 'sha1']:
- raise ValueError("Unsupported hash type: %s" % type)
-
- try:
- hash = hashlib.new(type)
- except NameError:
- if type == 'md5':
- hash = md5.new()
- elif type == 'sha1':
- hash = sha.new()
-
- if input:
- hash.update(input)
-
- return hash
-
-
-def get_file(src, dest, permissions=None):
- """Get a file from src, which can be local or a remote URL"""
- if src == dest:
- return
-
- if is_url(src):
- urlretrieve(src, dest)
- else:
- shutil.copyfile(src, dest)
-
- if permissions:
- os.chmod(dest, permissions)
- return dest
-
-
-def unmap_url(srcdir, src, destdir='.'):
- """
- Receives either a path to a local file or a URL.
- returns either the path to the local file, or the fetched URL
-
- unmap_url('/usr/src', 'foo.tar', '/tmp')
- = '/usr/src/foo.tar'
- unmap_url('/usr/src', 'http://site/file', '/tmp')
- = '/tmp/file'
- (after retrieving it)
- """
- if is_url(src):
- url_parts = urlparse.urlparse(src)
- filename = os.path.basename(url_parts[2])
- dest = os.path.join(destdir, filename)
- return get_file(src, dest)
- else:
- return os.path.join(srcdir, src)
-
-
-def update_version(srcdir, preserve_srcdir, new_version, install,
- *args, **dargs):
- """
- Make sure srcdir is version new_version
-
- If not, delete it and install() the new version.
-
- In the preserve_srcdir case, we just check it's up to date,
- and if not, we rerun install, without removing srcdir
- """
- versionfile = os.path.join(srcdir, '.version')
- install_needed = True
-
- if os.path.exists(versionfile):
- old_version = pickle.load(open(versionfile))
- if old_version == new_version:
- install_needed = False
-
- if install_needed:
- if not preserve_srcdir and os.path.exists(srcdir):
- shutil.rmtree(srcdir)
- install(*args, **dargs)
- if os.path.exists(srcdir):
- pickle.dump(new_version, open(versionfile, 'w'))
-
-
-def get_stderr_level(stderr_is_expected):
- if stderr_is_expected:
- return DEFAULT_STDOUT_LEVEL
- return DEFAULT_STDERR_LEVEL
-
-
-def run(command, timeout=None, ignore_status=False,
- stdout_tee=None, stderr_tee=None, verbose=True, stdin=None,
- stderr_is_expected=None, args=()):
- """
- Run a command on the host.
-
- @param command: the command line string.
- @param timeout: time limit in seconds before attempting to kill the
- running process. The run() function will take a few seconds
- longer than 'timeout' to complete if it has to kill the process.
- @param ignore_status: do not raise an exception, no matter what the exit
- code of the command is.
- @param stdout_tee: optional file-like object to which stdout data
- will be written as it is generated (data will still be stored
- in result.stdout).
- @param stderr_tee: likewise for stderr.
- @param verbose: if True, log the command being run.
- @param stdin: stdin to pass to the executed process (can be a file
- descriptor, a file object of a real file or a string).
-    @param args: sequence of strings to append to the command; each element
-            is shell-escaped, wrapped in double quotes and passed to the
-            command as a separate argument
-
- @return a CmdResult object
-
- @raise CmdError: the exit code of the command execution was not 0
- """
- if isinstance(args, basestring):
- raise TypeError('Got a string for the "args" keyword argument, '
- 'need a sequence.')
-
- for arg in args:
- command += ' "%s"' % sh_escape(arg)
- if stderr_is_expected is None:
- stderr_is_expected = ignore_status
-
- bg_job = join_bg_jobs(
- (BgJob(command, stdout_tee, stderr_tee, verbose, stdin=stdin,
- stderr_level=get_stderr_level(stderr_is_expected)),),
- timeout)[0]
- if not ignore_status and bg_job.result.exit_status:
- raise error.CmdError(command, bg_job.result,
- "Command returned non-zero exit status")
-
- return bg_job.result
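-
-# Illustrative sketch (hypothetical command and arguments): each element of
-# args is escaped and appended as a separate double-quoted argument, so
-# spaces and shell metacharacters survive intact:
-#   result = run('ls -l', args=('dir with spaces',), ignore_status=True)
-#   print result.exit_status, result.stdout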
-
-
-def run_parallel(commands, timeout=None, ignore_status=False,
- stdout_tee=None, stderr_tee=None):
- """
- Behaves the same as run() with the following exceptions:
-
- - commands is a list of commands to run in parallel.
- - ignore_status toggles whether or not an exception should be raised
- on any error.
-
- @return: a list of CmdResult objects
- """
- bg_jobs = []
- for command in commands:
- bg_jobs.append(BgJob(command, stdout_tee, stderr_tee,
- stderr_level=get_stderr_level(ignore_status)))
-
- # Updates objects in bg_jobs list with their process information
- join_bg_jobs(bg_jobs, timeout)
-
- for bg_job in bg_jobs:
- if not ignore_status and bg_job.result.exit_status:
-            raise error.CmdError(bg_job.command, bg_job.result,
-                                 "Command returned non-zero exit status")
-
- return [bg_job.result for bg_job in bg_jobs]
-
-
-@deprecated
-def run_bg(command):
- """Function deprecated. Please use BgJob class instead."""
- bg_job = BgJob(command)
- return bg_job.sp, bg_job.result
-
-
-def join_bg_jobs(bg_jobs, timeout=None):
- """Joins the bg_jobs with the current thread.
-
- Returns the same list of bg_jobs objects that was passed in.
- """
- ret, timeout_error = 0, False
- for bg_job in bg_jobs:
- bg_job.output_prepare(StringIO.StringIO(), StringIO.StringIO())
-
- try:
-        # We are holding ends of the stdin/stdout pipes, hence we need to
-        # be sure to close those fds no matter what
- start_time = time.time()
- timeout_error = _wait_for_commands(bg_jobs, start_time, timeout)
-
- for bg_job in bg_jobs:
- # Process stdout and stderr
-            bg_job.process_output(stdout=True, final_read=True)
-            bg_job.process_output(stdout=False, final_read=True)
- finally:
- # close our ends of the pipes to the sp no matter what
- for bg_job in bg_jobs:
- bg_job.cleanup()
-
- if timeout_error:
-        # TODO: This needs to be fixed to better represent what happens when
-        # running in parallel. However this is backwards compatible, so it
-        # will do for the time being.
- raise error.CmdError(bg_jobs[0].command, bg_jobs[0].result,
- "Command(s) did not complete within %d seconds"
- % timeout)
-
- return bg_jobs
-
-
-def _wait_for_commands(bg_jobs, start_time, timeout):
- # This returns True if it must return due to a timeout, otherwise False.
-
- # To check for processes which terminate without producing any output
- # a 1 second timeout is used in select.
- SELECT_TIMEOUT = 1
-
- read_list = []
- write_list = []
- reverse_dict = {}
-
- for bg_job in bg_jobs:
- read_list.append(bg_job.sp.stdout)
- read_list.append(bg_job.sp.stderr)
- reverse_dict[bg_job.sp.stdout] = (bg_job, True)
- reverse_dict[bg_job.sp.stderr] = (bg_job, False)
- if bg_job.string_stdin is not None:
- write_list.append(bg_job.sp.stdin)
- reverse_dict[bg_job.sp.stdin] = bg_job
-
- if timeout:
- stop_time = start_time + timeout
- time_left = stop_time - time.time()
- else:
- time_left = None # so that select never times out
-
- while not timeout or time_left > 0:
- # select will return when we may write to stdin or when there is
- # stdout/stderr output we can read (including when it is
- # EOF, that is the process has terminated).
- read_ready, write_ready, _ = select.select(read_list, write_list, [],
- SELECT_TIMEOUT)
-
- # os.read() has to be used instead of
- # subproc.stdout.read() which will otherwise block
- for file_obj in read_ready:
- bg_job, is_stdout = reverse_dict[file_obj]
- bg_job.process_output(is_stdout)
-
- for file_obj in write_ready:
- # we can write PIPE_BUF bytes without blocking
- # POSIX requires PIPE_BUF is >= 512
- bg_job = reverse_dict[file_obj]
- file_obj.write(bg_job.string_stdin[:512])
- bg_job.string_stdin = bg_job.string_stdin[512:]
- # no more input data, close stdin, remove it from the select set
- if not bg_job.string_stdin:
- file_obj.close()
- write_list.remove(file_obj)
- del reverse_dict[file_obj]
-
- all_jobs_finished = True
- for bg_job in bg_jobs:
- if bg_job.result.exit_status is not None:
- continue
-
- bg_job.result.exit_status = bg_job.sp.poll()
- if bg_job.result.exit_status is not None:
- # process exited, remove its stdout/stdin from the select set
- bg_job.result.duration = time.time() - start_time
- read_list.remove(bg_job.sp.stdout)
- read_list.remove(bg_job.sp.stderr)
- del reverse_dict[bg_job.sp.stdout]
- del reverse_dict[bg_job.sp.stderr]
- else:
- all_jobs_finished = False
-
- if all_jobs_finished:
- return False
-
- if timeout:
- time_left = stop_time - time.time()
-
- # Kill all processes which did not complete prior to timeout
- for bg_job in bg_jobs:
- if bg_job.result.exit_status is not None:
- continue
-
- logging.warn('run process timeout (%s) fired on: %s', timeout,
- bg_job.command)
- nuke_subprocess(bg_job.sp)
- bg_job.result.exit_status = bg_job.sp.poll()
- bg_job.result.duration = time.time() - start_time
-
- return True
-
-
-def pid_is_alive(pid):
- """
- True if process pid exists and is not yet stuck in Zombie state.
- Zombies are impossible to move between cgroups, etc.
-    pid may be an integer or a string containing an integer.
- """
- path = '/proc/%s/stat' % pid
-
- try:
- stat = read_one_line(path)
- except IOError:
- if not os.path.exists(path):
- # file went away
- return False
- raise
-
- return stat.split()[2] != 'Z'
-
-
-def signal_pid(pid, sig):
- """
- Sends a signal to a process id. Returns True if the process terminated
- successfully, False otherwise.
- """
- try:
- os.kill(pid, sig)
- except OSError:
- # The process may have died before we could kill it.
- pass
-
- for i in range(5):
- if not pid_is_alive(pid):
- return True
- time.sleep(1)
-
- # The process is still alive
- return False
-
-
-def nuke_subprocess(subproc):
- # check if the subprocess is still alive, first
- if subproc.poll() is not None:
- return subproc.poll()
-
- # the process has not terminated within timeout,
- # kill it via an escalating series of signals.
- signal_queue = [signal.SIGTERM, signal.SIGKILL]
- for sig in signal_queue:
- signal_pid(subproc.pid, sig)
- if subproc.poll() is not None:
- return subproc.poll()
-
-
-def nuke_pid(pid, signal_queue=(signal.SIGTERM, signal.SIGKILL)):
- # the process has not terminated within timeout,
- # kill it via an escalating series of signals.
- for sig in signal_queue:
- if signal_pid(pid, sig):
- return
-
- # no signal successfully terminated the process
- raise error.AutoservRunError('Could not kill %d' % pid, None)
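-
-# Illustrative sketch (hypothetical pid): the escalation chain can be
-# widened, e.g. to try a gentler SIGINT before the default pair:
-#   nuke_pid(12345, signal_queue=(signal.SIGINT, signal.SIGTERM,
-#                                 signal.SIGKILL))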
-
-
-def system(command, timeout=None, ignore_status=False):
- """
- Run a command
-
- @param timeout: timeout in seconds
-    @param ignore_status: if ignore_status=False, raise an exception if the
-            command's exit code is non-zero;
-            if ignore_status=True, return the exit code.
-
- @return exit status of command
- (note, this will always be zero unless ignore_status=True)
- """
- return run(command, timeout=timeout, ignore_status=ignore_status,
- stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS).exit_status
-
-
-def system_parallel(commands, timeout=None, ignore_status=False):
- """This function returns a list of exit statuses for the respective
- list of commands."""
-    return [result.exit_status for result in
-            run_parallel(commands, timeout=timeout, ignore_status=ignore_status,
-                         stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)]
-
-
-def system_output(command, timeout=None, ignore_status=False,
- retain_output=False, args=()):
- """
- Run a command and return the stdout output.
-
- @param command: command string to execute.
- @param timeout: time limit in seconds before attempting to kill the
- running process. The function will take a few seconds longer
- than 'timeout' to complete if it has to kill the process.
- @param ignore_status: do not raise an exception, no matter what the exit
- code of the command is.
- @param retain_output: set to True to make stdout/stderr of the command
- output to be also sent to the logging system
-    @param args: sequence of strings to append to the command; each element
-            is shell-escaped, wrapped in double quotes and passed to the
-            command as a separate argument
-
- @return a string with the stdout output of the command.
- """
- if retain_output:
- out = run(command, timeout=timeout, ignore_status=ignore_status,
- stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS,
- args=args).stdout
- else:
- out = run(command, timeout=timeout, ignore_status=ignore_status,
- args=args).stdout
- if out[-1:] == '\n':
- out = out[:-1]
- return out
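-
-# Illustrative usage: the single trailing newline is stripped, so the
-# output can be compared directly against an expected string:
-#   if system_output('uname -m') == 'x86_64':
-#       logging.info('running on a 64-bit machine')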
-
-
-def system_output_parallel(commands, timeout=None, ignore_status=False,
- retain_output=False):
- if retain_output:
- out = [bg_job.stdout for bg_job
- in run_parallel(commands, timeout=timeout,
- ignore_status=ignore_status,
- stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)]
- else:
- out = [bg_job.stdout for bg_job in run_parallel(commands,
- timeout=timeout, ignore_status=ignore_status)]
-    # Strip a single trailing newline from each command's output.
-    out = [x[:-1] if x[-1:] == '\n' else x for x in out]
- return out
-
-
-def strip_unicode(input):
- if type(input) == list:
- return [strip_unicode(i) for i in input]
- elif type(input) == dict:
- output = {}
- for key in input.keys():
- output[str(key)] = strip_unicode(input[key])
- return output
- elif type(input) == unicode:
- return str(input)
- else:
- return input
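-
-# Illustrative example: containers are converted recursively, e.g.
-#   strip_unicode({u'key': [u'a', 1]}) == {'key': ['a', 1]}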
-
-
-def get_cpu_percentage(function, *args, **dargs):
- """Returns a tuple containing the CPU% and return value from function call.
-
- This function calculates the usage time by taking the difference of
- the user and system times both before and after the function call.
- """
- child_pre = resource.getrusage(resource.RUSAGE_CHILDREN)
- self_pre = resource.getrusage(resource.RUSAGE_SELF)
- start = time.time()
- to_return = function(*args, **dargs)
- elapsed = time.time() - start
- self_post = resource.getrusage(resource.RUSAGE_SELF)
- child_post = resource.getrusage(resource.RUSAGE_CHILDREN)
-
- # Calculate CPU Percentage
- s_user, s_system = [a - b for a, b in zip(self_post, self_pre)[:2]]
- c_user, c_system = [a - b for a, b in zip(child_post, child_pre)[:2]]
- cpu_percent = (s_user + c_user + s_system + c_system) / elapsed
-
- return cpu_percent, to_return
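-
-# Illustrative sketch (hypothetical path): any callable can be wrapped, and
-# a value of 1.0 corresponds to one fully utilized CPU for the duration:
-#   cpu_usage, status = get_cpu_percentage(
-#       system, 'gzip -9 < /tmp/big.img > /dev/null', ignore_status=True)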
-
-
-class SystemLoad(object):
- """
-    Monitor system and/or process statistics and return average load values.
- """
- def __init__(self, pids, advanced=False, time_step=0.1, cpu_cont=False,
- use_log=False):
- """
-        @param pids: List of pids to be monitored. pid == 0 means the whole
-          system will be monitored.
-        @param advanced: If True, also monitor the system irq and softirq
-          counts and, per process, the minor and major page fault counts.
-        @param time_step: Time step for continuous monitoring.
-        @param cpu_cont: If True, monitor CPU load continuously.
-        @param use_log: If True, every measurement is logged for dump().
- """
- self.pids = []
- self.stats = {}
- for pid in pids:
- if pid == 0:
- cpu = FileFieldMonitor("/proc/stat",
- [("cpu", 0), # User Time
- ("cpu", 2), # System Time
- ("intr", 0), # IRQ Count
- ("softirq", 0)], # Soft IRQ Count
- True,
- cpu_cont,
- use_log,
- " +",
- time_step)
- mem = FileFieldMonitor("/proc/meminfo",
- [("MemTotal:", 0), # Mem Total
- ("MemFree:", 0), # Mem Free
- ("Buffers:", 0), # Buffers
- ("Cached:", 0)], # Cached
- False,
- True,
- use_log,
- " +",
- time_step)
- self.stats[pid] = ["TOTAL", cpu, mem]
- self.pids.append(pid)
- else:
- name = ""
- if (type(pid) is int):
- self.pids.append(pid)
- name = get_process_name(pid)
- else:
- self.pids.append(pid[0])
- name = pid[1]
-
- cpu = FileFieldMonitor("/proc/%d/stat" %
- self.pids[-1],
- [("", 13), # User Time
- ("", 14), # System Time
- ("", 9), # Minority Page Fault
- ("", 11)], # Majority Page Fault
- True,
- cpu_cont,
- use_log,
- " +",
- time_step)
- mem = FileFieldMonitor("/proc/%d/status" %
- self.pids[-1],
- [("VmSize:", 0), # Virtual Memory Size
- ("VmRSS:", 0), # Resident Set Size
- ("VmPeak:", 0), # Peak VM Size
- ("VmSwap:", 0)], # VM in Swap
- False,
- True,
- use_log,
- " +",
- time_step)
- self.stats[self.pids[-1]] = [name, cpu, mem]
-
- self.advanced = advanced
-
-
- def __str__(self):
- """
-        Format the status of all monitored values for printing.
- """
- out = ""
- for pid in self.pids:
- for stat in self.stats[pid][1:]:
- out += str(stat.get_status()) + "\n"
- return out
-
-
- def start(self, pids=[]):
- """
-        Start monitoring process and system usage.
-        @param pids: List of PIDs to start monitoring. Use pids=[] to start
-            all defined PIDs.
- """
- if pids == []:
- pids = self.pids
-
- for pid in pids:
- for stat in self.stats[pid][1:]:
- stat.start()
-
-
- def stop(self, pids=[]):
- """
-        Stop monitoring process and system usage.
-        @param pids: List of PIDs to stop monitoring. Use pids=[] to stop
-            all defined PIDs.
- """
- if pids == []:
- pids = self.pids
-
- for pid in pids:
- for stat in self.stats[pid][1:]:
- stat.stop()
-
-
- def dump(self, pids=[]):
- """
- Get the status of monitoring.
-        @param pids: List of PIDs to report on. Use pids=[] to report on
-            all defined PIDs.
- @return:
- tuple([cpu load], [memory load]):
- ([(PID1, (PID1_cpu_meas)), (PID2, (PID2_cpu_meas)), ...],
- [(PID1, (PID1_mem_meas)), (PID2, (PID2_mem_meas)), ...])
-
- PID1_cpu_meas:
- average_values[], test_time, cont_meas_values[[]], time_step
- PID1_mem_meas:
- average_values[], test_time, cont_meas_values[[]], time_step
-            where average_values[] are the measured values (mem_free, swap,
-            ...) described in SystemLoad.__init__() / FileFieldMonitor, and
-            cont_meas_values[[]] is the list of average_values sampled at
-            each time step.
- """
- if pids == []:
- pids = self.pids
-
- cpus = []
- memory = []
- for pid in pids:
- stat = (pid, self.stats[pid][1].get_status())
- cpus.append(stat)
- for pid in pids:
- stat = (pid, self.stats[pid][2].get_status())
- memory.append(stat)
-
- return (cpus, memory)
-
-
- def get_cpu_status_string(self, pids=[]):
- """
-        Format the CPU status as a text table.
-        @param pids: List of PIDs to report on. Use pids=[] to report on
-          all defined PIDs.
-        @return: CPU status formatted as a text table.
- """
- if pids == []:
- pids = self.pids
-
- headers = ["NAME",
- ("%7s") % "PID",
- ("%5s") % "USER",
- ("%5s") % "SYS",
- ("%5s") % "SUM"]
- if self.advanced:
- headers.extend(["MINFLT/IRQC",
- "MAJFLT/SOFTIRQ"])
- headers.append(("%11s") % "TIME")
- textstatus = []
- for pid in pids:
- stat = self.stats[pid][1].get_status()
- time = stat[1]
- stat = stat[0]
- textstatus.append(["%s" % self.stats[pid][0],
- "%7s" % pid,
- "%4.0f%%" % (stat[0] / time),
- "%4.0f%%" % (stat[1] / time),
- "%4.0f%%" % ((stat[0] + stat[1]) / time),
- "%10.3fs" % time])
- if self.advanced:
- textstatus[-1].insert(-1, "%11d" % stat[2])
- textstatus[-1].insert(-1, "%14d" % stat[3])
-
- return matrix_to_string(textstatus, tuple(headers))
-
-
- def get_mem_status_string(self, pids=[]):
- """
-        Format the memory status as a text table.
-        @param pids: List of PIDs to report on. Use pids=[] to report on
-          all defined PIDs.
-        @return: Memory status formatted as a text table.
- """
- if pids == []:
- pids = self.pids
-
- headers = ["NAME",
- ("%7s") % "PID",
- ("%8s") % "TOTAL/VMSIZE",
- ("%8s") % "FREE/VMRSS",
- ("%8s") % "BUFFERS/VMPEAK",
- ("%8s") % "CACHED/VMSWAP",
- ("%11s") % "TIME"]
- textstatus = []
- for pid in pids:
- stat = self.stats[pid][2].get_status()
- time = stat[1]
- stat = stat[0]
- textstatus.append(["%s" % self.stats[pid][0],
- "%7s" % pid,
- "%10dMB" % (stat[0] / 1024),
- "%8dMB" % (stat[1] / 1024),
- "%12dMB" % (stat[2] / 1024),
- "%11dMB" % (stat[3] / 1024),
- "%10.3fs" % time])
-
- return matrix_to_string(textstatus, tuple(headers))
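-
-# Illustrative sketch (hypothetical pids): pass 0 to monitor the whole
-# system alongside individual processes; a (pid, name) tuple supplies an
-# explicit name:
-#   monitor = SystemLoad([0, (1234, 'my_daemon')])
-#   monitor.start()
-#   ...  # run the workload under test
-#   monitor.stop()
-#   print monitor.get_cpu_status_string()
-#   print monitor.get_mem_status_string()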
-
-
-def get_arch(run_function=run):
- """
- Get the hardware architecture of the machine.
- run_function is used to execute the commands. It defaults to
- utils.run() but a custom method (if provided) should be of the
-    same schema as utils.run: it should return a CmdResult object and
-    raise a CmdError exception on failure.
- """
- arch = run_function('/bin/uname -m').stdout.rstrip()
- if re.match(r'i\d86$', arch):
- arch = 'i386'
- return arch
-
-
-def get_num_logical_cpus_per_socket(run_function=run):
- """
-    Get the number of logical cores (including hyperthreading) per CPU
-    socket.
- run_function is used to execute the commands. It defaults to
- utils.run() but a custom method (if provided) should be of the
-    same schema as utils.run: it should return a CmdResult object and
-    raise a CmdError exception on failure.
- """
- siblings = run_function('grep "^siblings" /proc/cpuinfo').stdout.rstrip()
- num_siblings = map(int,
- re.findall(r'^siblings\s*:\s*(\d+)\s*$',
- siblings, re.M))
- if len(num_siblings) == 0:
- raise error.TestError('Unable to find siblings info in /proc/cpuinfo')
- if min(num_siblings) != max(num_siblings):
- raise error.TestError('Number of siblings differ %r' %
- num_siblings)
- return num_siblings[0]
-
-
-def merge_trees(src, dest):
- """
- Merges a source directory tree at 'src' into a destination tree at
-    'dest'. If a path is a file in both trees then the file in the source
- tree is APPENDED to the one in the destination tree. If a path is
- a directory in both trees then the directories are recursively merged
- with this function. In any other case, the function will skip the
- paths that cannot be merged (instead of failing).
- """
- if not os.path.exists(src):
- return # exists only in dest
- elif not os.path.exists(dest):
- if os.path.isfile(src):
- shutil.copy2(src, dest) # file only in src
- else:
- shutil.copytree(src, dest, symlinks=True) # dir only in src
- return
- elif os.path.isfile(src) and os.path.isfile(dest):
- # src & dest are files in both trees, append src to dest
- destfile = open(dest, "a")
- try:
- srcfile = open(src)
- try:
- destfile.write(srcfile.read())
- finally:
- srcfile.close()
- finally:
- destfile.close()
- elif os.path.isdir(src) and os.path.isdir(dest):
- # src & dest are directories in both trees, so recursively merge
- for name in os.listdir(src):
- merge_trees(os.path.join(src, name), os.path.join(dest, name))
- else:
- # src & dest both exist, but are incompatible
- return
-
-
-class CmdResult(object):
- """
- Command execution result.
-
- command: String containing the command line itself
- exit_status: Integer exit code of the process
- stdout: String containing stdout of the process
- stderr: String containing stderr of the process
- duration: Elapsed wall clock time running the process
- """
-
-
- def __init__(self, command="", stdout="", stderr="",
- exit_status=None, duration=0):
- self.command = command
- self.exit_status = exit_status
- self.stdout = stdout
- self.stderr = stderr
- self.duration = duration
-
-
- def __repr__(self):
- wrapper = textwrap.TextWrapper(width = 78,
- initial_indent="\n ",
- subsequent_indent=" ")
-
- stdout = self.stdout.rstrip()
- if stdout:
- stdout = "\nstdout:\n%s" % stdout
-
- stderr = self.stderr.rstrip()
- if stderr:
- stderr = "\nstderr:\n%s" % stderr
-
- return ("* Command: %s\n"
- "Exit status: %s\n"
- "Duration: %s\n"
- "%s"
- "%s"
- % (wrapper.fill(self.command), self.exit_status,
- self.duration, stdout, stderr))
-
-
-class run_randomly:
- def __init__(self, run_sequentially=False):
- # Run sequentially is for debugging control files
- self.test_list = []
- self.run_sequentially = run_sequentially
-
-
- def add(self, *args, **dargs):
- test = (args, dargs)
- self.test_list.append(test)
-
-
- def run(self, fn):
- while self.test_list:
- test_index = random.randint(0, len(self.test_list)-1)
- if self.run_sequentially:
- test_index = 0
- (args, dargs) = self.test_list.pop(test_index)
- fn(*args, **dargs)
-
-
-def import_site_module(path, module, dummy=None, modulefile=None):
- """
- Try to import the site specific module if it exists.
-
- @param path full filename of the source file calling this (ie __file__)
- @param module full module name
- @param dummy dummy value to return in case there is no symbol to import
- @param modulefile module filename
-
- @return site specific module or dummy
-
-    @raises ImportError if the site file exists but the import fails
- """
- short_module = module[module.rfind(".") + 1:]
-
- if not modulefile:
- modulefile = short_module + ".py"
-
- if os.path.exists(os.path.join(os.path.dirname(path), modulefile)):
- return __import__(module, {}, {}, [short_module])
- return dummy
-
-
-def import_site_symbol(path, module, name, dummy=None, modulefile=None):
- """
- Try to import site specific symbol from site specific file if it exists
-
- @param path full filename of the source file calling this (ie __file__)
- @param module full module name
- @param name symbol name to be imported from the site file
- @param dummy dummy value to return in case there is no symbol to import
- @param modulefile module filename
-
- @return site specific symbol or dummy
-
-    @raises ImportError if the site file exists but the import fails
- """
- module = import_site_module(path, module, modulefile=modulefile)
- if not module:
- return dummy
-
- # special unique value to tell us if the symbol can't be imported
- cant_import = object()
-
- obj = getattr(module, name, cant_import)
- if obj is cant_import:
- logging.debug("unable to import site symbol '%s', using non-site "
- "implementation", name)
- return dummy
-
- return obj
-
-
-def import_site_class(path, module, classname, baseclass, modulefile=None):
- """
- Try to import site specific class from site specific file if it exists
-
- Args:
- path: full filename of the source file calling this (ie __file__)
- module: full module name
- classname: class name to be loaded from site file
- baseclass: base class object to return when no site file present or
- to mixin when site class exists but is not inherited from baseclass
- modulefile: module filename
-
- Returns: baseclass if site specific class does not exist, the site specific
- class if it exists and is inherited from baseclass or a mixin of the
- site specific class and baseclass when the site specific class exists
- and is not inherited from baseclass
-
-    Raises: ImportError if the site file exists but the import fails
- """
-
- res = import_site_symbol(path, module, classname, None, modulefile)
- if res:
- if not issubclass(res, baseclass):
- # if not a subclass of baseclass then mix in baseclass with the
- # site specific class object and return the result
- res = type(classname, (res, baseclass), {})
- else:
- res = baseclass
-
- return res
-
-
-def import_site_function(path, module, funcname, dummy, modulefile=None):
- """
- Try to import site specific function from site specific file if it exists
-
- Args:
- path: full filename of the source file calling this (ie __file__)
- module: full module name
- funcname: function name to be imported from site file
- dummy: dummy function to return in case there is no function to import
- modulefile: module filename
-
- Returns: site specific function object or dummy
-
-    Raises: ImportError if the site file exists but the import fails
- """
-
- return import_site_symbol(path, module, funcname, dummy, modulefile)
-
-
-def _get_pid_path(program_name):
- my_path = os.path.dirname(__file__)
- return os.path.abspath(os.path.join(my_path, "..", "..",
- "%s.pid" % program_name))
-
-
-def write_pid(program_name):
- """
- Try to drop <program_name>.pid in the main autotest directory.
-
- Args:
- program_name: prefix for file name
- """
- pidfile = open(_get_pid_path(program_name), "w")
- try:
- pidfile.write("%s\n" % os.getpid())
- finally:
- pidfile.close()
-
-
-def delete_pid_file_if_exists(program_name):
- """
- Tries to remove <program_name>.pid from the main autotest directory.
- """
- pidfile_path = _get_pid_path(program_name)
-
- try:
- os.remove(pidfile_path)
- except OSError:
- if not os.path.exists(pidfile_path):
- return
- raise
-
-
-def get_pid_from_file(program_name):
- """
- Reads the pid from <program_name>.pid in the autotest directory.
-
- @param program_name the name of the program
- @return the pid if the file exists, None otherwise.
- """
- pidfile_path = _get_pid_path(program_name)
- if not os.path.exists(pidfile_path):
- return None
-
-    pidfile = open(pidfile_path, 'r')
-
- try:
- try:
- pid = int(pidfile.readline())
- except IOError:
- if not os.path.exists(pidfile_path):
- return None
- raise
- finally:
- pidfile.close()
-
- return pid
-
-
-def get_process_name(pid):
- """
- Get process name from PID.
- @param pid: PID of process.
- """
- return get_field(read_file("/proc/%d/stat" % pid), 1)[1:-1]
-
-
-def program_is_alive(program_name):
- """
- Checks if the process is alive and not in Zombie state.
-
- @param program_name the name of the program
- @return True if still alive, False otherwise
- """
- pid = get_pid_from_file(program_name)
- if pid is None:
- return False
- return pid_is_alive(pid)
-
-
-def signal_program(program_name, sig=signal.SIGTERM):
- """
- Sends a signal to the process listed in <program_name>.pid
-
- @param program_name the name of the program
- @param sig signal to send
- """
- pid = get_pid_from_file(program_name)
- if pid:
- signal_pid(pid, sig)
-
-
-def get_relative_path(path, reference):
- """Given 2 absolute paths "path" and "reference", compute the path of
- "path" as relative to the directory "reference".
-
- @param path the absolute path to convert to a relative path
- @param reference an absolute directory path to which the relative
- path will be computed
- """
- # normalize the paths (remove double slashes, etc)
- assert(os.path.isabs(path))
- assert(os.path.isabs(reference))
-
- path = os.path.normpath(path)
- reference = os.path.normpath(reference)
-
- # we could use os.path.split() but it splits from the end
- path_list = path.split(os.path.sep)[1:]
- ref_list = reference.split(os.path.sep)[1:]
-
- # find the longest leading common path
- for i in xrange(min(len(path_list), len(ref_list))):
- if path_list[i] != ref_list[i]:
- # decrement i so when exiting this loop either by no match or by
- # end of range we are one step behind
- i -= 1
- break
- i += 1
- # drop the common part of the paths, not interested in that anymore
- del path_list[:i]
-
- # for each uncommon component in the reference prepend a ".."
- path_list[:0] = ['..'] * (len(ref_list) - i)
-
- return os.path.join(*path_list)
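-
-# Illustrative example (hypothetical paths):
-#   get_relative_path('/usr/local/bin', '/usr/share/doc') == '../../local/bin'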
-
-
-def sh_escape(command):
- """
- Escape special characters from a command so that it can be passed
- as a double quoted (" ") string in a (ba)sh command.
-
- Args:
- command: the command string to escape.
-
- Returns:
-        The escaped command string. The required enclosing double
- quotes are NOT added and so should be added at some point by
- the caller.
-
- See also: http://www.tldp.org/LDP/abs/html/escapingsection.html
- """
- command = command.replace("\\", "\\\\")
- command = command.replace("$", r'\$')
- command = command.replace('"', r'\"')
- command = command.replace('`', r'\`')
- return command
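-
-# Illustrative example: the caller supplies the surrounding double quotes:
-#   sh_escape('price is $5 `today`') == r'price is \$5 \`today\`'
-#   cmd = 'echo "%s"' % sh_escape(untrusted_value)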
-
-
-def configure(extra=None, configure='./configure'):
- """
- Run configure passing in the correct host, build, and target options.
-
- @param extra: extra command line arguments to pass to configure
- @param configure: which configure script to use
- """
- args = []
- if 'CHOST' in os.environ:
- args.append('--host=' + os.environ['CHOST'])
- if 'CBUILD' in os.environ:
- args.append('--build=' + os.environ['CBUILD'])
- if 'CTARGET' in os.environ:
- args.append('--target=' + os.environ['CTARGET'])
- if extra:
- args.append(extra)
-
- system('%s %s' % (configure, ' '.join(args)))
-
-
-def make(extra='', make='make', timeout=None, ignore_status=False):
- """
- Run make, adding MAKEOPTS to the list of options.
-
- @param extra: extra command line arguments to pass to make.
- """
- cmd = '%s %s %s' % (make, os.environ.get('MAKEOPTS', ''), extra)
- return system(cmd, timeout=timeout, ignore_status=ignore_status)
-
-
-def compare_versions(ver1, ver2):
- """Version number comparison between ver1 and ver2 strings.
-
- >>> compare_tuple("1", "2")
- -1
- >>> compare_tuple("foo-1.1", "foo-1.2")
- -1
- >>> compare_tuple("1.2", "1.2a")
- -1
- >>> compare_tuple("1.2b", "1.2a")
- 1
- >>> compare_tuple("1.3.5.3a", "1.3.5.3b")
- -1
-
- Args:
- ver1: version string
- ver2: version string
-
- Returns:
- int: 1 if ver1 > ver2
- 0 if ver1 == ver2
- -1 if ver1 < ver2
- """
- ax = re.split('[.-]', ver1)
- ay = re.split('[.-]', ver2)
- while len(ax) > 0 and len(ay) > 0:
- cx = ax.pop(0)
- cy = ay.pop(0)
- maxlen = max(len(cx), len(cy))
- c = cmp(cx.zfill(maxlen), cy.zfill(maxlen))
- if c != 0:
- return c
- return cmp(len(ax), len(ay))
-
-
-def args_to_dict(args):
- """Convert autoserv extra arguments in the form of key=val or key:val to a
-    dictionary. Each argument key is lowercased before being used as the
-    dictionary key.
-
- Args:
- args - list of autoserv extra arguments.
-
- Returns:
- dictionary
- """
- arg_re = re.compile(r'(\w+)[:=](.*)$')
-    args_dict = {}
-    for arg in args:
-        match = arg_re.match(arg)
-        if match:
-            args_dict[match.group(1).lower()] = match.group(2)
-        else:
-            logging.warning("args_to_dict: argument '%s' doesn't match "
-                            "'%s' pattern. Ignored." % (arg, arg_re.pattern))
-    return args_dict
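-
-# Illustrative example (hypothetical arguments):
-#   args_to_dict(['IMAGE=foo.bin', 'retries:3'])
-#   # -> {'image': 'foo.bin', 'retries': '3'}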
-
-
-def get_unused_port():
- """
- Finds a semi-random available port. A race condition is still
- possible after the port number is returned, if another process
- happens to bind it.
-
- Returns:
- A port number that is unused on both TCP and UDP.
- """
-
- def try_bind(port, socket_type, socket_proto):
- s = socket.socket(socket.AF_INET, socket_type, socket_proto)
- try:
- try:
- s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- s.bind(('', port))
- return s.getsockname()[1]
- except socket.error:
- return None
- finally:
- s.close()
-
-    # On the 2.6 kernel, calling try_bind() on a UDP socket returns the
-    # same port over and over. So always try TCP first.
- while True:
- # Ask the OS for an unused port.
- port = try_bind(0, socket.SOCK_STREAM, socket.IPPROTO_TCP)
- # Check if this port is unused on the other protocol.
- if port and try_bind(port, socket.SOCK_DGRAM, socket.IPPROTO_UDP):
- return port
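-
-# Illustrative usage: grab a free port for a scratch server, bearing in
-# mind the race window documented above:
-#   port = get_unused_port()
-#   server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-#   server.bind(('127.0.0.1', port))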
+from autotest_lib.client.common_lib.base_utils import *
+if os.path.exists(os.path.join(os.path.dirname(__file__), 'site_utils.py')):
+ from autotest_lib.client.common_lib.site_utils import *