# Copyright (c) 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Convenience functions for use by tests or whomever.

There's no really good way to do this, as this isn't a class we can do
inheritance with, just a collection of static methods.
"""

# pylint: disable=missing-docstring

import StringIO
import collections
import datetime
import errno
import inspect
import itertools
import logging
import os
import pickle
import Queue
import random
import re
import resource
import select
import shutil
import signal
import socket
import string
import struct
import subprocess
import textwrap
import threading
import time
import urllib2
import urlparse
import uuid
import warnings

try:
    import hashlib
except ImportError:
    import md5
    import sha

import common

from autotest_lib.client.common_lib import env
from autotest_lib.client.common_lib import error
from autotest_lib.client.common_lib import global_config
from autotest_lib.client.common_lib import logging_manager
from autotest_lib.client.common_lib import metrics_mock_class
from autotest_lib.client.cros import constants

# pylint: disable=wildcard-import
from autotest_lib.client.common_lib.lsbrelease_utils import *


def deprecated(func):
    """This is a decorator which can be used to mark functions as deprecated.
    It will result in a warning being emitted when the function is used."""
    def new_func(*args, **dargs):
        warnings.warn("Call to deprecated function %s." % func.__name__,
                      category=DeprecationWarning)
        return func(*args, **dargs)
    new_func.__name__ = func.__name__
    new_func.__doc__ = func.__doc__
    new_func.__dict__.update(func.__dict__)
    return new_func
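
# Example (illustrative only, not part of the original module): applying the
# decorator above makes callers of a retired helper emit a DeprecationWarning,
# which custom_warning_handler below routes into logging.warning:
#
#     @deprecated
#     def old_helper():
#         return 42
#
#     old_helper()  # logs "Call to deprecated function old_helper."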


class _NullStream(object):
    def write(self, data):
        pass


    def flush(self):
        pass


TEE_TO_LOGS = object()
_the_null_stream = _NullStream()

DEVNULL = object()

DEFAULT_STDOUT_LEVEL = logging.DEBUG
DEFAULT_STDERR_LEVEL = logging.ERROR

# prefixes for logging stdout/stderr of commands
STDOUT_PREFIX = '[stdout] '
STDERR_PREFIX = '[stderr] '

# safe characters for the shell (do not need quoting)
SHELL_QUOTING_WHITELIST = frozenset(string.ascii_letters +
                                    string.digits +
                                    '_-+=')

def custom_warning_handler(message, category, filename, lineno, file=None,
                           line=None):
    """Custom handler to log at the WARNING error level. Ignores |file|."""
    logging.warning(warnings.formatwarning(message, category, filename, lineno,
                                           line))

warnings.showwarning = custom_warning_handler

def get_stream_tee_file(stream, level, prefix=''):
    if stream is None:
        return _the_null_stream
    if stream is DEVNULL:
        return None
    if stream is TEE_TO_LOGS:
        return logging_manager.LoggingFile(level=level, prefix=prefix)
    return stream


def _join_with_nickname(base_string, nickname):
    if nickname:
        return '%s BgJob "%s" ' % (base_string, nickname)
    return base_string


# TODO: Cleanup and possibly eliminate |unjoinable|, which is only used in our
# master-ssh connection process, while fixing underlying
# semantics problem in BgJob. See crbug.com/279312
class BgJob(object):
    def __init__(self, command, stdout_tee=None, stderr_tee=None, verbose=True,
                 stdin=None, stdout_level=DEFAULT_STDOUT_LEVEL,
                 stderr_level=DEFAULT_STDERR_LEVEL, nickname=None,
                 unjoinable=False, env=None, extra_paths=None):
        """Create and start a new BgJob.

        This constructor creates a new BgJob, and uses Popen to start a new
        subprocess with given command. It returns without blocking on execution
        of the subprocess.

        After starting a new BgJob, use output_prepare to connect the process's
        stdout and stderr pipes to the stream of your choice.

        When the job is running, the job's output streams are only read from
        when process_output is called.

        @param command: command to be executed in new subprocess. May be either
                        a list, or a string (in which case Popen will be called
                        with shell=True)
        @param stdout_tee: (Optional) a file like object, TEE_TO_LOGS or
                           DEVNULL.
                           If not given, after finishing the process, the
                           stdout data from subprocess is available in
                           result.stdout.
                           If a file like object is given, in process_output(),
                           the stdout data from the subprocess will be handled
                           by the given file like object.
                           If TEE_TO_LOGS is given, in process_output(), the
                           stdout data from the subprocess will be handled by
                           the standard logging_manager.
                           If DEVNULL is given, the stdout of the subprocess
                           will be just discarded. In addition, even after
                           cleanup(), result.stdout will be just an empty
                           string (unlike the case where stdout_tee is not
                           given).
        @param stderr_tee: Same as stdout_tee, but for stderr.
        @param verbose: Boolean, make BgJob logging more verbose.
        @param stdin: Stream object, will be passed to Popen as the new
                      process's stdin.
        @param stdout_level: A logging level value. If stdout_tee was set to
                             TEE_TO_LOGS, sets the level that tee'd
                             stdout output will be logged at. Ignored
                             otherwise.
        @param stderr_level: Same as stdout_level, but for stderr.
        @param nickname: Optional string, to be included in logging messages
        @param unjoinable: Optional bool, default False.
                           This should be True for BgJobs that run in the
                           background and will never be joined with
                           join_bg_jobs(), such as the master-ssh connection.
                           Instead, it is the caller's responsibility to
                           terminate the subprocess correctly, e.g. by calling
                           nuke_subprocess().
                           As a result, calling join_bg_jobs(),
                           process_output() or cleanup() will result in an
                           InvalidBgJobCall exception.
                           Also, |stdout_tee| and |stderr_tee| must be set to
                           DEVNULL, otherwise InvalidBgJobCall is raised.
        @param env: Dict containing environment variables used in subprocess.
        @param extra_paths: Optional string list, to be prepended to the PATH
                            env variable in env (or os.environ dict if env is
                            not specified).
        """
        self.command = command
        self.unjoinable = unjoinable
        if (unjoinable and (stdout_tee != DEVNULL or stderr_tee != DEVNULL)):
            raise error.InvalidBgJobCall(
                'stdout_tee and stderr_tee must be DEVNULL for '
                'unjoinable BgJob')
        self._stdout_tee = get_stream_tee_file(
                stdout_tee, stdout_level,
                prefix=_join_with_nickname(STDOUT_PREFIX, nickname))
        self._stderr_tee = get_stream_tee_file(
                stderr_tee, stderr_level,
                prefix=_join_with_nickname(STDERR_PREFIX, nickname))
        self.result = CmdResult(command)

        # allow for easy stdin input by string, we'll let subprocess create
        # a pipe for stdin input and we'll write to it in the wait loop
        if isinstance(stdin, basestring):
            self.string_stdin = stdin
            stdin = subprocess.PIPE
        else:
            self.string_stdin = None

        # Prepend extra_paths to env['PATH'] if necessary.
        if extra_paths:
            env = (os.environ if env is None else env).copy()
            oldpath = env.get('PATH')
            env['PATH'] = os.pathsep.join(
                    extra_paths + ([oldpath] if oldpath else []))

        if verbose:
            logging.debug("Running '%s'", command)

        if type(command) == list:
            shell = False
            executable = None
        else:
            shell = True
            executable = '/bin/bash'

        with open('/dev/null', 'w') as devnull:
            self.sp = subprocess.Popen(
                command,
                stdin=stdin,
                stdout=devnull if stdout_tee == DEVNULL else subprocess.PIPE,
                stderr=devnull if stderr_tee == DEVNULL else subprocess.PIPE,
                preexec_fn=self._reset_sigpipe,
                shell=shell, executable=executable,
                env=env, close_fds=True)

        self._cleanup_called = False
        self._stdout_file = (
            None if stdout_tee == DEVNULL else StringIO.StringIO())
        self._stderr_file = (
            None if stderr_tee == DEVNULL else StringIO.StringIO())

    def process_output(self, stdout=True, final_read=False):
        """Read from process's output stream, and write data to destinations.

        This function reads up to 1024 bytes from the background job's
        stdout or stderr stream, and writes the resulting data to the BgJob's
        output tee and to the stream set up in output_prepare.

        Warning: Calls to process_output will block on reads from the
        subprocess stream, and will block on writes to the configured
        destination stream.

        @param stdout: True = read and process data from job's stdout.
                       False = from stderr.
                       Default: True
        @param final_read: Do not read only 1024 bytes from stream. Instead,
                           read and process all data until end of the stream.

        """
        if self.unjoinable:
            raise error.InvalidBgJobCall('Cannot call process_output on '
                                         'a job with an unjoinable BgJob')
        if stdout:
            pipe, buf, tee = (
                self.sp.stdout, self._stdout_file, self._stdout_tee)
        else:
            pipe, buf, tee = (
                self.sp.stderr, self._stderr_file, self._stderr_tee)

        if not pipe:
            return

        if final_read:
            # read in all the data we can from pipe and then stop
            data = []
            while select.select([pipe], [], [], 0)[0]:
                data.append(os.read(pipe.fileno(), 1024))
                if len(data[-1]) == 0:
                    break
            data = "".join(data)
        else:
            # perform a single read
            data = os.read(pipe.fileno(), 1024)
        buf.write(data)
        tee.write(data)

    def cleanup(self):
        """Clean up after BgJob.

        Flush the stdout_tee and stderr_tee buffers, close the
        subprocess stdout and stderr buffers, and save data from
        the configured stdout and stderr destination streams to
        self.result. Duplicate calls are ignored with a warning.
        """
        if self.unjoinable:
            raise error.InvalidBgJobCall('Cannot call cleanup on '
                                         'a job with an unjoinable BgJob')
        if self._cleanup_called:
            logging.warning('BgJob [%s] received a duplicate call to '
                            'cleanup. Ignoring.', self.command)
            return
        try:
            if self.sp.stdout:
                self._stdout_tee.flush()
                self.sp.stdout.close()
                self.result.stdout = self._stdout_file.getvalue()

            if self.sp.stderr:
                self._stderr_tee.flush()
                self.sp.stderr.close()
                self.result.stderr = self._stderr_file.getvalue()
        finally:
            self._cleanup_called = True

    def _reset_sigpipe(self):
        if not env.IN_MOD_WSGI:
            signal.signal(signal.SIGPIPE, signal.SIG_DFL)


def ip_to_long(ip):
    # !L is a long in network byte order
    return struct.unpack('!L', socket.inet_aton(ip))[0]


def long_to_ip(number):
    # See above comment.
    return socket.inet_ntoa(struct.pack('!L', number))


def create_subnet_mask(bits):
    return (1 << 32) - (1 << 32-bits)


def format_ip_with_mask(ip, mask_bits):
    masked_ip = ip_to_long(ip) & create_subnet_mask(mask_bits)
    return "%s/%s" % (long_to_ip(masked_ip), mask_bits)
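
# Example (illustrative only, not part of the original module): the helpers
# above round-trip dotted-quad addresses through 32-bit integers, so
# format_ip_with_mask masks off the host bits before printing:
#
#     create_subnet_mask(24)                    # -> 0xffffff00
#     format_ip_with_mask('192.168.1.135', 24)  # -> '192.168.1.0/24'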


def normalize_hostname(alias):
    ip = socket.gethostbyname(alias)
    return socket.gethostbyaddr(ip)[0]


def get_ip_local_port_range():
    match = re.match(r'\s*(\d+)\s*(\d+)\s*$',
                     read_one_line('/proc/sys/net/ipv4/ip_local_port_range'))
    return (int(match.group(1)), int(match.group(2)))


def set_ip_local_port_range(lower, upper):
    write_one_line('/proc/sys/net/ipv4/ip_local_port_range',
                   '%d %d\n' % (lower, upper))


def read_one_line(filename):
    f = open(filename, 'r')
    try:
        return f.readline().rstrip('\n')
    finally:
        f.close()


def read_file(filename):
    f = open(filename)
    try:
        return f.read()
    finally:
        f.close()


def get_field(data, param, linestart="", sep=" "):
    """
    Parse data from string.
    @param data: Data to parse.
        example:
          data:
             cpu   324 345 34  5 345
             cpu0  34  11  34 34  33
             ^^^^
             start of line
             params 0   1  2  3   4
    @param param: Position of parameter after linestart marker.
    @param linestart: String with which the parameter line starts.
    @param sep: Regular expression used as the separator between parameters.
    """
    search = re.compile(r"(?<=^%s)\s*(.*)" % linestart, re.MULTILINE)
    find = search.search(data)
    if find != None:
        return re.split("%s" % sep, find.group(1))[param]
    else:
        print "There is no line which starts with %s in data." % linestart
        return None
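
# Example (illustrative only, not part of the original module): pulling a
# single column out of /proc/stat-style text with get_field; fields are
# split on the given separator after the matching line prefix:
#
#     data = 'cpu 324 345 34 5 345\ncpu0 34 11 34 34 33\n'
#     get_field(data, 1, linestart='cpu0')  # -> '11'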


def write_one_line(filename, line):
    open_write_close(filename, str(line).rstrip('\n') + '\n')


def open_write_close(filename, data):
    f = open(filename, 'w')
    try:
        f.write(data)
    finally:
        f.close()


def locate_file(path, base_dir=None):
    """Locates a file.

    @param path: The path of the file being located. Could be absolute or
        relative path. For relative path, it tries to locate the file from
        base_dir.

    @param base_dir (optional): Base directory of the relative path.

    @returns Absolute path of the file if found. None if path is None.
    @raises error.TestFail if the file is not found.
    """
    if path is None:
        return None

    if not os.path.isabs(path) and base_dir is not None:
        # Assume the relative path is based in autotest directory.
        path = os.path.join(base_dir, path)
    if not os.path.isfile(path):
        raise error.TestFail('ERROR: Unable to find %s' % path)
    return path


def matrix_to_string(matrix, header=None):
    """
    Return a pretty, aligned string representation of an nxm matrix.

    This representation can be used to print any tabular data, such as
    database results. It works by scanning the lengths of each element
    in each column, and determining the format string dynamically.

    @param matrix: Matrix representation (list with n rows of m elements).
    @param header: Optional tuple or list with header elements to be displayed.
    """
    if type(header) is list:
        header = tuple(header)
    lengths = []
    if header:
        for column in header:
            lengths.append(len(column))
    for row in matrix:
        for i, column in enumerate(row):
            column = unicode(column).encode("utf-8")
            cl = len(column)
            try:
                ml = lengths[i]
                if cl > ml:
                    lengths[i] = cl
            except IndexError:
                lengths.append(cl)

    lengths = tuple(lengths)
    format_string = ""
    for length in lengths:
        format_string += "%-" + str(length) + "s "
    format_string += "\n"

    matrix_str = ""
    if header:
        matrix_str += format_string % header
    for row in matrix:
        matrix_str += format_string % tuple(row)

    return matrix_str
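
# Example (illustrative only, not part of the original module): each column
# is left-justified to the width of its widest entry, header included:
#
#     print matrix_to_string([[1, 'ok'], [22, 'failed']],
#                            header=('id', 'status'))
#     # id status
#     # 1  ok
#     # 22 failed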


def read_keyval(path, type_tag=None):
    """
    Read a key-value pair format file into a dictionary, and return it.
    Takes either a filename or directory name as input. If it's a
    directory name, we assume you want the file to be called keyval.

    @param path: Full path of the file to read from.
    @param type_tag: If not None, only keyvals with key ending
                     in a suffix {type_tag} will be collected.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    if not os.path.exists(path):
        return {}

    if type_tag:
        pattern = r'^([-\.\w]+)\{%s\}=(.*)$' % type_tag
    else:
        pattern = r'^([-\.\w]+)=(.*)$'

    keyval = {}
    f = open(path)
    for line in f:
        line = re.sub('#.*', '', line).rstrip()
        if not line:
            continue
        match = re.match(pattern, line)
        if match:
            key = match.group(1)
            value = match.group(2)
            if re.search('^\d+$', value):
                value = int(value)
            elif re.search('^(\d+\.)?\d+$', value):
                value = float(value)
            keyval[key] = value
        else:
            raise ValueError('Invalid format line: %s' % line)
    f.close()
    return keyval


def write_keyval(path, dictionary, type_tag=None):
    """
    Write a key-value pair format file out to a file. This uses append
    mode to open the file, so existing text will not be overwritten or
    reparsed.

    If type_tag is None, then the key must be composed of alphanumeric
    characters (or dashes+underscores). However, if type_tag is not
    None then the keys must also have "{type_tag}" as a suffix. At
    the moment the only valid values of type_tag are "attr" and "perf".

    @param path: full path of the file to be written
    @param dictionary: the items to write
    @param type_tag: see text above
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    keyval = open(path, 'a')

    if type_tag is None:
        key_regex = re.compile(r'^[-\.\w]+$')
    else:
        if type_tag not in ('attr', 'perf'):
            raise ValueError('Invalid type tag: %s' % type_tag)
        escaped_tag = re.escape(type_tag)
        key_regex = re.compile(r'^[-\.\w]+\{%s\}$' % escaped_tag)
    try:
        for key in sorted(dictionary.keys()):
            if not key_regex.search(key):
                raise ValueError('Invalid key: %s' % key)
            keyval.write('%s=%s\n' % (key, dictionary[key]))
    finally:
        keyval.close()
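
# Example (illustrative only, not part of the original module): a round trip
# through the keyval format, using a hypothetical /tmp/results directory; the
# directory gets a file named "keyval" with one key=value pair per line:
#
#     write_keyval('/tmp/results', {'iterations': 3, 'platform': 'link'})
#     read_keyval('/tmp/results')  # -> {'iterations': 3, 'platform': 'link'}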


def is_url(path):
    """Return true if path looks like a URL"""
    # for now, just handle http and ftp
    url_parts = urlparse.urlparse(path)
    return (url_parts[0] in ('http', 'ftp'))


def urlopen(url, data=None, timeout=5):
    """Wrapper to urllib2.urlopen with timeout addition."""

    # Save old timeout
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        return urllib2.urlopen(url, data=data)
    finally:
        socket.setdefaulttimeout(old_timeout)


def urlretrieve(url, filename, data=None, timeout=300):
    """Retrieve a file from given url."""
    logging.debug('Fetching %s -> %s', url, filename)

    src_file = urlopen(url, data=data, timeout=timeout)
    try:
        dest_file = open(filename, 'wb')
        try:
            shutil.copyfileobj(src_file, dest_file)
        finally:
            dest_file.close()
    finally:
        src_file.close()


def hash(hashtype, input=None):
    """
    Returns a hash object of type md5 or sha1. This function is implemented in
    order to encapsulate hash objects in a way that is compatible with python
    2.4 and python 2.6 without warnings.

    Note that even though python 2.6 hashlib supports hash types other than
    md5 and sha1, we are artificially limiting the input values in order to
    make the function behave exactly the same under both python
    implementations.

    @param hashtype: Hash type to use, either 'md5' or 'sha1'.
    @param input: Optional input string that will be used to update the hash.
    """
    # pylint: disable=redefined-builtin
    if hashtype not in ['md5', 'sha1']:
        raise ValueError("Unsupported hash type: %s" % hashtype)

    try:
        computed_hash = hashlib.new(hashtype)
    except NameError:
        if hashtype == 'md5':
            computed_hash = md5.new()
        elif hashtype == 'sha1':
            computed_hash = sha.new()

    if input:
        computed_hash.update(input)

    return computed_hash
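
# Example (illustrative only, not part of the original module): the returned
# object behaves like any other hash object, so hexdigest() works as usual:
#
#     hash('md5', 'autotest').hexdigest()   # 32-character hex digest
#     hash('sha1', 'autotest').hexdigest()  # 40-character hex digest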
Allen Li5ed7e632017-02-03 16:31:33 -0800614
615
616def get_file(src, dest, permissions=None):
617 """Get a file from src, which can be local or a remote URL"""
618 if src == dest:
619 return
620
621 if is_url(src):
622 urlretrieve(src, dest)
623 else:
624 shutil.copyfile(src, dest)
625
626 if permissions:
627 os.chmod(dest, permissions)
628 return dest
629
630
631def unmap_url(srcdir, src, destdir='.'):
632 """
633 Receives either a path to a local file or a URL.
634 returns either the path to the local file, or the fetched URL
635
636 unmap_url('/usr/src', 'foo.tar', '/tmp')
637 = '/usr/src/foo.tar'
638 unmap_url('/usr/src', 'http://site/file', '/tmp')
639 = '/tmp/file'
640 (after retrieving it)
641 """
642 if is_url(src):
643 url_parts = urlparse.urlparse(src)
644 filename = os.path.basename(url_parts[2])
645 dest = os.path.join(destdir, filename)
646 return get_file(src, dest)
647 else:
648 return os.path.join(srcdir, src)
649
650
651def update_version(srcdir, preserve_srcdir, new_version, install,
652 *args, **dargs):
653 """
654 Make sure srcdir is version new_version
655
656 If not, delete it and install() the new version.
657
658 In the preserve_srcdir case, we just check it's up to date,
659 and if not, we rerun install, without removing srcdir
660 """
661 versionfile = os.path.join(srcdir, '.version')
662 install_needed = True
663
664 if os.path.exists(versionfile):
665 old_version = pickle.load(open(versionfile))
666 if old_version == new_version:
667 install_needed = False
668
669 if install_needed:
670 if not preserve_srcdir and os.path.exists(srcdir):
671 shutil.rmtree(srcdir)
672 install(*args, **dargs)
673 if os.path.exists(srcdir):
674 pickle.dump(new_version, open(versionfile, 'w'))
675
676
Daniel Erat3300f6e2018-01-09 17:34:31 -0800677def get_stderr_level(stderr_is_expected, stdout_level=DEFAULT_STDOUT_LEVEL):
Allen Li5ed7e632017-02-03 16:31:33 -0800678 if stderr_is_expected:
Daniel Erat3300f6e2018-01-09 17:34:31 -0800679 return stdout_level
Allen Li5ed7e632017-02-03 16:31:33 -0800680 return DEFAULT_STDERR_LEVEL
681
682
Daniel Erat3300f6e2018-01-09 17:34:31 -0800683def run(command, timeout=None, ignore_status=False, stdout_tee=None,
684 stderr_tee=None, verbose=True, stdin=None, stderr_is_expected=None,
685 stdout_level=None, stderr_level=None, args=(), nickname=None,
686 ignore_timeout=False, env=None, extra_paths=None):
Allen Li5ed7e632017-02-03 16:31:33 -0800687 """
688 Run a command on the host.
689
690 @param command: the command line string.
691 @param timeout: time limit in seconds before attempting to kill the
692 running process. The run() function will take a few seconds
693 longer than 'timeout' to complete if it has to kill the process.
694 @param ignore_status: do not raise an exception, no matter what the exit
695 code of the command is.
696 @param stdout_tee: optional file-like object to which stdout data
697 will be written as it is generated (data will still be stored
Daniel Erat9ac588d2018-04-16 08:45:53 -0700698 in result.stdout unless this is DEVNULL).
Allen Li5ed7e632017-02-03 16:31:33 -0800699 @param stderr_tee: likewise for stderr.
700 @param verbose: if True, log the command being run.
701 @param stdin: stdin to pass to the executed process (can be a file
702 descriptor, a file object of a real file or a string).
703 @param stderr_is_expected: if True, stderr will be logged at the same level
704 as stdout
Daniel Erat3300f6e2018-01-09 17:34:31 -0800705 @param stdout_level: logging level used if stdout_tee is TEE_TO_LOGS;
706 if None, a default is used.
707 @param stderr_level: like stdout_level but for stderr.
Allen Li5ed7e632017-02-03 16:31:33 -0800708 @param args: sequence of strings of arguments to be given to the command
709 inside " quotes after they have been escaped for that; each
710 element in the sequence will be given as a separate command
711 argument
712 @param nickname: Short string that will appear in logging messages
713 associated with this command.
714 @param ignore_timeout: If True, timeouts are ignored otherwise if a
715 timeout occurs it will raise CmdTimeoutError.
716 @param env: Dict containing environment variables used in a subprocess.
717 @param extra_paths: Optional string list, to be prepended to the PATH
718 env variable in env (or os.environ dict if env is
719 not specified).
720
721 @return a CmdResult object or None if the command timed out and
722 ignore_timeout is True
723
724 @raise CmdError: the exit code of the command execution was not 0
725 @raise CmdTimeoutError: the command timed out and ignore_timeout is False.
726 """
727 if isinstance(args, basestring):
728 raise TypeError('Got a string for the "args" keyword argument, '
729 'need a sequence.')
730
731 # In some cases, command will actually be a list
732 # (For example, see get_user_hash in client/cros/cryptohome.py.)
733 # So, to cover that case, detect if it's a string or not and convert it
734 # into one if necessary.
735 if not isinstance(command, basestring):
736 command = ' '.join([sh_quote_word(arg) for arg in command])
737
738 command = ' '.join([command] + [sh_quote_word(arg) for arg in args])
Daniel Erat3300f6e2018-01-09 17:34:31 -0800739
Allen Li5ed7e632017-02-03 16:31:33 -0800740 if stderr_is_expected is None:
741 stderr_is_expected = ignore_status
Daniel Erat3300f6e2018-01-09 17:34:31 -0800742 if stdout_level is None:
743 stdout_level = DEFAULT_STDOUT_LEVEL
744 if stderr_level is None:
745 stderr_level = get_stderr_level(stderr_is_expected, stdout_level)
Allen Li5ed7e632017-02-03 16:31:33 -0800746
747 try:
748 bg_job = join_bg_jobs(
749 (BgJob(command, stdout_tee, stderr_tee, verbose, stdin=stdin,
Daniel Erat3300f6e2018-01-09 17:34:31 -0800750 stdout_level=stdout_level, stderr_level=stderr_level,
Allen Li5ed7e632017-02-03 16:31:33 -0800751 nickname=nickname, env=env, extra_paths=extra_paths),),
752 timeout)[0]
753 except error.CmdTimeoutError:
754 if not ignore_timeout:
755 raise
756 return None
757
758 if not ignore_status and bg_job.result.exit_status:
759 raise error.CmdError(command, bg_job.result,
760 "Command returned non-zero exit status")
761
762 return bg_job.result
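
# Example (illustrative only, not part of the original module): a typical call
# that tolerates a non-zero exit status and tees output to the logging system;
# the returned CmdResult carries stdout, stderr, exit_status and duration:
#
#     result = run('grep -c model /proc/cpuinfo', ignore_status=True,
#                  stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
#     num_model_lines = result.stdout.strip()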


def run_parallel(commands, timeout=None, ignore_status=False,
                 stdout_tee=None, stderr_tee=None,
                 nicknames=None):
    """
    Behaves the same as run() with the following exceptions:

    - commands is a list of commands to run in parallel.
    - ignore_status toggles whether or not an exception should be raised
      on any error.

    @return: a list of CmdResult objects
    """
    bg_jobs = []
    if nicknames is None:
        nicknames = []
    for (command, nickname) in itertools.izip_longest(commands, nicknames):
        bg_jobs.append(BgJob(command, stdout_tee, stderr_tee,
                             stderr_level=get_stderr_level(ignore_status),
                             nickname=nickname))

    # Updates objects in bg_jobs list with their process information
    join_bg_jobs(bg_jobs, timeout)

    for bg_job in bg_jobs:
        if not ignore_status and bg_job.result.exit_status:
            raise error.CmdError(command, bg_job.result,
                                 "Command returned non-zero exit status")

    return [bg_job.result for bg_job in bg_jobs]


@deprecated
def run_bg(command):
    """Function deprecated. Please use BgJob class instead."""
    bg_job = BgJob(command)
    return bg_job.sp, bg_job.result


def join_bg_jobs(bg_jobs, timeout=None):
    """Joins the bg_jobs with the current thread.

    Returns the same list of bg_jobs objects that was passed in.
    """
    if any(bg_job.unjoinable for bg_job in bg_jobs):
        raise error.InvalidBgJobCall(
                'join_bg_jobs cannot be called for unjoinable bg_job')

    timeout_error = False
    try:
        # We are holding ends to stdin, stdout pipes
        # hence we need to be sure to close those fds no matter what
        start_time = time.time()
        timeout_error = _wait_for_commands(bg_jobs, start_time, timeout)

        for bg_job in bg_jobs:
            # Process stdout and stderr
            bg_job.process_output(stdout=True, final_read=True)
            bg_job.process_output(stdout=False, final_read=True)
    finally:
        # close our ends of the pipes to the sp no matter what
        for bg_job in bg_jobs:
            bg_job.cleanup()

    if timeout_error:
        # TODO: This needs to be fixed to better represent what happens when
        # running in parallel. However this is backwards compatible, so it will
        # do for the time being.
        raise error.CmdTimeoutError(
                bg_jobs[0].command, bg_jobs[0].result,
                "Command(s) did not complete within %d seconds" % timeout)


    return bg_jobs


def _wait_for_commands(bg_jobs, start_time, timeout):
    """Waits for background jobs by select polling their stdout/stderr.

    @param bg_jobs: A list of background jobs to wait on.
    @param start_time: Time used to calculate the timeout lifetime of a job.
    @param timeout: The timeout of the list of bg_jobs.

    @return: True if the return was due to a timeout, False otherwise.
    """

    # To check for processes which terminate without producing any output
    # a 1 second timeout is used in select.
    SELECT_TIMEOUT = 1

    read_list = []
    write_list = []
    reverse_dict = {}

    for bg_job in bg_jobs:
        if bg_job.sp.stdout:
            read_list.append(bg_job.sp.stdout)
            reverse_dict[bg_job.sp.stdout] = (bg_job, True)
        if bg_job.sp.stderr:
            read_list.append(bg_job.sp.stderr)
            reverse_dict[bg_job.sp.stderr] = (bg_job, False)
        if bg_job.string_stdin is not None:
            write_list.append(bg_job.sp.stdin)
            reverse_dict[bg_job.sp.stdin] = bg_job

    if timeout:
        stop_time = start_time + timeout
        time_left = stop_time - time.time()
    else:
        time_left = None  # so that select never times out

    while not timeout or time_left > 0:
        # select will return when we may write to stdin, when there is
        # stdout/stderr output we can read (including when it is
        # EOF, that is the process has terminated) or when a non-fatal
        # signal was sent to the process. In the last case the select returns
        # EINTR, and we continue waiting for the job if the signal handler for
        # the signal that interrupted the call allows us to.
        try:
            read_ready, write_ready, _ = select.select(read_list, write_list,
                                                       [], SELECT_TIMEOUT)
        except select.error as v:
            if v[0] == errno.EINTR:
                logging.warning(v)
                continue
            else:
                raise
        # os.read() has to be used instead of
        # subproc.stdout.read() which will otherwise block
        for file_obj in read_ready:
            bg_job, is_stdout = reverse_dict[file_obj]
            bg_job.process_output(is_stdout)

        for file_obj in write_ready:
            # we can write PIPE_BUF bytes without blocking
            # POSIX requires PIPE_BUF is >= 512
            bg_job = reverse_dict[file_obj]
            file_obj.write(bg_job.string_stdin[:512])
            bg_job.string_stdin = bg_job.string_stdin[512:]
            # no more input data, close stdin, remove it from the select set
            if not bg_job.string_stdin:
                file_obj.close()
                write_list.remove(file_obj)
                del reverse_dict[file_obj]

        all_jobs_finished = True
        for bg_job in bg_jobs:
            if bg_job.result.exit_status is not None:
                continue

            bg_job.result.exit_status = bg_job.sp.poll()
            if bg_job.result.exit_status is not None:
                # process exited, remove its stdout/stdin from the select set
                bg_job.result.duration = time.time() - start_time
                if bg_job.sp.stdout:
                    read_list.remove(bg_job.sp.stdout)
                    del reverse_dict[bg_job.sp.stdout]
                if bg_job.sp.stderr:
                    read_list.remove(bg_job.sp.stderr)
                    del reverse_dict[bg_job.sp.stderr]
            else:
                all_jobs_finished = False

        if all_jobs_finished:
            return False

        if timeout:
            time_left = stop_time - time.time()

    # Kill all processes which did not complete prior to timeout
    for bg_job in bg_jobs:
        if bg_job.result.exit_status is not None:
            continue

        logging.warning('run process timeout (%s) fired on: %s', timeout,
                        bg_job.command)
        if nuke_subprocess(bg_job.sp) is None:
            # If process could not be SIGKILL'd, log kernel stack.
            logging.warning(read_file('/proc/%d/stack' % bg_job.sp.pid))
        bg_job.result.exit_status = bg_job.sp.poll()
        bg_job.result.duration = time.time() - start_time

    return True


def pid_is_alive(pid):
    """
    True if process pid exists and is not yet stuck in Zombie state.
    Zombies are impossible to move between cgroups, etc.
    pid can be integer, or text of integer.
    """
    path = '/proc/%s/stat' % pid

    try:
        stat = read_one_line(path)
    except IOError:
        if not os.path.exists(path):
            # file went away
            return False
        raise

    return stat.split()[2] != 'Z'


def signal_pid(pid, sig):
    """
    Sends a signal to a process id. Returns True if the process terminated
    successfully, False otherwise.
    """
    try:
        os.kill(pid, sig)
    except OSError:
        # The process may have died before we could kill it.
        pass

    for _ in range(5):
        if not pid_is_alive(pid):
            return True
        time.sleep(1)

    # The process is still alive
    return False


def nuke_subprocess(subproc):
    # check if the subprocess is still alive, first
    if subproc.poll() is not None:
        return subproc.poll()

    # the process has not terminated within timeout,
    # kill it via an escalating series of signals.
    signal_queue = [signal.SIGTERM, signal.SIGKILL]
    for sig in signal_queue:
        signal_pid(subproc.pid, sig)
        if subproc.poll() is not None:
            return subproc.poll()


def nuke_pid(pid, signal_queue=(signal.SIGTERM, signal.SIGKILL)):
    # the process has not terminated within timeout,
    # kill it via an escalating series of signals.
    pid_path = '/proc/%d/'
    if not os.path.exists(pid_path % pid):
        # Assume that if the pid does not exist in proc it is already dead.
        logging.error('No listing in /proc for pid:%d.', pid)
        raise error.AutoservPidAlreadyDeadError('Could not kill nonexistent '
                                                'pid: %s.', pid)
    for sig in signal_queue:
        if signal_pid(pid, sig):
            return

    # no signal successfully terminated the process
    raise error.AutoservRunError('Could not kill %d for process name: %s' % (
            pid, get_process_name(pid)), None)


def system(command, timeout=None, ignore_status=False):
    """
    Run a command

    @param timeout: timeout in seconds
    @param ignore_status: if ignore_status=False, throw an exception if the
            command's exit code is non-zero
            if ignore_status=True, return the exit code.

    @return exit status of command
            (note, this will always be zero unless ignore_status=True)
    """
    return run(command, timeout=timeout, ignore_status=ignore_status,
               stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS).exit_status


def system_parallel(commands, timeout=None, ignore_status=False):
    """This function returns a list of exit statuses for the respective
    list of commands."""
    return [bg_jobs.exit_status for bg_jobs in
            run_parallel(commands, timeout=timeout, ignore_status=ignore_status,
                         stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)]


def system_output(command, timeout=None, ignore_status=False,
                  retain_output=False, args=()):
    """
    Run a command and return the stdout output.

    @param command: command string to execute.
    @param timeout: time limit in seconds before attempting to kill the
            running process. The function will take a few seconds longer
            than 'timeout' to complete if it has to kill the process.
    @param ignore_status: do not raise an exception, no matter what the exit
            code of the command is.
    @param retain_output: set to True to also send the command's stdout/stderr
            output to the logging system
    @param args: sequence of strings of arguments to be given to the command
            inside " quotes after they have been escaped for that; each
            element in the sequence will be given as a separate command
            argument

    @return a string with the stdout output of the command.
    """
    if retain_output:
        out = run(command, timeout=timeout, ignore_status=ignore_status,
                  stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS,
                  args=args).stdout
    else:
        out = run(command, timeout=timeout, ignore_status=ignore_status,
                  args=args).stdout
    if out[-1:] == '\n':
        out = out[:-1]
    return out


def system_output_parallel(commands, timeout=None, ignore_status=False,
                           retain_output=False):
    if retain_output:
        out = [bg_job.stdout for bg_job
               in run_parallel(commands, timeout=timeout,
                               ignore_status=ignore_status,
                               stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)]
    else:
        out = [bg_job.stdout for bg_job
               in run_parallel(commands, timeout=timeout,
                               ignore_status=ignore_status)]
    for _ in out:
        if out[-1:] == '\n':
            out = out[:-1]
    return out


def strip_unicode(input_obj):
    if type(input_obj) == list:
        return [strip_unicode(i) for i in input_obj]
    elif type(input_obj) == dict:
        output = {}
        for key in input_obj.keys():
            output[str(key)] = strip_unicode(input_obj[key])
        return output
    elif type(input_obj) == unicode:
        return str(input_obj)
    else:
        return input_obj
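
# Example (illustrative only, not part of the original module): recursively
# converting unicode keys and values (e.g. from a decoded JSON response) into
# plain byte strings:
#
#     strip_unicode({u'status': [u'GOOD', u'WARN']})
#     # -> {'status': ['GOOD', 'WARN']}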
Allen Li5ed7e632017-02-03 16:31:33 -08001104
1105
1106def get_cpu_percentage(function, *args, **dargs):
1107 """Returns a tuple containing the CPU% and return value from function call.
1108
1109 This function calculates the usage time by taking the difference of
1110 the user and system times both before and after the function call.
1111 """
1112 child_pre = resource.getrusage(resource.RUSAGE_CHILDREN)
1113 self_pre = resource.getrusage(resource.RUSAGE_SELF)
1114 start = time.time()
1115 to_return = function(*args, **dargs)
1116 elapsed = time.time() - start
1117 self_post = resource.getrusage(resource.RUSAGE_SELF)
1118 child_post = resource.getrusage(resource.RUSAGE_CHILDREN)
1119
1120 # Calculate CPU Percentage
1121 s_user, s_system = [a - b for a, b in zip(self_post, self_pre)[:2]]
1122 c_user, c_system = [a - b for a, b in zip(child_post, child_pre)[:2]]
1123 cpu_percent = (s_user + c_user + s_system + c_system) / elapsed
1124
1125 return cpu_percent, to_return
1126
1127
1128def get_arch(run_function=run):
1129 """
1130 Get the hardware architecture of the machine.
1131 If specified, run_function should return a CmdResult object and throw a
1132 CmdError exception.
1133 If run_function is anything other than utils.run(), it is used to
1134 execute the commands. By default (when set to utils.run()) this will
1135 just examine os.uname()[4].
1136 """
1137
1138 # Short circuit from the common case.
1139 if run_function == run:
1140 return re.sub(r'i\d86$', 'i386', os.uname()[4])
1141
1142 # Otherwise, use the run_function in case it hits a remote machine.
1143 arch = run_function('/bin/uname -m').stdout.rstrip()
1144 if re.match(r'i\d86$', arch):
1145 arch = 'i386'
1146 return arch
1147
1148def get_arch_userspace(run_function=run):
1149 """
1150 Get the architecture by userspace (possibly different from kernel).
1151 """
1152 archs = {
1153 'arm': 'ELF 32-bit.*, ARM,',
Adam Kallai77754052018-07-12 10:02:40 +02001154 'arm64': 'ELF 64-bit.*, ARM aarch64,',
Allen Li5ed7e632017-02-03 16:31:33 -08001155 'i386': 'ELF 32-bit.*, Intel 80386,',
1156 'x86_64': 'ELF 64-bit.*, x86-64,',
1157 }
1158
1159 cmd = 'file --brief --dereference /bin/sh'
1160 filestr = run_function(cmd).stdout.rstrip()
1161 for a, regex in archs.iteritems():
1162 if re.match(regex, filestr):
1163 return a
1164
1165 return get_arch()
1166
1167
1168def get_num_logical_cpus_per_socket(run_function=run):
1169 """
1170 Get the number of cores (including hyperthreading) per cpu.
1171 run_function is used to execute the commands. It defaults to
1172 utils.run() but a custom method (if provided) should be of the
1173 same schema as utils.run. It should return a CmdResult object and
1174 throw a CmdError exception.
1175 """
1176 siblings = run_function('grep "^siblings" /proc/cpuinfo').stdout.rstrip()
1177 num_siblings = map(int,
1178 re.findall(r'^siblings\s*:\s*(\d+)\s*$',
1179 siblings, re.M))
1180 if len(num_siblings) == 0:
1181 raise error.TestError('Unable to find siblings info in /proc/cpuinfo')
1182 if min(num_siblings) != max(num_siblings):
1183 raise error.TestError('Number of siblings differ %r' %
1184 num_siblings)
1185 return num_siblings[0]
1186
1187
Alex Khouderchah19e0ab32018-07-06 17:03:21 -07001188def set_high_performance_mode(host=None):
1189 """
1190 Sets the kernel governor mode to the highest setting.
1191 Returns previous governor state.
1192 """
1193 original_governors = get_scaling_governor_states(host)
1194 set_scaling_governors('performance', host)
1195 return original_governors
1196
1197
1198def set_scaling_governors(value, host=None):
1199 """
1200 Sets all scaling governor to string value.
1201 Sample values: 'performance', 'interactive', 'ondemand', 'powersave'.
1202 """
1203 paths = _get_cpufreq_paths('scaling_governor', host)
1204 if not paths:
1205 logging.info("Could not set governor states, as no files of the form "
1206 "'/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor' "
1207 "were found.")
1208 run_func = host.run if host else system
1209 for path in paths:
1210 cmd = 'echo %s > %s' % (value, path)
1211 logging.info('Writing scaling governor mode \'%s\' -> %s', value, path)
1212 # On Tegra CPUs can be dynamically enabled/disabled. Ignore failures.
1213 run_func(cmd, ignore_status=True)
1214
1215
1216def _get_cpufreq_paths(filename, host=None):
1217 """
1218 Returns a list of paths to the governors.
1219 """
1220 run_func = host.run if host else run
Brian Norris951f3572019-01-16 10:13:03 -08001221 glob = '/sys/devices/system/cpu/cpu*/cpufreq/' + filename
1222 # Simple glob expansion; note that CPUs may come and go, causing these
1223 # paths to change at any time.
1224 cmd = 'echo ' + glob
Alex Khouderchah19e0ab32018-07-06 17:03:21 -07001225 try:
Brian Norris951f3572019-01-16 10:13:03 -08001226 paths = run_func(cmd, verbose=False).stdout.split()
Alex Khouderchah19e0ab32018-07-06 17:03:21 -07001227 except error.CmdError:
1228 return []
Brian Norris951f3572019-01-16 10:13:03 -08001229 # If the glob result equals itself, then we likely didn't match any real
1230 # paths (assuming 'cpu*' is not a real path).
1231 if paths == [glob]:
1232 return []
Alex Khouderchah19e0ab32018-07-06 17:03:21 -07001233 return paths
1234
1235
1236def get_scaling_governor_states(host=None):
1237 """
1238 Returns a list of (performance governor path, current state) tuples.
1239 """
1240 paths = _get_cpufreq_paths('scaling_governor', host)
1241 path_value_list = []
1242 run_func = host.run if host else run
1243 for path in paths:
1244 value = run_func('head -n 1 %s' % path, verbose=False).stdout
1245 path_value_list.append((path, value))
1246 return path_value_list
1247
1248
1249def restore_scaling_governor_states(path_value_list, host=None):
1250 """
1251 Restores governor states. Inverse operation to get_scaling_governor_states.
1252 """
1253 run_func = host.run if host else system
1254 for (path, value) in path_value_list:
1255 cmd = 'echo %s > %s' % (value.rstrip('\n'), path)
1256 # On Tegra CPUs can be dynamically enabled/disabled. Ignore failures.
1257 run_func(cmd, ignore_status=True)
1258
1259
Allen Li5ed7e632017-02-03 16:31:33 -08001260def merge_trees(src, dest):
1261 """
1262 Merges a source directory tree at 'src' into a destination tree at
1263 'dest'. If a path is a file in both trees than the file in the source
1264 tree is APPENDED to the one in the destination tree. If a path is
1265 a directory in both trees then the directories are recursively merged
1266 with this function. In any other case, the function will skip the
1267 paths that cannot be merged (instead of failing).
1268 """
1269 if not os.path.exists(src):
1270 return # exists only in dest
1271 elif not os.path.exists(dest):
1272 if os.path.isfile(src):
1273 shutil.copy2(src, dest) # file only in src
1274 else:
1275 shutil.copytree(src, dest, symlinks=True) # dir only in src
1276 return
1277 elif os.path.isfile(src) and os.path.isfile(dest):
1278 # src & dest are files in both trees, append src to dest
1279 destfile = open(dest, "a")
1280 try:
1281 srcfile = open(src)
1282 try:
1283 destfile.write(srcfile.read())
1284 finally:
1285 srcfile.close()
1286 finally:
1287 destfile.close()
1288 elif os.path.isdir(src) and os.path.isdir(dest):
1289 # src & dest are directories in both trees, so recursively merge
1290 for name in os.listdir(src):
1291 merge_trees(os.path.join(src, name), os.path.join(dest, name))
1292 else:
1293 # src & dest both exist, but are incompatible
1294 return
1295
1296
1297class CmdResult(object):
1298 """
1299 Command execution result.
1300
1301 command: String containing the command line itself
1302 exit_status: Integer exit code of the process
1303 stdout: String containing stdout of the process
1304 stderr: String containing stderr of the process
1305 duration: Elapsed wall clock time running the process
1306 """
1307
1308
1309 def __init__(self, command="", stdout="", stderr="",
1310 exit_status=None, duration=0):
1311 self.command = command
1312 self.exit_status = exit_status
1313 self.stdout = stdout
1314 self.stderr = stderr
1315 self.duration = duration
1316
1317
Prathmesh Prabhu54905412017-07-11 17:08:38 -07001318 def __eq__(self, other):
1319 if type(self) == type(other):
1320 return (self.command == other.command
1321 and self.exit_status == other.exit_status
1322 and self.stdout == other.stdout
1323 and self.stderr == other.stderr
1324 and self.duration == other.duration)
1325 else:
1326 return NotImplemented
1327
1328
Allen Li5ed7e632017-02-03 16:31:33 -08001329 def __repr__(self):
1330 wrapper = textwrap.TextWrapper(width = 78,
1331 initial_indent="\n ",
1332 subsequent_indent=" ")
1333
1334 stdout = self.stdout.rstrip()
1335 if stdout:
1336 stdout = "\nstdout:\n%s" % stdout
1337
1338 stderr = self.stderr.rstrip()
1339 if stderr:
1340 stderr = "\nstderr:\n%s" % stderr
1341
1342 return ("* Command: %s\n"
1343 "Exit status: %s\n"
1344 "Duration: %s\n"
1345 "%s"
1346 "%s"
1347 % (wrapper.fill(str(self.command)), self.exit_status,
1348 self.duration, stdout, stderr))
1349
1350
1351class run_randomly:
1352 def __init__(self, run_sequentially=False):
1353 # Run sequentially is for debugging control files
1354 self.test_list = []
1355 self.run_sequentially = run_sequentially
1356
1357
1358 def add(self, *args, **dargs):
1359 test = (args, dargs)
1360 self.test_list.append(test)
1361
1362
1363 def run(self, fn):
1364 while self.test_list:
1365 test_index = random.randint(0, len(self.test_list)-1)
1366 if self.run_sequentially:
1367 test_index = 0
1368 (args, dargs) = self.test_list.pop(test_index)
1369 fn(*args, **dargs)
1370
1371
1372def import_site_module(path, module, dummy=None, modulefile=None):
1373 """
1374 Try to import the site specific module if it exists.
1375
1376 @param path full filename of the source file calling this (ie __file__)
1377 @param module full module name
1378 @param dummy dummy value to return in case there is no symbol to import
1379 @param modulefile module filename
1380
1381 @return site specific module or dummy
1382
1383 @raises ImportError if the site file exists but imports fails
1384 """
1385 short_module = module[module.rfind(".") + 1:]
1386
1387 if not modulefile:
1388 modulefile = short_module + ".py"
1389
1390 if os.path.exists(os.path.join(os.path.dirname(path), modulefile)):
1391 return __import__(module, {}, {}, [short_module])
1392 return dummy
1393
1394
1395def import_site_symbol(path, module, name, dummy=None, modulefile=None):
1396 """
1397 Try to import site specific symbol from site specific file if it exists
1398
1399 @param path full filename of the source file calling this (ie __file__)
1400 @param module full module name
1401 @param name symbol name to be imported from the site file
1402 @param dummy dummy value to return in case there is no symbol to import
1403 @param modulefile module filename
1404
1405 @return site specific symbol or dummy
1406
1407 @raises ImportError if the site file exists but imports fails
1408 """
1409 module = import_site_module(path, module, modulefile=modulefile)
1410 if not module:
1411 return dummy
1412
1413 # special unique value to tell us if the symbol can't be imported
1414 cant_import = object()
1415
1416 obj = getattr(module, name, cant_import)
1417 if obj is cant_import:
1418 return dummy
1419
1420 return obj
1421
1422
1423def import_site_class(path, module, classname, baseclass, modulefile=None):
1424 """
1425 Try to import site specific class from site specific file if it exists
1426
1427 Args:
1428 path: full filename of the source file calling this (ie __file__)
1429 module: full module name
1430 classname: class name to be loaded from site file
1431 baseclass: base class object to return when no site file present or
1432 to mixin when site class exists but is not inherited from baseclass
1433 modulefile: module filename
1434
1435 Returns: baseclass if site specific class does not exist, the site specific
1436 class if it exists and is inherited from baseclass or a mixin of the
1437 site specific class and baseclass when the site specific class exists
1438 and is not inherited from baseclass
1439
1440 Raises: ImportError if the site file exists but imports fails
1441 """
1442
1443 res = import_site_symbol(path, module, classname, None, modulefile)
1444 if res:
1445 if not issubclass(res, baseclass):
1446 # if not a subclass of baseclass then mix in baseclass with the
1447 # site specific class object and return the result
1448 res = type(classname, (res, baseclass), {})
1449 else:
1450 res = baseclass
1451
1452 return res
1453
1454
1455def import_site_function(path, module, funcname, dummy, modulefile=None):
1456 """
1457 Try to import site specific function from site specific file if it exists
1458
1459 Args:
1460 path: full filename of the source file calling this (ie __file__)
1461 module: full module name
1462 funcname: function name to be imported from site file
1463 dummy: dummy function to return in case there is no function to import
1464 modulefile: module filename
1465
1466 Returns: site specific function object or dummy
1467
1468 Raises: ImportError if the site file exists but imports fails
1469 """
1470
1471 return import_site_symbol(path, module, funcname, dummy, modulefile)
1472
1473
1474def _get_pid_path(program_name):
1475 my_path = os.path.dirname(__file__)
1476 return os.path.abspath(os.path.join(my_path, "..", "..",
1477 "%s.pid" % program_name))
1478
1479
1480def write_pid(program_name):
1481 """
1482 Try to drop <program_name>.pid in the main autotest directory.
1483
1484 Args:
1485 program_name: prefix for file name
1486 """
1487 pidfile = open(_get_pid_path(program_name), "w")
1488 try:
1489 pidfile.write("%s\n" % os.getpid())
1490 finally:
1491 pidfile.close()
1492
1493
1494def delete_pid_file_if_exists(program_name):
1495 """
1496 Tries to remove <program_name>.pid from the main autotest directory.
1497 """
1498 pidfile_path = _get_pid_path(program_name)
1499
1500 try:
1501 os.remove(pidfile_path)
1502 except OSError:
1503 if not os.path.exists(pidfile_path):
1504 return
1505 raise
1506
1507
1508def get_pid_from_file(program_name):
1509 """
1510 Reads the pid from <program_name>.pid in the autotest directory.
1511
1512 @param program_name the name of the program
1513 @return the pid if the file exists, None otherwise.
1514 """
1515 pidfile_path = _get_pid_path(program_name)
1516 if not os.path.exists(pidfile_path):
1517 return None
1518
1519 pidfile = open(_get_pid_path(program_name), 'r')
1520
1521 try:
1522 try:
1523 pid = int(pidfile.readline())
1524 except IOError:
1525 if not os.path.exists(pidfile_path):
1526 return None
1527 raise
1528 finally:
1529 pidfile.close()
1530
1531 return pid
1532
1533
1534def get_process_name(pid):
1535 """
1536 Get process name from PID.
1537 @param pid: PID of process.
1538 @return: Process name if PID stat file exists or 'Dead PID' if it does not.
1539 """
1540 pid_stat_path = "/proc/%d/stat"
1541 if not os.path.exists(pid_stat_path % pid):
1542 return "Dead Pid"
1543 return get_field(read_file(pid_stat_path % pid), 1)[1:-1]
1544
1545
1546def program_is_alive(program_name):
1547 """
1548 Checks if the process is alive and not in Zombie state.
1549
1550 @param program_name the name of the program
1551 @return True if still alive, False otherwise
1552 """
1553 pid = get_pid_from_file(program_name)
1554 if pid is None:
1555 return False
1556 return pid_is_alive(pid)
1557
1558
1559def signal_program(program_name, sig=signal.SIGTERM):
1560 """
1561 Sends a signal to the process listed in <program_name>.pid
1562
1563 @param program_name the name of the program
1564 @param sig signal to send
1565 """
1566 pid = get_pid_from_file(program_name)
1567 if pid:
1568 signal_pid(pid, sig)
1569
1570
1571def get_relative_path(path, reference):
1572 """Given 2 absolute paths "path" and "reference", compute the path of
1573 "path" as relative to the directory "reference".
1574
1575 @param path the absolute path to convert to a relative path
1576 @param reference an absolute directory path to which the relative
1577 path will be computed
1578 """
1579 # normalize the paths (remove double slashes, etc)
1580 assert(os.path.isabs(path))
1581 assert(os.path.isabs(reference))
1582
1583 path = os.path.normpath(path)
1584 reference = os.path.normpath(reference)
1585
1586 # we could use os.path.split() but it splits from the end
1587 path_list = path.split(os.path.sep)[1:]
1588 ref_list = reference.split(os.path.sep)[1:]
1589
1590 # find the longest leading common path
1591 for i in xrange(min(len(path_list), len(ref_list))):
1592 if path_list[i] != ref_list[i]:
1593 # decrement i so when exiting this loop either by no match or by
1594 # end of range we are one step behind
1595 i -= 1
1596 break
1597 i += 1
1598 # drop the common part of the paths, not interested in that anymore
1599 del path_list[:i]
1600
1601 # for each uncommon component in the reference prepend a ".."
1602 path_list[:0] = ['..'] * (len(ref_list) - i)
1603
1604 return os.path.join(*path_list)
1605
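# Illustrative sketch of get_relative_path() (comment only, not executed at
# import time; the paths below are made-up examples):
#
#   get_relative_path('/usr/local/autotest/results',
#                     '/usr/local/autotest/server')
#   # -> '../results'
#   get_relative_path('/a/b/c', '/a/b')
#   # -> 'c'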
1606
1607def sh_escape(command):
1608 """
1609 Escape special characters from a command so that it can be passed
1610 as a double quoted (" ") string in a (ba)sh command.
1611
1612 Args:
1613 command: the command string to escape.
1614
1615 Returns:
1616 The escaped command string. The required enclosing double
1617 quotes are NOT added and so should be added at some point by
1618 the caller.
1619
1620 See also: http://www.tldp.org/LDP/abs/html/escapingsection.html
1621 """
1622 command = command.replace("\\", "\\\\")
1623 command = command.replace("$", r'\$')
1624 command = command.replace('"', r'\"')
1625 command = command.replace('`', r'\`')
1626 return command
1627
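# Illustrative sketch of how sh_escape() is typically used (comment only;
# the message below is a made-up example). The caller supplies the
# enclosing double quotes:
#
#   message = 'costs $5, says "hi"'
#   cmd = 'echo "%s"' % sh_escape(message)
#   # The shell then sees: echo "costs \$5, says \"hi\""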
1628
1629def sh_quote_word(text, whitelist=SHELL_QUOTING_WHITELIST):
1630 r"""Quote a string to make it safe as a single word in a shell command.
1631
1632 POSIX shell syntax recognizes no escape characters inside a single-quoted
1633 string. So, single quotes can safely quote any string of characters except
1634 a string with a single quote character. A single quote character must be
1635 quoted with the sequence '\'' which translates to:
1636 ' -> close current quote
1637 \' -> insert a literal single quote
1638 ' -> reopen quoting again.
1639
1640 This is safe for all combinations of characters, including embedded and
1641 trailing backslashes in odd or even numbers.
1642
1643 This is also safe for nesting, e.g. the following is a valid use:
1644
1645 adb_command = 'adb shell %s' % (
1646 sh_quote_word('echo %s' % sh_quote_word('hello world')))
1647
1648 @param text: The string to be quoted into a single word for the shell.
1649 @param whitelist: Optional list of characters that do not need quoting.
1650 Defaults to a known good list of characters.
1651
1652 @return A string, possibly quoted, safe as a single word for a shell.
1653 """
1654 if all(c in whitelist for c in text):
1655 return text
1656 return "'" + text.replace("'", r"'\''") + "'"
1657
1658
1659def configure(extra=None, configure='./configure'):
1660 """
1661 Run configure passing in the correct host, build, and target options.
1662
1663 @param extra: extra command line arguments to pass to configure
1664 @param configure: which configure script to use
1665 """
1666 args = []
1667 if 'CHOST' in os.environ:
1668 args.append('--host=' + os.environ['CHOST'])
1669 if 'CBUILD' in os.environ:
1670 args.append('--build=' + os.environ['CBUILD'])
1671 if 'CTARGET' in os.environ:
1672 args.append('--target=' + os.environ['CTARGET'])
1673 if extra:
1674 args.append(extra)
1675
1676 system('%s %s' % (configure, ' '.join(args)))
1677
1678
1679def make(extra='', make='make', timeout=None, ignore_status=False):
1680 """
1681 Run make, adding MAKEOPTS to the list of options.
1682
1683 @param extra: extra command line arguments to pass to make.
1684 """
1685 cmd = '%s %s %s' % (make, os.environ.get('MAKEOPTS', ''), extra)
1686 return system(cmd, timeout=timeout, ignore_status=ignore_status)
1687
1688
1689def compare_versions(ver1, ver2):
1690 """Version number comparison between ver1 and ver2 strings.
1691
1692 >>> compare_versions("1", "2")
1693 -1
1694 >>> compare_versions("foo-1.1", "foo-1.2")
1695 -1
1696 >>> compare_versions("1.2", "1.2a")
1697 -1
1698 >>> compare_versions("1.2b", "1.2a")
1699 1
1700 >>> compare_versions("1.3.5.3a", "1.3.5.3b")
1701 -1
1702
1703 Args:
1704 ver1: version string
1705 ver2: version string
1706
1707 Returns:
1708 int: 1 if ver1 > ver2
1709 0 if ver1 == ver2
1710 -1 if ver1 < ver2
1711 """
1712 ax = re.split('[.-]', ver1)
1713 ay = re.split('[.-]', ver2)
1714 while len(ax) > 0 and len(ay) > 0:
1715 cx = ax.pop(0)
1716 cy = ay.pop(0)
1717 maxlen = max(len(cx), len(cy))
1718 c = cmp(cx.zfill(maxlen), cy.zfill(maxlen))
1719 if c != 0:
1720 return c
1721 return cmp(len(ax), len(ay))
1722
1723
1724def args_to_dict(args):
1725 """Convert autoserv extra arguments in the form of key=val or key:val to a
1726 dictionary. Each argument key is converted to a lowercase dictionary key.
1727
1728 Args:
1729 args - list of autoserv extra arguments.
1730
1731 Returns:
1732 dictionary
1733 """
1734 arg_re = re.compile(r'(\w+)[:=](.*)$')
Kirtika Ruchandani82a18c42018-07-08 19:16:46 -07001735 args_dict = {}
Allen Li5ed7e632017-02-03 16:31:33 -08001736 for arg in args:
1737 match = arg_re.match(arg)
1738 if match:
Kirtika Ruchandani82a18c42018-07-08 19:16:46 -07001739 args_dict[match.group(1).lower()] = match.group(2)
Allen Li5ed7e632017-02-03 16:31:33 -08001740 else:
1741 logging.warning("args_to_dict: argument '%s' doesn't match "
1742 "'%s' pattern. Ignored.", arg, arg_re.pattern)
Kirtika Ruchandani82a18c42018-07-08 19:16:46 -07001743 return args_dict
Allen Li5ed7e632017-02-03 16:31:33 -08001744
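# Illustrative sketch of args_to_dict() behavior (comment only; the argument
# names are made-up examples). Keys are lowercased, values are kept as-is:
#
#   args_to_dict(['SERVO_HOST=chromeos1-row1-host2', 'servo_port:9999'])
#   # -> {'servo_host': 'chromeos1-row1-host2', 'servo_port': '9999'}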
1745
1746def get_unused_port():
1747 """
1748 Finds a semi-random available port. A race condition is still
1749 possible after the port number is returned, if another process
1750 happens to bind it.
1751
1752 Returns:
1753 A port number that is unused on both TCP and UDP.
1754 """
1755
1756 def try_bind(port, socket_type, socket_proto):
1757 s = socket.socket(socket.AF_INET, socket_type, socket_proto)
1758 try:
1759 try:
1760 s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
1761 s.bind(('', port))
1762 return s.getsockname()[1]
1763 except socket.error:
1764 return None
1765 finally:
1766 s.close()
1767
1768 # On the 2.6 kernel, calling try_bind() on UDP socket returns the
1769 # same port over and over. So always try TCP first.
1770 while True:
1771 # Ask the OS for an unused port.
1772 port = try_bind(0, socket.SOCK_STREAM, socket.IPPROTO_TCP)
1773 # Check if this port is unused on the other protocol.
1774 if port and try_bind(port, socket.SOCK_DGRAM, socket.IPPROTO_UDP):
1775 return port
1776
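# Illustrative usage sketch for get_unused_port() (comment only). Note the
# race window mentioned above: another process may grab the port before it
# is bound here:
#
#   port = get_unused_port()
#   server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   server.bind(('127.0.0.1', port))
#   server.listen(1)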
1777
1778def ask(question, auto=False):
1779 """
1780 Raw input with a prompt that emulates logging.
1781
1782 @param question: Question to be asked
1783 @param auto: Whether to return "y" instead of asking the question
1784 """
1785 if auto:
1786 logging.info("%s (y/n) y", question)
1787 return "y"
1788 return raw_input("%s INFO | %s (y/n) " %
1789 (time.strftime("%H:%M:%S", time.localtime()), question))
1790
1791
1792def rdmsr(address, cpu=0):
1793 """
1794 Reads an x86 MSR from the specified CPU, returns as long integer.
1795 """
1796 with open('/dev/cpu/%s/msr' % cpu, 'r', 0) as fd:
1797 fd.seek(address)
1798 return struct.unpack('=Q', fd.read(8))[0]
1799
1800
1801def wait_for_value(func,
1802 expected_value=None,
1803 min_threshold=None,
1804 max_threshold=None,
1805 timeout_sec=10):
1806 """
1807 Returns the value of func(). If |expected_value|, |min_threshold|, and
1808 |max_threshold| are not set, returns immediately.
1809
1810 If |expected_value| is set, polls the return value until |expected_value| is
1811 reached, and returns that value.
1812
1813 If either |max_threshold| or |min_threshold| is set, this function will
1814 repeatedly call func() until the return value reaches or crosses one of
1815 these thresholds.
1816
1817 Polling will stop after |timeout_sec| regardless of these thresholds.
1818
1819 @param func: function whose return value is to be waited on.
1820 @param expected_value: wait for func to return this value.
1821 @param min_threshold: wait for func value to reach or fall below this value.
1822 @param max_threshold: wait for func value to reach or rise above this value.
1823 @param timeout_sec: Number of seconds to wait before giving up and
1824 returning whatever value func() last returned.
1825
1826 Return value:
1827 The most recent return value of func().
1828 """
1829 value = None
1830 start_time_sec = time.time()
1831 while True:
1832 value = func()
1833 if (expected_value is None and \
1834 min_threshold is None and \
1835 max_threshold is None) or \
1836 (expected_value is not None and value == expected_value) or \
1837 (min_threshold is not None and value <= min_threshold) or \
1838 (max_threshold is not None and value >= max_threshold):
1839 break
1840
1841 if time.time() - start_time_sec >= timeout_sec:
1842 break
1843 time.sleep(0.1)
1844
1845 return value
1846
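# Illustrative usage sketch for wait_for_value() (comment only;
# read_battery_percent is a hypothetical sampling function):
#
#   # Wait up to 30 seconds for the reading to reach or exceed 95.
#   value = wait_for_value(read_battery_percent, max_threshold=95,
#                          timeout_sec=30)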
1847
1848def wait_for_value_changed(func,
1849 old_value=None,
1850 timeout_sec=10):
1851 """
1852 Returns the value of func().
1853
1854 The function polls the return value until it is different from |old_value|,
1855 and returns that value.
1856
1857 Polling will stop after |timeout_sec|.
1858
1859 @param func: function whose return value is to be waited on.
1860 @param old_value: wait for func to return a value different from this.
1861 @param timeout_sec: Number of seconds to wait before giving up and
1862 returning whatever value func() last returned.
1863
1864 @returns The most recent return value of func().
1865 """
1866 value = None
1867 start_time_sec = time.time()
1868 while True:
1869 value = func()
1870 if value != old_value:
1871 break
1872
1873 if time.time() - start_time_sec >= timeout_sec:
1874 break
1875 time.sleep(0.1)
1876
1877 return value
1878
1879
1880CONFIG = global_config.global_config
1881
1882# Keep checking if the pid is alive every second until the timeout (in seconds)
1883CHECK_PID_IS_ALIVE_TIMEOUT = 6
1884
1885_LOCAL_HOST_LIST = ('localhost', '127.0.0.1')
1886
1887# The default address of a vm gateway.
1888DEFAULT_VM_GATEWAY = '10.0.2.2'
1889
1890# Google Storage bucket URI to store results in.
1891DEFAULT_OFFLOAD_GSURI = CONFIG.get_config_value(
1892 'CROS', 'results_storage_server', default=None)
1893
1894# Default Moblab Ethernet Interface.
1895_MOBLAB_ETH_0 = 'eth0'
1896_MOBLAB_ETH_1 = 'eth1'
1897
1898# A list of subnets that requires dedicated devserver and drone in the same
1899# subnet. Each item is a tuple of (subnet_ip, mask_bits), e.g.,
1900# ('192.168.0.0', 24))
1901RESTRICTED_SUBNETS = []
1902
1903def _setup_restricted_subnets():
1904 restricted_subnets_list = CONFIG.get_config_value(
1905 'CROS', 'restricted_subnets', type=list, default=[])
1906 # TODO(dshi): Remove the code to split subnet with `:` after R51 is
1907 # off stable channel, and update shadow config to use `/` as
1908 # delimiter for consistency.
1909 for subnet in restricted_subnets_list:
1910 ip, mask_bits = subnet.split('/') if '/' in subnet \
1911 else subnet.split(':')
1912 RESTRICTED_SUBNETS.append((ip, int(mask_bits)))
1913
1914_setup_restricted_subnets()
1915
1916# regex pattern for CLIENT/wireless_ssid_ config. For example, global config
1917# can have following config in CLIENT section to indicate that hosts in subnet
1918# 192.168.0.1/24 should use wireless ssid of `ssid_1`
1919# wireless_ssid_192.168.0.1/24: ssid_1
1920WIRELESS_SSID_PATTERN = 'wireless_ssid_(.*)/(\d+)'
1921
1922
Keith Haddowa4b55dd2018-02-28 14:34:59 -08001923def get_moblab_serial_number():
Keith Haddowd2798fe2018-07-24 10:06:50 -07001924 """Gets a unique identifier for the moblab.
Allen Li5ed7e632017-02-03 16:31:33 -08001925
Keith Haddowd2798fe2018-07-24 10:06:50 -07001926 The serial number is the preferred identifier; use it if
1927 present. Otherwise, fall back to the ethernet MAC address.
Allen Li5ed7e632017-02-03 16:31:33 -08001928 """
Keith Haddowd2798fe2018-07-24 10:06:50 -07001929 for vpd_key in ['serial_number', 'ethernet_mac']:
1930 try:
1931 cmd_result = run('sudo vpd -g %s' % vpd_key)
1932 if cmd_result and cmd_result.stdout:
1933 return cmd_result.stdout
1934 except error.CmdError as e:
1935 logging.error(str(e))
1936 logging.info(vpd_key)
Keith Haddowa4b55dd2018-02-28 14:34:59 -08001937 return 'NoSerialNumber'
Allen Li5ed7e632017-02-03 16:31:33 -08001938
1939
Kevin Cernekee0f709a62018-02-16 15:13:13 -08001940def ping(host, deadline=None, tries=None, timeout=60, user=None):
Allen Li5ed7e632017-02-03 16:31:33 -08001941 """Attempt to ping |host|.
1942
1943 Shell out to 'ping' if host is an IPv4 address or 'ping6' if host is an
1944 IPv6 address to try to reach |host| for |timeout| seconds.
1945 Returns exit code of ping.
1946
1947 Per 'man ping', if you specify BOTH |deadline| and |tries|, ping only
1948 returns 0 if we get responses to |tries| pings within |deadline| seconds.
1949
1950 Specifying |deadline| or |tries| alone should return 0 as long as
1951 some packets receive responses.
1952
1953 Note that while this works with literal IPv6 addresses it will not work
1954 with hostnames that resolve to IPv6 only.
1955
1956 @param host: the host to ping.
1957 @param deadline: seconds within which |tries| pings must succeed.
1958 @param tries: number of pings to send.
1959 @param timeout: number of seconds after which to kill 'ping' command.
1960 @return exit code of ping command.
1961 """
1962 args = [host]
Kevin Cernekee0f709a62018-02-16 15:13:13 -08001963 cmd = 'ping6' if re.search(r':.*:', host) else 'ping'
Allen Li5ed7e632017-02-03 16:31:33 -08001964
1965 if deadline:
1966 args.append('-w%d' % deadline)
1967 if tries:
1968 args.append('-c%d' % tries)
1969
Kevin Cernekee0f709a62018-02-16 15:13:13 -08001970 if user != None:
1971 args = [user, '-c', ' '.join([cmd] + args)]
1972 cmd = 'su'
1973
1974 return run(cmd, args=args, verbose=True,
Allen Li5ed7e632017-02-03 16:31:33 -08001975 ignore_status=True, timeout=timeout,
1976 stdout_tee=TEE_TO_LOGS,
1977 stderr_tee=TEE_TO_LOGS).exit_status
1978
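# Illustrative usage sketch for ping() (comment only; the address is a
# made-up example). The return value is the exit status of ping, so 0 means
# the host answered:
#
#   if ping('192.168.0.10', tries=3, deadline=5) == 0:
#       logging.info('DUT is reachable')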
1979
1980def host_is_in_lab_zone(hostname):
1981 """Check if the host is in the CLIENT.dns_zone.
1982
1983 @param hostname: The hostname to check.
1984 @returns True if hostname.dns_zone resolves, otherwise False.
1985 """
1986 host_parts = hostname.split('.')
1987 dns_zone = CONFIG.get_config_value('CLIENT', 'dns_zone', default=None)
1988 fqdn = '%s.%s' % (host_parts[0], dns_zone)
Xixuan Wud227ac82019-01-07 14:04:51 -08001989 logging.debug('Checking if host %s is in lab zone.', fqdn)
Allen Li5ed7e632017-02-03 16:31:33 -08001990 try:
1991 socket.gethostbyname(fqdn)
1992 return True
1993 except socket.gaierror:
1994 return False
1995
1996
Prathmesh Prabhucbd5ebb2018-08-28 17:04:50 -07001997def in_moblab_ssp():
1998 """Detects if this execution is inside an SSP container on moblab."""
1999 config_is_moblab = CONFIG.get_config_value('SSP', 'is_moblab', type=bool,
2000 default=False)
2001 return is_in_container() and config_is_moblab
2002
2003
Allen Li5ed7e632017-02-03 16:31:33 -08002004def get_chrome_version(job_views):
2005 """
2006 Retrieves the version of the chrome binary associated with a job.
2007
2008 When a test runs we query the chrome binary for its version and drop
2009 that value into a client keyval. To retrieve the chrome version we get all
2010 the views associated with a test from the db, including those of the
2011 server and client jobs, and parse the version out of the first test view
2012 that has it. If we never ran a single test in the suite the job_views
2013 dictionary will not contain a chrome version.
2014
2015 This method cannot retrieve the chrome version from a dictionary that
2016 does not conform to the structure of an autotest tko view.
2017
2018 @param job_views: a list of a job's result views, as returned by
2019 the get_detailed_test_views method in rpc_interface.
2020 @return: The chrome version string, or None if one can't be found.
2021 """
2022
2023 # Aborted jobs have no views.
2024 if not job_views:
2025 return None
2026
2027 for view in job_views:
2028 if (view.get('attributes')
2029 and constants.CHROME_VERSION in view['attributes'].keys()):
2030
2031 return view['attributes'].get(constants.CHROME_VERSION)
2032
2033 logging.warning('Could not find chrome version for failure.')
2034 return None
2035
2036
Allen Li5ed7e632017-02-03 16:31:33 -08002037def get_moblab_id():
2038 """Gets the moblab random id.
2039
2040 The random id file is cached on disk. If it does not exist, a new file is
2041 created the first time.
2042
2043 @returns the moblab random id.
2044 """
2045 moblab_id_filepath = '/home/moblab/.moblab_id'
Keith Haddowfa0412a2017-05-18 09:17:22 -07002046 try:
2047 if os.path.exists(moblab_id_filepath):
2048 with open(moblab_id_filepath, 'r') as moblab_id_file:
2049 random_id = moblab_id_file.read()
2050 else:
2051 random_id = uuid.uuid1().hex
2052 with open(moblab_id_filepath, 'w') as moblab_id_file:
2053 moblab_id_file.write('%s' % random_id)
2054 except IOError as e:
2055 # Possible race condition, another process has created the file.
2056 # Sleep a second to make sure the file gets closed.
2057 logging.info(e)
2058 time.sleep(1)
Allen Li5ed7e632017-02-03 16:31:33 -08002059 with open(moblab_id_filepath, 'r') as moblab_id_file:
2060 random_id = moblab_id_file.read()
Allen Li5ed7e632017-02-03 16:31:33 -08002061 return random_id
2062
2063
2064def get_offload_gsuri():
2065 """Return the GSURI to offload test results to.
2066
2067 For the normal use case this is the results_storage_server in the
2068 global_config.
2069
2070 However partners using Moblab will be offloading their results to a
2071 subdirectory of their image storage buckets. The subdirectory is
2072 determined by the serial number (or MAC address) of the Moblab device.
2073
2074 @returns gsuri to offload test results to.
2075 """
2076 # For non-moblab, use results_storage_server or default.
Dean Liao34be76d2018-07-19 00:48:47 +08002077 if not is_moblab(): # pylint: disable=undefined-variable
Allen Li5ed7e632017-02-03 16:31:33 -08002078 return DEFAULT_OFFLOAD_GSURI
2079
2080 # For moblab, use results_storage_server or image_storage_server as bucket
2081 # name and mac-address/moblab_id as path.
2082 gsuri = DEFAULT_OFFLOAD_GSURI
2083 if not gsuri:
Dean Liao34be76d2018-07-19 00:48:47 +08002084 gsuri = "%sresults/" % CONFIG.get_config_value('CROS',
2085 'image_storage_server')
Allen Li5ed7e632017-02-03 16:31:33 -08002086
Keith Haddowa4b55dd2018-02-28 14:34:59 -08002087 return '%s%s/%s/' % (gsuri, get_moblab_serial_number(), get_moblab_id())
Allen Li5ed7e632017-02-03 16:31:33 -08002088
2089
2090# TODO(petermayo): crosbug.com/31826 Share this with _GsUpload in
2091# //chromite.git/buildbot/prebuilt.py somewhere/somehow
2092def gs_upload(local_file, remote_file, acl, result_dir=None,
2093 transfer_timeout=300, acl_timeout=300):
2094 """Upload to GS bucket.
2095
2096 @param local_file: Local file to upload
2097 @param remote_file: Remote location to upload the local_file to.
2098 @param acl: name or file used for controlling access to the uploaded
2099 file.
2100 @param result_dir: Result directory if you want to add tracing to the
2101 upload.
2102 @param transfer_timeout: Timeout for this upload call.
2103 @param acl_timeout: Timeout for the acl call needed to confirm that
2104 the uploader has permissions to execute the upload.
2105
2106 @raise CmdError: the exit code of the gsutil call was not 0.
2107
2108 @returns True/False - depending on if the upload succeeded or failed.
2109 """
2110 # https://developers.google.com/storage/docs/accesscontrol#extension
2111 CANNED_ACLS = ['project-private', 'private', 'public-read',
2112 'public-read-write', 'authenticated-read',
2113 'bucket-owner-read', 'bucket-owner-full-control']
2114 _GSUTIL_BIN = 'gsutil'
2115 acl_cmd = None
2116 if acl in CANNED_ACLS:
2117 cmd = '%s cp -a %s %s %s' % (_GSUTIL_BIN, acl, local_file, remote_file)
2118 else:
2119 # For private uploads we assume that the overlay board is set up
2120 # properly and a googlestore_acl.xml is present; if not, this script
2121 # errors.
2122 cmd = '%s cp -a private %s %s' % (_GSUTIL_BIN, local_file, remote_file)
2123 if not os.path.exists(acl):
2124 logging.error('Unable to find ACL File %s.', acl)
2125 return False
2126 acl_cmd = '%s setacl %s %s' % (_GSUTIL_BIN, acl, remote_file)
2127 if not result_dir:
2128 run(cmd, timeout=transfer_timeout, verbose=True)
2129 if acl_cmd:
2130 run(acl_cmd, timeout=acl_timeout, verbose=True)
2131 return True
2132 with open(os.path.join(result_dir, 'tracing'), 'w') as ftrace:
2133 ftrace.write('Preamble\n')
2134 run(cmd, timeout=transfer_timeout, verbose=True,
2135 stdout_tee=ftrace, stderr_tee=ftrace)
2136 if acl_cmd:
2137 ftrace.write('\nACL setting\n')
2138 # Apply the passed in ACL xml file to the uploaded object.
2139 run(acl_cmd, timeout=acl_timeout, verbose=True,
2140 stdout_tee=ftrace, stderr_tee=ftrace)
2141 ftrace.write('Postamble\n')
2142 return True
2143
2144
2145def gs_ls(uri_pattern):
2146 """Returns a list of URIs that match a given pattern.
2147
2148 @param uri_pattern: a GS URI pattern, may contain wildcards
2149
2150 @return A list of URIs matching the given pattern.
2151
2152 @raise CmdError: the gsutil command failed.
2153
2154 """
2155 gs_cmd = ' '.join(['gsutil', 'ls', uri_pattern])
2156 result = system_output(gs_cmd).splitlines()
2157 return [path.rstrip() for path in result if path]
2158
2159
Kirtika Ruchandanif798f5e2018-07-08 18:34:20 -07002160def nuke_pids(pid_list, signal_queue=None):
Allen Li5ed7e632017-02-03 16:31:33 -08002161 """
2162 Given a list of PIDs, kill them via an escalating series of signals.
2163
2164 @param pid_list: List of PIDs to kill.
2165 @param signal_queue: Queue of signals to send to the PIDs to terminate them.
2166
2167 @return: A mapping of the signal name to the number of processes it
2168 was sent to.
2169 """
Kirtika Ruchandanif798f5e2018-07-08 18:34:20 -07002170 if signal_queue is None:
2171 signal_queue = [signal.SIGTERM, signal.SIGKILL]
Allen Li5ed7e632017-02-03 16:31:33 -08002172 sig_count = {}
2173 # Though this is slightly hacky, it beats hardcoding names any day.
2174 sig_names = dict((k, v) for v, k in signal.__dict__.iteritems()
2175 if v.startswith('SIG'))
2176 for sig in signal_queue:
2177 logging.debug('Sending signal %s to the following pids:', sig)
2178 sig_count[sig_names.get(sig, 'unknown_signal')] = len(pid_list)
2179 for pid in pid_list:
2180 logging.debug('Pid %d', pid)
2181 try:
2182 os.kill(pid, sig)
2183 except OSError:
2184 # The process may have died from a previous signal before we
2185 # could kill it.
2186 pass
2187 if sig == signal.SIGKILL:
2188 return sig_count
2189 pid_list = [pid for pid in pid_list if pid_is_alive(pid)]
2190 if not pid_list:
2191 break
2192 time.sleep(CHECK_PID_IS_ALIVE_TIMEOUT)
2193 failed_list = []
2194 for pid in pid_list:
2195 if pid_is_alive(pid):
2196 failed_list.append('Could not kill %d for process name: %s.' %
2197 (pid, get_process_name(pid)))
2198 if failed_list:
2199 raise error.AutoservRunError('Following errors occurred: %s' %
2200 failed_list, None)
2201 return sig_count
2202
2203
2204def externalize_host(host):
2205 """Returns an externally accessible host name.
2206
2207 @param host: a host name or address (string)
2208
2209 @return An externally visible host name or address
2210
2211 """
2212 return socket.gethostname() if host in _LOCAL_HOST_LIST else host
2213
2214
2215def urlopen_socket_timeout(url, data=None, timeout=5):
2216 """
2217 Wrapper to urllib2.urlopen with a socket timeout.
2218
2219 This method will convert all socket timeouts to
2220 TimeoutExceptions, so we can use it in conjunction
2221 with the rpc retry decorator and continue to handle
2222 other URLErrors as we see fit.
2223
2224 @param url: The url to open.
2225 @param data: The data to send to the url (eg: the urlencoded dictionary
2226 used with a POST call).
2227 @param timeout: The timeout for this urlopen call.
2228
2229 @return: The response of the urlopen call.
2230
2231 @raises: error.TimeoutException when a socket timeout occurs.
2232 urllib2.URLError for errors that not caused by timeout.
2233 urllib2.HTTPError for errors like 404 url not found.
2234 """
2235 old_timeout = socket.getdefaulttimeout()
2236 socket.setdefaulttimeout(timeout)
2237 try:
2238 return urllib2.urlopen(url, data=data)
2239 except urllib2.URLError as e:
2240 if type(e.reason) is socket.timeout:
2241 raise error.TimeoutException(str(e))
2242 raise
2243 finally:
2244 socket.setdefaulttimeout(old_timeout)
2245
2246
2247def parse_chrome_version(version_string):
2248 """
2249 Parse a chrome version string and return version and milestone.
2250
2251 Given a chrome version of the form "W.X.Y.Z", return "W.X.Y.Z" as
2252 the version and "W" as the milestone.
2253
2254 @param version_string: Chrome version string.
2255 @return: a tuple (chrome_version, milestone). If the incoming version
2256 string is not of the form "W.X.Y.Z", chrome_version will
2257 be set to the incoming "version_string" argument and the
2258 milestone will be set to the empty string.
2259 """
2260 match = re.search('(\d+)\.\d+\.\d+\.\d+', version_string)
2261 ver = match.group(0) if match else version_string
2262 milestone = match.group(1) if match else ''
2263 return ver, milestone
2264
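# Illustrative sketch of parse_chrome_version() (comment only; the version
# strings are made-up examples):
#
#   parse_chrome_version('61.0.3163.120')  # -> ('61.0.3163.120', '61')
#   parse_chrome_version('unknown')        # -> ('unknown', '')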
2265
2266def is_localhost(server):
2267 """Check if server is equivalent to localhost.
2268
2269 @param server: Name of the server to check.
2270
2271 @return: True if given server is equivalent to localhost.
2272
2273 @raise socket.gaierror: If server name failed to be resolved.
2274 """
2275 if server in _LOCAL_HOST_LIST:
2276 return True
2277 try:
2278 return (socket.gethostbyname(socket.gethostname()) ==
2279 socket.gethostbyname(server))
2280 except socket.gaierror:
2281 logging.error('Failed to resolve server name %s.', server)
2282 return False
2283
2284
Allen Li5ed7e632017-02-03 16:31:33 -08002285def get_function_arg_value(func, arg_name, args, kwargs):
2286 """Get the value of the given argument for the function.
2287
2288 @param func: Function being called with given arguments.
2289 @param arg_name: Name of the argument to look for value.
2290 @param args: arguments for function to be called.
2291 @param kwargs: keyword arguments for function to be called.
2292
2293 @return: The value of the given argument for the function.
2294
2295 @raise ValueError: If the argument is not listed in the function's arguments.
2296 @raise KeyError: If no value is found for the given argument.
2297 """
2298 if arg_name in kwargs:
2299 return kwargs[arg_name]
2300
2301 argspec = inspect.getargspec(func)
2302 index = argspec.args.index(arg_name)
2303 try:
2304 return args[index]
2305 except IndexError:
2306 try:
2307 # The argument can use a default value. Reverse the default value
2308 # so argument with default value can be counted from the last to
2309 # the first.
2310 return argspec.defaults[::-1][len(argspec.args) - index - 1]
2311 except IndexError:
2312 raise KeyError('Argument %s is not given a value. argspec: %s, '
2313 'args:%s, kwargs:%s' %
2314 (arg_name, argspec, args, kwargs))
2315
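# Illustrative sketch of get_function_arg_value() resolution order (comment
# only; example_func is a hypothetical function). Keyword arguments win,
# then positional arguments, then declared defaults:
#
#   def example_func(a, b=1):
#       pass
#   get_function_arg_value(example_func, 'b', (0,), {'b': 3})  # -> 3
#   get_function_arg_value(example_func, 'b', (0, 2), {})      # -> 2
#   get_function_arg_value(example_func, 'b', (0,), {})        # -> 1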
2316
2317def has_systemd():
2318 """Check if the host is running systemd.
2319
2320 @return: True if the host uses systemd, otherwise returns False.
2321 """
2322 return os.path.basename(os.readlink('/proc/1/exe')) == 'systemd'
2323
2324
Allen Li5ed7e632017-02-03 16:31:33 -08002325def get_real_user():
2326 """Get the real user that runs the script.
2327
2328 The function check environment variable SUDO_USER for the user if the
2329 script is run with sudo. Otherwise, it returns the value of environment
2330 variable USER.
2331
2332 @return: The user name that runs the script.
2333
2334 """
2335 user = os.environ.get('SUDO_USER')
2336 if not user:
2337 user = os.environ.get('USER')
2338 return user
2339
2340
2341def get_service_pid(service_name):
2342 """Return pid of service.
2343
2344 @param service_name: string name of service.
2345
2346 @return: pid or 0 if service is not running.
2347 """
2348 if has_systemd():
2349 # systemctl show prints 'MainPID=0' if the service is not running.
2350 cmd_result = run('systemctl show -p MainPID %s' %
2351 service_name, ignore_status=True)
2352 return int(cmd_result.stdout.split('=')[1])
2353 else:
2354 cmd_result = run('status %s' % service_name,
2355 ignore_status=True)
2356 if 'start/running' in cmd_result.stdout:
2357 return int(cmd_result.stdout.split()[3])
2358 return 0
2359
2360
2361def control_service(service_name, action='start', ignore_status=True):
2362 """Controls a service. It can be used to start, stop or restart
2363 a service.
2364
2365 @param service_name: string service to be restarted.
2366
2367 @param action: string choice of action to control command.
2368
2369 @param ignore_status: boolean ignore if system command fails.
2370
2371 @return: status code of the executed command.
2372 """
2373 if action not in ('start', 'stop', 'restart'):
2374 raise ValueError('Unknown action supplied as parameter.')
2375
2376 control_cmd = action + ' ' + service_name
2377 if has_systemd():
2378 control_cmd = 'systemctl ' + control_cmd
2379 return system(control_cmd, ignore_status=ignore_status)
2380
2381
2382def restart_service(service_name, ignore_status=True):
2383 """Restarts a service
2384
2385 @param service_name: string service to be restarted.
2386
2387 @param ignore_status: boolean ignore if system command fails.
2388
2389 @return: status code of the executed command.
2390 """
Dean Liao34be76d2018-07-19 00:48:47 +08002391 return control_service(service_name, action='restart',
2392 ignore_status=ignore_status)
Allen Li5ed7e632017-02-03 16:31:33 -08002393
2394
2395def start_service(service_name, ignore_status=True):
2396 """Starts a service
2397
2398 @param service_name: string service to be started.
2399
2400 @param ignore_status: boolean ignore if system command fails.
2401
2402 @return: status code of the executed command.
2403 """
Dean Liao34be76d2018-07-19 00:48:47 +08002404 return control_service(service_name, action='start',
2405 ignore_status=ignore_status)
Allen Li5ed7e632017-02-03 16:31:33 -08002406
2407
2408def stop_service(service_name, ignore_status=True):
2409 """Stops a service
2410
2411 @param service_name: string service to be stopped.
2412
2413 @param ignore_status: boolean ignore if system command fails.
2414
2415 @return: status code of the executed command.
2416 """
Dean Liao34be76d2018-07-19 00:48:47 +08002417 return control_service(service_name, action='stop',
2418 ignore_status=ignore_status)
Allen Li5ed7e632017-02-03 16:31:33 -08002419
2420
2421def sudo_require_password():
2422 """Test if the process can run sudo command without using password.
2423
2424 @return: True if the process needs password to run sudo command.
2425
2426 """
2427 try:
2428 run('sudo -n true')
2429 return False
2430 except error.CmdError:
2431 logging.warn('sudo command requires password.')
2432 return True
2433
2434
2435def is_in_container():
2436 """Check if the process is running inside a container.
2437
2438 @return: True if the process is running inside a container, otherwise False.
2439 """
2440 result = run('grep -q "/lxc/" /proc/1/cgroup',
2441 verbose=False, ignore_status=True)
Dan Shi30a90de2017-04-24 12:46:19 -07002442 if result.exit_status == 0:
2443 return True
2444
2445 # Check "container" environment variable for lxd/lxc containers.
Dan Shi8e312ec2017-04-24 14:23:36 -07002446 if os.environ.get('container') == 'lxc':
Dan Shi30a90de2017-04-24 12:46:19 -07002447 return True
2448
2449 return False
Allen Li5ed7e632017-02-03 16:31:33 -08002450
2451
2452def is_flash_installed():
2453 """
2454 The Adobe Flash binary is only distributed with internal builds.
2455 """
2456 return (os.path.exists('/opt/google/chrome/pepper/libpepflashplayer.so')
2457 and os.path.exists('/opt/google/chrome/pepper/pepper-flash.info'))
2458
2459
2460def verify_flash_installed():
2461 """
2462 The Adobe Flash binary is only distributed with internal builds.
2463 Warn users of public builds of the extra dependency.
2464 """
2465 if not is_flash_installed():
2466 raise error.TestNAError('No Adobe Flash binary installed.')
2467
2468
2469def is_in_same_subnet(ip_1, ip_2, mask_bits=24):
2470 """Check if two IP addresses are in the same subnet with given mask bits.
2471
2472 The two IP addresses are string of IPv4, e.g., '192.168.0.3'.
2473
2474 @param ip_1: First IP address to compare.
2475 @param ip_2: Second IP address to compare.
2476 @param mask_bits: Number of mask bits for subnet comparison. Defaults to 24.
2477
2478 @return: True if the two IP addresses are in the same subnet.
2479
2480 """
2481 mask = ((2L<<mask_bits-1) -1)<<(32-mask_bits)
2482 ip_1_num = struct.unpack('!I', socket.inet_aton(ip_1))[0]
2483 ip_2_num = struct.unpack('!I', socket.inet_aton(ip_2))[0]
2484 return ip_1_num & mask == ip_2_num & mask
2485
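# Worked example for is_in_same_subnet() (comment only; the addresses are
# made up). With the default mask_bits=24 the mask is 0xFFFFFF00, so only
# the last octet may differ:
#
#   is_in_same_subnet('192.168.0.3', '192.168.0.250')        # -> True
#   is_in_same_subnet('192.168.0.3', '192.168.1.3')          # -> False
#   is_in_same_subnet('10.0.1.5', '10.0.2.5', mask_bits=16)  # -> True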
2486
2487def get_ip_address(hostname):
2488 """Get the IP address of given hostname.
2489
2490 @param hostname: Hostname of a DUT.
2491
2492 @return: The IP address of given hostname. None if failed to resolve
2493 hostname.
2494 """
2495 try:
2496 if hostname:
2497 return socket.gethostbyname(hostname)
2498 except socket.gaierror as e:
2499 logging.error('Failed to get IP address of %s, error: %s.', hostname, e)
2500
2501
2502def get_servers_in_same_subnet(host_ip, mask_bits, servers=None,
2503 server_ip_map=None):
2504 """Get the servers in the same subnet of the given host ip.
2505
2506 @param host_ip: The IP address of a dut to look for devserver.
2507 @param mask_bits: Number of mask bits.
2508 @param servers: A list of servers to be filtered by subnet specified by
2509 host_ip and mask_bits.
2510 @param server_ip_map: A map between the server name and its IP address.
2511 The map can be pre-built for better performance, e.g., when
2512 allocating a drone for an agent task.
2513
2514 @return: A list of servers in the same subnet of the given host ip.
2515
2516 """
2517 matched_servers = []
2518 if not servers and not server_ip_map:
2519 raise ValueError('Either `servers` or `server_ip_map` must be given.')
2520 if not servers:
2521 servers = server_ip_map.keys()
2522 # Make sure server_ip_map is an empty dict if it's not set.
2523 if not server_ip_map:
2524 server_ip_map = {}
2525 for server in servers:
2526 server_ip = server_ip_map.get(server, get_ip_address(server))
2527 if server_ip and is_in_same_subnet(server_ip, host_ip, mask_bits):
2528 matched_servers.append(server)
2529 return matched_servers
2530
2531
Kirtika Ruchandanif798f5e2018-07-08 18:34:20 -07002532def get_restricted_subnet(hostname, restricted_subnets=None):
Allen Li5ed7e632017-02-03 16:31:33 -08002533 """Get the restricted subnet of given hostname.
2534
2535 @param hostname: Name of the host to look for matched restricted subnet.
2536 @param restricted_subnets: A list of restricted subnets, default is set to
2537 RESTRICTED_SUBNETS.
2538
2539 @return: A tuple of (subnet_ip, mask_bits), which defines a restricted
2540 subnet.
2541 """
Kirtika Ruchandanif798f5e2018-07-08 18:34:20 -07002542 if restricted_subnets is None:
2543 restricted_subnets = RESTRICTED_SUBNETS
Allen Li5ed7e632017-02-03 16:31:33 -08002544 host_ip = get_ip_address(hostname)
2545 if not host_ip:
2546 return
2547 for subnet_ip, mask_bits in restricted_subnets:
2548 if is_in_same_subnet(subnet_ip, host_ip, mask_bits):
2549 return subnet_ip, mask_bits
2550
2551
2552def get_wireless_ssid(hostname):
2553 """Get the wireless ssid based on given hostname.
2554
2555 The method tries to locate the wireless ssid in the same subnet of given
2556 hostname first. If none is found, it returns the default setting in
2557 CLIENT/wireless_ssid.
2558
2559 @param hostname: Hostname of the test device.
2560
2561 @return: wireless ssid for the test device.
2562 """
2563 default_ssid = CONFIG.get_config_value('CLIENT', 'wireless_ssid',
2564 default=None)
2565 host_ip = get_ip_address(hostname)
2566 if not host_ip:
2567 return default_ssid
2568
2569 # Get all wireless ssid in the global config.
2570 ssids = CONFIG.get_config_value_regex('CLIENT', WIRELESS_SSID_PATTERN)
2571
2572 # There could be multiple subnet matches, pick the one with most strict
2573 # match, i.e., the one with highest maskbit.
2574 matched_ssid = default_ssid
2575 matched_maskbit = -1
2576 for key, value in ssids.items():
2577 # The config key filtered by regex WIRELESS_SSID_PATTERN has a format of
2578 # wireless_ssid_[subnet_ip]/[maskbit], for example:
2579 # wireless_ssid_192.168.0.1/24
2580 # Following line extract the subnet ip and mask bit from the key name.
2581 match = re.match(WIRELESS_SSID_PATTERN, key)
2582 subnet_ip, maskbit = match.groups()
2583 maskbit = int(maskbit)
2584 if (is_in_same_subnet(subnet_ip, host_ip, maskbit) and
2585 maskbit > matched_maskbit):
2586 matched_ssid = value
2587 matched_maskbit = maskbit
2588 return matched_ssid
2589
2590
2591def parse_launch_control_build(build_name):
2592 """Get branch, target, build_id from the given Launch Control build_name.
2593
2594 @param build_name: Name of a Launch Control build, should be formatted as
2595 branch/target/build_id
2596
2597 @return: Tuple of branch, target, build_id
2598 @raise ValueError: If the build_name is not correctly formated.
2599 """
2600 branch, target, build_id = build_name.split('/')
2601 return branch, target, build_id
2602
2603
2604def parse_android_target(target):
2605 """Get board and build type from the given target.
2606
2607 @param target: Name of an Android build target, e.g., shamu-eng.
2608
2609 @return: Tuple of board, build_type
2610 @raise ValueError: If the target is not correctly formated.
2611 """
2612 board, build_type = target.split('-')
2613 return board, build_type
2614
2615
2616def parse_launch_control_target(target):
2617 """Parse the build target and type from a Launch Control target.
2618
2619 The Launch Control target has the format of build_target-build_type, e.g.,
2620 shamu-eng or dragonboard-userdebug. This method extracts the build target
2621 and type from the target name.
2622
2623 @param target: Name of a Launch Control target, e.g., shamu-eng.
2624
2625 @return: (build_target, build_type), e.g., ('shamu', 'userdebug')
2626 """
2627 match = re.match('(?P<build_target>.+)-(?P<build_type>[^-]+)', target)
2628 if match:
2629 return match.group('build_target'), match.group('build_type')
2630 else:
2631 return None, None
2632
2633
2634def is_launch_control_build(build):
2635 """Check if a given build is a Launch Control build.
2636
2637 @param build: Name of a build, e.g.,
2638 ChromeOS build: daisy-release/R50-1234.0.0
2639 Launch Control build: git_mnc_release/shamu-eng
2640
2641 @return: True if the build name matches the pattern of a Launch Control
2642 build, False otherwise.
2643 """
2644 try:
2645 _, target, _ = parse_launch_control_build(build)
2646 build_target, _ = parse_launch_control_target(target)
2647 if build_target:
2648 return True
2649 except ValueError:
2650 # parse_launch_control_build or parse_launch_control_target failed.
2651 pass
2652 return False
2653
2654
2655def which(exec_file):
2656 """Finds an executable file.
2657
2658 If the file name contains a path component, it is checked as-is.
2659 Otherwise, we check with each of the path components found in the system
2660 PATH prepended. This behavior is similar to the 'which' command-line tool.
2661
2662 @param exec_file: Name or path to desired executable.
2663
2664 @return: An actual path to the executable, or None if not found.
2665 """
2666 if os.path.dirname(exec_file):
2667 return exec_file if os.access(exec_file, os.X_OK) else None
2668 sys_path = os.environ.get('PATH')
2669 prefix_list = sys_path.split(os.pathsep) if sys_path else []
2670 for prefix in prefix_list:
2671 path = os.path.join(prefix, exec_file)
2672 if os.access(path, os.X_OK):
2673 return path
2674
2675
2676class TimeoutError(error.TestError):
Dean Liao7d51db02018-07-16 17:21:42 +08002677 """Error raised when poll_for_condition() failed to poll within time.
2678
2679 It may embed a reason (either a string or an exception object) so that
2680 the caller of poll_for_condition() can handle failure better.
2681 """
2682
2683 def __init__(self, message=None, reason=None):
2684 """Constructor.
2685
2686 It supports three invocations:
2687 1) TimeoutError()
2688 2) TimeoutError(message): with customized message.
2689 3) TimeoutError(message, reason): with message and reason for timeout.
2690 """
2691 self.reason = reason
2692 if self.reason:
2693 reason_str = 'Reason: ' + repr(self.reason)
2694 if message:
2695 message += '. ' + reason_str
2696 else:
2697 message = reason_str
2698
2699 if message:
2700 super(TimeoutError, self).__init__(message)
2701 else:
2702 super(TimeoutError, self).__init__()
2703
2704
2705class Timer(object):
2706 """A synchronous timer to evaluate if timout is reached.
2707
2708 Usage:
2709 timer = Timer(timeout_sec)
2710 while timer.sleep(sleep_interval):
2711 # do something...
2712 """
2713 def __init__(self, timeout):
2714 """Constructor.
2715
2716 Note that the timer won't start until sleep() is first called.
2717
2718 @param timeout: timer timeout in seconds.
2719 """
2720 self.timeout = timeout
2721 self.deadline = 0
2722
2723 def sleep(self, interval):
2724 """Checks if it has sufficient time to sleep; sleeps if so.
2725
2726 It blocks for |interval| seconds if it has time to sleep.
2727 If timer is not ticked yet, kicks it off and returns True without
2728 sleep.
2729
2730 @param interval: sleep interval in seconds.
2731 @return True if it has sleeped or just kicked off the timer. False
2732 otherwise.
2733 """
2734 now = time.time()
2735 if not self.deadline:
2736 self.deadline = now + self.timeout
2737 return True
2738 if now + interval < self.deadline:
2739 time.sleep(interval)
2740 return True
2741 return False
Allen Li5ed7e632017-02-03 16:31:33 -08002742
2743
2744def poll_for_condition(condition,
2745 exception=None,
2746 timeout=10,
2747 sleep_interval=0.1,
2748 desc=None):
Zachary Marcusfe67b652018-04-11 15:22:53 -07002749 """Polls until a condition is evaluated to true.
Allen Li5ed7e632017-02-03 16:31:33 -08002750
Zachary Marcusfe67b652018-04-11 15:22:53 -07002751 @param condition: function taking no args and returning anything that will
2752 evaluate to True in a conditional check
2753 @param exception: exception to throw if condition doesn't evaluate to true
Allen Li5ed7e632017-02-03 16:31:33 -08002754 @param timeout: maximum number of seconds to wait
2755 @param sleep_interval: time to sleep between polls
2756 @param desc: description of default TimeoutError used if 'exception' is
2757 None
2758
Zachary Marcusfe67b652018-04-11 15:22:53 -07002759 @return The evaluated value that caused the poll loop to terminate.
Allen Li5ed7e632017-02-03 16:31:33 -08002760
2761 @raise 'exception' arg if supplied; TimeoutError otherwise
2762 """
2763 start_time = time.time()
2764 while True:
2765 value = condition()
2766 if value:
2767 return value
2768 if time.time() + sleep_interval - start_time > timeout:
2769 if exception:
Xixuan Wue3dfe002017-09-26 10:48:00 -07002770 logging.error('Will raise error %r due to unexpected return: '
2771 '%r', exception, value)
Kirtika Ruchandani2af5b2d2018-08-25 14:39:54 -07002772 raise exception # pylint: disable=raising-bad-type
Allen Li5ed7e632017-02-03 16:31:33 -08002773
2774 if desc:
2775 desc = 'Timed out waiting for condition: ' + desc
2776 else:
2777 desc = 'Timed out waiting for unnamed condition'
2778 logging.error(desc)
Dean Liao7d51db02018-07-16 17:21:42 +08002779 raise TimeoutError(message=desc)
Allen Li5ed7e632017-02-03 16:31:33 -08002780
2781 time.sleep(sleep_interval)
2782
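# Illustrative usage sketch for poll_for_condition() (comment only; the
# marker file path is made up):
#
#   poll_for_condition(lambda: os.path.exists('/tmp/ready'),
#                      timeout=30, sleep_interval=1,
#                      desc='marker file /tmp/ready to appear')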
2783
Dean Liao7d51db02018-07-16 17:21:42 +08002784def poll_for_condition_ex(condition, timeout=10, sleep_interval=0.1, desc=None):
2785 """Polls until a condition is evaluated to true or until timeout.
2786
2787 Similar to poll_for_condition, except that it handles exceptions that
2788 condition() raises. If the timeout is not reached, the exception is dropped
2789 and polling resumes after a sleep; otherwise, the exception is embedded in
2790 the raised TimeoutError.
2791
2792 @param condition: function taking no args and returning anything that will
2793 evaluate to True in a conditional check
2794 @param timeout: maximum number of seconds to wait
2795 @param sleep_interval: time to sleep between polls
2796 @param desc: description of the condition
2797
2798 @return The evaluated value that caused the poll loop to terminate.
2799
2800 @raise TimeoutError. If condition() raised exception, it is embedded in
2801 raised TimeoutError.
2802 """
2803 timer = Timer(timeout)
2804 while timer.sleep(sleep_interval):
2805 reason = None
2806 try:
2807 value = condition()
2808 if value:
2809 return value
2810 except BaseException as e:
2811 reason = e
2812
2813 if desc is None:
2814 desc = 'unnamed condition'
2815 if reason is None:
2816 reason = 'condition evaluated as false'
2817 to_raise = TimeoutError(message='Timed out waiting for ' + desc,
2818 reason=reason)
2819 logging.error(str(to_raise))
2820 raise to_raise
2821
2822
Ruben Zakariane88b4e92019-04-16 10:29:51 -07002823def shadowroot_query(element, action):
2824 """Recursively queries shadowRoot.
2825
2826 @param element: element to query for.
2827 @param action: action to be performed on the element.
2828
2829 @return JS functions to execute.
2830
2831 """
2832 # /deep/ CSS query has been removed from ShadowDOM. The only way to access
2833 # elements now is to recursively query in each shadowRoot.
2834 shadowroot_script = """
2835 function deepQuerySelectorAll(root, targetQuery) {
2836 const elems = Array.prototype.slice.call(
2837 root.querySelectorAll(targetQuery[0]));
2838 const remaining = targetQuery.slice(1);
2839 if (remaining.length === 0) {
2840 return elems;
2841 }
2842
2843 let res = [];
2844 for (let i = 0; i < elems.length; i++) {
2845 if (elems[i].shadowRoot) {
2846 res = res.concat(
2847 deepQuerySelectorAll(elems[i].shadowRoot, remaining));
2848 }
2849 }
2850 return res;
2851 };
2852 var testing_element = deepQuerySelectorAll(document, %s);
2853 testing_element[0].%s;
2854 """
2855 script_to_execute = shadowroot_script % (element, action)
2856 return script_to_execute
2857
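# Illustrative usage sketch for shadowroot_query() (comment only; the
# element path and action are made-up examples). |element| is a string
# holding a JS array literal of nested custom-element selectors:
#
#   script = shadowroot_query('["settings-ui", "settings-main"]', 'click()')
#   # script can then be evaluated in the page, e.g. via a telemetry tab's
#   # EvaluateJavaScript().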
2858
Zachary Marcus4e4cd7b2018-05-24 15:07:16 -07002859def threaded_return(function):
2860 """
2861 Decorator to add to a function to get that function to return a thread
2862 object, but with the added benefit of storing its return value.
2863
2864 @param function: function object to be run in the thread
2865
2866 @return a threading.Thread object, that has already been started, is
2867 recording its result, and can be completed and its result
2868 fetched by calling .finish()
2869 """
2870 def wrapped_t(queue, *args, **kwargs):
2871 """
2872 Calls the decorated function as normal, but appends the output into
2873 the passed-in threadsafe queue.
2874 """
2875 ret = function(*args, **kwargs)
2876 queue.put(ret)
2877
2878 def wrapped_finish(threaded_object):
2879 """
2880 Provides a utility to this thread object, getting its result while
2881 simultaneously joining the thread.
2882 """
2883 ret = threaded_object.get()
2884 threaded_object.join()
2885 return ret
2886
2887 def wrapper(*args, **kwargs):
2888 """
2889 Creates the queue and starts the thread, then assigns extra attributes
2890 to the thread to give it result-storing capability.
2891 """
2892 q = Queue.Queue()
2893 t = threading.Thread(target=wrapped_t, args=(q,) + args, kwargs=kwargs)
2894 t.start()
2895 t.result_queue = q
2896 t.get = t.result_queue.get
2897 t.finish = lambda: wrapped_finish(t)
2898 return t
2899
2900 # for the decorator
2901 return wrapper
2902
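# Illustrative usage sketch for @threaded_return (comment only; slow_read is
# a hypothetical helper):
#
#   @threaded_return
#   def slow_read(path):
#       with open(path) as f:
#           return f.read()
#
#   worker = slow_read('/var/log/messages')  # returns a started Thread
#   # ... do other work ...
#   contents = worker.finish()               # joins and returns the value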
2903
2904@threaded_return
2905def background_sample_until_condition(
2906 function,
2907 condition=lambda: True,
2908 timeout=10,
2909 sleep_interval=1):
2910 """
2911 Records the value of the function until the condition is False or the
2912 timeout is reached. Runs as a background thread, so it's nonblocking.
2913 Usage might look something like:
2914
2915 def function():
2916 return get_value()
2917 def condition():
2918 return self._keep_sampling
2919
2920 # main thread
2921 sample_thread = utils.background_sample_until_condition(
2922 function=function,condition=condition)
2923 # do other work
2924 # ...
2925 self._keep_sampling = False
2926 # blocking call to get result and join the thread
2927 result = sample_thread.finish()
2928
2929 @param function: function object, 0 args, to be continually polled
2930 @param condition: function object, 0 args, to say when to stop polling
2931 @param timeout: maximum number of seconds to wait
2932 @param sleep_interval: number of seconds to wait between polls
2933
2934 @return a thread object that has already been started and is running in
2935 the background, whose run must be stopped with .finish(), which
2936 also returns a list of the results from the sample function
2937 """
2938 log = []
2939
2940 end_time = datetime.datetime.now() + datetime.timedelta(
2941 seconds = timeout + sleep_interval)
2942
2943 while condition() and datetime.datetime.now() < end_time:
2944 log.append(function())
2945 time.sleep(sleep_interval)
2946 return log
2947
2948
Aviv Keshet98b179e2017-07-18 16:22:23 -07002949class metrics_mock(metrics_mock_class.mock_class_base):
Allen Li5ed7e632017-02-03 16:31:33 -08002950 """mock class for metrics in case chromite is not installed."""
2951 pass
Luis Hector Chavez5473ee32018-05-15 10:12:50 -07002952
2953
2954MountInfo = collections.namedtuple('MountInfo', ['root', 'mount_point', 'tags'])
2955
2956
2957def get_mount_info(process='self', mount_point=None):
2958 """Retrieves information about currently mounted file systems.
2959
2960 @param mount_point: (optional) The mount point (a path). If this is
2961 provided, only information about the given mount point
2962 is returned. If this is omitted, info about all mount
2963 points is returned.
2964 @param process: (optional) The process id (or the string 'self') of the
2965 process whose mountinfo will be obtained. If this is
2966 omitted, info about the current process is returned.
2967
2968 @return A generator yielding one MountInfo object for each relevant mount
2969 found in /proc/PID/mountinfo.
2970 """
2971 with open('/proc/{}/mountinfo'.format(process)) as f:
2972 for line in f.readlines():
2973 # These lines are formatted according to the proc(5) manpage.
2974 # Sample line:
2975 # 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root \
2976 # rw,errors=continue
2977 # Fields (descriptions omitted for fields we don't care about)
2978 # 3: the root of the mount.
2979 # 4: the mount point.
2980 # 5: mount options.
2981 # 6: tags. There can be more than one of these. This is where
2982 # shared mounts are indicated.
2983 # 7: a dash separator marking the end of the tags.
2984 mountinfo = line.split()
2985 if mount_point is None or mountinfo[4] == mount_point:
2986 tags = []
2987 for field in mountinfo[6:]:
2988 if field == '-':
2989 break
2990 tags.append(field.split(':')[0])
2991 yield MountInfo(root = mountinfo[3],
2992 mount_point = mountinfo[4],
2993 tags = tags)
Kuo Jen Weice9f5562018-11-02 12:34:55 +08002994
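# Illustrative usage sketch for get_mount_info() (comment only). The
# generator yields MountInfo tuples; tags contain propagation markers such
# as 'shared' or 'master':
#
#   info = list(get_mount_info(mount_point='/'))
#   if info and 'shared' in info[0].tags:
#       logging.info('/ is a shared mount')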
2995
2996# Appended suffix for chart tablet naming convention in test lab
2997CHART_ADDRESS_SUFFIX = '-tablet'
2998
2999
3000def get_lab_chart_address(hostname):
3001 """Convert lab DUT hostname to address of camera box chart tablet"""
3002 return hostname + CHART_ADDRESS_SUFFIX if is_in_container() else None