#!/usr/bin/python
#
# Copyright 2008 Google Inc. Released under the GPL v2

import os, pickle, random, re, resource, select, shutil, signal, StringIO
import socket, struct, subprocess, sys, time, textwrap, urllib, urlparse
import warnings, smtplib, logging
from autotest_lib.client.common_lib import error, barrier

def deprecated(func):
    """This is a decorator which can be used to mark functions as deprecated.
    It will result in a warning being emitted when the function is used."""
    def new_func(*args, **dargs):
        warnings.warn("Call to deprecated function %s." % func.__name__,
                      category=DeprecationWarning)
        return func(*args, **dargs)
    new_func.__name__ = func.__name__
    new_func.__doc__ = func.__doc__
    new_func.__dict__.update(func.__dict__)
    return new_func


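# A minimal usage sketch for the decorator above; old_api is a hypothetical
# function used only for illustration:
#
#   @deprecated
#   def old_api():
#       return 42
#
#   old_api()   # emits a DeprecationWarning, then returns 42

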
class BgJob(object):
    def __init__(self, command, stdout_tee=None, stderr_tee=None, verbose=True,
                 stdin=None):
        self.command = command
        self.stdout_tee = stdout_tee
        self.stderr_tee = stderr_tee
        self.result = CmdResult(command)
        if verbose:
            logging.debug("Running '%s'" % command)
        self.sp = subprocess.Popen(command, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   preexec_fn=self._reset_sigpipe, shell=True,
                                   executable="/bin/bash",
                                   stdin=stdin)


    def output_prepare(self, stdout_file=None, stderr_file=None):
        self.stdout_file = stdout_file
        self.stderr_file = stderr_file


    def process_output(self, stdout=True, final_read=False):
        """output_prepare() must be called prior to calling this"""
        if stdout:
            pipe, buf, tee = self.sp.stdout, self.stdout_file, self.stdout_tee
        else:
            pipe, buf, tee = self.sp.stderr, self.stderr_file, self.stderr_tee

        if final_read:
            # read in all the data we can from pipe and then stop
            data = []
            while select.select([pipe], [], [], 0)[0]:
                data.append(os.read(pipe.fileno(), 1024))
                if len(data[-1]) == 0:
                    break
            data = "".join(data)
        else:
            # perform a single read
            data = os.read(pipe.fileno(), 1024)
        buf.write(data)
        if tee:
            tee.write(data)
            tee.flush()


    def cleanup(self):
        self.sp.stdout.close()
        self.sp.stderr.close()
        self.result.stdout = self.stdout_file.getvalue()
        self.result.stderr = self.stderr_file.getvalue()


    def _reset_sigpipe(self):
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)


def ip_to_long(ip):
    # !L is an unsigned long in network byte order
    return struct.unpack('!L', socket.inet_aton(ip))[0]


def long_to_ip(number):
    # See the comment in ip_to_long above.
    return socket.inet_ntoa(struct.pack('!L', number))


def create_subnet_mask(bits):
    return (1 << 32) - (1 << 32 - bits)


def format_ip_with_mask(ip, mask_bits):
    masked_ip = ip_to_long(ip) & create_subnet_mask(mask_bits)
    return "%s/%s" % (long_to_ip(masked_ip), mask_bits)


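# A quick, hedged usage sketch for the address helpers above; the addresses
# and prefix length are purely illustrative:
#
#   ip_to_long('127.0.0.1')              # -> 2130706433
#   long_to_ip(2130706433)               # -> '127.0.0.1'
#   format_ip_with_mask('10.1.2.3', 24)  # -> '10.1.2.0/24'

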
def normalize_hostname(alias):
    ip = socket.gethostbyname(alias)
    return socket.gethostbyaddr(ip)[0]


def get_ip_local_port_range():
    match = re.match(r'\s*(\d+)\s*(\d+)\s*$',
                     read_one_line('/proc/sys/net/ipv4/ip_local_port_range'))
    return (int(match.group(1)), int(match.group(2)))


def set_ip_local_port_range(lower, upper):
    write_one_line('/proc/sys/net/ipv4/ip_local_port_range',
                   '%d %d\n' % (lower, upper))


def send_email(mail_from, mail_to, subject, body):
    """
    Sends an email via SMTP.

    mail_from: string with email address of sender
    mail_to: string or list with email address(es) of recipients
    subject: string with subject of email
    body: (multi-line) string with body of email
    """
    if isinstance(mail_to, str):
        mail_to = [mail_to]
    msg = "From: %s\nTo: %s\nSubject: %s\n\n%s" % (mail_from, ','.join(mail_to),
                                                   subject, body)
    try:
        mailer = smtplib.SMTP('localhost')
        try:
            mailer.sendmail(mail_from, mail_to, msg)
        finally:
            mailer.quit()
    except Exception, e:
        # Sending email is non-critical, so log the failure rather than
        # raising an exception.
        print "Sending email failed. Reason: %s" % repr(e)


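# A hedged example of how send_email might be invoked; both addresses are
# hypothetical placeholders, not values this module defines anywhere:
#
#   send_email('autotest@localhost', ['admin@example.com'],
#              'job finished', 'All tests completed.\n')

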
def read_one_line(filename):
    return open(filename, 'r').readline().rstrip('\n')


def write_one_line(filename, line):
    open_write_close(filename, line.rstrip('\n') + '\n')


def open_write_close(filename, data):
    f = open(filename, 'w')
    try:
        f.write(data)
    finally:
        f.close()


def read_keyval(path):
    """
    Read a key-value pair format file into a dictionary, and return it.
    Takes either a filename or directory name as input. If it's a
    directory name, we assume you want the file to be called keyval.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    keyval = {}
    for line in open(path):
        line = re.sub('#.*', '', line).rstrip()
        if not re.search(r'^[-\.\w]+=', line):
            raise ValueError('Invalid format line: %s' % line)
        key, value = line.split('=', 1)
        if re.search(r'^\d+$', value):
            value = int(value)
        elif re.search(r'^(\d+\.)?\d+$', value):
            value = float(value)
        keyval[key] = value
    return keyval


def write_keyval(path, dictionary, type_tag=None):
    """
    Write a key-value pair format file out to a file. This uses append
    mode to open the file, so existing text will not be overwritten or
    reparsed.

    If type_tag is None, then the key must be composed of alphanumeric
    characters (or dashes+underscores). However, if type_tag is not
    None then the keys must also have "{type_tag}" as a suffix. At
    the moment the only valid values of type_tag are "attr" and "perf".
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    keyval = open(path, 'a')

    if type_tag is None:
        key_regex = re.compile(r'^[-\.\w]+$')
    else:
        if type_tag not in ('attr', 'perf'):
            raise ValueError('Invalid type tag: %s' % type_tag)
        escaped_tag = re.escape(type_tag)
        key_regex = re.compile(r'^[-\.\w]+\{%s\}$' % escaped_tag)
    try:
        for key, value in dictionary.iteritems():
            if not key_regex.search(key):
                raise ValueError('Invalid key: %s' % key)
            keyval.write('%s=%s\n' % (key, value))
    finally:
        keyval.close()


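# A hedged sketch of the keyval round trip implemented above; the directory
# names are purely illustrative. Note that read_keyval()'s key regex only
# accepts plain keys, so type-tagged entries (e.g. "{perf}") belong in a
# separate keyval file if they ever need to be read back:
#
#   write_keyval('/tmp/results', {'iterations': 5, 'elapsed': 12.5})
#   read_keyval('/tmp/results')       # -> {'iterations': 5, 'elapsed': 12.5}
#   write_keyval('/tmp/perf_results', {'throughput{perf}': 300},
#                type_tag='perf')

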
def is_url(path):
    """Return true if path looks like a URL"""
    # for now, just handle http and ftp
    url_parts = urlparse.urlparse(path)
    return (url_parts[0] in ('http', 'ftp'))


def urlopen(url, data=None, proxies=None, timeout=5):
    """Wrapper to urllib.urlopen with timeout addition."""

    # Save old timeout
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        return urllib.urlopen(url, data=data, proxies=proxies)
    finally:
        socket.setdefaulttimeout(old_timeout)


def urlretrieve(url, filename=None, reporthook=None, data=None, timeout=300):
    """Wrapper to urllib.urlretrieve with timeout addition."""
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        return urllib.urlretrieve(url, filename=filename,
                                  reporthook=reporthook, data=data)
    finally:
        socket.setdefaulttimeout(old_timeout)


def get_file(src, dest, permissions=None):
    """Get a file from src, which can be local or a remote URL"""
    if src == dest:
        return
    if is_url(src):
        print 'PWD: ' + os.getcwd()
        print 'Fetching \n\t', src, '\n\t->', dest
        try:
            urllib.urlretrieve(src, dest)
        except IOError, e:
            raise error.AutotestError('Unable to retrieve %s (to %s)'
                                      % (src, dest), e)
    else:
        shutil.copyfile(src, dest)
    if permissions:
        os.chmod(dest, permissions)
    return dest


def unmap_url(srcdir, src, destdir='.'):
    """
    Receives either a path to a local file or a URL.
    Returns either the path to the local file, or the path to which the
    URL's contents were fetched.

    unmap_url('/usr/src', 'foo.tar', '/tmp')
                            = '/usr/src/foo.tar'
    unmap_url('/usr/src', 'http://site/file', '/tmp')
                            = '/tmp/file'
                            (after retrieving it)
    """
    if is_url(src):
        url_parts = urlparse.urlparse(src)
        filename = os.path.basename(url_parts[2])
        dest = os.path.join(destdir, filename)
        return get_file(src, dest)
    else:
        return os.path.join(srcdir, src)


def update_version(srcdir, preserve_srcdir, new_version, install,
                   *args, **dargs):
    """
    Make sure srcdir is version new_version

    If not, delete it and install() the new version.

    In the preserve_srcdir case, we just check it's up to date,
    and if not, we rerun install, without removing srcdir
    """
    versionfile = os.path.join(srcdir, '.version')
    install_needed = True

    if os.path.exists(versionfile):
        old_version = pickle.load(open(versionfile))
        if old_version == new_version:
            install_needed = False

    if install_needed:
        if not preserve_srcdir and os.path.exists(srcdir):
            shutil.rmtree(srcdir)
        install(*args, **dargs)
        if os.path.exists(srcdir):
            pickle.dump(new_version, open(versionfile, 'w'))


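# A hedged sketch of how update_version() might be driven from a test's setup
# step; the path, version number and build callback are all hypothetical:
#
#   def _build(srcdir):
#       # unpack the tarball and compile into srcdir
#       pass
#
#   update_version('/tmp/src/mytool', preserve_srcdir=False, new_version=2,
#                  install=_build, srcdir='/tmp/src/mytool')
#
# On the first call .version does not exist, so install() runs and the version
# is pickled into srcdir/.version; later calls with the same version are no-ops.

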
def run(command, timeout=None, ignore_status=False,
        stdout_tee=None, stderr_tee=None, verbose=True, stdin=None):
    """
    Run a command on the host.

    Args:
        command: the command line string
        timeout: time limit in seconds before attempting to
                 kill the running process. The run() function
                 will take a few seconds longer than 'timeout'
                 to complete if it has to kill the process.
        ignore_status: do not raise an exception, no matter what
                 the exit code of the command is.
        stdout_tee: optional file-like object to which stdout data
                    will be written as it is generated (data will still
                    be stored in result.stdout)
        stderr_tee: likewise for stderr
        stdin: stdin to pass to the executed process

    Returns:
        a CmdResult object

    Raises:
        CmdError: the exit code of the command
                  execution was not 0
    """
    bg_job = join_bg_jobs(
        (BgJob(command, stdout_tee, stderr_tee, verbose, stdin=stdin),),
        timeout)[0]
    if not ignore_status and bg_job.result.exit_status:
        raise error.CmdError(command, bg_job.result,
                             "Command returned non-zero exit status")

    return bg_job.result


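# A hedged usage sketch for run() and its relatives; the command strings are
# illustrative only:
#
#   result = run('uname -r', timeout=10)
#   print result.exit_status, result.stdout
#
#   # run_parallel() takes a list of commands and returns one CmdResult per
#   # command, e.g. run_parallel(['true', 'sleep 1'], timeout=30)

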
def run_parallel(commands, timeout=None, ignore_status=False,
                 stdout_tee=None, stderr_tee=None):
    """Behaves the same as run() with the following exceptions:

    - commands is a list of commands to run in parallel.
    - ignore_status toggles whether or not an exception should be raised
      on any error.

    returns a list of CmdResult objects
    """
    bg_jobs = []
    for command in commands:
        bg_jobs.append(BgJob(command, stdout_tee, stderr_tee))

    # Updates objects in bg_jobs list with their process information
    join_bg_jobs(bg_jobs, timeout)

    for bg_job in bg_jobs:
        if not ignore_status and bg_job.result.exit_status:
            raise error.CmdError(bg_job.command, bg_job.result,
                                 "Command returned non-zero exit status")

    return [bg_job.result for bg_job in bg_jobs]


@deprecated
def run_bg(command):
    """Function deprecated. Please use BgJob class instead."""
    bg_job = BgJob(command)
    return bg_job.sp, bg_job.result


def join_bg_jobs(bg_jobs, timeout=None):
    """Joins the bg_jobs with the current thread.

    Returns the same list of bg_jobs objects that was passed in.
    """
    ret, timeout_error = 0, False
    for bg_job in bg_jobs:
        bg_job.output_prepare(StringIO.StringIO(), StringIO.StringIO())

    try:
        # We are holding ends to stdin, stdout pipes
        # hence we need to be sure to close those fds no matter what
        start_time = time.time()
        timeout_error = _wait_for_commands(bg_jobs, start_time, timeout)

        for bg_job in bg_jobs:
            # Process stdout and stderr
            bg_job.process_output(stdout=True, final_read=True)
            bg_job.process_output(stdout=False, final_read=True)
    finally:
        # close our ends of the pipes to the sp no matter what
        for bg_job in bg_jobs:
            bg_job.cleanup()

    if timeout_error:
        # TODO: This needs to be fixed to better represent what happens when
        # running in parallel. However this is backwards compatible, so it
        # will do for the time being.
        raise error.CmdError(bg_jobs[0].command, bg_jobs[0].result,
                             "Command(s) did not complete within %d seconds"
                             % timeout)


    return bg_jobs


def _wait_for_commands(bg_jobs, start_time, timeout):
    # This returns True if it must return due to a timeout, otherwise False.

    # To check for processes which terminate without producing any output
    # a 1 second timeout is used in select.
    SELECT_TIMEOUT = 1

    select_list = []
    reverse_dict = {}
    for bg_job in bg_jobs:
        select_list.append(bg_job.sp.stdout)
        select_list.append(bg_job.sp.stderr)
        reverse_dict[bg_job.sp.stdout] = (bg_job, True)
        reverse_dict[bg_job.sp.stderr] = (bg_job, False)

    if timeout:
        stop_time = start_time + timeout
        time_left = stop_time - time.time()
    else:
        time_left = None  # so that select never times out
    while not timeout or time_left > 0:
        # select will return when stdout is ready (including when it is
        # EOF, that is the process has terminated).
        ready, _, _ = select.select(select_list, [], [], SELECT_TIMEOUT)

        # os.read() has to be used instead of
        # subproc.stdout.read() which will otherwise block
        for fileno in ready:
            bg_job, stdout = reverse_dict[fileno]
            bg_job.process_output(stdout)

        remaining_jobs = [x for x in bg_jobs if x.result.exit_status is None]
        if len(remaining_jobs) == 0:
            return False
        for bg_job in remaining_jobs:
            bg_job.result.exit_status = bg_job.sp.poll()

        if timeout:
            time_left = stop_time - time.time()

    # Kill all processes which did not complete prior to timeout
    for bg_job in [x for x in bg_jobs if x.result.exit_status is None]:
        print '* Warning: run process timeout (%s) fired' % timeout
        nuke_subprocess(bg_job.sp)
        bg_job.result.exit_status = bg_job.sp.poll()

    return True


def nuke_subprocess(subproc):
    # check if the subprocess is still alive, first
    if subproc.poll() is not None:
        return subproc.poll()

    # the process has not terminated within timeout,
    # kill it via an escalating series of signals.
    signal_queue = [signal.SIGTERM, signal.SIGKILL]
    for sig in signal_queue:
        try:
            os.kill(subproc.pid, sig)
        # The process may have died before we could kill it.
        except OSError:
            pass

        for i in range(5):
            rc = subproc.poll()
            if rc is not None:
                return rc
            time.sleep(1)


def nuke_pid(pid):
    # the process has not terminated within timeout,
    # kill it via an escalating series of signals.
    signal_queue = [signal.SIGTERM, signal.SIGKILL]
    for sig in signal_queue:
        try:
            os.kill(pid, sig)

        # The process may have died before we could kill it.
        except OSError:
            pass

        try:
            for i in range(5):
                status = os.waitpid(pid, os.WNOHANG)[0]
                if status == pid:
                    return
                time.sleep(1)

            if status != pid:
                raise error.AutoservRunError('Could not kill %d'
                                             % pid, None)

        # the process died before we join it.
        except OSError:
            pass


def system(command, timeout=None, ignore_status=False):
    """This function returns the exit status of command."""
    return run(command, timeout=timeout, ignore_status=ignore_status,
               stdout_tee=sys.stdout, stderr_tee=sys.stderr).exit_status


def system_parallel(commands, timeout=None, ignore_status=False):
    """This function returns a list of exit statuses for the respective
    list of commands."""
    return [result.exit_status for result in
            run_parallel(commands, timeout=timeout, ignore_status=ignore_status,
                         stdout_tee=sys.stdout, stderr_tee=sys.stderr)]


def system_output(command, timeout=None, ignore_status=False,
                  retain_output=False):
    if retain_output:
        out = run(command, timeout=timeout, ignore_status=ignore_status,
                  stdout_tee=sys.stdout, stderr_tee=sys.stderr).stdout
    else:
        out = run(command, timeout=timeout, ignore_status=ignore_status).stdout
    if out[-1:] == '\n':
        out = out[:-1]
    return out


def system_output_parallel(commands, timeout=None, ignore_status=False,
                           retain_output=False):
    if retain_output:
        out = [result.stdout for result in run_parallel(commands,
                   timeout=timeout, ignore_status=ignore_status,
                   stdout_tee=sys.stdout, stderr_tee=sys.stderr)]
    else:
        out = [result.stdout for result in run_parallel(commands,
                   timeout=timeout, ignore_status=ignore_status)]
    # strip a single trailing newline from each command's output, mirroring
    # the behaviour of system_output()
    for i, x in enumerate(out):
        if x[-1:] == '\n':
            out[i] = x[:-1]
    return out


def strip_unicode(input):
    if type(input) == list:
        return [strip_unicode(i) for i in input]
    elif type(input) == dict:
        output = {}
        for key in input.keys():
            output[str(key)] = strip_unicode(input[key])
        return output
    elif type(input) == unicode:
        return str(input)
    else:
        return input


def get_cpu_percentage(function, *args, **dargs):
    """Returns a tuple containing the CPU% and return value from function call.

    This function calculates the usage time by taking the difference of
    the user and system times both before and after the function call.
    """
    child_pre = resource.getrusage(resource.RUSAGE_CHILDREN)
    self_pre = resource.getrusage(resource.RUSAGE_SELF)
    start = time.time()
    to_return = function(*args, **dargs)
    elapsed = time.time() - start
    self_post = resource.getrusage(resource.RUSAGE_SELF)
    child_post = resource.getrusage(resource.RUSAGE_CHILDREN)

    # Calculate CPU Percentage
    s_user, s_system = [a - b for a, b in zip(self_post, self_pre)[:2]]
    c_user, c_system = [a - b for a, b in zip(child_post, child_pre)[:2]]
    cpu_percent = (s_user + c_user + s_system + c_system) / elapsed

    return cpu_percent, to_return


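# A hedged example of measuring a call with get_cpu_percentage(); busy_loop is
# a hypothetical helper used only for illustration:
#
#   def busy_loop(n):
#       return sum(i * i for i in xrange(n))
#
#   cpu_fraction, value = get_cpu_percentage(busy_loop, 10 ** 6)
#   # cpu_fraction is CPU time / wall-clock time (near 1.0 for a pure CPU loop)

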
def get_sync_control_file(control, host_name, host_num,
                          instance, num_jobs, port_base=63100):
    """
    This function is used when there is a need to run more than one
    job simultaneously starting exactly at the same time. It basically returns
    a modified control file (containing the synchronization code prepended)
    whenever it is ready to run the control file. The synchronization
    is done using barriers to make sure that the jobs start at the same time.

    Here is how the synchronization is done to make sure that the tests
    start at exactly the same time on the client.
    sc_bar is a server barrier and s_bar, c_bar are the normal barriers

                Job1           Job2        ......      JobN
    Server: |   sc_bar
    Server: |   s_bar          s_bar       ......      s_bar
    Server: |   at.run()       at.run()    ......      at.run()
    --------|----------------------------------------------------
    Client  |   sc_bar
    Client  |   c_bar          c_bar       ......      c_bar
    Client  |   <run test>     <run test>  ......      <run test>

    PARAMS:
       control : The control file to which the above synchronization
                 code will be prepended
       host_name : The host name on which the job is going to run
       host_num (non-negative) : A number to identify the machine so that
                 we have different sets of s_bar_ports for each of the
                 machines
       instance : The number of the job
       num_jobs : Total number of jobs that are going to run in parallel
                 with this job starting at the same time
       port_base : Port number that is used to derive the actual barrier ports

    RETURN VALUE:
        The modified control file.
    """
    sc_bar_port = port_base
    c_bar_port = port_base
    if host_num < 0:
        print "Please provide a non-negative number for the host"
        return None
    s_bar_port = port_base + 1 + host_num  # The set of s_bar_ports are
                                           # the same for a given machine

    sc_bar_timeout = 180
    s_bar_timeout = c_bar_timeout = 120

    # The barrier code snippet is prepended into the control file
    # dynamically before at.run() is called finally.
    control_new = []

    # jobid is the unique name used to identify the processes
    # trying to reach the barriers
    jobid = "%s#%d" % (host_name, instance)

    rendv = []
    # rendvstr is a temp holder for the rendezvous list of the processes
    for n in range(num_jobs):
        rendv.append("'%s#%d'" % (host_name, n))
    rendvstr = ",".join(rendv)

    if instance == 0:
        # Do the setup and wait at the server barrier
        # Clean up the tmp and the control dirs for the first instance
        control_new.append('if os.path.exists(job.tmpdir):')
        control_new.append("\t system('umount -f %s > /dev/null "
                           "2> /dev/null' % job.tmpdir,"
                           " ignore_status=True)")
        control_new.append("\t system('rm -rf ' + job.tmpdir)")
        control_new.append(
            'b0 = job.barrier("%s", "sc_bar", %d, port=%d)'
            % (jobid, sc_bar_timeout, sc_bar_port))
        control_new.append(
            'b0.rendevous_servers("PARALLEL_MASTER", "%s")'
            % jobid)

    elif instance == 1:
        # Wait at the server barrier to wait for instance=0
        # process to complete setup
        b0 = barrier.barrier("PARALLEL_MASTER", "sc_bar", sc_bar_timeout,
                             port=sc_bar_port)
        b0.rendevous_servers("PARALLEL_MASTER", jobid)

        if num_jobs > 2:
            b1 = barrier.barrier(jobid, "s_bar", s_bar_timeout,
                                 port=s_bar_port)
            b1.rendevous(rendvstr)

    else:
        # For the rest of the clients
        b2 = barrier.barrier(jobid, "s_bar", s_bar_timeout, port=s_bar_port)
        b2.rendevous(rendvstr)

    # Client side barrier for all the tests to start at the same time
    control_new.append('b1 = job.barrier("%s", "c_bar", %d, port=%d)'
                       % (jobid, c_bar_timeout, c_bar_port))
    control_new.append("b1.rendevous(%s)" % rendvstr)

    # Stick in the rest of the control file
    control_new.append(control)

    return "\n".join(control_new)


def get_arch(run_function=run):
    """
    Get the hardware architecture of the machine.
    run_function is used to execute the commands. It defaults to
    utils.run() but a custom method (if provided) should be of the
    same schema as utils.run. It should return a CmdResult object and
    throw a CmdError exception.
    """
    arch = run_function('/bin/uname -m').stdout.rstrip()
    if re.match(r'i\d86$', arch):
        arch = 'i386'
    return arch


def merge_trees(src, dest):
    """
    Merges a source directory tree at 'src' into a destination tree at
    'dest'. If a path is a file in both trees then the file in the source
    tree is APPENDED to the one in the destination tree. If a path is
    a directory in both trees then the directories are recursively merged
    with this function. In any other case, the function will skip the
    paths that cannot be merged (instead of failing).
    """
    if not os.path.exists(src):
        return  # exists only in dest
    elif not os.path.exists(dest):
        if os.path.isfile(src):
            shutil.copy2(src, dest)  # file only in src
        else:
            shutil.copytree(src, dest, symlinks=True)  # dir only in src
        return
    elif os.path.isfile(src) and os.path.isfile(dest):
        # src & dest are files in both trees, append src to dest
        destfile = open(dest, "a")
        try:
            srcfile = open(src)
            try:
                destfile.write(srcfile.read())
            finally:
                srcfile.close()
        finally:
            destfile.close()
    elif os.path.isdir(src) and os.path.isdir(dest):
        # src & dest are directories in both trees, so recursively merge
        for name in os.listdir(src):
            merge_trees(os.path.join(src, name), os.path.join(dest, name))
    else:
        # src & dest both exist, but are incompatible
        return


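# A hedged sketch of merge_trees(); the directory names are hypothetical.
# After the call, files present in both trees have the source contents
# appended to the destination copy, and files or directories present only in
# the source are copied over:
#
#   merge_trees('/tmp/results.partial/debug', '/tmp/results/debug')

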
class CmdResult(object):
    """
    Command execution result.

    command: String containing the command line itself
    exit_status: Integer exit code of the process
    stdout: String containing stdout of the process
    stderr: String containing stderr of the process
    duration: Elapsed wall clock time running the process
    """


    def __init__(self, command=None, stdout="", stderr="",
                 exit_status=None, duration=0):
        self.command = command
        self.exit_status = exit_status
        self.stdout = stdout
        self.stderr = stderr
        self.duration = duration


    def __repr__(self):
        wrapper = textwrap.TextWrapper(width=78,
                                       initial_indent="\n    ",
                                       subsequent_indent="    ")

        stdout = self.stdout.rstrip()
        if stdout:
            stdout = "\nstdout:\n%s" % stdout

        stderr = self.stderr.rstrip()
        if stderr:
            stderr = "\nstderr:\n%s" % stderr

        return ("* Command: %s\n"
                "Exit status: %s\n"
                "Duration: %s\n"
                "%s"
                "%s"
                % (wrapper.fill(self.command), self.exit_status,
                   self.duration, stdout, stderr))


class run_randomly:
    def __init__(self, run_sequentially=False):
        # Run sequentially is for debugging control files
        self.test_list = []
        self.run_sequentially = run_sequentially


    def add(self, *args, **dargs):
        test = (args, dargs)
        self.test_list.append(test)


    def run(self, fn):
        while self.test_list:
            test_index = random.randint(0, len(self.test_list) - 1)
            if self.run_sequentially:
                test_index = 0
            (args, dargs) = self.test_list.pop(test_index)
            fn(*args, **dargs)


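# A hedged usage sketch for run_randomly, as it might appear in a control
# file; 'test_a' and 'test_b' are hypothetical test names:
#
#   order = run_randomly()
#   order.add('test_a', iterations=2)
#   order.add('test_b')
#   order.run(job.run_test)   # runs the queued tests in random order

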
def import_site_symbol(path, module, name, dummy=None, modulefile=None):
    """
    Try to import site specific symbol from site specific file if it exists

    @param path full filename of the source file calling this (i.e. __file__)
    @param module full module name
    @param name symbol name to be imported from the site file
    @param dummy dummy value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific symbol or dummy

    @exception ImportError if the site file exists but the import fails
    """
    short_module = module[module.rfind(".") + 1:]

    if not modulefile:
        modulefile = short_module + ".py"

    try:
        site_exists = os.path.getsize(os.path.join(os.path.dirname(path),
                                                   modulefile))
    except os.error:
        site_exists = False

    if site_exists:
        # return the object from the imported module
        obj = getattr(__import__(module, {}, {}, [short_module]), name)
    else:
        msg = "unable to import site module '%s', using non-site implementation"
        msg %= modulefile
        logging.info(msg)
        obj = dummy

    return obj


def import_site_class(path, module, classname, baseclass, modulefile=None):
    """
    Try to import site specific class from site specific file if it exists

    Args:
        path: full filename of the source file calling this (i.e. __file__)
        module: full module name
        classname: class name to be loaded from site file
        baseclass: base class object to inherit from when no site file present
        modulefile: module filename

    Returns: class object of the site class or baseclass

    Raises: ImportError if the site file exists but the import fails
    """

    res = import_site_symbol(path, module, classname, None, modulefile)

    if not res:
        # we cannot just return baseclass because some callers will want to
        # use multiple inheritance on the class object we return and baseclass
        # together, which Python rejects as a duplicate base class
        class dummy(baseclass):
            pass

        res = dummy

    return res


def import_site_function(path, module, funcname, dummy, modulefile=None):
    """
    Try to import site specific function from site specific file if it exists

    Args:
        path: full filename of the source file calling this (i.e. __file__)
        module: full module name
        funcname: function name to be imported from site file
        dummy: dummy function to return in case there is no function to import
        modulefile: module filename

    Returns: site specific function object or dummy

    Raises: ImportError if the site file exists but the import fails
    """

    return import_site_symbol(path, module, funcname, dummy, modulefile)


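# A hedged sketch of the import_site_* helpers; the module path, symbol names
# and default function below are hypothetical examples of the convention:
#
#   parse = import_site_function(__file__,
#                                'autotest_lib.client.bin.site_utils',
#                                'parse_partitions', default_parse_partitions)
#
# If site_utils.py exists next to the calling file, its parse_partitions is
# returned; otherwise the supplied default_parse_partitions dummy is used.

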
def write_pid(program_name):
    """
    Try to drop <program_name>.pid in the main autotest directory.

    Args:
        program_name: prefix for file name
    """

    my_path = os.path.dirname(__file__)
    pid_path = os.path.abspath(os.path.join(my_path, "../.."))
    pidf = open(os.path.join(pid_path, "%s.pid" % program_name), "w")
    try:
        pidf.write("%s\n" % os.getpid())
    finally:
        pidf.close()
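

# A hedged usage note: a long-running daemon would typically call this once at
# startup, e.g. write_pid('scheduler') (a hypothetical name), leaving
# scheduler.pid two directory levels above this module for init scripts to find.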