#!/usr/bin/python
#
# Copyright 2008 Google Inc. Released under the GPL v2

import os, pickle, random, re, resource, select, shutil, signal, StringIO
import socket, struct, subprocess, sys, time, textwrap, urllib, urlparse
import warnings, smtplib, logging, urllib2
from autotest_lib.client.common_lib import error, barrier

def deprecated(func):
    """This is a decorator which can be used to mark functions as deprecated.
    It will result in a warning being emitted when the function is used."""
    def new_func(*args, **dargs):
        warnings.warn("Call to deprecated function %s." % func.__name__,
                      category=DeprecationWarning)
        return func(*args, **dargs)
    new_func.__name__ = func.__name__
    new_func.__doc__ = func.__doc__
    new_func.__dict__.update(func.__dict__)
    return new_func


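# Illustrative usage sketch of the deprecated() decorator above (added for
# documentation only, not part of the original module; the wrapped helper is
# hypothetical). Calling the wrapped function emits a DeprecationWarning via
# the warnings module and then delegates to the original implementation.
def _example_deprecated_usage():
    @deprecated
    def old_helper(value):
        return value * 2

    # Emits "Call to deprecated function old_helper." and returns 4.
    return old_helper(2)

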
class BgJob(object):
    def __init__(self, command, stdout_tee=None, stderr_tee=None, verbose=True,
                 stdin=None):
        self.command = command
        self.stdout_tee = stdout_tee
        self.stderr_tee = stderr_tee
        self.result = CmdResult(command)
        if verbose:
            logging.debug("Running '%s'" % command)
        self.sp = subprocess.Popen(command, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   preexec_fn=self._reset_sigpipe, shell=True,
                                   executable="/bin/bash",
                                   stdin=stdin)


    def output_prepare(self, stdout_file=None, stderr_file=None):
        self.stdout_file = stdout_file
        self.stderr_file = stderr_file


    def process_output(self, stdout=True, final_read=False):
        """output_prepare must be called prior to calling this"""
        if stdout:
            pipe, buf, tee = self.sp.stdout, self.stdout_file, self.stdout_tee
        else:
            pipe, buf, tee = self.sp.stderr, self.stderr_file, self.stderr_tee

        if final_read:
            # read in all the data we can from pipe and then stop
            data = []
            while select.select([pipe], [], [], 0)[0]:
                data.append(os.read(pipe.fileno(), 1024))
                if len(data[-1]) == 0:
                    break
            data = "".join(data)
        else:
            # perform a single read
            data = os.read(pipe.fileno(), 1024)
        buf.write(data)
        if tee:
            tee.write(data)
            tee.flush()


    def cleanup(self):
        self.sp.stdout.close()
        self.sp.stderr.close()
        self.result.stdout = self.stdout_file.getvalue()
        self.result.stderr = self.stderr_file.getvalue()


    def _reset_sigpipe(self):
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)


def ip_to_long(ip):
    # !L is a long in network byte order
    return struct.unpack('!L', socket.inet_aton(ip))[0]


def long_to_ip(number):
    # See above comment.
    return socket.inet_ntoa(struct.pack('!L', number))


def create_subnet_mask(bits):
    return (1 << 32) - (1 << 32-bits)


def format_ip_with_mask(ip, mask_bits):
    masked_ip = ip_to_long(ip) & create_subnet_mask(mask_bits)
    return "%s/%s" % (long_to_ip(masked_ip), mask_bits)


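# Worked example for the IP helpers above (added for documentation only, not
# part of the original module; the address is hypothetical).
def _example_format_ip_with_mask():
    # ip_to_long('192.168.1.135') == 0xC0A80187; ANDing with the /24 subnet
    # mask 0xFFFFFF00 keeps only the network part, so the call below
    # returns '192.168.1.0/24'.
    return format_ip_with_mask('192.168.1.135', 24)

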
def normalize_hostname(alias):
    ip = socket.gethostbyname(alias)
    return socket.gethostbyaddr(ip)[0]


def get_ip_local_port_range():
    match = re.match(r'\s*(\d+)\s*(\d+)\s*$',
                     read_one_line('/proc/sys/net/ipv4/ip_local_port_range'))
    return (int(match.group(1)), int(match.group(2)))


def set_ip_local_port_range(lower, upper):
    write_one_line('/proc/sys/net/ipv4/ip_local_port_range',
                   '%d %d\n' % (lower, upper))


def send_email(mail_from, mail_to, subject, body):
    """
    Sends an email via SMTP

    mail_from: string with email address of sender
    mail_to: string or list with email address(es) of recipients
    subject: string with subject of email
    body: (multi-line) string with body of email
    """
    if isinstance(mail_to, str):
        mail_to = [mail_to]
    msg = "From: %s\nTo: %s\nSubject: %s\n\n%s" % (mail_from, ','.join(mail_to),
                                                   subject, body)
    try:
        mailer = smtplib.SMTP('localhost')
        try:
            mailer.sendmail(mail_from, mail_to, msg)
        finally:
            mailer.quit()
    except Exception, e:
        # Emails are non-critical; report the failure but don't raise it
        print "Sending email failed. Reason: %s" % repr(e)


def read_one_line(filename):
    return open(filename, 'r').readline().rstrip('\n')


def write_one_line(filename, line):
    open_write_close(filename, line.rstrip('\n') + '\n')


def open_write_close(filename, data):
    f = open(filename, 'w')
    try:
        f.write(data)
    finally:
        f.close()


def read_keyval(path):
    """
    Read a key-value pair format file into a dictionary, and return it.
    Takes either a filename or directory name as input. If it's a
    directory name, we assume you want the file to be called keyval.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    keyval = {}
    if os.path.exists(path):
        for line in open(path):
            line = re.sub('#.*', '', line).rstrip()
            if not re.search(r'^[-\.\w]+=', line):
                raise ValueError('Invalid format line: %s' % line)
            key, value = line.split('=', 1)
            if re.search(r'^\d+$', value):
                value = int(value)
            elif re.search(r'^(\d+\.)?\d+$', value):
                value = float(value)
            keyval[key] = value
    return keyval


def write_keyval(path, dictionary, type_tag=None):
    """
    Write a key-value pair format file out to a file. This uses append
    mode to open the file, so existing text will not be overwritten or
    reparsed.

    If type_tag is None, then the key must be composed of alphanumeric
    characters (or dashes+underscores). However, if type_tag is not
    None then the keys must also have "{type_tag}" as a suffix. At
    the moment the only valid values of type_tag are "attr" and "perf".
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    keyval = open(path, 'a')

    if type_tag is None:
        key_regex = re.compile(r'^[-\.\w]+$')
    else:
        if type_tag not in ('attr', 'perf'):
            raise ValueError('Invalid type tag: %s' % type_tag)
        escaped_tag = re.escape(type_tag)
        key_regex = re.compile(r'^[-\.\w]+\{%s\}$' % escaped_tag)
    try:
        for key, value in dictionary.iteritems():
            if not key_regex.search(key):
                raise ValueError('Invalid key: %s' % key)
            keyval.write('%s=%s\n' % (key, value))
    finally:
        keyval.close()


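# Illustrative sketch of a keyval round trip (added for documentation only,
# not part of the original module; the path below is hypothetical).
# write_keyval() appends key=value lines to the given file (or to
# <path>/keyval when path is an existing directory) and read_keyval() parses
# them back, converting purely numeric values to int or float.
def _example_keyval_round_trip(path='/tmp/example_keyval'):
    write_keyval(path, {'kernel': '2.6.18', 'iterations': 3})
    # Returns {'kernel': '2.6.18', 'iterations': 3} -- 'iterations' comes back
    # as an int, while '2.6.18' stays a string.
    return read_keyval(path)

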
def is_url(path):
    """Return true if path looks like a URL"""
    # for now, just handle http and ftp
    url_parts = urlparse.urlparse(path)
    return (url_parts[0] in ('http', 'ftp'))


def urlopen(url, data=None, proxies=None, timeout=5):
    """Wrapper to urllib.urlopen with timeout addition."""

    # Save old timeout
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        return urllib.urlopen(url, data=data, proxies=proxies)
    finally:
        socket.setdefaulttimeout(old_timeout)


def urlretrieve(url, filename=None, reporthook=None, data=None, timeout=300):
    """Wrapper to urllib.urlretrieve with timeout addition."""
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        return urllib.urlretrieve(url, filename=filename,
                                  reporthook=reporthook, data=data)
    finally:
        socket.setdefaulttimeout(old_timeout)


def get_file(src, dest, permissions=None):
    """Get a file from src, which can be local or a remote URL"""
    if src == dest:
        return

    if is_url(src):
        logging.debug('PWD: %s', os.getcwd())
        logging.info('Fetching %s -> %s', src, dest)

        src_file = urllib2.urlopen(src)
        try:
            dest_file = open(dest, 'wb')
            try:
                shutil.copyfileobj(src_file, dest_file)
            finally:
                dest_file.close()
        finally:
            src_file.close()
    else:
        shutil.copyfile(src, dest)

    if permissions:
        os.chmod(dest, permissions)
    return dest


def unmap_url(srcdir, src, destdir='.'):
    """
    Receives either a path to a local file or a URL.
    Returns either the path to the local file, or the path of the fetched file.

    unmap_url('/usr/src', 'foo.tar', '/tmp')
                = '/usr/src/foo.tar'
    unmap_url('/usr/src', 'http://site/file', '/tmp')
                = '/tmp/file'
                (after retrieving it)
    """
    if is_url(src):
        url_parts = urlparse.urlparse(src)
        filename = os.path.basename(url_parts[2])
        dest = os.path.join(destdir, filename)
        return get_file(src, dest)
    else:
        return os.path.join(srcdir, src)


def update_version(srcdir, preserve_srcdir, new_version, install,
                   *args, **dargs):
    """
    Make sure srcdir is version new_version

    If not, delete it and install() the new version.

    In the preserve_srcdir case, we just check it's up to date,
    and if not, we rerun install, without removing srcdir
    """
    versionfile = os.path.join(srcdir, '.version')
    install_needed = True

    if os.path.exists(versionfile):
        old_version = pickle.load(open(versionfile))
        if old_version == new_version:
            install_needed = False

    if install_needed:
        if not preserve_srcdir and os.path.exists(srcdir):
            shutil.rmtree(srcdir)
        install(*args, **dargs)
        if os.path.exists(srcdir):
            pickle.dump(new_version, open(versionfile, 'w'))


def run(command, timeout=None, ignore_status=False,
        stdout_tee=None, stderr_tee=None, verbose=True, stdin=None):
    """
    Run a command on the host.

    Args:
            command: the command line string
            timeout: time limit in seconds before attempting to
                    kill the running process. The run() function
                    will take a few seconds longer than 'timeout'
                    to complete if it has to kill the process.
            ignore_status: do not raise an exception, no matter what
                    the exit code of the command is.
            stdout_tee: optional file-like object to which stdout data
                        will be written as it is generated (data will still
                        be stored in result.stdout)
            stderr_tee: likewise for stderr
            stdin: stdin to pass to the executed process

    Returns:
            a CmdResult object

    Raises:
            CmdError: the exit code of the command
                    execution was not 0
    """
    bg_job = join_bg_jobs(
        (BgJob(command, stdout_tee, stderr_tee, verbose, stdin=stdin),),
        timeout)[0]
    if not ignore_status and bg_job.result.exit_status:
        raise error.CmdError(command, bg_job.result,
                             "Command returned non-zero exit status")

    return bg_job.result


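# Illustrative sketch of the run() interface (added for documentation only,
# not part of the original module). run() returns a CmdResult and raises
# error.CmdError on a non-zero exit status unless ignore_status=True.
def _example_run_usage():
    result = run('uname -r', timeout=30)
    kernel_release = result.stdout.rstrip()

    # With ignore_status=True a failing command does not raise; the caller
    # can inspect exit_status (here 1) instead.
    failed = run('false', ignore_status=True)
    return kernel_release, failed.exit_status

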
def run_parallel(commands, timeout=None, ignore_status=False,
                 stdout_tee=None, stderr_tee=None):
    """Behaves the same as run() with the following exceptions:

    - commands is a list of commands to run in parallel.
    - ignore_status toggles whether or not an exception should be raised
      on any error.

    returns a list of CmdResult objects
    """
    bg_jobs = []
    for command in commands:
        bg_jobs.append(BgJob(command, stdout_tee, stderr_tee))

    # Updates objects in bg_jobs list with their process information
    join_bg_jobs(bg_jobs, timeout)

    for bg_job in bg_jobs:
        if not ignore_status and bg_job.result.exit_status:
            raise error.CmdError(bg_job.command, bg_job.result,
                                 "Command returned non-zero exit status")

    return [bg_job.result for bg_job in bg_jobs]


@deprecated
def run_bg(command):
    """Function deprecated. Please use BgJob class instead."""
    bg_job = BgJob(command)
    return bg_job.sp, bg_job.result


def join_bg_jobs(bg_jobs, timeout=None):
    """Joins the bg_jobs with the current thread.

    Returns the same list of bg_jobs objects that was passed in.
    """
    ret, timeout_error = 0, False
    for bg_job in bg_jobs:
        bg_job.output_prepare(StringIO.StringIO(), StringIO.StringIO())

    try:
        # We are holding ends to stdin, stdout pipes
        # hence we need to be sure to close those fds no matter what
        start_time = time.time()
        timeout_error = _wait_for_commands(bg_jobs, start_time, timeout)

        for bg_job in bg_jobs:
            # Process stdout and stderr
            bg_job.process_output(stdout=True, final_read=True)
            bg_job.process_output(stdout=False, final_read=True)
    finally:
        # close our ends of the pipes to the sp no matter what
        for bg_job in bg_jobs:
            bg_job.cleanup()

    if timeout_error:
        # TODO: This needs to be fixed to better represent what happens when
        # running in parallel. However this is backwards compatible, so it will
        # do for the time being.
        raise error.CmdError(bg_jobs[0].command, bg_jobs[0].result,
                             "Command(s) did not complete within %d seconds"
                             % timeout)

    return bg_jobs


def _wait_for_commands(bg_jobs, start_time, timeout):
    # This returns True if it must return due to a timeout, otherwise False.

    # To check for processes which terminate without producing any output
    # a 1 second timeout is used in select.
    SELECT_TIMEOUT = 1

    select_list = []
    reverse_dict = {}
    for bg_job in bg_jobs:
        select_list.append(bg_job.sp.stdout)
        select_list.append(bg_job.sp.stderr)
        reverse_dict[bg_job.sp.stdout] = (bg_job, True)
        reverse_dict[bg_job.sp.stderr] = (bg_job, False)

    if timeout:
        stop_time = start_time + timeout
        time_left = stop_time - time.time()
    else:
        time_left = None  # so that select never times out
    while not timeout or time_left > 0:
        # select will return when stdout is ready (including when it is
        # EOF, that is the process has terminated).
        ready, _, _ = select.select(select_list, [], [], SELECT_TIMEOUT)

        # os.read() has to be used instead of
        # subproc.stdout.read() which will otherwise block
        for fileno in ready:
            bg_job, stdout = reverse_dict[fileno]
            bg_job.process_output(stdout)

        remaining_jobs = [x for x in bg_jobs if x.result.exit_status is None]
        if len(remaining_jobs) == 0:
            return False
        for bg_job in remaining_jobs:
            bg_job.result.exit_status = bg_job.sp.poll()

        if timeout:
            time_left = stop_time - time.time()

    # Kill all processes which did not complete prior to timeout
    for bg_job in [x for x in bg_jobs if x.result.exit_status is None]:
        print '* Warning: run process timeout (%s) fired' % timeout
        nuke_subprocess(bg_job.sp)
        bg_job.result.exit_status = bg_job.sp.poll()

    return True


def nuke_subprocess(subproc):
    # check if the subprocess is still alive, first
    if subproc.poll() is not None:
        return subproc.poll()

    # the process has not terminated within timeout,
    # kill it via an escalating series of signals.
    signal_queue = [signal.SIGTERM, signal.SIGKILL]
    for sig in signal_queue:
        try:
            os.kill(subproc.pid, sig)
        # The process may have died before we could kill it.
        except OSError:
            pass

        for i in range(5):
            rc = subproc.poll()
            if rc is not None:
                return rc
            time.sleep(1)


def nuke_pid(pid):
    # the process has not terminated within timeout,
    # kill it via an escalating series of signals.
    signal_queue = [signal.SIGTERM, signal.SIGKILL]
    for sig in signal_queue:
        try:
            os.kill(pid, sig)

        # The process may have died before we could kill it.
        except OSError:
            pass

        try:
            for i in range(5):
                status = os.waitpid(pid, os.WNOHANG)[0]
                if status == pid:
                    return
                time.sleep(1)

            if status != pid:
                raise error.AutoservRunError('Could not kill %d'
                        % pid, None)

        # the process died before we join it.
        except OSError:
            pass


def system(command, timeout=None, ignore_status=False):
    """This function returns the exit status of command."""
    return run(command, timeout=timeout, ignore_status=ignore_status,
               stdout_tee=sys.stdout, stderr_tee=sys.stderr).exit_status


def system_parallel(commands, timeout=None, ignore_status=False):
    """This function returns a list of exit statuses for the respective
    list of commands."""
    return [bg_jobs.exit_status for bg_jobs in
            run_parallel(commands, timeout=timeout, ignore_status=ignore_status,
                         stdout_tee=sys.stdout, stderr_tee=sys.stderr)]


def system_output(command, timeout=None, ignore_status=False,
                  retain_output=False):
    if retain_output:
        out = run(command, timeout=timeout, ignore_status=ignore_status,
                  stdout_tee=sys.stdout, stderr_tee=sys.stderr).stdout
    else:
        out = run(command, timeout=timeout, ignore_status=ignore_status).stdout
    if out[-1:] == '\n':
        out = out[:-1]
    return out


def system_output_parallel(commands, timeout=None, ignore_status=False,
                           retain_output=False):
    if retain_output:
        out = [bg_job.stdout for bg_job in run_parallel(commands,
                timeout=timeout, ignore_status=ignore_status,
                stdout_tee=sys.stdout, stderr_tee=sys.stderr)]
    else:
        out = [bg_job.stdout for bg_job in run_parallel(commands,
                timeout=timeout, ignore_status=ignore_status)]
    # Strip a single trailing newline from each command's output, mirroring
    # what system_output() does for a single command.
    for i, x in enumerate(out):
        if x[-1:] == '\n':
            out[i] = x[:-1]
    return out


def strip_unicode(input):
    if type(input) == list:
        return [strip_unicode(i) for i in input]
    elif type(input) == dict:
        output = {}
        for key in input.keys():
            output[str(key)] = strip_unicode(input[key])
        return output
    elif type(input) == unicode:
        return str(input)
    else:
        return input


def get_cpu_percentage(function, *args, **dargs):
    """Returns a tuple containing the CPU% and return value from function call.

    This function calculates the usage time by taking the difference of
    the user and system times both before and after the function call.
    """
    child_pre = resource.getrusage(resource.RUSAGE_CHILDREN)
    self_pre = resource.getrusage(resource.RUSAGE_SELF)
    start = time.time()
    to_return = function(*args, **dargs)
    elapsed = time.time() - start
    self_post = resource.getrusage(resource.RUSAGE_SELF)
    child_post = resource.getrusage(resource.RUSAGE_CHILDREN)

    # Calculate CPU Percentage
    s_user, s_system = [a - b for a, b in zip(self_post, self_pre)[:2]]
    c_user, c_system = [a - b for a, b in zip(child_post, child_pre)[:2]]
    cpu_percent = (s_user + c_user + s_system + c_system) / elapsed

    return cpu_percent, to_return


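# Illustrative sketch of get_cpu_percentage() (added for documentation only,
# not part of the original module). It times an arbitrary callable and
# reports combined user+system CPU time (self and children) as a fraction of
# the elapsed wall-clock time, alongside the callable's return value.
def _example_get_cpu_percentage():
    # 'sleep 1' consumes almost no CPU, so cpu_fraction is close to 0.0 and
    # exit_status is the command's exit status (0).
    cpu_fraction, exit_status = get_cpu_percentage(system, 'sleep 1')
    return cpu_fraction, exit_status

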
"""
This function is used when there is a need to run more than one
job simultaneously starting exactly at the same time. It basically returns
a modified control file (containing the synchronization code prepended)
whenever it is ready to run the control file. The synchronization
is done using barriers to make sure that the jobs start at the same time.

Here is how the synchronization is done to make sure that the tests
start at exactly the same time on the client.
sc_bar is a server barrier and s_bar, c_bar are the normal barriers

            Job1           Job2           ......      JobN
 Server:   |               sc_bar
 Server:   |               s_bar          ......      s_bar
 Server:   |  at.run()     at.run()       ......      at.run()
 ----------|----------------------------------------------------
 Client    |  sc_bar
 Client    |  c_bar        c_bar          ......      c_bar
 Client    |  <run test>   <run test>     ......      <run test>


PARAMS:
   control : The control file to which the above synchronization
             code will be prepended
   host_name : The host name on which the job is going to run
   host_num (non-negative) : A number to identify the machine so that we have
             different sets of s_bar_ports for each of the machines.
   instance : The number of the job
   num_jobs : Total number of jobs that are going to run in parallel with
              this job starting at the same time
   port_base : Port number that is used to derive the actual barrier ports.

RETURN VALUE:
    The modified control file.

"""
def get_sync_control_file(control, host_name, host_num,
                          instance, num_jobs, port_base=63100):
    sc_bar_port = port_base
    c_bar_port = port_base
    if host_num < 0:
        print "Please provide a non-negative number for the host"
        return None
    s_bar_port = port_base + 1 + host_num # The set of s_bar_ports are
                                          # the same for a given machine

    sc_bar_timeout = 180
    s_bar_timeout = c_bar_timeout = 120

    # The barrier code snippet is prepended into the control file
    # dynamically before at.run() is called finally.
    control_new = []

    # jobid is the unique name used to identify the processes
    # trying to reach the barriers
    jobid = "%s#%d" % (host_name, instance)

    rendv = []
    # rendvstr is a temp holder for the rendezvous list of the processes
    for n in range(num_jobs):
        rendv.append("'%s#%d'" % (host_name, n))
    rendvstr = ",".join(rendv)

    if instance == 0:
        # Do the setup and wait at the server barrier
        # Clean up the tmp and the control dirs for the first instance
        control_new.append('if os.path.exists(job.tmpdir):')
        control_new.append("\t system('umount -f %s > /dev/null "
                           "2> /dev/null' % job.tmpdir,"
                           "ignore_status=True)")
        control_new.append("\t system('rm -rf ' + job.tmpdir)")
        control_new.append(
            'b0 = job.barrier("%s", "sc_bar", %d, port=%d)'
            % (jobid, sc_bar_timeout, sc_bar_port))
        control_new.append(
            'b0.rendevous_servers("PARALLEL_MASTER", "%s")'
            % jobid)

    elif instance == 1:
        # Wait at the server barrier to wait for instance=0
        # process to complete setup
        b0 = barrier.barrier("PARALLEL_MASTER", "sc_bar", sc_bar_timeout,
                             port=sc_bar_port)
        b0.rendevous_servers("PARALLEL_MASTER", jobid)

        if num_jobs > 2:
            b1 = barrier.barrier(jobid, "s_bar", s_bar_timeout,
                                 port=s_bar_port)
            b1.rendevous(rendvstr)

    else:
        # For the rest of the clients
        b2 = barrier.barrier(jobid, "s_bar", s_bar_timeout, port=s_bar_port)
        b2.rendevous(rendvstr)

    # Client side barrier for all the tests to start at the same time
    control_new.append('b1 = job.barrier("%s", "c_bar", %d, port=%d)'
                       % (jobid, c_bar_timeout, c_bar_port))
    control_new.append("b1.rendevous(%s)" % rendvstr)

    # Stick in the rest of the control file
    control_new.append(control)

    return "\n".join(control_new)


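# Illustrative sketch of get_sync_control_file() (added for documentation
# only, not part of the original module; the host name and control text are
# hypothetical). Instance 0 only builds the barrier preamble, so the call
# returns immediately; other instances additionally rendezvous with the
# already-running jobs before returning.
def _example_get_sync_control_file():
    control = "job.run_test('sleeptest')"
    return get_sync_control_file(control, 'client1.example.com', host_num=0,
                                 instance=0, num_jobs=3)

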
def get_arch(run_function=run):
    """
    Get the hardware architecture of the machine.
    run_function is used to execute the commands. It defaults to
    utils.run() but a custom method (if provided) should be of the
    same schema as utils.run. It should return a CmdResult object and
    throw a CmdError exception.
    """
    arch = run_function('/bin/uname -m').stdout.rstrip()
    if re.match(r'i\d86$', arch):
        arch = 'i386'
    return arch


def get_num_logical_cpus_per_socket(run_function=run):
    """
    Get the number of cores (including hyperthreading) per cpu.
    run_function is used to execute the commands. It defaults to
    utils.run() but a custom method (if provided) should be of the
    same schema as utils.run. It should return a CmdResult object and
    throw a CmdError exception.
    """
    siblings = run_function('grep "^siblings" /proc/cpuinfo').stdout.rstrip()
    num_siblings = map(int,
                       re.findall(r'^siblings\s*:\s*(\d+)\s*$',
                                  siblings, re.M))
    if len(num_siblings) == 0:
        raise error.TestError('Unable to find siblings info in /proc/cpuinfo')
    if min(num_siblings) != max(num_siblings):
        raise error.TestError('Number of siblings differ %r' %
                              num_siblings)
    return num_siblings[0]


def merge_trees(src, dest):
    """
    Merges a source directory tree at 'src' into a destination tree at
    'dest'. If a path is a file in both trees then the file in the source
    tree is APPENDED to the one in the destination tree. If a path is
    a directory in both trees then the directories are recursively merged
    with this function. In any other case, the function will skip the
    paths that cannot be merged (instead of failing).
    """
    if not os.path.exists(src):
        return # exists only in dest
    elif not os.path.exists(dest):
        if os.path.isfile(src):
            shutil.copy2(src, dest) # file only in src
        else:
            shutil.copytree(src, dest, symlinks=True) # dir only in src
        return
    elif os.path.isfile(src) and os.path.isfile(dest):
        # src & dest are files in both trees, append src to dest
        destfile = open(dest, "a")
        try:
            srcfile = open(src)
            try:
                destfile.write(srcfile.read())
            finally:
                srcfile.close()
        finally:
            destfile.close()
    elif os.path.isdir(src) and os.path.isdir(dest):
        # src & dest are directories in both trees, so recursively merge
        for name in os.listdir(src):
            merge_trees(os.path.join(src, name), os.path.join(dest, name))
    else:
        # src & dest both exist, but are incompatible
        return


class CmdResult(object):
    """
    Command execution result.

    command:     String containing the command line itself
    exit_status: Integer exit code of the process
    stdout:      String containing stdout of the process
    stderr:      String containing stderr of the process
    duration:    Elapsed wall clock time running the process
    """


    def __init__(self, command="", stdout="", stderr="",
                 exit_status=None, duration=0):
        self.command = command
        self.exit_status = exit_status
        self.stdout = stdout
        self.stderr = stderr
        self.duration = duration


    def __repr__(self):
        wrapper = textwrap.TextWrapper(width=78,
                                       initial_indent="\n    ",
                                       subsequent_indent="    ")

        stdout = self.stdout.rstrip()
        if stdout:
            stdout = "\nstdout:\n%s" % stdout

        stderr = self.stderr.rstrip()
        if stderr:
            stderr = "\nstderr:\n%s" % stderr

        return ("* Command: %s\n"
                "Exit status: %s\n"
                "Duration: %s\n"
                "%s"
                "%s"
                % (wrapper.fill(self.command), self.exit_status,
                   self.duration, stdout, stderr))


class run_randomly:
    def __init__(self, run_sequentially=False):
        # Run sequentially is for debugging control files
        self.test_list = []
        self.run_sequentially = run_sequentially


    def add(self, *args, **dargs):
        test = (args, dargs)
        self.test_list.append(test)


    def run(self, fn):
        while self.test_list:
            test_index = random.randint(0, len(self.test_list)-1)
            if self.run_sequentially:
                test_index = 0
            (args, dargs) = self.test_list.pop(test_index)
            fn(*args, **dargs)


def import_site_symbol(path, module, name, dummy=None, modulefile=None):
    """
    Try to import site specific symbol from site specific file if it exists

    @param path full filename of the source file calling this (i.e. __file__)
    @param module full module name
    @param name symbol name to be imported from the site file
    @param dummy dummy value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific symbol or dummy

    @exception ImportError if the site file exists but the import fails
    """
    short_module = module[module.rfind(".") + 1:]

    if not modulefile:
        modulefile = short_module + ".py"

    try:
        site_exists = os.path.getsize(os.path.join(os.path.dirname(path),
                                                   modulefile))
    except os.error:
        site_exists = False

    if site_exists:
        # return the object from the imported module
        obj = getattr(__import__(module, {}, {}, [short_module]), name)
    else:
        msg = "unable to import site module '%s', using non-site implementation"
        msg %= modulefile
        logging.info(msg)
        obj = dummy

    return obj


def import_site_class(path, module, classname, baseclass, modulefile=None):
    """
    Try to import site specific class from site specific file if it exists

    Args:
        path: full filename of the source file calling this (i.e. __file__)
        module: full module name
        classname: class name to be loaded from site file
        baseclass: base class object to return when no site file is present,
            or to mix in when the site class exists but does not inherit
            from baseclass
        modulefile: module filename

    Returns: baseclass if the site specific class does not exist, the site
        specific class if it exists and inherits from baseclass, or a mixin
        of the site specific class and baseclass when the site specific class
        exists and does not inherit from baseclass

    Raises: ImportError if the site file exists but the import fails
    """

    res = import_site_symbol(path, module, classname, None, modulefile)
    if res:
        if not issubclass(res, baseclass):
            # if not a subclass of baseclass then mix in baseclass with the
            # site specific class object and return the result
            res = type(classname, (res, baseclass), {})
    else:
        res = baseclass

    return res


def import_site_function(path, module, funcname, dummy, modulefile=None):
    """
    Try to import site specific function from site specific file if it exists

    Args:
        path: full filename of the source file calling this (i.e. __file__)
        module: full module name
        funcname: function name to be imported from site file
        dummy: dummy function to return in case there is no function to import
        modulefile: module filename

    Returns: site specific function object or dummy

    Raises: ImportError if the site file exists but the import fails
    """

    return import_site_symbol(path, module, funcname, dummy, modulefile)


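# Illustrative sketch of the import_site_* helpers above (added for
# documentation only, not part of the original module; the site module name
# and function below are hypothetical). If a sibling site_utils.py defines
# parse_results(), that implementation is returned, otherwise the local
# default is used unchanged.
def _example_import_site_function():
    def default_parse_results(text):
        return text.splitlines()

    return import_site_function(__file__,
                                'autotest_lib.client.common_lib.site_utils',
                                'parse_results', default_parse_results)

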
def write_pid(program_name):
    """
    Try to drop <program_name>.pid in the main autotest directory.

    Args:
        program_name: prefix for file name
    """

    my_path = os.path.dirname(__file__)
    pid_path = os.path.abspath(os.path.join(my_path, "../.."))
    pidf = open(os.path.join(pid_path, "%s.pid" % program_name), "w")
    if pidf:
        pidf.write("%s\n" % os.getpid())
        pidf.close()


def get_relative_path(path, reference):
    """Given 2 absolute paths "path" and "reference", compute the path of
    "path" as relative to the directory "reference".

    @param path the absolute path to convert to a relative path
    @param reference an absolute directory path to which the relative
        path will be computed
    """
    # normalize the paths (remove double slashes, etc)
    assert(os.path.isabs(path))
    assert(os.path.isabs(reference))

    path = os.path.normpath(path)
    reference = os.path.normpath(reference)

    # we could use os.path.split() but it splits from the end
    path_list = path.split(os.path.sep)[1:]
    ref_list = reference.split(os.path.sep)[1:]

    # find the longest leading common path
    for i in xrange(min(len(path_list), len(ref_list))):
        if path_list[i] != ref_list[i]:
            # decrement i so when exiting this loop either by no match or by
            # end of range we are one step behind
            i -= 1
            break
    i += 1
    # drop the common part of the paths, not interested in that anymore
    del path_list[:i]

    # for each uncommon component in the reference prepend a ".."
    path_list[:0] = ['..'] * (len(ref_list) - i)

    return os.path.join(*path_list)
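

# Worked example for get_relative_path() (added for documentation only, not
# part of the original module; the paths are hypothetical). Both arguments
# must be absolute; the result is the first path expressed relative to the
# directory given as the second.
def _example_get_relative_path():
    # The common leading component 'usr' is dropped and one '..' is added for
    # the uncommon reference component 'bin', giving '../share/doc'.
    return get_relative_path('/usr/share/doc', '/usr/bin')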