blob: 77d7e81eae2fade7c803b394a78625cb9a227185 [file] [log] [blame]
mbligh63073c92008-03-31 16:49:32 +00001#!/usr/bin/python
2#
3# Copyright 2008 Google Inc. Released under the GPL v2
4
mbligh849a0f62008-08-28 20:12:19 +00005import os, pickle, random, re, resource, select, shutil, signal, StringIO
6import socket, struct, subprocess, sys, time, textwrap, urllib, urlparse
mbligh45ffc432008-12-09 23:35:17 +00007import warnings, smtplib
mbligh76a42932008-10-09 20:35:38 +00008from autotest_lib.client.common_lib import error, barrier, debug
mbligh81edd792008-08-26 16:54:02 +00009
def deprecated(func):
    """This is a decorator which can be used to mark functions as deprecated.
    It will result in a warning being emitted when the function is used.

    The warning uses stacklevel=2 so that it points at the *caller* of the
    deprecated function rather than at this wrapper, which makes the
    deprecation actionable.  The wrapper preserves the original name,
    docstring and attribute dict so introspection still works.
    """
    def new_func(*args, **dargs):
        warnings.warn("Call to deprecated function %s." % func.__name__,
                      category=DeprecationWarning, stacklevel=2)
        return func(*args, **dargs)
    new_func.__name__ = func.__name__
    new_func.__doc__ = func.__doc__
    new_func.__dict__.update(func.__dict__)
    return new_func
21
22
class BgJob(object):
    """A single command run asynchronously in a background shell.

    The command is started immediately in the constructor via
    subprocess.Popen (through /bin/bash) with both stdout and stderr
    captured as pipes.  Output is accumulated into file-like buffers
    supplied later via output_prepare(), and optionally teed live to
    stdout_tee/stderr_tee as it is read.  The final exit status and
    captured output end up in self.result (a CmdResult).
    """

    def __init__(self, command, stdout_tee=None, stderr_tee=None, verbose=True):
        # stdout_tee/stderr_tee: optional file-like objects that receive a
        # live copy of the child's output in addition to the buffers.
        self.command = command
        self.stdout_tee = stdout_tee
        self.stderr_tee = stderr_tee
        self.result = CmdResult(command)
        self.log = debug.get_logger()
        if verbose:
            self.log.debug("running: %s" % command)
        # shell=True + executable="/bin/bash" so the command string gets
        # full bash semantics; SIGPIPE is restored to the default in the
        # child so shell pipelines terminate normally.
        self.sp = subprocess.Popen(command, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   preexec_fn=self._reset_sigpipe, shell=True,
                                   executable="/bin/bash")


    def output_prepare(self, stdout_file=None, stderr_file=None):
        # Install the buffers that process_output()/cleanup() will write
        # into; must be called before either of those methods.
        self.stdout_file = stdout_file
        self.stderr_file = stderr_file


    def process_output(self, stdout=True, final_read=False):
        """output_prepare must be called prior to calling this

        Reads from the child's stdout (stdout=True) or stderr pipe into
        the corresponding buffer, teeing to the optional tee stream.
        With final_read=True it drains everything currently available;
        otherwise it performs a single non-blocking-sized read.
        """
        if stdout:
            pipe, buf, tee = self.sp.stdout, self.stdout_file, self.stdout_tee
        else:
            pipe, buf, tee = self.sp.stderr, self.stderr_file, self.stderr_tee

        if final_read:
            # read in all the data we can from pipe and then stop
            data = []
            while select.select([pipe], [], [], 0)[0]:
                data.append(os.read(pipe.fileno(), 1024))
                if len(data[-1]) == 0:
                    break
            data = "".join(data)
        else:
            # perform a single read
            data = os.read(pipe.fileno(), 1024)
        buf.write(data)
        if tee:
            tee.write(data)
            tee.flush()


    def cleanup(self):
        # Close our ends of the child's pipes and snapshot the accumulated
        # output into the CmdResult.  Call after the job has been joined.
        self.sp.stdout.close()
        self.sp.stderr.close()
        self.result.stdout = self.stdout_file.getvalue()
        self.result.stderr = self.stderr_file.getvalue()


    def _reset_sigpipe(self):
        # Runs in the child between fork and exec: undo Python's
        # SIG_IGN on SIGPIPE so the child behaves like a normal shell.
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
76
mbligh81edd792008-08-26 16:54:02 +000077
def ip_to_long(ip):
    """Convert a dotted-quad IPv4 address string to its 32-bit integer."""
    packed = socket.inet_aton(ip)
    # '!L' = unsigned 32-bit value in network (big-endian) byte order
    (value,) = struct.unpack('!L', packed)
    return value
81
82
def long_to_ip(number):
    """Convert a 32-bit integer back into a dotted-quad IPv4 string."""
    # inverse of ip_to_long: pack in network byte order, then format
    packed = struct.pack('!L', number)
    return socket.inet_ntoa(packed)
86
87
def create_subnet_mask(bits):
    """Return the 32-bit netmask integer for a prefix of length 'bits'."""
    # 'bits' one-bits followed by (32 - bits) zero-bits
    return ((1 << bits) - 1) << (32 - bits)
90
91
def format_ip_with_mask(ip, mask_bits):
    """Return 'a.b.c.d/bits' with the host portion of ip zeroed out."""
    network = ip_to_long(ip) & create_subnet_mask(mask_bits)
    return "%s/%s" % (long_to_ip(network), mask_bits)
mbligh6231cd62008-02-02 19:18:33 +000095
mblighde0d47e2008-03-28 14:37:18 +000096
def normalize_hostname(alias):
    """Resolve an alias to its canonical hostname (forward then reverse DNS)."""
    address = socket.gethostbyname(alias)
    hostname, aliases, addresses = socket.gethostbyaddr(address)
    return hostname
100
101
def get_ip_local_port_range():
    """Return (lower, upper) bounds of the kernel's local port range."""
    contents = read_one_line('/proc/sys/net/ipv4/ip_local_port_range')
    match = re.match(r'\s*(\d+)\s*(\d+)\s*$', contents)
    lower, upper = match.groups()
    return (int(lower), int(upper))
106
107
def set_ip_local_port_range(lower, upper):
    """Set the kernel's local port range to [lower, upper]."""
    line = '%d %d\n' % (lower, upper)
    write_one_line('/proc/sys/net/ipv4/ip_local_port_range', line)
111
mbligh315b9412008-10-01 03:34:11 +0000112
mbligh45ffc432008-12-09 23:35:17 +0000113
114def send_email(mail_from, mail_to, subject, body):
115 """
116 Sends an email via smtp
117
118 mail_from: string with email address of sender
119 mail_to: string or list with email address(es) of recipients
120 subject: string with subject of email
121 body: (multi-line) string with body of email
122 """
123 if isinstance(mail_to, str):
124 mail_to = [mail_to]
125 msg = "From: %s\nTo: %s\nSubject: %s\n\n%s" % (mail_from, ','.join(mail_to),
126 subject, body)
127 try:
128 mailer = smtplib.SMTP('localhost')
129 try:
130 mailer.sendmail(mail_from, mail_to, msg)
131 finally:
132 mailer.quit()
133 except Exception, e:
134 # Emails are non-critical, not errors, but don't raise them
135 print "Sending email failed. Reason: %s" % repr(e)
136
137
def read_one_line(filename):
    """Return the first line of filename, without its trailing newline.

    The handle is closed explicitly (try/finally, matching the style of
    open_write_close) instead of being leaked to the garbage collector.
    """
    f = open(filename, 'r')
    try:
        return f.readline().rstrip('\n')
    finally:
        f.close()
jadmanski5182e162008-05-13 21:48:16 +0000140
141
def write_one_line(filename, line):
    """Make 'line' (newline-terminated) the entire contents of filename."""
    data = line.rstrip('\n') + '\n'
    open_write_close(filename, data)
144
145
def open_write_close(filename, data):
    """Write data to filename (creating/truncating it) and close the file."""
    fileobj = open(filename, 'w')
    try:
        fileobj.write(data)
    finally:
        fileobj.close()
jadmanski5182e162008-05-13 21:48:16 +0000152
153
def read_keyval(path):
    """
    Read a key-value pair format file into a dictionary, and return it.
    Takes either a filename or directory name as input. If it's a
    directory name, we assume you want the file to be called keyval.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    result = {}
    for raw_line in open(path):
        # strip trailing '#' comments and whitespace before parsing
        line = re.sub('#.*', '', raw_line).rstrip()
        if not re.search(r'^[-\w]+=', line):
            raise ValueError('Invalid format line: %s' % line)
        key, value = line.split('=', 1)
        # coerce purely-numeric values: int first, then float
        if re.search('^\d+$', value):
            value = int(value)
        elif re.search('^(\d+\.)?\d+$', value):
            value = float(value)
        result[key] = value
    return result
mblighde0d47e2008-03-28 14:37:18 +0000174
175
def write_keyval(path, dictionary, type_tag=None):
    """
    Write a key-value pair format file out to a file. This uses append
    mode to open the file, so existing text will not be overwritten or
    reparsed.

    If type_tag is None, then the key must be composed of alphanumeric
    characters (or dashes+underscores). However, if type-tag is not
    null then the keys must also have "{type_tag}" as a suffix. At
    the moment the only valid values of type_tag are "attr" and "perf".

    Raises ValueError for an invalid type_tag or a non-matching key.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    keyval = open(path, 'a')

    # the try/finally now also covers type_tag validation, so the file
    # handle is no longer leaked when an invalid tag raises ValueError
    try:
        if type_tag is None:
            key_regex = re.compile(r'^[-\w]+$')
        else:
            if type_tag not in ('attr', 'perf'):
                raise ValueError('Invalid type tag: %s' % type_tag)
            escaped_tag = re.escape(type_tag)
            key_regex = re.compile(r'^[-\w]+\{%s\}$' % escaped_tag)
        # items() instead of the Python-2-only iteritems(); equivalent here
        for key, value in dictionary.items():
            if not key_regex.search(key):
                raise ValueError('Invalid key: %s' % key)
            keyval.write('%s=%s\n' % (key, value))
    finally:
        keyval.close()
mbligh6231cd62008-02-02 19:18:33 +0000205
206
def is_url(path):
    """Return true if path looks like a URL"""
    # for now, just handle http and ftp
    scheme = urlparse.urlparse(path)[0]
    return scheme in ('http', 'ftp')
mbligh6231cd62008-02-02 19:18:33 +0000212
213
def urlopen(url, data=None, proxies=None, timeout=5):
    """Wrapper to urllib.urlopen with timeout addition."""
    # urllib has no timeout parameter of its own, so temporarily lower
    # the global socket default and restore it afterwards
    saved_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        return urllib.urlopen(url, data=data, proxies=proxies)
    finally:
        socket.setdefaulttimeout(saved_timeout)
mbligh02ff2d52008-06-03 15:00:21 +0000224
225
def urlretrieve(url, filename=None, reporthook=None, data=None, timeout=300):
    """Wrapper to urllib.urlretrieve with timeout addition."""
    # same global-socket-timeout dance as urlopen above
    saved_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        return urllib.urlretrieve(url, filename=filename,
                                  reporthook=reporthook, data=data)
    finally:
        socket.setdefaulttimeout(saved_timeout)
235
mbligh02ff2d52008-06-03 15:00:21 +0000236
mbligh6231cd62008-02-02 19:18:33 +0000237def get_file(src, dest, permissions=None):
jadmanski0afbb632008-06-06 21:10:57 +0000238 """Get a file from src, which can be local or a remote URL"""
239 if (src == dest):
240 return
241 if (is_url(src)):
242 print 'PWD: ' + os.getcwd()
243 print 'Fetching \n\t', src, '\n\t->', dest
244 try:
245 urllib.urlretrieve(src, dest)
246 except IOError, e:
247 raise error.AutotestError('Unable to retrieve %s (to %s)'
248 % (src, dest), e)
249 else:
250 shutil.copyfile(src, dest)
251 if permissions:
252 os.chmod(dest, permissions)
253 return dest
mbligh6231cd62008-02-02 19:18:33 +0000254
255
def unmap_url(srcdir, src, destdir='.'):
    """
    Receives either a path to a local file or a URL.
    returns either the path to the local file, or the fetched URL

    unmap_url('/usr/src', 'foo.tar', '/tmp')
                            = '/usr/src/foo.tar'
    unmap_url('/usr/src', 'http://site/file', '/tmp')
                            = '/tmp/file'
                            (after retrieving it)
    """
    if not is_url(src):
        # plain local file relative to srcdir
        return os.path.join(srcdir, src)
    # take the last path component of the URL as the local filename
    remote_path = urlparse.urlparse(src)[2]
    local_name = os.path.basename(remote_path)
    return get_file(src, os.path.join(destdir, local_name))
mbligh6231cd62008-02-02 19:18:33 +0000274
275
def update_version(srcdir, preserve_srcdir, new_version, install,
                   *args, **dargs):
    """
    Make sure srcdir is version new_version

    If not, delete it and install() the new version.

    In the preserve_srcdir case, we just check it's up to date,
    and if not, we rerun install, without removing srcdir
    """
    versionfile = os.path.join(srcdir, '.version')
    install_needed = True

    if os.path.exists(versionfile):
        # close the handle explicitly instead of leaking it; binary mode
        # is what pickle expects (identical to text mode on POSIX)
        f = open(versionfile, 'rb')
        try:
            old_version = pickle.load(f)
        finally:
            f.close()
        if old_version == new_version:
            install_needed = False

    if install_needed:
        if not preserve_srcdir and os.path.exists(srcdir):
            shutil.rmtree(srcdir)
        install(*args, **dargs)
        # only record the version if the install actually produced srcdir
        if os.path.exists(srcdir):
            f = open(versionfile, 'wb')
            try:
                pickle.dump(new_version, f)
            finally:
                f.close()
mbligh462c0152008-03-13 15:37:10 +0000300
301
def run(command, timeout=None, ignore_status=False,
        stdout_tee=None, stderr_tee=None, verbose=True):
    """
    Run a command on the host.

    Args:
            command: the command line string
            timeout: time limit in seconds before attempting to
                    kill the running process. The run() function
                    will take a few seconds longer than 'timeout'
                    to complete if it has to kill the process.
            ignore_status: do not raise an exception, no matter what
                    the exit code of the command is.
            stdout_tee: optional file-like object to which stdout data
                        will be written as it is generated (data will still
                        be stored in result.stdout)
            stderr_tee: likewise for stderr

    Returns:
            a CmdResult object

    Raises:
            CmdError: the exit code of the command
                    execution was not 0
    """
    job = BgJob(command, stdout_tee, stderr_tee, verbose)
    # join_bg_jobs fills in job.result (exit status plus captured output)
    join_bg_jobs((job,), timeout)
    if job.result.exit_status and not ignore_status:
        raise error.CmdError(command, job.result,
                             "Command returned non-zero exit status")

    return job.result
mbligh63073c92008-03-31 16:49:32 +0000334
mbligh45ffc432008-12-09 23:35:17 +0000335
def run_parallel(commands, timeout=None, ignore_status=False,
                 stdout_tee=None, stderr_tee=None):
    """Behaves the same as run with the following exceptions:

    - commands is a list of commands to run in parallel.
    - ignore_status toggles whether or not an exception should be raised
      on any error.

    returns a list of CmdResult objects
    """
    bg_jobs = []
    for command in commands:
        bg_jobs.append(BgJob(command, stdout_tee, stderr_tee))

    # Updates objects in bg_jobs list with their process information
    join_bg_jobs(bg_jobs, timeout)

    for bg_job in bg_jobs:
        if not ignore_status and bg_job.result.exit_status:
            # bug fix: raise with the failing job's own command, not the
            # stale 'command' loop variable (always the *last* command)
            raise error.CmdError(bg_job.command, bg_job.result,
                                 "Command returned non-zero exit status")

    return [bg_job.result for bg_job in bg_jobs]
359
360
@deprecated
def run_bg(command):
    """Function deprecated.  Please use BgJob class instead."""
    job = BgJob(command)
    return job.sp, job.result
mbligh63073c92008-03-31 16:49:32 +0000366
367
def join_bg_jobs(bg_jobs, timeout=None):
    """Joins the bg_jobs with the current thread.

    Returns the same list of bg_jobs objects that was passed in.

    Raises error.CmdError if the jobs did not all complete within
    'timeout' seconds.  (The dead locals 'ret, timeouterr' from the
    original implementation have been removed; they were never read.)
    """
    for bg_job in bg_jobs:
        bg_job.output_prepare(StringIO.StringIO(), StringIO.StringIO())

    try:
        # We are holding ends to stdin, stdout pipes
        # hence we need to be sure to close those fds no mater what
        start_time = time.time()
        timeout_error = _wait_for_commands(bg_jobs, start_time, timeout)

        for bg_job in bg_jobs:
            # Process stdout and stderr
            bg_job.process_output(stdout=True, final_read=True)
            bg_job.process_output(stdout=False, final_read=True)
    finally:
        # close our ends of the pipes to the sp no matter what
        for bg_job in bg_jobs:
            bg_job.cleanup()

    if timeout_error:
        # TODO: This needs to be fixed to better represent what happens when
        # running in parallel. However this is backwards compatable, so it will
        # do for the time being.
        raise error.CmdError(bg_jobs[0].command, bg_jobs[0].result,
                             "Command(s) did not complete within %d seconds"
                             % timeout)

    return bg_jobs
mbligh63073c92008-03-31 16:49:32 +0000402
mbligh849a0f62008-08-28 20:12:19 +0000403
def _wait_for_commands(bg_jobs, start_time, timeout):
    # This returns True if it must return due to a timeout, otherwise False.

    # To check for processes which terminate without producing any output
    # a 1 second timeout is used in select.
    SELECT_TIMEOUT = 1

    # Watch both pipes of every job; reverse_dict maps a ready pipe back
    # to (job, is_stdout) so the right buffer gets the data.
    select_list = []
    reverse_dict = {}
    for bg_job in bg_jobs:
        select_list.append(bg_job.sp.stdout)
        select_list.append(bg_job.sp.stderr)
        reverse_dict[bg_job.sp.stdout] = (bg_job,True)
        reverse_dict[bg_job.sp.stderr] = (bg_job,False)

    if timeout:
        stop_time = start_time + timeout
        time_left = stop_time - time.time()
    else:
        time_left = None # so that select never times out
    while not timeout or time_left > 0:
        # select will return when stdout is ready (including when it is
        # EOF, that is the process has terminated).
        ready, _, _ = select.select(select_list, [], [], SELECT_TIMEOUT)

        # os.read() has to be used instead of
        # subproc.stdout.read() which will otherwise block
        for fileno in ready:
            bg_job,stdout = reverse_dict[fileno]
            bg_job.process_output(stdout)

        # Poll every job that hasn't recorded an exit status yet; once
        # all have exited we can return without waiting for the timeout.
        remaining_jobs = [x for x in bg_jobs if x.result.exit_status is None]
        if len(remaining_jobs) == 0:
            return False
        for bg_job in remaining_jobs:
            bg_job.result.exit_status = bg_job.sp.poll()

        if timeout:
            time_left = stop_time - time.time()

    # Kill all processes which did not complete prior to timeout
    for bg_job in [x for x in bg_jobs if x.result.exit_status is None]:
        print '* Warning: run process timeout (%s) fired' % timeout
        nuke_subprocess(bg_job.sp)
        bg_job.result.exit_status = bg_job.sp.poll()

    return True
mbligh63073c92008-03-31 16:49:32 +0000451
452
def nuke_subprocess(subproc):
    """Forcibly terminate a subprocess.Popen and return its exit code.

    Returns immediately if the process has already exited.  Otherwise
    sends SIGTERM then SIGKILL and polls for up to ~5 seconds; returns
    None if the process still has not been reaped by then.
    """
    # check if the subprocess is still alive, first
    rc = subproc.poll()
    if rc is not None:
        return rc

    # the process has not terminated within timeout,
    # kill it via an escalating series of signals.
    for sig in (signal.SIGTERM, signal.SIGKILL):
        try:
            os.kill(subproc.pid, sig)
        except OSError:
            # The process may have died before we could kill it.
            pass

    for _ in range(5):
        rc = subproc.poll()
        if rc is not None:
            return rc
        time.sleep(1)
mbligh63073c92008-03-31 16:49:32 +0000473
474
def nuke_pid(pid):
    """Kill process 'pid' with SIGTERM then SIGKILL and reap it.

    Waits up to ~5 seconds for the child to be collected via
    os.waitpid, so this only works for direct children of the current
    process.  Raises error.AutoservRunError if the process survives.
    """
    # the process has not terminated within timeout,
    # kill it via an escalating series of signals.
    signal_queue = [signal.SIGTERM, signal.SIGKILL]
    for sig in signal_queue:
        try:
            os.kill(pid, sig)

        # The process may have died before we could kill it.
        except OSError:
            pass

    try:
        for i in range(5):
            # WNOHANG: poll rather than block; waitpid returns (0, 0)
            # while the child is still running, (pid, status) once reaped
            status = os.waitpid(pid, os.WNOHANG)[0]
            if status == pid:
                return
            time.sleep(1)

        if status != pid:
            raise error.AutoservRunError('Could not kill %d'
                                         % pid, None)

    # the process died before we join it.
    except OSError:
        pass
mbligh63073c92008-03-31 16:49:32 +0000501
502
def system(command, timeout=None, ignore_status=False):
    """This function returns the exit status of command."""
    result = run(command, timeout=timeout, ignore_status=ignore_status,
                 stdout_tee=sys.stdout, stderr_tee=sys.stderr)
    return result.exit_status
mbligh63073c92008-03-31 16:49:32 +0000507
508
def system_parallel(commands, timeout=None, ignore_status=False):
    """This function returns a list of exit statuses for the respective
    list of commands."""
    # run_parallel returns CmdResult objects, one per command, in order
    results = run_parallel(commands, timeout=timeout,
                           ignore_status=ignore_status,
                           stdout_tee=sys.stdout, stderr_tee=sys.stderr)
    return [result.exit_status for result in results]
mbligh849a0f62008-08-28 20:12:19 +0000515
516
def system_output(command, timeout=None, ignore_status=False,
                  retain_output=False):
    """Run command and return its stdout, minus one trailing newline.

    retain_output additionally tees the child's output to this
    process's stdout/stderr as it is produced.
    """
    if retain_output:
        result = run(command, timeout=timeout, ignore_status=ignore_status,
                     stdout_tee=sys.stdout, stderr_tee=sys.stderr)
    else:
        result = run(command, timeout=timeout, ignore_status=ignore_status)
    output = result.stdout
    if output.endswith('\n'):
        output = output[:-1]
    return output
mbligh63073c92008-03-31 16:49:32 +0000526
mbligh849a0f62008-08-28 20:12:19 +0000527
def system_output_parallel(commands, timeout=None, ignore_status=False,
                           retain_output=False):
    """Run commands in parallel and return a list of their stdouts.

    One trailing newline is stripped from each command's output, matching
    system_output.  (The original loop compared the list slice out[-1:]
    against '\\n' — always false — so it never stripped anything.)
    """
    if retain_output:
        out = [bg_job.stdout for bg_job in run_parallel(commands,
                timeout=timeout, ignore_status=ignore_status,
                stdout_tee=sys.stdout, stderr_tee=sys.stderr)]
    else:
        out = [bg_job.stdout for bg_job in run_parallel(commands,
                timeout=timeout, ignore_status=ignore_status)]
    stripped = []
    for output in out:
        if output.endswith('\n'):
            output = output[:-1]
        stripped.append(output)
    return stripped
540
541
def strip_unicode(input):
    """Recursively replace unicode objects inside input with plain str.

    Handles (exact) list and dict containers; dict keys are str()-ed as
    well.  Anything else is returned untouched.
    """
    # exact type checks (not isinstance) to preserve original behavior
    # for subclasses, which are passed through unchanged
    kind = type(input)
    if kind == list:
        return [strip_unicode(element) for element in input]
    elif kind == dict:
        converted = {}
        for key, value in input.items():
            converted[str(key)] = strip_unicode(value)
        return converted
    elif kind == unicode:
        return str(input)
    else:
        return input
554
555
def get_cpu_percentage(function, *args, **dargs):
    """Returns a tuple containing the CPU% and return value from function call.

    This function calculates the usage time by taking the difference of
    the user and system times both before and after the function call.
    """
    child_pre = resource.getrusage(resource.RUSAGE_CHILDREN)
    self_pre = resource.getrusage(resource.RUSAGE_SELF)
    start = time.time()
    to_return = function(*args, **dargs)
    elapsed = time.time() - start
    self_post = resource.getrusage(resource.RUSAGE_SELF)
    child_post = resource.getrusage(resource.RUSAGE_CHILDREN)

    # Calculate CPU Percentage using the named rusage fields instead of
    # positional slicing with zip(...)[:2] (which is also Python-2-only);
    # ru_utime/ru_stime are exactly the first two rusage fields.
    s_user = self_post.ru_utime - self_pre.ru_utime
    s_system = self_post.ru_stime - self_pre.ru_stime
    c_user = child_post.ru_utime - child_pre.ru_utime
    c_system = child_post.ru_stime - child_pre.ru_stime
    cpu_percent = (s_user + c_user + s_system + c_system) / elapsed

    return cpu_percent, to_return
576
577
mblighc1cbc992008-05-27 20:01:45 +0000578"""
579This function is used when there is a need to run more than one
580job simultaneously starting exactly at the same time. It basically returns
581a modified control file (containing the synchronization code prepended)
582whenever it is ready to run the control file. The synchronization
583is done using barriers to make sure that the jobs start at the same time.
584
585Here is how the synchronization is done to make sure that the tests
586start at exactly the same time on the client.
587sc_bar is a server barrier and s_bar, c_bar are the normal barriers
588
589 Job1 Job2 ...... JobN
590 Server: | sc_bar
591 Server: | s_bar ...... s_bar
592 Server: | at.run() at.run() ...... at.run()
593 ----------|------------------------------------------------------
594 Client | sc_bar
595 Client | c_bar c_bar ...... c_bar
596 Client | <run test> <run test> ...... <run test>
597
598
599PARAMS:
    control_file : The control file to which the above synchronization
601 code would be prepended to
602 host_name : The host name on which the job is going to run
603 host_num (non negative) : A number to identify the machine so that we have
604 different sets of s_bar_ports for each of the machines.
605 instance : The number of the job
606 num_jobs : Total number of jobs that are going to run in parallel with
607 this job starting at the same time
608 port_base : Port number that is used to derive the actual barrier ports.
609
610RETURN VALUE:
611 The modified control file.
612
613"""
def get_sync_control_file(control, host_name, host_num,
                          instance, num_jobs, port_base=63100):
    """Return 'control' with barrier-synchronization code prepended.

    See the module-level commentary above for the full barrier diagram.
    Returns None (after printing a message) if host_num is negative.
    Note that for instance != 0 this function itself *blocks* on server
    barriers before returning the modified control file.
    """
    sc_bar_port = port_base
    c_bar_port = port_base
    if host_num < 0:
        print "Please provide a non negative number for the host"
        return None
    s_bar_port = port_base + 1 + host_num # The set of s_bar_ports are
                                          # the same for a given machine

    sc_bar_timeout = 180
    s_bar_timeout = c_bar_timeout = 120

    # The barrier code snippet is prepended into the control file
    # dynamically before at.run() is called finally.
    control_new = []

    # jobid is the unique name used to identify the processes
    # trying to reach the barriers
    jobid = "%s#%d" % (host_name, instance)

    rendv = []
    # rendvstr is a temp holder for the rendezvous list of the processes
    for n in range(num_jobs):
        rendv.append("'%s#%d'" % (host_name, n))
    rendvstr = ",".join(rendv)

    if instance == 0:
        # Do the setup and wait at the server barrier
        # Clean up the tmp and the control dirs for the first instance
        control_new.append('if os.path.exists(job.tmpdir):')
        control_new.append("\t system('umount -f %s > /dev/null"
                           "2> /dev/null' % job.tmpdir,"
                           "ignore_status=True)")
        control_new.append("\t system('rm -rf ' + job.tmpdir)")
        control_new.append(
            'b0 = job.barrier("%s", "sc_bar", %d, port=%d)'
            % (jobid, sc_bar_timeout, sc_bar_port))
        control_new.append(
            'b0.rendevous_servers("PARALLEL_MASTER", "%s")'
            % jobid)

    elif instance == 1:
        # Wait at the server barrier to wait for instance=0
        # process to complete setup
        b0 = barrier.barrier("PARALLEL_MASTER", "sc_bar", sc_bar_timeout,
                             port=sc_bar_port)
        b0.rendevous_servers("PARALLEL_MASTER", jobid)

        if(num_jobs > 2):
            b1 = barrier.barrier(jobid, "s_bar", s_bar_timeout,
                                 port=s_bar_port)
            b1.rendevous(rendvstr)

    else:
        # For the rest of the clients
        b2 = barrier.barrier(jobid, "s_bar", s_bar_timeout, port=s_bar_port)
        b2.rendevous(rendvstr)

    # Client side barrier for all the tests to start at the same time
    control_new.append('b1 = job.barrier("%s", "c_bar", %d, port=%d)'
                       % (jobid, c_bar_timeout, c_bar_port))
    control_new.append("b1.rendevous(%s)" % rendvstr)

    # Stick in the rest of the control file
    control_new.append(control)

    return "\n".join(control_new)
mblighc1cbc992008-05-27 20:01:45 +0000682
mbligh63073c92008-03-31 16:49:32 +0000683
def get_arch(run_function=run):
    """
    Get the hardware architecture of the machine.
    run_function is used to execute the commands. It defaults to
    utils.run() but a custom method (if provided) should be of the
    same schema as utils.run. It should return a CmdResult object and
    throw a CmdError exception.
    """
    machine = run_function('/bin/uname -m').stdout.rstrip()
    # normalize all ia32 variants (i386/i486/i586/i686) to plain i386
    if re.match(r'i\d86$', machine):
        return 'i386'
    return machine
696
697
def merge_trees(src, dest):
    """
    Merges a source directory tree at 'src' into a destination tree at
    'dest'. If a path is a file in both trees than the file in the source
    tree is APPENDED to the one in the destination tree. If a path is
    a directory in both trees then the directories are recursively merged
    with this function. In any other case, the function will skip the
    paths that cannot be merged (instead of failing).
    """
    if not os.path.exists(src):
        return # exists only in dest
    if not os.path.exists(dest):
        # path exists only in src: copy it over wholesale
        if os.path.isfile(src):
            shutil.copy2(src, dest)
        else:
            shutil.copytree(src, dest, symlinks=True)
        return
    if os.path.isfile(src) and os.path.isfile(dest):
        # both are files: append the source's contents to the destination
        destfile = open(dest, "a")
        try:
            srcfile = open(src)
            try:
                destfile.write(srcfile.read())
            finally:
                srcfile.close()
        finally:
            destfile.close()
    elif os.path.isdir(src) and os.path.isdir(dest):
        # both are directories: merge them entry by entry
        for entry in os.listdir(src):
            merge_trees(os.path.join(src, entry), os.path.join(dest, entry))
    else:
        # src & dest both exist but are incompatible (e.g. file vs dir)
        return
733
734
class CmdResult(object):
    """
    Command execution result.

    command: String containing the command line itself
    exit_status: Integer exit code of the process
    stdout: String containing stdout of the process
    stderr: String containing stderr of the process
    duration: Elapsed wall clock time running the process
    """


    def __init__(self, command=None, stdout="", stderr="",
                 exit_status=None, duration=0):
        self.command = command
        self.exit_status = exit_status
        self.stdout = stdout
        self.stderr = stderr
        self.duration = duration


    def __repr__(self):
        # wrap long command lines for readable multi-line display
        wrapper = textwrap.TextWrapper(width = 78,
                                       initial_indent="\n    ",
                                       subsequent_indent="    ")

        # only show the stdout/stderr sections when there is output
        out = self.stdout.rstrip()
        if out:
            out = "\nstdout:\n%s" % out

        err = self.stderr.rstrip()
        if err:
            err = "\nstderr:\n%s" % err

        return ("* Command: %s\n"
                "Exit status: %s\n"
                "Duration: %s\n"
                "%s"
                "%s"
                % (wrapper.fill(self.command), self.exit_status,
                   self.duration, out, err))
mbligh63073c92008-03-31 16:49:32 +0000776
777
class run_randomly:
    """Collect (args, kwargs) invocations via add() and replay them
    through a callback in random order via run().

    With run_sequentially=True the queued order is preserved instead,
    which is useful when debugging control files.
    """

    def __init__(self, run_sequentially=False):
        # Run sequentially is for debugging control files
        self.test_list = []
        self.run_sequentially = run_sequentially


    def add(self, *args, **dargs):
        self.test_list.append((args, dargs))


    def run(self, fn):
        while self.test_list:
            # draw a random pending entry (the randint call is kept even
            # in sequential mode so RNG consumption stays identical)
            next_index = random.randint(0, len(self.test_list) - 1)
            if self.run_sequentially:
                next_index = 0
            args, dargs = self.test_list.pop(next_index)
            fn(*args, **dargs)