#
# Copyright 2008 Google Inc. Released under the GPL v2

import os, pickle, random, re, resource, select, shutil, signal, StringIO
import socket, struct, subprocess, sys, time, textwrap, urlparse
import warnings, smtplib, logging, urllib2
from autotest_lib.client.common_lib import error, barrier, logging_manager


def deprecated(func):
    """This is a decorator which can be used to mark functions as deprecated.
    It will result in a warning being emitted when the function is used."""
    def new_func(*args, **dargs):
        warnings.warn("Call to deprecated function %s." % func.__name__,
                      category=DeprecationWarning)
        return func(*args, **dargs)
    new_func.__name__ = func.__name__
    new_func.__doc__ = func.__doc__
    new_func.__dict__.update(func.__dict__)
    return new_func
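
# Illustrative usage sketch (added for documentation, not part of the original
# module): marking an old helper as deprecated so callers get a
# DeprecationWarning pointing them at the replacement API.
@deprecated
def _example_deprecated_helper():
    # calling _example_deprecated_helper() emits:
    #   "Call to deprecated function _example_deprecated_helper."
    return 42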


class _NullStream(object):
    def write(self, data):
        pass


    def flush(self):
        pass


TEE_TO_LOGS = object()
_the_null_stream = _NullStream()

DEFAULT_STDOUT_LEVEL = logging.DEBUG
DEFAULT_STDERR_LEVEL = logging.ERROR


def get_stream_tee_file(stream, level):
    if stream is None:
        return _the_null_stream
    if stream is TEE_TO_LOGS:
        return logging_manager.LoggingFile(level=level)
    return stream


class BgJob(object):
    def __init__(self, command, stdout_tee=None, stderr_tee=None, verbose=True,
                 stdin=None, stderr_level=DEFAULT_STDERR_LEVEL):
        self.command = command
        self.stdout_tee = get_stream_tee_file(stdout_tee, DEFAULT_STDOUT_LEVEL)
        self.stderr_tee = get_stream_tee_file(stderr_tee, stderr_level)
        self.result = CmdResult(command)
        if verbose:
            logging.debug("Running '%s'" % command)
        self.sp = subprocess.Popen(command, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   preexec_fn=self._reset_sigpipe, shell=True,
                                   executable="/bin/bash",
                                   stdin=stdin)


    def output_prepare(self, stdout_file=None, stderr_file=None):
        self.stdout_file = stdout_file
        self.stderr_file = stderr_file


    def process_output(self, stdout=True, final_read=False):
        """output_prepare must be called prior to calling this"""
        if stdout:
            pipe, buf, tee = self.sp.stdout, self.stdout_file, self.stdout_tee
        else:
            pipe, buf, tee = self.sp.stderr, self.stderr_file, self.stderr_tee

        if final_read:
            # read in all the data we can from pipe and then stop
            data = []
            while select.select([pipe], [], [], 0)[0]:
                data.append(os.read(pipe.fileno(), 1024))
                if len(data[-1]) == 0:
                    break
            data = "".join(data)
        else:
            # perform a single read
            data = os.read(pipe.fileno(), 1024)
        buf.write(data)
        tee.write(data)


    def cleanup(self):
        self.stdout_tee.flush()
        self.stderr_tee.flush()
        self.sp.stdout.close()
        self.sp.stderr.close()
        self.result.stdout = self.stdout_file.getvalue()
        self.result.stderr = self.stderr_file.getvalue()


    def _reset_sigpipe(self):
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)


def ip_to_long(ip):
    # !L is a long in network byte order
    return struct.unpack('!L', socket.inet_aton(ip))[0]


def long_to_ip(number):
    # See above comment.
    return socket.inet_ntoa(struct.pack('!L', number))


def create_subnet_mask(bits):
    return (1 << 32) - (1 << 32-bits)


def format_ip_with_mask(ip, mask_bits):
    masked_ip = ip_to_long(ip) & create_subnet_mask(mask_bits)
    return "%s/%s" % (long_to_ip(masked_ip), mask_bits)
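
# Illustrative sketch (not part of the original module): round-tripping an IPv4
# address through the long/IP helpers and masking it down to its /24 network.
def _example_ip_helpers():
    assert long_to_ip(ip_to_long('192.168.10.5')) == '192.168.10.5'
    # create_subnet_mask(24) == 0xffffff00, so the host byte is zeroed out
    return format_ip_with_mask('192.168.10.5', 24)   # -> '192.168.10.0/24'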


def normalize_hostname(alias):
    ip = socket.gethostbyname(alias)
    return socket.gethostbyaddr(ip)[0]


def get_ip_local_port_range():
    match = re.match(r'\s*(\d+)\s*(\d+)\s*$',
                     read_one_line('/proc/sys/net/ipv4/ip_local_port_range'))
    return (int(match.group(1)), int(match.group(2)))


def set_ip_local_port_range(lower, upper):
    write_one_line('/proc/sys/net/ipv4/ip_local_port_range',
                   '%d %d\n' % (lower, upper))


def send_email(mail_from, mail_to, subject, body):
    """
    Sends an email via SMTP.

    mail_from: string with email address of sender
    mail_to: string or list with email address(es) of recipients
    subject: string with subject of email
    body: (multi-line) string with body of email
    """
    if isinstance(mail_to, str):
        mail_to = [mail_to]
    msg = "From: %s\nTo: %s\nSubject: %s\n\n%s" % (mail_from, ','.join(mail_to),
                                                   subject, body)
    try:
        mailer = smtplib.SMTP('localhost')
        try:
            mailer.sendmail(mail_from, mail_to, msg)
        finally:
            mailer.quit()
    except Exception, e:
        # Sending email is non-critical, so log the failure instead of raising
        print "Sending email failed. Reason: %s" % repr(e)


def read_one_line(filename):
    return open(filename, 'r').readline().rstrip('\n')


def write_one_line(filename, line):
    open_write_close(filename, line.rstrip('\n') + '\n')


def open_write_close(filename, data):
    f = open(filename, 'w')
    try:
        f.write(data)
    finally:
        f.close()


def read_keyval(path):
    """
    Read a key-value pair format file into a dictionary, and return it.
    Takes either a filename or directory name as input. If it's a
    directory name, we assume you want the file to be called keyval.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    keyval = {}
    if os.path.exists(path):
        for line in open(path):
            line = re.sub('#.*', '', line).rstrip()
            if not re.search(r'^[-\.\w]+=', line):
                raise ValueError('Invalid format line: %s' % line)
            key, value = line.split('=', 1)
            if re.search(r'^\d+$', value):
                value = int(value)
            elif re.search(r'^(\d+\.)?\d+$', value):
                value = float(value)
            keyval[key] = value
    return keyval


def write_keyval(path, dictionary, type_tag=None):
    """
    Write a key-value pair format file out to a file. This uses append
    mode to open the file, so existing text will not be overwritten or
    reparsed.

    If type_tag is None, then the key must be composed of alphanumeric
    characters (or dashes+underscores). However, if type_tag is not
    None then the keys must also have "{type_tag}" as a suffix. At
    the moment the only valid values of type_tag are "attr" and "perf".
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    keyval = open(path, 'a')

    if type_tag is None:
        key_regex = re.compile(r'^[-\.\w]+$')
    else:
        if type_tag not in ('attr', 'perf'):
            raise ValueError('Invalid type tag: %s' % type_tag)
        escaped_tag = re.escape(type_tag)
        key_regex = re.compile(r'^[-\.\w]+\{%s\}$' % escaped_tag)
    try:
        for key in sorted(dictionary.keys()):
            if not key_regex.search(key):
                raise ValueError('Invalid key: %s' % key)
            keyval.write('%s=%s\n' % (key, dictionary[key]))
    finally:
        keyval.close()
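
# Illustrative sketch (not part of the original module): writing job-level
# keyvals into a results directory and reading them back as typed values. The
# directory path is hypothetical.
def _example_keyval_roundtrip(resultsdir='/tmp/example_results'):
    if not os.path.isdir(resultsdir):
        os.makedirs(resultsdir)
    write_keyval(resultsdir, {'kernel-version': '2.6.30', 'iterations': 3})
    # read_keyval() converts numeric values, so 'iterations' comes back as
    # the integer 3 while 'kernel-version' stays a string
    return read_keyval(resultsdir)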


def is_url(path):
    """Return true if path looks like a URL"""
    # for now, just handle http and ftp
    url_parts = urlparse.urlparse(path)
    return (url_parts[0] in ('http', 'ftp'))


def urlopen(url, data=None, timeout=5):
    """Wrapper to urllib2.urlopen with timeout addition."""

    # Save old timeout
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        return urllib2.urlopen(url, data=data)
    finally:
        socket.setdefaulttimeout(old_timeout)


def urlretrieve(url, filename, data=None, timeout=300):
    """Retrieve a file from given url."""
    logging.debug('Fetching %s -> %s', url, filename)

    src_file = urlopen(url, data=data, timeout=timeout)
    try:
        dest_file = open(filename, 'wb')
        try:
            shutil.copyfileobj(src_file, dest_file)
        finally:
            dest_file.close()
    finally:
        src_file.close()


def get_file(src, dest, permissions=None):
    """Get a file from src, which can be local or a remote URL"""
    if src == dest:
        return

    if is_url(src):
        urlretrieve(src, dest)
    else:
        shutil.copyfile(src, dest)

    if permissions:
        os.chmod(dest, permissions)
    return dest


def unmap_url(srcdir, src, destdir='.'):
    """
    Receives either a path to a local file or a URL.
    returns either the path to the local file, or the fetched URL

    unmap_url('/usr/src', 'foo.tar', '/tmp')
                = '/usr/src/foo.tar'
    unmap_url('/usr/src', 'http://site/file', '/tmp')
                = '/tmp/file'
                (after retrieving it)
    """
    if is_url(src):
        url_parts = urlparse.urlparse(src)
        filename = os.path.basename(url_parts[2])
        dest = os.path.join(destdir, filename)
        return get_file(src, dest)
    else:
        return os.path.join(srcdir, src)


def update_version(srcdir, preserve_srcdir, new_version, install,
                   *args, **dargs):
    """
    Make sure srcdir is version new_version

    If not, delete it and install() the new version.

    In the preserve_srcdir case, we just check it's up to date,
    and if not, we rerun install, without removing srcdir
    """
    versionfile = os.path.join(srcdir, '.version')
    install_needed = True

    if os.path.exists(versionfile):
        old_version = pickle.load(open(versionfile))
        if old_version == new_version:
            install_needed = False

    if install_needed:
        if not preserve_srcdir and os.path.exists(srcdir):
            shutil.rmtree(srcdir)
        install(*args, **dargs)
        if os.path.exists(srcdir):
            pickle.dump(new_version, open(versionfile, 'w'))


def get_stderr_level(stderr_is_expected):
    if stderr_is_expected:
        return DEFAULT_STDOUT_LEVEL
    return DEFAULT_STDERR_LEVEL


def run(command, timeout=None, ignore_status=False,
        stdout_tee=None, stderr_tee=None, verbose=True, stdin=None,
        stderr_is_expected=None):
    """
    Run a command on the host.

    Args:
            command: the command line string
            timeout: time limit in seconds before attempting to
                    kill the running process. The run() function
                    will take a few seconds longer than 'timeout'
                    to complete if it has to kill the process.
            ignore_status: do not raise an exception, no matter what
                    the exit code of the command is.
            stdout_tee: optional file-like object to which stdout data
                    will be written as it is generated (data will still
                    be stored in result.stdout)
            stderr_tee: likewise for stderr
            verbose: if True, log the command being run
            stdin: stdin to pass to the executed process

    Returns:
            a CmdResult object

    Raises:
            CmdError: the exit code of the command
                    execution was not 0
    """
    if stderr_is_expected is None:
        stderr_is_expected = ignore_status
    bg_job = join_bg_jobs(
        (BgJob(command, stdout_tee, stderr_tee, verbose, stdin=stdin,
               stderr_level=get_stderr_level(stderr_is_expected)),),
        timeout)[0]
    if not ignore_status and bg_job.result.exit_status:
        raise error.CmdError(command, bg_job.result,
                             "Command returned non-zero exit status")

    return bg_job.result
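
# Illustrative sketch (not part of the original module): running a command,
# inspecting the CmdResult, and tolerating a non-zero exit status.
def _example_run_usage():
    result = run('uname -r')                 # raises error.CmdError on failure
    kernel = result.stdout.strip()
    status = run('grep -q foo /etc/passwd',
                 ignore_status=True).exit_status   # 1 if 'foo' is absent
    return kernel, status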


def run_parallel(commands, timeout=None, ignore_status=False,
                 stdout_tee=None, stderr_tee=None):
    """Behaves the same as run() with the following exceptions:

    - commands is a list of commands to run in parallel.
    - ignore_status toggles whether or not an exception should be raised
      on any error.

    returns a list of CmdResult objects
    """
    bg_jobs = []
    for command in commands:
        bg_jobs.append(BgJob(command, stdout_tee, stderr_tee,
                             stderr_level=get_stderr_level(ignore_status)))

    # Updates objects in bg_jobs list with their process information
    join_bg_jobs(bg_jobs, timeout)

    for bg_job in bg_jobs:
        if not ignore_status and bg_job.result.exit_status:
            raise error.CmdError(bg_job.command, bg_job.result,
                                 "Command returned non-zero exit status")

    return [bg_job.result for bg_job in bg_jobs]
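
# Illustrative sketch (not part of the original module): starting several
# commands at once and collecting their outputs in the original order.
def _example_run_parallel_usage():
    results = run_parallel(['hostname', 'date', 'uptime'], timeout=30)
    return [r.stdout.rstrip() for r in results]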


@deprecated
def run_bg(command):
    """Function deprecated. Please use BgJob class instead."""
    bg_job = BgJob(command)
    return bg_job.sp, bg_job.result


def join_bg_jobs(bg_jobs, timeout=None):
    """Joins the bg_jobs with the current thread.

    Returns the same list of bg_jobs objects that was passed in.
    """
    ret, timeout_error = 0, False
    for bg_job in bg_jobs:
        bg_job.output_prepare(StringIO.StringIO(), StringIO.StringIO())

    try:
        # We are holding ends to the stdout/stderr pipes,
        # hence we need to be sure to close those fds no matter what
        start_time = time.time()
        timeout_error = _wait_for_commands(bg_jobs, start_time, timeout)

        for bg_job in bg_jobs:
            # Process stdout and stderr
            bg_job.process_output(stdout=True, final_read=True)
            bg_job.process_output(stdout=False, final_read=True)
    finally:
        # close our ends of the pipes to the sp no matter what
        for bg_job in bg_jobs:
            bg_job.cleanup()

    if timeout_error:
        # TODO: This needs to be fixed to better represent what happens when
        # running in parallel. However this is backwards compatible, so it
        # will do for the time being.
        raise error.CmdError(bg_jobs[0].command, bg_jobs[0].result,
                             "Command(s) did not complete within %d seconds"
                             % timeout)

    return bg_jobs


def _wait_for_commands(bg_jobs, start_time, timeout):
    # This returns True if it must return due to a timeout, otherwise False.

    # To check for processes which terminate without producing any output
    # a 1 second timeout is used in select.
    SELECT_TIMEOUT = 1

    select_list = []
    reverse_dict = {}
    for bg_job in bg_jobs:
        select_list.append(bg_job.sp.stdout)
        select_list.append(bg_job.sp.stderr)
        reverse_dict[bg_job.sp.stdout] = (bg_job, True)
        reverse_dict[bg_job.sp.stderr] = (bg_job, False)

    if timeout:
        stop_time = start_time + timeout
        time_left = stop_time - time.time()
    else:
        time_left = None  # so that select never times out
    while not timeout or time_left > 0:
        # select will return when stdout is ready (including when it is
        # EOF, that is the process has terminated).
        ready, _, _ = select.select(select_list, [], [], SELECT_TIMEOUT)

        # os.read() has to be used instead of
        # subproc.stdout.read() which will otherwise block
        for fileno in ready:
            bg_job, stdout = reverse_dict[fileno]
            bg_job.process_output(stdout)

        remaining_jobs = [x for x in bg_jobs if x.result.exit_status is None]
        if len(remaining_jobs) == 0:
            return False
        for bg_job in remaining_jobs:
            bg_job.result.exit_status = bg_job.sp.poll()

        if timeout:
            time_left = stop_time - time.time()

    # Kill all processes which did not complete prior to timeout
    for bg_job in [x for x in bg_jobs if x.result.exit_status is None]:
        print '* Warning: run process timeout (%s) fired' % timeout
        nuke_subprocess(bg_job.sp)
        bg_job.result.exit_status = bg_job.sp.poll()

    return True


def pid_is_alive(pid):
    """
    True if process pid exists and is not yet stuck in Zombie state.
    Zombies are impossible to move between cgroups, etc.
    pid can be integer, or text of integer.
    """
    path = '/proc/%s/stat' % pid

    try:
        stat = read_one_line(path)
    except IOError:
        if not os.path.exists(path):
            # file went away
            return False
        raise

    return stat.split()[2] != 'Z'


def signal_pid(pid, sig):
    """
    Sends a signal to a process id. Returns True if the process terminated
    successfully, False otherwise.
    """
    try:
        os.kill(pid, sig)
    except OSError:
        # The process may have died before we could kill it.
        pass

    for i in range(5):
        if not pid_is_alive(pid):
            return True
        time.sleep(1)

    # The process is still alive
    return False


def nuke_subprocess(subproc):
    # check if the subprocess is still alive, first
    if subproc.poll() is not None:
        return subproc.poll()

    # the process has not terminated within timeout,
    # kill it via an escalating series of signals.
    signal_queue = [signal.SIGTERM, signal.SIGKILL]
    for sig in signal_queue:
        signal_pid(subproc.pid, sig)
        if subproc.poll() is not None:
            return subproc.poll()


def nuke_pid(pid):
    # the process has not terminated within timeout,
    # kill it via an escalating series of signals.
    signal_queue = [signal.SIGTERM, signal.SIGKILL]
    for sig in signal_queue:
        if signal_pid(pid, sig):
            return

    # no signal successfully terminated the process
    raise error.AutoservRunError('Could not kill %d' % pid, None)
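
# Illustrative sketch (not part of the original module): tearing down a runaway
# child, first through its Popen handle and then, if still needed, by raw pid.
def _example_kill_runaway_child():
    proc = subprocess.Popen('sleep 600', shell=True)
    exit_status = nuke_subprocess(proc)      # SIGTERM, then SIGKILL
    if pid_is_alive(proc.pid):
        nuke_pid(proc.pid)                   # raises AutoservRunError on failure
    return exit_status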


def system(command, timeout=None, ignore_status=False):
    """This function returns the exit status of command."""
    return run(command, timeout=timeout, ignore_status=ignore_status,
               stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS).exit_status


def system_parallel(commands, timeout=None, ignore_status=False):
    """This function returns a list of exit statuses for the respective
    list of commands."""
    return [bg_jobs.exit_status for bg_jobs in
            run_parallel(commands, timeout=timeout, ignore_status=ignore_status,
                         stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)]


def system_output(command, timeout=None, ignore_status=False,
                  retain_output=False):
    if retain_output:
        out = run(command, timeout=timeout, ignore_status=ignore_status,
                  stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS).stdout
    else:
        out = run(command, timeout=timeout, ignore_status=ignore_status).stdout
    if out[-1:] == '\n':
        out = out[:-1]
    return out


def system_output_parallel(commands, timeout=None, ignore_status=False,
                           retain_output=False):
    if retain_output:
        out = [bg_job.stdout for bg_job
               in run_parallel(commands, timeout=timeout,
                               ignore_status=ignore_status,
                               stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)]
    else:
        out = [bg_job.stdout for bg_job in run_parallel(commands,
                timeout=timeout, ignore_status=ignore_status)]
    # strip a single trailing newline from each command's output
    for i, x in enumerate(out):
        if x[-1:] == '\n':
            out[i] = x[:-1]
    return out


def strip_unicode(input):
    if type(input) == list:
        return [strip_unicode(i) for i in input]
    elif type(input) == dict:
        output = {}
        for key in input.keys():
            output[str(key)] = strip_unicode(input[key])
        return output
    elif type(input) == unicode:
        return str(input)
    else:
        return input


def get_cpu_percentage(function, *args, **dargs):
    """Returns a tuple containing the CPU% and return value from function call.

    This function calculates the usage time by taking the difference of
    the user and system times both before and after the function call.
    """
    child_pre = resource.getrusage(resource.RUSAGE_CHILDREN)
    self_pre = resource.getrusage(resource.RUSAGE_SELF)
    start = time.time()
    to_return = function(*args, **dargs)
    elapsed = time.time() - start
    self_post = resource.getrusage(resource.RUSAGE_SELF)
    child_post = resource.getrusage(resource.RUSAGE_CHILDREN)

    # Calculate CPU Percentage
    s_user, s_system = [a - b for a, b in zip(self_post, self_pre)[:2]]
    c_user, c_system = [a - b for a, b in zip(child_post, child_pre)[:2]]
    cpu_percent = (s_user + c_user + s_system + c_system) / elapsed

    return cpu_percent, to_return
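
# Illustrative sketch (not part of the original module): measuring how much CPU
# a compression command burns relative to its wall-clock time.
def _example_cpu_percentage():
    cpu_percent, exit_status = get_cpu_percentage(
        system, 'gzip -c /etc/services > /dev/null')
    # cpu_percent is the ratio of (user + system) time to elapsed time, so it
    # can exceed 1.0 on multi-core machines
    return cpu_percent, exit_status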


"""
This function is used when there is a need to run more than one
job simultaneously starting exactly at the same time. It returns
a modified control file with the synchronization code prepended,
ready to be run. The synchronization
is done using barriers to make sure that the jobs start at the same time.

Here is how the synchronization is done to make sure that the tests
start at exactly the same time on the client.
sc_bar is a server barrier and s_bar, c_bar are the normal barriers

                 Job1           Job2        ......      JobN
 Server:  |      sc_bar
 Server:  |      s_bar          s_bar       ......      s_bar
 Server:  |      at.run()       at.run()    ......      at.run()
 ---------|------------------------------------------------------
 Client   |      sc_bar
 Client   |      c_bar          c_bar       ......      c_bar
 Client   |      <run test>     <run test>  ......      <run test>


PARAMS:
   control_file : The control file to which the above synchronization
        code will be prepended
   host_name : The host name on which the job is going to run
   host_num (non-negative) : A number to identify the machine so that we have
        different sets of s_bar_ports for each of the machines.
   instance : The number of the job
   num_jobs : Total number of jobs that are going to run in parallel with
        this job starting at the same time
   port_base : Port number that is used to derive the actual barrier ports.

RETURN VALUE:
    The modified control file.

"""
def get_sync_control_file(control, host_name, host_num,
                          instance, num_jobs, port_base=63100):
    sc_bar_port = port_base
    c_bar_port = port_base
    if host_num < 0:
        print "Please provide a non-negative number for the host"
        return None
    s_bar_port = port_base + 1 + host_num  # The set of s_bar_ports are
                                           # the same for a given machine

    sc_bar_timeout = 180
    s_bar_timeout = c_bar_timeout = 120

    # The barrier code snippet is prepended into the control file
    # dynamically before at.run() is called finally.
    control_new = []

    # jobid is the unique name used to identify the processes
    # trying to reach the barriers
    jobid = "%s#%d" % (host_name, instance)

    rendv = []
    # rendvstr is a temp holder for the rendezvous list of the processes
    for n in range(num_jobs):
        rendv.append("'%s#%d'" % (host_name, n))
    rendvstr = ",".join(rendv)

    if instance == 0:
        # Do the setup and wait at the server barrier
        # Clean up the tmp and the control dirs for the first instance
        control_new.append('if os.path.exists(job.tmpdir):')
        control_new.append("\t system('umount -f %s > /dev/null"
                           "2> /dev/null' % job.tmpdir,"
                           "ignore_status=True)")
        control_new.append("\t system('rm -rf ' + job.tmpdir)")
        control_new.append(
            'b0 = job.barrier("%s", "sc_bar", %d, port=%d)'
            % (jobid, sc_bar_timeout, sc_bar_port))
        control_new.append(
            'b0.rendezvous_servers("PARALLEL_MASTER", "%s")'
            % jobid)

    elif instance == 1:
        # Wait at the server barrier to wait for instance=0
        # process to complete setup
        b0 = barrier.barrier("PARALLEL_MASTER", "sc_bar", sc_bar_timeout,
                             port=sc_bar_port)
        b0.rendezvous_servers("PARALLEL_MASTER", jobid)

        if num_jobs > 2:
            b1 = barrier.barrier(jobid, "s_bar", s_bar_timeout,
                                 port=s_bar_port)
            b1.rendezvous(rendvstr)

    else:
        # For the rest of the clients
        b2 = barrier.barrier(jobid, "s_bar", s_bar_timeout, port=s_bar_port)
        b2.rendezvous(rendvstr)

    # Client side barrier for all the tests to start at the same time
    control_new.append('b1 = job.barrier("%s", "c_bar", %d, port=%d)'
                       % (jobid, c_bar_timeout, c_bar_port))
    control_new.append("b1.rendezvous(%s)" % rendvstr)

    # Stick in the rest of the control file
    control_new.append(control)

    return "\n".join(control_new)


def get_arch(run_function=run):
    """
    Get the hardware architecture of the machine.
    run_function is used to execute the commands. It defaults to
    utils.run() but a custom method (if provided) should be of the
    same schema as utils.run. It should return a CmdResult object and
    throw a CmdError exception.
    """
    arch = run_function('/bin/uname -m').stdout.rstrip()
    if re.match(r'i\d86$', arch):
        arch = 'i386'
    return arch


def get_num_logical_cpus_per_socket(run_function=run):
    """
    Get the number of cores (including hyperthreading) per cpu.
    run_function is used to execute the commands. It defaults to
    utils.run() but a custom method (if provided) should be of the
    same schema as utils.run. It should return a CmdResult object and
    throw a CmdError exception.
    """
    siblings = run_function('grep "^siblings" /proc/cpuinfo').stdout.rstrip()
    num_siblings = map(int,
                       re.findall(r'^siblings\s*:\s*(\d+)\s*$',
                                  siblings, re.M))
    if len(num_siblings) == 0:
        raise error.TestError('Unable to find siblings info in /proc/cpuinfo')
    if min(num_siblings) != max(num_siblings):
        raise error.TestError('Number of siblings differ %r' %
                              num_siblings)
    return num_siblings[0]


def merge_trees(src, dest):
    """
    Merges a source directory tree at 'src' into a destination tree at
    'dest'. If a path is a file in both trees then the file in the source
    tree is APPENDED to the one in the destination tree. If a path is
    a directory in both trees then the directories are recursively merged
    with this function. In any other case, the function will skip the
    paths that cannot be merged (instead of failing).
    """
    if not os.path.exists(src):
        return  # exists only in dest
    elif not os.path.exists(dest):
        if os.path.isfile(src):
            shutil.copy2(src, dest)  # file only in src
        else:
            shutil.copytree(src, dest, symlinks=True)  # dir only in src
        return
    elif os.path.isfile(src) and os.path.isfile(dest):
        # src & dest are files in both trees, append src to dest
        destfile = open(dest, "a")
        try:
            srcfile = open(src)
            try:
                destfile.write(srcfile.read())
            finally:
                srcfile.close()
        finally:
            destfile.close()
    elif os.path.isdir(src) and os.path.isdir(dest):
        # src & dest are directories in both trees, so recursively merge
        for name in os.listdir(src):
            merge_trees(os.path.join(src, name), os.path.join(dest, name))
    else:
        # src & dest both exist, but are incompatible
        return
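
# Illustrative sketch (not part of the original module): folding a partial
# results tree from a rerun into the main results tree, appending log files
# that exist in both. The directory names are hypothetical.
def _example_merge_results():
    merge_trees('/tmp/results.rerun', '/tmp/results')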


class CmdResult(object):
    """
    Command execution result.

    command:     String containing the command line itself
    exit_status: Integer exit code of the process
    stdout:      String containing stdout of the process
    stderr:      String containing stderr of the process
    duration:    Elapsed wall clock time running the process
    """


    def __init__(self, command="", stdout="", stderr="",
                 exit_status=None, duration=0):
        self.command = command
        self.exit_status = exit_status
        self.stdout = stdout
        self.stderr = stderr
        self.duration = duration


    def __repr__(self):
        wrapper = textwrap.TextWrapper(width=78,
                                       initial_indent="\n    ",
                                       subsequent_indent="    ")

        stdout = self.stdout.rstrip()
        if stdout:
            stdout = "\nstdout:\n%s" % stdout

        stderr = self.stderr.rstrip()
        if stderr:
            stderr = "\nstderr:\n%s" % stderr

        return ("* Command: %s\n"
                "Exit status: %s\n"
                "Duration: %s\n"
                "%s"
                "%s"
                % (wrapper.fill(self.command), self.exit_status,
                   self.duration, stdout, stderr))


class run_randomly:
    def __init__(self, run_sequentially=False):
        # Run sequentially is for debugging control files
        self.test_list = []
        self.run_sequentially = run_sequentially


    def add(self, *args, **dargs):
        test = (args, dargs)
        self.test_list.append(test)


    def run(self, fn):
        while self.test_list:
            test_index = random.randint(0, len(self.test_list)-1)
            if self.run_sequentially:
                test_index = 0
            (args, dargs) = self.test_list.pop(test_index)
            fn(*args, **dargs)
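
# Illustrative sketch (not part of the original module): collecting test
# invocations and dispatching them in random order, the way a control file
# would with job.run_test as the callback. Test names/arguments are
# hypothetical.
def _example_run_randomly():
    sequence = run_randomly()
    sequence.add('sleeptest', seconds=1)
    sequence.add('dbench', iterations=2)
    sequence.run(lambda name, **dargs: logging.info('running %s %r',
                                                    name, dargs))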


def import_site_symbol(path, module, name, dummy=None, modulefile=None):
    """
    Try to import a site specific symbol from a site specific file if it exists

    @param path full filename of the source file calling this (i.e. __file__)
    @param module full module name
    @param name symbol name to be imported from the site file
    @param dummy dummy value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific symbol or dummy

    @exception ImportError if the site file exists but the import fails
    """
    short_module = module[module.rfind(".") + 1:]

    if not modulefile:
        modulefile = short_module + ".py"

    try:
        site_exists = os.path.getsize(os.path.join(os.path.dirname(path),
                                                   modulefile))
    except os.error:
        site_exists = False

    msg = None
    if site_exists:
        # special unique value to tell us if the symbol can't be imported
        cant_import = object()

        # return the object from the imported module
        obj = getattr(__import__(module, {}, {}, [short_module]), name,
                      cant_import)
        if obj is cant_import:
            msg = ("unable to import site symbol '%s', using non-site "
                   "implementation") % name
            logging.error(msg)
            obj = dummy
    else:
        obj = dummy

    return obj


def import_site_class(path, module, classname, baseclass, modulefile=None):
    """
    Try to import a site specific class from a site specific file if it exists

    Args:
        path: full filename of the source file calling this (i.e. __file__)
        module: full module name
        classname: class name to be loaded from site file
        baseclass: base class object to return when no site file present or
            to mix in when the site class exists but is not inherited from
            baseclass
        modulefile: module filename

    Returns: baseclass if the site specific class does not exist, the site
        specific class if it exists and is inherited from baseclass, or a
        mixin of the site specific class and baseclass when the site specific
        class exists and is not inherited from baseclass

    Raises: ImportError if the site file exists but the import fails
    """

    res = import_site_symbol(path, module, classname, None, modulefile)
    if res:
        if not issubclass(res, baseclass):
            # if not a subclass of baseclass then mix in baseclass with the
            # site specific class object and return the result
            res = type(classname, (res, baseclass), {})
    else:
        res = baseclass

    return res


def import_site_function(path, module, funcname, dummy, modulefile=None):
    """
    Try to import a site specific function from a site specific file if it
    exists

    Args:
        path: full filename of the source file calling this (i.e. __file__)
        module: full module name
        funcname: function name to be imported from site file
        dummy: dummy function to return in case there is no function to import
        modulefile: module filename

    Returns: site specific function object or dummy

    Raises: ImportError if the site file exists but the import fails
    """

    return import_site_symbol(path, module, funcname, dummy, modulefile)
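
# Illustrative sketch (not part of the original module): preferring a site
# specific hook when a site override module exists next to this file. The
# module path and function name used here are hypothetical.
def _example_import_site_function():
    def _default_get_lab_name():
        return 'default-lab'
    return import_site_function(
        __file__, 'autotest_lib.client.common_lib.site_utils',
        'get_lab_name', _default_get_lab_name)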


def _get_pid_path(program_name):
    my_path = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(my_path, "..", "..",
                                        "%s.pid" % program_name))


def write_pid(program_name):
    """
    Try to drop <program_name>.pid in the main autotest directory.

    Args:
      program_name: prefix for file name
    """
    pidfile = open(_get_pid_path(program_name), "w")
    try:
        pidfile.write("%s\n" % os.getpid())
    finally:
        pidfile.close()


def delete_pid_file_if_exists(program_name):
    """
    Tries to remove <program_name>.pid from the main autotest directory.
    """
    pidfile_path = _get_pid_path(program_name)

    try:
        os.remove(pidfile_path)
    except OSError:
        if not os.path.exists(pidfile_path):
            return
        raise


def get_pid_from_file(program_name):
    """
    Reads the pid from <program_name>.pid in the autotest directory.

    @param program_name the name of the program
    @return the pid if the file exists, None otherwise.
    """
    pidfile_path = _get_pid_path(program_name)
    if not os.path.exists(pidfile_path):
        return None

    pidfile = open(_get_pid_path(program_name), 'r')

    try:
        try:
            pid = int(pidfile.readline())
        except IOError:
            if not os.path.exists(pidfile_path):
                return None
            raise
    finally:
        pidfile.close()

    return pid


def program_is_alive(program_name):
    """
    Checks if the process is alive and not in Zombie state.

    @param program_name the name of the program
    @return True if still alive, False otherwise
    """
    pid = get_pid_from_file(program_name)
    if pid is None:
        return False
    return pid_is_alive(pid)


def signal_program(program_name, sig=signal.SIGTERM):
    """
    Sends a signal to the process listed in <program_name>.pid

    @param program_name the name of the program
    @param sig signal to send
    """
    pid = get_pid_from_file(program_name)
    if pid:
        signal_pid(pid, sig)


def get_relative_path(path, reference):
    """Given 2 absolute paths "path" and "reference", compute the path of
    "path" as relative to the directory "reference".

    @param path the absolute path to convert to a relative path
    @param reference an absolute directory path to which the relative
        path will be computed
    """
    # normalize the paths (remove double slashes, etc)
    assert(os.path.isabs(path))
    assert(os.path.isabs(reference))

    path = os.path.normpath(path)
    reference = os.path.normpath(reference)

    # we could use os.path.split() but it splits from the end
    path_list = path.split(os.path.sep)[1:]
    ref_list = reference.split(os.path.sep)[1:]

    # find the longest leading common path
    for i in xrange(min(len(path_list), len(ref_list))):
        if path_list[i] != ref_list[i]:
            # decrement i so when exiting this loop either by no match or by
            # end of range we are one step behind
            i -= 1
            break
    i += 1
    # drop the common part of the paths, not interested in that anymore
    del path_list[:i]

    # for each uncommon component in the reference prepend a ".."
    path_list[:0] = ['..'] * (len(ref_list) - i)

    return os.path.join(*path_list)
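
# Illustrative sketch (not part of the original module): computing the relative
# path from one results directory to a sibling directory. The paths are
# hypothetical.
def _example_get_relative_path():
    # '/usr/local/autotest/results/debug' relative to the directory
    # '/usr/local/autotest/results/job1' is '../debug'
    return get_relative_path('/usr/local/autotest/results/debug',
                             '/usr/local/autotest/results/job1')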


def sh_escape(command):
    """
    Escape special characters from a command so that it can be passed
    as a double quoted (" ") string in a (ba)sh command.

    Args:
            command: the command string to escape.

    Returns:
            The escaped command string. The required enclosing double
            quotes are NOT added and so should be added at some point by
            the caller.

    See also: http://www.tldp.org/LDP/abs/html/escapingsection.html
    """
    command = command.replace("\\", "\\\\")
    command = command.replace("$", r'\$')
    command = command.replace('"', r'\"')
    command = command.replace('`', r'\`')
    return command
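
# Illustrative sketch (not part of the original module): quoting a command for
# a remote double-quoted invocation, e.g. over ssh to a hypothetical host.
def _example_sh_escape():
    inner = 'echo "cost: $5" > /tmp/out'
    # sh_escape() backslash-escapes the quotes and the dollar sign, so the
    # inner command survives the outer pair of double quotes intact
    remote_cmd = 'ssh somehost "%s"' % sh_escape(inner)
    return remote_cmd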