#
# Copyright 2008 Google Inc. Released under the GPL v2

import os, pickle, random, re, resource, select, shutil, signal, StringIO
import socket, struct, subprocess, sys, time, textwrap, urlparse
import warnings, smtplib, logging, urllib2
from autotest_lib.client.common_lib import error, barrier, logging_manager

def deprecated(func):
    """This is a decorator which can be used to mark functions as deprecated.
    It will result in a warning being emitted when the function is used."""
    def new_func(*args, **dargs):
        warnings.warn("Call to deprecated function %s." % func.__name__,
                      category=DeprecationWarning)
        return func(*args, **dargs)
    new_func.__name__ = func.__name__
    new_func.__doc__ = func.__doc__
    new_func.__dict__.update(func.__dict__)
    return new_func
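
# Illustrative usage of the decorator above (not part of the original module):
#
#     @deprecated
#     def old_helper():
#         return 42
#
#     old_helper()   # still returns 42, but triggers a DeprecationWarning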
20
21
showard108d73e2009-06-22 18:14:41 +000022class _NullStream(object):
23 def write(self, data):
24 pass
25
26
27 def flush(self):
28 pass
29
30
31TEE_TO_LOGS = object()
32_the_null_stream = _NullStream()
33
showardb45a4662009-07-15 14:27:56 +000034DEFAULT_STDOUT_LEVEL = logging.DEBUG
35DEFAULT_STDERR_LEVEL = logging.ERROR
36
showard108d73e2009-06-22 18:14:41 +000037def get_stream_tee_file(stream, level):
38 if stream is None:
39 return _the_null_stream
40 if stream is TEE_TO_LOGS:
41 return logging_manager.LoggingFile(level=level)
42 return stream
43
44
mbligh849a0f62008-08-28 20:12:19 +000045class BgJob(object):
showard170873e2009-01-07 00:22:26 +000046 def __init__(self, command, stdout_tee=None, stderr_tee=None, verbose=True,
showardb45a4662009-07-15 14:27:56 +000047 stdin=None, stderr_level=DEFAULT_STDERR_LEVEL):
mbligh849a0f62008-08-28 20:12:19 +000048 self.command = command
showardb45a4662009-07-15 14:27:56 +000049 self.stdout_tee = get_stream_tee_file(stdout_tee, DEFAULT_STDOUT_LEVEL)
50 self.stderr_tee = get_stream_tee_file(stderr_tee, stderr_level)
mbligh849a0f62008-08-28 20:12:19 +000051 self.result = CmdResult(command)
mblighbd96b452008-09-03 23:14:27 +000052 if verbose:
showardb18134f2009-03-20 20:52:18 +000053 logging.debug("Running '%s'" % command)
mbligh849a0f62008-08-28 20:12:19 +000054 self.sp = subprocess.Popen(command, stdout=subprocess.PIPE,
55 stderr=subprocess.PIPE,
56 preexec_fn=self._reset_sigpipe, shell=True,
showard170873e2009-01-07 00:22:26 +000057 executable="/bin/bash",
58 stdin=stdin)
mbligh849a0f62008-08-28 20:12:19 +000059
60
61 def output_prepare(self, stdout_file=None, stderr_file=None):
62 self.stdout_file = stdout_file
63 self.stderr_file = stderr_file
64
mbligh45ffc432008-12-09 23:35:17 +000065
mbligh849a0f62008-08-28 20:12:19 +000066 def process_output(self, stdout=True, final_read=False):
67 """output_prepare must be called prior to calling this"""
68 if stdout:
69 pipe, buf, tee = self.sp.stdout, self.stdout_file, self.stdout_tee
70 else:
71 pipe, buf, tee = self.sp.stderr, self.stderr_file, self.stderr_tee
72
73 if final_read:
74 # read in all the data we can from pipe and then stop
75 data = []
76 while select.select([pipe], [], [], 0)[0]:
77 data.append(os.read(pipe.fileno(), 1024))
78 if len(data[-1]) == 0:
79 break
80 data = "".join(data)
81 else:
82 # perform a single read
83 data = os.read(pipe.fileno(), 1024)
84 buf.write(data)
showard108d73e2009-06-22 18:14:41 +000085 tee.write(data)
mbligh849a0f62008-08-28 20:12:19 +000086
87
88 def cleanup(self):
showard108d73e2009-06-22 18:14:41 +000089 self.stdout_tee.flush()
90 self.stderr_tee.flush()
mbligh849a0f62008-08-28 20:12:19 +000091 self.sp.stdout.close()
92 self.sp.stderr.close()
93 self.result.stdout = self.stdout_file.getvalue()
94 self.result.stderr = self.stderr_file.getvalue()
95
96
97 def _reset_sigpipe(self):
98 signal.signal(signal.SIGPIPE, signal.SIG_DFL)
99
mbligh81edd792008-08-26 16:54:02 +0000100
def ip_to_long(ip):
    # !L is a long in network byte order
    return struct.unpack('!L', socket.inet_aton(ip))[0]


def long_to_ip(number):
    # See above comment.
    return socket.inet_ntoa(struct.pack('!L', number))


def create_subnet_mask(bits):
    return (1 << 32) - (1 << 32-bits)


def format_ip_with_mask(ip, mask_bits):
    masked_ip = ip_to_long(ip) & create_subnet_mask(mask_bits)
    return "%s/%s" % (long_to_ip(masked_ip), mask_bits)
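
# Illustrative worked example for the helpers above (not part of the original
# module): create_subnet_mask(24) == 0xffffff00, so masking keeps the first
# three octets.
#
#     ip_to_long('192.168.1.77')               # -> 3232235853
#     format_ip_with_mask('192.168.1.77', 24)  # -> '192.168.1.0/24'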


def normalize_hostname(alias):
    ip = socket.gethostbyname(alias)
    return socket.gethostbyaddr(ip)[0]


def get_ip_local_port_range():
    match = re.match(r'\s*(\d+)\s*(\d+)\s*$',
                     read_one_line('/proc/sys/net/ipv4/ip_local_port_range'))
    return (int(match.group(1)), int(match.group(2)))


def set_ip_local_port_range(lower, upper):
    write_one_line('/proc/sys/net/ipv4/ip_local_port_range',
                   '%d %d\n' % (lower, upper))


def send_email(mail_from, mail_to, subject, body):
    """
    Sends an email via SMTP

    mail_from: string with email address of sender
    mail_to: string or list with email address(es) of recipients
    subject: string with subject of email
    body: (multi-line) string with body of email
    """
    if isinstance(mail_to, str):
        mail_to = [mail_to]
    msg = "From: %s\nTo: %s\nSubject: %s\n\n%s" % (mail_from, ','.join(mail_to),
                                                   subject, body)
    try:
        mailer = smtplib.SMTP('localhost')
        try:
            mailer.sendmail(mail_from, mail_to, msg)
        finally:
            mailer.quit()
    except Exception, e:
        # Sending email is non-critical, so report the failure rather than
        # raising it as an error
        print "Sending email failed. Reason: %s" % repr(e)


def read_one_line(filename):
    return open(filename, 'r').readline().rstrip('\n')


def write_one_line(filename, line):
    open_write_close(filename, line.rstrip('\n') + '\n')


def open_write_close(filename, data):
    f = open(filename, 'w')
    try:
        f.write(data)
    finally:
        f.close()


def read_keyval(path):
    """
    Read a key-value pair format file into a dictionary, and return it.
    Takes either a filename or directory name as input. If it's a
    directory name, we assume you want the file to be called keyval.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    keyval = {}
    if os.path.exists(path):
        for line in open(path):
            line = re.sub('#.*', '', line).rstrip()
            if not re.search(r'^[-\.\w]+=', line):
                raise ValueError('Invalid format line: %s' % line)
            key, value = line.split('=', 1)
            if re.search('^\d+$', value):
                value = int(value)
            elif re.search('^(\d+\.)?\d+$', value):
                value = float(value)
            keyval[key] = value
    return keyval


def write_keyval(path, dictionary, type_tag=None):
    """
    Write a key-value pair format file out to a file. This uses append
    mode to open the file, so existing text will not be overwritten or
    reparsed.

    If type_tag is None, then the key must be composed of alphanumeric
    characters (or dashes+underscores). However, if type_tag is not
    None then the keys must also have "{type_tag}" as a suffix. At
    the moment the only valid values of type_tag are "attr" and "perf".
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    keyval = open(path, 'a')

    if type_tag is None:
        key_regex = re.compile(r'^[-\.\w]+$')
    else:
        if type_tag not in ('attr', 'perf'):
            raise ValueError('Invalid type tag: %s' % type_tag)
        escaped_tag = re.escape(type_tag)
        key_regex = re.compile(r'^[-\.\w]+\{%s\}$' % escaped_tag)
    try:
        for key in sorted(dictionary.keys()):
            if not key_regex.search(key):
                raise ValueError('Invalid key: %s' % key)
            keyval.write('%s=%s\n' % (key, dictionary[key]))
    finally:
        keyval.close()
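
# Illustrative example (not part of the original module): keyval files are
# plain "key=value" lines, so the two helpers round-trip simple dictionaries
# (assuming /tmp/results is an existing results directory, in which case the
# file used is /tmp/results/keyval):
#
#     write_keyval('/tmp/results', {'kernel': '2.6.30', 'iterations': 3})
#     read_keyval('/tmp/results')
#     # -> {'kernel': '2.6.30', 'iterations': 3}
#
#     # performance values take a "{perf}" suffix on the key:
#     write_keyval('/tmp/results', {'throughput{perf}': 42.5}, type_tag='perf')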


def is_url(path):
    """Return true if path looks like a URL"""
    # for now, just handle http and ftp
    url_parts = urlparse.urlparse(path)
    return (url_parts[0] in ('http', 'ftp'))


def urlopen(url, data=None, timeout=5):
    """Wrapper to urllib2.urlopen with timeout addition."""

    # Save old timeout
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        return urllib2.urlopen(url, data=data)
    finally:
        socket.setdefaulttimeout(old_timeout)


def urlretrieve(url, filename, data=None, timeout=300):
    """Retrieve a file from given url."""
    logging.debug('Fetching %s -> %s', url, filename)

    src_file = urlopen(url, data=data, timeout=timeout)
    try:
        dest_file = open(filename, 'wb')
        try:
            shutil.copyfileobj(src_file, dest_file)
        finally:
            dest_file.close()
    finally:
        src_file.close()


def get_file(src, dest, permissions=None):
    """Get a file from src, which can be local or a remote URL"""
    if src == dest:
        return

    if is_url(src):
        urlretrieve(src, dest)
    else:
        shutil.copyfile(src, dest)

    if permissions:
        os.chmod(dest, permissions)
    return dest


def unmap_url(srcdir, src, destdir='.'):
    """
    Receives either a path to a local file or a URL.
    returns either the path to the local file, or the fetched URL

    unmap_url('/usr/src', 'foo.tar', '/tmp')
                            = '/usr/src/foo.tar'
    unmap_url('/usr/src', 'http://site/file', '/tmp')
                            = '/tmp/file'
                            (after retrieving it)
    """
    if is_url(src):
        url_parts = urlparse.urlparse(src)
        filename = os.path.basename(url_parts[2])
        dest = os.path.join(destdir, filename)
        return get_file(src, dest)
    else:
        return os.path.join(srcdir, src)
mbligh6231cd62008-02-02 19:18:33 +0000298
299
300def update_version(srcdir, preserve_srcdir, new_version, install,
jadmanski0afbb632008-06-06 21:10:57 +0000301 *args, **dargs):
302 """
303 Make sure srcdir is version new_version
mbligh6231cd62008-02-02 19:18:33 +0000304
jadmanski0afbb632008-06-06 21:10:57 +0000305 If not, delete it and install() the new version.
mbligh6231cd62008-02-02 19:18:33 +0000306
jadmanski0afbb632008-06-06 21:10:57 +0000307 In the preserve_srcdir case, we just check it's up to date,
308 and if not, we rerun install, without removing srcdir
309 """
310 versionfile = os.path.join(srcdir, '.version')
311 install_needed = True
mbligh6231cd62008-02-02 19:18:33 +0000312
jadmanski0afbb632008-06-06 21:10:57 +0000313 if os.path.exists(versionfile):
314 old_version = pickle.load(open(versionfile))
315 if old_version == new_version:
316 install_needed = False
mbligh6231cd62008-02-02 19:18:33 +0000317
jadmanski0afbb632008-06-06 21:10:57 +0000318 if install_needed:
319 if not preserve_srcdir and os.path.exists(srcdir):
320 shutil.rmtree(srcdir)
321 install(*args, **dargs)
322 if os.path.exists(srcdir):
323 pickle.dump(new_version, open(versionfile, 'w'))
mbligh462c0152008-03-13 15:37:10 +0000324
325
showardb45a4662009-07-15 14:27:56 +0000326def get_stderr_level(stderr_is_expected):
327 if stderr_is_expected:
328 return DEFAULT_STDOUT_LEVEL
329 return DEFAULT_STDERR_LEVEL


def run(command, timeout=None, ignore_status=False,
        stdout_tee=None, stderr_tee=None, verbose=True, stdin=None,
        stderr_is_expected=None):
    """
    Run a command on the host.

    Args:
        command: the command line string
        timeout: time limit in seconds before attempting to
                 kill the running process. The run() function
                 will take a few seconds longer than 'timeout'
                 to complete if it has to kill the process.
        ignore_status: do not raise an exception, no matter what
                 the exit code of the command is.
        stdout_tee: optional file-like object to which stdout data
                    will be written as it is generated (data will still
                    be stored in result.stdout)
        stderr_tee: likewise for stderr
        verbose: if True, log the command being run
        stdin: stdin to pass to the executed process

    Returns:
        a CmdResult object

    Raises:
        CmdError: the exit code of the command
                  execution was not 0
    """
    if stderr_is_expected is None:
        stderr_is_expected = ignore_status
    bg_job = join_bg_jobs(
        (BgJob(command, stdout_tee, stderr_tee, verbose, stdin=stdin,
               stderr_level=get_stderr_level(stderr_is_expected)),),
        timeout)[0]
    if not ignore_status and bg_job.result.exit_status:
        raise error.CmdError(command, bg_job.result,
                             "Command returned non-zero exit status")

    return bg_job.result
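
# Illustrative usage of run() (not part of the original module):
#
#     result = run('uname -r', timeout=10)
#     result.exit_status        # -> 0
#     result.stdout             # kernel release string, trailing newline kept
#
#     result = run('false', ignore_status=True)
#     result.exit_status        # -> 1; no CmdError because ignore_status=True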


def run_parallel(commands, timeout=None, ignore_status=False,
                 stdout_tee=None, stderr_tee=None):
    """Behaves the same as run() with the following exceptions:

    - commands is a list of commands to run in parallel.
    - ignore_status toggles whether or not an exception should be raised
      on any error.

    returns a list of CmdResult objects
    """
    bg_jobs = []
    for command in commands:
        bg_jobs.append(BgJob(command, stdout_tee, stderr_tee,
                             stderr_level=get_stderr_level(ignore_status)))

    # Updates objects in bg_jobs list with their process information
    join_bg_jobs(bg_jobs, timeout)

    for bg_job in bg_jobs:
        if not ignore_status and bg_job.result.exit_status:
            raise error.CmdError(bg_job.command, bg_job.result,
                                 "Command returned non-zero exit status")

    return [bg_job.result for bg_job in bg_jobs]
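
# Illustrative usage of run_parallel() (not part of the original module): the
# commands run concurrently and the results come back in input order.
#
#     results = run_parallel(['echo a', 'echo b'], timeout=30)
#     [r.stdout for r in results]    # -> ['a\n', 'b\n']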


@deprecated
def run_bg(command):
    """Function deprecated. Please use BgJob class instead."""
    bg_job = BgJob(command)
    return bg_job.sp, bg_job.result


def join_bg_jobs(bg_jobs, timeout=None):
    """Joins the bg_jobs with the current thread.

    Returns the same list of bg_jobs objects that was passed in.
    """
    timeout_error = False
    for bg_job in bg_jobs:
        bg_job.output_prepare(StringIO.StringIO(), StringIO.StringIO())

    try:
        # We are holding ends to stdin, stdout pipes
        # hence we need to be sure to close those fds no matter what
        start_time = time.time()
        timeout_error = _wait_for_commands(bg_jobs, start_time, timeout)

        for bg_job in bg_jobs:
            # Process stdout and stderr
            bg_job.process_output(stdout=True, final_read=True)
            bg_job.process_output(stdout=False, final_read=True)
    finally:
        # close our ends of the pipes to the sp no matter what
        for bg_job in bg_jobs:
            bg_job.cleanup()

    if timeout_error:
        # TODO: This needs to be fixed to better represent what happens when
        # running in parallel. However this is backwards compatible, so it
        # will do for the time being.
        raise error.CmdError(bg_jobs[0].command, bg_jobs[0].result,
                             "Command(s) did not complete within %d seconds"
                             % timeout)

    return bg_jobs
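
# Illustrative lower-level usage (not part of the original module): BgJob plus
# join_bg_jobs() is the machinery that run() and run_parallel() build on.
#
#     jobs = [BgJob('echo hello'), BgJob('echo world')]
#     join_bg_jobs(jobs, timeout=10)
#     [job.result.exit_status for job in jobs]    # -> [0, 0]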


def _wait_for_commands(bg_jobs, start_time, timeout):
    # This returns True if it must return due to a timeout, otherwise False.

    # To check for processes which terminate without producing any output
    # a 1 second timeout is used in select.
    SELECT_TIMEOUT = 1

    select_list = []
    reverse_dict = {}
    for bg_job in bg_jobs:
        select_list.append(bg_job.sp.stdout)
        select_list.append(bg_job.sp.stderr)
        reverse_dict[bg_job.sp.stdout] = (bg_job, True)
        reverse_dict[bg_job.sp.stderr] = (bg_job, False)

    if timeout:
        stop_time = start_time + timeout
        time_left = stop_time - time.time()
    else:
        time_left = None  # so that select never times out
    while not timeout or time_left > 0:
        # select will return when stdout is ready (including when it is
        # EOF, that is the process has terminated).
        ready, _, _ = select.select(select_list, [], [], SELECT_TIMEOUT)

        # os.read() has to be used instead of
        # subproc.stdout.read() which will otherwise block
        for fileno in ready:
            bg_job, stdout = reverse_dict[fileno]
            bg_job.process_output(stdout)

        remaining_jobs = [x for x in bg_jobs if x.result.exit_status is None]
        if len(remaining_jobs) == 0:
            return False
        for bg_job in remaining_jobs:
            bg_job.result.exit_status = bg_job.sp.poll()

        if timeout:
            time_left = stop_time - time.time()

    # Kill all processes which did not complete prior to timeout
    for bg_job in [x for x in bg_jobs if x.result.exit_status is None]:
        print '* Warning: run process timeout (%s) fired' % timeout
        nuke_subprocess(bg_job.sp)
        bg_job.result.exit_status = bg_job.sp.poll()

    return True


def nuke_subprocess(subproc):
    # check if the subprocess is still alive, first
    if subproc.poll() is not None:
        return subproc.poll()

    # the process has not terminated within timeout,
    # kill it via an escalating series of signals.
    signal_queue = [signal.SIGTERM, signal.SIGKILL]
    for sig in signal_queue:
        try:
            os.kill(subproc.pid, sig)
        # The process may have died before we could kill it.
        except OSError:
            pass

        for i in range(5):
            rc = subproc.poll()
            if rc is not None:
                return rc
            time.sleep(1)


def nuke_pid(pid):
    # the process has not terminated within timeout,
    # kill it via an escalating series of signals.
    signal_queue = [signal.SIGTERM, signal.SIGKILL]
    for sig in signal_queue:
        try:
            os.kill(pid, sig)

        # The process may have died before we could kill it.
        except OSError:
            pass

        try:
            for i in range(5):
                status = os.waitpid(pid, os.WNOHANG)[0]
                if status == pid:
                    return
                time.sleep(1)

            if status != pid:
                raise error.AutoservRunError('Could not kill %d'
                                             % pid, None)

        # the process died before we join it.
        except OSError:
            pass


def pid_is_alive(pid):
    """
    True if process pid exists and is not yet stuck in Zombie state.
    Zombies are impossible to move between cgroups, etc.
    pid can be integer, or text of integer.
    """
    try:
        stat = read_one_line('/proc/%s/stat' % pid)  # pid exists
        return stat.split()[2] != 'Z'                # and is not in Zombie state
    except Exception:
        return False                                 # process no longer exists at all
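
# Illustrative cleanup sequence (not part of the original module); some_pid is
# a hypothetical integer PID:
#
#     if pid_is_alive(some_pid):
#         nuke_pid(some_pid)    # SIGTERM first, then SIGKILL if still alive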


def system(command, timeout=None, ignore_status=False):
    """This function returns the exit status of command."""
    return run(command, timeout=timeout, ignore_status=ignore_status,
               stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS).exit_status


def system_parallel(commands, timeout=None, ignore_status=False):
    """This function returns a list of exit statuses for the respective
    list of commands."""
    return [bg_jobs.exit_status for bg_jobs in
            run_parallel(commands, timeout=timeout, ignore_status=ignore_status,
                         stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)]


def system_output(command, timeout=None, ignore_status=False,
                  retain_output=False):
    if retain_output:
        out = run(command, timeout=timeout, ignore_status=ignore_status,
                  stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS).stdout
    else:
        out = run(command, timeout=timeout, ignore_status=ignore_status).stdout
    if out[-1:] == '\n':
        out = out[:-1]
    return out


def system_output_parallel(commands, timeout=None, ignore_status=False,
                           retain_output=False):
    if retain_output:
        out = [bg_job.stdout for bg_job
               in run_parallel(commands, timeout=timeout,
                               ignore_status=ignore_status,
                               stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)]
    else:
        out = [bg_job.stdout for bg_job
               in run_parallel(commands, timeout=timeout,
                               ignore_status=ignore_status)]
    # strip a single trailing newline from each command's output, mirroring
    # system_output()
    for i, x in enumerate(out):
        if x[-1:] == '\n':
            out[i] = x[:-1]
    return out


def strip_unicode(input):
    if type(input) == list:
        return [strip_unicode(i) for i in input]
    elif type(input) == dict:
        output = {}
        for key in input.keys():
            output[str(key)] = strip_unicode(input[key])
        return output
    elif type(input) == unicode:
        return str(input)
    else:
        return input
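
# Illustrative example (not part of the original module): containers are
# converted recursively, while non-unicode values pass through unchanged.
#
#     strip_unicode({u'host': u'localhost', 'port': 22, 'tags': [u'smoke']})
#     # -> {'host': 'localhost', 'port': 22, 'tags': ['smoke']}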


def get_cpu_percentage(function, *args, **dargs):
    """Returns a tuple containing the CPU% and return value from function call.

    This function calculates the usage time by taking the difference of
    the user and system times both before and after the function call.
    """
    child_pre = resource.getrusage(resource.RUSAGE_CHILDREN)
    self_pre = resource.getrusage(resource.RUSAGE_SELF)
    start = time.time()
    to_return = function(*args, **dargs)
    elapsed = time.time() - start
    self_post = resource.getrusage(resource.RUSAGE_SELF)
    child_post = resource.getrusage(resource.RUSAGE_CHILDREN)

    # Calculate CPU Percentage
    s_user, s_system = [a - b for a, b in zip(self_post, self_pre)[:2]]
    c_user, c_system = [a - b for a, b in zip(child_post, child_pre)[:2]]
    cpu_percent = (s_user + c_user + s_system + c_system) / elapsed

    return cpu_percent, to_return
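
# Illustrative usage (not part of the original module): the first element is
# CPU time (self + children) divided by wall-clock time, so a sleeping call
# stays near 0.0 while a busy loop approaches 1.0.
#
#     usage, _ = get_cpu_percentage(time.sleep, 1)   # usage is close to 0.0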


"""
This function is used when there is a need to run more than one
job simultaneously starting exactly at the same time. It basically returns
a modified control file (containing the synchronization code prepended)
whenever it is ready to run the control file. The synchronization
is done using barriers to make sure that the jobs start at the same time.

Here is how the synchronization is done to make sure that the tests
start at exactly the same time on the client.
sc_bar is a server barrier and s_bar, c_bar are the normal barriers

           Job1           Job2          ......       JobN
 Server:    |             sc_bar
 Server:    |             s_bar         ......       s_bar
 Server:    |  at.run()   at.run()      ......       at.run()
 -----------|------------------------------------------------------
 Client     |  sc_bar
 Client     |  c_bar      c_bar         ......       c_bar
 Client     |  <run test> <run test>    ......       <run test>


PARAMS:
   control_file : The control file to which the above synchronization
                  code will be prepended
   host_name : The host name on which the job is going to run
   host_num (non-negative) : A number to identify the machine so that we have
                  different sets of s_bar_ports for each of the machines.
   instance : The number of the job
   num_jobs : Total number of jobs that are going to run in parallel with
                  this job starting at the same time
   port_base : Port number that is used to derive the actual barrier ports.

RETURN VALUE:
    The modified control file.

"""
def get_sync_control_file(control, host_name, host_num,
                          instance, num_jobs, port_base=63100):
    sc_bar_port = port_base
    c_bar_port = port_base
    if host_num < 0:
        print "Please provide a non-negative number for the host"
        return None
    s_bar_port = port_base + 1 + host_num  # The set of s_bar_ports are
                                           # the same for a given machine

    sc_bar_timeout = 180
    s_bar_timeout = c_bar_timeout = 120

    # The barrier code snippet is prepended into the control file
    # dynamically before at.run() is called finally.
    control_new = []

    # jobid is the unique name used to identify the processes
    # trying to reach the barriers
    jobid = "%s#%d" % (host_name, instance)

    rendv = []
    # rendvstr is a temp holder for the rendezvous list of the processes
    for n in range(num_jobs):
        rendv.append("'%s#%d'" % (host_name, n))
    rendvstr = ",".join(rendv)

    if instance == 0:
        # Do the setup and wait at the server barrier
        # Clean up the tmp and the control dirs for the first instance
        control_new.append('if os.path.exists(job.tmpdir):')
        control_new.append("\t system('umount -f %s > /dev/null "
                           "2> /dev/null' % job.tmpdir,"
                           "ignore_status=True)")
        control_new.append("\t system('rm -rf ' + job.tmpdir)")
        control_new.append(
            'b0 = job.barrier("%s", "sc_bar", %d, port=%d)'
            % (jobid, sc_bar_timeout, sc_bar_port))
        control_new.append(
            'b0.rendezvous_servers("PARALLEL_MASTER", "%s")'
            % jobid)

    elif instance == 1:
        # Wait at the server barrier to wait for instance=0
        # process to complete setup
        b0 = barrier.barrier("PARALLEL_MASTER", "sc_bar", sc_bar_timeout,
                             port=sc_bar_port)
        b0.rendezvous_servers("PARALLEL_MASTER", jobid)

        if num_jobs > 2:
            b1 = barrier.barrier(jobid, "s_bar", s_bar_timeout,
                                 port=s_bar_port)
            b1.rendezvous(rendvstr)

    else:
        # For the rest of the clients
        b2 = barrier.barrier(jobid, "s_bar", s_bar_timeout, port=s_bar_port)
        b2.rendezvous(rendvstr)

    # Client side barrier for all the tests to start at the same time
    control_new.append('b1 = job.barrier("%s", "c_bar", %d, port=%d)'
                       % (jobid, c_bar_timeout, c_bar_port))
    control_new.append("b1.rendezvous(%s)" % rendvstr)

    # Stick in the rest of the control file
    control_new.append(control)

    return "\n".join(control_new)
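
# Illustrative call (not part of the original module): for the second of three
# synchronized jobs running on host 'client2' (host_num=1, instance=1), the
# caller would do something like:
#
#     control_text = open('control').read()
#     synced = get_sync_control_file(control_text, 'client2', 1, 1, 3)
#
# and then hand the returned control text to the job that runs on the client.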


def get_arch(run_function=run):
    """
    Get the hardware architecture of the machine.
    run_function is used to execute the commands. It defaults to
    utils.run() but a custom method (if provided) should be of the
    same schema as utils.run. It should return a CmdResult object and
    throw a CmdError exception.
    """
    arch = run_function('/bin/uname -m').stdout.rstrip()
    if re.match(r'i\d86$', arch):
        arch = 'i386'
    return arch


def get_num_logical_cpus_per_socket(run_function=run):
    """
    Get the number of cores (including hyperthreading) per cpu.
    run_function is used to execute the commands. It defaults to
    utils.run() but a custom method (if provided) should be of the
    same schema as utils.run. It should return a CmdResult object and
    throw a CmdError exception.
    """
    siblings = run_function('grep "^siblings" /proc/cpuinfo').stdout.rstrip()
    num_siblings = map(int,
                       re.findall(r'^siblings\s*:\s*(\d+)\s*$',
                                  siblings, re.M))
    if len(num_siblings) == 0:
        raise error.TestError('Unable to find siblings info in /proc/cpuinfo')
    if min(num_siblings) != max(num_siblings):
        raise error.TestError('Number of siblings differ %r' %
                              num_siblings)
    return num_siblings[0]


def merge_trees(src, dest):
    """
    Merges a source directory tree at 'src' into a destination tree at
    'dest'. If a path is a file in both trees then the file in the source
    tree is APPENDED to the one in the destination tree. If a path is
    a directory in both trees then the directories are recursively merged
    with this function. In any other case, the function will skip the
    paths that cannot be merged (instead of failing).
    """
    if not os.path.exists(src):
        return  # exists only in dest
    elif not os.path.exists(dest):
        if os.path.isfile(src):
            shutil.copy2(src, dest)  # file only in src
        else:
            shutil.copytree(src, dest, symlinks=True)  # dir only in src
        return
    elif os.path.isfile(src) and os.path.isfile(dest):
        # src & dest are files in both trees, append src to dest
        destfile = open(dest, "a")
        try:
            srcfile = open(src)
            try:
                destfile.write(srcfile.read())
            finally:
                srcfile.close()
        finally:
            destfile.close()
    elif os.path.isdir(src) and os.path.isdir(dest):
        # src & dest are directories in both trees, so recursively merge
        for name in os.listdir(src):
            merge_trees(os.path.join(src, name), os.path.join(dest, name))
    else:
        # src & dest both exist, but are incompatible
        return


class CmdResult(object):
    """
    Command execution result.

    command:     String containing the command line itself
    exit_status: Integer exit code of the process
    stdout:      String containing stdout of the process
    stderr:      String containing stderr of the process
    duration:    Elapsed wall clock time running the process
    """


    def __init__(self, command="", stdout="", stderr="",
                 exit_status=None, duration=0):
        self.command = command
        self.exit_status = exit_status
        self.stdout = stdout
        self.stderr = stderr
        self.duration = duration


    def __repr__(self):
        wrapper = textwrap.TextWrapper(width=78,
                                       initial_indent="\n    ",
                                       subsequent_indent="    ")

        stdout = self.stdout.rstrip()
        if stdout:
            stdout = "\nstdout:\n%s" % stdout

        stderr = self.stderr.rstrip()
        if stderr:
            stderr = "\nstderr:\n%s" % stderr

        return ("* Command: %s\n"
                "Exit status: %s\n"
                "Duration: %s\n"
                "%s"
                "%s"
                % (wrapper.fill(self.command), self.exit_status,
                   self.duration, stdout, stderr))


class run_randomly:
    def __init__(self, run_sequentially=False):
        # Run sequentially is for debugging control files
        self.test_list = []
        self.run_sequentially = run_sequentially


    def add(self, *args, **dargs):
        test = (args, dargs)
        self.test_list.append(test)


    def run(self, fn):
        while self.test_list:
            test_index = random.randint(0, len(self.test_list)-1)
            if self.run_sequentially:
                test_index = 0
            (args, dargs) = self.test_list.pop(test_index)
            fn(*args, **dargs)


def import_site_symbol(path, module, name, dummy=None, modulefile=None):
    """
    Try to import site specific symbol from site specific file if it exists

    @param path full filename of the source file calling this (ie __file__)
    @param module full module name
    @param name symbol name to be imported from the site file
    @param dummy dummy value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific symbol or dummy

    @exception ImportError if the site file exists but the import fails
    """
    short_module = module[module.rfind(".") + 1:]

    if not modulefile:
        modulefile = short_module + ".py"

    try:
        site_exists = os.path.getsize(os.path.join(os.path.dirname(path),
                                                   modulefile))
    except os.error:
        site_exists = False

    msg = None
    if site_exists:
        # special unique value to tell us if the symbol can't be imported
        cant_import = object()

        # return the object from the imported module
        obj = getattr(__import__(module, {}, {}, [short_module]), name,
                      cant_import)
        if obj is cant_import:
            msg = ("unable to import site symbol '%s', using non-site "
                   "implementation") % name
            logging.error(msg)
            obj = dummy
    else:
        obj = dummy

    return obj


def import_site_class(path, module, classname, baseclass, modulefile=None):
    """
    Try to import site specific class from site specific file if it exists

    Args:
        path: full filename of the source file calling this (ie __file__)
        module: full module name
        classname: class name to be loaded from site file
        baseclass: base class object to return when no site file present or
            to mixin when site class exists but is not inherited from baseclass
        modulefile: module filename

    Returns: baseclass if site specific class does not exist, the site specific
        class if it exists and is inherited from baseclass or a mixin of the
        site specific class and baseclass when the site specific class exists
        and is not inherited from baseclass

    Raises: ImportError if the site file exists but the import fails
    """

    res = import_site_symbol(path, module, classname, None, modulefile)
    if res:
        if not issubclass(res, baseclass):
            # if not a subclass of baseclass then mix in baseclass with the
            # site specific class object and return the result
            res = type(classname, (res, baseclass), {})
    else:
        res = baseclass

    return res


def import_site_function(path, module, funcname, dummy, modulefile=None):
    """
    Try to import site specific function from site specific file if it exists

    Args:
        path: full filename of the source file calling this (ie __file__)
        module: full module name
        funcname: function name to be imported from site file
        dummy: dummy function to return in case there is no function to import
        modulefile: module filename

    Returns: site specific function object or dummy

    Raises: ImportError if the site file exists but the import fails
    """

    return import_site_symbol(path, module, funcname, dummy, modulefile)


def write_pid(program_name):
    """
    Try to drop <program_name>.pid in the main autotest directory.

    Args:
        program_name: prefix for file name
    """

    my_path = os.path.dirname(__file__)
    pid_path = os.path.abspath(os.path.join(my_path, "../.."))
    pidf = open(os.path.join(pid_path, "%s.pid" % program_name), "w")
    if pidf:
        pidf.write("%s\n" % os.getpid())
        pidf.close()


def get_relative_path(path, reference):
    """Given 2 absolute paths "path" and "reference", compute the path of
    "path" as relative to the directory "reference".

    @param path the absolute path to convert to a relative path
    @param reference an absolute directory path to which the relative
        path will be computed
    """
    # normalize the paths (remove double slashes, etc)
    assert(os.path.isabs(path))
    assert(os.path.isabs(reference))

    path = os.path.normpath(path)
    reference = os.path.normpath(reference)

    # we could use os.path.split() but it splits from the end
    path_list = path.split(os.path.sep)[1:]
    ref_list = reference.split(os.path.sep)[1:]

    # find the longest leading common path
    for i in xrange(min(len(path_list), len(ref_list))):
        if path_list[i] != ref_list[i]:
            # decrement i so when exiting this loop either by no match or by
            # end of range we are one step behind
            i -= 1
            break
    i += 1
    # drop the common part of the paths, not interested in that anymore
    del path_list[:i]

    # for each uncommon component in the reference prepend a ".."
    path_list[:0] = ['..'] * (len(ref_list) - i)

    return os.path.join(*path_list)
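
# Illustrative worked example (not part of the original module):
#
#     get_relative_path('/usr/local/autotest/results', '/usr/local/src')
#     # -> '../autotest/results'
#
# The common prefix /usr/local is dropped, one '..' is added for the uncommon
# 'src' component of the reference, and the remainder of path follows.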


def sh_escape(command):
    """
    Escape special characters from a command so that it can be passed
    as a double quoted (" ") string in a (ba)sh command.

    Args:
        command: the command string to escape.

    Returns:
        The escaped command string. The required enclosing double
        quotes are NOT added and so should be added at some point by
        the caller.

    See also: http://www.tldp.org/LDP/abs/html/escapingsection.html
    """
    command = command.replace("\\", "\\\\")
    command = command.replace("$", r'\$')
    command = command.replace('"', r'\"')
    command = command.replace('`', r'\`')
    return command
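
# Illustrative usage (not part of the original module): escape first, then add
# the surrounding double quotes yourself.
#
#     sh_escape('echo "$HOME"')            # -> 'echo \\"\\$HOME\\"'
#     'ssh host "%s"' % sh_escape(cmd)     # cmd is any hypothetical command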