# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# repohooks/pre-upload.py currently does not run pylint. But for developers who
# want to check their code manually, we disable several harmless pylint
# warnings which just distract from more serious remaining issues.
#
# The instance variables _host and _install_paths are not defined in __init__().
# pylint: disable=attribute-defined-outside-init
#
# Many short variable names don't follow the naming convention.
# pylint: disable=invalid-name
#
# _parse_result() and _dir_size() don't access self and could be functions.
# pylint: disable=no-self-use
#
# _ChromeLogin and _TradefedLogCollector have no public methods.
# pylint: disable=too-few-public-methods

import contextlib
import errno
import glob
import hashlib
import lockfile
import logging
import os
import pipes
import random
import re
import shutil
import stat
import tempfile
import urlparse

from autotest_lib.client.bin import utils as client_utils
from autotest_lib.client.common_lib import base_utils
from autotest_lib.client.common_lib import error
from autotest_lib.client.common_lib.cros import dev_server
from autotest_lib.server import autotest
from autotest_lib.server import test
from autotest_lib.server import utils

# TODO(ihf): Find a home for all these paths. This is getting out of hand.
_SDK_TOOLS_DIR_M = 'gs://chromeos-arc-images/builds/git_mnc-dr-arc-dev-linux-static_sdk_tools/3554341'
_SDK_TOOLS_FILES = ['aapt']
# To stabilize adb behavior, we use dynamically linked adb.
_ADB_DIR_M = 'gs://chromeos-arc-images/builds/git_mnc-dr-arc-dev-linux-cheets_arm-user/3554341'
_ADB_FILES = ['adb']

_ADB_POLLING_INTERVAL_SECONDS = 1
_ADB_READY_TIMEOUT_SECONDS = 60
_ANDROID_ADB_KEYS_PATH = '/data/misc/adb/adb_keys'

_ARC_POLLING_INTERVAL_SECONDS = 1
_ARC_READY_TIMEOUT_SECONDS = 60

_TRADEFED_PREFIX = 'autotest-tradefed-install_'
_TRADEFED_CACHE_LOCAL = '/tmp/autotest-tradefed-cache'
_TRADEFED_CACHE_CONTAINER = '/usr/local/autotest/results/shared/cache'
_TRADEFED_CACHE_CONTAINER_LOCK = '/usr/local/autotest/results/shared/lock'

# According to dshi a drone has 500GB of disk space. It is ok for now to use
# 10GB of disk space, as no more than 10 tests should run in parallel.
# TODO(ihf): Investigate tighter cache size.
_TRADEFED_CACHE_MAX_SIZE = (10 * 1024 * 1024 * 1024)


class _ChromeLogin(object):
    """Context manager to handle Chrome login state."""

    def __init__(self, host):
        self._host = host

    def __enter__(self):
        """Logs into Chrome."""
        logging.info('Ensure Android is running...')
        autotest.Autotest(self._host).run_test('cheets_CTSHelper',
                                               check_client_result=True)

    def __exit__(self, exc_type, exc_value, traceback):
        """On exit, reboots the machine to wipe out all login state.

        @param exc_type: Exception type if an exception is raised from the
                         with-block.
        @param exc_value: Exception instance if an exception is raised from
                          the with-block.
        @param traceback: Stack trace info if an exception is raised from
                          the with-block.
        @return None, indicating not to ignore an exception from the with-block
                if raised.
        """
        logging.info('Rebooting...')
        try:
            self._host.reboot()
        except Exception:
            if exc_type is None:
                raise
            # If an exception is raised from the with-block, just record the
            # reboot failure to avoid masking the original exception.
            logging.exception('Rebooting failed.')

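# A minimal sketch of how _ChromeLogin is used: TradefedTest hands it out via
# _login_chrome() below, and concrete tests wrap their tradefed run in it,
# roughly:
#
#   with self._login_chrome():
#       self._ready_arc()
#       # ... invoke tradefed via self._run() ...
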
@contextlib.contextmanager
def lock(filename):
    """Prevents other autotest/tradefed instances from accessing cache."""
    filelock = lockfile.FileLock(filename)
    # It is tempting just to call filelock.acquire(3600). But the implementation
    # has very poor temporal granularity (timeout/10), which is unsuitable for
    # our needs. See /usr/lib64/python2.7/site-packages/lockfile/
    attempts = 0
    while not filelock.i_am_locking():
        try:
            attempts += 1
            logging.info('Waiting for cache lock...')
            filelock.acquire(random.randint(1, 5))
        except (lockfile.AlreadyLocked, lockfile.LockTimeout):
            if attempts > 1000:
                # Normally we should acquire the lock in a few seconds. Once we
                # have waited on the order of hours, either the dev server IO
                # is overloaded or a lock didn't get cleaned up. Take one for
                # the team, break the lock and report a failure. This should
                # fix the lock for following tests. If the failure affects more
                # than one job, look for a deadlock or dev server overload.
                logging.error('Permanent lock failure. Trying to break lock.')
                filelock.break_lock()
                raise error.TestFail('Error: permanent cache lock failure.')
        else:
            logging.info('Acquired cache lock after %d attempts.', attempts)
    try:
        yield
    finally:
        filelock.release()
        logging.info('Released cache lock.')

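# The lock() pattern as used further below by _install_bundle() and
# _install_files(): every download into the shared cache happens while holding
# the cache lock, e.g.
#
#   with lock(self._tradefed_cache_lock):
#       cache_path = self._download_to_cache(gs_uri)
#       local = self._instance_copy(cache_path)
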
@contextlib.contextmanager
def adb_keepalive(target, extra_paths):
    """A context manager that keeps the adb connection alive.

    adb_keepalive() will spin off a new process that will continuously poll for
    adb's connected state, and will attempt to reconnect if it ever goes down.
    This is the only way we can currently recover safely from (intentional)
    reboots.

    @param target: the hostname and port of the DUT.
    @param extra_paths: any additional components to the PATH environment
                        variable.
    """
    from autotest_lib.client.common_lib.cros import adb_keepalive as module
    # |__file__| returns the absolute path of the compiled bytecode of the
    # module. We want to run the original .py file, so we need to change the
    # extension back.
    script_filename = module.__file__.replace('.pyc', '.py')
    job = base_utils.BgJob([script_filename, target],
                           nickname='adb_keepalive', stderr_level=logging.DEBUG,
                           stdout_tee=base_utils.TEE_TO_LOGS,
                           stderr_tee=base_utils.TEE_TO_LOGS,
                           extra_paths=extra_paths)

    try:
        yield
    finally:
        # The adb_keepalive.py script runs forever until SIGTERM is sent.
        base_utils.nuke_subprocess(job.sp)
        base_utils.join_bg_jobs([job])

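# A sketch of how adb_keepalive() is meant to wrap a tradefed invocation
# (target and extra_paths come from TradefedTest below):
#
#   with adb_keepalive(self._get_adb_target(), self._install_paths):
#       # ... run tradefed; the helper reconnects adb after DUT reboots ...
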

class TradefedTest(test.test):
    """Base class to prepare DUT to run tests via tradefed."""
    version = 1

    # TODO(ihf): Remove _ADB_DIR_M/_SDK_TOOLS_DIR_M defaults once M is dead.
    def initialize(self, host=None, adb_dir=_ADB_DIR_M,
                   sdk_tools_dir=_SDK_TOOLS_DIR_M):
        """Sets up the tools and binary bundles for the test."""
        logging.info('Hostname: %s', host.hostname)
        self._host = host
        self._install_paths = []
        # Tests in the lab run within individual lxc container instances.
        if utils.is_in_container():
            cache_root = _TRADEFED_CACHE_CONTAINER
        else:
            cache_root = _TRADEFED_CACHE_LOCAL
        # Quick sanity check and spew of java version installed on the server.
        utils.run('java', args=('-version',), ignore_status=False, verbose=True,
                  stdout_tee=utils.TEE_TO_LOGS, stderr_tee=utils.TEE_TO_LOGS)
        # The content of the cache survives across jobs.
        self._safe_makedirs(cache_root)
        self._tradefed_cache = os.path.join(cache_root, 'cache')
        self._tradefed_cache_lock = os.path.join(cache_root, 'lock')
        # The content of the install location does not survive across jobs and
        # is isolated (by using a unique path) against other autotest
        # instances. This is not needed for the lab, but is useful if somebody
        # wants to run multiple TradefedTest instances.
        self._tradefed_install = tempfile.mkdtemp(prefix=_TRADEFED_PREFIX)
        # Under lxc the cache is shared between multiple autotest/tradefed
        # instances. We need to synchronize access to it. All binaries are
        # installed through the (shared) cache into the local (unshared)
        # lxc/autotest instance storage.
        # If the cache needs to be cleared, it must happen before all
        # downloads.
        self._clear_download_cache_if_needed()
        # Set permissions (rwxr-xr-x) on the executable binaries.
        permission = (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH
                      | stat.S_IXOTH)
        self._install_files(adb_dir, _ADB_FILES, permission)
        self._install_files(sdk_tools_dir, _SDK_TOOLS_FILES, permission)

    def cleanup(self):
        """Cleans up any dirtied state."""
        # Kill any lingering adb servers.
        self._run('adb', verbose=True, args=('kill-server',))
        logging.info('Cleaning up %s.', self._tradefed_install)
        shutil.rmtree(self._tradefed_install)

    def _login_chrome(self):
        """Returns a Chrome log-in context manager.

        Please see also cheets_CTSHelper for details about how this works.
        """
        return _ChromeLogin(self._host)

    def _get_adb_target(self):
        return '{}:{}'.format(self._host.hostname, self._host.port)

    def _try_adb_connect(self):
        """Attempts to connect to adb on the DUT.

        @return boolean indicating if adb connected successfully.
        """
        # This may fail due to a race condition in adb connect (b/29370989).
        # If adb is already connected, this command will immediately return
        # success.
        hostport = self._get_adb_target()
        result = self._run(
            'adb',
            args=('connect', hostport),
            verbose=True,
            ignore_status=True)
        logging.info('adb connect {}:\n{}'.format(hostport, result.stdout))
        if result.exit_status != 0:
            return False

        result = self._run('adb', args=('devices',))
        logging.info('adb devices:\n' + result.stdout)
        if not re.search(
                r'{}\s+(device|unauthorized)'.format(re.escape(hostport)),
                result.stdout):
            return False

        # Actually test the connection with an adb command as there can be
        # a race between detecting the connected device and actually being
        # able to run a command with authenticated adb.
        result = self._run('adb', args=('shell', 'exit'), ignore_status=True)
        return result.exit_status == 0

    def _android_shell(self, command):
        """Runs a command remotely on the device in an Android shell.

        This function is strictly for internal use only, as commands do not run
        in a fully consistent Android environment. Prefer adb shell instead.
        """
        self._host.run('android-sh -c ' + pipes.quote(command))

    def _write_android_file(self, filename, data):
        """Writes a file to a location relative to the android container.

        This is an internal function used to bootstrap adb.
        Tests should use adb push to write files.
        """
        android_cmd = 'echo %s > %s' % (pipes.quote(data),
                                        pipes.quote(filename))
        self._android_shell(android_cmd)

    def _connect_adb(self):
        """Sets up ADB connection to the ARC container."""
        logging.info('Setting up adb connection.')
        # Generate and push keys for adb.
        # TODO(elijahtaylor): Extract this code to arc_common and de-duplicate
        # code in arc.py on the client side tests.
        key_path = os.path.join(self.tmpdir, 'test_key')
        pubkey_path = key_path + '.pub'
        self._run('adb', verbose=True, args=('keygen', pipes.quote(key_path)))
        with open(pubkey_path, 'r') as f:
            self._write_android_file(_ANDROID_ADB_KEYS_PATH, f.read())
        self._android_shell('restorecon ' + pipes.quote(_ANDROID_ADB_KEYS_PATH))
        os.environ['ADB_VENDOR_KEYS'] = key_path

        # Kill existing adb server to ensure that the env var is picked up.
        self._run('adb', verbose=True, args=('kill-server',))

        # This starts adbd.
        self._android_shell('setprop sys.usb.config mtp,adb')

        # Also let it be automatically started upon reboot.
        self._android_shell('setprop persist.sys.usb.config mtp,adb')

        # adbd may take some time to come up. Repeatedly try to connect to adb.
        utils.poll_for_condition(lambda: self._try_adb_connect(),
                                 exception=error.TestFail(
                                     'Error: Failed to set up adb connection'),
                                 timeout=_ADB_READY_TIMEOUT_SECONDS,
                                 sleep_interval=_ADB_POLLING_INTERVAL_SECONDS)

        logging.info('Successfully set up adb connection.')

    def _wait_for_arc_boot(self):
        """Waits until ARC is fully booted.

        Tests for the presence of the intent helper app to determine whether
        ARC has finished booting.
        """
        def intent_helper_running():
            result = self._run('adb', args=('shell', 'pgrep', '-f',
                                            'org.chromium.arc.intent_helper'))
            return bool(result.stdout)
        utils.poll_for_condition(
            intent_helper_running,
            exception=error.TestFail(
                'Error: Timed out waiting for intent helper.'),
            timeout=_ARC_READY_TIMEOUT_SECONDS,
            sleep_interval=_ARC_POLLING_INTERVAL_SECONDS)

    def _disable_adb_install_dialog(self):
        """Disables a dialog shown on adb install execution.

        By default, on adb install execution, the "Allow Google to regularly
        check device activity ... " dialog is shown. It requires manual user
        action, which would block tests at that point.
        This method disables it.
        """
        logging.info('Disabling the adb install dialog.')
        result = self._run(
            'adb',
            verbose=True,
            args=(
                'shell',
                'settings',
                'put',
                'global',
                'verifier_verify_adb_installs',
                '0'))
        logging.info('Disable adb dialog: %s', result.stdout)

    def _ready_arc(self):
        """Readies ARC and adb for running tests via tradefed."""
        self._connect_adb()
        self._disable_adb_install_dialog()
        self._wait_for_arc_boot()

    def _safe_makedirs(self, path):
        """Creates a directory at |path| and its ancestors.

        Unlike os.makedirs(), this does not fail if the directory already
        exists.
        """
        try:
            os.makedirs(path)
        except OSError as e:
            if not (e.errno == errno.EEXIST and os.path.isdir(path)):
                raise

    def _unzip(self, filename):
        """Unzip the file.

        The destination directory name will be the stem of filename.
        E.g., _unzip('foo/bar/baz.zip') will create directory at
        'foo/bar/baz', and then will inflate zip's content under the directory.
        If there is already a directory at the stem, that directory will be
        used.

        @param filename: Path to the zip archive.
        @return Path to the inflated directory.
        """
        destination = os.path.splitext(filename)[0]
        if os.path.isdir(destination):
            return destination
        self._safe_makedirs(destination)
        utils.run('unzip', args=('-d', destination, filename))
        return destination

    def _dir_size(self, directory):
        """Compute recursive size in bytes of directory."""
        size = 0
        for root, _, files in os.walk(directory):
            size += sum(os.path.getsize(os.path.join(root, name))
                        for name in files)
        return size

    def _clear_download_cache_if_needed(self):
        """Invalidates cache to prevent it from growing too large."""
        # If the cache is large enough to hold a working set, we can simply
        # delete everything without thrashing.
        # TODO(ihf): Investigate strategies like LRU.
        with lock(self._tradefed_cache_lock):
            size = self._dir_size(self._tradefed_cache)
            if size > _TRADEFED_CACHE_MAX_SIZE:
                logging.info('Current cache size=%d got too large. Clearing %s.',
                             size, self._tradefed_cache)
                shutil.rmtree(self._tradefed_cache)
                self._safe_makedirs(self._tradefed_cache)
            else:
                logging.info('Current cache size=%d of %s.', size,
                             self._tradefed_cache)

    def _download_to_cache(self, uri):
        """Downloads the uri from the storage server.

        It always checks the cache for available binaries first and skips
        download if binaries are already in cache.

        The caller of this function is responsible for holding the cache lock.

        @param uri: The Google Storage or dl.google.com uri.
        @return Path to the downloaded object.
        """
        # Split uri into 3 pieces for use by gsutil and also by wget.
        parsed = urlparse.urlparse(uri)
        filename = os.path.basename(parsed.path)
        # We are hashing the uri instead of the binary. This is acceptable, as
        # the uris are supposed to contain version information and an object is
        # not supposed to be changed once created.
        output_dir = os.path.join(self._tradefed_cache,
                                  hashlib.md5(uri).hexdigest())
        output = os.path.join(output_dir, filename)
        # Check for existence of file.
        if os.path.exists(output):
            logging.info('Skipping download of %s, reusing %s.', uri, output)
            return output
        self._safe_makedirs(output_dir)

        if parsed.scheme not in ['gs', 'http', 'https']:
            raise error.TestFail('Error: Unknown download scheme %s' %
                                 parsed.scheme)
        if parsed.scheme in ['http', 'https']:
            logging.info('Using wget to download %s to %s.', uri, output_dir)
            # We are downloading 1 file at a time, hence using -O over -P.
            # We also limit the rate to 20MBytes/s.
            utils.run(
                'wget',
                args=(
                    '--report-speed=bits',
                    '--limit-rate=20M',
                    '-O',
                    output,
                    uri),
                verbose=True)
            return output

        if not client_utils.is_moblab():
            # If the machine can access the storage server directly, defer to
            # "gsutil" for downloading.
            logging.info('Host %s not in lab. Downloading %s directly to %s.',
                         self._host.hostname, uri, output)
            # b/17445576: gsutil rsync of individual files is not implemented.
            utils.run('gsutil', args=('cp', uri, output), verbose=True)
            return output

        # We are in the moblab. Because the machine cannot access the storage
        # server directly, use dev server to proxy.
        logging.info('Host %s is in lab. Downloading %s by staging to %s.',
                     self._host.hostname, uri, output)

        dirname = os.path.dirname(parsed.path)
        archive_url = '%s://%s%s' % (parsed.scheme, parsed.netloc, dirname)

        # First, request the devserver to download files into the lab network.
        # TODO(ihf): Switch stage_artifacts to honor rsync. Then we don't have
        # to shuffle files inside of tarballs.
        info = self._host.host_info_store.get()
        ds = dev_server.ImageServer.resolve(info.build)
        ds.stage_artifacts(info.build, files=[filename],
                           archive_url=archive_url)

        # Then download files from the dev server.
        # TODO(ihf): use rsync instead of wget. Are there 3 machines involved?
        # Itself, dev_server plus DUT? Or is there just no rsync in moblab?
        ds_src = '/'.join([ds.url(), 'static', dirname, filename])
        logging.info('dev_server URL: %s', ds_src)
        # Calls into DUT to pull uri from dev_server.
        utils.run(
            'wget',
            args=(
                '--report-speed=bits',
                '--limit-rate=20M',
                '-O',
                output,
                ds_src),
            verbose=True)
        return output

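    # Resulting on-disk layout, as implied by _download_to_cache() above and
    # _instance_copy() below:
    #
    #   <cache_root>/cache/<md5(uri)>/<filename>   # shared cache, survives jobs
    #   <install tmpdir>/<md5(uri)>/<filename>     # private per-instance copy
    #
    # where <cache_root> is _TRADEFED_CACHE_CONTAINER inside lxc and
    # _TRADEFED_CACHE_LOCAL otherwise, and <install tmpdir> is created by
    # tempfile.mkdtemp(prefix=_TRADEFED_PREFIX) in initialize().
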
    def _instance_copy(self, cache_path):
        """Makes a copy of a file from the (shared) cache to a wholly owned
        local instance. Also copies one level of cache directory (MD5 named).
        """
        filename = os.path.basename(cache_path)
        dirname = os.path.basename(os.path.dirname(cache_path))
        instance_dir = os.path.join(self._tradefed_install, dirname)
        # Make sure destination directory is named the same.
        self._safe_makedirs(instance_dir)
        instance_path = os.path.join(instance_dir, filename)
        shutil.copyfile(cache_path, instance_path)
        return instance_path

    def _install_bundle(self, gs_uri):
        """Downloads a zip file, installs it and returns the local path."""
        if not gs_uri.endswith('.zip'):
            raise error.TestFail('Error: Not a .zip file %s.' % gs_uri)
        # Atomic write through of file.
        with lock(self._tradefed_cache_lock):
            cache_path = self._download_to_cache(gs_uri)
            local = self._instance_copy(cache_path)

        unzipped = self._unzip(local)
        self._abi = 'x86' if 'x86-x86' in unzipped else 'arm'
        return unzipped

    def _install_files(self, gs_dir, files, permission):
        """Installs binary tools."""
        for filename in files:
            gs_uri = os.path.join(gs_dir, filename)
            # Atomic write through of file.
            with lock(self._tradefed_cache_lock):
                cache_path = self._download_to_cache(gs_uri)
                local = self._instance_copy(cache_path)
                os.chmod(local, permission)
            # Keep track of PATH.
            self._install_paths.append(os.path.dirname(local))

    def _run(self, *args, **kwargs):
        """Executes the given command line.

        To support SDK tools, such as adb or aapt, this adds _install_paths
        to the extra_paths. Before invoking this, ensure _install_files() has
        been called.
        """
        kwargs['extra_paths'] = (
            kwargs.get('extra_paths', []) + self._install_paths)
        return utils.run(*args, **kwargs)

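    # Illustrative call through _run(); 'aapt' is installed into
    # _install_paths by initialize(), and apk_path here is just a hypothetical
    # local file:
    #
    #   result = self._run('aapt', args=('dump', 'badging', apk_path),
    #                      verbose=True)
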
    def _collect_tradefed_global_log(self, result, destination):
        """Collects the tradefed global log.

        @param result: The result object from utils.run.
        @param destination: Autotest result directory (destination of logs).
        """
        match = re.search(r'Saved log to /tmp/(tradefed_global_log_.*\.txt)',
                          result.stdout)
        if not match:
            logging.error('No tradefed_global_log file found.')
            return

        name = match.group(1)
        dest = os.path.join(destination, 'logs', 'tmp')
        self._safe_makedirs(dest)
        shutil.copy(os.path.join('/tmp', name), os.path.join(dest, name))

    def _parse_tradefed_datetime(self, result, summary=None):
        """Get the tradefed provided result ID consisting of a datetime stamp.

        Unfortunately we are unable to tell tradefed where to store the
        results. In the lab we have multiple instances of tradefed running in
        parallel writing results and logs to the same base directory. This
        function finds the identifier which tradefed used during the current
        run and returns it for further processing of result files.

        @param result: The result object from utils.run.
        @param summary: Test result summary from runs so far.
        @return datetime_id: The result ID chosen by tradefed.
                             Example: '2016.07.14_00.34.50'.
        """
        # This string is shown for both 'run' and 'continue' after all tests.
        match = re.search(r': XML test result file generated at (\S+). Passed',
                          result.stdout)
        if not (match and match.group(1)):
            # TODO(ihf): Find out if we ever recover something interesting in
            # this case. Otherwise delete it.
            # Try harder to find the remains. This string shows before all
            # tests but only with 'run', not 'continue'.
            logging.warning('XML test result file incomplete?')
            match = re.search(r': Created result dir (\S+)', result.stdout)
            if not (match and match.group(1)):
                error_msg = 'Test did not complete due to Chrome or ARC crash.'
                if summary:
                    error_msg += (' Test summary from previous runs: %s'
                                  % summary)
                raise error.TestFail(error_msg)
        datetime_id = match.group(1)
        logging.info('Tradefed identified results and logs with %s.',
                     datetime_id)
        return datetime_id

    def _parse_tradefed_datetime_N(self, result, summary=None):
        """Get the tradefed provided result ID consisting of a datetime stamp.

        Unfortunately we are unable to tell tradefed where to store the
        results. In the lab we have multiple instances of tradefed running in
        parallel writing results and logs to the same base directory. This
        function finds the identifier which tradefed used during the current
        run and returns it for further processing of result files.

        @param result: The result object from utils.run.
        @param summary: Test result summary from runs so far.
        @return datetime_id: The result ID chosen by tradefed.
                             Example: '2016.07.14_00.34.50'.
        """
        # This string is shown for both 'run' and 'continue' after all tests.
        match = re.search(r'(\d\d\d\d.\d\d.\d\d_\d\d.\d\d.\d\d)', result.stdout)
        if not (match and match.group(1)):
            error_msg = 'Error: Test did not complete. (Chrome or ARC crash?)'
            if summary:
                error_msg += (' Test summary from previous runs: %s'
                              % summary)
            raise error.TestFail(error_msg)
        datetime_id = match.group(1)
        logging.info('Tradefed identified results and logs with %s.',
                     datetime_id)
        return datetime_id

    def _parse_result(self, result, waivers=None):
        """Check the result from the tradefed output.

        This extracts the test pass/fail/executed list from the output of
        tradefed. It is up to the caller to handle inconsistencies.

        @param result: The result object from utils.run.
        @param waivers: a set() of tests which are permitted to fail.
        """
        # Parse the stdout to extract test status. In particular, step over
        # similar output for each ABI and just look at the final summary.
        match = re.search(r'(XML test result file generated at (\S+). '
                          r'Passed (\d+), Failed (\d+), Not Executed (\d+))',
                          result.stdout)
        if not match:
            raise error.TestFail('Test log does not contain a summary.')

        passed = int(match.group(3))
        failed = int(match.group(4))
        not_executed = int(match.group(5))
        match = re.search(r'(Start test run of (\d+) packages, containing '
                          r'(\d+(?:,\d+)?) tests)', result.stdout)
        if match and match.group(3):
            tests = int(match.group(3).replace(',', ''))
        else:
            # Unfortunately this happens. Assume it made no other mistakes.
            logging.warning('Tradefed forgot to print number of tests.')
            tests = passed + failed + not_executed
        # TODO(rohitbm): make failure parsing more robust by extracting the
        # list of failing tests instead of searching in the result blob.
        # Also, only parse waivers for the running ABI.
        if waivers:
            for testname in waivers:
                # TODO(dhaddock): Find a more robust way to apply waivers.
                fail_count = result.stdout.count(testname + ' FAIL')
                if fail_count:
                    if fail_count > 2:
                        raise error.TestFail('Error: There are too many '
                                             'failures found in the output to '
                                             'be valid for applying waivers. '
                                             'Please check output.')
                    failed -= fail_count
                    # To maintain total count consistency.
                    passed += fail_count
                    logging.info('Waived failure for %s %d time(s)',
                                 testname, fail_count)
        logging.info('tests=%d, passed=%d, failed=%d, not_executed=%d',
                     tests, passed, failed, not_executed)
        if failed < 0:
            raise error.TestFail('Error: Internal waiver bookkeeping has '
                                 'become inconsistent.')
        return (tests, passed, failed, not_executed)

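    # An illustrative (made up) CTS M summary line that the first regular
    # expression in _parse_result() above matches; in real output it appears
    # on a single line:
    #
    #   XML test result file generated at 2016.07.14_00.34.50.
    #   Passed 100, Failed 2, Not Executed 0
    #
    # With no waivers this would return (tests, passed, failed, not_executed)
    # = (102, 100, 2, 0) whenever the 'Start test run of ...' line is absent.
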
    def _parse_result_N(self, result, waivers=None):
        """Check the result from the tradefed output.

        This extracts the test pass/fail/executed list from the output of
        tradefed. It is up to the caller to handle inconsistencies.

        @param result: The result object from utils.run.
        @param waivers: a set() of tests which are permitted to fail.
        """
        # Parse the stdout to extract test status. In particular, step over
        # similar output for each ABI and just look at the final summary.
        # I/ResultReporter: Invocation finished in 2m 9s. \
        # PASSED: 818, FAILED: 0, NOT EXECUTED: 0, MODULES: 1 of 1
        match = re.search(r'PASSED: (\d+), FAILED: (\d+), NOT EXECUTED: (\d+), '
                          r'MODULES: (\d+) of (\d+)',
                          result.stdout)
        if not match:
            raise error.TestFail('Test log does not contain a summary.')
        passed = int(match.group(1))
        failed = int(match.group(2))
        not_executed = int(match.group(3))

        # Starting x86 CtsUtilTestCases with 204 tests
        match = re.search(r'Starting (?:armeabi-v7a|x86) (.*) with '
                          r'(\d+(?:,\d+)?) tests', result.stdout)
        if match and match.group(2):
            tests = int(match.group(2).replace(',', ''))
            logging.info('Found %d tests.', tests)
        else:
            # Unfortunately this happens. Assume it made no other mistakes.
            logging.warning('Tradefed forgot to print number of tests.')
            # TODO(ihf): Once b/35530394 is fixed "+ not_executed".
            tests = passed + failed

        # TODO(rohitbm): make failure parsing more robust by extracting the
        # list of failing tests instead of searching in the result blob.
        # Also, only parse waivers for the running ABI.
        waived = 0
        if waivers:
            for testname in waivers:
                # TODO(dhaddock): Find a more robust way to apply waivers.
                fail_count = (result.stdout.count(testname + ' FAIL') +
                              result.stdout.count(testname + ' fail'))
                if fail_count:
                    if fail_count > 2:
                        raise error.TestFail('Error: There are too many '
                                             'failures found in the output to '
                                             'be valid for applying waivers. '
                                             'Please check output.')
                    waived += fail_count
                    logging.info('Waived failure for %s %d time(s)',
                                 testname, fail_count)
        counts = (tests, passed, failed, not_executed, waived)
        msg = ('tests=%d, passed=%d, failed=%d, not_executed=%d, waived=%d' %
               counts)
        logging.info(msg)
        if failed - waived < 0:
            raise error.TestFail('Error: Internal waiver bookkeeping has '
                                 'become inconsistent (failed=%d, waived=%d).'
                                 % (failed, waived))
        return counts

    def _collect_logs(self, repository, datetime, destination):
        """Collects the tradefed logs.

        It is legal to collect the same logs multiple times. This is normal
        after 'tradefed continue' updates existing logs with new results.

        @param repository: Full path to tradefed's output on disk.
        @param datetime: The identifier which tradefed assigned to the run.
                         Currently this looks like '2016.07.14_00.34.50'.
        @param destination: Autotest result directory (destination of logs).
        """
        logging.info('Collecting tradefed testResult.xml and logs to %s.',
                     destination)
        repository_results = os.path.join(repository, 'results')
        repository_logs = os.path.join(repository, 'logs')
        # Because other tools rely on the currently chosen Google storage paths
        # we need to keep destination_results in
        # cheets_CTS.*/results/android-cts/2016.mm.dd_hh.mm.ss(/|.zip)
        # and destination_logs in
        # cheets_CTS.*/results/android-cts/logs/2016.mm.dd_hh.mm.ss/
        destination_results = destination
        destination_results_datetime = os.path.join(destination_results,
                                                    datetime)
        destination_results_datetime_zip = destination_results_datetime + '.zip'
        destination_logs = os.path.join(destination, 'logs')
        destination_logs_datetime = os.path.join(destination_logs, datetime)
        # We may have collected the same logs before, clean up old versions.
        if os.path.exists(destination_results_datetime_zip):
            os.remove(destination_results_datetime_zip)
        if os.path.exists(destination_results_datetime):
            shutil.rmtree(destination_results_datetime)
        if os.path.exists(destination_logs_datetime):
            shutil.rmtree(destination_logs_datetime)
        shutil.copytree(
            os.path.join(repository_results, datetime),
            destination_results_datetime)
        # Copying the zip file has to happen after the tree so the destination
        # directory is available.
        shutil.copy(
            os.path.join(repository_results, datetime) + '.zip',
            destination_results_datetime_zip)
        shutil.copytree(
            os.path.join(repository_logs, datetime),
            destination_logs_datetime)

    def _get_expected_failures(self, directory):
        """Return a set of expected failures.

        @param directory: A directory relative to self.bindir containing
                          expected failure files (one test name per line,
                          one file per ABI).
        @return: a set of expected failures.
        """
        logging.info('Loading expected failures from %s.', directory)
        expected_fail_dir = os.path.join(self.bindir, directory)
        expected_fail_files = glob.glob(expected_fail_dir + '/*.' + self._abi)
        expected_failures = set()
        for expected_fail_file in expected_fail_files:
            try:
                file_path = os.path.join(expected_fail_dir, expected_fail_file)
                with open(file_path) as f:
                    lines = set(f.read().splitlines())
                    logging.info('Loaded %d expected failures from %s',
                                 len(lines), expected_fail_file)
                    expected_failures |= lines
            except IOError as e:
                logging.error('Error loading %s (%s).', file_path, e.strerror)
        logging.info('Finished loading expected failures: %s',
                     expected_failures)
        return expected_failures
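
    # The expected-failure files consumed above are plain text, one failing
    # test per line, with one file per ABI selected via the '*.arm' / '*.x86'
    # glob. A hypothetical example (test names made up):
    #
    #   android.example.cts.SomeTest#testFlaky
    #   android.example.cts.OtherTest#testKnownBroken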