Snap for 6219949 from 2b68127d6634c9b6e7da53bb6b7bec8ce4e3ca4a to qt-qpr3-release
Change-Id: Ic5994b2166514b5c6fc13064e58588d9847870ef
diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg
index 96b1dfc..03350c7 100644
--- a/PREUPLOAD.cfg
+++ b/PREUPLOAD.cfg
@@ -1,12 +1,21 @@
[Hook Scripts]
# Only list fast unittests here.
-config_unittest = ./rh/config_unittest.py
-hooks_unittest = ./rh/hooks_unittest.py
-shell_unittest = ./rh/shell_unittest.py
-android_test_mapping_format_unittest = ./tools/android_test_mapping_format_unittest.py
+py2_config_unittest = python2 ./rh/config_unittest.py
+py3_config_unittest = python3 ./rh/config_unittest.py
+py2_hooks_unittest = python2 ./rh/hooks_unittest.py
+py3_hooks_unittest = python3 ./rh/hooks_unittest.py
+py2_shell_unittest = python2 ./rh/shell_unittest.py
+py3_shell_unittest = python3 ./rh/shell_unittest.py
+py2_utils_unittest = python2 ./rh/utils_unittest.py
+py3_utils_unittest = python3 ./rh/utils_unittest.py
+py2_android_test_mapping_format_unittest = python2 ./tools/android_test_mapping_format_unittest.py
+py3_android_test_mapping_format_unittest = python3 ./tools/android_test_mapping_format_unittest.py
+py2_config_test = python2 ./rh/config_test.py --check-env --commit-id ${PREUPLOAD_COMMIT} --commit-msg ${PREUPLOAD_COMMIT_MESSAGE} --repo-root ${REPO_ROOT} -- ${PREUPLOAD_FILES}
+py3_config_test = python3 ./rh/config_test.py --check-env --commit-id ${PREUPLOAD_COMMIT} --commit-msg ${PREUPLOAD_COMMIT_MESSAGE} --repo-root ${REPO_ROOT} -- ${PREUPLOAD_FILES}
[Builtin Hooks]
commit_msg_bug_field = true
commit_msg_changeid_field = true
commit_msg_test_field = true
-pylint = true
+pylint2 = true
+pylint3 = true
diff --git a/README.md b/README.md
index faab1cf..43a72d5 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,4 @@
-# AOSP Presubmit Hooks
-
-[TOC]
+# AOSP Preupload Hooks
This repo holds hooks that get run by repo during the upload phase. They
perform various checks automatically such as running linters on your code.
@@ -8,6 +6,8 @@
Note: Currently all hooks are disabled by default. Each repo must explicitly
turn on any hook it wishes to enforce.
+[TOC]
+
## Usage
Normally these execute automatically when you run `repo upload`. If you want to
@@ -49,12 +49,15 @@
## PREUPLOAD.cfg
-This file are checked in the top of a specific git repository. Stacking them
+This file is checked into the top of a specific git repository. Stacking them
in subdirectories (to try and override parent settings) is not supported.
## Example
```
+# Per-project `repo upload` hook settings.
+# https://android.googlesource.com/platform/tools/repohooks
+
[Options]
ignore_merged_commits = true
@@ -126,13 +129,20 @@
that is executed. The key is used as the name of the hook for reporting purposes,
so it should be at least somewhat descriptive.
+Whitespace in the key name is OK!
+
+The keys must be unique as duplicates will silently clobber earlier values.
+
+You do not need to send stderr to stdout. The tooling will take care of
+merging them together for you automatically.
+
```
[Hook Scripts]
-my_first_hook = program --gogog ${PREUPLOAD_FILES}
-another_hook = funtimes --i-need "some space" ${PREUPLOAD_FILES}
-some_fish = linter --ate-a-cat ${PREUPLOAD_FILES}
-some_cat = formatter --cat-commit ${PREUPLOAD_COMMIT}
-some_dog = tool --no-cat-in-commit-message ${PREUPLOAD_COMMIT_MESSAGE}
+my first hook = program --gogog ${PREUPLOAD_FILES}
+another hook = funtimes --i-need "some space" ${PREUPLOAD_FILES}
+some fish = linter --ate-a-cat ${PREUPLOAD_FILES}
+some cat = formatter --cat-commit ${PREUPLOAD_COMMIT}
+some dog = tool --no-cat-in-commit-message ${PREUPLOAD_COMMIT_MESSAGE}
```
## [Builtin Hooks]
@@ -140,6 +150,7 @@
This section allows for turning on common/builtin hooks. There are a bunch of
canned hooks already included geared towards AOSP style guidelines.
+* `bpfmt`: Run Blueprint files (.bp) through `bpfmt`.
* `checkpatch`: Run commits through the Linux kernel's `checkpatch.pl` script.
* `clang_format`: Run git-clang-format against the commit. The default style is
`file`.
@@ -153,7 +164,9 @@
* `google_java_format`: Run Java code through
[`google-java-format`](https://github.com/google/google-java-format)
* `jsonlint`: Verify JSON code is sane.
-* `pylint`: Run Python code through `pylint`.
+* `pylint`: Alias of `pylint2`. Will change to `pylint3` by end of 2019.
+* `pylint2`: Run Python code through `pylint` using Python 2.
+* `pylint3`: Run Python code through `pylint` using Python 3.
* `xmllint`: Run XML code through `xmllint`.
* `android_test_mapping_format`: Validate TEST_MAPPING files in Android source
code. Refer to go/test-mapping for more details.
@@ -195,6 +208,7 @@
provide consistent behavior for developers across different OS and Linux
distros/versions. The following tools are recognized:
+* `bpfmt`: used for the `bpfmt` builtin hook.
* `clang-format`: used for the `clang_format` builtin hook.
* `cpplint`: used for the `cpplint` builtin hook.
* `git-clang-format`: used for the `clang_format` builtin hook.
@@ -226,13 +240,13 @@
* New hooks can be added in `rh/hooks.py`. Be sure to keep the list up-to-date
with the documentation in this file.
-### Warnings
+## Warnings
If the return code of a hook is 77, then it is assumed to be a warning. The
output will be printed to the terminal, but uploading will still be allowed
without a bypass being required.
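
For illustration, a custom entry under `[Hook Scripts]` can downgrade its findings to warnings by exiting with code 77. The script below is a hypothetical sketch (its name, the TODO check, and the wiring are assumptions, not part of this repo):

```
#!/usr/bin/env python3
"""Hypothetical warning-only hook: flag TODO markers without blocking upload."""

import sys


def main(paths):
    # repohooks expands ${PREUPLOAD_FILES} into the commit's file list.
    flagged = []
    for path in paths:
        try:
            with open(path, errors='ignore') as fp:
                if 'TODO' in fp.read():
                    flagged.append(path)
        except OSError:
            continue  # e.g. the commit deletes this path
    if flagged:
        print('warning: TODO markers in: %s' % ', '.join(flagged),
              file=sys.stderr)
        # Exit code 77 makes repohooks report a warning instead of a failure.
        return 77
    return 0


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
```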
-## TODO/Limitations
+# TODO/Limitations
* `pylint` should support per-directory pylintrc files.
* Some checkers operate on the files as they exist in the filesystem. This is
diff --git a/pre-upload.py b/pre-upload.py
index fffc03c..53b5ffb 100755
--- a/pre-upload.py
+++ b/pre-upload.py
@@ -23,15 +23,27 @@
from __future__ import print_function
import argparse
+import datetime
import os
import sys
-try:
- __file__
-except NameError:
- # Work around repo until it gets fixed.
- # https://gerrit-review.googlesource.com/75481
- __file__ = os.path.join(os.getcwd(), 'pre-upload.py')
+
+# Assert some minimum Python versions as we don't test or support any others.
+# We support Python 2.7 and Python 3, and require 2.7.5+/3.4+ to pick up the signal fix:
+# https://bugs.python.org/issue14173
+if sys.version_info < (2, 7, 5):
+ print('repohooks: error: Python-2.7.5+ is required', file=sys.stderr)
+ sys.exit(1)
+elif sys.version_info.major == 3 and sys.version_info < (3, 4):
+ # We don't actually test <Python-3.6. Hope for the best!
+ print('repohooks: error: Python-3.4+ is required', file=sys.stderr)
+ sys.exit(1)
+elif sys.version_info.major == 3 and sys.version_info < (3, 6):
+ # We want to get people off of old versions of Python.
+ print('repohooks: warning: Python-3.6+ is going to be required; '
+ 'please upgrade soon to maintain support.', file=sys.stderr)
+
+
_path = os.path.dirname(os.path.realpath(__file__))
if sys.path[0] != _path:
sys.path.insert(0, _path)
@@ -45,6 +57,7 @@
import rh.config
import rh.git
import rh.hooks
+import rh.sixish
import rh.terminal
import rh.utils
@@ -63,17 +76,25 @@
FAILED = COLOR.color(COLOR.RED, 'FAILED')
WARNING = COLOR.color(COLOR.YELLOW, 'WARNING')
- def __init__(self, project_name, num_hooks):
+ def __init__(self, project_name):
"""Create a new Output object for a specified project.
Args:
project_name: name of project.
- num_hooks: number of hooks to be run.
"""
self.project_name = project_name
- self.num_hooks = num_hooks
+ self.num_hooks = None
self.hook_index = 0
self.success = True
+ self.start_time = datetime.datetime.now()
+
+ def set_num_hooks(self, num_hooks):
+ """Keep track of how many hooks we'll be running.
+
+ Args:
+ num_hooks: number of hooks to be run.
+ """
+ self.num_hooks = num_hooks
def commit_start(self, commit, commit_summary):
"""Emit status for new commit.
@@ -98,19 +119,16 @@
rh.terminal.print_status_line(status_line)
def hook_error(self, hook_name, error):
- """Print an error.
+ """Print an error for a single hook.
Args:
hook_name: name of the hook.
error: error string.
"""
- status_line = '[%s] %s' % (self.FAILED, hook_name)
- rh.terminal.print_status_line(status_line, print_newline=True)
- print(error, file=sys.stderr)
- self.success = False
+ self.error(hook_name, error)
def hook_warning(self, hook_name, warning):
- """Print a warning.
+ """Print a warning for a single hook.
Args:
hook_name: name of the hook.
@@ -120,12 +138,25 @@
rh.terminal.print_status_line(status_line, print_newline=True)
print(warning, file=sys.stderr)
+ def error(self, header, error):
+ """Print a general error.
+
+ Args:
+ header: A unique identifier for the source of this error.
+ error: error string.
+ """
+ status_line = '[%s] %s' % (self.FAILED, header)
+ rh.terminal.print_status_line(status_line, print_newline=True)
+ print(error, file=sys.stderr)
+ self.success = False
+
def finish(self):
- """Print repohook summary."""
- status_line = '[%s] repohooks for %s %s' % (
+ """Print summary for all the hooks."""
+ status_line = '[%s] repohooks for %s %s in %s' % (
self.PASSED if self.success else self.FAILED,
self.project_name,
- 'passed' if self.success else 'failed')
+ 'passed' if self.success else 'failed',
+ rh.utils.timedelta_str(datetime.datetime.now() - self.start_time))
rh.terminal.print_status_line(status_line, print_newline=True)
@@ -174,13 +205,7 @@
# Load the config for this git repo.
'.',
)
- try:
- config = rh.config.PreSubmitConfig(paths=paths,
- global_paths=global_paths)
- except rh.config.ValidationError as e:
- print('invalid config file: %s' % (e,), file=sys.stderr)
- sys.exit(1)
- return config
+ return rh.config.PreUploadConfig(paths=paths, global_paths=global_paths)
def _attempt_fixes(fixup_func_list, commit_list):
@@ -214,14 +239,13 @@
'attempting to upload again.\n', file=sys.stderr)
-def _run_project_hooks(project_name, proj_dir=None,
- commit_list=None):
- """For each project run its project specific hook from the hooks dictionary.
+def _run_project_hooks_in_cwd(project_name, proj_dir, output, commit_list=None):
+ """Run the project-specific hooks in the cwd.
Args:
- project_name: The name of project to run hooks for.
- proj_dir: If non-None, this is the directory the project is in. If None,
- we'll ask repo.
+ project_name: The name of this project.
+ proj_dir: The directory for this project (for passing on in metadata).
+ output: Helper for summarizing output/errors to the user.
commit_list: A list of commits to run hooks against. If None or empty
list then we'll automatically get the list of commits that would be
uploaded.
@@ -229,49 +253,37 @@
Returns:
False if any errors were found, else True.
"""
- if proj_dir is None:
- cmd = ['repo', 'forall', project_name, '-c', 'pwd']
- result = rh.utils.run_command(cmd, capture_output=True)
- proj_dirs = result.output.split()
- if len(proj_dirs) == 0:
- print('%s cannot be found.' % project_name, file=sys.stderr)
- print('Please specify a valid project.', file=sys.stderr)
- return 0
- if len(proj_dirs) > 1:
- print('%s is associated with multiple directories.' % project_name,
- file=sys.stderr)
- print('Please specify a directory to help disambiguate.',
- file=sys.stderr)
- return 0
- proj_dir = proj_dirs[0]
-
- pwd = os.getcwd()
- # Hooks assume they are run from the root of the project.
- os.chdir(proj_dir)
+ try:
+ config = _get_project_config()
+ except rh.config.ValidationError as e:
+ output.error('Loading config files', str(e))
+ return False
# If the repo has no pre-upload hooks enabled, then just return.
- config = _get_project_config()
hooks = list(config.callable_hooks())
if not hooks:
return True
+ output.set_num_hooks(len(hooks))
+
# Set up the environment like repo would with the forall command.
try:
remote = rh.git.get_upstream_remote()
upstream_branch = rh.git.get_upstream_branch()
- except rh.utils.RunCommandError as e:
- print('upstream remote cannot be found: %s' % (e,), file=sys.stderr)
- print('Did you run repo start?', file=sys.stderr)
- sys.exit(1)
+ except rh.utils.CalledProcessError as e:
+ output.error('Upstream remote/tracking branch lookup',
+ '%s\nDid you run repo start? Is your HEAD detached?' %
+ (e,))
+ return False
+
os.environ.update({
'REPO_LREV': rh.git.get_commit_for_ref(upstream_branch),
- 'REPO_PATH': proj_dir,
+ 'REPO_PATH': os.path.relpath(proj_dir, rh.git.find_repo_root()),
'REPO_PROJECT': project_name,
'REPO_REMOTE': remote,
'REPO_RREV': rh.git.get_remote_revision(upstream_branch, remote),
})
- output = Output(project_name, len(hooks))
project = rh.Project(name=project_name, dir=proj_dir, remote=remote)
if not commit_list:
@@ -286,7 +298,7 @@
os.environ['PREUPLOAD_COMMIT'] = commit
diff = rh.git.get_affected_files(commit)
desc = rh.git.get_commit_desc(commit)
- os.environ['PREUPLOAD_COMMIT_MESSAGE'] = desc
+ rh.sixish.setenv('PREUPLOAD_COMMIT_MESSAGE', desc)
commit_summary = desc.split('\n', 1)[0]
output.commit_start(commit=commit, commit_summary=commit_summary)
@@ -309,11 +321,52 @@
if fixup_func_list:
_attempt_fixes(fixup_func_list, commit_list)
- output.finish()
- os.chdir(pwd)
return ret
+def _run_project_hooks(project_name, proj_dir=None, commit_list=None):
+ """Run the project-specific hooks in |proj_dir|.
+
+ Args:
+ project_name: The name of project to run hooks for.
+ proj_dir: If non-None, this is the directory the project is in. If None,
+ we'll ask repo.
+ commit_list: A list of commits to run hooks against. If None or empty
+ list then we'll automatically get the list of commits that would be
+ uploaded.
+
+ Returns:
+ False if any errors were found, else True.
+ """
+ output = Output(project_name)
+
+ if proj_dir is None:
+ cmd = ['repo', 'forall', project_name, '-c', 'pwd']
+ result = rh.utils.run(cmd, capture_output=True)
+ proj_dirs = result.stdout.split()
+ if not proj_dirs:
+ print('%s cannot be found.' % project_name, file=sys.stderr)
+ print('Please specify a valid project.', file=sys.stderr)
+ return False
+ if len(proj_dirs) > 1:
+ print('%s is associated with multiple directories.' % project_name,
+ file=sys.stderr)
+ print('Please specify a directory to help disambiguate.',
+ file=sys.stderr)
+ return False
+ proj_dir = proj_dirs[0]
+
+ pwd = os.getcwd()
+ try:
+ # Hooks assume they are run from the root of the project.
+ os.chdir(proj_dir)
+ return _run_project_hooks_in_cwd(project_name, proj_dir, output,
+ commit_list=commit_list)
+ finally:
+ output.finish()
+ os.chdir(pwd)
+
+
def main(project_list, worktree_list=None, **_kwargs):
"""Main function invoked directly by repo.
@@ -336,6 +389,10 @@
for project, worktree in zip(project_list, worktree_list):
if not _run_project_hooks(project, proj_dir=worktree):
found_error = True
+ # If a repo had failures, add a blank line to help break up the
+ # output. If there were no failures, then the output should be
+ # very minimal, so we don't add it then.
+ print('', file=sys.stderr)
if found_error:
color = rh.terminal.Color()
@@ -354,8 +411,8 @@
a blank string upon failure.
"""
cmd = ['repo', 'forall', '.', '-c', 'echo ${REPO_PROJECT}']
- return rh.utils.run_command(cmd, capture_output=True, redirect_stderr=True,
- cwd=path).output.strip()
+ return rh.utils.run(cmd, capture_output=True, redirect_stderr=True,
+ cwd=path).stdout.strip()
def direct_main(argv):
@@ -387,8 +444,8 @@
# project from CWD.
if opts.dir is None:
cmd = ['git', 'rev-parse', '--git-dir']
- git_dir = rh.utils.run_command(cmd, capture_output=True,
- redirect_stderr=True).output.strip()
+ git_dir = rh.utils.run(cmd, capture_output=True,
+ redirect_stderr=True).stdout.strip()
if not git_dir:
parser.error('The current directory is not part of a git project.')
opts.dir = os.path.dirname(os.path.abspath(git_dir))
@@ -407,8 +464,7 @@
if _run_project_hooks(opts.project, proj_dir=opts.dir,
commit_list=opts.commits):
return 0
- else:
- return 1
+ return 1
if __name__ == '__main__':
diff --git a/rh/config.py b/rh/config.py
index 61f2ef0..6e96fc7 100644
--- a/rh/config.py
+++ b/rh/config.py
@@ -17,7 +17,6 @@
from __future__ import print_function
-import ConfigParser
import functools
import os
import shlex
@@ -31,6 +30,7 @@
# pylint: disable=wrong-import-position
import rh.hooks
import rh.shell
+from rh.sixish import configparser
class Error(Exception):
@@ -41,7 +41,7 @@
"""Config file has unknown sections/keys or other values."""
-class RawConfigParser(ConfigParser.RawConfigParser):
+class RawConfigParser(configparser.RawConfigParser):
"""Like RawConfigParser but with some default helpers."""
@staticmethod
@@ -52,6 +52,9 @@
(name, cnt_min, cnt_max, cnt,))
return cnt
+ # pylint can't seem to grok our use of *args here.
+ # pylint: disable=arguments-differ
+
def options(self, section, *args):
"""Return the options in |section| (with default |args|).
@@ -61,8 +64,8 @@
"""
cnt = self._check_args('options', 2, 3, args)
try:
- return ConfigParser.RawConfigParser.options(self, section)
- except ConfigParser.NoSectionError:
+ return configparser.RawConfigParser.options(self, section)
+ except configparser.NoSectionError:
if cnt == 1:
return args[0]
raise
@@ -71,8 +74,8 @@
"""Return the value for |option| in |section| (with default |args|)."""
cnt = self._check_args('get', 3, 4, args)
try:
- return ConfigParser.RawConfigParser.get(self, section, option)
- except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+ return configparser.RawConfigParser.get(self, section, option)
+ except (configparser.NoSectionError, configparser.NoOptionError):
if cnt == 1:
return args[0]
raise
@@ -81,14 +84,14 @@
"""Return a list of (key, value) tuples for the options in |section|."""
cnt = self._check_args('items', 2, 3, args)
try:
- return ConfigParser.RawConfigParser.items(self, section)
- except ConfigParser.NoSectionError:
+ return configparser.RawConfigParser.items(self, section)
+ except configparser.NoSectionError:
if cnt == 1:
return args[0]
raise
-class PreSubmitConfig(object):
+class PreUploadConfig(object):
"""Config file used for per-project `repo upload` hooks."""
FILENAME = 'PREUPLOAD.cfg'
@@ -121,7 +124,7 @@
self.paths.append(path)
try:
config.read(path)
- except ConfigParser.ParsingError as e:
+ except configparser.ParsingError as e:
raise ValidationError('%s: %s' % (path, e))
self.paths = []
diff --git a/rh/config_test.py b/rh/config_test.py
new file mode 100755
index 0000000..794e50f
--- /dev/null
+++ b/rh/config_test.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python3
+# -*- coding:utf-8 -*-
+# Copyright 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Integration tests for the config module (via PREUPLOAD.cfg)."""
+
+from __future__ import print_function
+
+import argparse
+import os
+import re
+import sys
+
+
+REPOTOOLS = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+REPO_ROOT = os.path.dirname(os.path.dirname(REPOTOOLS))
+
+
+def assertEqual(msg, exp, actual):
+ """Assert |exp| equals |actual|."""
+ assert exp == actual, '%s: expected "%s" but got "%s"' % (msg, exp, actual)
+
+
+def assertEnv(var, value):
+ """Assert |var| is set in the environment as |value|."""
+ assert var in os.environ, '$%s missing in environment' % (var,)
+ assertEqual('env[%s]' % (var,), value, os.environ[var])
+
+
+def check_commit_id(commit):
+ """Check |commit| looks like a git commit id."""
+ assert len(commit) == 40, 'commit "%s" must be 40 chars' % (commit,)
+ assert re.match(r'^[a-f0-9]+$', commit), \
+ 'commit "%s" must be all hex' % (commit,)
+
+
+def check_commit_msg(msg):
+ """Check the ${PREUPLOAD_COMMIT_MESSAGE} setting."""
+ assert len(msg) > 1, 'commit message must be at least 2 bytes: %s' % (msg,)
+
+
+def check_repo_root(root):
+ """Check the ${REPO_ROOT} setting."""
+ assertEqual('REPO_ROOT', REPO_ROOT, root)
+
+
+def check_files(files):
+ """Check the ${PREUPLOAD_FILES} setting."""
+ assert files
+
+
+def check_env():
+ """Verify all exported env vars look sane."""
+ assertEnv('REPO_PROJECT', 'platform/tools/repohooks')
+ assertEnv('REPO_PATH', 'tools/repohooks')
+ assertEnv('REPO_REMOTE', 'aosp')
+ check_commit_id(os.environ['REPO_LREV'])
+ print(os.environ['REPO_RREV'])
+ check_commit_id(os.environ['PREUPLOAD_COMMIT'])
+
+
+def get_parser():
+ """Return a command line parser."""
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument('--check-env', action='store_true',
+ help='Check all exported env vars.')
+ parser.add_argument('--commit-id',
+ help='${PREUPLOAD_COMMIT} setting.')
+ parser.add_argument('--commit-msg',
+ help='${PREUPLOAD_COMMIT_MESSAGE} setting.')
+ parser.add_argument('--repo-root',
+ help='${REPO_ROOT} setting.')
+ parser.add_argument('files', nargs='+',
+ help='${PREUPLOAD_FILES} paths.')
+ return parser
+
+
+def main(argv):
+ """The main entry."""
+ parser = get_parser()
+ opts = parser.parse_args(argv)
+
+ try:
+ if opts.check_env:
+ check_env()
+ if opts.commit_id is not None:
+ check_commit_id(opts.commit_id)
+ if opts.commit_msg is not None:
+ check_commit_msg(opts.commit_msg)
+ if opts.repo_root is not None:
+ check_repo_root(opts.repo_root)
+ check_files(opts.files)
+ except AssertionError as e:
+ print('error: %s' % (e,), file=sys.stderr)
+ return 1
+
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
diff --git a/rh/config_unittest.py b/rh/config_unittest.py
index 55d08e9..d51afdc 100755
--- a/rh/config_unittest.py
+++ b/rh/config_unittest.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright 2016 The Android Open Source Project
#
@@ -36,8 +36,8 @@
import rh.config
-class PreSubmitConfigTests(unittest.TestCase):
- """Tests for the PreSubmitConfig class."""
+class PreUploadConfigTests(unittest.TestCase):
+ """Tests for the PreUploadConfig class."""
def setUp(self):
self.tempdir = tempfile.mkdtemp()
@@ -48,7 +48,7 @@
def _write_config(self, data, filename=None):
"""Helper to write out a config file for testing."""
if filename is None:
- filename = rh.config.PreSubmitConfig.FILENAME
+ filename = rh.config.PreUploadConfig.FILENAME
path = os.path.join(self.tempdir, filename)
with open(path, 'w') as fp:
fp.write(data)
@@ -56,16 +56,16 @@
def _write_global_config(self, data):
"""Helper to write out a global config file for testing."""
self._write_config(
- data, filename=rh.config.PreSubmitConfig.GLOBAL_FILENAME)
+ data, filename=rh.config.PreUploadConfig.GLOBAL_FILENAME)
def testMissing(self):
"""Instantiating a non-existent config file should be fine."""
- rh.config.PreSubmitConfig()
+ rh.config.PreUploadConfig()
def testEmpty(self):
"""Instantiating an empty config file should be fine."""
self._write_config('')
- rh.config.PreSubmitConfig(paths=(self.tempdir,))
+ rh.config.PreUploadConfig(paths=(self.tempdir,))
def testValid(self):
"""Verify a fully valid file works."""
@@ -82,30 +82,30 @@
[Options]
ignore_merged_commits = true
""")
- rh.config.PreSubmitConfig(paths=(self.tempdir,))
+ rh.config.PreUploadConfig(paths=(self.tempdir,))
def testUnknownSection(self):
"""Reject unknown sections."""
self._write_config('[BOOGA]')
- self.assertRaises(rh.config.ValidationError, rh.config.PreSubmitConfig,
+ self.assertRaises(rh.config.ValidationError, rh.config.PreUploadConfig,
paths=(self.tempdir,))
def testUnknownBuiltin(self):
"""Reject unknown builtin hooks."""
self._write_config('[Builtin Hooks]\nbooga = borg!')
- self.assertRaises(rh.config.ValidationError, rh.config.PreSubmitConfig,
+ self.assertRaises(rh.config.ValidationError, rh.config.PreUploadConfig,
paths=(self.tempdir,))
def testEmptyCustomHook(self):
"""Reject empty custom hooks."""
self._write_config('[Hook Scripts]\nbooga = \t \n')
- self.assertRaises(rh.config.ValidationError, rh.config.PreSubmitConfig,
+ self.assertRaises(rh.config.ValidationError, rh.config.PreUploadConfig,
paths=(self.tempdir,))
def testInvalidIni(self):
"""Reject invalid ini files."""
self._write_config('[Hook Scripts]\n =')
- self.assertRaises(rh.config.ValidationError, rh.config.PreSubmitConfig,
+ self.assertRaises(rh.config.ValidationError, rh.config.PreUploadConfig,
paths=(self.tempdir,))
def testInvalidString(self):
@@ -113,7 +113,7 @@
self._write_config("""[Hook Scripts]
name = script --'bad-quotes
""")
- self.assertRaises(rh.config.ValidationError, rh.config.PreSubmitConfig,
+ self.assertRaises(rh.config.ValidationError, rh.config.PreUploadConfig,
paths=(self.tempdir,))
def testGlobalConfigs(self):
@@ -125,7 +125,7 @@
self._write_config("""[Builtin Hooks]
commit_msg_bug_field = false
commit_msg_test_field = true""")
- config = rh.config.PreSubmitConfig(paths=(self.tempdir,),
+ config = rh.config.PreUploadConfig(paths=(self.tempdir,),
global_paths=(self.tempdir,))
self.assertEqual(config.builtin_hooks,
['commit_msg_changeid_field', 'commit_msg_test_field'])
diff --git a/rh/git.py b/rh/git.py
index 196fc69..baf669c 100644
--- a/rh/git.py
+++ b/rh/git.py
@@ -34,13 +34,13 @@
"""Returns the current upstream remote name."""
# First get the current branch name.
cmd = ['git', 'rev-parse', '--abbrev-ref', 'HEAD']
- result = rh.utils.run_command(cmd, capture_output=True)
- branch = result.output.strip()
+ result = rh.utils.run(cmd, capture_output=True)
+ branch = result.stdout.strip()
# Then get the remote associated with this branch.
cmd = ['git', 'config', 'branch.%s.remote' % branch]
- result = rh.utils.run_command(cmd, capture_output=True)
- return result.output.strip()
+ result = rh.utils.run(cmd, capture_output=True)
+ return result.stdout.strip()
def get_upstream_branch():
@@ -50,21 +50,21 @@
Error if there is no tracking branch
"""
cmd = ['git', 'symbolic-ref', 'HEAD']
- result = rh.utils.run_command(cmd, capture_output=True)
- current_branch = result.output.strip().replace('refs/heads/', '')
+ result = rh.utils.run(cmd, capture_output=True)
+ current_branch = result.stdout.strip().replace('refs/heads/', '')
if not current_branch:
raise ValueError('Need to be on a tracking branch')
cfg_option = 'branch.' + current_branch + '.%s'
cmd = ['git', 'config', cfg_option % 'merge']
- result = rh.utils.run_command(cmd, capture_output=True)
- full_upstream = result.output.strip()
+ result = rh.utils.run(cmd, capture_output=True)
+ full_upstream = result.stdout.strip()
# If remote is not fully qualified, add an implicit namespace.
if '/' not in full_upstream:
full_upstream = 'refs/heads/%s' % full_upstream
cmd = ['git', 'config', cfg_option % 'remote']
- result = rh.utils.run_command(cmd, capture_output=True)
- remote = result.output.strip()
+ result = rh.utils.run(cmd, capture_output=True)
+ remote = result.stdout.strip()
if not remote or not full_upstream:
raise ValueError('Need to be on a tracking branch')
@@ -74,8 +74,8 @@
def get_commit_for_ref(ref):
"""Returns the latest commit for this ref."""
cmd = ['git', 'rev-parse', ref]
- result = rh.utils.run_command(cmd, capture_output=True)
- return result.output.strip()
+ result = rh.utils.run(cmd, capture_output=True)
+ return result.stdout.strip()
def get_remote_revision(ref, remote):
@@ -89,19 +89,7 @@
def get_patch(commit):
"""Returns the patch for this commit."""
cmd = ['git', 'format-patch', '--stdout', '-1', commit]
- return rh.utils.run_command(cmd, capture_output=True).output
-
-
-def _try_utf8_decode(data):
- """Attempts to decode a string as UTF-8.
-
- Returns:
- The decoded Unicode object, or the original string if parsing fails.
- """
- try:
- return unicode(data, 'utf-8', 'strict')
- except UnicodeDecodeError:
- return data
+ return rh.utils.run(cmd, capture_output=True).stdout
def get_file_content(commit, path):
@@ -115,14 +103,25 @@
content will not have any newlines.
"""
cmd = ['git', 'show', '%s:%s' % (commit, path)]
- return rh.utils.run_command(cmd, capture_output=True).output
+ return rh.utils.run(cmd, capture_output=True).stdout
-# RawDiffEntry represents a line of raw formatted git diff output.
-RawDiffEntry = rh.utils.collection(
- 'RawDiffEntry',
- src_mode=0, dst_mode=0, src_sha=None, dst_sha=None,
- status=None, score=None, src_file=None, dst_file=None, file=None)
+class RawDiffEntry(object):
+ """Representation of a line from raw formatted git diff output."""
+
+ # pylint: disable=redefined-builtin
+ def __init__(self, src_mode=0, dst_mode=0, src_sha=None, dst_sha=None,
+ status=None, score=None, src_file=None, dst_file=None,
+ file=None):
+ self.src_mode = src_mode
+ self.dst_mode = dst_mode
+ self.src_sha = src_sha
+ self.dst_sha = dst_sha
+ self.status = status
+ self.score = score
+ self.src_file = src_file
+ self.dst_file = dst_file
+ self.file = file
# This regular expression pulls apart a line of raw formatted git diff output.
@@ -146,17 +145,18 @@
entries = []
cmd = ['git', 'diff', '--no-ext-diff', '-M', '--raw', target]
- diff = rh.utils.run_command(cmd, cwd=path, capture_output=True).output
+ diff = rh.utils.run(cmd, cwd=path, capture_output=True).stdout
diff_lines = diff.strip().splitlines()
for line in diff_lines:
match = DIFF_RE.match(line)
if not match:
raise ValueError('Failed to parse diff output: %s' % line)
- diff = RawDiffEntry(**match.groupdict())
- diff.src_mode = int(diff.src_mode)
- diff.dst_mode = int(diff.dst_mode)
- diff.file = diff.dst_file if diff.dst_file else diff.src_file
- entries.append(diff)
+ rawdiff = RawDiffEntry(**match.groupdict())
+ rawdiff.src_mode = int(rawdiff.src_mode)
+ rawdiff.dst_mode = int(rawdiff.dst_mode)
+ rawdiff.file = (rawdiff.dst_file
+ if rawdiff.dst_file else rawdiff.src_file)
+ entries.append(rawdiff)
return entries
@@ -175,13 +175,13 @@
cmd = ['git', 'log', '%s..' % get_upstream_branch(), '--format=%H']
if ignore_merged_commits:
cmd.append('--first-parent')
- return rh.utils.run_command(cmd, capture_output=True).output.split()
+ return rh.utils.run(cmd, capture_output=True).stdout.split()
def get_commit_desc(commit):
"""Returns the full commit message of a commit."""
cmd = ['git', 'log', '--format=%B', commit + '^!']
- return rh.utils.run_command(cmd, capture_output=True).output
+ return rh.utils.run(cmd, capture_output=True).stdout
def find_repo_root(path=None):
@@ -202,5 +202,5 @@
def is_git_repository(path):
"""Returns True if the path is a valid git repository."""
cmd = ['git', 'rev-parse', '--resolve-git-dir', os.path.join(path, '.git')]
- result = rh.utils.run_command(cmd, quiet=True, error_code_ok=True)
+ result = rh.utils.run(cmd, capture_output=True, check=False)
return result.returncode == 0
diff --git a/rh/hooks.py b/rh/hooks.py
index bf70643..fd86dc0 100644
--- a/rh/hooks.py
+++ b/rh/hooks.py
@@ -29,8 +29,9 @@
del _path
# pylint: disable=wrong-import-position
-import rh.results
import rh.git
+import rh.results
+from rh.sixish import string_types
import rh.utils
@@ -72,7 +73,7 @@
for key, val in replacements.items():
var = '${%s}' % (key,)
if arg == var:
- if isinstance(val, str):
+ if isinstance(val, string_types):
ret.append(val)
else:
ret.extend(val)
@@ -82,10 +83,9 @@
# If no exact matches, do an inline replacement.
def replace(m):
val = self.get(m.group(1))
- if isinstance(val, str):
+ if isinstance(val, string_types):
return val
- else:
- return ' '.join(val)
+ return ' '.join(val)
ret.append(re.sub(r'\$\{(%s)\}' % ('|'.join(all_vars),),
replace, arg))
@@ -186,13 +186,13 @@
return self.expand_vars([tool_path])[0]
-def _run_command(cmd, **kwargs):
+def _run(cmd, **kwargs):
"""Helper command for checks that tend to gather output."""
kwargs.setdefault('redirect_stderr', True)
kwargs.setdefault('combine_stdout_stderr', True)
kwargs.setdefault('capture_output', True)
- kwargs.setdefault('error_code_ok', True)
- return rh.utils.run_command(cmd, **kwargs)
+ kwargs.setdefault('check', False)
+ return rh.utils.run(cmd, **kwargs)
def _match_regex_list(subject, expressions):
@@ -246,9 +246,9 @@
system = platform.system()
if 'Darwin' in system or 'Macintosh' in system:
return 'darwin-x86'
- else:
- # TODO: Add more values if needed.
- return 'linux-x86'
+
+ # TODO: Add more values if needed.
+ return 'linux-x86'
def _fixup_func_caller(cmd, **kwargs):
@@ -259,9 +259,9 @@
parameter in HookCommandResult.
"""
def wrapper():
- result = _run_command(cmd, **kwargs)
+ result = _run(cmd, **kwargs)
if result.returncode not in (None, 0):
- return result.output
+ return result.stdout
return None
return wrapper
@@ -269,7 +269,7 @@
def _check_cmd(hook_name, project, commit, cmd, fixup_func=None, **kwargs):
"""Runs |cmd| and returns its result as a HookCommandResult."""
return [rh.results.HookCommandResult(hook_name, project, commit,
- _run_command(cmd, **kwargs),
+ _run(cmd, **kwargs),
fixup_func=fixup_func)]
@@ -287,6 +287,25 @@
**kwargs)
+def check_bpfmt(project, commit, _desc, diff, options=None):
+ """Checks that Blueprint files are formatted with bpfmt."""
+ filtered = _filter_diff(diff, [r'\.bp$'])
+ if not filtered:
+ return None
+
+ bpfmt = options.tool_path('bpfmt')
+ cmd = [bpfmt, '-l'] + options.args((), filtered)
+ ret = []
+ for d in filtered:
+ data = rh.git.get_file_content(commit, d.file)
+ result = _run(cmd, input=data)
+ if result.stdout:
+ ret.append(rh.results.HookResult(
+ 'bpfmt', project, commit, error=result.stdout,
+ files=(d.file,)))
+ return ret
+
+
def check_checkpatch(project, commit, _desc, diff, options=None):
"""Run |diff| through the kernel's checkpatch.pl tool."""
tool = get_helper_path('checkpatch.pl')
@@ -343,7 +362,7 @@
error = ('Commit message is missing a "%s:" line. It must match the\n'
'following case-sensitive regex:\n\n %s') % (field, regex)
else:
- return
+ return None
return [rh.results.HookResult('commit msg: "%s:" check' % (field,),
project, commit, error=error)]
@@ -363,14 +382,14 @@
if check_re.match(line):
found.append(line)
- if len(found) == 0:
+ if not found:
error = ('Commit message is missing a "%s:" line. It must match the\n'
'following case-sensitive regex:\n\n %s') % (field, regex)
elif len(found) > 1:
error = ('Commit message has too many "%s:" lines. There can be only '
'one.') % (field,)
else:
- return
+ return None
return [rh.results.HookResult('commit msg: "%s:" check' % (field,),
project, commit, error=error)]
@@ -407,7 +426,7 @@
filtered = _filter_diff(diff, [r'\.apk$'])
if not filtered:
- return
+ return None
regexes = [
r'^package: .*$',
@@ -427,7 +446,7 @@
if missing:
error = PREBUILT_APK_MSG % '\n '.join(missing)
else:
- return
+ return None
return [rh.results.HookResult('commit msg: "prebuilt apk:" check',
project, commit, error=error)]
@@ -479,7 +498,7 @@
if not found:
error = TEST_MSG % (regex)
else:
- return
+ return None
return [rh.results.HookResult('commit msg: "%s:" check' % (field,),
project, commit, error=error)]
@@ -491,7 +510,7 @@
# but cpplint would just ignore them.
filtered = _filter_diff(diff, [r'\.(cc|h|cpp|cu|cuh)$'])
if not filtered:
- return
+ return None
cpplint = options.tool_path('cpplint')
cmd = [cpplint] + options.args(('${PREUPLOAD_FILES}',), filtered)
@@ -502,17 +521,17 @@
"""Checks that Go files are formatted with gofmt."""
filtered = _filter_diff(diff, [r'\.go$'])
if not filtered:
- return
+ return None
gofmt = options.tool_path('gofmt')
cmd = [gofmt, '-l'] + options.args((), filtered)
ret = []
for d in filtered:
data = rh.git.get_file_content(commit, d.file)
- result = _run_command(cmd, input=data)
- if result.output:
+ result = _run(cmd, input=data)
+ if result.stdout:
ret.append(rh.results.HookResult(
- 'gofmt', project, commit, error=result.output,
+ 'gofmt', project, commit, error=result.stdout,
files=(d.file,)))
return ret
@@ -524,7 +543,7 @@
filtered = _filter_diff(diff, [r'\.json$'])
if not filtered:
- return
+ return None
ret = []
for d in filtered:
@@ -538,20 +557,35 @@
return ret
-def check_pylint(project, commit, _desc, diff, options=None):
+def _check_pylint(project, commit, _desc, diff, extra_args=None, options=None):
"""Run pylint."""
filtered = _filter_diff(diff, [r'\.py$'])
if not filtered:
- return
+ return None
+
+ if extra_args is None:
+ extra_args = []
pylint = options.tool_path('pylint')
cmd = [
get_helper_path('pylint.py'),
'--executable-path', pylint,
- ] + options.args(('${PREUPLOAD_FILES}',), filtered)
+ ] + extra_args + options.args(('${PREUPLOAD_FILES}',), filtered)
return _check_cmd('pylint', project, commit, cmd)
+def check_pylint2(project, commit, desc, diff, options=None):
+ """Run pylint through Python 2."""
+ return _check_pylint(project, commit, desc, diff, options=options)
+
+
+def check_pylint3(project, commit, desc, diff, options=None):
+ """Run pylint through Python 3."""
+ return _check_pylint(project, commit, desc, diff,
+ extra_args=['--executable-path=pylint3'],
+ options=options)
+
+
def check_xmllint(project, commit, _desc, diff, options=None):
"""Run xmllint."""
# XXX: Should we drop most of these and probe for <?xml> tags?
@@ -587,7 +621,7 @@
filtered = _filter_diff(diff, [r'\.(%s)$' % '|'.join(extensions)])
if not filtered:
- return
+ return None
# TODO: Figure out how to integrate schema validation.
# XXX: Should we use python's XML libs instead?
@@ -602,11 +636,12 @@
raise ValueError('Android TEST_MAPPING check takes no options')
filtered = _filter_diff(diff, [r'(^|.*/)TEST_MAPPING$'])
if not filtered:
- return
+ return None
testmapping_format = options.tool_path('android-test-mapping-format')
+ testmapping_args = ['--commit', commit]
cmd = [testmapping_format] + options.args(
- (project.dir, '${PREUPLOAD_FILES}',), filtered)
+ (project.dir, '${PREUPLOAD_FILES}'), filtered) + testmapping_args
return _check_cmd('android-test-mapping-format', project, commit, cmd)
@@ -614,6 +649,7 @@
# Note: Make sure to keep the top level README.md up to date when adding more!
BUILTIN_HOOKS = {
'android_test_mapping_format': check_android_test_mapping,
+ 'bpfmt': check_bpfmt,
'checkpatch': check_checkpatch,
'clang_format': check_clang_format,
'commit_msg_bug_field': check_commit_msg_bug_field,
@@ -624,7 +660,9 @@
'gofmt': check_gofmt,
'google_java_format': check_google_java_format,
'jsonlint': check_json,
- 'pylint': check_pylint,
+ 'pylint': check_pylint2,
+ 'pylint2': check_pylint2,
+ 'pylint3': check_pylint3,
'xmllint': check_xmllint,
}
@@ -633,6 +671,7 @@
TOOL_PATHS = {
'android-test-mapping-format':
os.path.join(TOOLS_DIR, 'android_test_mapping_format.py'),
+ 'bpfmt': 'bpfmt',
'clang-format': 'clang-format',
'cpplint': os.path.join(TOOLS_DIR, 'cpplint.py'),
'git-clang-format': 'git-clang-format',
diff --git a/rh/hooks_unittest.py b/rh/hooks_unittest.py
index b7b5c86..4124f10 100755
--- a/rh/hooks_unittest.py
+++ b/rh/hooks_unittest.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright 2016 The Android Open Source Project
#
@@ -22,8 +22,6 @@
import sys
import unittest
-import mock
-
_path = os.path.realpath(__file__ + '/../..')
if sys.path[0] != _path:
sys.path.insert(0, _path)
@@ -33,8 +31,10 @@
# relative imports because this is an executable program, not a module.
# pylint: disable=wrong-import-position
import rh
-import rh.hooks
import rh.config
+import rh.hooks
+from rh.sixish import mock
+from rh.sixish import string_types
class HooksDocsTests(unittest.TestCase):
@@ -52,16 +52,18 @@
"""Extract the |section| text out of the readme."""
ret = []
in_section = False
- for line in open(self.readme):
- if not in_section:
- # Look for the section like "## [Tool Paths]".
- if line.startswith('#') and line.lstrip('#').strip() == section:
- in_section = True
- else:
- # Once we hit the next section (higher or lower), break.
- if line[0] == '#':
- break
- ret.append(line)
+ with open(self.readme) as fp:
+ for line in fp:
+ if not in_section:
+ # Look for the section like "## [Tool Paths]".
+ if (line.startswith('#') and
+ line.lstrip('#').strip() == section):
+ in_section = True
+ else:
+ # Once we hit the next section (higher or lower), break.
+ if line[0] == '#':
+ break
+ ret.append(line)
return ''.join(ret)
def testBuiltinHooks(self):
@@ -211,10 +213,10 @@
"""Verify misc utility functions."""
def testRunCommand(self):
- """Check _run_command behavior."""
+ """Check _run behavior."""
# Most testing is done against the utils.RunCommand already.
# pylint: disable=protected-access
- ret = rh.hooks._run_command(['true'])
+ ret = rh.hooks._run(['true'])
self.assertEqual(ret.returncode, 0)
def testBuildOs(self):
@@ -222,19 +224,19 @@
# Just verify it returns something and doesn't crash.
# pylint: disable=protected-access
ret = rh.hooks._get_build_os_name()
- self.assertTrue(isinstance(ret, str))
+ self.assertTrue(isinstance(ret, string_types))
self.assertNotEqual(ret, '')
def testGetHelperPath(self):
"""Check get_helper_path behavior."""
# Just verify it doesn't crash. It's a dirt simple func.
ret = rh.hooks.get_helper_path('booga')
- self.assertTrue(isinstance(ret, str))
+ self.assertTrue(isinstance(ret, string_types))
self.assertNotEqual(ret, '')
-@mock.patch.object(rh.utils, 'run_command')
+@mock.patch.object(rh.utils, 'run')
@mock.patch.object(rh.hooks, '_check_cmd', return_value=['check_cmd'])
class BuiltinHooksTests(unittest.TestCase):
"""Verify the builtin hooks."""
@@ -290,6 +292,20 @@
self.assertIn('test_%s' % (hook,), dir(self),
msg='Missing unittest for builtin hook %s' % (hook,))
+ def test_bpfmt(self, mock_check, _mock_run):
+ """Verify the bpfmt builtin hook."""
+ # First call should do nothing as there are no files to check.
+ ret = rh.hooks.check_bpfmt(
+ self.project, 'commit', 'desc', (), options=self.options)
+ self.assertIsNone(ret)
+ self.assertFalse(mock_check.called)
+
+ # Second call will have some results.
+ diff = [rh.git.RawDiffEntry(file='Android.bp')]
+ ret = rh.hooks.check_bpfmt(
+ self.project, 'commit', 'desc', diff, options=self.options)
+ self.assertIsNotNone(ret)
+
def test_checkpatch(self, mock_check, _mock_run):
"""Verify the checkpatch builtin hook."""
ret = rh.hooks.check_checkpatch(
@@ -323,6 +339,8 @@
'subj',
'subj\n\nBUG=1234\n',
'subj\n\nBUG: 1234\n',
+ 'subj\n\nBug: N/A\n',
+ 'subj\n\nBug:\n',
))
def test_commit_msg_changeid_field(self, _mock_check, _mock_run):
@@ -494,7 +512,17 @@
def test_pylint(self, mock_check, _mock_run):
"""Verify the pylint builtin hook."""
- self._test_file_filter(mock_check, rh.hooks.check_pylint,
+ self._test_file_filter(mock_check, rh.hooks.check_pylint2,
+ ('foo.py',))
+
+ def test_pylint2(self, mock_check, _mock_run):
+ """Verify the pylint2 builtin hook."""
+ self._test_file_filter(mock_check, rh.hooks.check_pylint2,
+ ('foo.py',))
+
+ def test_pylint3(self, mock_check, _mock_run):
+ """Verify the pylint3 builtin hook."""
+ self._test_file_filter(mock_check, rh.hooks.check_pylint3,
('foo.py',))
def test_xmllint(self, mock_check, _mock_run):
diff --git a/rh/results.py b/rh/results.py
index 7ce48d6..bdac83c 100644
--- a/rh/results.py
+++ b/rh/results.py
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-"""Common errors thrown when repo presubmit checks fail."""
+"""Common errors thrown when repo preupload checks fail."""
from __future__ import print_function
@@ -54,6 +54,7 @@
def __bool__(self):
return bool(self.error)
+ # pylint: disable=nonzero-method
def __nonzero__(self):
"""Python 2/3 glue."""
return self.__bool__()
@@ -63,12 +64,12 @@
class HookCommandResult(HookResult):
- """A single hook result based on a CommandResult."""
+ """A single hook result based on a CompletedProcess."""
def __init__(self, hook, project, commit, result, files=(),
fixup_func=None):
HookResult.__init__(self, hook, project, commit,
- result.error if result.error else result.output,
+ result.stderr if result.stderr else result.stdout,
files=files, fixup_func=fixup_func)
self.result = result
diff --git a/rh/shell.py b/rh/shell.py
index 6c943d3..f466f63 100644
--- a/rh/shell.py
+++ b/rh/shell.py
@@ -25,6 +25,9 @@
sys.path.insert(0, _path)
del _path
+# pylint: disable=wrong-import-position
+from rh.sixish import string_types
+
# For use by ShellQuote. Match all characters that the shell might treat
# specially. This means a number of things:
@@ -68,28 +71,26 @@
Returns:
A safely (possibly quoted) string.
"""
- s = s.encode('utf-8')
+ if isinstance(s, bytes):
+ s = s.encode('utf-8')
# See if no quoting is needed so we can return the string as-is.
for c in s:
if c in _SHELL_QUOTABLE_CHARS:
break
else:
- if not s:
- return "''"
- else:
- return s
+ return s if s else u"''"
# See if we can use single quotes first. Output is nicer.
if "'" not in s:
- return "'%s'" % s
+ return u"'%s'" % s
# Have to use double quotes. Escape the few chars that still expand when
# used inside of double quotes.
for c in _SHELL_ESCAPE_CHARS:
if c in s:
s = s.replace(c, r'\%s' % c)
- return '"%s"' % s
+ return u'"%s"' % s
def shell_unquote(s):
@@ -155,11 +156,11 @@
if sval is None:
return default
- if isinstance(sval, basestring):
+ if isinstance(sval, string_types):
s = sval.lower()
if s in ('yes', 'y', '1', 'true'):
return True
- elif s in ('no', 'n', '0', 'false'):
+ if s in ('no', 'n', '0', 'false'):
return False
raise ValueError('Could not decode as a boolean value: %r' % (sval,))
diff --git a/rh/shell_unittest.py b/rh/shell_unittest.py
index 7fc0f91..47182a5 100755
--- a/rh/shell_unittest.py
+++ b/rh/shell_unittest.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright 2016 The Android Open Source Project
#
@@ -49,7 +49,7 @@
def _testData(self, functor, tests, check_type=True):
"""Process a dict of test data."""
- for test_output, test_input in tests.iteritems():
+ for test_output, test_input in tests.items():
result = functor(test_input)
self._assertEqual(functor.__name__, test_input, test_output, result)
@@ -68,8 +68,8 @@
# Dict of expected output strings to input lists.
tests_quote = {
"''": '',
- 'a': unicode('a'),
- "'a b c'": unicode('a b c'),
+ 'a': u'a',
+ "'a b c'": u'a b c',
"'a\tb'": 'a\tb',
"'/a$file'": '/a$file',
"'/a#file'": '/a#file',
@@ -95,7 +95,7 @@
# Test that the operations are reversible.
self._testData(aux, {k: k for k in tests_quote.values()}, False)
- self._testData(aux, {k: k for k in tests_quote.keys()}, False)
+ self._testData(aux, {k: k for k in tests_quote}, False)
class CmdToStrTest(DiffTestCase):
@@ -108,7 +108,7 @@
r"'a b' c": ['a b', 'c'],
r'''a "b'c"''': ['a', "b'c"],
r'''a "/'\$b" 'a b c' "xy'z"''':
- [unicode('a'), "/'$b", 'a b c', "xy'z"],
+ [u'a', "/'$b", 'a b c', "xy'z"],
'': [],
}
self._testData(rh.shell.cmd_to_str, tests)
diff --git a/rh/signals.py b/rh/signals.py
index 80e5da7..45d4e8a 100644
--- a/rh/signals.py
+++ b/rh/signals.py
@@ -36,53 +36,9 @@
"""
if handler in (None, signal.SIG_IGN):
return True
- elif handler == signal.SIG_DFL:
+ if handler == signal.SIG_DFL:
# This scenario is a fairly painful to handle fully, thus we just
# state we couldn't handle it and leave it to client code.
return False
handler(signum, frame)
return True
-
-
-def signal_module_usable(_signal=signal.signal, _SIGUSR1=signal.SIGUSR1):
- """Verify that the signal module is usable and won't segfault on us.
-
- See http://bugs.python.org/issue14173. This function detects if the
- signals module is no longer safe to use (which only occurs during
- final stages of the interpreter shutdown) and heads off a segfault
- if signal.* was accessed.
-
- This shouldn't be used by anything other than functionality that is
- known and unavoidably invoked by finalizer code during python shutdown.
-
- Finally, the default args here are intentionally binding what we need
- from the signal module to do the necessary test; invoking code shouldn't
- pass any options, nor should any developer ever remove those default
- options.
-
- Note that this functionality is intended to be removed just as soon
- as all consuming code installs their own SIGTERM handlers.
- """
- # Track any signals we receive while doing the check.
- received, actual = [], None
- def handler(signum, frame):
- received.append([signum, frame])
- try:
- # Play with sigusr1, since it's not particularly used.
- actual = _signal(_SIGUSR1, handler)
- _signal(_SIGUSR1, actual)
- return True
- except (TypeError, AttributeError, SystemError, ValueError):
- # The first three exceptions can be thrown depending on the state of the
- # signal module internal Handlers array; we catch all, and interpret it
- # as if we were invoked during sys.exit cleanup.
- # The last exception can be thrown if we're trying to be used in a
- # thread which is not the main one. This can come up with standard
- # python modules such as BaseHTTPServer.HTTPServer.
- return False
- finally:
- # And now relay those signals to the original handler. Not all may
- # be delivered- the first may throw an exception for example. Not our
- # problem however.
- for signum, frame in received:
- actual(signum, frame)
diff --git a/rh/sixish.py b/rh/sixish.py
new file mode 100644
index 0000000..693598c
--- /dev/null
+++ b/rh/sixish.py
@@ -0,0 +1,69 @@
+# -*- coding:utf-8 -*-
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Local version of the standard six module.
+
+Since this module is often unavailable by default on distros (or only available
+for specific versions), manage our own little copy.
+"""
+
+from __future__ import print_function
+
+import os
+import sys
+
+_path = os.path.realpath(__file__ + '/../..')
+if sys.path[0] != _path:
+ sys.path.insert(0, _path)
+del _path
+
+
+# Our attempts to wrap things below for diff versions of python confuse pylint.
+# pylint: disable=import-error,no-name-in-module,unused-import
+
+
+try:
+ import configparser
+except ImportError:
+ import ConfigParser as configparser
+
+
+# We allow mock to be disabled as it's not needed by non-unittest code.
+try:
+ import unittest.mock as mock
+except ImportError:
+ try:
+ import mock
+ except ImportError:
+ pass
+
+
+if sys.version_info.major < 3:
+ # pylint: disable=basestring-builtin,undefined-variable
+ string_types = basestring
+else:
+ string_types = str
+
+
+def setenv(var, val):
+ """Set |var| in the environment to |val|.
+
+ Python 2 wants ASCII strings, not unicode.
+ Python 3 only wants unicode strings.
+ """
+ try:
+ os.environ[var] = val
+ except UnicodeEncodeError:
+ os.environ[var] = val.encode('utf-8')
diff --git a/rh/terminal.py b/rh/terminal.py
index 686a7ca..c549f12 100644
--- a/rh/terminal.py
+++ b/rh/terminal.py
@@ -142,11 +142,12 @@
def get_input(prompt):
"""Python 2/3 glue for raw_input/input differences."""
try:
+ # pylint: disable=raw_input-builtin
return raw_input(prompt)
except NameError:
# Python 3 renamed raw_input() to input(), which is safe to call since
# it does not evaluate the input.
- # pylint: disable=bad-builtin
+ # pylint: disable=bad-builtin,input-builtin
return input(prompt)
diff --git a/rh/utils.py b/rh/utils.py
index 5328ea7..c6c77e0 100644
--- a/rh/utils.py
+++ b/rh/utils.py
@@ -34,16 +34,49 @@
# pylint: disable=wrong-import-position
import rh.shell
import rh.signals
+from rh.sixish import string_types
-class CommandResult(object):
- """An object to store various attributes of a child process."""
+def timedelta_str(delta):
+ """A less noisy timedelta.__str__.
- def __init__(self, cmd=None, error=None, output=None, returncode=None):
- self.cmd = cmd
- self.error = error
- self.output = output
- self.returncode = returncode
+ The default timedelta stringification contains a lot of leading zeros and
+ uses microsecond resolution. This makes for noisy output.
+ """
+ total = delta.total_seconds()
+ hours, rem = divmod(total, 3600)
+ mins, secs = divmod(rem, 60)
+ ret = '%i.%03is' % (secs, delta.microseconds // 1000)
+ if mins:
+ ret = '%im%s' % (mins, ret)
+ if hours:
+ ret = '%ih%s' % (hours, ret)
+ return ret
+
+
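
As a reference point (illustrative values only, assuming the implementation above), `timedelta_str()` renders durations like so:

```
import datetime

from rh.utils import timedelta_str

# Seconds always carry millisecond precision; minutes/hours only appear
# when non-zero.
print(timedelta_str(datetime.timedelta(seconds=0.412)))              # 0.412s
print(timedelta_str(datetime.timedelta(minutes=1, seconds=15.123)))  # 1m15.123s
print(timedelta_str(datetime.timedelta(hours=2, minutes=5)))         # 2h5m0.000s
```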
+class CompletedProcess(getattr(subprocess, 'CompletedProcess', object)):
+ """An object to store various attributes of a child process.
+
+ This is akin to subprocess.CompletedProcess.
+ """
+
+ # The linter is confused by the getattr usage above.
+ # TODO(vapier): Drop this once we're Python 3-only and we drop getattr.
+ # pylint: disable=bad-option-value,super-on-old-class
+ def __init__(self, args=None, returncode=None, stdout=None, stderr=None):
+ if sys.version_info.major < 3:
+ self.args = args
+ self.stdout = stdout
+ self.stderr = stderr
+ self.returncode = returncode
+ else:
+ super(CompletedProcess, self).__init__(
+ args=args, returncode=returncode, stdout=stdout, stderr=stderr)
+
+ @property
+ def cmd(self):
+ """Alias to self.args to better match other subprocess APIs."""
+ return self.args
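
A minimal consumption sketch (illustrative, with an arbitrary git command): call sites now read the subprocess-style `.stdout`/`.returncode` fields instead of the old `CommandResult.output`:

```
import rh.utils

# capture_output=True mirrors the call sites updated elsewhere in this change.
result = rh.utils.run(['git', 'rev-parse', 'HEAD'], capture_output=True)
print(result.returncode)
print(result.stdout.strip())
```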
@property
def cmdstr(self):
@@ -51,54 +84,67 @@
return rh.shell.cmd_to_str(self.cmd)
-class RunCommandError(Exception):
- """Error caught in RunCommand() method."""
+class CalledProcessError(subprocess.CalledProcessError):
+ """Error caught in run() function.
- def __init__(self, msg, result, exception=None):
- self.msg, self.result, self.exception = msg, result, exception
+ This is akin to subprocess.CalledProcessError. We do not support |output|,
+ only |stdout|.
+
+ Attributes:
+ returncode: The exit code of the process.
+ cmd: The command that triggered this exception.
+ msg: Short explanation of the error.
+ exception: The underlying Exception if available.
+ """
+
+ def __init__(self, returncode, cmd, stdout=None, stderr=None, msg=None,
+ exception=None):
if exception is not None and not isinstance(exception, Exception):
- raise ValueError('exception must be an exception instance; got %r'
- % (exception,))
- Exception.__init__(self, msg)
- self.args = (msg, result, exception)
+ raise TypeError('exception must be an exception instance; got %r'
+ % (exception,))
- def stringify(self, error=True, output=True):
+ super(CalledProcessError, self).__init__(returncode, cmd, stdout)
+ # The parent class will set |output|, so delete it.
+ del self.output
+ # TODO(vapier): When we're Python 3-only, delete this assignment as the
+ # parent handles it for us.
+ self.stdout = stdout
+ # TODO(vapier): When we're Python 3-only, move stderr to the init above.
+ self.stderr = stderr
+ self.msg = msg
+ self.exception = exception
+
+ @property
+ def cmdstr(self):
+ """Return self.cmd as a well shell-quoted string for debugging."""
+ return '' if self.cmd is None else rh.shell.cmd_to_str(self.cmd)
+
+ def stringify(self, stdout=True, stderr=True):
"""Custom method for controlling what is included in stringifying this.
- Each individual argument is the literal name of an attribute
- on the result object; if False, that value is ignored for adding
- to this string content. If true, it'll be incorporated.
-
Args:
- error: See comment about individual arguments above.
- output: See comment about individual arguments above.
+ stdout: Whether to include captured stdout in the return value.
+ stderr: Whether to include captured stderr in the return value.
+
+ Returns:
+ A summary string for this result.
"""
items = [
- 'return code: %s; command: %s' % (
- self.result.returncode, self.result.cmdstr),
+ 'return code: %s; command: %s' % (self.returncode, self.cmdstr),
]
- if error and self.result.error:
- items.append(self.result.error)
- if output and self.result.output:
- items.append(self.result.output)
+ if stderr and self.stderr:
+ items.append(self.stderr)
+ if stdout and self.stdout:
+ items.append(self.stdout)
if self.msg:
items.append(self.msg)
return '\n'.join(items)
def __str__(self):
- # __str__ needs to return ascii, thus force a conversion to be safe.
- return self.stringify().decode('utf-8', 'replace').encode(
- 'ascii', 'xmlcharrefreplace')
-
- def __eq__(self, other):
- return (type(self) == type(other) and
- self.args == other.args)
-
- def __ne__(self, other):
- return not self.__eq__(other)
+ return self.stringify()
-class TerminateRunCommandError(RunCommandError):
+class TerminateCalledProcessError(CalledProcessError):
"""We were signaled to shutdown while running a command.
Client code shouldn't generally know, nor care about this class. It's
@@ -106,7 +152,7 @@
"""
-def sudo_run_command(cmd, user='root', **kwargs):
+def sudo_run(cmd, user='root', **kwargs):
"""Run a command via sudo.
Client code must use this rather than coming up with their own RunCommand
@@ -125,15 +171,19 @@
See RunCommand documentation.
Raises:
- This function may immediately raise RunCommandError if we're operating
+ This function may immediately raise CalledProcessError if we're operating
in a strict sudo context and the API is being misused.
    Barring that, see RunCommand's documentation; it can raise the same things
RunCommand does.
"""
+ # We don't use this anywhere, so it's easier to not bother supporting it.
+ assert not isinstance(cmd, string_types), 'shell commands not supported'
+ assert 'shell' not in kwargs, 'shell=True is not supported'
+
sudo_cmd = ['sudo']
if user == 'root' and os.geteuid() == 0:
- return run_command(cmd, **kwargs)
+ return run(cmd, **kwargs)
if user != 'root':
sudo_cmd += ['-u', user]
@@ -143,24 +193,14 @@
extra_env = kwargs.pop('extra_env', None)
extra_env = {} if extra_env is None else extra_env.copy()
- sudo_cmd.extend('%s=%s' % (k, v) for k, v in extra_env.iteritems())
+ sudo_cmd.extend('%s=%s' % (k, v) for k, v in extra_env.items())
# Finally, block people from passing options to sudo.
sudo_cmd.append('--')
- if isinstance(cmd, basestring):
- # We need to handle shell ourselves so the order is correct:
- # $ sudo [sudo args] -- bash -c '[shell command]'
- # If we let RunCommand take care of it, we'd end up with:
- # $ bash -c 'sudo [sudo args] -- [shell command]'
- shell = kwargs.pop('shell', False)
- if not shell:
- raise Exception('Cannot run a string command without a shell')
- sudo_cmd.extend(['/bin/bash', '-c', cmd])
- else:
- sudo_cmd.extend(cmd)
+ sudo_cmd.extend(cmd)
- return run_command(sudo_cmd, **kwargs)
+ return run(sudo_cmd, **kwargs)
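
A small sketch of the command line the helper builds for a non-root caller; `nobody` and `FOO` are just placeholder values:

```
import rh.utils

# With euid != 0 this expands to: sudo -u nobody FOO=bar -- ls
result = rh.utils.sudo_run(['ls'], user='nobody', extra_env={'FOO': 'bar'},
                           capture_output=True, check=False)
print(result.returncode, result.stdout)
```
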
def _kill_child_process(proc, int_timeout, kill_timeout, cmd, original_handler,
@@ -203,9 +243,8 @@
if not rh.signals.relay_signal(original_handler, signum, frame):
# Mock up our own, matching exit code for signaling.
- cmd_result = CommandResult(cmd=cmd, returncode=signum << 8)
- raise TerminateRunCommandError('Received signal %i' % signum,
- cmd_result)
+ raise TerminateCalledProcessError(
+ signum << 8, cmd, msg='Received signal %i' % signum)
class _Popen(subprocess.Popen):
@@ -221,6 +260,7 @@
process has knowingly been waitpid'd already.
"""
+ # pylint: disable=arguments-differ
def send_signal(self, signum):
if self.returncode is not None:
# The original implementation in Popen allows signaling whatever
@@ -236,10 +276,10 @@
if e.errno == errno.EPERM:
# Kill returns either 0 (signal delivered), or 1 (signal wasn't
# delivered). This isn't particularly informative, but we still
- # need that info to decide what to do, thus error_code_ok=True.
- ret = sudo_run_command(['kill', '-%i' % signum, str(self.pid)],
- redirect_stdout=True,
- redirect_stderr=True, error_code_ok=True)
+ # need that info to decide what to do, thus check=False.
+ ret = sudo_run(['kill', '-%i' % signum, str(self.pid)],
+ redirect_stdout=True,
+ redirect_stderr=True, check=False)
if ret.returncode == 1:
# The kill binary doesn't distinguish between permission
# denied and the pid is missing. Denied can only occur
@@ -257,21 +297,18 @@
raise
-# pylint: disable=redefined-builtin
-def run_command(cmd, error_message=None, redirect_stdout=False,
- redirect_stderr=False, cwd=None, input=None,
- shell=False, env=None, extra_env=None, ignore_sigint=False,
- combine_stdout_stderr=False, log_stdout_to_file=None,
- error_code_ok=False, int_timeout=1, kill_timeout=1,
- stdout_to_pipe=False, capture_output=False,
- quiet=False, close_fds=True):
+# We use the keyword arg |input| which trips up pylint checks.
+# pylint: disable=redefined-builtin,input-builtin
+def run(cmd, redirect_stdout=False, redirect_stderr=False, cwd=None, input=None,
+ shell=False, env=None, extra_env=None, combine_stdout_stderr=False,
+ check=True, int_timeout=1, kill_timeout=1, capture_output=False,
+ close_fds=True):
"""Runs a command.
Args:
cmd: cmd to run. Should be input to subprocess.Popen. If a string, shell
must be true. Otherwise the command must be an array of arguments,
and shell must be false.
- error_message: Prints out this message when an error occurs.
redirect_stdout: Returns the stdout.
redirect_stderr: Holds stderr output until input is communicated.
cwd: The working directory to run this cmd.
@@ -282,51 +319,44 @@
env: If non-None, this is the environment for the new process.
extra_env: If set, this is added to the environment for the new process.
This dictionary is not used to clear any entries though.
- ignore_sigint: If True, we'll ignore signal.SIGINT before calling the
- child. This is the desired behavior if we know our child will handle
- Ctrl-C. If we don't do this, I think we and the child will both get
- Ctrl-C at the same time, which means we'll forcefully kill the child.
combine_stdout_stderr: Combines stdout and stderr streams into stdout.
- log_stdout_to_file: If set, redirects stdout to file specified by this
- path. If |combine_stdout_stderr| is set to True, then stderr will
- also be logged to the specified file.
- error_code_ok: Does not raise an exception when command returns a non-zero
- exit code. Instead, returns the CommandResult object containing the
- exit code.
+ check: Whether to raise an exception when command returns a non-zero exit
+ code, or return the CompletedProcess object containing the exit code.
+ Note: will still raise an exception if the cmd file does not exist.
int_timeout: If we're interrupted, how long (in seconds) should we give
the invoked process to clean up before we send a SIGTERM.
kill_timeout: If we're interrupted, how long (in seconds) should we give
the invoked process to shutdown from a SIGTERM before we SIGKILL it.
- stdout_to_pipe: Redirect stdout to pipe.
capture_output: Set |redirect_stdout| and |redirect_stderr| to True.
- quiet: Set |stdout_to_pipe| and |combine_stdout_stderr| to True.
close_fds: Whether to close all fds before running |cmd|.
Returns:
- A CommandResult object.
+ A CompletedProcess object.
Raises:
- RunCommandError: Raises exception on error with optional error_message.
+ CalledProcessError: Raises exception on error.
"""
if capture_output:
redirect_stdout, redirect_stderr = True, True
- if quiet:
- stdout_to_pipe, combine_stdout_stderr = True, True
-
# Set default for variables.
stdout = None
stderr = None
stdin = None
- cmd_result = CommandResult()
+ result = CompletedProcess()
# Force the timeout to float; in the process, if it's not convertible,
# a self-explanatory exception will be thrown.
kill_timeout = float(kill_timeout)
def _get_tempfile():
+ kwargs = {}
+ if sys.version_info.major < 3:
+ kwargs['bufsize'] = 0
+ else:
+ kwargs['buffering'] = 0
try:
- return tempfile.TemporaryFile(bufsize=0)
+ return tempfile.TemporaryFile(**kwargs)
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
@@ -335,7 +365,7 @@
    # issue in this particular case since our usage guarantees deletion,
# and since this is primarily triggered during hard cgroups
# shutdown.
- return tempfile.TemporaryFile(bufsize=0, dir='/tmp')
+ return tempfile.TemporaryFile(dir='/tmp', **kwargs)
# Modify defaults based on parameters.
# Note that tempfiles must be unbuffered else attempts to read
@@ -343,11 +373,7 @@
# view of the file.
# The Popen API accepts either an int or a file handle for stdout/stderr.
# pylint: disable=redefined-variable-type
- if log_stdout_to_file:
- stdout = open(log_stdout_to_file, 'w+')
- elif stdout_to_pipe:
- stdout = subprocess.PIPE
- elif redirect_stdout:
+ if redirect_stdout:
stdout = _get_tempfile()
if combine_stdout_stderr:
@@ -364,13 +390,14 @@
# If input is a string, we'll create a pipe and send it through that.
# Otherwise we assume it's a file object that can be read from directly.
- if isinstance(input, basestring):
+ if isinstance(input, string_types):
stdin = subprocess.PIPE
+ input = input.encode('utf-8')
elif input is not None:
stdin = input
input = None
- if isinstance(cmd, basestring):
+ if isinstance(cmd, string_types):
if not shell:
raise Exception('Cannot run a string command without a shell')
cmd = ['/bin/bash', '-c', cmd]
@@ -383,134 +410,76 @@
env = env.copy() if env is not None else os.environ.copy()
env.update(extra_env if extra_env else {})
- cmd_result.cmd = cmd
+ result.args = cmd
proc = None
- # Verify that the signals modules is actually usable, and won't segfault
- # upon invocation of getsignal. See signals.SignalModuleUsable for the
- # details and upstream python bug.
- use_signals = rh.signals.signal_module_usable()
try:
proc = _Popen(cmd, cwd=cwd, stdin=stdin, stdout=stdout,
stderr=stderr, shell=False, env=env,
close_fds=close_fds)
- if use_signals:
- old_sigint = signal.getsignal(signal.SIGINT)
- if ignore_sigint:
- handler = signal.SIG_IGN
- else:
- handler = functools.partial(
- _kill_child_process, proc, int_timeout, kill_timeout, cmd,
- old_sigint)
- signal.signal(signal.SIGINT, handler)
+ old_sigint = signal.getsignal(signal.SIGINT)
+ handler = functools.partial(_kill_child_process, proc, int_timeout,
+ kill_timeout, cmd, old_sigint)
+ signal.signal(signal.SIGINT, handler)
- old_sigterm = signal.getsignal(signal.SIGTERM)
- handler = functools.partial(_kill_child_process, proc, int_timeout,
- kill_timeout, cmd, old_sigterm)
- signal.signal(signal.SIGTERM, handler)
+ old_sigterm = signal.getsignal(signal.SIGTERM)
+ handler = functools.partial(_kill_child_process, proc, int_timeout,
+ kill_timeout, cmd, old_sigterm)
+ signal.signal(signal.SIGTERM, handler)
try:
- (cmd_result.output, cmd_result.error) = proc.communicate(input)
+ (result.stdout, result.stderr) = proc.communicate(input)
finally:
- if use_signals:
- signal.signal(signal.SIGINT, old_sigint)
- signal.signal(signal.SIGTERM, old_sigterm)
+ signal.signal(signal.SIGINT, old_sigint)
+ signal.signal(signal.SIGTERM, old_sigterm)
- if stdout and not log_stdout_to_file and not stdout_to_pipe:
+ if stdout:
# The linter is confused by how stdout is a file & an int.
# pylint: disable=maybe-no-member,no-member
stdout.seek(0)
- cmd_result.output = stdout.read()
+ result.stdout = stdout.read()
stdout.close()
if stderr and stderr != subprocess.STDOUT:
# The linter is confused by how stderr is a file & an int.
# pylint: disable=maybe-no-member,no-member
stderr.seek(0)
- cmd_result.error = stderr.read()
+ result.stderr = stderr.read()
stderr.close()
- cmd_result.returncode = proc.returncode
+ result.returncode = proc.returncode
- if not error_code_ok and proc.returncode:
+ if check and proc.returncode:
msg = 'cwd=%s' % cwd
if extra_env:
msg += ', extra env=%s' % extra_env
- if error_message:
- msg += '\n%s' % error_message
- raise RunCommandError(msg, cmd_result)
+ raise CalledProcessError(
+ result.returncode, result.cmd, stdout=result.stdout,
+ stderr=result.stderr, msg=msg)
except OSError as e:
estr = str(e)
if e.errno == errno.EACCES:
estr += '; does the program need `chmod a+x`?'
- if error_code_ok:
- cmd_result = CommandResult(cmd=cmd, error=estr, returncode=255)
+ if not check:
+ result = CompletedProcess(args=cmd, stderr=estr, returncode=255)
else:
- raise RunCommandError(estr, CommandResult(cmd=cmd), exception=e)
+ raise CalledProcessError(
+ result.returncode, result.cmd, stdout=result.stdout,
+ stderr=result.stderr, msg=estr, exception=e)
finally:
if proc is not None:
# Ensure the process is dead.
+ # Some pylint3 versions are confused here.
+ # pylint: disable=too-many-function-args
_kill_child_process(proc, int_timeout, kill_timeout, cmd, None,
None, None)
- return cmd_result
-# pylint: enable=redefined-builtin
+ # Make sure output is returned as a string rather than bytes.
+ if result.stdout is not None:
+ result.stdout = result.stdout.decode('utf-8', 'replace')
+ if result.stderr is not None:
+ result.stderr = result.stderr.decode('utf-8', 'replace')
-
-def collection(classname, **kwargs):
- """Create a new class with mutable named members.
-
- This is like collections.namedtuple, but mutable. Also similar to the
- python 3.3 types.SimpleNamespace.
-
- Example:
- # Declare default values for this new class.
- Foo = collection('Foo', a=0, b=10)
- # Create a new class but set b to 4.
- foo = Foo(b=4)
- # Print out a (will be the default 0) and b (will be 4).
- print('a = %i, b = %i' % (foo.a, foo.b))
- """
-
- def sn_init(self, **kwargs):
- """The new class's __init__ function."""
- # First verify the kwargs don't have excess settings.
- valid_keys = set(self.__slots__[1:])
- these_keys = set(kwargs.keys())
- invalid_keys = these_keys - valid_keys
- if invalid_keys:
- raise TypeError('invalid keyword arguments for this object: %r' %
- invalid_keys)
-
- # Now initialize this object.
- for k in valid_keys:
- setattr(self, k, kwargs.get(k, self.__defaults__[k]))
-
- def sn_repr(self):
- """The new class's __repr__ function."""
- return '%s(%s)' % (classname, ', '.join(
- '%s=%r' % (k, getattr(self, k)) for k in self.__slots__[1:]))
-
- # Give the new class a unique name and then generate the code for it.
- classname = 'Collection_%s' % classname
- expr = '\n'.join((
- 'class %(classname)s(object):',
- ' __slots__ = ["__defaults__", "%(slots)s"]',
- ' __defaults__ = {}',
- )) % {
- 'classname': classname,
- 'slots': '", "'.join(sorted(str(k) for k in kwargs)),
- }
-
- # Create the class in a local namespace as exec requires.
- namespace = {}
- exec expr in namespace # pylint: disable=exec-used
- new_class = namespace[classname]
-
- # Bind the helpers.
- new_class.__defaults__ = kwargs.copy()
- new_class.__init__ = sn_init
- new_class.__repr__ = sn_repr
-
- return new_class
+ return result
+# pylint: enable=redefined-builtin,input-builtin
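
A short sketch of the renamed entry point for callers migrating from run_command; the old error_code_ok=True spelling becomes check=False, and captured output now comes back as decoded strings on stdout/stderr:

```
import rh.utils

result = rh.utils.run(['echo', 'hi'], capture_output=True)
assert result.stdout == 'hi\n' and result.stderr == ''

# Non-zero exits raise CalledProcessError unless check=False is passed.
result = rh.utils.run(['false'], check=False)
print(result.returncode)
```
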
diff --git a/rh/utils_unittest.py b/rh/utils_unittest.py
new file mode 100755
index 0000000..8f50998
--- /dev/null
+++ b/rh/utils_unittest.py
@@ -0,0 +1,213 @@
+#!/usr/bin/env python3
+# -*- coding:utf-8 -*-
+# Copyright 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittests for the utils module."""
+
+from __future__ import print_function
+
+import datetime
+import os
+import sys
+import unittest
+
+_path = os.path.realpath(__file__ + '/../..')
+if sys.path[0] != _path:
+ sys.path.insert(0, _path)
+del _path
+
+# We have to import our local modules after the sys.path tweak. We can't use
+# relative imports because this is an executable program, not a module.
+# pylint: disable=wrong-import-position
+import rh
+import rh.utils
+from rh.sixish import mock
+
+
+class TimeDeltaStrTests(unittest.TestCase):
+ """Verify behavior of timedelta_str object."""
+
+ def test_same(self):
+ """Check timedelta of 0 seconds."""
+ delta = datetime.timedelta(0)
+ self.assertEqual('0.000s', rh.utils.timedelta_str(delta))
+
+ def test_milliseconds(self):
+ """Check timedelta of milliseconds."""
+ delta = datetime.timedelta(seconds=0.123456)
+ self.assertEqual('0.123s', rh.utils.timedelta_str(delta))
+
+ def test_seconds(self):
+ """Check timedelta of seconds."""
+ delta = datetime.timedelta(seconds=12.3)
+ self.assertEqual('12.300s', rh.utils.timedelta_str(delta))
+
+ def test_minutes(self):
+ """Check timedelta of minutes."""
+ delta = datetime.timedelta(seconds=72.3)
+ self.assertEqual('1m12.300s', rh.utils.timedelta_str(delta))
+
+ def test_hours(self):
+ """Check timedelta of hours."""
+ delta = datetime.timedelta(seconds=4000.3)
+ self.assertEqual('1h6m40.300s', rh.utils.timedelta_str(delta))
+
+
+class CompletedProcessTests(unittest.TestCase):
+ """Verify behavior of CompletedProcess object."""
+
+ def test_empty_cmdstr(self):
+ """Check cmdstr with an empty command."""
+ result = rh.utils.CompletedProcess(args=[])
+ self.assertEqual('', result.cmdstr)
+
+ def test_basic_cmdstr(self):
+ """Check cmdstr with a basic command command."""
+ result = rh.utils.CompletedProcess(args=['ls', 'a b'])
+ self.assertEqual("ls 'a b'", result.cmdstr)
+
+ def test_str(self):
+ """Check str() handling."""
+ # We don't enforce much, just that it doesn't crash.
+ result = rh.utils.CompletedProcess()
+ self.assertNotEqual('', str(result))
+ result = rh.utils.CompletedProcess(args=[])
+ self.assertNotEqual('', str(result))
+
+ def test_repr(self):
+ """Check repr() handling."""
+ # We don't enforce much, just that it doesn't crash.
+ result = rh.utils.CompletedProcess()
+ self.assertNotEqual('', repr(result))
+ result = rh.utils.CompletedProcess(args=[])
+ self.assertNotEqual('', repr(result))
+
+
+class CalledProcessErrorTests(unittest.TestCase):
+ """Verify behavior of CalledProcessError object."""
+
+ def test_basic(self):
+ """Basic test we can create a normal instance."""
+ rh.utils.CalledProcessError(0, ['mycmd'])
+ rh.utils.CalledProcessError(1, ['mycmd'], exception=Exception('bad'))
+
+ def test_stringify(self):
+ """Check stringify() handling."""
+ # We don't assert much so we leave flexibility in changing format.
+ err = rh.utils.CalledProcessError(0, ['mycmd'])
+ self.assertIn('mycmd', err.stringify())
+ err = rh.utils.CalledProcessError(
+ 0, ['mycmd'], exception=Exception('bad'))
+ self.assertIn('mycmd', err.stringify())
+
+ def test_str(self):
+ """Check str() handling."""
+ # We don't assert much so we leave flexibility in changing format.
+ err = rh.utils.CalledProcessError(0, ['mycmd'])
+ self.assertIn('mycmd', str(err))
+ err = rh.utils.CalledProcessError(
+ 0, ['mycmd'], exception=Exception('bad'))
+ self.assertIn('mycmd', str(err))
+
+ def test_repr(self):
+ """Check repr() handling."""
+ # We don't assert much so we leave flexibility in changing format.
+ err = rh.utils.CalledProcessError(0, ['mycmd'])
+ self.assertNotEqual('', repr(err))
+ err = rh.utils.CalledProcessError(
+ 0, ['mycmd'], exception=Exception('bad'))
+ self.assertNotEqual('', repr(err))
+
+
+# We shouldn't require sudo to run unittests :).
+@mock.patch.object(rh.utils, 'run')
+@mock.patch.object(os, 'geteuid', return_value=1000)
+class SudoRunCommandTests(unittest.TestCase):
+ """Verify behavior of sudo_run helper."""
+
+ def test_run_as_root_as_root(self, mock_geteuid, mock_run):
+ """Check behavior when we're already root."""
+ mock_geteuid.return_value = 0
+ ret = rh.utils.sudo_run(['ls'], user='root')
+ self.assertIsNotNone(ret)
+ args, _kwargs = mock_run.call_args
+ self.assertEqual((['ls'],), args)
+
+ def test_run_as_root_as_nonroot(self, _mock_geteuid, mock_run):
+ """Check behavior when we're not already root."""
+ ret = rh.utils.sudo_run(['ls'], user='root')
+ self.assertIsNotNone(ret)
+ args, _kwargs = mock_run.call_args
+ self.assertEqual((['sudo', '--', 'ls'],), args)
+
+ def test_run_as_nonroot_as_nonroot(self, _mock_geteuid, mock_run):
+ """Check behavior when we're not already root."""
+ ret = rh.utils.sudo_run(['ls'], user='nobody')
+ self.assertIsNotNone(ret)
+ args, _kwargs = mock_run.call_args
+ self.assertEqual((['sudo', '-u', 'nobody', '--', 'ls'],), args)
+
+ def test_env(self, _mock_geteuid, mock_run):
+ """Check passing through env vars."""
+ ret = rh.utils.sudo_run(['ls'], extra_env={'FOO': 'bar'})
+ self.assertIsNotNone(ret)
+ args, _kwargs = mock_run.call_args
+ self.assertEqual((['sudo', 'FOO=bar', '--', 'ls'],), args)
+
+ def test_shell(self, _mock_geteuid, _mock_run):
+ """Check attempts to use shell code are rejected."""
+ with self.assertRaises(AssertionError):
+ rh.utils.sudo_run('foo')
+ with self.assertRaises(AssertionError):
+ rh.utils.sudo_run(['ls'], shell=True)
+
+
+class RunCommandTests(unittest.TestCase):
+ """Verify behavior of run helper."""
+
+ def test_basic(self):
+ """Simple basic test."""
+ ret = rh.utils.run(['true'])
+ self.assertEqual('true', ret.cmdstr)
+ self.assertIsNone(ret.stdout)
+ self.assertIsNone(ret.stderr)
+
+ def test_stdout_capture(self):
+ """Verify output capturing works."""
+ ret = rh.utils.run(['echo', 'hi'], redirect_stdout=True)
+ self.assertEqual('hi\n', ret.stdout)
+ self.assertIsNone(ret.stderr)
+
+ def test_stderr_capture(self):
+ """Verify stderr capturing works."""
+ ret = rh.utils.run(['sh', '-c', 'echo hi >&2'], redirect_stderr=True)
+ self.assertIsNone(ret.stdout)
+ self.assertEqual('hi\n', ret.stderr)
+
+ def test_stdout_utf8(self):
+ """Verify reading UTF-8 data works."""
+ ret = rh.utils.run(['printf', r'\xc3\x9f'], redirect_stdout=True)
+ self.assertEqual(u'ß', ret.stdout)
+ self.assertIsNone(ret.stderr)
+
+ def test_stdin_utf8(self):
+ """Verify writing UTF-8 data works."""
+ ret = rh.utils.run(['cat'], redirect_stdout=True, input=u'ß')
+ self.assertEqual(u'ß', ret.stdout)
+ self.assertIsNone(ret.stderr)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tools/android_test_mapping_format.py b/tools/android_test_mapping_format.py
index 0b78dd5..14b02f5 100755
--- a/tools/android_test_mapping_format.py
+++ b/tools/android_test_mapping_format.py
@@ -28,8 +28,19 @@
import argparse
import json
import os
+import re
import sys
+_path = os.path.realpath(__file__ + '/../..')
+if sys.path[0] != _path:
+ sys.path.insert(0, _path)
+del _path
+
+# We have to import our local modules after the sys.path tweak. We can't use
+# relative imports because this is an executable program, not a module.
+# pylint: disable=wrong-import-position
+import rh.git
+
IMPORTS = 'imports'
NAME = 'name'
OPTIONS = 'options'
@@ -41,6 +52,16 @@
'https://source.android.com/compatibility/tests/development/'
'test-mapping')
+# Pattern used to identify line-level '//'-format comments in TEST_MAPPING files.
+_COMMENTS_RE = re.compile(r'^\s*//')
+
+
+if sys.version_info.major < 3:
+ # pylint: disable=basestring-builtin,undefined-variable
+ string_types = basestring
+else:
+ string_types = str
+
class Error(Exception):
"""Base exception for all custom exceptions in this module."""
@@ -50,6 +71,19 @@
"""Exception to raise when detecting an invalid TEST_MAPPING file."""
+def filter_comments(json_data):
+ """Remove '//'-format comments in TEST_MAPPING file to valid format.
+
+ Args:
+ json_data: TEST_MAPPING file content (as a string).
+
+ Returns:
+ Valid json string without comments.
+ """
+ return ''.join('\n' if _COMMENTS_RE.match(x) else x for x in
+ json_data.splitlines())
+
+
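
A tiny illustration of the filtering; comment lines collapse to blank lines while everything else stays parseable by json.loads():

```
import json

import android_test_mapping_format  # assumes tools/ is on sys.path

raw = '// per-project note\n{"presubmit": []}\n'
filtered = android_test_mapping_format.filter_comments(raw)
assert filtered == '\n{"presubmit": []}'
assert json.loads(filtered) == {'presubmit': []}
```
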
def _validate_import(entry, test_mapping_file):
"""Validate an import setting.
@@ -65,7 +99,7 @@
'Invalid import config in test mapping file %s. each import can '
'only have one `path` setting. Failed entry: %s' %
(test_mapping_file, entry))
- if entry.keys()[0] != PATH:
+ if list(entry.keys())[0] != PATH:
raise InvalidTestMappingError(
'Invalid import config in test mapping file %s. import can only '
'have one `path` setting. Failed entry: %s' %
@@ -94,14 +128,14 @@
'Failed test config: %s' % (test_mapping_file, test))
preferred_targets = test.get(PREFERRED_TARGETS, [])
if (not isinstance(preferred_targets, list) or
- any(not isinstance(t, basestring) for t in preferred_targets)):
+ any(not isinstance(t, string_types) for t in preferred_targets)):
raise InvalidTestMappingError(
'Invalid test config in test mapping file %s. `preferred_targets` '
'setting in test config can only be a list of strings. Failed test '
'config: %s' % (test_mapping_file, test))
file_patterns = test.get(FILE_PATTERNS, [])
if (not isinstance(file_patterns, list) or
- any(not isinstance(p, basestring) for p in file_patterns)):
+ any(not isinstance(p, string_types) for p in file_patterns)):
raise InvalidTestMappingError(
'Invalid test config in test mapping file %s. `file_patterns` '
'setting in test config can only be a list of strings. Failed test '
@@ -117,8 +151,7 @@
def _load_file(test_mapping_file):
"""Load a TEST_MAPPING file as a json file."""
try:
- with open(test_mapping_file) as file_obj:
- return json.load(file_obj)
+ return json.loads(filter_comments(test_mapping_file))
except ValueError as e:
# The file is not a valid JSON file.
print(
@@ -143,6 +176,8 @@
def get_parser():
"""Return a command line parser."""
parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument('--commit', type=str,
+ help='Specify the commit to validate.')
parser.add_argument('project_dir')
parser.add_argument('files', nargs='+')
return parser
@@ -153,7 +188,12 @@
opts = parser.parse_args(argv)
try:
for filename in opts.files:
- process_file(os.path.join(opts.project_dir, filename))
+ if opts.commit:
+ json_data = rh.git.get_file_content(opts.commit, filename)
+ else:
+ with open(os.path.join(opts.project_dir, filename)) as f:
+ json_data = f.read()
+ process_file(json_data)
except:
print('Visit %s for details about the format of TEST_MAPPING '
'file.' % TEST_MAPPING_URL, file=sys.stderr)
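
A rough sketch of the two content paths main() now chooses between, with `HEAD` standing in for whatever value the hook passes via --commit:

```
import rh.git
import android_test_mapping_format  # assumes tools/ is on sys.path

# Without --commit: read the file from the working tree.
with open('TEST_MAPPING') as f:
    android_test_mapping_format.process_file(f.read())

# With --commit: read the file straight out of the given commit.
data = rh.git.get_file_content('HEAD', 'TEST_MAPPING')
android_test_mapping_format.process_file(data)
```
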
diff --git a/tools/android_test_mapping_format_unittest.py b/tools/android_test_mapping_format_unittest.py
index 248786e..9191a25 100755
--- a/tools/android_test_mapping_format_unittest.py
+++ b/tools/android_test_mapping_format_unittest.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright 2018 The Android Open Source Project
#
@@ -149,6 +149,40 @@
}
"""
+TEST_MAPPING_WITH_SUPPORTED_COMMENTS = r"""
+// supported comment
+{
+ // supported comment!@#$%^&*()_
+ "presubmit": [
+ {
+ "name": "CtsWindowManagerDeviceTestCases\"foo//baz",
+ "options": [
+ // supported comment!@#$%^&*()_
+ {
+ "include-annotation": "android.platform.test.annotations.Presubmit"
+ }
+ ]
+ }
+ ],
+ "imports": [
+ {
+ "path": "path1//path2//path3"
+ }
+ ]
+}
+"""
+
+TEST_MAPPING_WITH_NON_SUPPORTED_COMMENTS = """
+{ #non-supported comments
+ // supported comments
+ "presubmit": [#non-supported comments
+ { // non-supported comments
+ "name": "CtsWindowManagerDeviceTestCases",
+ }
+ ]
+}
+"""
+
class AndroidTestMappingFormatTests(unittest.TestCase):
"""Unittest for android_test_mapping_format module."""
@@ -165,84 +199,110 @@
"""
with open(self.test_mapping_file, 'w') as f:
f.write(VALID_TEST_MAPPING)
- android_test_mapping_format.process_file(self.test_mapping_file)
+ with open(self.test_mapping_file, 'r') as f:
+ android_test_mapping_format.process_file(f.read())
def test_invalid_test_mapping_bad_json(self):
"""Verify that TEST_MAPPING file with bad json can be detected."""
with open(self.test_mapping_file, 'w') as f:
f.write(BAD_JSON)
- self.assertRaises(
- ValueError, android_test_mapping_format.process_file,
- self.test_mapping_file)
+ with open(self.test_mapping_file, 'r') as f:
+ self.assertRaises(
+ ValueError, android_test_mapping_format.process_file,
+ f.read())
def test_invalid_test_mapping_wrong_test_key(self):
"""Verify that test config using wrong key can be detected."""
with open(self.test_mapping_file, 'w') as f:
f.write(BAD_TEST_WRONG_KEY)
- self.assertRaises(
- android_test_mapping_format.InvalidTestMappingError,
- android_test_mapping_format.process_file,
- self.test_mapping_file)
+ with open(self.test_mapping_file, 'r') as f:
+ self.assertRaises(
+ android_test_mapping_format.InvalidTestMappingError,
+ android_test_mapping_format.process_file,
+ f.read())
def test_invalid_test_mapping_wrong_test_value(self):
"""Verify that test config using wrong host value can be detected."""
with open(self.test_mapping_file, 'w') as f:
f.write(BAD_TEST_WRONG_HOST_VALUE)
- self.assertRaises(
- android_test_mapping_format.InvalidTestMappingError,
- android_test_mapping_format.process_file,
- self.test_mapping_file)
+ with open(self.test_mapping_file, 'r') as f:
+ self.assertRaises(
+ android_test_mapping_format.InvalidTestMappingError,
+ android_test_mapping_format.process_file,
+ f.read())
def test_invalid_test_mapping_wrong_preferred_targets_value(self):
"""Verify invalid preferred_targets are rejected."""
with open(self.test_mapping_file, 'w') as f:
f.write(BAD_TEST_WRONG_PREFERRED_TARGETS_VALUE_NONE_LIST)
- self.assertRaises(
- android_test_mapping_format.InvalidTestMappingError,
- android_test_mapping_format.process_file,
- self.test_mapping_file)
+ with open(self.test_mapping_file, 'r') as f:
+ self.assertRaises(
+ android_test_mapping_format.InvalidTestMappingError,
+ android_test_mapping_format.process_file,
+ f.read())
with open(self.test_mapping_file, 'w') as f:
f.write(BAD_TEST_WRONG_PREFERRED_TARGETS_VALUE_WRONG_TYPE)
- self.assertRaises(
- android_test_mapping_format.InvalidTestMappingError,
- android_test_mapping_format.process_file,
- self.test_mapping_file)
+ with open(self.test_mapping_file, 'r') as f:
+ self.assertRaises(
+ android_test_mapping_format.InvalidTestMappingError,
+ android_test_mapping_format.process_file,
+ f.read())
def test_invalid_test_mapping_wrong_test_option(self):
"""Verify that test config using wrong option can be detected."""
with open(self.test_mapping_file, 'w') as f:
f.write(BAD_TEST_WRONG_OPTION)
- self.assertRaises(
- android_test_mapping_format.InvalidTestMappingError,
- android_test_mapping_format.process_file,
- self.test_mapping_file)
+ with open(self.test_mapping_file, 'r') as f:
+ self.assertRaises(
+ android_test_mapping_format.InvalidTestMappingError,
+ android_test_mapping_format.process_file,
+ f.read())
def test_invalid_test_mapping_wrong_import_key(self):
"""Verify that import setting using wrong key can be detected."""
with open(self.test_mapping_file, 'w') as f:
f.write(BAD_IMPORT_WRONG_KEY)
- self.assertRaises(
- android_test_mapping_format.InvalidTestMappingError,
- android_test_mapping_format.process_file,
- self.test_mapping_file)
+ with open(self.test_mapping_file, 'r') as f:
+ self.assertRaises(
+ android_test_mapping_format.InvalidTestMappingError,
+ android_test_mapping_format.process_file,
+ f.read())
def test_invalid_test_mapping_wrong_import_value(self):
"""Verify that import setting using wrong value can be detected."""
with open(self.test_mapping_file, 'w') as f:
f.write(BAD_IMPORT_WRONG_IMPORT_VALUE)
- self.assertRaises(
- android_test_mapping_format.InvalidTestMappingError,
- android_test_mapping_format.process_file,
- self.test_mapping_file)
+ with open(self.test_mapping_file, 'r') as f:
+ self.assertRaises(
+ android_test_mapping_format.InvalidTestMappingError,
+ android_test_mapping_format.process_file,
+ f.read())
def test_invalid_test_mapping_file_patterns_value(self):
"""Verify that file_patterns using wrong value can be detected."""
with open(self.test_mapping_file, 'w') as f:
f.write(BAD_FILE_PATTERNS)
- self.assertRaises(
- android_test_mapping_format.InvalidTestMappingError,
- android_test_mapping_format.process_file,
- self.test_mapping_file)
+ with open(self.test_mapping_file, 'r') as f:
+ self.assertRaises(
+ android_test_mapping_format.InvalidTestMappingError,
+ android_test_mapping_format.process_file,
+ f.read())
+
+ def test_valid_test_mapping_file_with_supported_comments(self):
+ """Verify that '//'-format comment can be filtered."""
+ with open(self.test_mapping_file, 'w') as f:
+ f.write(TEST_MAPPING_WITH_SUPPORTED_COMMENTS)
+ with open(self.test_mapping_file, 'r') as f:
+ android_test_mapping_format.process_file(f.read())
+
+ def test_valid_test_mapping_file_with_non_supported_comments(self):
+ """Verify that non-supported comment can be detected."""
+ with open(self.test_mapping_file, 'w') as f:
+ f.write(TEST_MAPPING_WITH_NON_SUPPORTED_COMMENTS)
+ with open(self.test_mapping_file, 'r') as f:
+ self.assertRaises(
+ ValueError, android_test_mapping_format.process_file,
+ f.read())
if __name__ == '__main__':
diff --git a/tools/clang-format.py b/tools/clang-format.py
index 55b3f3b..7fa5b10 100755
--- a/tools/clang-format.py
+++ b/tools/clang-format.py
@@ -83,13 +83,13 @@
# Fail gracefully if clang-format itself aborts/fails.
try:
- result = rh.utils.run_command(cmd, capture_output=True)
- except rh.utils.RunCommandError as e:
+ result = rh.utils.run(cmd, capture_output=True)
+ except rh.utils.CalledProcessError as e:
print('clang-format failed:\n%s' % (e,), file=sys.stderr)
print('\nPlease report this to the clang team.', file=sys.stderr)
return 1
- stdout = result.output
+ stdout = result.stdout
if stdout.rstrip('\n') == 'no modified files to format':
# This is always printed when only files that clang-format does not
# understand were modified.
@@ -102,12 +102,19 @@
if diff_filenames:
if opts.fix:
- rh.utils.run_command(['git', 'apply'], input=stdout)
+ result = rh.utils.run(['git', 'apply'], input=stdout, check=False)
+ if result.returncode:
+ print('Error: Unable to automatically fix things.\n'
+ ' Make sure your checkout is clean first.\n'
+ ' If you have multiple commits, you might have to '
+ 'manually rebase your tree first.',
+ file=sys.stderr)
+ return result.returncode
else:
print('The following files have formatting errors:')
for filename in diff_filenames:
print('\t%s' % filename)
- print('You can run `%s --fix %s` to fix this' %
+ print('You can try to fix this by running:\n%s --fix %s' %
(sys.argv[0], rh.shell.cmd_to_str(argv)))
return 1
diff --git a/tools/google-java-format.py b/tools/google-java-format.py
index 0022c9d..ed5ce28 100755
--- a/tools/google-java-format.py
+++ b/tools/google-java-format.py
@@ -83,7 +83,7 @@
# https://github.com/google/google-java-format/issues/107
diff_cmd = ['git', 'diff', '--no-ext-diff', '-U0', '%s^!' % opts.commit]
diff_cmd.extend(['--'] + opts.files)
- diff = rh.utils.run_command(diff_cmd, capture_output=True).output
+ diff = rh.utils.run(diff_cmd, capture_output=True).stdout
cmd = [opts.google_java_format_diff, '-p1', '--aosp']
if opts.fix:
@@ -91,11 +91,9 @@
if not opts.sort_imports:
cmd.extend(['--skip-sorting-imports'])
- stdout = rh.utils.run_command(cmd,
- input=diff,
- capture_output=True,
- extra_env=extra_env).output
- if stdout != '':
+ stdout = rh.utils.run(cmd, input=diff, capture_output=True,
+ extra_env=extra_env).stdout
+ if stdout:
print('One or more files in your commit have Java formatting errors.')
print('You can run `%s --fix %s` to fix this' %
(sys.argv[0], rh.shell.cmd_to_str(argv)))
diff --git a/tools/pylint.py b/tools/pylint.py
index aef87ac..7885dcf 100755
--- a/tools/pylint.py
+++ b/tools/pylint.py
@@ -67,6 +67,7 @@
try:
os.execvp(cmd[0], cmd)
+ return 0
except OSError as e:
if e.errno == errno.ENOENT:
print('%s: unable to run `%s`: %s' % (__file__, cmd[0], e),
@@ -74,8 +75,8 @@
print('%s: Try installing pylint: sudo apt-get install %s' %
(__file__, os.path.basename(cmd[0])), file=sys.stderr)
return 1
- else:
- raise
+
+ raise
if __name__ == '__main__':
diff --git a/tools/pylintrc b/tools/pylintrc
index 704b21b..68c74ef 100644
--- a/tools/pylintrc
+++ b/tools/pylintrc
@@ -34,9 +34,16 @@
# List of plugins (as comma separated values of python modules names) to load,
# usually to register additional checkers.
load-plugins=
+ pylint.extensions.bad_builtin,
+ pylint.extensions.check_elif,
+ pylint.extensions.docstyle,
+ pylint.extensions.emptystring,
+ pylint.extensions.overlapping_exceptions,
+ pylint.extensions.redefined_variable_type,
-# Use multiple processes to speed up Pylint.
-jobs=1
+# Use multiple processes to speed up Pylint. A value of 0 autodetects available
+# processors.
+jobs=0
# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
@@ -65,7 +72,70 @@
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time. See also the "--disable" option for examples.
-#enable=
+enable=
+ apply-builtin,
+ backtick,
+ bad-python3-import,
+ basestring-builtin,
+ buffer-builtin,
+ cmp-builtin,
+ cmp-method,
+ coerce-builtin,
+ coerce-method,
+ delslice-method,
+ deprecated-itertools-function,
+ deprecated-str-translate-call,
+ deprecated-string-function,
+ deprecated-types-field,
+ dict-items-not-iterating,
+ dict-iter-method,
+ dict-keys-not-iterating,
+ dict-values-not-iterating,
+ dict-view-method,
+ div-method,
+ exception-message-attribute,
+ execfile-builtin,
+ file-builtin,
+ filter-builtin-not-iterating,
+ getslice-method,
+ hex-method,
+ idiv-method,
+ import-star-module-level,
+ indexing-exception,
+ input-builtin,
+ intern-builtin,
+ invalid-str-codec,
+ long-builtin,
+ long-suffix,
+ map-builtin-not-iterating,
+ metaclass-assignment,
+ next-method-called,
+ next-method-defined,
+ nonzero-method,
+ oct-method,
+ old-division,
+ old-ne-operator,
+ old-octal-literal,
+ old-raise-syntax,
+ parameter-unpacking,
+ print-statement,
+ raising-string,
+ range-builtin-not-iterating,
+ raw_input-builtin,
+ rdiv-method,
+ reduce-builtin,
+ reload-builtin,
+ round-builtin,
+ setslice-method,
+ standarderror-builtin,
+ sys-max-int,
+ unichr-builtin,
+ unicode-builtin,
+ unpacking-in-except,
+ using-cmp-argument,
+ xrange-builtin,
+ zip-builtin-not-iterating,
+
# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
@@ -77,6 +147,7 @@
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
# We leave many of the style warnings to judgement/peer review.
+# useless-object-inheritance: We disable this for Python 2 compatibility.
disable=
fixme,
file-ignored,
@@ -95,6 +166,7 @@
too-many-public-methods,
too-many-return-statements,
too-many-statements,
+ useless-object-inheritance,
[REPORTS]
@@ -112,6 +184,9 @@
# Tells whether to display a full report or only the messages
reports=no
+# Activate the evaluation score.
+score=no
+
# Python expression which should return a note less than 10 (10 is the highest
# note). You have access to the variables errors warning, statement which
# respectively contain the number of errors / warnings messages and the total