Merge pull request #209 from craigcitro/lint
Fix lint errors in apitools.
diff --git a/apitools/base/protorpclite/protojson.py b/apitools/base/protorpclite/protojson.py
index 8e923b5..685fb3f 100644
--- a/apitools/base/protorpclite/protojson.py
+++ b/apitools/base/protorpclite/protojson.py
@@ -78,6 +78,8 @@
logging.error('Must use valid json library (json or simplejson)')
raise first_import_error # pylint:disable=raising-bad-type
+
+
json = _load_json_module()
@@ -368,6 +370,7 @@
raise TypeError('Expected protocol of type ProtoJson')
ProtoJson.__default = protocol
+
CONTENT_TYPE = ProtoJson.CONTENT_TYPE
ALTERNATIVE_CONTENT_TYPES = ProtoJson.ALTERNATIVE_CONTENT_TYPES
diff --git a/apitools/base/py/base_api.py b/apitools/base/py/base_api.py
index 35951a6..cdb36fc 100644
--- a/apitools/base/py/base_api.py
+++ b/apitools/base/py/base_api.py
@@ -115,6 +115,8 @@
request_field = messages.StringField(11, default='')
upload_config = messages.MessageField(ApiUploadInfo, 12)
supports_download = messages.BooleanField(13, default=False)
+
+
REQUEST_IS_BODY = '<request>'
@@ -239,7 +241,8 @@
model=None, log_request=False, log_response=False,
num_retries=5, max_retry_wait=60, credentials_args=None,
default_global_params=None, additional_http_headers=None,
- check_response_func=None, retry_func=None, response_encoding=None):
+ check_response_func=None, retry_func=None,
+ response_encoding=None):
_RequireClassAttrs(self, ('_package', '_scopes', 'messages_module'))
if default_global_params is not None:
util.Typecheck(default_global_params, self.params_type)
diff --git a/apitools/base/py/base_cli.py b/apitools/base/py/base_cli.py
index 2527e64..70515c1 100644
--- a/apitools/base/py/base_cli.py
+++ b/apitools/base/py/base_cli.py
@@ -75,6 +75,7 @@
_BASE_FLAGS_DECLARED = True
+
FLAGS = flags.FLAGS
diff --git a/apitools/base/py/extra_types.py b/apitools/base/py/extra_types.py
index 79a4900..b48b59d 100644
--- a/apitools/base/py/extra_types.py
+++ b/apitools/base/py/extra_types.py
@@ -286,6 +286,7 @@
# Don't need to do anything special, they're decoded just fine
return encoding.CodecResult(value=value, complete=False)
+
encoding.RegisterFieldTypeCodec(_EncodeInt64Field, _DecodeInt64Field)(
messages.IntegerField)
@@ -303,4 +304,5 @@
date = datetime.datetime.strptime(value, '%Y-%m-%d').date()
return encoding.CodecResult(value=date, complete=True)
+
encoding.RegisterFieldTypeCodec(_EncodeDateField, _DecodeDateField)(DateField)
diff --git a/apitools/gen/gen_client.py b/apitools/gen/gen_client.py
index c36fbc4..462b347 100644
--- a/apitools/gen/gen_client.py
+++ b/apitools/gen/gen_client.py
@@ -346,5 +346,6 @@
args = parser.parse_args(argv[1:])
return args.func(args) or 0
+
if __name__ == '__main__':
sys.exit(main())
diff --git a/apitools/gen/message_registry.py b/apitools/gen/message_registry.py
index 4f004de..0ab44c1 100644
--- a/apitools/gen/message_registry.py
+++ b/apitools/gen/message_registry.py
@@ -169,7 +169,7 @@
def LookupDescriptorOrDie(self, name):
message_descriptor = self.LookupDescriptor(name)
if message_descriptor is None:
- raise ValueError('No message descriptor named "%s"', name)
+ raise ValueError('No message descriptor named "%s"' % name)
return message_descriptor
def __GetDescriptor(self, name):
@@ -262,7 +262,7 @@
self.__DeclareMessageAlias(schema, 'extra_types.JsonValue')
return
if schema.get('type') != 'object':
- raise ValueError('Cannot create message descriptors for type %s',
+ raise ValueError('Cannot create message descriptors for type %s' %
schema.get('type'))
message = extended_descriptor.ExtendedMessageDescriptor()
message.name = self.__names.ClassName(schema['id'])
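The two message_registry.py hunks above fix a subtle bug rather than a pure style nit: passing the substitution value as a second constructor argument, logging-style, never interpolates it into the exception message. A minimal illustration in plain Python (not apitools code; 'Schema' is just a placeholder value):

    # Before the fix: the comma passes 'Schema' as a second exception arg,
    # so the "%s" placeholder is never filled in.
    err = ValueError('No message descriptor named "%s"', 'Schema')
    print(str(err))   # ('No message descriptor named "%s"', 'Schema')

    # After the fix: '%' formatting builds the message before raising.
    err = ValueError('No message descriptor named "%s"' % 'Schema')
    print(str(err))   # No message descriptor named "Schema"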
diff --git a/apitools/gen/service_registry.py b/apitools/gen/service_registry.py
index f89cb44..fdcd93e 100644
--- a/apitools/gen/service_registry.py
+++ b/apitools/gen/service_registry.py
@@ -238,7 +238,8 @@
printer('get_credentials=True, http=None, model=None,')
printer('log_request=False, log_response=False,')
printer('credentials_args=None, default_global_params=None,')
- printer('additional_http_headers=None, response_encoding=None):')
+ printer('additional_http_headers=None, '
+ 'response_encoding=None):')
with printer.Indent():
printer('"""Create a new %s handle."""', client_info.package)
printer('url = url or self.BASE_URL')
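The wrapped printer() call in service_registry.py relies on Python's implicit concatenation of adjacent string literals, so the generated client still receives the argument list on one line; only the generator's own source line gets shorter. A quick check of that behaviour (plain Python, independent of apitools):

    # Adjacent string literals are joined at compile time.
    assert ('additional_http_headers=None, '
            'response_encoding=None):') == (
        'additional_http_headers=None, response_encoding=None):')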
diff --git a/apitools/scripts/oauth2l.py b/apitools/scripts/oauth2l.py
deleted file mode 100644
index cddba0a..0000000
--- a/apitools/scripts/oauth2l.py
+++ /dev/null
@@ -1,339 +0,0 @@
-#
-# Copyright 2015 Google Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Command-line utility for fetching/inspecting credentials.
-
-oauth2l (pronounced "oauthtool") is a small utility for fetching
-credentials, or inspecting existing credentials. Here we demonstrate
-some sample use:
-
- $ oauth2l fetch userinfo.email bigquery compute
- Fetched credentials of type:
- oauth2client.client.OAuth2Credentials
- Access token:
- ya29.abcdefghijklmnopqrstuvwxyz123yessirree
- $ oauth2l header userinfo.email
- Authorization: Bearer ya29.zyxwvutsrqpnmolkjihgfedcba
- $ oauth2l validate thisisnotatoken
- <exit status: 1>
- $ oauth2l validate ya29.zyxwvutsrqpnmolkjihgfedcba
- $ oauth2l scopes ya29.abcdefghijklmnopqrstuvwxyz123yessirree
- https://www.googleapis.com/auth/bigquery
- https://www.googleapis.com/auth/compute
- https://www.googleapis.com/auth/userinfo.email
-
-The `header` command is designed to be easy to use with `curl`:
-
- $ curl -H "$(oauth2l header bigquery)" \\
- 'https://www.googleapis.com/bigquery/v2/projects'
- ... lists all projects ...
-
-The token can also be printed in other formats, for easy chaining
-into other programs:
-
- $ oauth2l fetch -f json_compact userinfo.email
- <one-line JSON object with credential information>
- $ oauth2l fetch -f bare drive
- ya29.suchT0kenManyCredentialsW0Wokyougetthepoint
-
-"""
-
-from __future__ import print_function
-
-import argparse
-import json
-import logging
-import os
-import pkgutil
-import sys
-import textwrap
-
-import oauth2client.client
-from six.moves import http_client
-
-import apitools.base.py as apitools_base
-
-# We could use a generated client here, but it's used for precisely
-# one URL, with one parameter and no worries about URL encoding. Let's
-# go with simple.
-_OAUTH2_TOKENINFO_TEMPLATE = (
- 'https://www.googleapis.com/oauth2/v2/tokeninfo'
- '?access_token={access_token}'
-)
-
-
-def GetDefaultClientInfo():
- client_secrets_json = pkgutil.get_data(
- 'apitools.data', 'apitools_client_secrets.json').decode('utf8')
- client_secrets = json.loads(client_secrets_json)['installed']
- return {
- 'client_id': client_secrets['client_id'],
- 'client_secret': client_secrets['client_secret'],
- 'user_agent': 'apitools/0.2 oauth2l/0.1',
- }
-
-
-def GetClientInfoFromFlags(client_secrets):
- """Fetch client info from args."""
- if client_secrets:
- client_secrets_path = os.path.expanduser(client_secrets)
- if not os.path.exists(client_secrets_path):
- raise ValueError(
- 'Cannot find file: {0}'.format(client_secrets))
- with open(client_secrets_path) as client_secrets_file:
- client_secrets = json.load(client_secrets_file)
- if 'installed' not in client_secrets:
- raise ValueError('Provided client ID must be for an installed app')
- client_secrets = client_secrets['installed']
- return {
- 'client_id': client_secrets['client_id'],
- 'client_secret': client_secrets['client_secret'],
- 'user_agent': 'apitools/0.2 oauth2l/0.1',
- }
- else:
- return GetDefaultClientInfo()
-
-
-def _ExpandScopes(scopes):
- scope_prefix = 'https://www.googleapis.com/auth/'
- return [s if s.startswith('https://') else scope_prefix + s
- for s in scopes]
-
-
-def _PrettyJson(data):
- return json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))
-
-
-def _CompactJson(data):
- return json.dumps(data, sort_keys=True, separators=(',', ':'))
-
-
-def _AsText(text_or_bytes):
- if isinstance(text_or_bytes, bytes):
- return text_or_bytes.decode('utf8')
- return text_or_bytes
-
-
-def _Format(fmt, credentials):
- """Format credentials according to fmt."""
- if fmt == 'bare':
- return credentials.access_token
- elif fmt == 'header':
- return 'Authorization: Bearer %s' % credentials.access_token
- elif fmt == 'json':
- return _PrettyJson(json.loads(_AsText(credentials.to_json())))
- elif fmt == 'json_compact':
- return _CompactJson(json.loads(_AsText(credentials.to_json())))
- elif fmt == 'pretty':
- format_str = textwrap.dedent('\n'.join([
- 'Fetched credentials of type:',
- ' {credentials_type.__module__}.{credentials_type.__name__}',
- 'Access token:',
- ' {credentials.access_token}',
- ]))
- return format_str.format(credentials=credentials,
- credentials_type=type(credentials))
- raise ValueError('Unknown format: {0}'.format(fmt))
-
-_FORMATS = set(('bare', 'header', 'json', 'json_compact', 'pretty'))
-
-
-def _GetTokenScopes(access_token):
- """Return the list of valid scopes for the given token as a list."""
- url = _OAUTH2_TOKENINFO_TEMPLATE.format(access_token=access_token)
- response = apitools_base.MakeRequest(
- apitools_base.GetHttp(), apitools_base.Request(url))
- if response.status_code not in [http_client.OK, http_client.BAD_REQUEST]:
- raise apitools_base.HttpError.FromResponse(response)
- if response.status_code == http_client.BAD_REQUEST:
- return []
- return json.loads(_AsText(response.content))['scope'].split(' ')
-
-
-def _ValidateToken(access_token):
- """Return True iff the provided access token is valid."""
- return bool(_GetTokenScopes(access_token))
-
-
-def _FetchCredentials(args, client_info=None, credentials_filename=None):
- """Fetch a credential for the given client_info and scopes."""
- client_info = client_info or GetClientInfoFromFlags(args.client_secrets)
- scopes = _ExpandScopes(args.scope)
- if not scopes:
- raise ValueError('No scopes provided')
- credentials_filename = credentials_filename or args.credentials_filename
- # TODO(craigcitro): Remove this logging nonsense once we quiet the
- # spurious logging in oauth2client.
- old_level = logging.getLogger().level
- logging.getLogger().setLevel(logging.ERROR)
- credentials = apitools_base.GetCredentials(
- 'oauth2l', scopes, credentials_filename=credentials_filename,
- service_account_json_keyfile=args.service_account_json_keyfile,
- oauth2client_args='', **client_info)
- logging.getLogger().setLevel(old_level)
- if not _ValidateToken(credentials.access_token):
- credentials.refresh(apitools_base.GetHttp())
- return credentials
-
-
-def _Email(args):
- """Print the email address for this token, if possible."""
- userinfo = apitools_base.GetUserinfo(
- oauth2client.client.AccessTokenCredentials(args.access_token,
- 'oauth2l/1.0'))
- user_email = userinfo.get('email')
- if user_email:
- print(user_email)
-
-
-def _Fetch(args):
- """Fetch a valid access token and display it."""
- credentials = _FetchCredentials(args)
- print(_Format(args.credentials_format.lower(), credentials))
-
-
-def _Header(args):
- """Fetch an access token and display it formatted as an HTTP header."""
- print(_Format('header', _FetchCredentials(args)))
-
-
-def _Scopes(args):
- """Print the list of scopes for a valid token."""
- scopes = _GetTokenScopes(args.access_token)
- if not scopes:
- return 1
- for scope in sorted(scopes):
- print(scope)
-
-
-def _Userinfo(args):
- """Print the userinfo for this token, if possible."""
- userinfo = apitools_base.GetUserinfo(
- oauth2client.client.AccessTokenCredentials(args.access_token,
- 'oauth2l/1.0'))
- if args.format == 'json':
- print(_PrettyJson(userinfo))
- else:
- print(_CompactJson(userinfo))
-
-
-def _Validate(args):
- """Validate an access token. Exits with 0 if valid, 1 otherwise."""
- return 1 - (_ValidateToken(args.access_token))
-
-
-def _GetParser():
- """Returns argparse argument parser."""
- shared_flags = argparse.ArgumentParser(add_help=False)
- shared_flags.add_argument(
- '--client_secrets',
- default='',
- help=('If specified, use the client ID/secret from the named '
- 'file, which should be a client_secrets.json file '
- 'downloaded from the Developer Console.'))
- shared_flags.add_argument(
- '--credentials_filename',
- default='',
- help='(optional) Filename for fetching/storing credentials.')
- shared_flags.add_argument(
- '--service_account_json_keyfile',
- default='',
- help=('Filename for a JSON service account key downloaded from '
- 'the Google Developer Console.'))
-
- parser = argparse.ArgumentParser(
- description=__doc__,
- formatter_class=argparse.RawDescriptionHelpFormatter,
- )
- subparsers = parser.add_subparsers(dest='command')
-
- # email
- email = subparsers.add_parser('email', help=_Email.__doc__,
- parents=[shared_flags])
- email.set_defaults(func=_Email)
- email.add_argument(
- 'access_token',
- help=('Access token to print associated email address for. Must have '
- 'the userinfo.email scope.'))
-
- # fetch
- fetch = subparsers.add_parser('fetch', help=_Fetch.__doc__,
- parents=[shared_flags])
- fetch.set_defaults(func=_Fetch)
- fetch.add_argument(
- '-f', '--credentials_format',
- default='pretty', choices=sorted(_FORMATS),
- help='Output format for token.')
- fetch.add_argument(
- 'scope',
- nargs='*',
- help='Scope to fetch. May be provided multiple times.')
-
- # header
- header = subparsers.add_parser('header', help=_Header.__doc__,
- parents=[shared_flags])
- header.set_defaults(func=_Header)
- header.add_argument(
- 'scope',
- nargs='*',
- help='Scope to header. May be provided multiple times.')
-
- # scopes
- scopes = subparsers.add_parser('scopes', help=_Scopes.__doc__,
- parents=[shared_flags])
- scopes.set_defaults(func=_Scopes)
- scopes.add_argument(
- 'access_token',
- help=('Scopes associated with this token will be printed.'))
-
- # userinfo
- userinfo = subparsers.add_parser('userinfo', help=_Userinfo.__doc__,
- parents=[shared_flags])
- userinfo.set_defaults(func=_Userinfo)
- userinfo.add_argument(
- '-f', '--format',
- default='json', choices=('json', 'json_compact'),
- help='Output format for userinfo.')
- userinfo.add_argument(
- 'access_token',
- help=('Access token to print associated email address for. Must have '
- 'the userinfo.email scope.'))
-
- # validate
- validate = subparsers.add_parser('validate', help=_Validate.__doc__,
- parents=[shared_flags])
- validate.set_defaults(func=_Validate)
- validate.add_argument(
- 'access_token',
- help='Access token to validate.')
-
- return parser
-
-
-def main(argv=None):
- argv = argv or sys.argv
- # Invoke the newly created parser.
- args = _GetParser().parse_args(argv[1:])
- try:
- exit_code = args.func(args)
- except BaseException as e:
- print('Error encountered in {0} operation: {1}'.format(
- args.command, e))
- return 1
- return exit_code
-
-
-if __name__ == '__main__':
- sys.exit(main(sys.argv))
diff --git a/apitools/scripts/oauth2l_test.py b/apitools/scripts/oauth2l_test.py
deleted file mode 100644
index e13a688..0000000
--- a/apitools/scripts/oauth2l_test.py
+++ /dev/null
@@ -1,351 +0,0 @@
-#
-# Copyright 2015 Google Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Tests for oauth2l."""
-
-import json
-import os
-import sys
-
-import mock
-import oauth2client.client
-import six
-from six.moves import http_client
-import unittest2
-
-import apitools.base.py as apitools_base
-from apitools.scripts import oauth2l
-
-_OAUTH2L_MAIN_RUN = False
-
-
-class _FakeResponse(object):
-
- def __init__(self, status_code, scopes=None):
- self.status_code = status_code
- self.info = {
- 'reason': str(http_client.responses[self.status_code]),
- 'status': str(self.status_code),
- }
- if self.status_code == http_client.OK:
- self.content = json.dumps({'scope': ' '.join(scopes or [])})
- else:
- self.content = 'Error'
- self.request_url = 'some-url'
-
-
-def _GetCommandOutput(command_name, command_argv):
- orig_stdout = sys.stdout
- orig_stderr = sys.stderr
- new_stdout = six.StringIO()
- new_stderr = six.StringIO()
- try:
- sys.stdout = new_stdout
- sys.stderr = new_stderr
- oauth2l.main(['oauth2l', command_name] + command_argv)
- finally:
- sys.stdout = orig_stdout
- sys.stderr = orig_stderr
- new_stdout.seek(0)
- return new_stdout.getvalue().rstrip()
-
-
-class InvalidCommandTest(unittest2.TestCase):
-
- def testOutput(self):
- self.assertRaises(SystemExit,
- _GetCommandOutput, 'foo', [])
-
-
-class Oauth2lFormattingTest(unittest2.TestCase):
-
- def setUp(self):
- # Set up an access token to use
- self.access_token = 'ya29.abdefghijklmnopqrstuvwxyz'
- self.user_agent = 'oauth2l/1.0'
- self.credentials = oauth2client.client.AccessTokenCredentials(
- self.access_token, self.user_agent)
-
- def _Args(self, credentials_format):
- return ['--credentials_format=' + credentials_format, 'userinfo.email']
-
- def testFormatBare(self):
- with mock.patch.object(oauth2l, '_FetchCredentials',
- return_value=self.credentials,
- autospec=True) as mock_credentials:
- output = _GetCommandOutput('fetch', self._Args('bare'))
- self.assertEqual(self.access_token, output)
- self.assertEqual(1, mock_credentials.call_count)
-
- def testFormatHeader(self):
- with mock.patch.object(oauth2l, '_FetchCredentials',
- return_value=self.credentials,
- autospec=True) as mock_credentials:
- output = _GetCommandOutput('fetch', self._Args('header'))
- header = 'Authorization: Bearer %s' % self.access_token
- self.assertEqual(header, output)
- self.assertEqual(1, mock_credentials.call_count)
-
- def testHeaderCommand(self):
- with mock.patch.object(oauth2l, '_FetchCredentials',
- return_value=self.credentials,
- autospec=True) as mock_credentials:
- output = _GetCommandOutput('header', ['userinfo.email'])
- header = 'Authorization: Bearer %s' % self.access_token
- self.assertEqual(header, output)
- self.assertEqual(1, mock_credentials.call_count)
-
- def testFormatJson(self):
- with mock.patch.object(oauth2l, '_FetchCredentials',
- return_value=self.credentials,
- autospec=True) as mock_credentials:
- output = _GetCommandOutput('fetch', self._Args('json'))
- output_lines = [l.strip() for l in output.splitlines()]
- expected_lines = [
- '"_class": "AccessTokenCredentials",',
- '"access_token": "%s",' % self.access_token,
- ]
- for line in expected_lines:
- self.assertIn(line, output_lines)
- self.assertEqual(1, mock_credentials.call_count)
-
- def testFormatJsonCompact(self):
- with mock.patch.object(oauth2l, '_FetchCredentials',
- return_value=self.credentials,
- autospec=True) as mock_credentials:
- output = _GetCommandOutput('fetch', self._Args('json_compact'))
- expected_clauses = [
- '"_class":"AccessTokenCredentials",',
- '"access_token":"%s",' % self.access_token,
- ]
- for clause in expected_clauses:
- self.assertIn(clause, output)
- self.assertEqual(1, len(output.splitlines()))
- self.assertEqual(1, mock_credentials.call_count)
-
- def testFormatPretty(self):
- with mock.patch.object(oauth2l, '_FetchCredentials',
- return_value=self.credentials,
- autospec=True) as mock_credentials:
- output = _GetCommandOutput('fetch', self._Args('pretty'))
- expecteds = ['oauth2client.client.AccessTokenCredentials',
- self.access_token]
- for expected in expecteds:
- self.assertIn(expected, output)
- self.assertEqual(1, mock_credentials.call_count)
-
- def testFakeFormat(self):
- self.assertRaises(ValueError,
- oauth2l._Format, 'xml', self.credentials)
-
-
-class TestFetch(unittest2.TestCase):
-
- def setUp(self):
- # Set up an access token to use
- self.access_token = 'ya29.abdefghijklmnopqrstuvwxyz'
- self.user_agent = 'oauth2l/1.0'
- self.credentials = oauth2client.client.AccessTokenCredentials(
- self.access_token, self.user_agent)
-
- def testNoScopes(self):
- output = _GetCommandOutput('fetch', [])
- self.assertEqual(
- 'Error encountered in fetch operation: No scopes provided',
- output)
-
- def testScopes(self):
- expected_scopes = [
- 'https://www.googleapis.com/auth/userinfo.email',
- 'https://www.googleapis.com/auth/cloud-platform',
- ]
- with mock.patch.object(apitools_base, 'GetCredentials',
- return_value=self.credentials,
- autospec=True) as mock_fetch:
- with mock.patch.object(oauth2l, '_GetTokenScopes',
- return_value=expected_scopes,
- autospec=True) as mock_get_scopes:
- output = _GetCommandOutput(
- 'fetch', ['userinfo.email', 'cloud-platform'])
- self.assertIn(self.access_token, output)
- self.assertEqual(1, mock_fetch.call_count)
- args, _ = mock_fetch.call_args
- self.assertEqual(expected_scopes, args[-1])
- self.assertEqual(1, mock_get_scopes.call_count)
- self.assertEqual((self.access_token,),
- mock_get_scopes.call_args[0])
-
- def testCredentialsRefreshed(self):
- with mock.patch.object(apitools_base, 'GetCredentials',
- return_value=self.credentials,
- autospec=True) as mock_fetch:
- with mock.patch.object(oauth2l, '_ValidateToken',
- return_value=False,
- autospec=True) as mock_validate:
- with mock.patch.object(self.credentials, 'refresh',
- return_value=None,
- autospec=True) as mock_refresh:
- output = _GetCommandOutput('fetch', ['userinfo.email'])
- self.assertIn(self.access_token, output)
- self.assertEqual(1, mock_fetch.call_count)
- self.assertEqual(1, mock_validate.call_count)
- self.assertEqual(1, mock_refresh.call_count)
-
- def testDefaultClientInfo(self):
- with mock.patch.object(apitools_base, 'GetCredentials',
- return_value=self.credentials,
- autospec=True) as mock_fetch:
- with mock.patch.object(oauth2l, '_ValidateToken',
- return_value=True,
- autospec=True) as mock_validate:
- output = _GetCommandOutput('fetch', ['userinfo.email'])
- self.assertIn(self.access_token, output)
- self.assertEqual(1, mock_fetch.call_count)
- _, kwargs = mock_fetch.call_args
- self.assertEqual(
- '1042881264118.apps.googleusercontent.com',
- kwargs['client_id'])
- self.assertEqual(1, mock_validate.call_count)
-
- def testMissingClientSecrets(self):
- self.assertRaises(
- ValueError,
- oauth2l.GetClientInfoFromFlags, '/non/existent/file')
-
- def testWrongClientSecretsFormat(self):
- client_secrets = os.path.join(
- os.path.dirname(__file__),
- 'testdata/noninstalled_client_secrets.json')
- self.assertRaises(
- ValueError,
- oauth2l.GetClientInfoFromFlags, client_secrets)
-
- def testCustomClientInfo(self):
- client_secrets_path = os.path.join(
- os.path.dirname(__file__), 'testdata/fake_client_secrets.json')
- with mock.patch.object(apitools_base, 'GetCredentials',
- return_value=self.credentials,
- autospec=True) as mock_fetch:
- with mock.patch.object(oauth2l, '_ValidateToken',
- return_value=True,
- autospec=True) as mock_validate:
- fetch_args = [
- '--client_secrets=' + client_secrets_path,
- 'userinfo.email']
- output = _GetCommandOutput('fetch', fetch_args)
- self.assertIn(self.access_token, output)
- self.assertEqual(1, mock_fetch.call_count)
- _, kwargs = mock_fetch.call_args
- self.assertEqual('144169.apps.googleusercontent.com',
- kwargs['client_id'])
- self.assertEqual('awesomesecret',
- kwargs['client_secret'])
- self.assertEqual(1, mock_validate.call_count)
-
-
-class TestOtherCommands(unittest2.TestCase):
-
- def setUp(self):
- # Set up an access token to use
- self.access_token = 'ya29.abdefghijklmnopqrstuvwxyz'
- self.user_agent = 'oauth2l/1.0'
- self.credentials = oauth2client.client.AccessTokenCredentials(
- self.access_token, self.user_agent)
-
- def testEmail(self):
- user_info = {'email': 'foo@example.com'}
- with mock.patch.object(apitools_base, 'GetUserinfo',
- return_value=user_info,
- autospec=True) as mock_get_userinfo:
- output = _GetCommandOutput('email', [self.access_token])
- self.assertEqual(user_info['email'], output)
- self.assertEqual(1, mock_get_userinfo.call_count)
- self.assertEqual(self.access_token,
- mock_get_userinfo.call_args[0][0].access_token)
-
- def testNoEmail(self):
- with mock.patch.object(apitools_base, 'GetUserinfo',
- return_value={},
- autospec=True) as mock_get_userinfo:
- output = _GetCommandOutput('email', [self.access_token])
- self.assertEqual('', output)
- self.assertEqual(1, mock_get_userinfo.call_count)
-
- def testUserinfo(self):
- user_info = {'email': 'foo@example.com'}
- with mock.patch.object(apitools_base, 'GetUserinfo',
- return_value=user_info,
- autospec=True) as mock_get_userinfo:
- output = _GetCommandOutput('userinfo', [self.access_token])
- self.assertEqual(json.dumps(user_info, indent=4), output)
- self.assertEqual(1, mock_get_userinfo.call_count)
- self.assertEqual(self.access_token,
- mock_get_userinfo.call_args[0][0].access_token)
-
- def testUserinfoCompact(self):
- user_info = {'email': 'foo@example.com'}
- with mock.patch.object(apitools_base, 'GetUserinfo',
- return_value=user_info,
- autospec=True) as mock_get_userinfo:
- output = _GetCommandOutput(
- 'userinfo', ['--format=json_compact', self.access_token])
- self.assertEqual(json.dumps(user_info, separators=(',', ':')),
- output)
- self.assertEqual(1, mock_get_userinfo.call_count)
- self.assertEqual(self.access_token,
- mock_get_userinfo.call_args[0][0].access_token)
-
- def testScopes(self):
- scopes = [u'https://www.googleapis.com/auth/userinfo.email',
- u'https://www.googleapis.com/auth/cloud-platform']
- response = _FakeResponse(http_client.OK, scopes=scopes)
- with mock.patch.object(apitools_base, 'MakeRequest',
- return_value=response,
- autospec=True) as mock_make_request:
- output = _GetCommandOutput('scopes', [self.access_token])
- self.assertEqual(sorted(scopes), output.splitlines())
- self.assertEqual(1, mock_make_request.call_count)
-
- def testValidate(self):
- scopes = [u'https://www.googleapis.com/auth/userinfo.email',
- u'https://www.googleapis.com/auth/cloud-platform']
- response = _FakeResponse(http_client.OK, scopes=scopes)
- with mock.patch.object(apitools_base, 'MakeRequest',
- return_value=response,
- autospec=True) as mock_make_request:
- output = _GetCommandOutput('validate', [self.access_token])
- self.assertEqual('', output)
- self.assertEqual(1, mock_make_request.call_count)
-
- def testBadResponseCode(self):
- response = _FakeResponse(http_client.BAD_REQUEST)
- with mock.patch.object(apitools_base, 'MakeRequest',
- return_value=response,
- autospec=True) as mock_make_request:
- output = _GetCommandOutput('scopes', [self.access_token])
- self.assertEqual('', output)
- self.assertEqual(1, mock_make_request.call_count)
-
- def testUnexpectedResponseCode(self):
- response = _FakeResponse(http_client.INTERNAL_SERVER_ERROR)
- with mock.patch.object(apitools_base, 'MakeRequest',
- return_value=response,
- autospec=True) as mock_make_request:
- output = _GetCommandOutput('scopes', [self.access_token])
- self.assertIn(str(http_client.responses[response.status_code]),
- output)
- self.assertIn('Error encountered in scopes operation: HttpError',
- output)
- self.assertEqual(1, mock_make_request.call_count)
diff --git a/default.pylintrc b/default.pylintrc
deleted file mode 100644
index 2b06d98..0000000
--- a/default.pylintrc
+++ /dev/null
@@ -1,353 +0,0 @@
-# PyLint config for apitools code.
-#
-# NOTES:
-#
-# - Rules for test / demo code are generated into 'pylintrc_reduced'
-# as deltas from this configuration by the 'run_pylint.py' script.
-#
-# - 'RATIONALE: API mapping' as a defense for non-default settings is
-# based on the fact that this library maps APIs which are outside our
-# control, and adhering to the out-of-the-box defaults would induce
-# breakage / complexity in those mappings
-#
-[MASTER]
-
-# Specify a configuration file.
-# DEFAULT: rcfile=
-
-# Python code to execute, usually for sys.path manipulation such as
-# pygtk.require().
-# DEFAULT: init-hook=
-
-# Profiled execution.
-# DEFAULT: profile=no
-
-# Add files or directories to the blacklist. They should be base names, not
-# paths.
-# DEFAULT: ignore=CVS
-# NOTE: This path must be relative due to the use of
-# os.walk in astroid.modutils.get_module_files.
-
-# Pickle collected data for later comparisons.
-# DEFAULT: persistent=yes
-
-# List of plugins (as comma separated values of python modules names) to load,
-# usually to register additional checkers.
-# DEFAULT: load-plugins=
-
-# DEPRECATED
-# DEFAULT: include-ids=no
-
-# DEPRECATED
-# DEFAULT: symbols=no
-
-
-[MESSAGES CONTROL]
-
-# TODO: remove cyclic-import.
-disable =
- cyclic-import,
- fixme,
- import-error,
- inconsistent-return-statements,
- locally-disabled,
- locally-enabled,
- no-member,
- no-name-in-module,
- no-self-use,
- super-on-old-class,
- too-many-arguments,
- too-many-function-args,
-
-
-[REPORTS]
-
-# Set the output format. Available formats are text, parseable, colorized, msvs
-# (visual studio) and html. You can also give a reporter class, eg
-# mypackage.mymodule.MyReporterClass.
-# DEFAULT: output-format=text
-
-# Put messages in a separate file for each module / package specified on the
-# command line instead of printing them on stdout. Reports (if any) will be
-# written in a file name "pylint_global.[txt|html]".
-# DEFAULT: files-output=no
-
-# Tells whether to display a full report or only the messages
-# DEFAULT: reports=yes
-# RATIONALE: run from Travis / tox, and don't need / want to parse output.
-reports=no
-
-# Python expression which should return a note less than 10 (10 is the highest
-# note). You have access to the variables errors warning, statement which
-# respectively contain the number of errors / warnings messages and the total
-# number of statements analyzed. This is used by the global evaluation report
-# (RP0004).
-# DEFAULT: evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
-
-# Add a comment according to your evaluation note. This is used by the global
-# evaluation report (RP0004).
-# DEFAULT: comment=no
-
-# Template used to display messages. This is a python new-style format string
-# used to format the message information. See doc for all details
-#msg-template=
-
-
-[SIMILARITIES]
-
-# Minimum lines number of a similarity.
-# DEFAULT: min-similarity-lines=4
-min-similarity-lines=15
-
-# Ignore comments when computing similarities.
-# DEFAULT: ignore-comments=yes
-
-# Ignore docstrings when computing similarities.
-# DEFAULT: ignore-docstrings=yes
-
-# Ignore imports when computing similarities.
-# DEFAULT: ignore-imports=no
-ignore-imports=yes
-
-
-[VARIABLES]
-
-# Tells whether we should check for unused import in __init__ files.
-# DEFAULT: init-import=no
-
-# A regular expression matching the name of dummy variables (i.e. expectedly
-# not used).
-dummy-variables-rgx=^\*{0,2}(_$|unused_|dummy_)
-
-
-# List of additional names supposed to be defined in builtins. Remember that
-# you should avoid to define new builtins when possible.
-# DEFAULT: additional-builtins=
-
-
-[LOGGING]
-
-# Logging modules to check that the string format arguments are in logging
-# function parameter format
-# DEFAULT: logging-modules=logging
-
-
-[FORMAT]
-
-# Maximum number of characters on a single line.
-# DEFAULT: max-line-length=80
-
-# Regexp for a line that is allowed to be longer than the limit.
-# DEFAULT: ignore-long-lines=^\s*(# )?<?https?://\S+>?$
-
-# Allow the body of an if to be on the same line as the test if there is no
-# else.
-# DEFAULT: single-line-if-stmt=no
-
-# List of optional constructs for which whitespace checking is disabled
-# DEFAULT: no-space-check=trailing-comma,dict-separator
-# RATIONALE: pylint ignores whitespace checks around the
-# constructs "dict-separator" (cases like {1:2}) and
-# "trailing-comma" (cases like {1: 2, }).
-# By setting "no-space-check" to empty whitespace checks will be
-# enforced around both constructs.
-no-space-check =
-
-# Maximum number of lines in a module
-# DEFAULT: max-module-lines=1000
-max-module-lines=1500
-
-# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
-# tab).
-# DEFAULT: indent-string=' '
-
-# Number of spaces of indent required inside a hanging or continued line.
-# DEFAULT: indent-after-paren=4
-
-
-[MISCELLANEOUS]
-
-# List of note tags to take in consideration, separated by a comma.
-# DEFAULT: notes=FIXME,XXX,TODO
-
-
-[BASIC]
-
-# Regular expression which should only match function or class names that do
-# not require a docstring.
-# DEFAULT: no-docstring-rgx=__.*__
-no-docstring-rgx=(__.*__|main)
-
-# Minimum line length for functions/classes that require docstrings, shorter
-# ones are exempt.
-# DEFAULT: docstring-min-length=-1
-docstring-min-length=10
-
-# Regular expression which should only match correct module names. The
-# leading underscore is sanctioned for private modules by Google's style
-# guide.
-module-rgx=^(_?[a-z][a-z0-9_]*)|__init__$
-
-# Regular expression matching correct constant names
-# DEFAULT: const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
-const-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$
-
-# Regular expression matching correct class attribute names
-# DEFAULT: class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
-class-attribute-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$
-
-# Regular expression matching correct class names
-# DEFAULT: class-rgx=[A-Z_][a-zA-Z0-9]+$
-class-rgx=^_?[A-Z][a-zA-Z0-9]*$
-
-# Regular expression which should only match correct function names.
-# 'camel_case' and 'snake_case' group names are used for consistency of naming
-# styles across functions and methods.
-function-rgx=^(?:(?P<camel_case>_?[A-Z][a-zA-Z0-9]*)|(?P<snake_case>_?[a-z][a-z0-9_]*))$
-
-# Regular expression which should only match correct method names.
-# 'camel_case' and 'snake_case' group names are used for consistency of naming
-# styles across functions and methods. 'exempt' indicates a name which is
-# consistent with all naming styles.
-method-rgx=^(?:(?P<exempt>__[a-z0-9_]+__|next)|(?P<camel_case>_{0,2}[A-Z][a-zA-Z0-9]*)|(?P<snake_case>_{0,2}[a-z][a-z0-9_]*))$
-
-# Regular expression matching correct attribute names
-# DEFAULT: attr-rgx=[a-z_][a-z0-9_]{2,30}$
-attr-rgx=^_{0,2}[a-z][a-z0-9_]*$
-
-# Regular expression matching correct argument names
-# DEFAULT: argument-rgx=[a-z_][a-z0-9_]{2,30}$
-argument-rgx=^[a-z][a-z0-9_]*$
-
-# Regular expression matching correct variable names
-# DEFAULT: variable-rgx=[a-z_][a-z0-9_]{2,30}$
-variable-rgx=^[a-z][a-z0-9_]*$
-
-# Regular expression matching correct inline iteration names
-# DEFAULT: inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
-inlinevar-rgx=^[a-z][a-z0-9_]*$
-
-# Good variable names which should always be accepted, separated by a comma
-# DEFAULT: good-names=i,j,k,ex,Run,_
-good-names=main,_
-
-# Bad variable names which should always be refused, separated by a comma
-# DEFAULT: bad-names=foo,bar,baz,toto,tutu,tata
-bad-names=
-
-# List of builtins function names that should not be used, separated by a comma
-# <http://go/python-style#Deprecated_Language_Features>
-bad-functions=input,apply,reduce
-
-
-[TYPECHECK]
-
-# Tells whether missing members accessed in mixin class should be ignored. A
-# mixin class is detected if its name ends with "mixin" (case insensitive).
-# DEFAULT: ignore-mixin-members=yes
-
-# List of module names for which member attributes should not be checked
-# (useful for modules/projects where namespaces are manipulated during runtime
-# and thus existing member attributes cannot be deduced by static analysis
-# DEFAULT: ignored-modules=
-
-# List of classes names for which member attributes should not be checked
-# (useful for classes with attributes dynamically set).
-# DEFAULT: ignored-classes=SQLObject
-
-# When zope mode is activated, add a predefined set of Zope acquired attributes
-# to generated-members.
-# DEFAULT: zope=no
-
-# List of members which are set dynamically and missed by pylint inference
-# system, and so shouldn't trigger E0201 when accessed. Python regular
-# expressions are accepted.
-# DEFAULT: generated-members=REQUEST,acl_users,aq_parent
-
-
-[IMPORTS]
-
-# Deprecated modules which should not be used, separated by a comma
-# DEFAULT: deprecated-modules=regsub,TERMIOS,Bastion,rexec
-
-# Create a graph of every (i.e. internal and external) dependencies in the
-# given file (report RP0402 must not be disabled)
-# DEFAULT: import-graph=
-
-# Create a graph of external dependencies in the given file (report RP0402 must
-# not be disabled)
-# DEFAULT: ext-import-graph=
-
-# Create a graph of internal dependencies in the given file (report RP0402 must
-# not be disabled)
-# DEFAULT: int-import-graph=
-
-
-[CLASSES]
-
-# List of interface methods to ignore, separated by a comma. This is used for
-# instance to not check methods defines in Zope's Interface base class.
-# DEFAULT: ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
-
-# List of method names used to declare (i.e. assign) instance attributes.
-# DEFAULT: defining-attr-methods=__init__,__new__,setUp
-
-# List of valid names for the first argument in a class method.
-# DEFAULT: valid-classmethod-first-arg=cls
-
-# List of valid names for the first argument in a metaclass class method.
-# DEFAULT: valid-metaclass-classmethod-first-arg=mcs
-
-
-[DESIGN]
-
-# Maximum number of arguments for function / method
-# DEFAULT: max-args=5
-# RATIONALE: API-mapping
-max-args = 14
-
-# Argument names that match this expression will be ignored. Default to name
-# with leading underscore
-# DEFAULT: ignored-argument-names=_.*
-
-# Maximum number of locals for function / method body
-# DEFAULT: max-locals=15
-max-locals=24
-
-# Maximum number of return / yield for function / method body
-# DEFAULT: max-returns=6
-max-returns=9
-
-# Maximum number of branch for function / method body
-# DEFAULT: max-branches=12
-max-branches=21
-
-# Maximum number of statements in function / method body
-# DEFAULT: max-statements=50
-
-# Maximum number of parents for a class (see R0901).
-# DEFAULT: max-parents=7
-
-# Maximum number of attributes for a class (see R0902).
-# DEFAULT: max-attributes=7
-# RATIONALE: API mapping
-max-attributes=19
-
-# Minimum number of public methods for a class (see R0903).
-# DEFAULT: min-public-methods=2
-# RATIONALE: context mgrs may have *no* public methods
-min-public-methods=0
-
-# Maximum number of public methods for a class (see R0904).
-# DEFAULT: max-public-methods=20
-# RATIONALE: API mapping
-max-public-methods=40
-
-[ELIF]
-max-nested-blocks=6
-
-[EXCEPTIONS]
-
-# Exceptions that will emit a warning when being caught. Defaults to
-# "Exception"
-# DEFAULT: overgeneral-exceptions=Exception
diff --git a/run_pylint.py b/run_pylint.py
deleted file mode 100644
index c7e532e..0000000
--- a/run_pylint.py
+++ /dev/null
@@ -1,237 +0,0 @@
-#
-# Copyright 2015 Google Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Custom script to run PyLint on apitools codebase.
-
-"Inspired" by the similar script in gcloud-python.
-
-This runs pylint as a script via subprocess in two different
-subprocesses. The first lints the production/library code
-using the default rc file (PRODUCTION_RC). The second lints the
-demo/test code using an rc file (TEST_RC) which allows more style
-violations (hence it has a reduced number of style checks).
-"""
-
-import ConfigParser
-import copy
-import os
-import subprocess
-import sys
-
-
-IGNORED_DIRECTORIES = [
- 'apitools/gen/testdata',
- 'samples/bigquery_sample/bigquery_v2',
- 'samples/dns_sample/dns_v1',
- 'samples/fusiontables_sample/fusiontables_v1',
- 'samples/iam_sample/iam_v1',
- 'samples/servicemanagement_sample/servicemanagement_v1',
- 'samples/storage_sample/storage_v1',
- 'venv',
-]
-IGNORED_FILES = [
- 'ez_setup.py',
- 'run_pylint.py',
- 'setup.py',
- 'apitools/base/py/gzip.py',
- 'apitools/base/py/gzip_test.py',
-]
-PRODUCTION_RC = 'default.pylintrc'
-TEST_RC = 'reduced.pylintrc'
-TEST_DISABLED_MESSAGES = [
- 'exec-used',
- 'invalid-name',
- 'missing-docstring',
- 'protected-access',
-]
-TEST_RC_ADDITIONS = {
- 'MESSAGES CONTROL': {
- 'disable': ',\n'.join(TEST_DISABLED_MESSAGES),
- },
-}
-
-
-def read_config(filename):
- """Reads pylintrc config onto native ConfigParser object."""
- config = ConfigParser.ConfigParser()
- with open(filename, 'r') as file_obj:
- config.readfp(file_obj)
- return config
-
-
-def make_test_rc(base_rc_filename, additions_dict, target_filename):
- """Combines a base rc and test additions into single file."""
- main_cfg = read_config(base_rc_filename)
-
- # Create fresh config for test, which must extend production.
- test_cfg = ConfigParser.ConfigParser()
- test_cfg._sections = copy.deepcopy(main_cfg._sections)
-
- for section, opts in additions_dict.items():
- curr_section = test_cfg._sections.setdefault(
- section, test_cfg._dict())
- for opt, opt_val in opts.items():
- curr_val = curr_section.get(opt)
- if curr_val is None:
- raise KeyError('Expected to be adding to existing option.')
- curr_section[opt] = '%s\n%s' % (curr_val, opt_val)
-
- with open(target_filename, 'w') as file_obj:
- test_cfg.write(file_obj)
-
-
-def valid_filename(filename):
- """Checks if a file is a Python file and is not ignored."""
- for directory in IGNORED_DIRECTORIES:
- if filename.startswith(directory):
- return False
- return (filename.endswith('.py') and
- filename not in IGNORED_FILES)
-
-
-def is_production_filename(filename):
- """Checks if the file contains production code.
-
- :rtype: boolean
- :returns: Boolean indicating production status.
- """
- return not ('demo' in filename or 'test' in filename or
- filename.startswith('regression'))
-
-
-def get_files_for_linting(allow_limited=True, diff_base=None):
- """Gets a list of files in the repository.
-
- By default, returns all files via ``git ls-files``. However, in some cases
- uses a specific commit or branch (a so-called diff base) to compare
- against for changed files. (This requires ``allow_limited=True``.)
-
- To speed up linting on Travis pull requests against master, we manually
- set the diff base to origin/master. We don't do this on non-pull requests
- since origin/master will be equivalent to the currently checked out code.
- One could potentially use ${TRAVIS_COMMIT_RANGE} to find a diff base but
- this value is not dependable.
-
- :type allow_limited: boolean
- :param allow_limited: Boolean indicating if a reduced set of files can
- be used.
-
- :rtype: pair
-    :returns: Tuple of the diff base using the list of filenames to be
- linted.
- """
- if os.getenv('TRAVIS') == 'true':
- # In travis, don't default to master.
- diff_base = None
-
- if (os.getenv('TRAVIS_BRANCH') == 'master' and
- os.getenv('TRAVIS_PULL_REQUEST') != 'false'):
- # In the case of a pull request into master, we want to
- # diff against HEAD in master.
- diff_base = 'origin/master'
-
- if diff_base is not None and allow_limited:
- result = subprocess.check_output(['git', 'diff', '--name-only',
- diff_base])
- print 'Using files changed relative to %s:' % (diff_base,)
- print '-' * 60
- print result.rstrip('\n') # Don't print trailing newlines.
- print '-' * 60
- else:
- print 'Diff base not specified, listing all files in repository.'
- result = subprocess.check_output(['git', 'ls-files'])
-
- return result.rstrip('\n').split('\n'), diff_base
-
-
-def get_python_files(all_files=None, diff_base=None):
- """Gets a list of all Python files in the repository that need linting.
-
- Relies on :func:`get_files_for_linting()` to determine which files should
- be considered.
-
- NOTE: This requires ``git`` to be installed and requires that this
- is run within the ``git`` repository.
-
- :type all_files: list or ``NoneType``
- :param all_files: Optional list of files to be linted.
-
- :rtype: tuple
- :returns: A tuple containing two lists and a boolean. The first list
- contains all production files, the next all test/demo files and
- the boolean indicates if a restricted fileset was used.
- """
- using_restricted = False
- if all_files is None:
- all_files, diff_base = get_files_for_linting(diff_base=diff_base)
- using_restricted = diff_base is not None
-
- library_files = []
- non_library_files = []
- for filename in all_files:
- if valid_filename(filename):
- if is_production_filename(filename):
- library_files.append(filename)
- else:
- non_library_files.append(filename)
-
- return library_files, non_library_files, using_restricted
-
-
-def lint_fileset(filenames, rcfile, description):
- """Lints a group of files using a given rcfile."""
- # Only lint filenames that exist. For example, 'git diff --name-only'
- # could spit out deleted / renamed files. Another alternative could
- # be to use 'git diff --name-status' and filter out files with a
- # status of 'D'.
- filenames = [filename for filename in filenames
- if os.path.exists(filename)]
- if filenames:
- rc_flag = '--rcfile=%s' % (rcfile,)
- pylint_shell_command = ['pylint', rc_flag] + filenames
- status_code = subprocess.call(pylint_shell_command)
- if status_code != 0:
- error_message = ('Pylint failed on %s with '
- 'status %d.' % (description, status_code))
- print >> sys.stderr, error_message
- sys.exit(status_code)
- else:
- print 'Skipping %s, no files to lint.' % (description,)
-
-
-def main(argv):
- """Script entry point. Lints both sets of files."""
- diff_base = argv[1] if len(argv) > 1 else None
- make_test_rc(PRODUCTION_RC, TEST_RC_ADDITIONS, TEST_RC)
- library_files, non_library_files, using_restricted = get_python_files(
- diff_base=diff_base)
- try:
- lint_fileset(library_files, PRODUCTION_RC, 'library code')
- lint_fileset(non_library_files, TEST_RC, 'test and demo code')
- except SystemExit:
- if not using_restricted:
- raise
-
- message = 'Restricted lint failed, expanding to full fileset.'
- print >> sys.stderr, message
- all_files, _ = get_files_for_linting(allow_limited=False)
- library_files, non_library_files, _ = get_python_files(
- all_files=all_files)
- lint_fileset(library_files, PRODUCTION_RC, 'library code')
- lint_fileset(non_library_files, TEST_RC, 'test and demo code')
-
-
-if __name__ == '__main__':
- main(sys.argv)
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..7390cbb
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,3 @@
+[pycodestyle]
+count = False
+ignore = E722,E741
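The new setup.cfg carries the pycodestyle configuration for the tree: count = False turns off the summary total of violations, and ignore = E722,E741 skips the bare-except and ambiguous-variable-name checks; pycodestyle picks this [pycodestyle] section up from setup.cfg automatically. For reference, these are the kinds of constructs the two ignored codes would otherwise flag (illustrative only; do_work is a placeholder, not an apitools function):

    try:
        do_work()
    except:          # E722: bare 'except' catches everything, even KeyboardInterrupt
        pass

    l = [1, 2, 3]    # E741: 'l' is easily mistaken for '1' or 'I'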
diff --git a/tox.ini b/tox.ini
index b421af5..c81e32a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -23,11 +23,10 @@
basepython =
python2.7
commands =
- pip install six google-apitools[testing]
- pep8 apitools
- python run_pylint.py
+ pip install six google-apitools
+ pycodestyle apitools
deps =
- pep8
+ pycodestyle
pylint
unittest2
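With run_pylint.py and default.pylintrc gone, the tox lint step reduces to installing pycodestyle and running it over the apitools package. For a local check that mirrors the tox command without shelling out, pycodestyle also exposes a Python API; the following is a sketch under the assumption that pycodestyle is installed, not part of the apitools tooling itself:

    import sys

    import pycodestyle

    # Rough equivalent of 'pycodestyle apitools' with the setup.cfg ignore list.
    style = pycodestyle.StyleGuide(ignore=['E722', 'E741'])
    report = style.check_files(['apitools'])
    sys.exit(1 if report.total_errors else 0)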