chore: blacken (#772)

diff --git a/googleapiclient/__init__.py b/googleapiclient/__init__.py
index feba5ce..01147bf 100644
--- a/googleapiclient/__init__.py
+++ b/googleapiclient/__init__.py
@@ -20,8 +20,10 @@
 try:  # Python 2.7+
     from logging import NullHandler
 except ImportError:
+
     class NullHandler(logging.Handler):
         def emit(self, record):
             pass
 
+
 logging.getLogger(__name__).addHandler(NullHandler())
diff --git a/googleapiclient/_auth.py b/googleapiclient/_auth.py
index 9d6d363..8a2f673 100644
--- a/googleapiclient/_auth.py
+++ b/googleapiclient/_auth.py
@@ -19,6 +19,7 @@
 try:
     import google.auth
     import google.auth.credentials
+
     HAS_GOOGLE_AUTH = True
 except ImportError:  # pragma: NO COVER
     HAS_GOOGLE_AUTH = False
@@ -31,6 +32,7 @@
 try:
     import oauth2client
     import oauth2client.client
+
     HAS_OAUTH2CLIENT = True
 except ImportError:  # pragma: NO COVER
     HAS_OAUTH2CLIENT = False
@@ -45,8 +47,9 @@
         return oauth2client.client.GoogleCredentials.get_application_default()
     else:
         raise EnvironmentError(
-            'No authentication library is available. Please install either '
-            'google-auth or oauth2client.')
+            "No authentication library is available. Please install either "
+            "google-auth or oauth2client."
+        )
 
 
 def with_scopes(credentials, scopes):
@@ -62,10 +65,8 @@
         Union[google.auth.credentials.Credentials,
             oauth2client.client.Credentials]: The scoped credentials.
     """
-    if HAS_GOOGLE_AUTH and isinstance(
-            credentials, google.auth.credentials.Credentials):
-        return google.auth.credentials.with_scopes_if_required(
-            credentials, scopes)
+    if HAS_GOOGLE_AUTH and isinstance(credentials, google.auth.credentials.Credentials):
+        return google.auth.credentials.with_scopes_if_required(credentials, scopes)
     else:
         try:
             if credentials.create_scoped_required():
@@ -90,16 +91,15 @@
     """
     from googleapiclient.http import build_http
 
-    if HAS_GOOGLE_AUTH and isinstance(
-            credentials, google.auth.credentials.Credentials):
+    if HAS_GOOGLE_AUTH and isinstance(credentials, google.auth.credentials.Credentials):
         if google_auth_httplib2 is None:
             raise ValueError(
-                'Credentials from google.auth specified, but '
-                'google-api-python-client is unable to use these credentials '
-                'unless google-auth-httplib2 is installed. Please install '
-                'google-auth-httplib2.')
-        return google_auth_httplib2.AuthorizedHttp(credentials,
-                                                   http=build_http())
+                "Credentials from google.auth specified, but "
+                "google-api-python-client is unable to use these credentials "
+                "unless google-auth-httplib2 is installed. Please install "
+                "google-auth-httplib2."
+            )
+        return google_auth_httplib2.AuthorizedHttp(credentials, http=build_http())
     else:
         return credentials.authorize(build_http())
 
@@ -110,8 +110,7 @@
     # Http instance which would cause a weird recursive loop of refreshing
     # and likely tear a hole in spacetime.
     refresh_http = httplib2.Http()
-    if HAS_GOOGLE_AUTH and isinstance(
-            credentials, google.auth.credentials.Credentials):
+    if HAS_GOOGLE_AUTH and isinstance(credentials, google.auth.credentials.Credentials):
         request = google_auth_httplib2.Request(refresh_http)
         return credentials.refresh(request)
     else:
@@ -126,22 +125,23 @@
 
 
 def is_valid(credentials):
-    if HAS_GOOGLE_AUTH and isinstance(
-            credentials, google.auth.credentials.Credentials):
+    if HAS_GOOGLE_AUTH and isinstance(credentials, google.auth.credentials.Credentials):
         return credentials.valid
     else:
         return (
-            credentials.access_token is not None and
-            not credentials.access_token_expired)
+            credentials.access_token is not None
+            and not credentials.access_token_expired
+        )
 
 
 def get_credentials_from_http(http):
     if http is None:
         return None
-    elif hasattr(http.request, 'credentials'):
+    elif hasattr(http.request, "credentials"):
         return http.request.credentials
-    elif (hasattr(http, 'credentials')
-          and not isinstance(http.credentials, httplib2.Credentials)):
+    elif hasattr(http, "credentials") and not isinstance(
+        http.credentials, httplib2.Credentials
+    ):
         return http.credentials
     else:
         return None
diff --git a/googleapiclient/_helpers.py b/googleapiclient/_helpers.py
index 5e8184b..66ccf79 100644
--- a/googleapiclient/_helpers.py
+++ b/googleapiclient/_helpers.py
@@ -25,17 +25,18 @@
 
 logger = logging.getLogger(__name__)
 
-POSITIONAL_WARNING = 'WARNING'
-POSITIONAL_EXCEPTION = 'EXCEPTION'
-POSITIONAL_IGNORE = 'IGNORE'
-POSITIONAL_SET = frozenset([POSITIONAL_WARNING, POSITIONAL_EXCEPTION,
-                            POSITIONAL_IGNORE])
+POSITIONAL_WARNING = "WARNING"
+POSITIONAL_EXCEPTION = "EXCEPTION"
+POSITIONAL_IGNORE = "IGNORE"
+POSITIONAL_SET = frozenset(
+    [POSITIONAL_WARNING, POSITIONAL_EXCEPTION, POSITIONAL_IGNORE]
+)
 
 positional_parameters_enforcement = POSITIONAL_WARNING
 
-_SYM_LINK_MESSAGE = 'File: {0}: Is a symbolic link.'
-_IS_DIR_MESSAGE = '{0}: Is a directory'
-_MISSING_FILE_MESSAGE = 'Cannot access {0}: No such file or directory'
+_SYM_LINK_MESSAGE = "File: {0}: Is a symbolic link."
+_IS_DIR_MESSAGE = "{0}: Is a directory"
+_MISSING_FILE_MESSAGE = "Cannot access {0}: No such file or directory"
 
 
 def positional(max_positional_args):
@@ -114,20 +115,24 @@
         @functools.wraps(wrapped)
         def positional_wrapper(*args, **kwargs):
             if len(args) > max_positional_args:
-                plural_s = ''
+                plural_s = ""
                 if max_positional_args != 1:
-                    plural_s = 's'
-                message = ('{function}() takes at most {args_max} positional '
-                           'argument{plural} ({args_given} given)'.format(
-                               function=wrapped.__name__,
-                               args_max=max_positional_args,
-                               args_given=len(args),
-                               plural=plural_s))
+                    plural_s = "s"
+                message = (
+                    "{function}() takes at most {args_max} positional "
+                    "argument{plural} ({args_given} given)".format(
+                        function=wrapped.__name__,
+                        args_max=max_positional_args,
+                        args_given=len(args),
+                        plural=plural_s,
+                    )
+                )
                 if positional_parameters_enforcement == POSITIONAL_EXCEPTION:
                     raise TypeError(message)
                 elif positional_parameters_enforcement == POSITIONAL_WARNING:
                     logger.warning(message)
             return wrapped(*args, **kwargs)
+
         return positional_wrapper
 
     if isinstance(max_positional_args, six.integer_types):
@@ -153,8 +158,10 @@
     params = {}
     for key, value in six.iteritems(urlencoded_params):
         if len(value) != 1:
-            msg = ('URL-encoded content contains a repeated value:'
-                   '%s -> %s' % (key, ', '.join(value)))
+            msg = "URL-encoded content contains a repeated value:" "%s -> %s" % (
+                key,
+                ", ".join(value),
+            )
             raise ValueError(msg)
         params[key] = value[0]
     return params
diff --git a/googleapiclient/channel.py b/googleapiclient/channel.py
index 3caee13..efff0f6 100644
--- a/googleapiclient/channel.py
+++ b/googleapiclient/channel.py
@@ -85,32 +85,32 @@
 # Map the names of the parameters in the JSON channel description to
 # the parameter names we use in the Channel class.
 CHANNEL_PARAMS = {
-    'address': 'address',
-    'id': 'id',
-    'expiration': 'expiration',
-    'params': 'params',
-    'resourceId': 'resource_id',
-    'resourceUri': 'resource_uri',
-    'type': 'type',
-    'token': 'token',
-    }
+    "address": "address",
+    "id": "id",
+    "expiration": "expiration",
+    "params": "params",
+    "resourceId": "resource_id",
+    "resourceUri": "resource_uri",
+    "type": "type",
+    "token": "token",
+}
 
-X_GOOG_CHANNEL_ID     = 'X-GOOG-CHANNEL-ID'
-X_GOOG_MESSAGE_NUMBER = 'X-GOOG-MESSAGE-NUMBER'
-X_GOOG_RESOURCE_STATE = 'X-GOOG-RESOURCE-STATE'
-X_GOOG_RESOURCE_URI   = 'X-GOOG-RESOURCE-URI'
-X_GOOG_RESOURCE_ID    = 'X-GOOG-RESOURCE-ID'
+X_GOOG_CHANNEL_ID = "X-GOOG-CHANNEL-ID"
+X_GOOG_MESSAGE_NUMBER = "X-GOOG-MESSAGE-NUMBER"
+X_GOOG_RESOURCE_STATE = "X-GOOG-RESOURCE-STATE"
+X_GOOG_RESOURCE_URI = "X-GOOG-RESOURCE-URI"
+X_GOOG_RESOURCE_ID = "X-GOOG-RESOURCE-ID"
 
 
 def _upper_header_keys(headers):
-  new_headers = {}
-  for k, v in six.iteritems(headers):
-    new_headers[k.upper()] = v
-  return new_headers
+    new_headers = {}
+    for k, v in six.iteritems(headers):
+        new_headers[k.upper()] = v
+    return new_headers
 
 
 class Notification(object):
-  """A Notification from a Channel.
+    """A Notification from a Channel.
 
   Notifications are not usually constructed directly, but are returned
   from functions like notification_from_headers().
@@ -122,9 +122,10 @@
     resource_id: str, The unique identifier of the version of the resource at
       this event.
   """
-  @util.positional(5)
-  def __init__(self, message_number, state, resource_uri, resource_id):
-    """Notification constructor.
+
+    @util.positional(5)
+    def __init__(self, message_number, state, resource_uri, resource_id):
+        """Notification constructor.
 
     Args:
       message_number: int, The unique id number of this notification.
@@ -133,14 +134,14 @@
       resource_uri: str, The address of the resource being monitored.
       resource_id: str, The identifier of the watched resource.
     """
-    self.message_number = message_number
-    self.state = state
-    self.resource_uri = resource_uri
-    self.resource_id = resource_id
+        self.message_number = message_number
+        self.state = state
+        self.resource_uri = resource_uri
+        self.resource_id = resource_id
 
 
 class Channel(object):
-  """A Channel for notifications.
+    """A Channel for notifications.
 
   Usually not constructed directly, instead it is returned from helper
   functions like new_webhook_channel().
@@ -163,10 +164,19 @@
     resource_uri: str, The canonicalized ID of the watched resource.
   """
 
-  @util.positional(5)
-  def __init__(self, type, id, token, address, expiration=None,
-               params=None, resource_id="", resource_uri=""):
-    """Create a new Channel.
+    @util.positional(5)
+    def __init__(
+        self,
+        type,
+        id,
+        token,
+        address,
+        expiration=None,
+        params=None,
+        resource_id="",
+        resource_uri="",
+    ):
+        """Create a new Channel.
 
     In user code, this Channel constructor will not typically be called
     manually since there are functions for creating channels for each specific
@@ -189,17 +199,17 @@
         being watched. Stable across different API versions.
       resource_uri: str, The canonicalized ID of the watched resource.
     """
-    self.type = type
-    self.id = id
-    self.token = token
-    self.address = address
-    self.expiration = expiration
-    self.params = params
-    self.resource_id = resource_id
-    self.resource_uri = resource_uri
+        self.type = type
+        self.id = id
+        self.token = token
+        self.address = address
+        self.expiration = expiration
+        self.params = params
+        self.resource_id = resource_id
+        self.resource_uri = resource_uri
 
-  def body(self):
-    """Build a body from the Channel.
+    def body(self):
+        """Build a body from the Channel.
 
     Constructs a dictionary that's appropriate for passing into watch()
     methods as the value of body argument.
@@ -207,25 +217,25 @@
     Returns:
       A dictionary representation of the channel.
     """
-    result = {
-        'id': self.id,
-        'token': self.token,
-        'type': self.type,
-        'address': self.address
+        result = {
+            "id": self.id,
+            "token": self.token,
+            "type": self.type,
+            "address": self.address,
         }
-    if self.params:
-      result['params'] = self.params
-    if self.resource_id:
-      result['resourceId'] = self.resource_id
-    if self.resource_uri:
-      result['resourceUri'] = self.resource_uri
-    if self.expiration:
-      result['expiration'] = self.expiration
+        if self.params:
+            result["params"] = self.params
+        if self.resource_id:
+            result["resourceId"] = self.resource_id
+        if self.resource_uri:
+            result["resourceUri"] = self.resource_uri
+        if self.expiration:
+            result["expiration"] = self.expiration
 
-    return result
+        return result
 
-  def update(self, resp):
-    """Update a channel with information from the response of watch().
+    def update(self, resp):
+        """Update a channel with information from the response of watch().
 
     When a request is sent to watch() a resource, the response returned
     from the watch() request is a dictionary with updated channel information,
@@ -234,14 +244,14 @@
     Args:
       resp: dict, The response from a watch() method.
     """
-    for json_name, param_name in six.iteritems(CHANNEL_PARAMS):
-      value = resp.get(json_name)
-      if value is not None:
-        setattr(self, param_name, value)
+        for json_name, param_name in six.iteritems(CHANNEL_PARAMS):
+            value = resp.get(json_name)
+            if value is not None:
+                setattr(self, param_name, value)
 
 
 def notification_from_headers(channel, headers):
-  """Parse a notification from the webhook request headers, validate
+    """Parse a notification from the webhook request headers, validate
     the notification, and return a Notification object.
 
   Args:
@@ -256,17 +266,18 @@
     errors.InvalidNotificationError if the notification is invalid.
     ValueError if the X-GOOG-MESSAGE-NUMBER can't be converted to an int.
   """
-  headers = _upper_header_keys(headers)
-  channel_id = headers[X_GOOG_CHANNEL_ID]
-  if channel.id != channel_id:
-    raise errors.InvalidNotificationError(
-        'Channel id mismatch: %s != %s' % (channel.id, channel_id))
-  else:
-    message_number = int(headers[X_GOOG_MESSAGE_NUMBER])
-    state = headers[X_GOOG_RESOURCE_STATE]
-    resource_uri = headers[X_GOOG_RESOURCE_URI]
-    resource_id = headers[X_GOOG_RESOURCE_ID]
-    return Notification(message_number, state, resource_uri, resource_id)
+    headers = _upper_header_keys(headers)
+    channel_id = headers[X_GOOG_CHANNEL_ID]
+    if channel.id != channel_id:
+        raise errors.InvalidNotificationError(
+            "Channel id mismatch: %s != %s" % (channel.id, channel_id)
+        )
+    else:
+        message_number = int(headers[X_GOOG_MESSAGE_NUMBER])
+        state = headers[X_GOOG_RESOURCE_STATE]
+        resource_uri = headers[X_GOOG_RESOURCE_URI]
+        resource_id = headers[X_GOOG_RESOURCE_ID]
+        return Notification(message_number, state, resource_uri, resource_id)
 
 
 @util.positional(2)
@@ -289,13 +300,18 @@
     """
     expiration_ms = 0
     if expiration:
-      delta = expiration - EPOCH
-      expiration_ms = delta.microseconds/1000 + (
-          delta.seconds + delta.days*24*3600)*1000
-      if expiration_ms < 0:
-        expiration_ms = 0
+        delta = expiration - EPOCH
+        expiration_ms = (
+            delta.microseconds / 1000 + (delta.seconds + delta.days * 24 * 3600) * 1000
+        )
+        if expiration_ms < 0:
+            expiration_ms = 0
 
-    return Channel('web_hook', str(uuid.uuid4()),
-                   token, url, expiration=expiration_ms,
-                   params=params)
-
+    return Channel(
+        "web_hook",
+        str(uuid.uuid4()),
+        token,
+        url,
+        expiration=expiration_ms,
+        params=params,
+    )
diff --git a/googleapiclient/discovery.py b/googleapiclient/discovery.py
index 771d9fc..87403b9 100644
--- a/googleapiclient/discovery.py
+++ b/googleapiclient/discovery.py
@@ -20,25 +20,20 @@
 import six
 from six.moves import zip
 
-__author__ = 'jcgregorio@google.com (Joe Gregorio)'
-__all__ = [
-    'build',
-    'build_from_document',
-    'fix_method_name',
-    'key2param',
-    ]
+__author__ = "jcgregorio@google.com (Joe Gregorio)"
+__all__ = ["build", "build_from_document", "fix_method_name", "key2param"]
 
 from six import BytesIO
 from six.moves import http_client
-from six.moves.urllib.parse import urlencode, urlparse, urljoin, \
-  urlunparse, parse_qsl
+from six.moves.urllib.parse import urlencode, urlparse, urljoin, urlunparse, parse_qsl
 
 # Standard library imports
 import copy
+
 try:
-  from email.generator import BytesGenerator
+    from email.generator import BytesGenerator
 except ImportError:
-  from email.generator import Generator as BytesGenerator
+    from email.generator import Generator as BytesGenerator
 from email.mime.multipart import MIMEMultipart
 from email.mime.nonmultipart import MIMENonMultipart
 import json
@@ -82,50 +77,54 @@
 
 logger = logging.getLogger(__name__)
 
-URITEMPLATE = re.compile('{[^}]*}')
-VARNAME = re.compile('[a-zA-Z0-9_-]+')
-DISCOVERY_URI = ('https://www.googleapis.com/discovery/v1/apis/'
-                 '{api}/{apiVersion}/rest')
+URITEMPLATE = re.compile("{[^}]*}")
+VARNAME = re.compile("[a-zA-Z0-9_-]+")
+DISCOVERY_URI = (
+    "https://www.googleapis.com/discovery/v1/apis/" "{api}/{apiVersion}/rest"
+)
 V1_DISCOVERY_URI = DISCOVERY_URI
-V2_DISCOVERY_URI = ('https://{api}.googleapis.com/$discovery/rest?'
-                    'version={apiVersion}')
-DEFAULT_METHOD_DOC = 'A description of how to use this function'
-HTTP_PAYLOAD_METHODS = frozenset(['PUT', 'POST', 'PATCH'])
+V2_DISCOVERY_URI = (
+    "https://{api}.googleapis.com/$discovery/rest?" "version={apiVersion}"
+)
+DEFAULT_METHOD_DOC = "A description of how to use this function"
+HTTP_PAYLOAD_METHODS = frozenset(["PUT", "POST", "PATCH"])
 
-_MEDIA_SIZE_BIT_SHIFTS = {'KB': 10, 'MB': 20, 'GB': 30, 'TB': 40}
-BODY_PARAMETER_DEFAULT_VALUE = {
-    'description': 'The request body.',
-    'type': 'object',
-}
+_MEDIA_SIZE_BIT_SHIFTS = {"KB": 10, "MB": 20, "GB": 30, "TB": 40}
+BODY_PARAMETER_DEFAULT_VALUE = {"description": "The request body.", "type": "object"}
 MEDIA_BODY_PARAMETER_DEFAULT_VALUE = {
-    'description': ('The filename of the media request body, or an instance '
-                    'of a MediaUpload object.'),
-    'type': 'string',
-    'required': False,
+    "description": (
+        "The filename of the media request body, or an instance "
+        "of a MediaUpload object."
+    ),
+    "type": "string",
+    "required": False,
 }
 MEDIA_MIME_TYPE_PARAMETER_DEFAULT_VALUE = {
-    'description': ('The MIME type of the media request body, or an instance '
-                    'of a MediaUpload object.'),
-    'type': 'string',
-    'required': False,
+    "description": (
+        "The MIME type of the media request body, or an instance "
+        "of a MediaUpload object."
+    ),
+    "type": "string",
+    "required": False,
 }
-_PAGE_TOKEN_NAMES = ('pageToken', 'nextPageToken')
+_PAGE_TOKEN_NAMES = ("pageToken", "nextPageToken")
 
 # Parameters accepted by the stack, but not visible via discovery.
 # TODO(dhermes): Remove 'userip' in 'v2'.
-STACK_QUERY_PARAMETERS = frozenset(['trace', 'pp', 'userip', 'strict'])
-STACK_QUERY_PARAMETER_DEFAULT_VALUE = {'type': 'string', 'location': 'query'}
+STACK_QUERY_PARAMETERS = frozenset(["trace", "pp", "userip", "strict"])
+STACK_QUERY_PARAMETER_DEFAULT_VALUE = {"type": "string", "location": "query"}
 
 # Library-specific reserved words beyond Python keywords.
-RESERVED_WORDS = frozenset(['body'])
+RESERVED_WORDS = frozenset(["body"])
 
 # patch _write_lines to avoid munging '\r' into '\n'
 # ( https://bugs.python.org/issue18886 https://bugs.python.org/issue19003 )
 class _BytesGenerator(BytesGenerator):
-  _write_lines = BytesGenerator.write
+    _write_lines = BytesGenerator.write
+
 
 def fix_method_name(name):
-  """Fix method names to avoid '$' characters and reserved word conflicts.
+    """Fix method names to avoid '$' characters and reserved word conflicts.
 
   Args:
     name: string, method name.
@@ -134,15 +133,15 @@
     The name with '_' appended if the name is a reserved word and '$' 
     replaced with '_'. 
   """
-  name = name.replace('$', '_')
-  if keyword.iskeyword(name) or name in RESERVED_WORDS:
-    return name + '_'
-  else:
-    return name
+    name = name.replace("$", "_")
+    if keyword.iskeyword(name) or name in RESERVED_WORDS:
+        return name + "_"
+    else:
+        return name
 
 
 def key2param(key):
-  """Converts key names into parameter names.
+    """Converts key names into parameter names.
 
   For example, converting "max-results" -> "max_results"
 
@@ -152,31 +151,33 @@
   Returns:
     A safe method name based on the key name.
   """
-  result = []
-  key = list(key)
-  if not key[0].isalpha():
-    result.append('x')
-  for c in key:
-    if c.isalnum():
-      result.append(c)
-    else:
-      result.append('_')
+    result = []
+    key = list(key)
+    if not key[0].isalpha():
+        result.append("x")
+    for c in key:
+        if c.isalnum():
+            result.append(c)
+        else:
+            result.append("_")
 
-  return ''.join(result)
+    return "".join(result)
 
 
 @positional(2)
-def build(serviceName,
-          version,
-          http=None,
-          discoveryServiceUrl=DISCOVERY_URI,
-          developerKey=None,
-          model=None,
-          requestBuilder=HttpRequest,
-          credentials=None,
-          cache_discovery=True,
-          cache=None):
-  """Construct a Resource for interacting with an API.
+def build(
+    serviceName,
+    version,
+    http=None,
+    discoveryServiceUrl=DISCOVERY_URI,
+    developerKey=None,
+    model=None,
+    requestBuilder=HttpRequest,
+    credentials=None,
+    cache_discovery=True,
+    cache=None,
+):
+    """Construct a Resource for interacting with an API.
 
   Construct a Resource object for interacting with an API. The serviceName and
   version are the names from the Discovery service.
@@ -205,38 +206,40 @@
   Returns:
     A Resource object with methods for interacting with the service.
   """
-  params = {
-      'api': serviceName,
-      'apiVersion': version
-      }
+    params = {"api": serviceName, "apiVersion": version}
 
-  if http is None:
-    discovery_http = build_http()
-  else:
-    discovery_http = http
+    if http is None:
+        discovery_http = build_http()
+    else:
+        discovery_http = http
 
-  for discovery_url in (discoveryServiceUrl, V2_DISCOVERY_URI,):
-    requested_url = uritemplate.expand(discovery_url, params)
+    for discovery_url in (discoveryServiceUrl, V2_DISCOVERY_URI):
+        requested_url = uritemplate.expand(discovery_url, params)
 
-    try:
-      content = _retrieve_discovery_doc(
-        requested_url, discovery_http, cache_discovery, cache, developerKey)
-      return build_from_document(content, base=discovery_url, http=http,
-          developerKey=developerKey, model=model, requestBuilder=requestBuilder,
-          credentials=credentials)
-    except HttpError as e:
-      if e.resp.status == http_client.NOT_FOUND:
-        continue
-      else:
-        raise e
+        try:
+            content = _retrieve_discovery_doc(
+                requested_url, discovery_http, cache_discovery, cache, developerKey
+            )
+            return build_from_document(
+                content,
+                base=discovery_url,
+                http=http,
+                developerKey=developerKey,
+                model=model,
+                requestBuilder=requestBuilder,
+                credentials=credentials,
+            )
+        except HttpError as e:
+            if e.resp.status == http_client.NOT_FOUND:
+                continue
+            else:
+                raise e
 
-  raise UnknownApiNameOrVersion(
-        "name: %s  version: %s" % (serviceName, version))
+    raise UnknownApiNameOrVersion("name: %s  version: %s" % (serviceName, version))
 
 
-def _retrieve_discovery_doc(url, http, cache_discovery, cache=None,
-                            developerKey=None):
-  """Retrieves the discovery_doc from cache or the internet.
+def _retrieve_discovery_doc(url, http, cache_discovery, cache=None, developerKey=None):
+    """Retrieves the discovery_doc from cache or the internet.
 
   Args:
     url: string, the URL of the discovery document.
@@ -249,45 +252,46 @@
   Returns:
     A unicode string representation of the discovery document.
   """
-  if cache_discovery:
-    from . import discovery_cache
-    from .discovery_cache import base
-    if cache is None:
-      cache = discovery_cache.autodetect()
-    if cache:
-      content = cache.get(url)
-      if content:
-        return content
+    if cache_discovery:
+        from . import discovery_cache
+        from .discovery_cache import base
 
-  actual_url = url
-  # REMOTE_ADDR is defined by the CGI spec [RFC3875] as the environment
-  # variable that contains the network address of the client sending the
-  # request. If it exists then add that to the request for the discovery
-  # document to avoid exceeding the quota on discovery requests.
-  if 'REMOTE_ADDR' in os.environ:
-    actual_url = _add_query_parameter(url, 'userIp', os.environ['REMOTE_ADDR'])
-  if developerKey:
-    actual_url = _add_query_parameter(url, 'key', developerKey)
-  logger.info('URL being requested: GET %s', actual_url)
+        if cache is None:
+            cache = discovery_cache.autodetect()
+        if cache:
+            content = cache.get(url)
+            if content:
+                return content
 
-  resp, content = http.request(actual_url)
+    actual_url = url
+    # REMOTE_ADDR is defined by the CGI spec [RFC3875] as the environment
+    # variable that contains the network address of the client sending the
+    # request. If it exists then add that to the request for the discovery
+    # document to avoid exceeding the quota on discovery requests.
+    if "REMOTE_ADDR" in os.environ:
+        actual_url = _add_query_parameter(url, "userIp", os.environ["REMOTE_ADDR"])
+    if developerKey:
+        actual_url = _add_query_parameter(url, "key", developerKey)
+    logger.info("URL being requested: GET %s", actual_url)
 
-  if resp.status >= 400:
-    raise HttpError(resp, content, uri=actual_url)
+    resp, content = http.request(actual_url)
 
-  try:
-    content = content.decode('utf-8')
-  except AttributeError:
-    pass
+    if resp.status >= 400:
+        raise HttpError(resp, content, uri=actual_url)
 
-  try:
-    service = json.loads(content)
-  except ValueError as e:
-    logger.error('Failed to parse as JSON: ' + content)
-    raise InvalidJsonError()
-  if cache_discovery and cache:
-    cache.set(url, content)
-  return content
+    try:
+        content = content.decode("utf-8")
+    except AttributeError:
+        pass
+
+    try:
+        service = json.loads(content)
+    except ValueError as e:
+        logger.error("Failed to parse as JSON: " + content)
+        raise InvalidJsonError()
+    if cache_discovery and cache:
+        cache.set(url, content)
+    return content
 
 
 @positional(1)
@@ -299,8 +303,9 @@
     developerKey=None,
     model=None,
     requestBuilder=HttpRequest,
-    credentials=None):
-  """Create a Resource for interacting with an API.
+    credentials=None,
+):
+    """Create a Resource for interacting with an API.
 
   Same as `build()`, but constructs the Resource object from a discovery
   document that is it given, as opposed to retrieving one over HTTP.
@@ -328,65 +333,74 @@
     A Resource object with methods for interacting with the service.
   """
 
-  if http is not None and credentials is not None:
-    raise ValueError('Arguments http and credentials are mutually exclusive.')
+    if http is not None and credentials is not None:
+        raise ValueError("Arguments http and credentials are mutually exclusive.")
 
-  if isinstance(service, six.string_types):
-    service = json.loads(service)
-  elif isinstance(service, six.binary_type):
-    service = json.loads(service.decode('utf-8'))
+    if isinstance(service, six.string_types):
+        service = json.loads(service)
+    elif isinstance(service, six.binary_type):
+        service = json.loads(service.decode("utf-8"))
 
-  if  'rootUrl' not in service and (isinstance(http, (HttpMock,
-                                                      HttpMockSequence))):
-      logger.error("You are using HttpMock or HttpMockSequence without" +
-                   "having the service discovery doc in cache. Try calling " +
-                   "build() without mocking once first to populate the " +
-                   "cache.")
-      raise InvalidJsonError()
+    if "rootUrl" not in service and (isinstance(http, (HttpMock, HttpMockSequence))):
+        logger.error(
+            "You are using HttpMock or HttpMockSequence without"
+            + "having the service discovery doc in cache. Try calling "
+            + "build() without mocking once first to populate the "
+            + "cache."
+        )
+        raise InvalidJsonError()
 
-  base = urljoin(service['rootUrl'], service['servicePath'])
-  schema = Schemas(service)
+    base = urljoin(service["rootUrl"], service["servicePath"])
+    schema = Schemas(service)
 
-  # If the http client is not specified, then we must construct an http client
-  # to make requests. If the service has scopes, then we also need to setup
-  # authentication.
-  if http is None:
-    # Does the service require scopes?
-    scopes = list(
-      service.get('auth', {}).get('oauth2', {}).get('scopes', {}).keys())
-
-    # If so, then the we need to setup authentication if no developerKey is
-    # specified.
-    if scopes and not developerKey:
-      # If the user didn't pass in credentials, attempt to acquire application
-      # default credentials.
-      if credentials is None:
-        credentials = _auth.default_credentials()
-
-      # The credentials need to be scoped.
-      credentials = _auth.with_scopes(credentials, scopes)
-
-    # If credentials are provided, create an authorized http instance;
-    # otherwise, skip authentication.
-    if credentials:
-      http = _auth.authorized_http(credentials)
-
-    # If the service doesn't require scopes then there is no need for
+    # If the http client is not specified, then we must construct an http client
+    # to make requests. If the service has scopes, then we also need to setup
     # authentication.
-    else:
-      http = build_http()
+    if http is None:
+        # Does the service require scopes?
+        scopes = list(
+            service.get("auth", {}).get("oauth2", {}).get("scopes", {}).keys()
+        )
 
-  if model is None:
-    features = service.get('features', [])
-    model = JsonModel('dataWrapper' in features)
+        # If so, then the we need to setup authentication if no developerKey is
+        # specified.
+        if scopes and not developerKey:
+            # If the user didn't pass in credentials, attempt to acquire application
+            # default credentials.
+            if credentials is None:
+                credentials = _auth.default_credentials()
 
-  return Resource(http=http, baseUrl=base, model=model,
-                  developerKey=developerKey, requestBuilder=requestBuilder,
-                  resourceDesc=service, rootDesc=service, schema=schema)
+            # The credentials need to be scoped.
+            credentials = _auth.with_scopes(credentials, scopes)
+
+        # If credentials are provided, create an authorized http instance;
+        # otherwise, skip authentication.
+        if credentials:
+            http = _auth.authorized_http(credentials)
+
+        # If the service doesn't require scopes then there is no need for
+        # authentication.
+        else:
+            http = build_http()
+
+    if model is None:
+        features = service.get("features", [])
+        model = JsonModel("dataWrapper" in features)
+
+    return Resource(
+        http=http,
+        baseUrl=base,
+        model=model,
+        developerKey=developerKey,
+        requestBuilder=requestBuilder,
+        resourceDesc=service,
+        rootDesc=service,
+        schema=schema,
+    )
 
 
 def _cast(value, schema_type):
-  """Convert value to a string based on JSON Schema type.
+    """Convert value to a string based on JSON Schema type.
 
   See http://tools.ietf.org/html/draft-zyp-json-schema-03 for more details on
   JSON Schema.
@@ -398,26 +412,26 @@
   Returns:
     A string representation of 'value' based on the schema_type.
   """
-  if schema_type == 'string':
-    if type(value) == type('') or type(value) == type(u''):
-      return value
+    if schema_type == "string":
+        if type(value) == type("") or type(value) == type(u""):
+            return value
+        else:
+            return str(value)
+    elif schema_type == "integer":
+        return str(int(value))
+    elif schema_type == "number":
+        return str(float(value))
+    elif schema_type == "boolean":
+        return str(bool(value)).lower()
     else:
-      return str(value)
-  elif schema_type == 'integer':
-    return str(int(value))
-  elif schema_type == 'number':
-    return str(float(value))
-  elif schema_type == 'boolean':
-    return str(bool(value)).lower()
-  else:
-    if type(value) == type('') or type(value) == type(u''):
-      return value
-    else:
-      return str(value)
+        if type(value) == type("") or type(value) == type(u""):
+            return value
+        else:
+            return str(value)
 
 
 def _media_size_to_long(maxSize):
-  """Convert a string media size, such as 10GB or 3TB into an integer.
+    """Convert a string media size, such as 10GB or 3TB into an integer.
 
   Args:
     maxSize: string, size as a string, such as 2MB or 7GB.
@@ -425,18 +439,18 @@
   Returns:
     The size as an integer value.
   """
-  if len(maxSize) < 2:
-    return 0
-  units = maxSize[-2:].upper()
-  bit_shift = _MEDIA_SIZE_BIT_SHIFTS.get(units)
-  if bit_shift is not None:
-    return int(maxSize[:-2]) << bit_shift
-  else:
-    return int(maxSize)
+    if len(maxSize) < 2:
+        return 0
+    units = maxSize[-2:].upper()
+    bit_shift = _MEDIA_SIZE_BIT_SHIFTS.get(units)
+    if bit_shift is not None:
+        return int(maxSize[:-2]) << bit_shift
+    else:
+        return int(maxSize)
 
 
 def _media_path_url_from_info(root_desc, path_url):
-  """Creates an absolute media path URL.
+    """Creates an absolute media path URL.
 
   Constructed using the API root URI and service path from the discovery
   document and the relative path for the API method.
@@ -449,15 +463,15 @@
   Returns:
     String; the absolute URI for media upload for the API method.
   """
-  return '%(root)supload/%(service_path)s%(path)s' % {
-      'root': root_desc['rootUrl'],
-      'service_path': root_desc['servicePath'],
-      'path': path_url,
-  }
+    return "%(root)supload/%(service_path)s%(path)s" % {
+        "root": root_desc["rootUrl"],
+        "service_path": root_desc["servicePath"],
+        "path": path_url,
+    }
 
 
 def _fix_up_parameters(method_desc, root_desc, http_method, schema):
-  """Updates parameters of an API method with values specific to this library.
+    """Updates parameters of an API method with values specific to this library.
 
   Specifically, adds whatever global parameters are specified by the API to the
   parameters for the individual method. Also adds parameters which don't
@@ -480,28 +494,28 @@
     The updated Dictionary stored in the 'parameters' key of the method
         description dictionary.
   """
-  parameters = method_desc.setdefault('parameters', {})
+    parameters = method_desc.setdefault("parameters", {})
 
-  # Add in the parameters common to all methods.
-  for name, description in six.iteritems(root_desc.get('parameters', {})):
-    parameters[name] = description
+    # Add in the parameters common to all methods.
+    for name, description in six.iteritems(root_desc.get("parameters", {})):
+        parameters[name] = description
 
-  # Add in undocumented query parameters.
-  for name in STACK_QUERY_PARAMETERS:
-    parameters[name] = STACK_QUERY_PARAMETER_DEFAULT_VALUE.copy()
+    # Add in undocumented query parameters.
+    for name in STACK_QUERY_PARAMETERS:
+        parameters[name] = STACK_QUERY_PARAMETER_DEFAULT_VALUE.copy()
 
-  # Add 'body' (our own reserved word) to parameters if the method supports
-  # a request payload.
-  if http_method in HTTP_PAYLOAD_METHODS and 'request' in method_desc:
-    body = BODY_PARAMETER_DEFAULT_VALUE.copy()
-    body.update(method_desc['request'])
-    parameters['body'] = body
+    # Add 'body' (our own reserved word) to parameters if the method supports
+    # a request payload.
+    if http_method in HTTP_PAYLOAD_METHODS and "request" in method_desc:
+        body = BODY_PARAMETER_DEFAULT_VALUE.copy()
+        body.update(method_desc["request"])
+        parameters["body"] = body
 
-  return parameters
+    return parameters
 
 
 def _fix_up_media_upload(method_desc, root_desc, path_url, parameters):
-  """Adds 'media_body' and 'media_mime_type' parameters if supported by method.
+    """Adds 'media_body' and 'media_mime_type' parameters if supported by method.
 
   SIDE EFFECTS: If there is a 'mediaUpload' in the method description, adds
   'media_upload' key to parameters.
@@ -528,21 +542,21 @@
         the discovery document and the relative path for the API method. If
         media upload is not supported, this is None.
   """
-  media_upload = method_desc.get('mediaUpload', {})
-  accept = media_upload.get('accept', [])
-  max_size = _media_size_to_long(media_upload.get('maxSize', ''))
-  media_path_url = None
+    media_upload = method_desc.get("mediaUpload", {})
+    accept = media_upload.get("accept", [])
+    max_size = _media_size_to_long(media_upload.get("maxSize", ""))
+    media_path_url = None
 
-  if media_upload:
-    media_path_url = _media_path_url_from_info(root_desc, path_url)
-    parameters['media_body'] = MEDIA_BODY_PARAMETER_DEFAULT_VALUE.copy()
-    parameters['media_mime_type'] = MEDIA_MIME_TYPE_PARAMETER_DEFAULT_VALUE.copy()
+    if media_upload:
+        media_path_url = _media_path_url_from_info(root_desc, path_url)
+        parameters["media_body"] = MEDIA_BODY_PARAMETER_DEFAULT_VALUE.copy()
+        parameters["media_mime_type"] = MEDIA_MIME_TYPE_PARAMETER_DEFAULT_VALUE.copy()
 
-  return accept, max_size, media_path_url
+    return accept, max_size, media_path_url
 
 
 def _fix_up_method_description(method_desc, root_desc, schema):
-  """Updates a method description in a discovery document.
+    """Updates a method description in a discovery document.
 
   SIDE EFFECTS: Changes the parameters dictionary in the method description with
   extra parameters which are used locally.
@@ -573,40 +587,41 @@
         the discovery document and the relative path for the API method. If
         media upload is not supported, this is None.
   """
-  path_url = method_desc['path']
-  http_method = method_desc['httpMethod']
-  method_id = method_desc['id']
+    path_url = method_desc["path"]
+    http_method = method_desc["httpMethod"]
+    method_id = method_desc["id"]
 
-  parameters = _fix_up_parameters(method_desc, root_desc, http_method, schema)
-  # Order is important. `_fix_up_media_upload` needs `method_desc` to have a
-  # 'parameters' key and needs to know if there is a 'body' parameter because it
-  # also sets a 'media_body' parameter.
-  accept, max_size, media_path_url = _fix_up_media_upload(
-      method_desc, root_desc, path_url, parameters)
+    parameters = _fix_up_parameters(method_desc, root_desc, http_method, schema)
+    # Order is important. `_fix_up_media_upload` needs `method_desc` to have a
+    # 'parameters' key and needs to know if there is a 'body' parameter because it
+    # also sets a 'media_body' parameter.
+    accept, max_size, media_path_url = _fix_up_media_upload(
+        method_desc, root_desc, path_url, parameters
+    )
 
-  return path_url, http_method, method_id, accept, max_size, media_path_url
+    return path_url, http_method, method_id, accept, max_size, media_path_url
 
 
 def _urljoin(base, url):
-  """Custom urljoin replacement supporting : before / in url."""
-  # In general, it's unsafe to simply join base and url. However, for
-  # the case of discovery documents, we know:
-  #  * base will never contain params, query, or fragment
-  #  * url will never contain a scheme or net_loc.
-  # In general, this means we can safely join on /; we just need to
-  # ensure we end up with precisely one / joining base and url. The
-  # exception here is the case of media uploads, where url will be an
-  # absolute url.
-  if url.startswith('http://') or url.startswith('https://'):
-    return urljoin(base, url)
-  new_base = base if base.endswith('/') else base + '/'
-  new_url = url[1:] if url.startswith('/') else url
-  return new_base + new_url
+    """Custom urljoin replacement supporting : before / in url."""
+    # In general, it's unsafe to simply join base and url. However, for
+    # the case of discovery documents, we know:
+    #  * base will never contain params, query, or fragment
+    #  * url will never contain a scheme or net_loc.
+    # In general, this means we can safely join on /; we just need to
+    # ensure we end up with precisely one / joining base and url. The
+    # exception here is the case of media uploads, where url will be an
+    # absolute url.
+    if url.startswith("http://") or url.startswith("https://"):
+        return urljoin(base, url)
+    new_base = base if base.endswith("/") else base + "/"
+    new_url = url[1:] if url.startswith("/") else url
+    return new_base + new_url
 
 
 # TODO(dhermes): Convert this class to ResourceMethod and make it callable
 class ResourceMethodParameters(object):
-  """Represents the parameters associated with a method.
+    """Represents the parameters associated with a method.
 
   Attributes:
     argmap: Map from method parameter name (string) to query parameter name
@@ -630,8 +645,8 @@
        where each list of strings is the list of acceptable enum values.
   """
 
-  def __init__(self, method_desc):
-    """Constructor for ResourceMethodParameters.
+    def __init__(self, method_desc):
+        """Constructor for ResourceMethodParameters.
 
     Sets default values and defers to set_parameters to populate.
 
@@ -640,21 +655,21 @@
           comes from the dictionary of methods stored in the 'methods' key in
           the deserialized discovery document.
     """
-    self.argmap = {}
-    self.required_params = []
-    self.repeated_params = []
-    self.pattern_params = {}
-    self.query_params = []
-    # TODO(dhermes): Change path_params to a list if the extra URITEMPLATE
-    #                parsing is gotten rid of.
-    self.path_params = set()
-    self.param_types = {}
-    self.enum_params = {}
+        self.argmap = {}
+        self.required_params = []
+        self.repeated_params = []
+        self.pattern_params = {}
+        self.query_params = []
+        # TODO(dhermes): Change path_params to a list if the extra URITEMPLATE
+        #                parsing is gotten rid of.
+        self.path_params = set()
+        self.param_types = {}
+        self.enum_params = {}
 
-    self.set_parameters(method_desc)
+        self.set_parameters(method_desc)
 
-  def set_parameters(self, method_desc):
-    """Populates maps and lists based on method description.
+    def set_parameters(self, method_desc):
+        """Populates maps and lists based on method description.
 
     Iterates through each parameter for the method and parses the values from
     the parameter dictionary.
@@ -664,37 +679,37 @@
           comes from the dictionary of methods stored in the 'methods' key in
           the deserialized discovery document.
     """
-    for arg, desc in six.iteritems(method_desc.get('parameters', {})):
-      param = key2param(arg)
-      self.argmap[param] = arg
+        for arg, desc in six.iteritems(method_desc.get("parameters", {})):
+            param = key2param(arg)
+            self.argmap[param] = arg
 
-      if desc.get('pattern'):
-        self.pattern_params[param] = desc['pattern']
-      if desc.get('enum'):
-        self.enum_params[param] = desc['enum']
-      if desc.get('required'):
-        self.required_params.append(param)
-      if desc.get('repeated'):
-        self.repeated_params.append(param)
-      if desc.get('location') == 'query':
-        self.query_params.append(param)
-      if desc.get('location') == 'path':
-        self.path_params.add(param)
-      self.param_types[param] = desc.get('type', 'string')
+            if desc.get("pattern"):
+                self.pattern_params[param] = desc["pattern"]
+            if desc.get("enum"):
+                self.enum_params[param] = desc["enum"]
+            if desc.get("required"):
+                self.required_params.append(param)
+            if desc.get("repeated"):
+                self.repeated_params.append(param)
+            if desc.get("location") == "query":
+                self.query_params.append(param)
+            if desc.get("location") == "path":
+                self.path_params.add(param)
+            self.param_types[param] = desc.get("type", "string")
 
-    # TODO(dhermes): Determine if this is still necessary. Discovery based APIs
-    #                should have all path parameters already marked with
-    #                'location: path'.
-    for match in URITEMPLATE.finditer(method_desc['path']):
-      for namematch in VARNAME.finditer(match.group(0)):
-        name = key2param(namematch.group(0))
-        self.path_params.add(name)
-        if name in self.query_params:
-          self.query_params.remove(name)
+        # TODO(dhermes): Determine if this is still necessary. Discovery based APIs
+        #                should have all path parameters already marked with
+        #                'location: path'.
+        for match in URITEMPLATE.finditer(method_desc["path"]):
+            for namematch in VARNAME.finditer(match.group(0)):
+                name = key2param(namematch.group(0))
+                self.path_params.add(name)
+                if name in self.query_params:
+                    self.query_params.remove(name)
 
 
 def createMethod(methodName, methodDesc, rootDesc, schema):
-  """Creates a method for attaching to a Resource.
+    """Creates a method for attaching to a Resource.
 
   Args:
     methodName: string, name of the method to use.
@@ -703,239 +718,262 @@
     rootDesc: object, the entire deserialized discovery document.
     schema: object, mapping of schema names to schema descriptions.
   """
-  methodName = fix_method_name(methodName)
-  (pathUrl, httpMethod, methodId, accept,
-   maxSize, mediaPathUrl) = _fix_up_method_description(methodDesc, rootDesc, schema)
+    methodName = fix_method_name(methodName)
+    (
+        pathUrl,
+        httpMethod,
+        methodId,
+        accept,
+        maxSize,
+        mediaPathUrl,
+    ) = _fix_up_method_description(methodDesc, rootDesc, schema)
 
-  parameters = ResourceMethodParameters(methodDesc)
+    parameters = ResourceMethodParameters(methodDesc)
 
-  def method(self, **kwargs):
-    # Don't bother with doc string, it will be over-written by createMethod.
+    def method(self, **kwargs):
+        # Don't bother with doc string, it will be over-written by createMethod.
 
-    for name in six.iterkeys(kwargs):
-      if name not in parameters.argmap:
-        raise TypeError('Got an unexpected keyword argument "%s"' % name)
+        for name in six.iterkeys(kwargs):
+            if name not in parameters.argmap:
+                raise TypeError('Got an unexpected keyword argument "%s"' % name)
 
-    # Remove args that have a value of None.
-    keys = list(kwargs.keys())
-    for name in keys:
-      if kwargs[name] is None:
-        del kwargs[name]
+        # Remove args that have a value of None.
+        keys = list(kwargs.keys())
+        for name in keys:
+            if kwargs[name] is None:
+                del kwargs[name]
 
-    for name in parameters.required_params:
-      if name not in kwargs:
-        # temporary workaround for non-paging methods incorrectly requiring
-        # page token parameter (cf. drive.changes.watch vs. drive.changes.list)
-        if name not in _PAGE_TOKEN_NAMES or _findPageTokenName(
-            _methodProperties(methodDesc, schema, 'response')):
-          raise TypeError('Missing required parameter "%s"' % name)
+        for name in parameters.required_params:
+            if name not in kwargs:
+                # temporary workaround for non-paging methods incorrectly requiring
+                # page token parameter (cf. drive.changes.watch vs. drive.changes.list)
+                if name not in _PAGE_TOKEN_NAMES or _findPageTokenName(
+                    _methodProperties(methodDesc, schema, "response")
+                ):
+                    raise TypeError('Missing required parameter "%s"' % name)
 
-    for name, regex in six.iteritems(parameters.pattern_params):
-      if name in kwargs:
-        if isinstance(kwargs[name], six.string_types):
-          pvalues = [kwargs[name]]
+        for name, regex in six.iteritems(parameters.pattern_params):
+            if name in kwargs:
+                if isinstance(kwargs[name], six.string_types):
+                    pvalues = [kwargs[name]]
+                else:
+                    pvalues = kwargs[name]
+                for pvalue in pvalues:
+                    if re.match(regex, pvalue) is None:
+                        raise TypeError(
+                            'Parameter "%s" value "%s" does not match the pattern "%s"'
+                            % (name, pvalue, regex)
+                        )
+
+        for name, enums in six.iteritems(parameters.enum_params):
+            if name in kwargs:
+                # We need to handle the case of a repeated enum
+                # name differently, since we want to handle both
+                # arg='value' and arg=['value1', 'value2']
+                if name in parameters.repeated_params and not isinstance(
+                    kwargs[name], six.string_types
+                ):
+                    values = kwargs[name]
+                else:
+                    values = [kwargs[name]]
+                for value in values:
+                    if value not in enums:
+                        raise TypeError(
+                            'Parameter "%s" value "%s" is not an allowed value in "%s"'
+                            % (name, value, str(enums))
+                        )
+
+        actual_query_params = {}
+        actual_path_params = {}
+        for key, value in six.iteritems(kwargs):
+            to_type = parameters.param_types.get(key, "string")
+            # For repeated parameters we cast each member of the list.
+            if key in parameters.repeated_params and type(value) == type([]):
+                cast_value = [_cast(x, to_type) for x in value]
+            else:
+                cast_value = _cast(value, to_type)
+            if key in parameters.query_params:
+                actual_query_params[parameters.argmap[key]] = cast_value
+            if key in parameters.path_params:
+                actual_path_params[parameters.argmap[key]] = cast_value
+        body_value = kwargs.get("body", None)
+        media_filename = kwargs.get("media_body", None)
+        media_mime_type = kwargs.get("media_mime_type", None)
+
+        if self._developerKey:
+            actual_query_params["key"] = self._developerKey
+
+        model = self._model
+        if methodName.endswith("_media"):
+            model = MediaModel()
+        elif "response" not in methodDesc:
+            model = RawModel()
+
+        headers = {}
+        headers, params, query, body = model.request(
+            headers, actual_path_params, actual_query_params, body_value
+        )
+
+        expanded_url = uritemplate.expand(pathUrl, params)
+        url = _urljoin(self._baseUrl, expanded_url + query)
+
+        resumable = None
+        multipart_boundary = ""
+
+        if media_filename:
+            # Ensure we end up with a valid MediaUpload object.
+            if isinstance(media_filename, six.string_types):
+                if media_mime_type is None:
+                    logger.warning(
+                        "media_mime_type argument not specified: trying to auto-detect for %s",
+                        media_filename,
+                    )
+                    media_mime_type, _ = mimetypes.guess_type(media_filename)
+                if media_mime_type is None:
+                    raise UnknownFileType(media_filename)
+                if not mimeparse.best_match([media_mime_type], ",".join(accept)):
+                    raise UnacceptableMimeTypeError(media_mime_type)
+                media_upload = MediaFileUpload(media_filename, mimetype=media_mime_type)
+            elif isinstance(media_filename, MediaUpload):
+                media_upload = media_filename
+            else:
+                raise TypeError("media_filename must be str or MediaUpload.")
+
+            # Check the maxSize
+            if media_upload.size() is not None and media_upload.size() > maxSize > 0:
+                raise MediaUploadSizeError("Media larger than: %s" % maxSize)
+
+            # Use the media path uri for media uploads
+            expanded_url = uritemplate.expand(mediaPathUrl, params)
+            url = _urljoin(self._baseUrl, expanded_url + query)
+            if media_upload.resumable():
+                url = _add_query_parameter(url, "uploadType", "resumable")
+
+            if media_upload.resumable():
+                # This is all we need to do for resumable, if the body exists it gets
+                # sent in the first request, otherwise an empty body is sent.
+                resumable = media_upload
+            else:
+                # A non-resumable upload
+                if body is None:
+                    # This is a simple media upload
+                    headers["content-type"] = media_upload.mimetype()
+                    body = media_upload.getbytes(0, media_upload.size())
+                    url = _add_query_parameter(url, "uploadType", "media")
+                else:
+                    # This is a multipart/related upload.
+                    msgRoot = MIMEMultipart("related")
+                    # msgRoot should not write out its own headers
+                    setattr(msgRoot, "_write_headers", lambda self: None)
+
+                    # attach the body as one part
+                    msg = MIMENonMultipart(*headers["content-type"].split("/"))
+                    msg.set_payload(body)
+                    msgRoot.attach(msg)
+
+                    # attach the media as the second part
+                    msg = MIMENonMultipart(*media_upload.mimetype().split("/"))
+                    msg["Content-Transfer-Encoding"] = "binary"
+
+                    payload = media_upload.getbytes(0, media_upload.size())
+                    msg.set_payload(payload)
+                    msgRoot.attach(msg)
+                    # encode the body: note that we can't use `as_string`, because
+                    # it plays games with `From ` lines.
+                    fp = BytesIO()
+                    g = _BytesGenerator(fp, mangle_from_=False)
+                    g.flatten(msgRoot, unixfrom=False)
+                    body = fp.getvalue()
+
+                    multipart_boundary = msgRoot.get_boundary()
+                    headers["content-type"] = (
+                        "multipart/related; " 'boundary="%s"'
+                    ) % multipart_boundary
+                    url = _add_query_parameter(url, "uploadType", "multipart")
+
+        logger.info("URL being requested: %s %s" % (httpMethod, url))
+        return self._requestBuilder(
+            self._http,
+            model.response,
+            url,
+            method=httpMethod,
+            body=body,
+            headers=headers,
+            methodId=methodId,
+            resumable=resumable,
+        )
+
+    docs = [methodDesc.get("description", DEFAULT_METHOD_DOC), "\n\n"]
+    if len(parameters.argmap) > 0:
+        docs.append("Args:\n")
+
+    # Skip undocumented params and params common to all methods.
+    skip_parameters = list(rootDesc.get("parameters", {}).keys())
+    skip_parameters.extend(STACK_QUERY_PARAMETERS)
+
+    all_args = list(parameters.argmap.keys())
+    args_ordered = [key2param(s) for s in methodDesc.get("parameterOrder", [])]
+
+    # Move body to the front of the line.
+    if "body" in all_args:
+        args_ordered.append("body")
+
+    for name in all_args:
+        if name not in args_ordered:
+            args_ordered.append(name)
+
+    for arg in args_ordered:
+        if arg in skip_parameters:
+            continue
+
+        repeated = ""
+        if arg in parameters.repeated_params:
+            repeated = " (repeated)"
+        required = ""
+        if arg in parameters.required_params:
+            required = " (required)"
+        paramdesc = methodDesc["parameters"][parameters.argmap[arg]]
+        paramdoc = paramdesc.get("description", "A parameter")
+        if "$ref" in paramdesc:
+            docs.append(
+                ("  %s: object, %s%s%s\n    The object takes the" " form of:\n\n%s\n\n")
+                % (
+                    arg,
+                    paramdoc,
+                    required,
+                    repeated,
+                    schema.prettyPrintByName(paramdesc["$ref"]),
+                )
+            )
         else:
-          pvalues = kwargs[name]
-        for pvalue in pvalues:
-          if re.match(regex, pvalue) is None:
-            raise TypeError(
-                'Parameter "%s" value "%s" does not match the pattern "%s"' %
-                (name, pvalue, regex))
-
-    for name, enums in six.iteritems(parameters.enum_params):
-      if name in kwargs:
-        # We need to handle the case of a repeated enum
-        # name differently, since we want to handle both
-        # arg='value' and arg=['value1', 'value2']
-        if (name in parameters.repeated_params and
-            not isinstance(kwargs[name], six.string_types)):
-          values = kwargs[name]
+            paramtype = paramdesc.get("type", "string")
+            docs.append(
+                "  %s: %s, %s%s%s\n" % (arg, paramtype, paramdoc, required, repeated)
+            )
+        enum = paramdesc.get("enum", [])
+        enumDesc = paramdesc.get("enumDescriptions", [])
+        if enum and enumDesc:
+            docs.append("    Allowed values\n")
+            for (name, desc) in zip(enum, enumDesc):
+                docs.append("      %s - %s\n" % (name, desc))
+    if "response" in methodDesc:
+        if methodName.endswith("_media"):
+            docs.append("\nReturns:\n  The media object as a string.\n\n    ")
         else:
-          values = [kwargs[name]]
-        for value in values:
-          if value not in enums:
-            raise TypeError(
-                'Parameter "%s" value "%s" is not an allowed value in "%s"' %
-                (name, value, str(enums)))
+            docs.append("\nReturns:\n  An object of the form:\n\n    ")
+            docs.append(schema.prettyPrintSchema(methodDesc["response"]))
 
-    actual_query_params = {}
-    actual_path_params = {}
-    for key, value in six.iteritems(kwargs):
-      to_type = parameters.param_types.get(key, 'string')
-      # For repeated parameters we cast each member of the list.
-      if key in parameters.repeated_params and type(value) == type([]):
-        cast_value = [_cast(x, to_type) for x in value]
-      else:
-        cast_value = _cast(value, to_type)
-      if key in parameters.query_params:
-        actual_query_params[parameters.argmap[key]] = cast_value
-      if key in parameters.path_params:
-        actual_path_params[parameters.argmap[key]] = cast_value
-    body_value = kwargs.get('body', None)
-    media_filename = kwargs.get('media_body', None)
-    media_mime_type = kwargs.get('media_mime_type', None)
-
-    if self._developerKey:
-      actual_query_params['key'] = self._developerKey
-
-    model = self._model
-    if methodName.endswith('_media'):
-      model = MediaModel()
-    elif 'response' not in methodDesc:
-      model = RawModel()
-
-    headers = {}
-    headers, params, query, body = model.request(headers,
-        actual_path_params, actual_query_params, body_value)
-
-    expanded_url = uritemplate.expand(pathUrl, params)
-    url = _urljoin(self._baseUrl, expanded_url + query)
-
-    resumable = None
-    multipart_boundary = ''
-
-    if media_filename:
-      # Ensure we end up with a valid MediaUpload object.
-      if isinstance(media_filename, six.string_types):
-        if media_mime_type is None:
-          logger.warning(
-              'media_mime_type argument not specified: trying to auto-detect for %s',
-              media_filename)
-          media_mime_type, _ = mimetypes.guess_type(media_filename)
-        if media_mime_type is None:
-          raise UnknownFileType(media_filename)
-        if not mimeparse.best_match([media_mime_type], ','.join(accept)):
-          raise UnacceptableMimeTypeError(media_mime_type)
-        media_upload = MediaFileUpload(media_filename,
-                                       mimetype=media_mime_type)
-      elif isinstance(media_filename, MediaUpload):
-        media_upload = media_filename
-      else:
-        raise TypeError('media_filename must be str or MediaUpload.')
-
-      # Check the maxSize
-      if media_upload.size() is not None and media_upload.size() > maxSize > 0:
-        raise MediaUploadSizeError("Media larger than: %s" % maxSize)
-
-      # Use the media path uri for media uploads
-      expanded_url = uritemplate.expand(mediaPathUrl, params)
-      url = _urljoin(self._baseUrl, expanded_url + query)
-      if media_upload.resumable():
-        url = _add_query_parameter(url, 'uploadType', 'resumable')
-
-      if media_upload.resumable():
-        # This is all we need to do for resumable, if the body exists it gets
-        # sent in the first request, otherwise an empty body is sent.
-        resumable = media_upload
-      else:
-        # A non-resumable upload
-        if body is None:
-          # This is a simple media upload
-          headers['content-type'] = media_upload.mimetype()
-          body = media_upload.getbytes(0, media_upload.size())
-          url = _add_query_parameter(url, 'uploadType', 'media')
-        else:
-          # This is a multipart/related upload.
-          msgRoot = MIMEMultipart('related')
-          # msgRoot should not write out it's own headers
-          setattr(msgRoot, '_write_headers', lambda self: None)
-
-          # attach the body as one part
-          msg = MIMENonMultipart(*headers['content-type'].split('/'))
-          msg.set_payload(body)
-          msgRoot.attach(msg)
-
-          # attach the media as the second part
-          msg = MIMENonMultipart(*media_upload.mimetype().split('/'))
-          msg['Content-Transfer-Encoding'] = 'binary'
-
-          payload = media_upload.getbytes(0, media_upload.size())
-          msg.set_payload(payload)
-          msgRoot.attach(msg)
-          # encode the body: note that we can't use `as_string`, because
-          # it plays games with `From ` lines.
-          fp = BytesIO()
-          g = _BytesGenerator(fp, mangle_from_=False)
-          g.flatten(msgRoot, unixfrom=False)
-          body = fp.getvalue()
-
-          multipart_boundary = msgRoot.get_boundary()
-          headers['content-type'] = ('multipart/related; '
-                                     'boundary="%s"') % multipart_boundary
-          url = _add_query_parameter(url, 'uploadType', 'multipart')
-
-    logger.info('URL being requested: %s %s' % (httpMethod,url))
-    return self._requestBuilder(self._http,
-                                model.response,
-                                url,
-                                method=httpMethod,
-                                body=body,
-                                headers=headers,
-                                methodId=methodId,
-                                resumable=resumable)
-
-  docs = [methodDesc.get('description', DEFAULT_METHOD_DOC), '\n\n']
-  if len(parameters.argmap) > 0:
-    docs.append('Args:\n')
-
-  # Skip undocumented params and params common to all methods.
-  skip_parameters = list(rootDesc.get('parameters', {}).keys())
-  skip_parameters.extend(STACK_QUERY_PARAMETERS)
-
-  all_args = list(parameters.argmap.keys())
-  args_ordered = [key2param(s) for s in methodDesc.get('parameterOrder', [])]
-
-  # Move body to the front of the line.
-  if 'body' in all_args:
-    args_ordered.append('body')
-
-  for name in all_args:
-    if name not in args_ordered:
-      args_ordered.append(name)
-
-  for arg in args_ordered:
-    if arg in skip_parameters:
-      continue
-
-    repeated = ''
-    if arg in parameters.repeated_params:
-      repeated = ' (repeated)'
-    required = ''
-    if arg in parameters.required_params:
-      required = ' (required)'
-    paramdesc = methodDesc['parameters'][parameters.argmap[arg]]
-    paramdoc = paramdesc.get('description', 'A parameter')
-    if '$ref' in paramdesc:
-      docs.append(
-          ('  %s: object, %s%s%s\n    The object takes the'
-          ' form of:\n\n%s\n\n') % (arg, paramdoc, required, repeated,
-            schema.prettyPrintByName(paramdesc['$ref'])))
-    else:
-      paramtype = paramdesc.get('type', 'string')
-      docs.append('  %s: %s, %s%s%s\n' % (arg, paramtype, paramdoc, required,
-                                          repeated))
-    enum = paramdesc.get('enum', [])
-    enumDesc = paramdesc.get('enumDescriptions', [])
-    if enum and enumDesc:
-      docs.append('    Allowed values\n')
-      for (name, desc) in zip(enum, enumDesc):
-        docs.append('      %s - %s\n' % (name, desc))
-  if 'response' in methodDesc:
-    if methodName.endswith('_media'):
-      docs.append('\nReturns:\n  The media object as a string.\n\n    ')
-    else:
-      docs.append('\nReturns:\n  An object of the form:\n\n    ')
-      docs.append(schema.prettyPrintSchema(methodDesc['response']))
-
-  setattr(method, '__doc__', ''.join(docs))
-  return (methodName, method)
+    setattr(method, "__doc__", "".join(docs))
+    return (methodName, method)
 
 
-def createNextMethod(methodName,
-                     pageTokenName='pageToken',
-                     nextPageTokenName='nextPageToken',
-                     isPageTokenParameter=True):
-  """Creates any _next methods for attaching to a Resource.
+def createNextMethod(
+    methodName,
+    pageTokenName="pageToken",
+    nextPageTokenName="nextPageToken",
+    isPageTokenParameter=True,
+):
+    """Creates any _next methods for attaching to a Resource.
 
   The _next methods allow for easy iteration through list() responses.
 
@@ -946,10 +984,10 @@
     isPageTokenParameter: Boolean, True if request page token is a query
         parameter, False if request page token is a field of the request body.
   """
-  methodName = fix_method_name(methodName)
+    methodName = fix_method_name(methodName)
 
-  def methodNext(self, previous_request, previous_response):
-    """Retrieves the next page of results.
+    def methodNext(self, previous_request, previous_response):
+        """Retrieves the next page of results.
 
 Args:
   previous_request: The request for the previous page. (required)
@@ -959,39 +997,49 @@
   A request object that you can call 'execute()' on to request the next
   page. Returns None if there are no more items in the collection.
     """
-    # Retrieve nextPageToken from previous_response
-    # Use as pageToken in previous_request to create new request.
+        # Retrieve nextPageToken from previous_response
+        # Use as pageToken in previous_request to create new request.
 
-    nextPageToken = previous_response.get(nextPageTokenName, None)
-    if not nextPageToken:
-      return None
+        nextPageToken = previous_response.get(nextPageTokenName, None)
+        if not nextPageToken:
+            return None
 
-    request = copy.copy(previous_request)
+        request = copy.copy(previous_request)
 
-    if isPageTokenParameter:
-        # Replace pageToken value in URI
-        request.uri = _add_query_parameter(
-            request.uri, pageTokenName, nextPageToken)
-        logger.info('Next page request URL: %s %s' % (methodName, request.uri))
-    else:
-        # Replace pageToken value in request body
-        model = self._model
-        body = model.deserialize(request.body)
-        body[pageTokenName] = nextPageToken
-        request.body = model.serialize(body)
-        logger.info('Next page request body: %s %s' % (methodName, body))
+        if isPageTokenParameter:
+            # Replace pageToken value in URI
+            request.uri = _add_query_parameter(
+                request.uri, pageTokenName, nextPageToken
+            )
+            logger.info("Next page request URL: %s %s" % (methodName, request.uri))
+        else:
+            # Replace pageToken value in request body
+            model = self._model
+            body = model.deserialize(request.body)
+            body[pageTokenName] = nextPageToken
+            request.body = model.serialize(body)
+            logger.info("Next page request body: %s %s" % (methodName, body))
 
-    return request
+        return request
 
-  return (methodName, methodNext)
+    return (methodName, methodNext)
 
 
 class Resource(object):
-  """A class for interacting with a resource."""
+    """A class for interacting with a resource."""
 
-  def __init__(self, http, baseUrl, model, requestBuilder, developerKey,
-               resourceDesc, rootDesc, schema):
-    """Build a Resource from the API description.
+    def __init__(
+        self,
+        http,
+        baseUrl,
+        model,
+        requestBuilder,
+        developerKey,
+        resourceDesc,
+        rootDesc,
+        schema,
+    ):
+        """Build a Resource from the API description.
 
     Args:
       http: httplib2.Http, Object to make http requests with.
@@ -1008,63 +1056,66 @@
       rootDesc: object, the entire deserialized discovery document.
       schema: object, mapping of schema names to schema descriptions.
     """
-    self._dynamic_attrs = []
+        self._dynamic_attrs = []
 
-    self._http = http
-    self._baseUrl = baseUrl
-    self._model = model
-    self._developerKey = developerKey
-    self._requestBuilder = requestBuilder
-    self._resourceDesc = resourceDesc
-    self._rootDesc = rootDesc
-    self._schema = schema
+        self._http = http
+        self._baseUrl = baseUrl
+        self._model = model
+        self._developerKey = developerKey
+        self._requestBuilder = requestBuilder
+        self._resourceDesc = resourceDesc
+        self._rootDesc = rootDesc
+        self._schema = schema
 
-    self._set_service_methods()
+        self._set_service_methods()
 
-  def _set_dynamic_attr(self, attr_name, value):
-    """Sets an instance attribute and tracks it in a list of dynamic attributes.
+    def _set_dynamic_attr(self, attr_name, value):
+        """Sets an instance attribute and tracks it in a list of dynamic attributes.
 
     Args:
       attr_name: string; The name of the attribute to be set
       value: The value being set on the object and tracked in the dynamic cache.
     """
-    self._dynamic_attrs.append(attr_name)
-    self.__dict__[attr_name] = value
+        self._dynamic_attrs.append(attr_name)
+        self.__dict__[attr_name] = value
 
-  def __getstate__(self):
-    """Trim the state down to something that can be pickled.
+    def __getstate__(self):
+        """Trim the state down to something that can be pickled.
 
     Uses the fact that the instance variable _dynamic_attrs holds attrs that
     will be wiped and restored on pickle serialization.
     """
-    state_dict = copy.copy(self.__dict__)
-    for dynamic_attr in self._dynamic_attrs:
-      del state_dict[dynamic_attr]
-    del state_dict['_dynamic_attrs']
-    return state_dict
+        state_dict = copy.copy(self.__dict__)
+        for dynamic_attr in self._dynamic_attrs:
+            del state_dict[dynamic_attr]
+        del state_dict["_dynamic_attrs"]
+        return state_dict
 
-  def __setstate__(self, state):
-    """Reconstitute the state of the object from being pickled.
+    def __setstate__(self, state):
+        """Reconstitute the state of the object from being pickled.
 
     Uses the fact that the instance variable _dynamic_attrs holds attrs that
     will be wiped and restored on pickle serialization.
     """
-    self.__dict__.update(state)
-    self._dynamic_attrs = []
-    self._set_service_methods()
+        self.__dict__.update(state)
+        self._dynamic_attrs = []
+        self._set_service_methods()
 
-  def _set_service_methods(self):
-    self._add_basic_methods(self._resourceDesc, self._rootDesc, self._schema)
-    self._add_nested_resources(self._resourceDesc, self._rootDesc, self._schema)
-    self._add_next_methods(self._resourceDesc, self._schema)
+    def _set_service_methods(self):
+        self._add_basic_methods(self._resourceDesc, self._rootDesc, self._schema)
+        self._add_nested_resources(self._resourceDesc, self._rootDesc, self._schema)
+        self._add_next_methods(self._resourceDesc, self._schema)
 
-  def _add_basic_methods(self, resourceDesc, rootDesc, schema):
-    # If this is the root Resource, add a new_batch_http_request() method.
-    if resourceDesc == rootDesc:
-      batch_uri = '%s%s' % (
-        rootDesc['rootUrl'], rootDesc.get('batchPath', 'batch'))
-      def new_batch_http_request(callback=None):
-        """Create a BatchHttpRequest object based on the discovery document.
+    def _add_basic_methods(self, resourceDesc, rootDesc, schema):
+        # If this is the root Resource, add a new_batch_http_request() method.
+        if resourceDesc == rootDesc:
+            batch_uri = "%s%s" % (
+                rootDesc["rootUrl"],
+                rootDesc.get("batchPath", "batch"),
+            )
+
+            def new_batch_http_request(callback=None):
+                """Create a BatchHttpRequest object based on the discovery document.
 
         Args:
           callback: callable, A callback to be called for each response, of the
@@ -1077,83 +1128,100 @@
         Returns:
           A BatchHttpRequest object based on the discovery document.
         """
-        return BatchHttpRequest(callback=callback, batch_uri=batch_uri)
-      self._set_dynamic_attr('new_batch_http_request', new_batch_http_request)
+                return BatchHttpRequest(callback=callback, batch_uri=batch_uri)
 
-    # Add basic methods to Resource
-    if 'methods' in resourceDesc:
-      for methodName, methodDesc in six.iteritems(resourceDesc['methods']):
-        fixedMethodName, method = createMethod(
-            methodName, methodDesc, rootDesc, schema)
-        self._set_dynamic_attr(fixedMethodName,
-                               method.__get__(self, self.__class__))
-        # Add in _media methods. The functionality of the attached method will
-        # change when it sees that the method name ends in _media.
-        if methodDesc.get('supportsMediaDownload', False):
-          fixedMethodName, method = createMethod(
-              methodName + '_media', methodDesc, rootDesc, schema)
-          self._set_dynamic_attr(fixedMethodName,
-                                 method.__get__(self, self.__class__))
+            self._set_dynamic_attr("new_batch_http_request", new_batch_http_request)
 
-  def _add_nested_resources(self, resourceDesc, rootDesc, schema):
-    # Add in nested resources
-    if 'resources' in resourceDesc:
+        # Add basic methods to Resource
+        if "methods" in resourceDesc:
+            for methodName, methodDesc in six.iteritems(resourceDesc["methods"]):
+                fixedMethodName, method = createMethod(
+                    methodName, methodDesc, rootDesc, schema
+                )
+                self._set_dynamic_attr(
+                    fixedMethodName, method.__get__(self, self.__class__)
+                )
+                # Add in _media methods. The functionality of the attached method will
+                # change when it sees that the method name ends in _media.
+                if methodDesc.get("supportsMediaDownload", False):
+                    fixedMethodName, method = createMethod(
+                        methodName + "_media", methodDesc, rootDesc, schema
+                    )
+                    self._set_dynamic_attr(
+                        fixedMethodName, method.__get__(self, self.__class__)
+                    )
 
-      def createResourceMethod(methodName, methodDesc):
-        """Create a method on the Resource to access a nested Resource.
+    def _add_nested_resources(self, resourceDesc, rootDesc, schema):
+        # Add in nested resources
+        if "resources" in resourceDesc:
+
+            def createResourceMethod(methodName, methodDesc):
+                """Create a method on the Resource to access a nested Resource.
 
         Args:
           methodName: string, name of the method to use.
           methodDesc: object, fragment of deserialized discovery document that
             describes the method.
         """
-        methodName = fix_method_name(methodName)
+                methodName = fix_method_name(methodName)
 
-        def methodResource(self):
-          return Resource(http=self._http, baseUrl=self._baseUrl,
-                          model=self._model, developerKey=self._developerKey,
-                          requestBuilder=self._requestBuilder,
-                          resourceDesc=methodDesc, rootDesc=rootDesc,
-                          schema=schema)
+                def methodResource(self):
+                    return Resource(
+                        http=self._http,
+                        baseUrl=self._baseUrl,
+                        model=self._model,
+                        developerKey=self._developerKey,
+                        requestBuilder=self._requestBuilder,
+                        resourceDesc=methodDesc,
+                        rootDesc=rootDesc,
+                        schema=schema,
+                    )
 
-        setattr(methodResource, '__doc__', 'A collection resource.')
-        setattr(methodResource, '__is_resource__', True)
+                setattr(methodResource, "__doc__", "A collection resource.")
+                setattr(methodResource, "__is_resource__", True)
 
-        return (methodName, methodResource)
+                return (methodName, methodResource)
 
-      for methodName, methodDesc in six.iteritems(resourceDesc['resources']):
-        fixedMethodName, method = createResourceMethod(methodName, methodDesc)
-        self._set_dynamic_attr(fixedMethodName,
-                               method.__get__(self, self.__class__))
+            for methodName, methodDesc in six.iteritems(resourceDesc["resources"]):
+                fixedMethodName, method = createResourceMethod(methodName, methodDesc)
+                self._set_dynamic_attr(
+                    fixedMethodName, method.__get__(self, self.__class__)
+                )
 
-  def _add_next_methods(self, resourceDesc, schema):
-    # Add _next() methods if and only if one of the names 'pageToken' or
-    # 'nextPageToken' occurs among the fields of both the method's response
-    # type either the method's request (query parameters) or request body.
-    if 'methods' not in resourceDesc:
-      return
-    for methodName, methodDesc in six.iteritems(resourceDesc['methods']):
-      nextPageTokenName = _findPageTokenName(
-          _methodProperties(methodDesc, schema, 'response'))
-      if not nextPageTokenName:
-        continue
-      isPageTokenParameter = True
-      pageTokenName = _findPageTokenName(methodDesc.get('parameters', {}))
-      if not pageTokenName:
-        isPageTokenParameter = False
-        pageTokenName = _findPageTokenName(
-            _methodProperties(methodDesc, schema, 'request'))
-      if not pageTokenName:
-        continue
-      fixedMethodName, method = createNextMethod(
-          methodName + '_next', pageTokenName, nextPageTokenName,
-          isPageTokenParameter)
-      self._set_dynamic_attr(fixedMethodName,
-                             method.__get__(self, self.__class__))
+    def _add_next_methods(self, resourceDesc, schema):
+        # Add _next() methods if and only if one of the names 'pageToken' or
+        # 'nextPageToken' occurs among the fields of both the method's response
+        # type and either the method's request (query parameters) or request body.
+        if "methods" not in resourceDesc:
+            return
+        for methodName, methodDesc in six.iteritems(resourceDesc["methods"]):
+            nextPageTokenName = _findPageTokenName(
+                _methodProperties(methodDesc, schema, "response")
+            )
+            if not nextPageTokenName:
+                continue
+            isPageTokenParameter = True
+            pageTokenName = _findPageTokenName(methodDesc.get("parameters", {}))
+            if not pageTokenName:
+                isPageTokenParameter = False
+                pageTokenName = _findPageTokenName(
+                    _methodProperties(methodDesc, schema, "request")
+                )
+            if not pageTokenName:
+                continue
+            fixedMethodName, method = createNextMethod(
+                methodName + "_next",
+                pageTokenName,
+                nextPageTokenName,
+                isPageTokenParameter,
+            )
+            self._set_dynamic_attr(
+                fixedMethodName, method.__get__(self, self.__class__)
+            )
 
 
 def _findPageTokenName(fields):
-  """Search field names for one like a page token.
+    """Search field names for one like a page token.
 
   Args:
     fields: container of string, names of fields.
@@ -1162,11 +1230,13 @@
     First name that is either 'pageToken' or 'nextPageToken' if one exists,
     otherwise None.
   """
-  return next((tokenName for tokenName in _PAGE_TOKEN_NAMES
-              if tokenName in fields), None)
+    return next(
+        (tokenName for tokenName in _PAGE_TOKEN_NAMES if tokenName in fields), None
+    )
+
 
 def _methodProperties(methodDesc, schema, name):
-  """Get properties of a field in a method description.
+    """Get properties of a field in a method description.
 
   Args:
     methodDesc: object, fragment of deserialized discovery document that
@@ -1179,7 +1249,7 @@
     corresponding to 'properties' field of object corresponding to named field
     in method description, if it exists, otherwise empty dict.
   """
-  desc = methodDesc.get(name, {})
-  if '$ref' in desc:
-    desc = schema.get(desc['$ref'], {})
-  return desc.get('properties', {})
+    desc = methodDesc.get(name, {})
+    if "$ref" in desc:
+        desc = schema.get(desc["$ref"], {})
+    return desc.get("properties", {})
diff --git a/googleapiclient/discovery_cache/__init__.py b/googleapiclient/discovery_cache/__init__.py
index f86a06d..3e4e9a5 100644
--- a/googleapiclient/discovery_cache/__init__.py
+++ b/googleapiclient/discovery_cache/__init__.py
@@ -26,20 +26,22 @@
 
 
 def autodetect():
-  """Detects an appropriate cache module and returns it.
+    """Detects an appropriate cache module and returns it.
 
   Returns:
     googleapiclient.discovery_cache.base.Cache, a cache object which
     is auto detected, or None if no cache object is available.
   """
-  try:
-    from google.appengine.api import memcache
-    from . import appengine_memcache
-    return appengine_memcache.cache
-  except Exception:
     try:
-      from . import file_cache
-      return file_cache.cache
-    except Exception as e:
-      LOGGER.warning(e, exc_info=True)
-      return None
+        from google.appengine.api import memcache
+        from . import appengine_memcache
+
+        return appengine_memcache.cache
+    except Exception:
+        try:
+            from . import file_cache
+
+            return file_cache.cache
+        except Exception as e:
+            LOGGER.warning(e, exc_info=True)
+            return None
diff --git a/googleapiclient/discovery_cache/appengine_memcache.py b/googleapiclient/discovery_cache/appengine_memcache.py
index 7e43e66..1d18d7a 100644
--- a/googleapiclient/discovery_cache/appengine_memcache.py
+++ b/googleapiclient/discovery_cache/appengine_memcache.py
@@ -26,30 +26,31 @@
 
 LOGGER = logging.getLogger(__name__)
 
-NAMESPACE = 'google-api-client'
+NAMESPACE = "google-api-client"
 
 
 class Cache(base.Cache):
-  """A cache with app engine memcache API."""
+    """A cache with app engine memcache API."""
 
-  def __init__(self, max_age):
-      """Constructor.
+    def __init__(self, max_age):
+        """Constructor.
 
       Args:
         max_age: Cache expiration in seconds.
       """
-      self._max_age = max_age
+        self._max_age = max_age
 
-  def get(self, url):
-    try:
-      return memcache.get(url, namespace=NAMESPACE)
-    except Exception as e:
-      LOGGER.warning(e, exc_info=True)
+    def get(self, url):
+        try:
+            return memcache.get(url, namespace=NAMESPACE)
+        except Exception as e:
+            LOGGER.warning(e, exc_info=True)
 
-  def set(self, url, content):
-    try:
-      memcache.set(url, content, time=int(self._max_age), namespace=NAMESPACE)
-    except Exception as e:
-      LOGGER.warning(e, exc_info=True)
+    def set(self, url, content):
+        try:
+            memcache.set(url, content, time=int(self._max_age), namespace=NAMESPACE)
+        except Exception as e:
+            LOGGER.warning(e, exc_info=True)
+
 
 cache = Cache(max_age=DISCOVERY_DOC_MAX_AGE)
diff --git a/googleapiclient/discovery_cache/base.py b/googleapiclient/discovery_cache/base.py
index 00e466d..fbe4459 100644
--- a/googleapiclient/discovery_cache/base.py
+++ b/googleapiclient/discovery_cache/base.py
@@ -18,12 +18,13 @@
 
 
 class Cache(object):
-  """A base abstract cache class."""
-  __metaclass__ = abc.ABCMeta
+    """A base abstract cache class."""
 
-  @abc.abstractmethod
-  def get(self, url):
-    """Gets the content from the memcache with a given key.
+    __metaclass__ = abc.ABCMeta
+
+    @abc.abstractmethod
+    def get(self, url):
+        """Gets the content from the memcache with a given key.
 
     Args:
       url: string, the key for the cache.
@@ -32,14 +33,14 @@
       object, the value in the cache for the given key, or None if the key is
       not in the cache.
     """
-    raise NotImplementedError()
+        raise NotImplementedError()
 
-  @abc.abstractmethod
-  def set(self, url, content):
-    """Sets the given key and content in the cache.
+    @abc.abstractmethod
+    def set(self, url, content):
+        """Sets the given key and content in the cache.
 
     Args:
       url: string, the key for the cache.
       content: string, the discovery document.
     """
-    raise NotImplementedError()
+        raise NotImplementedError()
diff --git a/googleapiclient/discovery_cache/file_cache.py b/googleapiclient/discovery_cache/file_cache.py
index 48bddea..36eb29a 100644
--- a/googleapiclient/discovery_cache/file_cache.py
+++ b/googleapiclient/discovery_cache/file_cache.py
@@ -30,112 +30,117 @@
 import threading
 
 try:
-  from oauth2client.contrib.locked_file import LockedFile
+    from oauth2client.contrib.locked_file import LockedFile
 except ImportError:
-  # oauth2client < 2.0.0
-  try:
-    from oauth2client.locked_file import LockedFile
-  except ImportError:
-    # oauth2client > 4.0.0 or google-auth
-    raise ImportError(
-      'file_cache is unavailable when using oauth2client >= 4.0.0 or google-auth')
+    # oauth2client < 2.0.0
+    try:
+        from oauth2client.locked_file import LockedFile
+    except ImportError:
+        # oauth2client > 4.0.0 or google-auth
+        raise ImportError(
+            "file_cache is unavailable when using oauth2client >= 4.0.0 or google-auth"
+        )
 
 from . import base
 from ..discovery_cache import DISCOVERY_DOC_MAX_AGE
 
 LOGGER = logging.getLogger(__name__)
 
-FILENAME = 'google-api-python-client-discovery-doc.cache'
+FILENAME = "google-api-python-client-discovery-doc.cache"
 EPOCH = datetime.datetime.utcfromtimestamp(0)
 
 
 def _to_timestamp(date):
-  try:
-    return (date - EPOCH).total_seconds()
-  except AttributeError:
-    # The following is the equivalent of total_seconds() in Python2.6.
-    # See also: https://docs.python.org/2/library/datetime.html
-    delta = date - EPOCH
-    return ((delta.microseconds + (delta.seconds + delta.days * 24 * 3600)
-             * 10**6) / 10**6)
+    try:
+        return (date - EPOCH).total_seconds()
+    except AttributeError:
+        # The following is the equivalent of total_seconds() in Python2.6.
+        # See also: https://docs.python.org/2/library/datetime.html
+        delta = date - EPOCH
+        return (
+            delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6
+        ) / 10 ** 6
 
 
 def _read_or_initialize_cache(f):
-  f.file_handle().seek(0)
-  try:
-    cache = json.load(f.file_handle())
-  except Exception:
-    # This means it opens the file for the first time, or the cache is
-    # corrupted, so initializing the file with an empty dict.
-    cache = {}
-    f.file_handle().truncate(0)
     f.file_handle().seek(0)
-    json.dump(cache, f.file_handle())
-  return cache
+    try:
+        cache = json.load(f.file_handle())
+    except Exception:
+        # This means it opens the file for the first time, or the cache is
+        # corrupted, so initializing the file with an empty dict.
+        cache = {}
+        f.file_handle().truncate(0)
+        f.file_handle().seek(0)
+        json.dump(cache, f.file_handle())
+    return cache
 
 
 class Cache(base.Cache):
-  """A file based cache for the discovery documents."""
+    """A file based cache for the discovery documents."""
 
-  def __init__(self, max_age):
-      """Constructor.
+    def __init__(self, max_age):
+        """Constructor.
 
       Args:
         max_age: Cache expiration in seconds.
       """
-      self._max_age = max_age
-      self._file = os.path.join(tempfile.gettempdir(), FILENAME)
-      f = LockedFile(self._file, 'a+', 'r')
-      try:
-        f.open_and_lock()
-        if f.is_locked():
-          _read_or_initialize_cache(f)
-        # If we can not obtain the lock, other process or thread must
-        # have initialized the file.
-      except Exception as e:
-        LOGGER.warning(e, exc_info=True)
-      finally:
-        f.unlock_and_close()
+        self._max_age = max_age
+        self._file = os.path.join(tempfile.gettempdir(), FILENAME)
+        f = LockedFile(self._file, "a+", "r")
+        try:
+            f.open_and_lock()
+            if f.is_locked():
+                _read_or_initialize_cache(f)
+            # If we can not obtain the lock, other process or thread must
+            # have initialized the file.
+        except Exception as e:
+            LOGGER.warning(e, exc_info=True)
+        finally:
+            f.unlock_and_close()
 
-  def get(self, url):
-    f = LockedFile(self._file, 'r+', 'r')
-    try:
-      f.open_and_lock()
-      if f.is_locked():
-        cache = _read_or_initialize_cache(f)
-        if url in cache:
-          content, t = cache.get(url, (None, 0))
-          if _to_timestamp(datetime.datetime.now()) < t + self._max_age:
-            return content
-        return None
-      else:
-        LOGGER.debug('Could not obtain a lock for the cache file.')
-        return None
-    except Exception as e:
-      LOGGER.warning(e, exc_info=True)
-    finally:
-      f.unlock_and_close()
+    def get(self, url):
+        f = LockedFile(self._file, "r+", "r")
+        try:
+            f.open_and_lock()
+            if f.is_locked():
+                cache = _read_or_initialize_cache(f)
+                if url in cache:
+                    content, t = cache.get(url, (None, 0))
+                    if _to_timestamp(datetime.datetime.now()) < t + self._max_age:
+                        return content
+                return None
+            else:
+                LOGGER.debug("Could not obtain a lock for the cache file.")
+                return None
+        except Exception as e:
+            LOGGER.warning(e, exc_info=True)
+        finally:
+            f.unlock_and_close()
 
-  def set(self, url, content):
-    f = LockedFile(self._file, 'r+', 'r')
-    try:
-      f.open_and_lock()
-      if f.is_locked():
-        cache = _read_or_initialize_cache(f)
-        cache[url] = (content, _to_timestamp(datetime.datetime.now()))
-        # Remove stale cache.
-        for k, (_, timestamp) in list(cache.items()):
-          if _to_timestamp(datetime.datetime.now()) >= timestamp + self._max_age:
-            del cache[k]
-        f.file_handle().truncate(0)
-        f.file_handle().seek(0)
-        json.dump(cache, f.file_handle())
-      else:
-        LOGGER.debug('Could not obtain a lock for the cache file.')
-    except Exception as e:
-      LOGGER.warning(e, exc_info=True)
-    finally:
-      f.unlock_and_close()
+    def set(self, url, content):
+        f = LockedFile(self._file, "r+", "r")
+        try:
+            f.open_and_lock()
+            if f.is_locked():
+                cache = _read_or_initialize_cache(f)
+                cache[url] = (content, _to_timestamp(datetime.datetime.now()))
+                # Remove stale cache.
+                for k, (_, timestamp) in list(cache.items()):
+                    if (
+                        _to_timestamp(datetime.datetime.now())
+                        >= timestamp + self._max_age
+                    ):
+                        del cache[k]
+                f.file_handle().truncate(0)
+                f.file_handle().seek(0)
+                json.dump(cache, f.file_handle())
+            else:
+                LOGGER.debug("Could not obtain a lock for the cache file.")
+        except Exception as e:
+            LOGGER.warning(e, exc_info=True)
+        finally:
+            f.unlock_and_close()
 
 
 cache = Cache(max_age=DISCOVERY_DOC_MAX_AGE)
diff --git a/googleapiclient/errors.py b/googleapiclient/errors.py
index 442c213..64853a4 100644
--- a/googleapiclient/errors.py
+++ b/googleapiclient/errors.py
@@ -19,7 +19,7 @@
 """
 from __future__ import absolute_import
 
-__author__ = 'jcgregorio@google.com (Joe Gregorio)'
+__author__ = "jcgregorio@google.com (Joe Gregorio)"
 
 import json
 
@@ -27,133 +27,154 @@
 
 
 class Error(Exception):
-  """Base error for this module."""
-  pass
+    """Base error for this module."""
+
+    pass
 
 
 class HttpError(Error):
-  """HTTP data was invalid or unexpected."""
+    """HTTP data was invalid or unexpected."""
 
-  @util.positional(3)
-  def __init__(self, resp, content, uri=None):
-    self.resp = resp
-    if not isinstance(content, bytes):
-        raise TypeError("HTTP content should be bytes")
-    self.content = content
-    self.uri = uri
-    self.error_details = ''
+    @util.positional(3)
+    def __init__(self, resp, content, uri=None):
+        self.resp = resp
+        if not isinstance(content, bytes):
+            raise TypeError("HTTP content should be bytes")
+        self.content = content
+        self.uri = uri
+        self.error_details = ""
 
-  def _get_reason(self):
-    """Calculate the reason for the error from the response content."""
-    reason = self.resp.reason
-    try:
-      data = json.loads(self.content.decode('utf-8'))
-      if isinstance(data, dict):
-        reason = data['error']['message']
-        if 'details' in data['error']:
-            self.error_details = data['error']['details']
-        elif 'detail' in data['error']:
-            self.error_details = data['error']['detail']
-      elif isinstance(data, list) and len(data) > 0:
-        first_error = data[0]
-        reason = first_error['error']['message']
-        if 'details' in first_error['error']:
-            self.error_details = first_error['error']['details']
-    except (ValueError, KeyError, TypeError):
-      pass
-    if reason is None:
-      reason = ''
-    return reason
+    def _get_reason(self):
+        """Calculate the reason for the error from the response content."""
+        reason = self.resp.reason
+        try:
+            data = json.loads(self.content.decode("utf-8"))
+            if isinstance(data, dict):
+                reason = data["error"]["message"]
+                if "details" in data["error"]:
+                    self.error_details = data["error"]["details"]
+                elif "detail" in data["error"]:
+                    self.error_details = data["error"]["detail"]
+            elif isinstance(data, list) and len(data) > 0:
+                first_error = data[0]
+                reason = first_error["error"]["message"]
+                if "details" in first_error["error"]:
+                    self.error_details = first_error["error"]["details"]
+        except (ValueError, KeyError, TypeError):
+            pass
+        if reason is None:
+            reason = ""
+        return reason
 
-  def __repr__(self):
-    reason = self._get_reason()
-    if self.error_details:
-      return '<HttpError %s when requesting %s returned "%s". Details: "%s">' % \
-             (self.resp.status, self.uri, reason.strip(), self.error_details)
-    elif self.uri:
-      return '<HttpError %s when requesting %s returned "%s">' % (
-          self.resp.status, self.uri, self._get_reason().strip())
-    else:
-      return '<HttpError %s "%s">' % (self.resp.status, self._get_reason())
+    def __repr__(self):
+        reason = self._get_reason()
+        if self.error_details:
+            return '<HttpError %s when requesting %s returned "%s". Details: "%s">' % (
+                self.resp.status,
+                self.uri,
+                reason.strip(),
+                self.error_details,
+            )
+        elif self.uri:
+            return '<HttpError %s when requesting %s returned "%s">' % (
+                self.resp.status,
+                self.uri,
+                self._get_reason().strip(),
+            )
+        else:
+            return '<HttpError %s "%s">' % (self.resp.status, self._get_reason())
 
-  __str__ = __repr__
+    __str__ = __repr__
 
 
 class InvalidJsonError(Error):
-  """The JSON returned could not be parsed."""
-  pass
+    """The JSON returned could not be parsed."""
+
+    pass
 
 
 class UnknownFileType(Error):
-  """File type unknown or unexpected."""
-  pass
+    """File type unknown or unexpected."""
+
+    pass
 
 
 class UnknownLinkType(Error):
-  """Link type unknown or unexpected."""
-  pass
+    """Link type unknown or unexpected."""
+
+    pass
 
 
 class UnknownApiNameOrVersion(Error):
-  """No API with that name and version exists."""
-  pass
+    """No API with that name and version exists."""
+
+    pass
 
 
 class UnacceptableMimeTypeError(Error):
-  """That is an unacceptable mimetype for this operation."""
-  pass
+    """That is an unacceptable mimetype for this operation."""
+
+    pass
 
 
 class MediaUploadSizeError(Error):
-  """Media is larger than the method can accept."""
-  pass
+    """Media is larger than the method can accept."""
+
+    pass
 
 
 class ResumableUploadError(HttpError):
-  """Error occured during resumable upload."""
-  pass
+    """Error occured during resumable upload."""
+
+    pass
 
 
 class InvalidChunkSizeError(Error):
-  """The given chunksize is not valid."""
-  pass
+    """The given chunksize is not valid."""
+
+    pass
+
 
 class InvalidNotificationError(Error):
-  """The channel Notification is invalid."""
-  pass
+    """The channel Notification is invalid."""
+
+    pass
+
 
 class BatchError(HttpError):
-  """Error occured during batch operations."""
+    """Error occured during batch operations."""
 
-  @util.positional(2)
-  def __init__(self, reason, resp=None, content=None):
-    self.resp = resp
-    self.content = content
-    self.reason = reason
+    @util.positional(2)
+    def __init__(self, reason, resp=None, content=None):
+        self.resp = resp
+        self.content = content
+        self.reason = reason
 
-  def __repr__(self):
-    if getattr(self.resp, 'status', None) is None:
-      return '<BatchError "%s">' % (self.reason)
-    else:
-      return '<BatchError %s "%s">' % (self.resp.status, self.reason)
+    def __repr__(self):
+        if getattr(self.resp, "status", None) is None:
+            return '<BatchError "%s">' % (self.reason)
+        else:
+            return '<BatchError %s "%s">' % (self.resp.status, self.reason)
 
-  __str__ = __repr__
+    __str__ = __repr__
 
 
 class UnexpectedMethodError(Error):
-  """Exception raised by RequestMockBuilder on unexpected calls."""
+    """Exception raised by RequestMockBuilder on unexpected calls."""
 
-  @util.positional(1)
-  def __init__(self, methodId=None):
-    """Constructor for an UnexpectedMethodError."""
-    super(UnexpectedMethodError, self).__init__(
-        'Received unexpected call %s' % methodId)
+    @util.positional(1)
+    def __init__(self, methodId=None):
+        """Constructor for an UnexpectedMethodError."""
+        super(UnexpectedMethodError, self).__init__(
+            "Received unexpected call %s" % methodId
+        )
 
 
 class UnexpectedBodyError(Error):
-  """Exception raised by RequestMockBuilder on unexpected bodies."""
+    """Exception raised by RequestMockBuilder on unexpected bodies."""
 
-  def __init__(self, expected, provided):
-    """Constructor for an UnexpectedMethodError."""
-    super(UnexpectedBodyError, self).__init__(
-        'Expected: [%s] - Provided: [%s]' % (expected, provided))
+    def __init__(self, expected, provided):
+        """Constructor for an UnexpectedMethodError."""
+        super(UnexpectedBodyError, self).__init__(
+            "Expected: [%s] - Provided: [%s]" % (expected, provided)
+        )
diff --git a/googleapiclient/http.py b/googleapiclient/http.py
index 9733aa3..5b5ea15 100644
--- a/googleapiclient/http.py
+++ b/googleapiclient/http.py
@@ -23,7 +23,7 @@
 from six.moves import http_client
 from six.moves import range
 
-__author__ = 'jcgregorio@google.com (Joe Gregorio)'
+__author__ = "jcgregorio@google.com (Joe Gregorio)"
 
 from six import BytesIO, StringIO
 from six.moves.urllib.parse import urlparse, urlunparse, quote, unquote
@@ -44,11 +44,11 @@
 
 # TODO(issue 221): Remove this conditional import jibbajabba.
 try:
-  import ssl
+    import ssl
 except ImportError:
-  _ssl_SSLError = object()
+    _ssl_SSLError = object()
 else:
-  _ssl_SSLError = ssl.SSLError
+    _ssl_SSLError = ssl.SSLError
 
 from email.generator import Generator
 from email.mime.multipart import MIMEMultipart
@@ -69,7 +69,7 @@
 
 LOGGER = logging.getLogger(__name__)
 
-DEFAULT_CHUNK_SIZE = 100*1024*1024
+DEFAULT_CHUNK_SIZE = 100 * 1024 * 1024
 
 MAX_URI_LENGTH = 2048
 
@@ -79,11 +79,11 @@
 
 DEFAULT_HTTP_TIMEOUT_SEC = 60
 
-_LEGACY_BATCH_URI = 'https://www.googleapis.com/batch'
+_LEGACY_BATCH_URI = "https://www.googleapis.com/batch"
 
 
 def _should_retry_response(resp_status, content):
-  """Determines whether a response should be retried.
+    """Determines whether a response should be retried.
 
   Args:
     resp_status: The response status received.
@@ -92,45 +92,46 @@
   Returns:
     True if the response should be retried, otherwise False.
   """
-  # Retry on 5xx errors.
-  if resp_status >= 500:
-    return True
+    # Retry on 5xx errors.
+    if resp_status >= 500:
+        return True
 
-  # Retry on 429 errors.
-  if resp_status == _TOO_MANY_REQUESTS:
-    return True
+    # Retry on 429 errors.
+    if resp_status == _TOO_MANY_REQUESTS:
+        return True
 
-  # For 403 errors, we have to check for the `reason` in the response to
-  # determine if we should retry.
-  if resp_status == six.moves.http_client.FORBIDDEN:
-    # If there's no details about the 403 type, don't retry.
-    if not content:
-      return False
+    # For 403 errors, we have to check for the `reason` in the response to
+    # determine if we should retry.
+    if resp_status == six.moves.http_client.FORBIDDEN:
+        # If there's no details about the 403 type, don't retry.
+        if not content:
+            return False
 
-    # Content is in JSON format.
-    try:
-      data = json.loads(content.decode('utf-8'))
-      if isinstance(data, dict):
-        reason = data['error']['errors'][0]['reason']
-      else:
-        reason = data[0]['error']['errors']['reason']
-    except (UnicodeDecodeError, ValueError, KeyError):
-      LOGGER.warning('Invalid JSON content from response: %s', content)
-      return False
+        # Content is in JSON format.
+        try:
+            data = json.loads(content.decode("utf-8"))
+            if isinstance(data, dict):
+                reason = data["error"]["errors"][0]["reason"]
+            else:
+                reason = data[0]["error"]["errors"]["reason"]
+        except (UnicodeDecodeError, ValueError, KeyError):
+            LOGGER.warning("Invalid JSON content from response: %s", content)
+            return False
 
-    LOGGER.warning('Encountered 403 Forbidden with reason "%s"', reason)
+        LOGGER.warning('Encountered 403 Forbidden with reason "%s"', reason)
 
-    # Only retry on rate limit related failures.
-    if reason in ('userRateLimitExceeded', 'rateLimitExceeded', ):
-      return True
+        # Only retry on rate limit related failures.
+        if reason in ("userRateLimitExceeded", "rateLimitExceeded"):
+            return True
 
-  # Everything else is a success or non-retriable so break.
-  return False
+    # Everything else is a success or non-retriable so break.
+    return False
 
 
-def _retry_request(http, num_retries, req_type, sleep, rand, uri, method, *args,
-                   **kwargs):
-  """Retries an HTTP request multiple times while handling errors.
+def _retry_request(
+    http, num_retries, req_type, sleep, rand, uri, method, *args, **kwargs
+):
+    """Retries an HTTP request multiple times while handling errors.
 
   If after all retries the request still fails, last error is either returned as
   return value (for HTTP 5xx errors) or thrown (for ssl.SSLError).
@@ -147,105 +148,115 @@
   Returns:
     resp, content - Response from the http request (may be HTTP 5xx).
   """
-  resp = None
-  content = None
-  exception = None
-  for retry_num in range(num_retries + 1):
-    if retry_num > 0:
-      # Sleep before retrying.
-      sleep_time = rand() * 2 ** retry_num
-      LOGGER.warning(
-          'Sleeping %.2f seconds before retry %d of %d for %s: %s %s, after %s',
-          sleep_time, retry_num, num_retries, req_type, method, uri,
-          resp.status if resp else exception)
-      sleep(sleep_time)
+    resp = None
+    content = None
+    exception = None
+    for retry_num in range(num_retries + 1):
+        if retry_num > 0:
+            # Sleep before retrying.
+            sleep_time = rand() * 2 ** retry_num
+            LOGGER.warning(
+                "Sleeping %.2f seconds before retry %d of %d for %s: %s %s, after %s",
+                sleep_time,
+                retry_num,
+                num_retries,
+                req_type,
+                method,
+                uri,
+                resp.status if resp else exception,
+            )
+            sleep(sleep_time)
 
-    try:
-      exception = None
-      resp, content = http.request(uri, method, *args, **kwargs)
-    # Retry on SSL errors and socket timeout errors.
-    except _ssl_SSLError as ssl_error:
-      exception = ssl_error
-    except socket.timeout as socket_timeout:
-      # It's important that this be before socket.error as it's a subclass
-      # socket.timeout has no errorcode
-      exception = socket_timeout
-    except socket.error as socket_error:
-      # errno's contents differ by platform, so we have to match by name.
-      if socket.errno.errorcode.get(socket_error.errno) not in {
-        'WSAETIMEDOUT', 'ETIMEDOUT', 'EPIPE', 'ECONNABORTED'}:
-        raise
-      exception = socket_error
-    except httplib2.ServerNotFoundError as server_not_found_error:
-      exception = server_not_found_error
+        try:
+            exception = None
+            resp, content = http.request(uri, method, *args, **kwargs)
+        # Retry on SSL errors and socket timeout errors.
+        except _ssl_SSLError as ssl_error:
+            exception = ssl_error
+        except socket.timeout as socket_timeout:
+            # It's important that this be before socket.error as it's a subclass
+            # socket.timeout has no errorcode
+            exception = socket_timeout
+        except socket.error as socket_error:
+            # errno's contents differ by platform, so we have to match by name.
+            if socket.errno.errorcode.get(socket_error.errno) not in {
+                "WSAETIMEDOUT",
+                "ETIMEDOUT",
+                "EPIPE",
+                "ECONNABORTED",
+            }:
+                raise
+            exception = socket_error
+        except httplib2.ServerNotFoundError as server_not_found_error:
+            exception = server_not_found_error
 
-    if exception:
-      if retry_num == num_retries:
-        raise exception
-      else:
-        continue
+        if exception:
+            if retry_num == num_retries:
+                raise exception
+            else:
+                continue
 
-    if not _should_retry_response(resp.status, content):
-      break
+        if not _should_retry_response(resp.status, content):
+            break
 
-  return resp, content
+    return resp, content
 
 
 class MediaUploadProgress(object):
-  """Status of a resumable upload."""
+    """Status of a resumable upload."""
 
-  def __init__(self, resumable_progress, total_size):
-    """Constructor.
+    def __init__(self, resumable_progress, total_size):
+        """Constructor.
 
     Args:
       resumable_progress: int, bytes sent so far.
       total_size: int, total bytes in complete upload, or None if the total
         upload size isn't known ahead of time.
     """
-    self.resumable_progress = resumable_progress
-    self.total_size = total_size
+        self.resumable_progress = resumable_progress
+        self.total_size = total_size
 
-  def progress(self):
-    """Percent of upload completed, as a float.
+    def progress(self):
+        """Percent of upload completed, as a float.
 
     Returns:
       the percentage complete as a float, returning 0.0 if the total size of
       the upload is unknown.
     """
-    if self.total_size is not None and self.total_size != 0:
-      return float(self.resumable_progress) / float(self.total_size)
-    else:
-      return 0.0
+        if self.total_size is not None and self.total_size != 0:
+            return float(self.resumable_progress) / float(self.total_size)
+        else:
+            return 0.0
 
 
 class MediaDownloadProgress(object):
-  """Status of a resumable download."""
+    """Status of a resumable download."""
 
-  def __init__(self, resumable_progress, total_size):
-    """Constructor.
+    def __init__(self, resumable_progress, total_size):
+        """Constructor.
 
     Args:
       resumable_progress: int, bytes received so far.
       total_size: int, total bytes in complete download.
     """
-    self.resumable_progress = resumable_progress
-    self.total_size = total_size
+        self.resumable_progress = resumable_progress
+        self.total_size = total_size
 
-  def progress(self):
-    """Percent of download completed, as a float.
+    def progress(self):
+        """Percent of download completed, as a float.
 
     Returns:
       the percentage complete as a float, returning 0.0 if the total size of
       the download is unknown.
     """
-    if self.total_size is not None and self.total_size != 0:
-      return float(self.resumable_progress) / float(self.total_size)
-    else:
-      return 0.0
+        if self.total_size is not None and self.total_size != 0:
+            return float(self.resumable_progress) / float(self.total_size)
+        else:
+            return 0.0
 
 
 class MediaUpload(object):
-  """Describes a media object to upload.
+    """Describes a media object to upload.
 
   Base class that defines the interface of MediaUpload subclasses.
 
@@ -271,40 +282,40 @@
   needs.
   """
 
-  def chunksize(self):
-    """Chunk size for resumable uploads.
+    def chunksize(self):
+        """Chunk size for resumable uploads.
 
     Returns:
       Chunk size in bytes.
     """
-    raise NotImplementedError()
+        raise NotImplementedError()
 
-  def mimetype(self):
-    """Mime type of the body.
+    def mimetype(self):
+        """Mime type of the body.
 
     Returns:
       Mime type.
     """
-    return 'application/octet-stream'
+        return "application/octet-stream"
 
-  def size(self):
-    """Size of upload.
+    def size(self):
+        """Size of upload.
 
     Returns:
       Size of the body, or None of the size is unknown.
     """
-    return None
+        return None
 
-  def resumable(self):
-    """Whether this upload is resumable.
+    def resumable(self):
+        """Whether this upload is resumable.
 
     Returns:
       True if resumable upload or False.
     """
-    return False
+        return False
 
-  def getbytes(self, begin, end):
-    """Get bytes from the media.
+    def getbytes(self, begin, end):
+        """Get bytes from the media.
 
     Args:
       begin: int, offset from beginning of file.
@@ -314,10 +325,10 @@
       A string of bytes read. May be shorter than length if EOF was reached
       first.
     """
-    raise NotImplementedError()
+        raise NotImplementedError()
 
-  def has_stream(self):
-    """Does the underlying upload support a streaming interface.
+    def has_stream(self):
+        """Does the underlying upload support a streaming interface.
 
     Streaming means it is an io.IOBase subclass that supports seek, i.e.
     seekable() returns True.
@@ -326,20 +337,20 @@
       True if the call to stream() will return an instance of a seekable io.Base
       subclass.
     """
-    return False
+        return False
 
-  def stream(self):
-    """A stream interface to the data being uploaded.
+    def stream(self):
+        """A stream interface to the data being uploaded.
 
     Returns:
       The returned value is an io.IOBase subclass that supports seek, i.e.
       seekable() returns True.
     """
-    raise NotImplementedError()
+        raise NotImplementedError()
 
-  @util.positional(1)
-  def _to_json(self, strip=None):
-    """Utility function for creating a JSON representation of a MediaUpload.
+    @util.positional(1)
+    def _to_json(self, strip=None):
+        """Utility function for creating a JSON representation of a MediaUpload.
 
     Args:
       strip: array, An array of names of members to not include in the JSON.
@@ -348,27 +359,27 @@
        string, a JSON representation of this instance, suitable to pass to
        from_json().
     """
-    t = type(self)
-    d = copy.copy(self.__dict__)
-    if strip is not None:
-      for member in strip:
-        del d[member]
-    d['_class'] = t.__name__
-    d['_module'] = t.__module__
-    return json.dumps(d)
+        t = type(self)
+        d = copy.copy(self.__dict__)
+        if strip is not None:
+            for member in strip:
+                del d[member]
+        d["_class"] = t.__name__
+        d["_module"] = t.__module__
+        return json.dumps(d)
 
-  def to_json(self):
-    """Create a JSON representation of an instance of MediaUpload.
+    def to_json(self):
+        """Create a JSON representation of an instance of MediaUpload.
 
     Returns:
        string, a JSON representation of this instance, suitable to pass to
        from_json().
     """
-    return self._to_json()
+        return self._to_json()
 
-  @classmethod
-  def new_from_json(cls, s):
-    """Utility class method to instantiate a MediaUpload subclass from a JSON
+    @classmethod
+    def new_from_json(cls, s):
+        """Utility class method to instantiate a MediaUpload subclass from a JSON
     representation produced by to_json().
 
     Args:
@@ -378,17 +389,17 @@
       An instance of the subclass of MediaUpload that was serialized with
       to_json().
     """
-    data = json.loads(s)
-    # Find and call the right classmethod from_json() to restore the object.
-    module = data['_module']
-    m = __import__(module, fromlist=module.split('.')[:-1])
-    kls = getattr(m, data['_class'])
-    from_json = getattr(kls, 'from_json')
-    return from_json(s)
+        data = json.loads(s)
+        # Find and call the right classmethod from_json() to restore the object.
+        module = data["_module"]
+        m = __import__(module, fromlist=module.split(".")[:-1])
+        kls = getattr(m, data["_class"])
+        from_json = getattr(kls, "from_json")
+        return from_json(s)
 
 
 class MediaIoBaseUpload(MediaUpload):
-  """A MediaUpload for a io.Base objects.
+    """A MediaUpload for a io.Base objects.
 
   Note that the Python file object is compatible with io.Base and can be used
   with this class also.
@@ -410,10 +421,9 @@
   your chunksize larger than 5MB, or to -1.
   """
 
-  @util.positional(3)
-  def __init__(self, fd, mimetype, chunksize=DEFAULT_CHUNK_SIZE,
-      resumable=False):
-    """Constructor.
+    @util.positional(3)
+    def __init__(self, fd, mimetype, chunksize=DEFAULT_CHUNK_SIZE, resumable=False):
+        """Constructor.
 
     Args:
       fd: io.Base or file object, The source of the bytes to upload. MUST be
@@ -429,51 +439,51 @@
       resumable: bool, True if this is a resumable upload. False means upload
         in a single request.
     """
-    super(MediaIoBaseUpload, self).__init__()
-    self._fd = fd
-    self._mimetype = mimetype
-    if not (chunksize == -1 or chunksize > 0):
-      raise InvalidChunkSizeError()
-    self._chunksize = chunksize
-    self._resumable = resumable
+        super(MediaIoBaseUpload, self).__init__()
+        self._fd = fd
+        self._mimetype = mimetype
+        if not (chunksize == -1 or chunksize > 0):
+            raise InvalidChunkSizeError()
+        self._chunksize = chunksize
+        self._resumable = resumable
 
-    self._fd.seek(0, os.SEEK_END)
-    self._size = self._fd.tell()
+        self._fd.seek(0, os.SEEK_END)
+        self._size = self._fd.tell()
 
-  def chunksize(self):
-    """Chunk size for resumable uploads.
+    def chunksize(self):
+        """Chunk size for resumable uploads.
 
     Returns:
       Chunk size in bytes.
     """
-    return self._chunksize
+        return self._chunksize
 
-  def mimetype(self):
-    """Mime type of the body.
+    def mimetype(self):
+        """Mime type of the body.
 
     Returns:
       Mime type.
     """
-    return self._mimetype
+        return self._mimetype
 
-  def size(self):
-    """Size of upload.
+    def size(self):
+        """Size of upload.
 
     Returns:
       Size of the body, or None of the size is unknown.
     """
-    return self._size
+        return self._size
 
-  def resumable(self):
-    """Whether this upload is resumable.
+    def resumable(self):
+        """Whether this upload is resumable.
 
     Returns:
       True if resumable upload or False.
     """
-    return self._resumable
+        return self._resumable
 
-  def getbytes(self, begin, length):
-    """Get bytes from the media.
+    def getbytes(self, begin, length):
+        """Get bytes from the media.
 
     Args:
       begin: int, offset from beginning of file.
@@ -483,11 +493,11 @@
       A string of bytes read. May be shorted than length if EOF was reached
       first.
     """
-    self._fd.seek(begin)
-    return self._fd.read(length)
+        self._fd.seek(begin)
+        return self._fd.read(length)
 
-  def has_stream(self):
-    """Does the underlying upload support a streaming interface.
+    def has_stream(self):
+        """Does the underlying upload support a streaming interface.
 
     Streaming means it is an io.IOBase subclass that supports seek, i.e.
     seekable() returns True.
@@ -496,24 +506,24 @@
       True if the call to stream() will return an instance of a seekable io.Base
       subclass.
     """
-    return True
+        return True
 
-  def stream(self):
-    """A stream interface to the data being uploaded.
+    def stream(self):
+        """A stream interface to the data being uploaded.
 
     Returns:
       The returned value is an io.IOBase subclass that supports seek, i.e.
       seekable() returns True.
     """
-    return self._fd
+        return self._fd
 
-  def to_json(self):
-    """This upload type is not serializable."""
-    raise NotImplementedError('MediaIoBaseUpload is not serializable.')
+    def to_json(self):
+        """This upload type is not serializable."""
+        raise NotImplementedError("MediaIoBaseUpload is not serializable.")
 
 
 class MediaFileUpload(MediaIoBaseUpload):
-  """A MediaUpload for a file.
+    """A MediaUpload for a file.
 
   Construct a MediaFileUpload and pass as the media_body parameter of the
   method. For example, if we had a service that allowed uploading images:
@@ -534,10 +544,11 @@
   your chunksize larger than 5MB, or to -1.
   """
 
-  @util.positional(2)
-  def __init__(self, filename, mimetype=None, chunksize=DEFAULT_CHUNK_SIZE,
-               resumable=False):
-    """Constructor.
+    @util.positional(2)
+    def __init__(
+        self, filename, mimetype=None, chunksize=DEFAULT_CHUNK_SIZE, resumable=False
+    ):
+        """Constructor.
 
     Args:
       filename: string, Name of the file.
@@ -551,48 +562,57 @@
       resumable: bool, True if this is a resumable upload. False means upload
         in a single request.
     """
-    self._filename = filename
-    fd = open(self._filename, 'rb')
-    if mimetype is None:
-      # No mimetype provided, make a guess.
-      mimetype, _ = mimetypes.guess_type(filename)
-      if mimetype is None:
-        # Guess failed, use octet-stream.
-        mimetype = 'application/octet-stream'
-    super(MediaFileUpload, self).__init__(fd, mimetype,
-                                          chunksize=chunksize,
-                                          resumable=resumable)
+        self._filename = filename
+        fd = open(self._filename, "rb")
+        if mimetype is None:
+            # No mimetype provided, make a guess.
+            mimetype, _ = mimetypes.guess_type(filename)
+            if mimetype is None:
+                # Guess failed, use octet-stream.
+                mimetype = "application/octet-stream"
+        super(MediaFileUpload, self).__init__(
+            fd, mimetype, chunksize=chunksize, resumable=resumable
+        )
 
-  def __del__(self):
-    self._fd.close()
+    def __del__(self):
+        self._fd.close()
 
-  def to_json(self):
-    """Creating a JSON representation of an instance of MediaFileUpload.
+    def to_json(self):
+        """Creating a JSON representation of an instance of MediaFileUpload.
 
     Returns:
        string, a JSON representation of this instance, suitable to pass to
        from_json().
     """
-    return self._to_json(strip=['_fd'])
+        return self._to_json(strip=["_fd"])
 
-  @staticmethod
-  def from_json(s):
-    d = json.loads(s)
-    return MediaFileUpload(d['_filename'], mimetype=d['_mimetype'],
-                           chunksize=d['_chunksize'], resumable=d['_resumable'])
+    @staticmethod
+    def from_json(s):
+        d = json.loads(s)
+        return MediaFileUpload(
+            d["_filename"],
+            mimetype=d["_mimetype"],
+            chunksize=d["_chunksize"],
+            resumable=d["_resumable"],
+        )
 
 
 class MediaInMemoryUpload(MediaIoBaseUpload):
-  """MediaUpload for a chunk of bytes.
+    """MediaUpload for a chunk of bytes.
 
   DEPRECATED: Use MediaIoBaseUpload with either io.TextIOBase or StringIO for
   the stream.
   """
 
-  @util.positional(2)
-  def __init__(self, body, mimetype='application/octet-stream',
-               chunksize=DEFAULT_CHUNK_SIZE, resumable=False):
-    """Create a new MediaInMemoryUpload.
+    @util.positional(2)
+    def __init__(
+        self,
+        body,
+        mimetype="application/octet-stream",
+        chunksize=DEFAULT_CHUNK_SIZE,
+        resumable=False,
+    ):
+        """Create a new MediaInMemoryUpload.
 
   DEPRECATED: Use MediaIoBaseUpload with either io.TextIOBase or StringIO for
   the stream.
@@ -606,13 +626,14 @@
     resumable: bool, True if this is a resumable upload. False means upload
       in a single request.
     """
-    fd = BytesIO(body)
-    super(MediaInMemoryUpload, self).__init__(fd, mimetype, chunksize=chunksize,
-                                              resumable=resumable)
+        fd = BytesIO(body)
+        super(MediaInMemoryUpload, self).__init__(
+            fd, mimetype, chunksize=chunksize, resumable=resumable
+        )
 
 
 class MediaIoBaseDownload(object):
-  """"Download media resources.
+    """"Download media resources.
 
   Note that the Python file object is compatible with io.Base and can be used
   with this class also.
@@ -631,9 +652,9 @@
     print "Download Complete!"
   """
 
-  @util.positional(3)
-  def __init__(self, fd, request, chunksize=DEFAULT_CHUNK_SIZE):
-    """Constructor.
+    @util.positional(3)
+    def __init__(self, fd, request, chunksize=DEFAULT_CHUNK_SIZE):
+        """Constructor.
 
     Args:
       fd: io.Base or file object, The stream in which to write the downloaded
@@ -642,29 +663,29 @@
         chunks.
       chunksize: int, File will be downloaded in chunks of this many bytes.
     """
-    self._fd = fd
-    self._request = request
-    self._uri = request.uri
-    self._chunksize = chunksize
-    self._progress = 0
-    self._total_size = None
-    self._done = False
+        self._fd = fd
+        self._request = request
+        self._uri = request.uri
+        self._chunksize = chunksize
+        self._progress = 0
+        self._total_size = None
+        self._done = False
 
-    # Stubs for testing.
-    self._sleep = time.sleep
-    self._rand = random.random
+        # Stubs for testing.
+        self._sleep = time.sleep
+        self._rand = random.random
 
-    self._headers = {}
-    for k, v in six.iteritems(request.headers):
-      # allow users to supply custom headers by setting them on the request
-      # but strip out the ones that are set by default on requests generated by
-      # API methods like Drive's files().get(fileId=...)
-      if not k.lower() in ('accept', 'accept-encoding', 'user-agent'):
-        self._headers[k] = v
+        self._headers = {}
+        for k, v in six.iteritems(request.headers):
+            # allow users to supply custom headers by setting them on the request
+            # but strip out the ones that are set by default on requests generated by
+            # API methods like Drive's files().get(fileId=...)
+            if not k.lower() in ("accept", "accept-encoding", "user-agent"):
+                self._headers[k] = v
 
-  @util.positional(1)
-  def next_chunk(self, num_retries=0):
-    """Get the next chunk of the download.
+    @util.positional(1)
+    def next_chunk(self, num_retries=0):
+        """Get the next chunk of the download.
 
     Args:
       num_retries: Integer, number of times to retry with randomized
@@ -681,37 +702,46 @@
       googleapiclient.errors.HttpError if the response was not a 2xx.
       httplib2.HttpLib2Error if a transport error has occured.
     """
-    headers = self._headers.copy()
-    headers['range'] = 'bytes=%d-%d' % (
-            self._progress, self._progress + self._chunksize)
-    http = self._request.http
+        headers = self._headers.copy()
+        headers["range"] = "bytes=%d-%d" % (
+            self._progress,
+            self._progress + self._chunksize,
+        )
+        http = self._request.http
 
-    resp, content = _retry_request(
-        http, num_retries, 'media download', self._sleep, self._rand, self._uri,
-        'GET', headers=headers)
+        resp, content = _retry_request(
+            http,
+            num_retries,
+            "media download",
+            self._sleep,
+            self._rand,
+            self._uri,
+            "GET",
+            headers=headers,
+        )
 
-    if resp.status in [200, 206]:
-      if 'content-location' in resp and resp['content-location'] != self._uri:
-        self._uri = resp['content-location']
-      self._progress += len(content)
-      self._fd.write(content)
+        if resp.status in [200, 206]:
+            if "content-location" in resp and resp["content-location"] != self._uri:
+                self._uri = resp["content-location"]
+            self._progress += len(content)
+            self._fd.write(content)
 
-      if 'content-range' in resp:
-        content_range = resp['content-range']
-        length = content_range.rsplit('/', 1)[1]
-        self._total_size = int(length)
-      elif 'content-length' in resp:
-        self._total_size = int(resp['content-length'])
+            if "content-range" in resp:
+                content_range = resp["content-range"]
+                length = content_range.rsplit("/", 1)[1]
+                self._total_size = int(length)
+            elif "content-length" in resp:
+                self._total_size = int(resp["content-length"])
 
-      if self._total_size is None or self._progress == self._total_size:
-        self._done = True
-      return MediaDownloadProgress(self._progress, self._total_size), self._done
-    else:
-      raise HttpError(resp, content, uri=self._uri)
+            if self._total_size is None or self._progress == self._total_size:
+                self._done = True
+            return MediaDownloadProgress(self._progress, self._total_size), self._done
+        else:
+            raise HttpError(resp, content, uri=self._uri)
 
 
 class _StreamSlice(object):
-  """Truncated stream.
+    """Truncated stream.
 
   Takes a stream and presents a stream that is a slice of the original stream.
   This is used when uploading media in chunks. In later versions of Python a
@@ -720,21 +750,21 @@
   wrapper presents a virtual stream that only reads to the end of the chunk.
   """
 
-  def __init__(self, stream, begin, chunksize):
-    """Constructor.
+    def __init__(self, stream, begin, chunksize):
+        """Constructor.
 
     Args:
       stream: (io.Base, file object), the stream to wrap.
       begin: int, the seek position the chunk begins at.
       chunksize: int, the size of the chunk.
     """
-    self._stream = stream
-    self._begin = begin
-    self._chunksize = chunksize
-    self._stream.seek(begin)
+        self._stream = stream
+        self._begin = begin
+        self._chunksize = chunksize
+        self._stream.seek(begin)
 
-  def read(self, n=-1):
-    """Read n bytes.
+    def read(self, n=-1):
+        """Read n bytes.
 
     Args:
       n, int, the number of bytes to read.
@@ -742,25 +772,30 @@
     Returns:
       A string of length 'n', or less if EOF is reached.
     """
-    # The data left available to read sits in [cur, end)
-    cur = self._stream.tell()
-    end = self._begin + self._chunksize
-    if n == -1 or cur + n > end:
-      n = end - cur
-    return self._stream.read(n)
+        # The data left available to read sits in [cur, end)
+        cur = self._stream.tell()
+        end = self._begin + self._chunksize
+        if n == -1 or cur + n > end:
+            n = end - cur
+        return self._stream.read(n)
 
 
 class HttpRequest(object):
-  """Encapsulates a single HTTP request."""
+    """Encapsulates a single HTTP request."""
 
-  @util.positional(4)
-  def __init__(self, http, postproc, uri,
-               method='GET',
-               body=None,
-               headers=None,
-               methodId=None,
-               resumable=None):
-    """Constructor for an HttpRequest.
+    @util.positional(4)
+    def __init__(
+        self,
+        http,
+        postproc,
+        uri,
+        method="GET",
+        body=None,
+        headers=None,
+        methodId=None,
+        resumable=None,
+    ):
+        """Constructor for an HttpRequest.
 
     Args:
       http: httplib2.Http, the transport object to use to make a request
@@ -774,33 +809,33 @@
       methodId: string, a unique identifier for the API method being called.
       resumable: MediaUpload, None if this is not a resumbale request.
     """
-    self.uri = uri
-    self.method = method
-    self.body = body
-    self.headers = headers or {}
-    self.methodId = methodId
-    self.http = http
-    self.postproc = postproc
-    self.resumable = resumable
-    self.response_callbacks = []
-    self._in_error_state = False
+        self.uri = uri
+        self.method = method
+        self.body = body
+        self.headers = headers or {}
+        self.methodId = methodId
+        self.http = http
+        self.postproc = postproc
+        self.resumable = resumable
+        self.response_callbacks = []
+        self._in_error_state = False
 
-    # The size of the non-media part of the request.
-    self.body_size = len(self.body or '')
+        # The size of the non-media part of the request.
+        self.body_size = len(self.body or "")
 
-    # The resumable URI to send chunks to.
-    self.resumable_uri = None
+        # The resumable URI to send chunks to.
+        self.resumable_uri = None
 
-    # The bytes that have been uploaded.
-    self.resumable_progress = 0
+        # The bytes that have been uploaded.
+        self.resumable_progress = 0
 
-    # Stubs for testing.
-    self._rand = random.random
-    self._sleep = time.sleep
+        # Stubs for testing.
+        self._rand = random.random
+        self._sleep = time.sleep
 
-  @util.positional(1)
-  def execute(self, http=None, num_retries=0):
-    """Execute the request.
+    @util.positional(1)
+    def execute(self, http=None, num_retries=0):
+        """Execute the request.
 
     Args:
       http: httplib2.Http, an http object to be used in place of the
@@ -818,47 +853,54 @@
       googleapiclient.errors.HttpError if the response was not a 2xx.
       httplib2.HttpLib2Error if a transport error has occured.
     """
-    if http is None:
-      http = self.http
+        if http is None:
+            http = self.http
 
-    if self.resumable:
-      body = None
-      while body is None:
-        _, body = self.next_chunk(http=http, num_retries=num_retries)
-      return body
+        if self.resumable:
+            body = None
+            while body is None:
+                _, body = self.next_chunk(http=http, num_retries=num_retries)
+            return body
 
-    # Non-resumable case.
+        # Non-resumable case.
 
-    if 'content-length' not in self.headers:
-      self.headers['content-length'] = str(self.body_size)
-    # If the request URI is too long then turn it into a POST request.
-    # Assume that a GET request never contains a request body.
-    if len(self.uri) > MAX_URI_LENGTH and self.method == 'GET':
-      self.method = 'POST'
-      self.headers['x-http-method-override'] = 'GET'
-      self.headers['content-type'] = 'application/x-www-form-urlencoded'
-      parsed = urlparse(self.uri)
-      self.uri = urlunparse(
-          (parsed.scheme, parsed.netloc, parsed.path, parsed.params, None,
-           None)
-          )
-      self.body = parsed.query
-      self.headers['content-length'] = str(len(self.body))
+        if "content-length" not in self.headers:
+            self.headers["content-length"] = str(self.body_size)
+        # If the request URI is too long then turn it into a POST request.
+        # Assume that a GET request never contains a request body.
+        if len(self.uri) > MAX_URI_LENGTH and self.method == "GET":
+            self.method = "POST"
+            self.headers["x-http-method-override"] = "GET"
+            self.headers["content-type"] = "application/x-www-form-urlencoded"
+            parsed = urlparse(self.uri)
+            self.uri = urlunparse(
+                (parsed.scheme, parsed.netloc, parsed.path, parsed.params, None, None)
+            )
+            self.body = parsed.query
+            self.headers["content-length"] = str(len(self.body))
 
-    # Handle retries for server-side errors.
-    resp, content = _retry_request(
-          http, num_retries, 'request', self._sleep, self._rand, str(self.uri),
-          method=str(self.method), body=self.body, headers=self.headers)
+        # Handle retries for server-side errors.
+        resp, content = _retry_request(
+            http,
+            num_retries,
+            "request",
+            self._sleep,
+            self._rand,
+            str(self.uri),
+            method=str(self.method),
+            body=self.body,
+            headers=self.headers,
+        )
 
-    for callback in self.response_callbacks:
-      callback(resp)
-    if resp.status >= 300:
-      raise HttpError(resp, content, uri=self.uri)
-    return self.postproc(resp, content)
+        for callback in self.response_callbacks:
+            callback(resp)
+        if resp.status >= 300:
+            raise HttpError(resp, content, uri=self.uri)
+        return self.postproc(resp, content)
 
-  @util.positional(2)
-  def add_response_callback(self, cb):
-    """add_response_headers_callback
+    @util.positional(2)
+    def add_response_callback(self, cb):
+        """add_response_headers_callback
 
     Args:
       cb: Callback to be called on receiving the response headers, of signature:
@@ -866,11 +908,11 @@
       def cb(resp):
         # Where resp is an instance of httplib2.Response
     """
-    self.response_callbacks.append(cb)
+        self.response_callbacks.append(cb)
 
-  @util.positional(1)
-  def next_chunk(self, http=None, num_retries=0):
-    """Execute the next step of a resumable upload.
+    @util.positional(1)
+    def next_chunk(self, http=None, num_retries=0):
+        """Execute the next step of a resumable upload.
 
     Can only be used if the method being executed supports media uploads and
     the MediaUpload object passed in was flagged as using resumable upload.
@@ -907,95 +949,103 @@
       googleapiclient.errors.HttpError if the response was not a 2xx.
       httplib2.HttpLib2Error if a transport error has occured.
     """
-    if http is None:
-      http = self.http
+        if http is None:
+            http = self.http
 
-    if self.resumable.size() is None:
-      size = '*'
-    else:
-      size = str(self.resumable.size())
+        if self.resumable.size() is None:
+            size = "*"
+        else:
+            size = str(self.resumable.size())
 
-    if self.resumable_uri is None:
-      start_headers = copy.copy(self.headers)
-      start_headers['X-Upload-Content-Type'] = self.resumable.mimetype()
-      if size != '*':
-        start_headers['X-Upload-Content-Length'] = size
-      start_headers['content-length'] = str(self.body_size)
+        if self.resumable_uri is None:
+            start_headers = copy.copy(self.headers)
+            start_headers["X-Upload-Content-Type"] = self.resumable.mimetype()
+            if size != "*":
+                start_headers["X-Upload-Content-Length"] = size
+            start_headers["content-length"] = str(self.body_size)
 
-      resp, content = _retry_request(
-          http, num_retries, 'resumable URI request', self._sleep, self._rand,
-          self.uri, method=self.method, body=self.body, headers=start_headers)
+            resp, content = _retry_request(
+                http,
+                num_retries,
+                "resumable URI request",
+                self._sleep,
+                self._rand,
+                self.uri,
+                method=self.method,
+                body=self.body,
+                headers=start_headers,
+            )
 
-      if resp.status == 200 and 'location' in resp:
-        self.resumable_uri = resp['location']
-      else:
-        raise ResumableUploadError(resp, content)
-    elif self._in_error_state:
-      # If we are in an error state then query the server for current state of
-      # the upload by sending an empty PUT and reading the 'range' header in
-      # the response.
-      headers = {
-          'Content-Range': 'bytes */%s' % size,
-          'content-length': '0'
-          }
-      resp, content = http.request(self.resumable_uri, 'PUT',
-                                   headers=headers)
-      status, body = self._process_response(resp, content)
-      if body:
-        # The upload was complete.
-        return (status, body)
+            if resp.status == 200 and "location" in resp:
+                self.resumable_uri = resp["location"]
+            else:
+                raise ResumableUploadError(resp, content)
+        elif self._in_error_state:
+            # If we are in an error state then query the server for current state of
+            # the upload by sending an empty PUT and reading the 'range' header in
+            # the response.
+            headers = {"Content-Range": "bytes */%s" % size, "content-length": "0"}
+            resp, content = http.request(self.resumable_uri, "PUT", headers=headers)
+            status, body = self._process_response(resp, content)
+            if body:
+                # The upload was complete.
+                return (status, body)
 
-    if self.resumable.has_stream():
-      data = self.resumable.stream()
-      if self.resumable.chunksize() == -1:
-        data.seek(self.resumable_progress)
-        chunk_end = self.resumable.size() - self.resumable_progress - 1
-      else:
-        # Doing chunking with a stream, so wrap a slice of the stream.
-        data = _StreamSlice(data, self.resumable_progress,
-                            self.resumable.chunksize())
-        chunk_end = min(
-            self.resumable_progress + self.resumable.chunksize() - 1,
-            self.resumable.size() - 1)
-    else:
-      data = self.resumable.getbytes(
-          self.resumable_progress, self.resumable.chunksize())
+        if self.resumable.has_stream():
+            data = self.resumable.stream()
+            if self.resumable.chunksize() == -1:
+                data.seek(self.resumable_progress)
+                chunk_end = self.resumable.size() - self.resumable_progress - 1
+            else:
+                # Doing chunking with a stream, so wrap a slice of the stream.
+                data = _StreamSlice(
+                    data, self.resumable_progress, self.resumable.chunksize()
+                )
+                chunk_end = min(
+                    self.resumable_progress + self.resumable.chunksize() - 1,
+                    self.resumable.size() - 1,
+                )
+        else:
+            data = self.resumable.getbytes(
+                self.resumable_progress, self.resumable.chunksize()
+            )
 
-      # A short read implies that we are at EOF, so finish the upload.
-      if len(data) < self.resumable.chunksize():
-        size = str(self.resumable_progress + len(data))
+            # A short read implies that we are at EOF, so finish the upload.
+            if len(data) < self.resumable.chunksize():
+                size = str(self.resumable_progress + len(data))
 
-      chunk_end = self.resumable_progress + len(data) - 1
+            chunk_end = self.resumable_progress + len(data) - 1
 
-    headers = {
-        'Content-Range': 'bytes %d-%d/%s' % (
-            self.resumable_progress, chunk_end, size),
-        # Must set the content-length header here because httplib can't
-        # calculate the size when working with _StreamSlice.
-        'Content-Length': str(chunk_end - self.resumable_progress + 1)
+        headers = {
+            "Content-Range": "bytes %d-%d/%s"
+            % (self.resumable_progress, chunk_end, size),
+            # Must set the content-length header here because httplib can't
+            # calculate the size when working with _StreamSlice.
+            "Content-Length": str(chunk_end - self.resumable_progress + 1),
         }
 
-    for retry_num in range(num_retries + 1):
-      if retry_num > 0:
-        self._sleep(self._rand() * 2**retry_num)
-        LOGGER.warning(
-            'Retry #%d for media upload: %s %s, following status: %d'
-            % (retry_num, self.method, self.uri, resp.status))
+        for retry_num in range(num_retries + 1):
+            if retry_num > 0:
+                self._sleep(self._rand() * 2 ** retry_num)
+                LOGGER.warning(
+                    "Retry #%d for media upload: %s %s, following status: %d"
+                    % (retry_num, self.method, self.uri, resp.status)
+                )
 
-      try:
-        resp, content = http.request(self.resumable_uri, method='PUT',
-                                     body=data,
-                                     headers=headers)
-      except:
-        self._in_error_state = True
-        raise
-      if not _should_retry_response(resp.status, content):
-        break
+            try:
+                resp, content = http.request(
+                    self.resumable_uri, method="PUT", body=data, headers=headers
+                )
+            except:
+                self._in_error_state = True
+                raise
+            if not _should_retry_response(resp.status, content):
+                break
 
-    return self._process_response(resp, content)
+        return self._process_response(resp, content)
 
-  def _process_response(self, resp, content):
-    """Process the response from a single chunk upload.
+    def _process_response(self, resp, content):
+        """Process the response from a single chunk upload.
 
     Args:
       resp: httplib2.Response, the response object.
@@ -1008,57 +1058,60 @@
     Raises:
       googleapiclient.errors.HttpError if the response was not a 2xx or a 308.
     """
-    if resp.status in [200, 201]:
-      self._in_error_state = False
-      return None, self.postproc(resp, content)
-    elif resp.status == 308:
-      self._in_error_state = False
-      # A "308 Resume Incomplete" indicates we are not done.
-      try:
-        self.resumable_progress = int(resp['range'].split('-')[1]) + 1
-      except KeyError:
-        # If resp doesn't contain range header, resumable progress is 0
-        self.resumable_progress = 0
-      if 'location' in resp:
-        self.resumable_uri = resp['location']
-    else:
-      self._in_error_state = True
-      raise HttpError(resp, content, uri=self.uri)
+        if resp.status in [200, 201]:
+            self._in_error_state = False
+            return None, self.postproc(resp, content)
+        elif resp.status == 308:
+            self._in_error_state = False
+            # A "308 Resume Incomplete" indicates we are not done.
+            try:
+                self.resumable_progress = int(resp["range"].split("-")[1]) + 1
+            except KeyError:
+                # If resp doesn't contain range header, resumable progress is 0
+                self.resumable_progress = 0
+            if "location" in resp:
+                self.resumable_uri = resp["location"]
+        else:
+            self._in_error_state = True
+            raise HttpError(resp, content, uri=self.uri)
 
-    return (MediaUploadProgress(self.resumable_progress, self.resumable.size()),
-            None)
+        return (
+            MediaUploadProgress(self.resumable_progress, self.resumable.size()),
+            None,
+        )
 
-  def to_json(self):
-    """Returns a JSON representation of the HttpRequest."""
-    d = copy.copy(self.__dict__)
-    if d['resumable'] is not None:
-      d['resumable'] = self.resumable.to_json()
-    del d['http']
-    del d['postproc']
-    del d['_sleep']
-    del d['_rand']
+    def to_json(self):
+        """Returns a JSON representation of the HttpRequest."""
+        d = copy.copy(self.__dict__)
+        if d["resumable"] is not None:
+            d["resumable"] = self.resumable.to_json()
+        del d["http"]
+        del d["postproc"]
+        del d["_sleep"]
+        del d["_rand"]
 
-    return json.dumps(d)
+        return json.dumps(d)
 
-  @staticmethod
-  def from_json(s, http, postproc):
-    """Returns an HttpRequest populated with info from a JSON object."""
-    d = json.loads(s)
-    if d['resumable'] is not None:
-      d['resumable'] = MediaUpload.new_from_json(d['resumable'])
-    return HttpRequest(
-        http,
-        postproc,
-        uri=d['uri'],
-        method=d['method'],
-        body=d['body'],
-        headers=d['headers'],
-        methodId=d['methodId'],
-        resumable=d['resumable'])
+    @staticmethod
+    def from_json(s, http, postproc):
+        """Returns an HttpRequest populated with info from a JSON object."""
+        d = json.loads(s)
+        if d["resumable"] is not None:
+            d["resumable"] = MediaUpload.new_from_json(d["resumable"])
+        return HttpRequest(
+            http,
+            postproc,
+            uri=d["uri"],
+            method=d["method"],
+            body=d["body"],
+            headers=d["headers"],
+            methodId=d["methodId"],
+            resumable=d["resumable"],
+        )
 
 
 class BatchHttpRequest(object):
-  """Batches multiple HttpRequest objects into a single HTTP request.
+    """Batches multiple HttpRequest objects into a single HTTP request.
 
   Example:
     from googleapiclient.http import BatchHttpRequest
@@ -1090,9 +1143,9 @@
     batch.execute(http=http)
   """
 
-  @util.positional(1)
-  def __init__(self, callback=None, batch_uri=None):
-    """Constructor for a BatchHttpRequest.
+    @util.positional(1)
+    def __init__(self, callback=None, batch_uri=None):
+        """Constructor for a BatchHttpRequest.
 
     Args:
       callback: callable, A callback to be called for each response, of the
@@ -1102,77 +1155,77 @@
         occurred while processing the request, or None if no error occurred.
       batch_uri: string, URI to send batch requests to.
     """
-    if batch_uri is None:
-      batch_uri = _LEGACY_BATCH_URI
+        if batch_uri is None:
+            batch_uri = _LEGACY_BATCH_URI
 
-    if batch_uri == _LEGACY_BATCH_URI:
-      LOGGER.warn(
-        "You have constructed a BatchHttpRequest using the legacy batch "
-        "endpoint %s. This endpoint will be turned down on March 25, 2019. "
-        "Please provide the API-specific endpoint or use "
-        "service.new_batch_http_request(). For more details see "
-        "https://developers.googleblog.com/2018/03/discontinuing-support-for-json-rpc-and.html"
-        "and https://developers.google.com/api-client-library/python/guide/batch.",
-        _LEGACY_BATCH_URI)
-    self._batch_uri = batch_uri
+        if batch_uri == _LEGACY_BATCH_URI:
+            LOGGER.warn(
+                "You have constructed a BatchHttpRequest using the legacy batch "
+                "endpoint %s. This endpoint will be turned down on March 25, 2019. "
+                "Please provide the API-specific endpoint or use "
+                "service.new_batch_http_request(). For more details see "
+                "https://developers.googleblog.com/2018/03/discontinuing-support-for-json-rpc-and.html"
+                " and https://developers.google.com/api-client-library/python/guide/batch.",
+                _LEGACY_BATCH_URI,
+            )
+        self._batch_uri = batch_uri
 
-    # Global callback to be called for each individual response in the batch.
-    self._callback = callback
+        # Global callback to be called for each individual response in the batch.
+        self._callback = callback
 
-    # A map from id to request.
-    self._requests = {}
+        # A map from id to request.
+        self._requests = {}
 
-    # A map from id to callback.
-    self._callbacks = {}
+        # A map from id to callback.
+        self._callbacks = {}
 
-    # List of request ids, in the order in which they were added.
-    self._order = []
+        # List of request ids, in the order in which they were added.
+        self._order = []
 
-    # The last auto generated id.
-    self._last_auto_id = 0
+        # The last auto generated id.
+        self._last_auto_id = 0
 
-    # Unique ID on which to base the Content-ID headers.
-    self._base_id = None
+        # Unique ID on which to base the Content-ID headers.
+        self._base_id = None
 
-    # A map from request id to (httplib2.Response, content) response pairs
-    self._responses = {}
+        # A map from request id to (httplib2.Response, content) response pairs
+        self._responses = {}
 
-    # A map of id(Credentials) that have been refreshed.
-    self._refreshed_credentials = {}
+        # A map of id(Credentials) that have been refreshed.
+        self._refreshed_credentials = {}
 
-  def _refresh_and_apply_credentials(self, request, http):
-    """Refresh the credentials and apply to the request.
+    def _refresh_and_apply_credentials(self, request, http):
+        """Refresh the credentials and apply to the request.
 
     Args:
       request: HttpRequest, the request.
       http: httplib2.Http, the global http object for the batch.
     """
-    # For the credentials to refresh, but only once per refresh_token
-    # If there is no http per the request then refresh the http passed in
-    # via execute()
-    creds = None
-    request_credentials = False
+        # For the credentials to refresh, but only once per refresh_token
+        # If there is no http per the request then refresh the http passed in
+        # via execute()
+        creds = None
+        request_credentials = False
 
-    if request.http is not None:
-      creds = _auth.get_credentials_from_http(request.http)
-      request_credentials = True
+        if request.http is not None:
+            creds = _auth.get_credentials_from_http(request.http)
+            request_credentials = True
 
-    if creds is None and http is not None:
-      creds = _auth.get_credentials_from_http(http)
+        if creds is None and http is not None:
+            creds = _auth.get_credentials_from_http(http)
 
-    if creds is not None:
-      if id(creds) not in self._refreshed_credentials:
-        _auth.refresh_credentials(creds)
-        self._refreshed_credentials[id(creds)] = 1
+        if creds is not None:
+            if id(creds) not in self._refreshed_credentials:
+                _auth.refresh_credentials(creds)
+                self._refreshed_credentials[id(creds)] = 1
 
-    # Only apply the credentials if we are using the http object passed in,
-    # otherwise apply() will get called during _serialize_request().
-    if request.http is None or not request_credentials:
-      _auth.apply_credentials(creds, request.headers)
+        # Only apply the credentials if we are using the http object passed in,
+        # otherwise apply() will get called during _serialize_request().
+        if request.http is None or not request_credentials:
+            _auth.apply_credentials(creds, request.headers)
 
-
-  def _id_to_header(self, id_):
-    """Convert an id to a Content-ID header value.
+    def _id_to_header(self, id_):
+        """Convert an id to a Content-ID header value.
 
     Args:
       id_: string, identifier of individual request.
@@ -1182,16 +1235,16 @@
       the value because Content-ID headers are supposed to be universally
       unique.
     """
-    if self._base_id is None:
-      self._base_id = uuid.uuid4()
+        if self._base_id is None:
+            self._base_id = uuid.uuid4()
 
-    # NB: we intentionally leave whitespace between base/id and '+', so RFC2822
-    # line folding works properly on Python 3; see
-    # https://github.com/google/google-api-python-client/issues/164
-    return '<%s + %s>' % (self._base_id, quote(id_))
+        # NB: we intentionally leave whitespace between base/id and '+', so RFC2822
+        # line folding works properly on Python 3; see
+        # https://github.com/google/google-api-python-client/issues/164
+        return "<%s + %s>" % (self._base_id, quote(id_))
 
-  def _header_to_id(self, header):
-    """Convert a Content-ID header value to an id.
+    def _header_to_id(self, header):
+        """Convert a Content-ID header value to an id.
 
     Presumes the Content-ID header conforms to the format that _id_to_header()
     returns.
@@ -1205,16 +1258,16 @@
     Raises:
       BatchError if the header is not in the expected format.
     """
-    if header[0] != '<' or header[-1] != '>':
-      raise BatchError("Invalid value for Content-ID: %s" % header)
-    if '+' not in header:
-      raise BatchError("Invalid value for Content-ID: %s" % header)
-    base, id_ = header[1:-1].split(' + ', 1)
+        if header[0] != "<" or header[-1] != ">":
+            raise BatchError("Invalid value for Content-ID: %s" % header)
+        if "+" not in header:
+            raise BatchError("Invalid value for Content-ID: %s" % header)
+        base, id_ = header[1:-1].split(" + ", 1)
 
-    return unquote(id_)
+        return unquote(id_)
 
-  def _serialize_request(self, request):
-    """Convert an HttpRequest object into a string.
+    def _serialize_request(self, request):
+        """Convert an HttpRequest object into a string.
 
     Args:
       request: HttpRequest, the request to serialize.
@@ -1222,45 +1275,47 @@
     Returns:
       The request as a string in application/http format.
     """
-    # Construct status line
-    parsed = urlparse(request.uri)
-    request_line = urlunparse(
-        ('', '', parsed.path, parsed.params, parsed.query, '')
+        # Construct status line
+        parsed = urlparse(request.uri)
+        request_line = urlunparse(
+            ("", "", parsed.path, parsed.params, parsed.query, "")
         )
-    status_line = request.method + ' ' + request_line + ' HTTP/1.1\n'
-    major, minor = request.headers.get('content-type', 'application/json').split('/')
-    msg = MIMENonMultipart(major, minor)
-    headers = request.headers.copy()
+        status_line = request.method + " " + request_line + " HTTP/1.1\n"
+        major, minor = request.headers.get("content-type", "application/json").split(
+            "/"
+        )
+        msg = MIMENonMultipart(major, minor)
+        headers = request.headers.copy()
 
-    if request.http is not None:
-      credentials = _auth.get_credentials_from_http(request.http)
-      if credentials is not None:
-        _auth.apply_credentials(credentials, headers)
+        if request.http is not None:
+            credentials = _auth.get_credentials_from_http(request.http)
+            if credentials is not None:
+                _auth.apply_credentials(credentials, headers)
 
-    # MIMENonMultipart adds its own Content-Type header.
-    if 'content-type' in headers:
-      del headers['content-type']
+        # MIMENonMultipart adds its own Content-Type header.
+        if "content-type" in headers:
+            del headers["content-type"]
 
-    for key, value in six.iteritems(headers):
-      msg[key] = value
-    msg['Host'] = parsed.netloc
-    msg.set_unixfrom(None)
+        for key, value in six.iteritems(headers):
+            msg[key] = value
+        msg["Host"] = parsed.netloc
+        msg.set_unixfrom(None)
 
-    if request.body is not None:
-      msg.set_payload(request.body)
-      msg['content-length'] = str(len(request.body))
+        if request.body is not None:
+            msg.set_payload(request.body)
+            msg["content-length"] = str(len(request.body))
 
-    # Serialize the mime message.
-    fp = StringIO()
-    # maxheaderlen=0 means don't line wrap headers.
-    g = Generator(fp, maxheaderlen=0)
-    g.flatten(msg, unixfrom=False)
-    body = fp.getvalue()
+        # Serialize the mime message.
+        fp = StringIO()
+        # maxheaderlen=0 means don't line wrap headers.
+        g = Generator(fp, maxheaderlen=0)
+        g.flatten(msg, unixfrom=False)
+        body = fp.getvalue()
 
-    return status_line + body
+        return status_line + body
 
-  def _deserialize_response(self, payload):
-    """Convert string into httplib2 response and content.
+    def _deserialize_response(self, payload):
+        """Convert string into httplib2 response and content.
 
     Args:
       payload: string, headers and body as a string.
@@ -1268,41 +1323,41 @@
     Returns:
       A pair (resp, content), such as would be returned from httplib2.request.
     """
-    # Strip off the status line
-    status_line, payload = payload.split('\n', 1)
-    protocol, status, reason = status_line.split(' ', 2)
+        # Strip off the status line
+        status_line, payload = payload.split("\n", 1)
+        protocol, status, reason = status_line.split(" ", 2)
 
-    # Parse the rest of the response
-    parser = FeedParser()
-    parser.feed(payload)
-    msg = parser.close()
-    msg['status'] = status
+        # Parse the rest of the response
+        parser = FeedParser()
+        parser.feed(payload)
+        msg = parser.close()
+        msg["status"] = status
 
-    # Create httplib2.Response from the parsed headers.
-    resp = httplib2.Response(msg)
-    resp.reason = reason
-    resp.version = int(protocol.split('/', 1)[1].replace('.', ''))
+        # Create httplib2.Response from the parsed headers.
+        resp = httplib2.Response(msg)
+        resp.reason = reason
+        resp.version = int(protocol.split("/", 1)[1].replace(".", ""))
 
-    content = payload.split('\r\n\r\n', 1)[1]
+        content = payload.split("\r\n\r\n", 1)[1]
 
-    return resp, content
+        return resp, content
 
-  def _new_id(self):
-    """Create a new id.
+    def _new_id(self):
+        """Create a new id.
 
     Auto incrementing number that avoids conflicts with ids already used.
 
     Returns:
        string, a new unique id.
     """
-    self._last_auto_id += 1
-    while str(self._last_auto_id) in self._requests:
-      self._last_auto_id += 1
-    return str(self._last_auto_id)
+        self._last_auto_id += 1
+        while str(self._last_auto_id) in self._requests:
+            self._last_auto_id += 1
+        return str(self._last_auto_id)
 
-  @util.positional(2)
-  def add(self, request, callback=None, request_id=None):
-    """Add a new request.
+    @util.positional(2)
+    def add(self, request, callback=None, request_id=None):
+        """Add a new request.
 
     Every callback added will be paired with a unique id, the request_id. That
     unique id will be passed back to the callback when the response comes back
@@ -1330,21 +1385,23 @@
       KeyError is the request_id is not unique.
     """
 
-    if len(self._order) >= MAX_BATCH_LIMIT:
-      raise BatchError("Exceeded the maximum calls(%d) in a single batch request."
-                       % MAX_BATCH_LIMIT)
-    if request_id is None:
-      request_id = self._new_id()
-    if request.resumable is not None:
-      raise BatchError("Media requests cannot be used in a batch request.")
-    if request_id in self._requests:
-      raise KeyError("A request with this ID already exists: %s" % request_id)
-    self._requests[request_id] = request
-    self._callbacks[request_id] = callback
-    self._order.append(request_id)
+        if len(self._order) >= MAX_BATCH_LIMIT:
+            raise BatchError(
+                "Exceeded the maximum calls(%d) in a single batch request."
+                % MAX_BATCH_LIMIT
+            )
+        if request_id is None:
+            request_id = self._new_id()
+        if request.resumable is not None:
+            raise BatchError("Media requests cannot be used in a batch request.")
+        if request_id in self._requests:
+            raise KeyError("A request with this ID already exists: %s" % request_id)
+        self._requests[request_id] = request
+        self._callbacks[request_id] = callback
+        self._order.append(request_id)
 
-  def _execute(self, http, order, requests):
-    """Serialize batch request, send to server, process response.
+    def _execute(self, http, order, requests):
+        """Serialize batch request, send to server, process response.
 
     Args:
       http: httplib2.Http, an http object to be used to make the request with.
@@ -1356,66 +1413,69 @@
       httplib2.HttpLib2Error if a transport error has occured.
       googleapiclient.errors.BatchError if the response is the wrong format.
     """
-    message = MIMEMultipart('mixed')
-    # Message should not write out it's own headers.
-    setattr(message, '_write_headers', lambda self: None)
+        message = MIMEMultipart("mixed")
+        # Message should not write out its own headers.
+        setattr(message, "_write_headers", lambda self: None)
 
-    # Add all the individual requests.
-    for request_id in order:
-      request = requests[request_id]
+        # Add all the individual requests.
+        for request_id in order:
+            request = requests[request_id]
 
-      msg = MIMENonMultipart('application', 'http')
-      msg['Content-Transfer-Encoding'] = 'binary'
-      msg['Content-ID'] = self._id_to_header(request_id)
+            msg = MIMENonMultipart("application", "http")
+            msg["Content-Transfer-Encoding"] = "binary"
+            msg["Content-ID"] = self._id_to_header(request_id)
 
-      body = self._serialize_request(request)
-      msg.set_payload(body)
-      message.attach(msg)
+            body = self._serialize_request(request)
+            msg.set_payload(body)
+            message.attach(msg)
 
-    # encode the body: note that we can't use `as_string`, because
-    # it plays games with `From ` lines.
-    fp = StringIO()
-    g = Generator(fp, mangle_from_=False)
-    g.flatten(message, unixfrom=False)
-    body = fp.getvalue()
+        # encode the body: note that we can't use `as_string`, because
+        # it plays games with `From ` lines.
+        fp = StringIO()
+        g = Generator(fp, mangle_from_=False)
+        g.flatten(message, unixfrom=False)
+        body = fp.getvalue()
 
-    headers = {}
-    headers['content-type'] = ('multipart/mixed; '
-                               'boundary="%s"') % message.get_boundary()
+        headers = {}
+        headers["content-type"] = (
+            "multipart/mixed; " 'boundary="%s"'
+        ) % message.get_boundary()
 
-    resp, content = http.request(self._batch_uri, method='POST', body=body,
-                                 headers=headers)
+        resp, content = http.request(
+            self._batch_uri, method="POST", body=body, headers=headers
+        )
 
-    if resp.status >= 300:
-      raise HttpError(resp, content, uri=self._batch_uri)
+        if resp.status >= 300:
+            raise HttpError(resp, content, uri=self._batch_uri)
 
-    # Prepend with a content-type header so FeedParser can handle it.
-    header = 'content-type: %s\r\n\r\n' % resp['content-type']
-    # PY3's FeedParser only accepts unicode. So we should decode content
-    # here, and encode each payload again.
-    if six.PY3:
-      content = content.decode('utf-8')
-    for_parser = header + content
+        # Prepend with a content-type header so FeedParser can handle it.
+        header = "content-type: %s\r\n\r\n" % resp["content-type"]
+        # PY3's FeedParser only accepts unicode. So we should decode content
+        # here, and encode each payload again.
+        if six.PY3:
+            content = content.decode("utf-8")
+        for_parser = header + content
 
-    parser = FeedParser()
-    parser.feed(for_parser)
-    mime_response = parser.close()
+        parser = FeedParser()
+        parser.feed(for_parser)
+        mime_response = parser.close()
 
-    if not mime_response.is_multipart():
-      raise BatchError("Response not in multipart/mixed format.", resp=resp,
-                       content=content)
+        if not mime_response.is_multipart():
+            raise BatchError(
+                "Response not in multipart/mixed format.", resp=resp, content=content
+            )
 
-    for part in mime_response.get_payload():
-      request_id = self._header_to_id(part['Content-ID'])
-      response, content = self._deserialize_response(part.get_payload())
-      # We encode content here to emulate normal http response.
-      if isinstance(content, six.text_type):
-        content = content.encode('utf-8')
-      self._responses[request_id] = (response, content)
+        for part in mime_response.get_payload():
+            request_id = self._header_to_id(part["Content-ID"])
+            response, content = self._deserialize_response(part.get_payload())
+            # We encode content here to emulate normal http response.
+            if isinstance(content, six.text_type):
+                content = content.encode("utf-8")
+            self._responses[request_id] = (response, content)
 
-  @util.positional(1)
-  def execute(self, http=None):
-    """Execute all the requests as a single batched HTTP request.
+    @util.positional(1)
+    def execute(self, http=None):
+        """Execute all the requests as a single batched HTTP request.
 
     Args:
       http: httplib2.Http, an http object to be used in place of the one the
@@ -1429,80 +1489,80 @@
       httplib2.HttpLib2Error if a transport error has occured.
       googleapiclient.errors.BatchError if the response is the wrong format.
     """
-    # If we have no requests return
-    if len(self._order) == 0:
-      return None
+        # If we have no requests return
+        if len(self._order) == 0:
+            return None
 
-    # If http is not supplied use the first valid one given in the requests.
-    if http is None:
-      for request_id in self._order:
-        request = self._requests[request_id]
-        if request is not None:
-          http = request.http
-          break
+        # If http is not supplied use the first valid one given in the requests.
+        if http is None:
+            for request_id in self._order:
+                request = self._requests[request_id]
+                if request is not None:
+                    http = request.http
+                    break
 
-    if http is None:
-      raise ValueError("Missing a valid http object.")
+        if http is None:
+            raise ValueError("Missing a valid http object.")
 
-    # Special case for OAuth2Credentials-style objects which have not yet been
-    # refreshed with an initial access_token.
-    creds = _auth.get_credentials_from_http(http)
-    if creds is not None:
-      if not _auth.is_valid(creds):
-        LOGGER.info('Attempting refresh to obtain initial access_token')
-        _auth.refresh_credentials(creds)
+        # Special case for OAuth2Credentials-style objects which have not yet been
+        # refreshed with an initial access_token.
+        creds = _auth.get_credentials_from_http(http)
+        if creds is not None:
+            if not _auth.is_valid(creds):
+                LOGGER.info("Attempting refresh to obtain initial access_token")
+                _auth.refresh_credentials(creds)
 
-    self._execute(http, self._order, self._requests)
+        self._execute(http, self._order, self._requests)
 
-    # Loop over all the requests and check for 401s. For each 401 request the
-    # credentials should be refreshed and then sent again in a separate batch.
-    redo_requests = {}
-    redo_order = []
+        # Loop over all the requests and check for 401s. For each 401 request the
+        # credentials should be refreshed and then sent again in a separate batch.
+        redo_requests = {}
+        redo_order = []
 
-    for request_id in self._order:
-      resp, content = self._responses[request_id]
-      if resp['status'] == '401':
-        redo_order.append(request_id)
-        request = self._requests[request_id]
-        self._refresh_and_apply_credentials(request, http)
-        redo_requests[request_id] = request
+        for request_id in self._order:
+            resp, content = self._responses[request_id]
+            if resp["status"] == "401":
+                redo_order.append(request_id)
+                request = self._requests[request_id]
+                self._refresh_and_apply_credentials(request, http)
+                redo_requests[request_id] = request
 
-    if redo_requests:
-      self._execute(http, redo_order, redo_requests)
+        if redo_requests:
+            self._execute(http, redo_order, redo_requests)
 
-    # Now process all callbacks that are erroring, and raise an exception for
-    # ones that return a non-2xx response? Or add extra parameter to callback
-    # that contains an HttpError?
+        # Now process all callbacks that are erroring, and raise an exception for
+        # ones that return a non-2xx response? Or add extra parameter to callback
+        # that contains an HttpError?
 
-    for request_id in self._order:
-      resp, content = self._responses[request_id]
+        for request_id in self._order:
+            resp, content = self._responses[request_id]
 
-      request = self._requests[request_id]
-      callback = self._callbacks[request_id]
+            request = self._requests[request_id]
+            callback = self._callbacks[request_id]
 
-      response = None
-      exception = None
-      try:
-        if resp.status >= 300:
-          raise HttpError(resp, content, uri=request.uri)
-        response = request.postproc(resp, content)
-      except HttpError as e:
-        exception = e
+            response = None
+            exception = None
+            try:
+                if resp.status >= 300:
+                    raise HttpError(resp, content, uri=request.uri)
+                response = request.postproc(resp, content)
+            except HttpError as e:
+                exception = e
 
-      if callback is not None:
-        callback(request_id, response, exception)
-      if self._callback is not None:
-        self._callback(request_id, response, exception)
+            if callback is not None:
+                callback(request_id, response, exception)
+            if self._callback is not None:
+                self._callback(request_id, response, exception)
 
 
 class HttpRequestMock(object):
-  """Mock of HttpRequest.
+    """Mock of HttpRequest.
 
   Do not construct directly, instead use RequestMockBuilder.
   """
 
-  def __init__(self, resp, content, postproc):
-    """Constructor for HttpRequestMock
+    def __init__(self, resp, content, postproc):
+        """Constructor for HttpRequestMock
 
     Args:
       resp: httplib2.Response, the response to emulate coming from the request
@@ -1510,25 +1570,25 @@
       postproc: callable, the post processing function usually supplied by
                 the model class. See model.JsonModel.response() as an example.
     """
-    self.resp = resp
-    self.content = content
-    self.postproc = postproc
-    if resp is None:
-      self.resp = httplib2.Response({'status': 200, 'reason': 'OK'})
-    if 'reason' in self.resp:
-      self.resp.reason = self.resp['reason']
+        self.resp = resp
+        self.content = content
+        self.postproc = postproc
+        if resp is None:
+            self.resp = httplib2.Response({"status": 200, "reason": "OK"})
+        if "reason" in self.resp:
+            self.resp.reason = self.resp["reason"]
 
-  def execute(self, http=None):
-    """Execute the request.
+    def execute(self, http=None):
+        """Execute the request.
 
     Same behavior as HttpRequest.execute(), but the response is
     mocked and not really from an HTTP request/response.
     """
-    return self.postproc(self.resp, self.content)
+        return self.postproc(self.resp, self.content)
 
 
 class RequestMockBuilder(object):
-  """A simple mock of HttpRequest
+    """A simple mock of HttpRequest
 
     Pass in a dictionary to the constructor that maps request methodIds to
     tuples of (httplib2.Response, content, opt_expected_body) that should be
@@ -1554,8 +1614,8 @@
     For more details see the project wiki.
   """
 
-  def __init__(self, responses, check_unexpected=False):
-    """Constructor for RequestMockBuilder
+    def __init__(self, responses, check_unexpected=False):
+        """Constructor for RequestMockBuilder
 
     The constructed object should be a callable object
     that can replace the class HttpResponse.
@@ -1567,79 +1627,90 @@
     check_unexpected - A boolean setting whether or not UnexpectedMethodError
                        should be raised on unsupplied method.
     """
-    self.responses = responses
-    self.check_unexpected = check_unexpected
+        self.responses = responses
+        self.check_unexpected = check_unexpected
 
-  def __call__(self, http, postproc, uri, method='GET', body=None,
-               headers=None, methodId=None, resumable=None):
-    """Implements the callable interface that discovery.build() expects
+    def __call__(
+        self,
+        http,
+        postproc,
+        uri,
+        method="GET",
+        body=None,
+        headers=None,
+        methodId=None,
+        resumable=None,
+    ):
+        """Implements the callable interface that discovery.build() expects
     of requestBuilder, which is to build an object compatible with
     HttpRequest.execute(). See that method for the description of the
     parameters and the expected response.
     """
-    if methodId in self.responses:
-      response = self.responses[methodId]
-      resp, content = response[:2]
-      if len(response) > 2:
-        # Test the body against the supplied expected_body.
-        expected_body = response[2]
-        if bool(expected_body) != bool(body):
-          # Not expecting a body and provided one
-          # or expecting a body and not provided one.
-          raise UnexpectedBodyError(expected_body, body)
-        if isinstance(expected_body, str):
-          expected_body = json.loads(expected_body)
-        body = json.loads(body)
-        if body != expected_body:
-          raise UnexpectedBodyError(expected_body, body)
-      return HttpRequestMock(resp, content, postproc)
-    elif self.check_unexpected:
-      raise UnexpectedMethodError(methodId=methodId)
-    else:
-      model = JsonModel(False)
-      return HttpRequestMock(None, '{}', model.response)
+        if methodId in self.responses:
+            response = self.responses[methodId]
+            resp, content = response[:2]
+            if len(response) > 2:
+                # Test the body against the supplied expected_body.
+                expected_body = response[2]
+                if bool(expected_body) != bool(body):
+                    # Not expecting a body and provided one
+                    # or expecting a body and not provided one.
+                    raise UnexpectedBodyError(expected_body, body)
+                if isinstance(expected_body, str):
+                    expected_body = json.loads(expected_body)
+                body = json.loads(body)
+                if body != expected_body:
+                    raise UnexpectedBodyError(expected_body, body)
+            return HttpRequestMock(resp, content, postproc)
+        elif self.check_unexpected:
+            raise UnexpectedMethodError(methodId=methodId)
+        else:
+            model = JsonModel(False)
+            return HttpRequestMock(None, "{}", model.response)
 
 
 class HttpMock(object):
-  """Mock of httplib2.Http"""
+    """Mock of httplib2.Http"""
 
-  def __init__(self, filename=None, headers=None):
-    """
+    def __init__(self, filename=None, headers=None):
+        """
     Args:
       filename: string, absolute filename to read response from
       headers: dict, header to return with response
     """
-    if headers is None:
-      headers = {'status': '200'}
-    if filename:
-      f = open(filename, 'rb')
-      self.data = f.read()
-      f.close()
-    else:
-      self.data = None
-    self.response_headers = headers
-    self.headers = None
-    self.uri = None
-    self.method = None
-    self.body = None
-    self.headers = None
+        if headers is None:
+            headers = {"status": "200"}
+        if filename:
+            f = open(filename, "rb")
+            self.data = f.read()
+            f.close()
+        else:
+            self.data = None
+        self.response_headers = headers
+        self.headers = None
+        self.uri = None
+        self.method = None
+        self.body = None
+        self.headers = None
 
-
-  def request(self, uri,
-              method='GET',
-              body=None,
-              headers=None,
-              redirections=1,
-              connection_type=None):
-    self.uri = uri
-    self.method = method
-    self.body = body
-    self.headers = headers
-    return httplib2.Response(self.response_headers), self.data
+    def request(
+        self,
+        uri,
+        method="GET",
+        body=None,
+        headers=None,
+        redirections=1,
+        connection_type=None,
+    ):
+        self.uri = uri
+        self.method = method
+        self.body = body
+        self.headers = headers
+        return httplib2.Response(self.response_headers), self.data
 
 
 class HttpMockSequence(object):
-  """Mock of httplib2.Http
+    """Mock of httplib2.Http
 
   Mocks a sequence of calls to request returning different responses for each
   call. Create an instance initialized with the desired response headers
@@ -1662,39 +1733,42 @@
   'echo_request_uri' means return the request uri in the response body
   """
 
-  def __init__(self, iterable):
-    """
+    def __init__(self, iterable):
+        """
     Args:
       iterable: iterable, a sequence of pairs of (headers, body)
     """
-    self._iterable = iterable
-    self.follow_redirects = True
+        self._iterable = iterable
+        self.follow_redirects = True
 
-  def request(self, uri,
-              method='GET',
-              body=None,
-              headers=None,
-              redirections=1,
-              connection_type=None):
-    resp, content = self._iterable.pop(0)
-    if content == 'echo_request_headers':
-      content = headers
-    elif content == 'echo_request_headers_as_json':
-      content = json.dumps(headers)
-    elif content == 'echo_request_body':
-      if hasattr(body, 'read'):
-        content = body.read()
-      else:
-        content = body
-    elif content == 'echo_request_uri':
-      content = uri
-    if isinstance(content, six.text_type):
-      content = content.encode('utf-8')
-    return httplib2.Response(resp), content
+    def request(
+        self,
+        uri,
+        method="GET",
+        body=None,
+        headers=None,
+        redirections=1,
+        connection_type=None,
+    ):
+        resp, content = self._iterable.pop(0)
+        if content == "echo_request_headers":
+            content = headers
+        elif content == "echo_request_headers_as_json":
+            content = json.dumps(headers)
+        elif content == "echo_request_body":
+            if hasattr(body, "read"):
+                content = body.read()
+            else:
+                content = body
+        elif content == "echo_request_uri":
+            content = uri
+        if isinstance(content, six.text_type):
+            content = content.encode("utf-8")
+        return httplib2.Response(resp), content
 
 
 def set_user_agent(http, user_agent):
-  """Set the user-agent on every request.
+    """Set the user-agent on every request.
 
   Args:
      http - An instance of httplib2.Http
@@ -1712,29 +1786,40 @@
   Most of the time the user-agent will be set doing auth, this is for the rare
   cases where you are accessing an unauthenticated endpoint.
   """
-  request_orig = http.request
+    request_orig = http.request
 
-  # The closure that will replace 'httplib2.Http.request'.
-  def new_request(uri, method='GET', body=None, headers=None,
-                  redirections=httplib2.DEFAULT_MAX_REDIRECTS,
-                  connection_type=None):
-    """Modify the request headers to add the user-agent."""
-    if headers is None:
-      headers = {}
-    if 'user-agent' in headers:
-      headers['user-agent'] = user_agent + ' ' + headers['user-agent']
-    else:
-      headers['user-agent'] = user_agent
-    resp, content = request_orig(uri, method=method, body=body, headers=headers,
-                        redirections=redirections, connection_type=connection_type)
-    return resp, content
+    # The closure that will replace 'httplib2.Http.request'.
+    def new_request(
+        uri,
+        method="GET",
+        body=None,
+        headers=None,
+        redirections=httplib2.DEFAULT_MAX_REDIRECTS,
+        connection_type=None,
+    ):
+        """Modify the request headers to add the user-agent."""
+        if headers is None:
+            headers = {}
+        if "user-agent" in headers:
+            headers["user-agent"] = user_agent + " " + headers["user-agent"]
+        else:
+            headers["user-agent"] = user_agent
+        resp, content = request_orig(
+            uri,
+            method=method,
+            body=body,
+            headers=headers,
+            redirections=redirections,
+            connection_type=connection_type,
+        )
+        return resp, content
 
-  http.request = new_request
-  return http
+    http.request = new_request
+    return http
 
 
 def tunnel_patch(http):
-  """Tunnel PATCH requests over POST.
+    """Tunnel PATCH requests over POST.
   Args:
      http - An instance of httplib2.Http
          or something that acts like it.
@@ -1751,31 +1836,43 @@
   Apply this last if you are using OAuth 1.0, as changing the method
   will result in a different signature.
   """
-  request_orig = http.request
+    request_orig = http.request
 
-  # The closure that will replace 'httplib2.Http.request'.
-  def new_request(uri, method='GET', body=None, headers=None,
-                  redirections=httplib2.DEFAULT_MAX_REDIRECTS,
-                  connection_type=None):
-    """Modify the request headers to add the user-agent."""
-    if headers is None:
-      headers = {}
-    if method == 'PATCH':
-      if 'oauth_token' in headers.get('authorization', ''):
-        LOGGER.warning(
-            'OAuth 1.0 request made with Credentials after tunnel_patch.')
-      headers['x-http-method-override'] = "PATCH"
-      method = 'POST'
-    resp, content = request_orig(uri, method=method, body=body, headers=headers,
-                        redirections=redirections, connection_type=connection_type)
-    return resp, content
+    # The closure that will replace 'httplib2.Http.request'.
+    def new_request(
+        uri,
+        method="GET",
+        body=None,
+        headers=None,
+        redirections=httplib2.DEFAULT_MAX_REDIRECTS,
+        connection_type=None,
+    ):
+        """Modify the request headers to add the user-agent."""
+        if headers is None:
+            headers = {}
+        if method == "PATCH":
+            if "oauth_token" in headers.get("authorization", ""):
+                LOGGER.warning(
+                    "OAuth 1.0 request made with Credentials after tunnel_patch."
+                )
+            headers["x-http-method-override"] = "PATCH"
+            method = "POST"
+        resp, content = request_orig(
+            uri,
+            method=method,
+            body=body,
+            headers=headers,
+            redirections=redirections,
+            connection_type=connection_type,
+        )
+        return resp, content
 
-  http.request = new_request
-  return http
+    http.request = new_request
+    return http
 
 
 def build_http():
-  """Builds httplib2.Http object
+    """Builds httplib2.Http object
 
   Returns:
   A httplib2.Http object, which is used to make http requests, and which has timeout set by default.
@@ -1785,8 +1882,8 @@
 
   before interacting with this method.
   """
-  if socket.getdefaulttimeout() is not None:
-    http_timeout = socket.getdefaulttimeout()
-  else:
-    http_timeout = DEFAULT_HTTP_TIMEOUT_SEC
-  return httplib2.Http(timeout=http_timeout)
+    if socket.getdefaulttimeout() is not None:
+        http_timeout = socket.getdefaulttimeout()
+    else:
+        http_timeout = DEFAULT_HTTP_TIMEOUT_SEC
+    return httplib2.Http(timeout=http_timeout)
diff --git a/googleapiclient/mimeparse.py b/googleapiclient/mimeparse.py
index bc9ad09..6051628 100644
--- a/googleapiclient/mimeparse.py
+++ b/googleapiclient/mimeparse.py
@@ -25,11 +25,11 @@
 from functools import reduce
 import six
 
-__version__ = '0.1.3'
-__author__ = 'Joe Gregorio'
-__email__ = 'joe@bitworking.org'
-__license__ = 'MIT License'
-__credits__ = ''
+__version__ = "0.1.3"
+__author__ = "Joe Gregorio"
+__email__ = "joe@bitworking.org"
+__license__ = "MIT License"
+__credits__ = ""
 
 
 def parse_mime_type(mime_type):
@@ -42,16 +42,16 @@
 
        ('application', 'xhtml', {'q', '0.5'})
        """
-    parts = mime_type.split(';')
-    params = dict([tuple([s.strip() for s in param.split('=', 1)])\
-            for param in parts[1:]
-                  ])
+    parts = mime_type.split(";")
+    params = dict(
+        [tuple([s.strip() for s in param.split("=", 1)]) for param in parts[1:]]
+    )
     full_type = parts[0].strip()
     # Java URLConnection class sends an Accept header that includes a
     # single '*'. Turn it into a legal wildcard.
-    if full_type == '*':
-        full_type = '*/*'
-    (type, subtype) = full_type.split('/')
+    if full_type == "*":
+        full_type = "*/*"
+    (type, subtype) = full_type.split("/")
 
     return (type.strip(), subtype.strip(), params)
 
@@ -71,10 +71,14 @@
     necessary.
     """
     (type, subtype, params) = parse_mime_type(range)
-    if 'q' not in params or not params['q'] or \
-            not float(params['q']) or float(params['q']) > 1\
-            or float(params['q']) < 0:
-        params['q'] = '1'
+    if (
+        "q" not in params
+        or not params["q"]
+        or not float(params["q"])
+        or float(params["q"]) > 1
+        or float(params["q"]) < 0
+    ):
+        params["q"] = "1"
 
     return (type, subtype, params)
 
@@ -90,25 +94,28 @@
     """
     best_fitness = -1
     best_fit_q = 0
-    (target_type, target_subtype, target_params) =\
-            parse_media_range(mime_type)
+    (target_type, target_subtype, target_params) = parse_media_range(mime_type)
     for (type, subtype, params) in parsed_ranges:
-        type_match = (type == target_type or\
-                      type == '*' or\
-                      target_type == '*')
-        subtype_match = (subtype == target_subtype or\
-                         subtype == '*' or\
-                         target_subtype == '*')
+        type_match = type == target_type or type == "*" or target_type == "*"
+        subtype_match = (
+            subtype == target_subtype or subtype == "*" or target_subtype == "*"
+        )
         if type_match and subtype_match:
-            param_matches = reduce(lambda x, y: x + y, [1 for (key, value) in \
-                    six.iteritems(target_params) if key != 'q' and \
-                    key in params and value == params[key]], 0)
+            param_matches = reduce(
+                lambda x, y: x + y,
+                [
+                    1
+                    for (key, value) in six.iteritems(target_params)
+                    if key != "q" and key in params and value == params[key]
+                ],
+                0,
+            )
             fitness = (type == target_type) and 100 or 0
             fitness += (subtype == target_subtype) and 10 or 0
             fitness += param_matches
             if fitness > best_fitness:
                 best_fitness = fitness
-                best_fit_q = params['q']
+                best_fit_q = params["q"]
 
     return best_fitness, float(best_fit_q)
 
@@ -137,7 +144,7 @@
     0.7
 
     """
-    parsed_ranges = [parse_media_range(r) for r in ranges.split(',')]
+    parsed_ranges = [parse_media_range(r) for r in ranges.split(",")]
 
     return quality_parsed(mime_type, parsed_ranges)
 
@@ -156,17 +163,18 @@
                    'text/*;q=0.5,*/*; q=0.1')
     'text/xml'
     """
-    split_header = _filter_blank(header.split(','))
+    split_header = _filter_blank(header.split(","))
     parsed_header = [parse_media_range(r) for r in split_header]
     weighted_matches = []
     pos = 0
     for mime_type in supported:
-        weighted_matches.append((fitness_and_quality_parsed(mime_type,
-                                 parsed_header), pos, mime_type))
+        weighted_matches.append(
+            (fitness_and_quality_parsed(mime_type, parsed_header), pos, mime_type)
+        )
         pos += 1
     weighted_matches.sort()
 
-    return weighted_matches[-1][0][1] and weighted_matches[-1][2] or ''
+    return weighted_matches[-1][0][1] and weighted_matches[-1][2] or ""
 
 
 def _filter_blank(i):
diff --git a/googleapiclient/model.py b/googleapiclient/model.py
index 7ab80e9..0449a92 100644
--- a/googleapiclient/model.py
+++ b/googleapiclient/model.py
@@ -22,7 +22,7 @@
 from __future__ import absolute_import
 import six
 
-__author__ = 'jcgregorio@google.com (Joe Gregorio)'
+__author__ = "jcgregorio@google.com (Joe Gregorio)"
 
 import json
 import logging
@@ -41,19 +41,19 @@
 
 
 def _abstract():
-  raise NotImplementedError('You need to override this function')
+    raise NotImplementedError("You need to override this function")
 
 
 class Model(object):
-  """Model base class.
+    """Model base class.
 
   All Model classes should implement this interface.
   The Model serializes and de-serializes between a wire
   format such as JSON and a Python object representation.
   """
 
-  def request(self, headers, path_params, query_params, body_value):
-    """Updates outgoing requests with a serialized body.
+    def request(self, headers, path_params, query_params, body_value):
+        """Updates outgoing requests with a serialized body.
 
     Args:
       headers: dict, request headers
@@ -69,10 +69,10 @@
       query: string, query part of the request URI
       body: string, the body serialized in the desired wire format.
     """
-    _abstract()
+        _abstract()
 
-  def response(self, resp, content):
-    """Convert the response wire format into a Python object.
+    def response(self, resp, content):
+        """Convert the response wire format into a Python object.
 
     Args:
       resp: httplib2.Response, the HTTP response headers and status
@@ -84,11 +84,11 @@
     Raises:
       googleapiclient.errors.HttpError if a non 2xx response is received.
     """
-    _abstract()
+        _abstract()
 
 
 class BaseModel(Model):
-  """Base model class.
+    """Base model class.
 
   Subclasses should provide implementations for the "serialize" and
   "deserialize" methods, as well as values for the following class attributes.
@@ -101,29 +101,29 @@
     alt_param: The value to supply as the "alt" query parameter for requests.
   """
 
-  accept = None
-  content_type = None
-  no_content_response = None
-  alt_param = None
+    accept = None
+    content_type = None
+    no_content_response = None
+    alt_param = None
 
-  def _log_request(self, headers, path_params, query, body):
-    """Logs debugging information about the request if requested."""
-    if dump_request_response:
-      LOGGER.info('--request-start--')
-      LOGGER.info('-headers-start-')
-      for h, v in six.iteritems(headers):
-        LOGGER.info('%s: %s', h, v)
-      LOGGER.info('-headers-end-')
-      LOGGER.info('-path-parameters-start-')
-      for h, v in six.iteritems(path_params):
-        LOGGER.info('%s: %s', h, v)
-      LOGGER.info('-path-parameters-end-')
-      LOGGER.info('body: %s', body)
-      LOGGER.info('query: %s', query)
-      LOGGER.info('--request-end--')
+    def _log_request(self, headers, path_params, query, body):
+        """Logs debugging information about the request if requested."""
+        if dump_request_response:
+            LOGGER.info("--request-start--")
+            LOGGER.info("-headers-start-")
+            for h, v in six.iteritems(headers):
+                LOGGER.info("%s: %s", h, v)
+            LOGGER.info("-headers-end-")
+            LOGGER.info("-path-parameters-start-")
+            for h, v in six.iteritems(path_params):
+                LOGGER.info("%s: %s", h, v)
+            LOGGER.info("-path-parameters-end-")
+            LOGGER.info("body: %s", body)
+            LOGGER.info("query: %s", query)
+            LOGGER.info("--request-end--")
 
-  def request(self, headers, path_params, query_params, body_value):
-    """Updates outgoing requests with a serialized body.
+    def request(self, headers, path_params, query_params, body_value):
+        """Updates outgoing requests with a serialized body.
 
     Args:
       headers: dict, request headers
@@ -139,28 +139,31 @@
       query: string, query part of the request URI
       body: string, the body serialized as JSON
     """
-    query = self._build_query(query_params)
-    headers['accept'] = self.accept
-    headers['accept-encoding'] = 'gzip, deflate'
-    if 'user-agent' in headers:
-      headers['user-agent'] += ' '
-    else:
-      headers['user-agent'] = ''
-    headers['user-agent'] += '(gzip)'
-    if 'x-goog-api-client' in headers:
-      headers['x-goog-api-client'] += ' '
-    else:
-      headers['x-goog-api-client'] = ''
-    headers['x-goog-api-client'] += 'gdcl/%s gl-python/%s' % (__version__, _PY_VERSION)
+        query = self._build_query(query_params)
+        headers["accept"] = self.accept
+        headers["accept-encoding"] = "gzip, deflate"
+        if "user-agent" in headers:
+            headers["user-agent"] += " "
+        else:
+            headers["user-agent"] = ""
+        headers["user-agent"] += "(gzip)"
+        if "x-goog-api-client" in headers:
+            headers["x-goog-api-client"] += " "
+        else:
+            headers["x-goog-api-client"] = ""
+        headers["x-goog-api-client"] += "gdcl/%s gl-python/%s" % (
+            __version__,
+            _PY_VERSION,
+        )
 
-    if body_value is not None:
-      headers['content-type'] = self.content_type
-      body_value = self.serialize(body_value)
-    self._log_request(headers, path_params, query, body_value)
-    return (headers, path_params, query, body_value)
+        if body_value is not None:
+            headers["content-type"] = self.content_type
+            body_value = self.serialize(body_value)
+        self._log_request(headers, path_params, query, body_value)
+        return (headers, path_params, query, body_value)
 
-  def _build_query(self, params):
-    """Builds a query string.
+    def _build_query(self, params):
+        """Builds a query string.
 
     Args:
       params: dict, the query parameters
@@ -168,32 +171,32 @@
     Returns:
       The query parameters properly encoded into an HTTP URI query string.
     """
-    if self.alt_param is not None:
-      params.update({'alt': self.alt_param})
-    astuples = []
-    for key, value in six.iteritems(params):
-      if type(value) == type([]):
-        for x in value:
-          x = x.encode('utf-8')
-          astuples.append((key, x))
-      else:
-        if isinstance(value, six.text_type) and callable(value.encode):
-          value = value.encode('utf-8')
-        astuples.append((key, value))
-    return '?' + urlencode(astuples)
+        if self.alt_param is not None:
+            params.update({"alt": self.alt_param})
+        astuples = []
+        for key, value in six.iteritems(params):
+            if type(value) == type([]):
+                for x in value:
+                    x = x.encode("utf-8")
+                    astuples.append((key, x))
+            else:
+                if isinstance(value, six.text_type) and callable(value.encode):
+                    value = value.encode("utf-8")
+                astuples.append((key, value))
+        return "?" + urlencode(astuples)
 
-  def _log_response(self, resp, content):
-    """Logs debugging information about the response if requested."""
-    if dump_request_response:
-      LOGGER.info('--response-start--')
-      for h, v in six.iteritems(resp):
-        LOGGER.info('%s: %s', h, v)
-      if content:
-        LOGGER.info(content)
-      LOGGER.info('--response-end--')
+    def _log_response(self, resp, content):
+        """Logs debugging information about the response if requested."""
+        if dump_request_response:
+            LOGGER.info("--response-start--")
+            for h, v in six.iteritems(resp):
+                LOGGER.info("%s: %s", h, v)
+            if content:
+                LOGGER.info(content)
+            LOGGER.info("--response-end--")
 
-  def response(self, resp, content):
-    """Convert the response wire format into a Python object.
+    def response(self, resp, content):
+        """Convert the response wire format into a Python object.
 
     Args:
       resp: httplib2.Response, the HTTP response headers and status
@@ -205,21 +208,21 @@
     Raises:
       googleapiclient.errors.HttpError if a non 2xx response is received.
     """
-    self._log_response(resp, content)
-    # Error handling is TBD, for example, do we retry
-    # for some operation/error combinations?
-    if resp.status < 300:
-      if resp.status == 204:
-        # A 204: No Content response should be treated differently
-        # to all the other success states
-        return self.no_content_response
-      return self.deserialize(content)
-    else:
-      LOGGER.debug('Content from bad request was: %s' % content)
-      raise HttpError(resp, content)
+        self._log_response(resp, content)
+        # Error handling is TBD, for example, do we retry
+        # for some operation/error combinations?
+        if resp.status < 300:
+            if resp.status == 204:
+                # A 204: No Content response should be treated differently
+                # to all the other success states
+                return self.no_content_response
+            return self.deserialize(content)
+        else:
+            LOGGER.debug("Content from bad request was: %s" % content)
+            raise HttpError(resp, content)
 
-  def serialize(self, body_value):
-    """Perform the actual Python object serialization.
+    def serialize(self, body_value):
+        """Perform the actual Python object serialization.
 
     Args:
       body_value: object, the request body as a Python object.
@@ -227,10 +230,10 @@
     Returns:
       string, the body in serialized form.
     """
-    _abstract()
+        _abstract()
 
-  def deserialize(self, content):
-    """Perform the actual deserialization from response string to Python
+    def deserialize(self, content):
+        """Perform the actual deserialization from response string to Python
     object.
 
     Args:
@@ -239,98 +242,105 @@
     Returns:
       The body de-serialized as a Python object.
     """
-    _abstract()
+        _abstract()
 
 
 class JsonModel(BaseModel):
-  """Model class for JSON.
+    """Model class for JSON.
 
   Serializes and de-serializes between JSON and the Python
   object representation of HTTP request and response bodies.
   """
-  accept = 'application/json'
-  content_type = 'application/json'
-  alt_param = 'json'
 
-  def __init__(self, data_wrapper=False):
-    """Construct a JsonModel.
+    accept = "application/json"
+    content_type = "application/json"
+    alt_param = "json"
+
+    def __init__(self, data_wrapper=False):
+        """Construct a JsonModel.
 
     Args:
       data_wrapper: boolean, wrap requests and responses in a data wrapper
     """
-    self._data_wrapper = data_wrapper
+        self._data_wrapper = data_wrapper
 
-  def serialize(self, body_value):
-    if (isinstance(body_value, dict) and 'data' not in body_value and
-        self._data_wrapper):
-      body_value = {'data': body_value}
-    return json.dumps(body_value)
+    def serialize(self, body_value):
+        if (
+            isinstance(body_value, dict)
+            and "data" not in body_value
+            and self._data_wrapper
+        ):
+            body_value = {"data": body_value}
+        return json.dumps(body_value)
 
-  def deserialize(self, content):
-    try:
-        content = content.decode('utf-8')
-    except AttributeError:
-        pass
-    body = json.loads(content)
-    if self._data_wrapper and isinstance(body, dict) and 'data' in body:
-      body = body['data']
-    return body
+    def deserialize(self, content):
+        try:
+            content = content.decode("utf-8")
+        except AttributeError:
+            pass
+        body = json.loads(content)
+        if self._data_wrapper and isinstance(body, dict) and "data" in body:
+            body = body["data"]
+        return body
 
-  @property
-  def no_content_response(self):
-    return {}
+    @property
+    def no_content_response(self):
+        return {}
 
 
 class RawModel(JsonModel):
-  """Model class for requests that don't return JSON.
+    """Model class for requests that don't return JSON.
 
   Serializes and de-serializes between JSON and the Python
   object representation of HTTP request, and returns the raw bytes
   of the response body.
   """
-  accept = '*/*'
-  content_type = 'application/json'
-  alt_param = None
 
-  def deserialize(self, content):
-    return content
+    accept = "*/*"
+    content_type = "application/json"
+    alt_param = None
 
-  @property
-  def no_content_response(self):
-    return ''
+    def deserialize(self, content):
+        return content
+
+    @property
+    def no_content_response(self):
+        return ""
 
 
 class MediaModel(JsonModel):
-  """Model class for requests that return Media.
+    """Model class for requests that return Media.
 
   Serializes and de-serializes between JSON and the Python
   object representation of HTTP request, and returns the raw bytes
   of the response body.
   """
-  accept = '*/*'
-  content_type = 'application/json'
-  alt_param = 'media'
 
-  def deserialize(self, content):
-    return content
+    accept = "*/*"
+    content_type = "application/json"
+    alt_param = "media"
 
-  @property
-  def no_content_response(self):
-    return ''
+    def deserialize(self, content):
+        return content
+
+    @property
+    def no_content_response(self):
+        return ""
 
 
 class ProtocolBufferModel(BaseModel):
-  """Model class for protocol buffers.
+    """Model class for protocol buffers.
 
   Serializes and de-serializes the binary protocol buffer sent in the HTTP
   request and response bodies.
   """
-  accept = 'application/x-protobuf'
-  content_type = 'application/x-protobuf'
-  alt_param = 'proto'
 
-  def __init__(self, protocol_buffer):
-    """Constructs a ProtocolBufferModel.
+    accept = "application/x-protobuf"
+    content_type = "application/x-protobuf"
+    alt_param = "proto"
+
+    def __init__(self, protocol_buffer):
+        """Constructs a ProtocolBufferModel.
 
     The serialzed protocol buffer returned in an HTTP response will be
     de-serialized using the given protocol buffer class.
@@ -339,21 +349,21 @@
       protocol_buffer: The protocol buffer class used to de-serialize a
       response from the API.
     """
-    self._protocol_buffer = protocol_buffer
+        self._protocol_buffer = protocol_buffer
 
-  def serialize(self, body_value):
-    return body_value.SerializeToString()
+    def serialize(self, body_value):
+        return body_value.SerializeToString()
 
-  def deserialize(self, content):
-    return self._protocol_buffer.FromString(content)
+    def deserialize(self, content):
+        return self._protocol_buffer.FromString(content)
 
-  @property
-  def no_content_response(self):
-    return self._protocol_buffer()
+    @property
+    def no_content_response(self):
+        return self._protocol_buffer()
 
 
 def makepatch(original, modified):
-  """Create a patch object.
+    """Create a patch object.
 
   Some methods support PATCH, an efficient way to send updates to a resource.
   This method allows the easy construction of patch bodies by looking at the
@@ -373,24 +383,24 @@
     service.activities.patch(postid=postid, userid=userid,
       body=makepatch(original, item)).execute()
   """
-  patch = {}
-  for key, original_value in six.iteritems(original):
-    modified_value = modified.get(key, None)
-    if modified_value is None:
-      # Use None to signal that the element is deleted
-      patch[key] = None
-    elif original_value != modified_value:
-      if type(original_value) == type({}):
-        # Recursively descend objects
-        patch[key] = makepatch(original_value, modified_value)
-      else:
-        # In the case of simple types or arrays we just replace
-        patch[key] = modified_value
-    else:
-      # Don't add anything to patch if there's no change
-      pass
-  for key in modified:
-    if key not in original:
-      patch[key] = modified[key]
+    patch = {}
+    for key, original_value in six.iteritems(original):
+        modified_value = modified.get(key, None)
+        if modified_value is None:
+            # Use None to signal that the element is deleted
+            patch[key] = None
+        elif original_value != modified_value:
+            if type(original_value) == type({}):
+                # Recursively descend objects
+                patch[key] = makepatch(original_value, modified_value)
+            else:
+                # In the case of simple types or arrays we just replace
+                patch[key] = modified_value
+        else:
+            # Don't add anything to patch if there's no change
+            pass
+    for key in modified:
+        if key not in original:
+            patch[key] = modified[key]
 
-  return patch
+    return patch
diff --git a/googleapiclient/sample_tools.py b/googleapiclient/sample_tools.py
index 5cb7a06..2b6a21b 100644
--- a/googleapiclient/sample_tools.py
+++ b/googleapiclient/sample_tools.py
@@ -18,8 +18,8 @@
 """
 from __future__ import absolute_import
 
-__author__ = 'jcgregorio@google.com (Joe Gregorio)'
-__all__ = ['init']
+__author__ = "jcgregorio@google.com (Joe Gregorio)"
+__all__ = ["init"]
 
 
 import argparse
@@ -28,8 +28,11 @@
 from googleapiclient import discovery
 from googleapiclient.http import build_http
 
-def init(argv, name, version, doc, filename, scope=None, parents=[], discovery_filename=None):
-  """A common initialization routine for samples.
+
+def init(
+    argv, name, version, doc, filename, scope=None, parents=[], discovery_filename=None
+):
+    """A common initialization routine for samples.
 
   Many of the sample applications do the same initialization, which has now
   been consolidated into this function. This function uses common idioms found
@@ -52,55 +55,56 @@
     A tuple of (service, flags), where service is the service object and flags
     is the parsed command-line flags.
   """
-  try:
-      from oauth2client import client
-      from oauth2client import file
-      from oauth2client import tools
-  except ImportError:
-      raise ImportError('googleapiclient.sample_tools requires oauth2client. Please install oauth2client and try again.')
+    try:
+        from oauth2client import client
+        from oauth2client import file
+        from oauth2client import tools
+    except ImportError:
+        raise ImportError(
+            "googleapiclient.sample_tools requires oauth2client. Please install oauth2client and try again."
+        )
 
-  if scope is None:
-    scope = 'https://www.googleapis.com/auth/' + name
+    if scope is None:
+        scope = "https://www.googleapis.com/auth/" + name
 
-  # Parser command-line arguments.
-  parent_parsers = [tools.argparser]
-  parent_parsers.extend(parents)
-  parser = argparse.ArgumentParser(
-      description=doc,
-      formatter_class=argparse.RawDescriptionHelpFormatter,
-      parents=parent_parsers)
-  flags = parser.parse_args(argv[1:])
+    # Parser command-line arguments.
+    parent_parsers = [tools.argparser]
+    parent_parsers.extend(parents)
+    parser = argparse.ArgumentParser(
+        description=doc,
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        parents=parent_parsers,
+    )
+    flags = parser.parse_args(argv[1:])
 
-  # Name of a file containing the OAuth 2.0 information for this
-  # application, including client_id and client_secret, which are found
-  # on the API Access tab on the Google APIs
-  # Console <http://code.google.com/apis/console>.
-  client_secrets = os.path.join(os.path.dirname(filename),
-                                'client_secrets.json')
+    # Name of a file containing the OAuth 2.0 information for this
+    # application, including client_id and client_secret, which are found
+    # on the API Access tab on the Google APIs
+    # Console <http://code.google.com/apis/console>.
+    client_secrets = os.path.join(os.path.dirname(filename), "client_secrets.json")
 
-  # Set up a Flow object to be used if we need to authenticate.
-  flow = client.flow_from_clientsecrets(client_secrets,
-      scope=scope,
-      message=tools.message_if_missing(client_secrets))
+    # Set up a Flow object to be used if we need to authenticate.
+    flow = client.flow_from_clientsecrets(
+        client_secrets, scope=scope, message=tools.message_if_missing(client_secrets)
+    )
 
-  # Prepare credentials, and authorize HTTP object with them.
-  # If the credentials don't exist or are invalid run through the native client
-  # flow. The Storage object will ensure that if successful the good
-  # credentials will get written back to a file.
-  storage = file.Storage(name + '.dat')
-  credentials = storage.get()
-  if credentials is None or credentials.invalid:
-    credentials = tools.run_flow(flow, storage, flags)
-  http = credentials.authorize(http=build_http())
+    # Prepare credentials, and authorize HTTP object with them.
+    # If the credentials don't exist or are invalid run through the native client
+    # flow. The Storage object will ensure that if successful the good
+    # credentials will get written back to a file.
+    storage = file.Storage(name + ".dat")
+    credentials = storage.get()
+    if credentials is None or credentials.invalid:
+        credentials = tools.run_flow(flow, storage, flags)
+    http = credentials.authorize(http=build_http())
 
-  if discovery_filename is None:
-    # Construct a service object via the discovery service.
-    service = discovery.build(name, version, http=http)
-  else:
-    # Construct a service object using a local discovery document file.
-    with open(discovery_filename) as discovery_file:
-      service = discovery.build_from_document(
-          discovery_file.read(),
-          base='https://www.googleapis.com/',
-          http=http)
-  return (service, flags)
+    if discovery_filename is None:
+        # Construct a service object via the discovery service.
+        service = discovery.build(name, version, http=http)
+    else:
+        # Construct a service object using a local discovery document file.
+        with open(discovery_filename) as discovery_file:
+            service = discovery.build_from_document(
+                discovery_file.read(), base="https://www.googleapis.com/", http=http
+            )
+    return (service, flags)
diff --git a/googleapiclient/schema.py b/googleapiclient/schema.py
index 10d4a1b..022cb0a 100644
--- a/googleapiclient/schema.py
+++ b/googleapiclient/schema.py
@@ -61,7 +61,7 @@
 
 # TODO(jcgregorio) support format, enum, minimum, maximum
 
-__author__ = 'jcgregorio@google.com (Joe Gregorio)'
+__author__ = "jcgregorio@google.com (Joe Gregorio)"
 
 import copy
 
@@ -69,23 +69,23 @@
 
 
 class Schemas(object):
-  """Schemas for an API."""
+    """Schemas for an API."""
 
-  def __init__(self, discovery):
-    """Constructor.
+    def __init__(self, discovery):
+        """Constructor.
 
     Args:
       discovery: object, Deserialized discovery document from which we pull
         out the named schema.
     """
-    self.schemas = discovery.get('schemas', {})
+        self.schemas = discovery.get("schemas", {})
 
-    # Cache of pretty printed schemas.
-    self.pretty = {}
+        # Cache of pretty printed schemas.
+        self.pretty = {}
 
-  @util.positional(2)
-  def _prettyPrintByName(self, name, seen=None, dent=0):
-    """Get pretty printed object prototype from the schema name.
+    @util.positional(2)
+    def _prettyPrintByName(self, name, seen=None, dent=0):
+        """Get pretty printed object prototype from the schema name.
 
     Args:
       name: string, Name of schema in the discovery document.
@@ -96,24 +96,25 @@
       string, A string that contains a prototype object with
         comments that conforms to the given schema.
     """
-    if seen is None:
-      seen = []
+        if seen is None:
+            seen = []
 
-    if name in seen:
-      # Do not fall into an infinite loop over recursive definitions.
-      return '# Object with schema name: %s' % name
-    seen.append(name)
+        if name in seen:
+            # Do not fall into an infinite loop over recursive definitions.
+            return "# Object with schema name: %s" % name
+        seen.append(name)
 
-    if name not in self.pretty:
-      self.pretty[name] = _SchemaToStruct(self.schemas[name],
-          seen, dent=dent).to_str(self._prettyPrintByName)
+        if name not in self.pretty:
+            self.pretty[name] = _SchemaToStruct(
+                self.schemas[name], seen, dent=dent
+            ).to_str(self._prettyPrintByName)
 
-    seen.pop()
+        seen.pop()
 
-    return self.pretty[name]
+        return self.pretty[name]
 
-  def prettyPrintByName(self, name):
-    """Get pretty printed object prototype from the schema name.
+    def prettyPrintByName(self, name):
+        """Get pretty printed object prototype from the schema name.
 
     Args:
       name: string, Name of schema in the discovery document.
@@ -122,12 +123,12 @@
       string, A string that contains a prototype object with
         comments that conforms to the given schema.
     """
-    # Return with trailing comma and newline removed.
-    return self._prettyPrintByName(name, seen=[], dent=1)[:-2]
+        # Return with trailing comma and newline removed.
+        return self._prettyPrintByName(name, seen=[], dent=1)[:-2]
 
-  @util.positional(2)
-  def _prettyPrintSchema(self, schema, seen=None, dent=0):
-    """Get pretty printed object prototype of schema.
+    @util.positional(2)
+    def _prettyPrintSchema(self, schema, seen=None, dent=0):
+        """Get pretty printed object prototype of schema.
 
     Args:
       schema: object, Parsed JSON schema.
@@ -138,13 +139,13 @@
       string, A string that contains a prototype object with
         comments that conforms to the given schema.
     """
-    if seen is None:
-      seen = []
+        if seen is None:
+            seen = []
 
-    return _SchemaToStruct(schema, seen, dent=dent).to_str(self._prettyPrintByName)
+        return _SchemaToStruct(schema, seen, dent=dent).to_str(self._prettyPrintByName)
 
-  def prettyPrintSchema(self, schema):
-    """Get pretty printed object prototype of schema.
+    def prettyPrintSchema(self, schema):
+        """Get pretty printed object prototype of schema.
 
     Args:
       schema: object, Parsed JSON schema.
@@ -153,25 +154,25 @@
       string, A string that contains a prototype object with
         comments that conforms to the given schema.
     """
-    # Return with trailing comma and newline removed.
-    return self._prettyPrintSchema(schema, dent=1)[:-2]
+        # Return with trailing comma and newline removed.
+        return self._prettyPrintSchema(schema, dent=1)[:-2]
 
-  def get(self, name, default=None):
-    """Get deserialized JSON schema from the schema name.
+    def get(self, name, default=None):
+        """Get deserialized JSON schema from the schema name.
 
     Args:
       name: string, Schema name.
       default: object, return value if name not found.
     """
-    return self.schemas.get(name, default)
+        return self.schemas.get(name, default)
 
 
 class _SchemaToStruct(object):
-  """Convert schema to a prototype object."""
+    """Convert schema to a prototype object."""
 
-  @util.positional(3)
-  def __init__(self, schema, seen, dent=0):
-    """Constructor.
+    @util.positional(3)
+    def __init__(self, schema, seen, dent=0):
+        """Constructor.
 
     Args:
       schema: object, Parsed JSON schema.
@@ -179,67 +180,67 @@
         handle recursive definitions.
       dent: int, Initial indentation depth.
     """
-    # The result of this parsing kept as list of strings.
-    self.value = []
+        # The result of this parsing kept as list of strings.
+        self.value = []
 
-    # The final value of the parsing.
-    self.string = None
+        # The final value of the parsing.
+        self.string = None
 
-    # The parsed JSON schema.
-    self.schema = schema
+        # The parsed JSON schema.
+        self.schema = schema
 
-    # Indentation level.
-    self.dent = dent
+        # Indentation level.
+        self.dent = dent
 
-    # Method that when called returns a prototype object for the schema with
-    # the given name.
-    self.from_cache = None
+        # Method that when called returns a prototype object for the schema with
+        # the given name.
+        self.from_cache = None
 
-    # List of names of schema already seen while parsing.
-    self.seen = seen
+        # List of names of schema already seen while parsing.
+        self.seen = seen
 
-  def emit(self, text):
-    """Add text as a line to the output.
+    def emit(self, text):
+        """Add text as a line to the output.
 
     Args:
       text: string, Text to output.
     """
-    self.value.extend(["  " * self.dent, text, '\n'])
+        self.value.extend(["  " * self.dent, text, "\n"])
 
-  def emitBegin(self, text):
-    """Add text to the output, but with no line terminator.
+    def emitBegin(self, text):
+        """Add text to the output, but with no line terminator.
 
     Args:
       text: string, Text to output.
       """
-    self.value.extend(["  " * self.dent, text])
+        self.value.extend(["  " * self.dent, text])
 
-  def emitEnd(self, text, comment):
-    """Add text and comment to the output with line terminator.
+    def emitEnd(self, text, comment):
+        """Add text and comment to the output with line terminator.
 
     Args:
       text: string, Text to output.
       comment: string, Python comment.
     """
-    if comment:
-      divider = '\n' + '  ' * (self.dent + 2) + '# '
-      lines = comment.splitlines()
-      lines = [x.rstrip() for x in lines]
-      comment = divider.join(lines)
-      self.value.extend([text, ' # ', comment, '\n'])
-    else:
-      self.value.extend([text, '\n'])
+        if comment:
+            divider = "\n" + "  " * (self.dent + 2) + "# "
+            lines = comment.splitlines()
+            lines = [x.rstrip() for x in lines]
+            comment = divider.join(lines)
+            self.value.extend([text, " # ", comment, "\n"])
+        else:
+            self.value.extend([text, "\n"])
 
-  def indent(self):
-    """Increase indentation level."""
-    self.dent += 1
+    def indent(self):
+        """Increase indentation level."""
+        self.dent += 1
 
-  def undent(self):
-    """Decrease indentation level."""
-    self.dent -= 1
+    def undent(self):
+        """Decrease indentation level."""
+        self.dent -= 1
 
-  def _to_str_impl(self, schema):
-    """Prototype object based on the schema, in Python code with comments.
+    def _to_str_impl(self, schema):
+        """Prototype object based on the schema, in Python code with comments.
 
     Args:
       schema: object, Parsed JSON schema file.
@@ -247,59 +248,59 @@
     Returns:
       Prototype object based on the schema, in Python code with comments.
     """
-    stype = schema.get('type')
-    if stype == 'object':
-      self.emitEnd('{', schema.get('description', ''))
-      self.indent()
-      if 'properties' in schema:
-        for pname, pschema in six.iteritems(schema.get('properties', {})):
-          self.emitBegin('"%s": ' % pname)
-          self._to_str_impl(pschema)
-      elif 'additionalProperties' in schema:
-        self.emitBegin('"a_key": ')
-        self._to_str_impl(schema['additionalProperties'])
-      self.undent()
-      self.emit('},')
-    elif '$ref' in schema:
-      schemaName = schema['$ref']
-      description = schema.get('description', '')
-      s = self.from_cache(schemaName, seen=self.seen)
-      parts = s.splitlines()
-      self.emitEnd(parts[0], description)
-      for line in parts[1:]:
-        self.emit(line.rstrip())
-    elif stype == 'boolean':
-      value = schema.get('default', 'True or False')
-      self.emitEnd('%s,' % str(value), schema.get('description', ''))
-    elif stype == 'string':
-      value = schema.get('default', 'A String')
-      self.emitEnd('"%s",' % str(value), schema.get('description', ''))
-    elif stype == 'integer':
-      value = schema.get('default', '42')
-      self.emitEnd('%s,' % str(value), schema.get('description', ''))
-    elif stype == 'number':
-      value = schema.get('default', '3.14')
-      self.emitEnd('%s,' % str(value), schema.get('description', ''))
-    elif stype == 'null':
-      self.emitEnd('None,', schema.get('description', ''))
-    elif stype == 'any':
-      self.emitEnd('"",', schema.get('description', ''))
-    elif stype == 'array':
-      self.emitEnd('[', schema.get('description'))
-      self.indent()
-      self.emitBegin('')
-      self._to_str_impl(schema['items'])
-      self.undent()
-      self.emit('],')
-    else:
-      self.emit('Unknown type! %s' % stype)
-      self.emitEnd('', '')
+        stype = schema.get("type")
+        if stype == "object":
+            self.emitEnd("{", schema.get("description", ""))
+            self.indent()
+            if "properties" in schema:
+                for pname, pschema in six.iteritems(schema.get("properties", {})):
+                    self.emitBegin('"%s": ' % pname)
+                    self._to_str_impl(pschema)
+            elif "additionalProperties" in schema:
+                self.emitBegin('"a_key": ')
+                self._to_str_impl(schema["additionalProperties"])
+            self.undent()
+            self.emit("},")
+        elif "$ref" in schema:
+            schemaName = schema["$ref"]
+            description = schema.get("description", "")
+            s = self.from_cache(schemaName, seen=self.seen)
+            parts = s.splitlines()
+            self.emitEnd(parts[0], description)
+            for line in parts[1:]:
+                self.emit(line.rstrip())
+        elif stype == "boolean":
+            value = schema.get("default", "True or False")
+            self.emitEnd("%s," % str(value), schema.get("description", ""))
+        elif stype == "string":
+            value = schema.get("default", "A String")
+            self.emitEnd('"%s",' % str(value), schema.get("description", ""))
+        elif stype == "integer":
+            value = schema.get("default", "42")
+            self.emitEnd("%s," % str(value), schema.get("description", ""))
+        elif stype == "number":
+            value = schema.get("default", "3.14")
+            self.emitEnd("%s," % str(value), schema.get("description", ""))
+        elif stype == "null":
+            self.emitEnd("None,", schema.get("description", ""))
+        elif stype == "any":
+            self.emitEnd('"",', schema.get("description", ""))
+        elif stype == "array":
+            self.emitEnd("[", schema.get("description"))
+            self.indent()
+            self.emitBegin("")
+            self._to_str_impl(schema["items"])
+            self.undent()
+            self.emit("],")
+        else:
+            self.emit("Unknown type! %s" % stype)
+            self.emitEnd("", "")
 
-    self.string = ''.join(self.value)
-    return self.string
+        self.string = "".join(self.value)
+        return self.string
 
-  def to_str(self, from_cache):
-    """Prototype object based on the schema, in Python code with comments.
+    def to_str(self, from_cache):
+        """Prototype object based on the schema, in Python code with comments.
 
     Args:
       from_cache: callable(name, seen), Callable that retrieves an object
@@ -310,5 +311,5 @@
       Prototype object based on the schema, in Python code with comments.
       The lines of the code will all be properly indented.
     """
-    self.from_cache = from_cache
-    return self._to_str_impl(self.schema)
+        self.from_cache = from_cache
+        return self._to_str_impl(self.schema)