chore: code clean up (#1442)

Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly:
- [x] Make sure to open an issue as a [bug/issue](https://github.com/googleapis/google-api-python-client/issues/new/choose) before writing your code!  That way we can discuss the change, evaluate designs, and agree on the general idea.
- [x] Ensure the tests and linter pass
- [x] Code coverage does not decrease (if any source code was changed)
- [x] Appropriate docs were updated (if necessary)

Fixes #1441 🦕
diff --git a/describe.py b/describe.py
index b565872..49ae274 100755
--- a/describe.py
+++ b/describe.py
@@ -24,7 +24,6 @@
 
 __author__ = "jcgregorio@google.com (Joe Gregorio)"
 
-from collections import OrderedDict
 import argparse
 import collections
 import json
@@ -36,14 +35,15 @@
 from googleapiclient.discovery import DISCOVERY_URI
 from googleapiclient.discovery import build
 from googleapiclient.discovery import build_from_document
-from googleapiclient.discovery import UnknownApiNameOrVersion
 from googleapiclient.http import build_http
-from googleapiclient.errors import HttpError
 
 import uritemplate
 
 DISCOVERY_DOC_DIR = (
-    pathlib.Path(__file__).parent.resolve() / "googleapiclient" / "discovery_cache" / "documents"
+    pathlib.Path(__file__).parent.resolve()
+    / "googleapiclient"
+    / "discovery_cache"
+    / "documents"
 )
 
 CSS = """<style>
@@ -171,16 +171,16 @@
 def safe_version(version):
     """Create a safe version of the verion string.
 
-  Needed so that we can distinguish between versions
-  and sub-collections in URIs. I.e. we don't want
-  adsense_v1.1 to refer to the '1' collection in the v1
-  version of the adsense api.
+    Needed so that we can distinguish between versions
+    and sub-collections in URIs. I.e. we don't want
+    adsense_v1.1 to refer to the '1' collection in the v1
+    version of the adsense api.
 
-  Args:
-    version: string, The version string.
-  Returns:
-    The string with '.' replaced with '_'.
-  """
+    Args:
+      version: string, The version string.
+    Returns:
+      The string with '.' replaced with '_'.
+    """
 
     return version.replace(".", "_")
 
@@ -188,14 +188,14 @@
 def unsafe_version(version):
     """Undoes what safe_version() does.
 
-  See safe_version() for the details.
+    See safe_version() for the details.
 
 
-  Args:
-    version: string, The safe version string.
-  Returns:
-    The string with '_' replaced with '.'.
-  """
+    Args:
+      version: string, The safe version string.
+    Returns:
+      The string with '_' replaced with '.'.
+    """
 
     return version.replace("_", ".")
 
@@ -203,12 +203,12 @@
 def method_params(doc):
     """Document the parameters of a method.
 
-  Args:
-    doc: string, The method's docstring.
+    Args:
+      doc: string, The method's docstring.
 
-  Returns:
-    The method signature as a string.
-  """
+    Returns:
+      The method signature as a string.
+    """
     doclines = doc.splitlines()
     if "Args:" in doclines:
         begin = doclines.index("Args:")
@@ -253,10 +253,10 @@
 def method(name, doc):
     """Documents an individual method.
 
-  Args:
-    name: string, Name of the method.
-    doc: string, The methods docstring.
-  """
+    Args:
+      name: string, Name of the method.
+      doc: string, The methods docstring.
+    """
     import html
 
     params = method_params(doc)
@@ -269,13 +269,13 @@
 def breadcrumbs(path, root_discovery):
     """Create the breadcrumb trail to this page of documentation.
 
-  Args:
-    path: string, Dot separated name of the resource.
-    root_discovery: Deserialized discovery document.
+    Args:
+      path: string, Dot separated name of the resource.
+      root_discovery: Deserialized discovery document.
 
-  Returns:
-    HTML with links to each of the parent resources of this resource.
-  """
+    Returns:
+      HTML with links to each of the parent resources of this resource.
+    """
     parts = path.split(".")
 
     crumbs = []
@@ -299,14 +299,14 @@
 def document_collection(resource, path, root_discovery, discovery, css=CSS):
     """Document a single collection in an API.
 
-  Args:
-    resource: Collection or service being documented.
-    path: string, Dot separated name of the resource.
-    root_discovery: Deserialized discovery document.
-    discovery: Deserialized discovery document, but just the portion that
-      describes the resource.
-    css: string, The CSS to include in the generated file.
-  """
+    Args:
+      resource: Collection or service being documented.
+      path: string, Dot separated name of the resource.
+      root_discovery: Deserialized discovery document.
+      discovery: Deserialized discovery document, but just the portion that
+        describes the resource.
+      css: string, The CSS to include in the generated file.
+    """
     collections = []
     methods = []
     resource_name = path.split(".")[-2]
@@ -357,7 +357,9 @@
     return "\n".join(html)
 
 
-def document_collection_recursive(resource, path, root_discovery, discovery, doc_destination_dir):
+def document_collection_recursive(
+    resource, path, root_discovery, discovery, doc_destination_dir
+):
     html = document_collection(resource, path, root_discovery, discovery)
 
     f = open(pathlib.Path(doc_destination_dir).joinpath(path + "html"), "w")
@@ -379,7 +381,7 @@
                 path + name + ".",
                 root_discovery,
                 discovery["resources"].get(dname, {}),
-                doc_destination_dir
+                doc_destination_dir,
             )
 
 
@@ -392,10 +394,11 @@
         uri (str): URI of the API's discovery document
         doc_destination_dir (str): relative path where the reference
             documentation should be saved.
-  """
+    """
     http = build_http()
     resp, content = http.request(
-        uri or uritemplate.expand(
+        uri
+        or uritemplate.expand(
             FLAGS.discovery_uri_template, {"api": name, "apiVersion": version}
         )
     )
@@ -413,11 +416,11 @@
         with open(discovery_file_path, "r+") as f:
             try:
                 json_data = json.load(f)
-                revision = json_data['revision']
+                revision = json_data["revision"]
             except json.JSONDecodeError:
                 revision = None
 
-            if revision is None or discovery['revision'] >= revision:
+            if revision is None or discovery["revision"] >= revision:
                 # Reset position to the beginning
                 f.seek(0)
                 # Write the changes to disk
@@ -426,25 +429,35 @@
                 f.truncate()
 
     elif resp.status == 404:
-        print("Warning: {} {} not found. HTTP Code: {}".format(name, version, resp.status))
+        print(
+            "Warning: {} {} not found. HTTP Code: {}".format(name, version, resp.status)
+        )
         return
     else:
-        print("Warning: {} {} could not be built. HTTP Code: {}".format(name, version, resp.status))
+        print(
+            "Warning: {} {} could not be built. HTTP Code: {}".format(
+                name, version, resp.status
+            )
+        )
         return
 
     document_collection_recursive(
-        service, "{}_{}.".format(name, safe_version(version)), discovery, discovery, doc_destination_dir
+        service,
+        "{}_{}.".format(name, safe_version(version)),
+        discovery,
+        discovery,
+        doc_destination_dir,
     )
 
 
 def document_api_from_discovery_document(discovery_url, doc_destination_dir):
     """Document the given API.
 
-  Args:
-    discovery_url (str): URI of discovery document.
-    doc_destination_dir (str): relative path where the reference
-        documentation should be saved.
-  """
+    Args:
+      discovery_url (str): URI of discovery document.
+      doc_destination_dir (str): relative path where the reference
+          documentation should be saved.
+    """
     http = build_http()
     response, content = http.request(discovery_url)
     discovery = json.loads(content)
@@ -455,11 +468,16 @@
     version = safe_version(discovery["version"])
 
     document_collection_recursive(
-        service, "{}_{}.".format(name, version), discovery, discovery, doc_destination_dir
+        service,
+        "{}_{}.".format(name, version),
+        discovery,
+        discovery,
+        doc_destination_dir,
     )
 
+
 def generate_all_api_documents(directory_uri=DIRECTORY_URI, doc_destination_dir=BASE):
-    """ Retrieve discovery artifacts and fetch reference documentations
+    """Retrieve discovery artifacts and fetch reference documentations
     for all apis listed in the public discovery directory.
     args:
         directory_uri (str): uri of the public discovery directory.
@@ -472,13 +490,18 @@
     if resp.status == 200:
         directory = json.loads(content)["items"]
         for api in directory:
-            document_api(api["name"], api["version"], api["discoveryRestUrl"], doc_destination_dir)
+            document_api(
+                api["name"],
+                api["version"],
+                api["discoveryRestUrl"],
+                doc_destination_dir,
+            )
             api_directory[api["name"]].append(api["version"])
 
         # sort by api name and version number
         for api in api_directory:
             api_directory[api] = sorted(api_directory[api])
-        api_directory = OrderedDict(
+        api_directory = collections.OrderedDict(
             sorted(api_directory.items(), key=lambda x: x[0])
         )
 
@@ -499,9 +522,14 @@
     else:
         sys.exit("Failed to load the discovery document.")
 
+
 if __name__ == "__main__":
     FLAGS = parser.parse_args(sys.argv[1:])
     if FLAGS.discovery_uri:
-        document_api_from_discovery_document(discovery_url=FLAGS.discovery_uri, doc_destination_dir=FLAGS.dest)
+        document_api_from_discovery_document(
+            discovery_url=FLAGS.discovery_uri, doc_destination_dir=FLAGS.dest
+        )
     else:
-        generate_all_api_documents(directory_uri=FLAGS.directory_uri, doc_destination_dir=FLAGS.dest)
+        generate_all_api_documents(
+            directory_uri=FLAGS.directory_uri, doc_destination_dir=FLAGS.dest
+        )
diff --git a/googleapiclient/http.py b/googleapiclient/http.py
index 5d4227f..0dd9c32 100644
--- a/googleapiclient/http.py
+++ b/googleapiclient/http.py
@@ -20,8 +20,6 @@
 """
 from __future__ import absolute_import
 import six
-from six.moves import http_client
-from six.moves import range
 
 __author__ = "jcgregorio@google.com (Joe Gregorio)"
 
@@ -87,13 +85,13 @@
 def _should_retry_response(resp_status, content):
     """Determines whether a response should be retried.
 
-  Args:
-    resp_status: The response status received.
-    content: The response content body.
+    Args:
+      resp_status: The response status received.
+      content: The response content body.
 
-  Returns:
-    True if the response should be retried, otherwise False.
-  """
+    Returns:
+      True if the response should be retried, otherwise False.
+    """
     reason = None
 
     # Retry on 5xx errors.
@@ -122,7 +120,14 @@
                 # first.
                 # See Issue #1243
                 # https://github.com/googleapis/google-api-python-client/issues/1243
-                error_detail_keyword = next((kw for kw in ["errors", "status", "message"] if kw in data["error"]), "")
+                error_detail_keyword = next(
+                    (
+                        kw
+                        for kw in ["errors", "status", "message"]
+                        if kw in data["error"]
+                    ),
+                    "",
+                )
 
                 if error_detail_keyword:
                     reason = data["error"][error_detail_keyword]
@@ -152,25 +157,25 @@
 ):
     """Retries an HTTP request multiple times while handling errors.
 
-  If after all retries the request still fails, last error is either returned as
-  return value (for HTTP 5xx errors) or thrown (for ssl.SSLError).
+    If after all retries the request still fails, last error is either returned as
+    return value (for HTTP 5xx errors) or thrown (for ssl.SSLError).
 
-  Args:
-    http: Http object to be used to execute request.
-    num_retries: Maximum number of retries.
-    req_type: Type of the request (used for logging retries).
-    sleep, rand: Functions to sleep for random time between retries.
-    uri: URI to be requested.
-    method: HTTP method to be used.
-    args, kwargs: Additional arguments passed to http.request.
+    Args:
+      http: Http object to be used to execute request.
+      num_retries: Maximum number of retries.
+      req_type: Type of the request (used for logging retries).
+      sleep, rand: Functions to sleep for random time between retries.
+      uri: URI to be requested.
+      method: HTTP method to be used.
+      args, kwargs: Additional arguments passed to http.request.
 
-  Returns:
-    resp, content - Response from the http request (may be HTTP 5xx).
-  """
+    Returns:
+      resp, content - Response from the http request (may be HTTP 5xx).
+    """
     resp = None
     content = None
     exception = None
-    for retry_num in range(num_retries + 1):
+    for retry_num in six.moves.range(num_retries + 1):
         if retry_num > 0:
             # Sleep before retrying.
             sleep_time = rand() * 2 ** retry_num
@@ -235,21 +240,21 @@
     def __init__(self, resumable_progress, total_size):
         """Constructor.
 
-    Args:
-      resumable_progress: int, bytes sent so far.
-      total_size: int, total bytes in complete upload, or None if the total
-        upload size isn't known ahead of time.
-    """
+        Args:
+          resumable_progress: int, bytes sent so far.
+          total_size: int, total bytes in complete upload, or None if the total
+            upload size isn't known ahead of time.
+        """
         self.resumable_progress = resumable_progress
         self.total_size = total_size
 
     def progress(self):
         """Percent of upload completed, as a float.
 
-    Returns:
-      the percentage complete as a float, returning 0.0 if the total size of
-      the upload is unknown.
-    """
+        Returns:
+          the percentage complete as a float, returning 0.0 if the total size of
+          the upload is unknown.
+        """
         if self.total_size is not None and self.total_size != 0:
             return float(self.resumable_progress) / float(self.total_size)
         else:
@@ -262,20 +267,20 @@
     def __init__(self, resumable_progress, total_size):
         """Constructor.
 
-    Args:
-      resumable_progress: int, bytes received so far.
-      total_size: int, total bytes in complete download.
-    """
+        Args:
+          resumable_progress: int, bytes received so far.
+          total_size: int, total bytes in complete download.
+        """
         self.resumable_progress = resumable_progress
         self.total_size = total_size
 
     def progress(self):
         """Percent of download completed, as a float.
 
-    Returns:
-      the percentage complete as a float, returning 0.0 if the total size of
-      the download is unknown.
-    """
+        Returns:
+          the percentage complete as a float, returning 0.0 if the total size of
+          the download is unknown.
+        """
         if self.total_size is not None and self.total_size != 0:
             return float(self.resumable_progress) / float(self.total_size)
         else:
@@ -285,107 +290,107 @@
 class MediaUpload(object):
     """Describes a media object to upload.
 
-  Base class that defines the interface of MediaUpload subclasses.
+    Base class that defines the interface of MediaUpload subclasses.
 
-  Note that subclasses of MediaUpload may allow you to control the chunksize
-  when uploading a media object. It is important to keep the size of the chunk
-  as large as possible to keep the upload efficient. Other factors may influence
-  the size of the chunk you use, particularly if you are working in an
-  environment where individual HTTP requests may have a hardcoded time limit,
-  such as under certain classes of requests under Google App Engine.
+    Note that subclasses of MediaUpload may allow you to control the chunksize
+    when uploading a media object. It is important to keep the size of the chunk
+    as large as possible to keep the upload efficient. Other factors may influence
+    the size of the chunk you use, particularly if you are working in an
+    environment where individual HTTP requests may have a hardcoded time limit,
+    such as under certain classes of requests under Google App Engine.
 
-  Streams are io.Base compatible objects that support seek(). Some MediaUpload
-  subclasses support using streams directly to upload data. Support for
-  streaming may be indicated by a MediaUpload sub-class and if appropriate for a
-  platform that stream will be used for uploading the media object. The support
-  for streaming is indicated by has_stream() returning True. The stream() method
-  should return an io.Base object that supports seek(). On platforms where the
-  underlying httplib module supports streaming, for example Python 2.6 and
-  later, the stream will be passed into the http library which will result in
-  less memory being used and possibly faster uploads.
+    Streams are io.Base compatible objects that support seek(). Some MediaUpload
+    subclasses support using streams directly to upload data. Support for
+    streaming may be indicated by a MediaUpload sub-class and if appropriate for a
+    platform that stream will be used for uploading the media object. The support
+    for streaming is indicated by has_stream() returning True. The stream() method
+    should return an io.Base object that supports seek(). On platforms where the
+    underlying httplib module supports streaming, for example Python 2.6 and
+    later, the stream will be passed into the http library which will result in
+    less memory being used and possibly faster uploads.
 
-  If you need to upload media that can't be uploaded using any of the existing
-  MediaUpload sub-class then you can sub-class MediaUpload for your particular
-  needs.
-  """
+    If you need to upload media that can't be uploaded using any of the existing
+    MediaUpload sub-class then you can sub-class MediaUpload for your particular
+    needs.
+    """
 
     def chunksize(self):
         """Chunk size for resumable uploads.
 
-    Returns:
-      Chunk size in bytes.
-    """
+        Returns:
+          Chunk size in bytes.
+        """
         raise NotImplementedError()
 
     def mimetype(self):
         """Mime type of the body.
 
-    Returns:
-      Mime type.
-    """
+        Returns:
+          Mime type.
+        """
         return "application/octet-stream"
 
     def size(self):
         """Size of upload.
 
-    Returns:
-      Size of the body, or None of the size is unknown.
-    """
+        Returns:
+          Size of the body, or None of the size is unknown.
+        """
         return None
 
     def resumable(self):
         """Whether this upload is resumable.
 
-    Returns:
-      True if resumable upload or False.
-    """
+        Returns:
+          True if resumable upload or False.
+        """
         return False
 
     def getbytes(self, begin, end):
         """Get bytes from the media.
 
-    Args:
-      begin: int, offset from beginning of file.
-      length: int, number of bytes to read, starting at begin.
+        Args:
+          begin: int, offset from beginning of file.
+          length: int, number of bytes to read, starting at begin.
 
-    Returns:
-      A string of bytes read. May be shorter than length if EOF was reached
-      first.
-    """
+        Returns:
+          A string of bytes read. May be shorter than length if EOF was reached
+          first.
+        """
         raise NotImplementedError()
 
     def has_stream(self):
         """Does the underlying upload support a streaming interface.
 
-    Streaming means it is an io.IOBase subclass that supports seek, i.e.
-    seekable() returns True.
+        Streaming means it is an io.IOBase subclass that supports seek, i.e.
+        seekable() returns True.
 
-    Returns:
-      True if the call to stream() will return an instance of a seekable io.Base
-      subclass.
-    """
+        Returns:
+          True if the call to stream() will return an instance of a seekable io.Base
+          subclass.
+        """
         return False
 
     def stream(self):
         """A stream interface to the data being uploaded.
 
-    Returns:
-      The returned value is an io.IOBase subclass that supports seek, i.e.
-      seekable() returns True.
-    """
+        Returns:
+          The returned value is an io.IOBase subclass that supports seek, i.e.
+          seekable() returns True.
+        """
         raise NotImplementedError()
 
     @util.positional(1)
     def _to_json(self, strip=None):
         """Utility function for creating a JSON representation of a MediaUpload.
 
-    Args:
-      strip: array, An array of names of members to not include in the JSON.
+        Args:
+          strip: array, An array of names of members to not include in the JSON.
 
-    Returns:
-       string, a JSON representation of this instance, suitable to pass to
-       from_json().
-    """
+        Returns:
+           string, a JSON representation of this instance, suitable to pass to
+           from_json().
+        """
         t = type(self)
         d = copy.copy(self.__dict__)
         if strip is not None:
@@ -398,24 +403,24 @@
     def to_json(self):
         """Create a JSON representation of an instance of MediaUpload.
 
-    Returns:
-       string, a JSON representation of this instance, suitable to pass to
-       from_json().
-    """
+        Returns:
+           string, a JSON representation of this instance, suitable to pass to
+           from_json().
+        """
         return self._to_json()
 
     @classmethod
     def new_from_json(cls, s):
         """Utility class method to instantiate a MediaUpload subclass from a JSON
-    representation produced by to_json().
+        representation produced by to_json().
 
-    Args:
-      s: string, JSON from to_json().
+        Args:
+          s: string, JSON from to_json().
 
-    Returns:
-      An instance of the subclass of MediaUpload that was serialized with
-      to_json().
-    """
+        Returns:
+          An instance of the subclass of MediaUpload that was serialized with
+          to_json().
+        """
         data = json.loads(s)
         # Find and call the right classmethod from_json() to restore the object.
         module = data["_module"]
@@ -428,44 +433,44 @@
 class MediaIoBaseUpload(MediaUpload):
     """A MediaUpload for a io.Base objects.
 
-  Note that the Python file object is compatible with io.Base and can be used
-  with this class also.
+    Note that the Python file object is compatible with io.Base and can be used
+    with this class also.
 
-    fh = BytesIO('...Some data to upload...')
-    media = MediaIoBaseUpload(fh, mimetype='image/png',
-      chunksize=1024*1024, resumable=True)
-    farm.animals().insert(
-        id='cow',
-        name='cow.png',
-        media_body=media).execute()
+      fh = BytesIO('...Some data to upload...')
+      media = MediaIoBaseUpload(fh, mimetype='image/png',
+        chunksize=1024*1024, resumable=True)
+      farm.animals().insert(
+          id='cow',
+          name='cow.png',
+          media_body=media).execute()
 
-  Depending on the platform you are working on, you may pass -1 as the
-  chunksize, which indicates that the entire file should be uploaded in a single
-  request. If the underlying platform supports streams, such as Python 2.6 or
-  later, then this can be very efficient as it avoids multiple connections, and
-  also avoids loading the entire file into memory before sending it. Note that
-  Google App Engine has a 5MB limit on request size, so you should never set
-  your chunksize larger than 5MB, or to -1.
-  """
+    Depending on the platform you are working on, you may pass -1 as the
+    chunksize, which indicates that the entire file should be uploaded in a single
+    request. If the underlying platform supports streams, such as Python 2.6 or
+    later, then this can be very efficient as it avoids multiple connections, and
+    also avoids loading the entire file into memory before sending it. Note that
+    Google App Engine has a 5MB limit on request size, so you should never set
+    your chunksize larger than 5MB, or to -1.
+    """
 
     @util.positional(3)
     def __init__(self, fd, mimetype, chunksize=DEFAULT_CHUNK_SIZE, resumable=False):
         """Constructor.
 
-    Args:
-      fd: io.Base or file object, The source of the bytes to upload. MUST be
-        opened in blocking mode, do not use streams opened in non-blocking mode.
-        The given stream must be seekable, that is, it must be able to call
-        seek() on fd.
-      mimetype: string, Mime-type of the file.
-      chunksize: int, File will be uploaded in chunks of this many bytes. Only
-        used if resumable=True. Pass in a value of -1 if the file is to be
-        uploaded as a single chunk. Note that Google App Engine has a 5MB limit
-        on request size, so you should never set your chunksize larger than 5MB,
-        or to -1.
-      resumable: bool, True if this is a resumable upload. False means upload
-        in a single request.
-    """
+        Args:
+          fd: io.Base or file object, The source of the bytes to upload. MUST be
+            opened in blocking mode, do not use streams opened in non-blocking mode.
+            The given stream must be seekable, that is, it must be able to call
+            seek() on fd.
+          mimetype: string, Mime-type of the file.
+          chunksize: int, File will be uploaded in chunks of this many bytes. Only
+            used if resumable=True. Pass in a value of -1 if the file is to be
+            uploaded as a single chunk. Note that Google App Engine has a 5MB limit
+            on request size, so you should never set your chunksize larger than 5MB,
+            or to -1.
+          resumable: bool, True if this is a resumable upload. False means upload
+            in a single request.
+        """
         super(MediaIoBaseUpload, self).__init__()
         self._fd = fd
         self._mimetype = mimetype
@@ -480,68 +485,68 @@
     def chunksize(self):
         """Chunk size for resumable uploads.
 
-    Returns:
-      Chunk size in bytes.
-    """
+        Returns:
+          Chunk size in bytes.
+        """
         return self._chunksize
 
     def mimetype(self):
         """Mime type of the body.
 
-    Returns:
-      Mime type.
-    """
+        Returns:
+          Mime type.
+        """
         return self._mimetype
 
     def size(self):
         """Size of upload.
 
-    Returns:
-      Size of the body, or None of the size is unknown.
-    """
+        Returns:
+          Size of the body, or None of the size is unknown.
+        """
         return self._size
 
     def resumable(self):
         """Whether this upload is resumable.
 
-    Returns:
-      True if resumable upload or False.
-    """
+        Returns:
+          True if resumable upload or False.
+        """
         return self._resumable
 
     def getbytes(self, begin, length):
         """Get bytes from the media.
 
-    Args:
-      begin: int, offset from beginning of file.
-      length: int, number of bytes to read, starting at begin.
+        Args:
+          begin: int, offset from beginning of file.
+          length: int, number of bytes to read, starting at begin.
 
-    Returns:
-      A string of bytes read. May be shorted than length if EOF was reached
-      first.
-    """
+        Returns:
+          A string of bytes read. May be shorted than length if EOF was reached
+          first.
+        """
         self._fd.seek(begin)
         return self._fd.read(length)
 
     def has_stream(self):
         """Does the underlying upload support a streaming interface.
 
-    Streaming means it is an io.IOBase subclass that supports seek, i.e.
-    seekable() returns True.
+        Streaming means it is an io.IOBase subclass that supports seek, i.e.
+        seekable() returns True.
 
-    Returns:
-      True if the call to stream() will return an instance of a seekable io.Base
-      subclass.
-    """
+        Returns:
+          True if the call to stream() will return an instance of a seekable io.Base
+          subclass.
+        """
         return True
 
     def stream(self):
         """A stream interface to the data being uploaded.
 
-    Returns:
-      The returned value is an io.IOBase subclass that supports seek, i.e.
-      seekable() returns True.
-    """
+        Returns:
+          The returned value is an io.IOBase subclass that supports seek, i.e.
+          seekable() returns True.
+        """
         return self._fd
 
     def to_json(self):
@@ -552,24 +557,24 @@
 class MediaFileUpload(MediaIoBaseUpload):
     """A MediaUpload for a file.
 
-  Construct a MediaFileUpload and pass as the media_body parameter of the
-  method. For example, if we had a service that allowed uploading images:
+    Construct a MediaFileUpload and pass as the media_body parameter of the
+    method. For example, if we had a service that allowed uploading images:
 
-    media = MediaFileUpload('cow.png', mimetype='image/png',
-      chunksize=1024*1024, resumable=True)
-    farm.animals().insert(
-        id='cow',
-        name='cow.png',
-        media_body=media).execute()
+      media = MediaFileUpload('cow.png', mimetype='image/png',
+        chunksize=1024*1024, resumable=True)
+      farm.animals().insert(
+          id='cow',
+          name='cow.png',
+          media_body=media).execute()
 
-  Depending on the platform you are working on, you may pass -1 as the
-  chunksize, which indicates that the entire file should be uploaded in a single
-  request. If the underlying platform supports streams, such as Python 2.6 or
-  later, then this can be very efficient as it avoids multiple connections, and
-  also avoids loading the entire file into memory before sending it. Note that
-  Google App Engine has a 5MB limit on request size, so you should never set
-  your chunksize larger than 5MB, or to -1.
-  """
+    Depending on the platform you are working on, you may pass -1 as the
+    chunksize, which indicates that the entire file should be uploaded in a single
+    request. If the underlying platform supports streams, such as Python 2.6 or
+    later, then this can be very efficient as it avoids multiple connections, and
+    also avoids loading the entire file into memory before sending it. Note that
+    Google App Engine has a 5MB limit on request size, so you should never set
+    your chunksize larger than 5MB, or to -1.
+    """
 
     @util.positional(2)
     def __init__(
@@ -577,18 +582,18 @@
     ):
         """Constructor.
 
-    Args:
-      filename: string, Name of the file.
-      mimetype: string, Mime-type of the file. If None then a mime-type will be
-        guessed from the file extension.
-      chunksize: int, File will be uploaded in chunks of this many bytes. Only
-        used if resumable=True. Pass in a value of -1 if the file is to be
-        uploaded in a single chunk. Note that Google App Engine has a 5MB limit
-        on request size, so you should never set your chunksize larger than 5MB,
-        or to -1.
-      resumable: bool, True if this is a resumable upload. False means upload
-        in a single request.
-    """
+        Args:
+          filename: string, Name of the file.
+          mimetype: string, Mime-type of the file. If None then a mime-type will be
+            guessed from the file extension.
+          chunksize: int, File will be uploaded in chunks of this many bytes. Only
+            used if resumable=True. Pass in a value of -1 if the file is to be
+            uploaded in a single chunk. Note that Google App Engine has a 5MB limit
+            on request size, so you should never set your chunksize larger than 5MB,
+            or to -1.
+          resumable: bool, True if this is a resumable upload. False means upload
+            in a single request.
+        """
         self._fd = None
         self._filename = filename
         self._fd = open(self._filename, "rb")
@@ -609,10 +614,10 @@
     def to_json(self):
         """Creating a JSON representation of an instance of MediaFileUpload.
 
-    Returns:
-       string, a JSON representation of this instance, suitable to pass to
-       from_json().
-    """
+        Returns:
+           string, a JSON representation of this instance, suitable to pass to
+           from_json().
+        """
         return self._to_json(strip=["_fd"])
 
     @staticmethod
@@ -629,9 +634,9 @@
 class MediaInMemoryUpload(MediaIoBaseUpload):
     """MediaUpload for a chunk of bytes.
 
-  DEPRECATED: Use MediaIoBaseUpload with either io.TextIOBase or StringIO for
-  the stream.
-  """
+    DEPRECATED: Use MediaIoBaseUpload with either io.TextIOBase or StringIO for
+    the stream.
+    """
 
     @util.positional(2)
     def __init__(
@@ -643,18 +648,18 @@
     ):
         """Create a new MediaInMemoryUpload.
 
-  DEPRECATED: Use MediaIoBaseUpload with either io.TextIOBase or StringIO for
-  the stream.
+        DEPRECATED: Use MediaIoBaseUpload with either io.TextIOBase or StringIO for
+        the stream.
 
-  Args:
-    body: string, Bytes of body content.
-    mimetype: string, Mime-type of the file or default of
-      'application/octet-stream'.
-    chunksize: int, File will be uploaded in chunks of this many bytes. Only
-      used if resumable=True.
-    resumable: bool, True if this is a resumable upload. False means upload
-      in a single request.
-    """
+        Args:
+          body: string, Bytes of body content.
+          mimetype: string, Mime-type of the file or default of
+            'application/octet-stream'.
+          chunksize: int, File will be uploaded in chunks of this many bytes. Only
+            used if resumable=True.
+          resumable: bool, True if this is a resumable upload. False means upload
+            in a single request.
+        """
         fd = BytesIO(body)
         super(MediaInMemoryUpload, self).__init__(
             fd, mimetype, chunksize=chunksize, resumable=resumable
@@ -662,36 +667,36 @@
 
 
 class MediaIoBaseDownload(object):
-    """"Download media resources.
+    """ "Download media resources.
 
-  Note that the Python file object is compatible with io.Base and can be used
-  with this class also.
+    Note that the Python file object is compatible with io.Base and can be used
+    with this class also.
 
 
-  Example:
-    request = farms.animals().get_media(id='cow')
-    fh = io.FileIO('cow.png', mode='wb')
-    downloader = MediaIoBaseDownload(fh, request, chunksize=1024*1024)
+    Example:
+      request = farms.animals().get_media(id='cow')
+      fh = io.FileIO('cow.png', mode='wb')
+      downloader = MediaIoBaseDownload(fh, request, chunksize=1024*1024)
 
-    done = False
-    while done is False:
-      status, done = downloader.next_chunk()
-      if status:
-        print "Download %d%%." % int(status.progress() * 100)
-    print "Download Complete!"
-  """
+      done = False
+      while done is False:
+        status, done = downloader.next_chunk()
+        if status:
+          print "Download %d%%." % int(status.progress() * 100)
+      print "Download Complete!"
+    """
 
     @util.positional(3)
     def __init__(self, fd, request, chunksize=DEFAULT_CHUNK_SIZE):
         """Constructor.
 
-    Args:
-      fd: io.Base or file object, The stream in which to write the downloaded
-        bytes.
-      request: googleapiclient.http.HttpRequest, the media request to perform in
-        chunks.
-      chunksize: int, File will be downloaded in chunks of this many bytes.
-    """
+        Args:
+          fd: io.Base or file object, The stream in which to write the downloaded
+            bytes.
+          request: googleapiclient.http.HttpRequest, the media request to perform in
+            chunks.
+          chunksize: int, File will be downloaded in chunks of this many bytes.
+        """
         self._fd = fd
         self._request = request
         self._uri = request.uri
@@ -716,21 +721,21 @@
     def next_chunk(self, num_retries=0):
         """Get the next chunk of the download.
 
-    Args:
-      num_retries: Integer, number of times to retry with randomized
-            exponential backoff. If all retries fail, the raised HttpError
-            represents the last request. If zero (default), we attempt the
-            request only once.
+        Args:
+          num_retries: Integer, number of times to retry with randomized
+                exponential backoff. If all retries fail, the raised HttpError
+                represents the last request. If zero (default), we attempt the
+                request only once.
 
-    Returns:
-      (status, done): (MediaDownloadProgress, boolean)
-         The value of 'done' will be True when the media has been fully
-         downloaded or the total size of the media is unknown.
+        Returns:
+          (status, done): (MediaDownloadProgress, boolean)
+             The value of 'done' will be True when the media has been fully
+             downloaded or the total size of the media is unknown.
 
-    Raises:
-      googleapiclient.errors.HttpError if the response was not a 2xx.
-      httplib2.HttpLib2Error if a transport error has occurred.
-    """
+        Raises:
+          googleapiclient.errors.HttpError if the response was not a 2xx.
+          httplib2.HttpLib2Error if a transport error has occurred.
+        """
         headers = self._headers.copy()
         headers["range"] = "bytes=%d-%d" % (
             self._progress,
@@ -773,28 +778,31 @@
             self._total_size = int(length)
             if self._total_size == 0:
                 self._done = True
-                return MediaDownloadProgress(self._progress, self._total_size), self._done
+                return (
+                    MediaDownloadProgress(self._progress, self._total_size),
+                    self._done,
+                )
         raise HttpError(resp, content, uri=self._uri)
 
 
 class _StreamSlice(object):
     """Truncated stream.
 
-  Takes a stream and presents a stream that is a slice of the original stream.
-  This is used when uploading media in chunks. In later versions of Python a
-  stream can be passed to httplib in place of the string of data to send. The
-  problem is that httplib just blindly reads to the end of the stream. This
-  wrapper presents a virtual stream that only reads to the end of the chunk.
-  """
+    Takes a stream and presents a stream that is a slice of the original stream.
+    This is used when uploading media in chunks. In later versions of Python a
+    stream can be passed to httplib in place of the string of data to send. The
+    problem is that httplib just blindly reads to the end of the stream. This
+    wrapper presents a virtual stream that only reads to the end of the chunk.
+    """
 
     def __init__(self, stream, begin, chunksize):
         """Constructor.
 
-    Args:
-      stream: (io.Base, file object), the stream to wrap.
-      begin: int, the seek position the chunk begins at.
-      chunksize: int, the size of the chunk.
-    """
+        Args:
+          stream: (io.Base, file object), the stream to wrap.
+          begin: int, the seek position the chunk begins at.
+          chunksize: int, the size of the chunk.
+        """
         self._stream = stream
         self._begin = begin
         self._chunksize = chunksize
@@ -803,12 +811,12 @@
     def read(self, n=-1):
         """Read n bytes.
 
-    Args:
-      n, int, the number of bytes to read.
+        Args:
+          n, int, the number of bytes to read.
 
-    Returns:
-      A string of length 'n', or less if EOF is reached.
-    """
+        Returns:
+          A string of length 'n', or less if EOF is reached.
+        """
         # The data left available to read sits in [cur, end)
         cur = self._stream.tell()
         end = self._begin + self._chunksize
@@ -834,18 +842,18 @@
     ):
         """Constructor for an HttpRequest.
 
-    Args:
-      http: httplib2.Http, the transport object to use to make a request
-      postproc: callable, called on the HTTP response and content to transform
-                it into a data object before returning, or raising an exception
-                on an error.
-      uri: string, the absolute URI to send the request to
-      method: string, the HTTP method to use
-      body: string, the request body of the HTTP request,
-      headers: dict, the HTTP request headers
-      methodId: string, a unique identifier for the API method being called.
-      resumable: MediaUpload, None if this is not a resumbale request.
-    """
+        Args:
+          http: httplib2.Http, the transport object to use to make a request
+          postproc: callable, called on the HTTP response and content to transform
+                    it into a data object before returning, or raising an exception
+                    on an error.
+          uri: string, the absolute URI to send the request to
+          method: string, the HTTP method to use
+          body: string, the request body of the HTTP request,
+          headers: dict, the HTTP request headers
+          methodId: string, a unique identifier for the API method being called.
+          resumable: MediaUpload, None if this is not a resumbale request.
+        """
         self.uri = uri
         self.method = method
         self.body = body
@@ -874,22 +882,22 @@
     def execute(self, http=None, num_retries=0):
         """Execute the request.
 
-    Args:
-      http: httplib2.Http, an http object to be used in place of the
-            one the HttpRequest request object was constructed with.
-      num_retries: Integer, number of times to retry with randomized
-            exponential backoff. If all retries fail, the raised HttpError
-            represents the last request. If zero (default), we attempt the
-            request only once.
+        Args:
+          http: httplib2.Http, an http object to be used in place of the
+                one the HttpRequest request object was constructed with.
+          num_retries: Integer, number of times to retry with randomized
+                exponential backoff. If all retries fail, the raised HttpError
+                represents the last request. If zero (default), we attempt the
+                request only once.
 
-    Returns:
-      A deserialized object model of the response body as determined
-      by the postproc.
+        Returns:
+          A deserialized object model of the response body as determined
+          by the postproc.
 
-    Raises:
-      googleapiclient.errors.HttpError if the response was not a 2xx.
-      httplib2.HttpLib2Error if a transport error has occurred.
-    """
+        Raises:
+          googleapiclient.errors.HttpError if the response was not a 2xx.
+          httplib2.HttpLib2Error if a transport error has occurred.
+        """
         if http is None:
             http = self.http
 
@@ -939,53 +947,53 @@
     def add_response_callback(self, cb):
         """add_response_headers_callback
 
-    Args:
-      cb: Callback to be called on receiving the response headers, of signature:
+        Args:
+          cb: Callback to be called on receiving the response headers, of signature:
 
-      def cb(resp):
-        # Where resp is an instance of httplib2.Response
-    """
+          def cb(resp):
+            # Where resp is an instance of httplib2.Response
+        """
         self.response_callbacks.append(cb)
 
     @util.positional(1)
     def next_chunk(self, http=None, num_retries=0):
         """Execute the next step of a resumable upload.
 
-    Can only be used if the method being executed supports media uploads and
-    the MediaUpload object passed in was flagged as using resumable upload.
+        Can only be used if the method being executed supports media uploads and
+        the MediaUpload object passed in was flagged as using resumable upload.
 
-    Example:
+        Example:
 
-      media = MediaFileUpload('cow.png', mimetype='image/png',
-                              chunksize=1000, resumable=True)
-      request = farm.animals().insert(
-          id='cow',
-          name='cow.png',
-          media_body=media)
+          media = MediaFileUpload('cow.png', mimetype='image/png',
+                                  chunksize=1000, resumable=True)
+          request = farm.animals().insert(
+              id='cow',
+              name='cow.png',
+              media_body=media)
 
-      response = None
-      while response is None:
-        status, response = request.next_chunk()
-        if status:
-          print "Upload %d%% complete." % int(status.progress() * 100)
+          response = None
+          while response is None:
+            status, response = request.next_chunk()
+            if status:
+              print "Upload %d%% complete." % int(status.progress() * 100)
 
 
-    Args:
-      http: httplib2.Http, an http object to be used in place of the
-            one the HttpRequest request object was constructed with.
-      num_retries: Integer, number of times to retry with randomized
-            exponential backoff. If all retries fail, the raised HttpError
-            represents the last request. If zero (default), we attempt the
-            request only once.
+        Args:
+          http: httplib2.Http, an http object to be used in place of the
+                one the HttpRequest request object was constructed with.
+          num_retries: Integer, number of times to retry with randomized
+                exponential backoff. If all retries fail, the raised HttpError
+                represents the last request. If zero (default), we attempt the
+                request only once.
 
-    Returns:
-      (status, body): (ResumableMediaStatus, object)
-         The body will be None until the resumable media is fully uploaded.
+        Returns:
+          (status, body): (ResumableMediaStatus, object)
+             The body will be None until the resumable media is fully uploaded.
 
-    Raises:
-      googleapiclient.errors.HttpError if the response was not a 2xx.
-      httplib2.HttpLib2Error if a transport error has occurred.
-    """
+        Raises:
+          googleapiclient.errors.HttpError if the response was not a 2xx.
+          httplib2.HttpLib2Error if a transport error has occurred.
+        """
         if http is None:
             http = self.http
 
@@ -1063,9 +1071,13 @@
         # sending "bytes 0--1/0" results in an invalid request
         # Only add header "Content-Range" if chunk_end != -1
         if chunk_end != -1:
-            headers["Content-Range"] = "bytes %d-%d/%s" % (self.resumable_progress, chunk_end, size)
+            headers["Content-Range"] = "bytes %d-%d/%s" % (
+                self.resumable_progress,
+                chunk_end,
+                size,
+            )
 
-        for retry_num in range(num_retries + 1):
+        for retry_num in six.moves.range(num_retries + 1):
             if retry_num > 0:
                 self._sleep(self._rand() * 2 ** retry_num)
                 LOGGER.warning(
@@ -1088,17 +1100,17 @@
     def _process_response(self, resp, content):
         """Process the response from a single chunk upload.
 
-    Args:
-      resp: httplib2.Response, the response object.
-      content: string, the content of the response.
+        Args:
+          resp: httplib2.Response, the response object.
+          content: string, the content of the response.
 
-    Returns:
-      (status, body): (ResumableMediaStatus, object)
-         The body will be None until the resumable media is fully uploaded.
+        Returns:
+          (status, body): (ResumableMediaStatus, object)
+             The body will be None until the resumable media is fully uploaded.
 
-    Raises:
-      googleapiclient.errors.HttpError if the response was not a 2xx or a 308.
-    """
+        Raises:
+          googleapiclient.errors.HttpError if the response was not a 2xx or a 308.
+        """
         if resp.status in [200, 201]:
             self._in_error_state = False
             return None, self.postproc(resp, content)
@@ -1158,48 +1170,48 @@
 class BatchHttpRequest(object):
     """Batches multiple HttpRequest objects into a single HTTP request.
 
-  Example:
-    from googleapiclient.http import BatchHttpRequest
+    Example:
+      from googleapiclient.http import BatchHttpRequest
 
-    def list_animals(request_id, response, exception):
-      \"\"\"Do something with the animals list response.\"\"\"
-      if exception is not None:
-        # Do something with the exception.
-        pass
-      else:
-        # Do something with the response.
-        pass
+      def list_animals(request_id, response, exception):
+        \"\"\"Do something with the animals list response.\"\"\"
+        if exception is not None:
+          # Do something with the exception.
+          pass
+        else:
+          # Do something with the response.
+          pass
 
-    def list_farmers(request_id, response, exception):
-      \"\"\"Do something with the farmers list response.\"\"\"
-      if exception is not None:
-        # Do something with the exception.
-        pass
-      else:
-        # Do something with the response.
-        pass
+      def list_farmers(request_id, response, exception):
+        \"\"\"Do something with the farmers list response.\"\"\"
+        if exception is not None:
+          # Do something with the exception.
+          pass
+        else:
+          # Do something with the response.
+          pass
 
-    service = build('farm', 'v2')
+      service = build('farm', 'v2')
 
-    batch = BatchHttpRequest()
+      batch = BatchHttpRequest()
 
-    batch.add(service.animals().list(), list_animals)
-    batch.add(service.farmers().list(), list_farmers)
-    batch.execute(http=http)
-  """
+      batch.add(service.animals().list(), list_animals)
+      batch.add(service.farmers().list(), list_farmers)
+      batch.execute(http=http)
+    """
 
     @util.positional(1)
     def __init__(self, callback=None, batch_uri=None):
         """Constructor for a BatchHttpRequest.
 
-    Args:
-      callback: callable, A callback to be called for each response, of the
-        form callback(id, response, exception). The first parameter is the
-        request id, and the second is the deserialized response object. The
-        third is an googleapiclient.errors.HttpError exception object if an HTTP error
-        occurred while processing the request, or None if no error occurred.
-      batch_uri: string, URI to send batch requests to.
-    """
+        Args:
+          callback: callable, A callback to be called for each response, of the
+            form callback(id, response, exception). The first parameter is the
+            request id, and the second is the deserialized response object. The
+            third is an googleapiclient.errors.HttpError exception object if an HTTP error
+            occurred while processing the request, or None if no error occurred.
+          batch_uri: string, URI to send batch requests to.
+        """
         if batch_uri is None:
             batch_uri = _LEGACY_BATCH_URI
 
@@ -1242,10 +1254,10 @@
     def _refresh_and_apply_credentials(self, request, http):
         """Refresh the credentials and apply to the request.
 
-    Args:
-      request: HttpRequest, the request.
-      http: httplib2.Http, the global http object for the batch.
-    """
+        Args:
+          request: HttpRequest, the request.
+          http: httplib2.Http, the global http object for the batch.
+        """
         # For the credentials to refresh, but only once per refresh_token
         # If there is no http per the request then refresh the http passed in
         # via execute()
@@ -1272,14 +1284,14 @@
     def _id_to_header(self, id_):
         """Convert an id to a Content-ID header value.
 
-    Args:
-      id_: string, identifier of individual request.
+        Args:
+          id_: string, identifier of individual request.
 
-    Returns:
-      A Content-ID header with the id_ encoded into it. A UUID is prepended to
-      the value because Content-ID headers are supposed to be universally
-      unique.
-    """
+        Returns:
+          A Content-ID header with the id_ encoded into it. A UUID is prepended to
+          the value because Content-ID headers are supposed to be universally
+          unique.
+        """
         if self._base_id is None:
             self._base_id = uuid.uuid4()
 
@@ -1291,18 +1303,18 @@
     def _header_to_id(self, header):
         """Convert a Content-ID header value to an id.
 
-    Presumes the Content-ID header conforms to the format that _id_to_header()
-    returns.
+        Presumes the Content-ID header conforms to the format that _id_to_header()
+        returns.
 
-    Args:
-      header: string, Content-ID header value.
+        Args:
+          header: string, Content-ID header value.
 
-    Returns:
-      The extracted id value.
+        Returns:
+          The extracted id value.
 
-    Raises:
-      BatchError if the header is not in the expected format.
-    """
+        Raises:
+          BatchError if the header is not in the expected format.
+        """
         if header[0] != "<" or header[-1] != ">":
             raise BatchError("Invalid value for Content-ID: %s" % header)
         if "+" not in header:
@@ -1314,12 +1326,12 @@
     def _serialize_request(self, request):
         """Convert an HttpRequest object into a string.
 
-    Args:
-      request: HttpRequest, the request to serialize.
+        Args:
+          request: HttpRequest, the request to serialize.
 
-    Returns:
-      The request as a string in application/http format.
-    """
+        Returns:
+          The request as a string in application/http format.
+        """
         # Construct status line
         parsed = urlparse(request.uri)
         request_line = urlunparse(
@@ -1362,12 +1374,12 @@
     def _deserialize_response(self, payload):
         """Convert string into httplib2 response and content.
 
-    Args:
-      payload: string, headers and body as a string.
+        Args:
+          payload: string, headers and body as a string.
 
-    Returns:
-      A pair (resp, content), such as would be returned from httplib2.request.
-    """
+        Returns:
+          A pair (resp, content), such as would be returned from httplib2.request.
+        """
         # Strip off the status line
         status_line, payload = payload.split("\n", 1)
         protocol, status, reason = status_line.split(" ", 2)
@@ -1390,11 +1402,11 @@
     def _new_id(self):
         """Create a new id.
 
-    Auto incrementing number that avoids conflicts with ids already used.
+        Auto incrementing number that avoids conflicts with ids already used.
 
-    Returns:
-       string, a new unique id.
-    """
+        Returns:
+           string, a new unique id.
+        """
         self._last_auto_id += 1
         while str(self._last_auto_id) in self._requests:
             self._last_auto_id += 1
@@ -1404,31 +1416,31 @@
     def add(self, request, callback=None, request_id=None):
         """Add a new request.
 
-    Every callback added will be paired with a unique id, the request_id. That
-    unique id will be passed back to the callback when the response comes back
-    from the server. The default behavior is to have the library generate it's
-    own unique id. If the caller passes in a request_id then they must ensure
-    uniqueness for each request_id, and if they are not an exception is
-    raised. Callers should either supply all request_ids or never supply a
-    request id, to avoid such an error.
+        Every callback added will be paired with a unique id, the request_id. That
+        unique id will be passed back to the callback when the response comes back
+        from the server. The default behavior is to have the library generate it's
+        own unique id. If the caller passes in a request_id then they must ensure
+        uniqueness for each request_id, and if they are not an exception is
+        raised. Callers should either supply all request_ids or never supply a
+        request id, to avoid such an error.
 
-    Args:
-      request: HttpRequest, Request to add to the batch.
-      callback: callable, A callback to be called for this response, of the
-        form callback(id, response, exception). The first parameter is the
-        request id, and the second is the deserialized response object. The
-        third is an googleapiclient.errors.HttpError exception object if an HTTP error
-        occurred while processing the request, or None if no errors occurred.
-      request_id: string, A unique id for the request. The id will be passed
-        to the callback with the response.
+        Args:
+          request: HttpRequest, Request to add to the batch.
+          callback: callable, A callback to be called for this response, of the
+            form callback(id, response, exception). The first parameter is the
+            request id, and the second is the deserialized response object. The
+            third is an googleapiclient.errors.HttpError exception object if an HTTP error
+            occurred while processing the request, or None if no errors occurred.
+          request_id: string, A unique id for the request. The id will be passed
+            to the callback with the response.
 
-    Returns:
-      None
+        Returns:
+          None
 
-    Raises:
-      BatchError if a media request is added to a batch.
-      KeyError is the request_id is not unique.
-    """
+        Raises:
+          BatchError if a media request is added to a batch.
+          KeyError is the request_id is not unique.
+        """
 
         if len(self._order) >= MAX_BATCH_LIMIT:
             raise BatchError(
@@ -1448,16 +1460,16 @@
     def _execute(self, http, order, requests):
         """Serialize batch request, send to server, process response.
 
-    Args:
-      http: httplib2.Http, an http object to be used to make the request with.
-      order: list, list of request ids in the order they were added to the
-        batch.
-      requests: list, list of request objects to send.
+        Args:
+          http: httplib2.Http, an http object to be used to make the request with.
+          order: list, list of request ids in the order they were added to the
+            batch.
+          requests: list, list of request objects to send.
 
-    Raises:
-      httplib2.HttpLib2Error if a transport error has occurred.
-      googleapiclient.errors.BatchError if the response is the wrong format.
-    """
+        Raises:
+          httplib2.HttpLib2Error if a transport error has occurred.
+          googleapiclient.errors.BatchError if the response is the wrong format.
+        """
         message = MIMEMultipart("mixed")
         # Message should not write out it's own headers.
         setattr(message, "_write_headers", lambda self: None)
@@ -1522,18 +1534,18 @@
     def execute(self, http=None):
         """Execute all the requests as a single batched HTTP request.
 
-    Args:
-      http: httplib2.Http, an http object to be used in place of the one the
-        HttpRequest request object was constructed with. If one isn't supplied
-        then use a http object from the requests in this batch.
+        Args:
+          http: httplib2.Http, an http object to be used in place of the one the
+            HttpRequest request object was constructed with. If one isn't supplied
+            then use a http object from the requests in this batch.
 
-    Returns:
-      None
+        Returns:
+          None
 
-    Raises:
-      httplib2.HttpLib2Error if a transport error has occurred.
-      googleapiclient.errors.BatchError if the response is the wrong format.
-    """
+        Raises:
+          httplib2.HttpLib2Error if a transport error has occurred.
+          googleapiclient.errors.BatchError if the response is the wrong format.
+        """
         # If we have no requests return
         if len(self._order) == 0:
             return None
@@ -1603,18 +1615,18 @@
 class HttpRequestMock(object):
     """Mock of HttpRequest.
 
-  Do not construct directly, instead use RequestMockBuilder.
-  """
+    Do not construct directly, instead use RequestMockBuilder.
+    """
 
     def __init__(self, resp, content, postproc):
         """Constructor for HttpRequestMock
 
-    Args:
-      resp: httplib2.Response, the response to emulate coming from the request
-      content: string, the response body
-      postproc: callable, the post processing function usually supplied by
-                the model class. See model.JsonModel.response() as an example.
-    """
+        Args:
+          resp: httplib2.Response, the response to emulate coming from the request
+          content: string, the response body
+          postproc: callable, the post processing function usually supplied by
+                    the model class. See model.JsonModel.response() as an example.
+        """
         self.resp = resp
         self.content = content
         self.postproc = postproc
@@ -1626,9 +1638,9 @@
     def execute(self, http=None):
         """Execute the request.
 
-    Same behavior as HttpRequest.execute(), but the response is
-    mocked and not really from an HTTP request/response.
-    """
+        Same behavior as HttpRequest.execute(), but the response is
+        mocked and not really from an HTTP request/response.
+        """
         return self.postproc(self.resp, self.content)
 
 
@@ -1657,21 +1669,21 @@
     in the discovery document.
 
     For more details see the project wiki.
-  """
+    """
 
     def __init__(self, responses, check_unexpected=False):
         """Constructor for RequestMockBuilder
 
-    The constructed object should be a callable object
-    that can replace the class HttpResponse.
+        The constructed object should be a callable object
+        that can replace the class HttpResponse.
 
-    responses - A dictionary that maps methodIds into tuples
-                of (httplib2.Response, content). The methodId
-                comes from the 'rpcName' field in the discovery
-                document.
-    check_unexpected - A boolean setting whether or not UnexpectedMethodError
-                       should be raised on unsupplied method.
-    """
+        responses - A dictionary that maps methodIds into tuples
+                    of (httplib2.Response, content). The methodId
+                    comes from the 'rpcName' field in the discovery
+                    document.
+        check_unexpected - A boolean setting whether or not UnexpectedMethodError
+                           should be raised on unsupplied method.
+        """
         self.responses = responses
         self.check_unexpected = check_unexpected
 
@@ -1687,10 +1699,10 @@
         resumable=None,
     ):
         """Implements the callable interface that discovery.build() expects
-    of requestBuilder, which is to build an object compatible with
-    HttpRequest.execute(). See that method for the description of the
-    parameters and the expected response.
-    """
+        of requestBuilder, which is to build an object compatible with
+        HttpRequest.execute(). See that method for the description of the
+        parameters and the expected response.
+        """
         if methodId in self.responses:
             response = self.responses[methodId]
             resp, content = response[:2]
@@ -1719,10 +1731,10 @@
 
     def __init__(self, filename=None, headers=None):
         """
-    Args:
-      filename: string, absolute filename to read response from
-      headers: dict, header to return with response
-    """
+        Args:
+          filename: string, absolute filename to read response from
+          headers: dict, header to return with response
+        """
         if headers is None:
             headers = {"status": "200"}
         if filename:
@@ -1755,35 +1767,36 @@
     def close(self):
         return None
 
+
 class HttpMockSequence(object):
     """Mock of httplib2.Http
 
-  Mocks a sequence of calls to request returning different responses for each
-  call. Create an instance initialized with the desired response headers
-  and content and then use as if an httplib2.Http instance.
+    Mocks a sequence of calls to request returning different responses for each
+    call. Create an instance initialized with the desired response headers
+    and content and then use as if an httplib2.Http instance.
 
-    http = HttpMockSequence([
-      ({'status': '401'}, ''),
-      ({'status': '200'}, '{"access_token":"1/3w","expires_in":3600}'),
-      ({'status': '200'}, 'echo_request_headers'),
-      ])
-    resp, content = http.request("http://examples.com")
+      http = HttpMockSequence([
+        ({'status': '401'}, ''),
+        ({'status': '200'}, '{"access_token":"1/3w","expires_in":3600}'),
+        ({'status': '200'}, 'echo_request_headers'),
+        ])
+      resp, content = http.request("http://examples.com")
 
-  There are special values you can pass in for content to trigger
-  behavours that are helpful in testing.
+    There are special values you can pass in for content to trigger
+    behaviours that are helpful in testing.
 
-  'echo_request_headers' means return the request headers in the response body
-  'echo_request_headers_as_json' means return the request headers in
-     the response body
-  'echo_request_body' means return the request body in the response body
-  'echo_request_uri' means return the request uri in the response body
-  """
+    'echo_request_headers' means return the request headers in the response body
+    'echo_request_headers_as_json' means return the request headers,
+       serialized as JSON, in the response body
+    'echo_request_body' means return the request body in the response body
+    'echo_request_uri' means return the request uri in the response body
+    """
 
     def __init__(self, iterable):
         """
-    Args:
-      iterable: iterable, a sequence of pairs of (headers, body)
-    """
+        Args:
+          iterable: iterable, a sequence of pairs of (headers, body)
+        """
         self._iterable = iterable
         self.follow_redirects = True
         self.request_sequence = list()
@@ -1821,22 +1834,22 @@
 def set_user_agent(http, user_agent):
     """Set the user-agent on every request.
 
-  Args:
-     http - An instance of httplib2.Http
-         or something that acts like it.
-     user_agent: string, the value for the user-agent header.
+    Args:
+       http - An instance of httplib2.Http
+           or something that acts like it.
+       user_agent: string, the value for the user-agent header.
 
-  Returns:
-     A modified instance of http that was passed in.
+    Returns:
+       A modified instance of http that was passed in.
 
-  Example:
+    Example:
 
-    h = httplib2.Http()
-    h = set_user_agent(h, "my-app-name/6.0")
+      h = httplib2.Http()
+      h = set_user_agent(h, "my-app-name/6.0")
 
-  Most of the time the user-agent will be set doing auth, this is for the rare
-  cases where you are accessing an unauthenticated endpoint.
-  """
+    Most of the time the user-agent will be set doing auth, this is for the rare
+    cases where you are accessing an unauthenticated endpoint.
+    """
     request_orig = http.request
 
     # The closure that will replace 'httplib2.Http.request'.
@@ -1871,22 +1884,22 @@
 
 def tunnel_patch(http):
     """Tunnel PATCH requests over POST.
-  Args:
-     http - An instance of httplib2.Http
-         or something that acts like it.
+    Args:
+       http - An instance of httplib2.Http
+           or something that acts like it.
 
-  Returns:
-     A modified instance of http that was passed in.
+    Returns:
+       A modified instance of http that was passed in.
 
-  Example:
+    Example:
 
-    h = httplib2.Http()
-    h = tunnel_patch(h, "my-app-name/6.0")
+      h = httplib2.Http()
+      h = tunnel_patch(h, "my-app-name/6.0")
 
-  Useful if you are running on a platform that doesn't support PATCH.
-  Apply this last if you are using OAuth 1.0, as changing the method
-  will result in a different signature.
-  """
+    Useful if you are running on a platform that doesn't support PATCH.
+    Apply this last if you are using OAuth 1.0, as changing the method
+    will result in a different signature.
+    """
     request_orig = http.request
 
     # The closure that will replace 'httplib2.Http.request'.
@@ -1925,14 +1938,14 @@
 def build_http():
     """Builds httplib2.Http object
 
-  Returns:
-  A httplib2.Http object, which is used to make http requests, and which has timeout set by default.
-  To override default timeout call
+    Returns:
+    A httplib2.Http object, which is used to make http requests, and which has timeout set by default.
+    To override default timeout call
 
-    socket.setdefaulttimeout(timeout_in_sec)
+      socket.setdefaulttimeout(timeout_in_sec)
 
-  before interacting with this method.
-  """
+    before interacting with this method.
+    """
     if socket.getdefaulttimeout() is not None:
         http_timeout = socket.getdefaulttimeout()
     else:
@@ -1943,12 +1956,12 @@
     # This asks httplib2 to exclude 308s from the status codes
     # it treats as redirects
     try:
-      http.redirect_codes = http.redirect_codes - {308}
+        http.redirect_codes = http.redirect_codes - {308}
     except AttributeError:
-      # Apache Beam tests depend on this library and cannot
-      # currently upgrade their httplib2 version
-      # http.redirect_codes does not exist in previous versions
-      # of httplib2, so pass
-      pass
+        # Apache Beam tests depend on this library and cannot
+        # currently upgrade their httplib2 version
+        # http.redirect_codes does not exist in previous versions
+        # of httplib2, so pass
+        pass
 
     return http
diff --git a/googleapiclient/schema.py b/googleapiclient/schema.py
index 2d58984..00f8588 100644
--- a/googleapiclient/schema.py
+++ b/googleapiclient/schema.py
@@ -63,7 +63,6 @@
 
 __author__ = "jcgregorio@google.com (Joe Gregorio)"
 
-import copy
 
 from collections import OrderedDict
 from googleapiclient import _helpers as util
@@ -75,10 +74,10 @@
     def __init__(self, discovery):
         """Constructor.
 
-    Args:
-      discovery: object, Deserialized discovery document from which we pull
-        out the named schema.
-    """
+        Args:
+          discovery: object, Deserialized discovery document from which we pull
+            out the named schema.
+        """
         self.schemas = discovery.get("schemas", {})
 
         # Cache of pretty printed schemas.
@@ -88,15 +87,15 @@
     def _prettyPrintByName(self, name, seen=None, dent=0):
         """Get pretty printed object prototype from the schema name.
 
-    Args:
-      name: string, Name of schema in the discovery document.
-      seen: list of string, Names of schema already seen. Used to handle
-        recursive definitions.
+        Args:
+          name: string, Name of schema in the discovery document.
+          seen: list of string, Names of schema already seen. Used to handle
+            recursive definitions.
 
-    Returns:
-      string, A string that contains a prototype object with
-        comments that conforms to the given schema.
-    """
+        Returns:
+          string, A string that contains a prototype object with
+            comments that conforms to the given schema.
+        """
         if seen is None:
             seen = []
 
@@ -117,13 +116,13 @@
     def prettyPrintByName(self, name):
         """Get pretty printed object prototype from the schema name.
 
-    Args:
-      name: string, Name of schema in the discovery document.
+        Args:
+          name: string, Name of schema in the discovery document.
 
-    Returns:
-      string, A string that contains a prototype object with
-        comments that conforms to the given schema.
-    """
+        Returns:
+          string, A string that contains a prototype object with
+            comments that conforms to the given schema.
+        """
         # Return with trailing comma and newline removed.
         return self._prettyPrintByName(name, seen=[], dent=0)[:-2]
 
@@ -131,15 +130,15 @@
     def _prettyPrintSchema(self, schema, seen=None, dent=0):
         """Get pretty printed object prototype of schema.
 
-    Args:
-      schema: object, Parsed JSON schema.
-      seen: list of string, Names of schema already seen. Used to handle
-        recursive definitions.
+        Args:
+          schema: object, Parsed JSON schema.
+          seen: list of string, Names of schema already seen. Used to handle
+            recursive definitions.
 
-    Returns:
-      string, A string that contains a prototype object with
-        comments that conforms to the given schema.
-    """
+        Returns:
+          string, A string that contains a prototype object with
+            comments that conforms to the given schema.
+        """
         if seen is None:
             seen = []
 
@@ -148,23 +147,23 @@
     def prettyPrintSchema(self, schema):
         """Get pretty printed object prototype of schema.
 
-    Args:
-      schema: object, Parsed JSON schema.
+        Args:
+          schema: object, Parsed JSON schema.
 
-    Returns:
-      string, A string that contains a prototype object with
-        comments that conforms to the given schema.
-    """
+        Returns:
+          string, A string that contains a prototype object with
+            comments that conforms to the given schema.
+        """
         # Return with trailing comma and newline removed.
         return self._prettyPrintSchema(schema, dent=0)[:-2]
 
     def get(self, name, default=None):
         """Get deserialized JSON schema from the schema name.
 
-    Args:
-      name: string, Schema name.
-      default: object, return value if name not found.
-    """
+        Args:
+          name: string, Schema name.
+          default: object, return value if name not found.
+        """
         return self.schemas.get(name, default)
 
 
@@ -175,12 +174,12 @@
     def __init__(self, schema, seen, dent=0):
         """Constructor.
 
-    Args:
-      schema: object, Parsed JSON schema.
-      seen: list, List of names of schema already seen while parsing. Used to
-        handle recursive definitions.
-      dent: int, Initial indentation depth.
-    """
+        Args:
+          schema: object, Parsed JSON schema.
+          seen: list, List of names of schema already seen while parsing. Used to
+            handle recursive definitions.
+          dent: int, Initial indentation depth.
+        """
         # The result of this parsing kept as list of strings.
         self.value = []
 
@@ -203,26 +202,26 @@
     def emit(self, text):
         """Add text as a line to the output.
 
-    Args:
-      text: string, Text to output.
-    """
+        Args:
+          text: string, Text to output.
+        """
         self.value.extend(["  " * self.dent, text, "\n"])
 
     def emitBegin(self, text):
         """Add text to the output, but with no line terminator.
 
-    Args:
-      text: string, Text to output.
-      """
+        Args:
+          text: string, Text to output.
+        """
         self.value.extend(["  " * self.dent, text])
 
     def emitEnd(self, text, comment):
         """Add text and comment to the output with line terminator.
 
-    Args:
-      text: string, Text to output.
-      comment: string, Python comment.
-    """
+        Args:
+          text: string, Text to output.
+          comment: string, Python comment.
+        """
         if comment:
             divider = "\n" + "  " * (self.dent + 2) + "# "
             lines = comment.splitlines()
@@ -243,12 +242,12 @@
     def _to_str_impl(self, schema):
         """Prototype object based on the schema, in Python code with comments.
 
-    Args:
-      schema: object, Parsed JSON schema file.
+        Args:
+          schema: object, Parsed JSON schema file.
 
-    Returns:
-      Prototype object based on the schema, in Python code with comments.
-    """
+        Returns:
+          Prototype object based on the schema, in Python code with comments.
+        """
         stype = schema.get("type")
         if stype == "object":
             self.emitEnd("{", schema.get("description", ""))
@@ -305,14 +304,14 @@
     def to_str(self, from_cache):
         """Prototype object based on the schema, in Python code with comments.
 
-    Args:
-      from_cache: callable(name, seen), Callable that retrieves an object
-         prototype for a schema with the given name. Seen is a list of schema
-         names already seen as we recursively descend the schema definition.
+        Args:
+          from_cache: callable(name, seen), Callable that retrieves an object
+             prototype for a schema with the given name. Seen is a list of schema
+             names already seen as we recursively descend the schema definition.
 
-    Returns:
-      Prototype object based on the schema, in Python code with comments.
-      The lines of the code will all be properly indented.
-    """
+        Returns:
+          Prototype object based on the schema, in Python code with comments.
+          The lines of the code will all be properly indented.
+        """
         self.from_cache = from_cache
         return self._to_str_impl(self.schema)
diff --git a/noxfile.py b/noxfile.py
index d33f26e..6e2cadc 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -1,4 +1,3 @@
-
 # Copyright 2020 Google LLC
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,8 +13,6 @@
 # limitations under the License.
 
 import os
-import sys
-
 import nox
 import shutil
 
@@ -61,21 +58,21 @@
 )
 def unit(session, oauth2client):
     # Clean up dist and build folders
-    shutil.rmtree('dist', ignore_errors=True)
-    shutil.rmtree('build', ignore_errors=True)
+    shutil.rmtree("dist", ignore_errors=True)
+    shutil.rmtree("build", ignore_errors=True)
 
     session.install(*test_dependencies)
     session.install(oauth2client)
 
     # Create and install wheels
-    session.run('python3', 'setup.py', 'bdist_wheel')
-    session.install(os.path.join('dist', os.listdir('dist').pop()))
+    session.run("python3", "setup.py", "bdist_wheel")
+    session.install(os.path.join("dist", os.listdir("dist").pop()))
 
     # Run tests from a different directory to test the package artifacts
     root_dir = os.path.dirname(os.path.realpath(__file__))
     temp_dir = session.create_tmp()
     session.chdir(temp_dir)
-    shutil.copytree(os.path.join(root_dir, 'tests'), 'tests')
+    shutil.copytree(os.path.join(root_dir, "tests"), "tests")
 
     # Run py.test against the unit tests.
     session.run(
@@ -91,6 +88,7 @@
         *session.posargs,
     )
 
+
 @nox.session(python=["3.9"])
 def scripts(session):
     session.install(*test_dependencies)