Merge "Update create_goldfish_action and error to be pylint compliant."
diff --git a/internal/lib/base_cloud_client.py b/internal/lib/base_cloud_client.py
index e9dd097..67e26b1 100755
--- a/internal/lib/base_cloud_client.py
+++ b/internal/lib/base_cloud_client.py
@@ -13,17 +13,16 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Base Cloud API Client.
 
 BasicCloudApiCliend does basic setup for a cloud API.
 """
 import httplib
 import logging
-import os
 import socket
 import ssl
 
+# pylint: disable=import-error
 from apiclient import errors as gerrors
 from apiclient.discovery import build
 import apiclient.http
@@ -83,16 +82,17 @@
         """
         http_auth = oauth2_credentials.authorize(httplib2.Http())
         return utils.RetryExceptionType(
-                exception_types=cls.RETRIABLE_AUTH_ERRORS,
-                max_retries=cls.RETRY_COUNT,
-                functor=build,
-                sleep_multiplier=cls.RETRY_SLEEP_MULTIPLIER,
-                retry_backoff_factor=cls.RETRY_BACKOFF_FACTOR,
-                serviceName=cls.API_NAME,
-                version=cls.API_VERSION,
-                http=http_auth)
+            exception_types=cls.RETRIABLE_AUTH_ERRORS,
+            max_retries=cls.RETRY_COUNT,
+            functor=build,
+            sleep_multiplier=cls.RETRY_SLEEP_MULTIPLIER,
+            retry_backoff_factor=cls.RETRY_BACKOFF_FACTOR,
+            serviceName=cls.API_NAME,
+            version=cls.API_VERSION,
+            http=http_auth)
 
-    def _ShouldRetry(self, exception, retry_http_codes,
+    @staticmethod
+    def _ShouldRetry(exception, retry_http_codes,
                      other_retriable_errors):
         """Check if exception is retriable.
 
@@ -116,12 +116,14 @@
                 logger.debug("_ShouldRetry: Exception code %s not in %s: %s",
                              exception.code, retry_http_codes, str(exception))
 
-        logger.debug(
-            "_ShouldRetry: Exception %s is not one of %s: %s", type(exception),
-            list(other_retriable_errors) + [errors.HttpError], str(exception))
+        logger.debug("_ShouldRetry: Exception %s is not one of %s: %s",
+                     type(exception),
+                     list(other_retriable_errors) + [errors.HttpError],
+                     str(exception))
         return False
 
-    def _TranslateError(self, exception):
+    @staticmethod
+    def _TranslateError(exception):
         """Translate the exception to a desired type.
 
         Args:
@@ -137,8 +139,8 @@
         if isinstance(exception, gerrors.HttpError):
             exception = errors.HttpError.CreateFromHttpError(exception)
             if exception.code == errors.HTTP_NOT_FOUND_CODE:
-                exception = errors.ResourceNotFoundError(exception.code,
-                                                         str(exception))
+                exception = errors.ResourceNotFoundError(
+                    exception.code, str(exception))
         return exception
 
     def ExecuteOnce(self, api):
@@ -185,12 +187,12 @@
         Raises:
           See ExecuteOnce.
         """
-        retry_http_codes = (self.RETRY_HTTP_CODES if retry_http_codes is None
-                            else retry_http_codes)
+        retry_http_codes = (self.RETRY_HTTP_CODES
+                            if retry_http_codes is None else retry_http_codes)
         max_retry = (self.RETRY_COUNT if max_retry is None else max_retry)
         sleep = (self.RETRY_SLEEP_MULTIPLIER if sleep is None else sleep)
-        backoff_factor = (self.RETRY_BACKOFF_FACTOR if backoff_factor is None
-                          else backoff_factor)
+        backoff_factor = (self.RETRY_BACKOFF_FACTOR
+                          if backoff_factor is None else backoff_factor)
         other_retriable_errors = (self.RETRIABLE_ERRORS
                                   if other_retriable_errors is None else
                                   other_retriable_errors)
@@ -212,9 +214,12 @@
             return False
 
         return utils.Retry(
-             _Handler, max_retries=max_retry, functor=self.ExecuteOnce,
-             sleep_multiplier=sleep, retry_backoff_factor=backoff_factor,
-             api=api)
+            _Handler,
+            max_retries=max_retry,
+            functor=self.ExecuteOnce,
+            sleep_multiplier=sleep,
+            retry_backoff_factor=backoff_factor,
+            api=api)
 
     def BatchExecuteOnce(self, requests):
         """Execute requests in a batch.
@@ -237,9 +242,8 @@
 
         batch = apiclient.http.BatchHttpRequest()
         for request_id, request in requests.iteritems():
-            batch.add(request=request,
-                      callback=_CallBack,
-                      request_id=request_id)
+            batch.add(
+                request=request, callback=_CallBack, request_id=request_id)
         batch.execute()
         return results
 
@@ -278,8 +282,8 @@
             max_retry=max_retry or self.RETRY_COUNT,
             sleep=sleep or self.RETRY_SLEEP_MULTIPLIER,
             backoff_factor=backoff_factor or self.RETRY_BACKOFF_FACTOR,
-            other_retriable_errors=other_retriable_errors or
-            self.RETRIABLE_ERRORS)
+            other_retriable_errors=other_retriable_errors
+            or self.RETRIABLE_ERRORS)
         executor.Execute()
         return executor.GetResults()
 
diff --git a/internal/lib/base_cloud_client_test.py b/internal/lib/base_cloud_client_test.py
index 96eb3c3..3254729 100644
--- a/internal/lib/base_cloud_client_test.py
+++ b/internal/lib/base_cloud_client_test.py
@@ -17,10 +17,12 @@
 """Tests for acloud.internal.lib.base_cloud_client."""
 
 import time
-import apiclient
-import mock
 
 import unittest
+import mock
+
+import apiclient
+
 from acloud.internal.lib import base_cloud_client
 from acloud.internal.lib import driver_test_lib
 from acloud.public import errors
@@ -33,14 +35,9 @@
 class BaseCloudApiClientTest(driver_test_lib.BaseDriverTest):
     """Test BaseCloudApiClient."""
 
-    def setUp(self):
-        """Set up test."""
-        super(BaseCloudApiClientTest, self).setUp()
-
     def testInitResourceHandle(self):
         """Test InitResourceHandle."""
         # Setup mocks
-        mock_credentials = mock.MagicMock()
         self.Patch(base_cloud_client, "build")
         # Call the method
         base_cloud_client.BaseCloudApiClient(mock.MagicMock())
@@ -113,7 +110,9 @@
             "r2": (None, error_1),
             "r3": (None, error_2)
         }
+
         self.assertEqual(results, expected_results)
+        # pylint: disable=no-member
         self.assertEqual(requests["r1"].execute.call_count, 1)
         self.assertEqual(requests["r2"].execute.call_count,
                          client.RETRY_COUNT + 1)
@@ -147,6 +146,7 @@
 
         api_mock = mock.MagicMock()
         api_mock.execute.side_effect = FakeError("fake retriable error.")
+        # pylint: disable=no-member
         self.assertRaises(
             FakeError,
             client.Execute,
diff --git a/internal/lib/cvd_compute_client.py b/internal/lib/cvd_compute_client.py
index 1b55faa..1bd3e99 100644
--- a/internal/lib/cvd_compute_client.py
+++ b/internal/lib/cvd_compute_client.py
@@ -41,7 +41,6 @@
 
 import getpass
 import logging
-import os
 
 from acloud.internal.lib import android_compute_client
 from acloud.internal.lib import gcompute_client
@@ -50,115 +49,85 @@
 
 
 class CvdComputeClient(android_compute_client.AndroidComputeClient):
-  """Client that manages Anadroid Virtual Device."""
+    """Client that manages Anadroid Virtual Device."""
 
-  DATA_POLICY_CREATE_IF_MISSING = "create_if_missing"
+    DATA_POLICY_CREATE_IF_MISSING = "create_if_missing"
 
-  def __init__(self, acloud_config, oauth2_credentials):
-    """Initialize.
+    # TODO: refactor CreateInstance to take in an object that contains these
+    # args; this method's signature differs too much from the parent's and
+    # holds too many cf-specific args to put in the parent method.
+    # pylint: disable=arguments-differ
+    def CreateInstance(self, instance, image_name, image_project, build_target,
+                       branch, build_id, kernel_branch=None,
+                       kernel_build_id=None, blank_data_disk_size_gb=None):
+        """Create a cuttlefish instance given stable host image and build id.
 
-    Args:
-      acloud_config: An AcloudConfig object.
-      oauth2_credentials: An oauth2client.OAuth2Credentials instance.
-    """
-    super(CvdComputeClient, self).__init__(acloud_config, oauth2_credentials)
+        Args:
+          instance: instance name.
+          image_name: A string, the name of the GCE image.
+          image_project: A string, name of the project where the image belongs.
+                         Assume the default project if None.
+          build_target: Target name, e.g. "aosp_cf_x86_phone-userdebug"
+          branch: Branch name, e.g. "aosp-master"
+          build_id: Build id, a string, e.g. "2263051", "P2804227"
+          kernel_branch: Kernel branch name, e.g. "kernel-android-cf-4.4-x86_64"
+          kernel_build_id: Kernel build id, a string, e.g. "2263051", "P2804227"
+          blank_data_disk_size_gb: Size of the blank data disk in GB.
+        """
+        self._CheckMachineSize()
 
-  def _GetDiskArgs(self, disk_name, image_name, image_project, disk_size_gb):
-    """Helper to generate disk args that is used to create an instance.
+        # A blank data disk would be created on the host. Make sure the size of
+        # the boot disk is large enough to hold it.
+        boot_disk_size_gb = (
+            int(self.GetImage(image_name, image_project)["diskSizeGb"]) +
+            blank_data_disk_size_gb)
+        disk_args = self._GetDiskArgs(
+            instance, image_name, image_project, boot_disk_size_gb)
 
-    Args:
-      disk_name: A string
-      image_name: A string
-      image_project: A string
-      disk_size_gb: An integer
+        # Transitional metadata variable as outlined in go/cuttlefish-deployment
+        # These metadata tell the host instance to fetch and launch one
+        # cuttlefish device (cvd-01). Ideally we should manage CVD devices on
+        # the host instance with a separate tool, not through metadata.
+        # TODO(b/77626419): Remove these metadata once the
+        # cuttlefish-google.service is turned off on the host instance.
+        metadata = self._metadata.copy()
+        resolution = self._resolution.split("x")
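+        # self._resolution is "<x_res>x<y_res>x<depth>x<dpi>", e.g.
+        # "720x1280x32x160": indexes 0/1 are x/y res, index 3 is the dpi.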
+        metadata["cvd_01_dpi"] = resolution[3]
+        metadata["cvd_01_fetch_android_build_target"] = build_target
+        metadata["cvd_01_fetch_android_bid"] = "{branch}/{build_id}".format(
+            branch=branch, build_id=build_id)
+        if kernel_branch and kernel_build_id:
+            metadata["cvd_01_fetch_kernel_bid"] = "{branch}/{build_id}".format(
+                branch=kernel_branch, build_id=kernel_build_id)
+        metadata["cvd_01_launch"] = "1"
+        metadata["cvd_01_x_res"] = resolution[0]
+        metadata["cvd_01_y_res"] = resolution[1]
+        if blank_data_disk_size_gb > 0:
+            # The 'create_if_missing' policy creates a blank userdata disk if
+            # missing; if the disk already exists, it is reused.
+            metadata["cvd_01_data_policy"] = self.DATA_POLICY_CREATE_IF_MISSING
+            metadata["cvd_01_blank_data_disk_size"] = str(
+                blank_data_disk_size_gb * 1024)
 
-    Returns:
-      A dictionary representing disk args.
-    """
-    return [{
-        "type": "PERSISTENT",
-        "boot": True,
-        "mode": "READ_WRITE",
-        "autoDelete": True,
-        "initializeParams": {
-            "diskName": disk_name,
-            "sourceImage": self.GetImage(image_name, image_project)["selfLink"],
-            "diskSizeGb": disk_size_gb
-        },
-    }]
+        # Add per-instance ssh key
+        if self._ssh_public_key_path:
+            rsa = self._LoadSshPublicKey(self._ssh_public_key_path)
+            logger.info("ssh_public_key_path is specified in config: %s, "
+                        "will add the key to the instance.",
+                        self._ssh_public_key_path)
+            metadata["sshKeys"] = "%s:%s" % (getpass.getuser(), rsa)
+        else:
+            logger.warning(
+                "ssh_public_key_path is not specified in config, "
+                "only project-wide key will be effective.")
 
-  def CreateInstance(
-      self, instance, image_name, image_project, build_target, branch, build_id,
-      kernel_branch=None, kernel_build_id=None, blank_data_disk_size_gb=None):
-    """Create a cuttlefish instance given a stable host image and a build id.
-
-    Args:
-      instance: instance name.
-      image_name: A string, the name of the GCE image.
-      image_project: A string, name of the project where the image belongs.
-                     Assume the default project if None.
-      build_target: Target name, e.g. "aosp_cf_x86_phone-userdebug"
-      branch: Branch name, e.g. "aosp-master"
-      build_id: Build id, a string, e.g. "2263051", "P2804227"
-      kernel_branch: Kernel branch name, e.g. "kernel-android-cf-4.4-x86_64"
-      kernel_build_id: Kernel build id, a string, e.g. "2263051", "P2804227"
-      blank_data_disk_size_gb: Size of the blank data disk in GB.
-    """
-    self._CheckMachineSize()
-
-    # A blank data disk would be created on the host. Make sure the size of the
-    # boot disk is large enough to hold it.
-    boot_disk_size_gb = (
-        int(self.GetImage(image_name, image_project)["diskSizeGb"]) +
-        blank_data_disk_size_gb)
-    disk_args = self._GetDiskArgs(
-        instance, image_name, image_project, boot_disk_size_gb)
-
-    # Transitional metadata variable as outlined in go/cuttlefish-deployment
-    # These metadata tell the host instance to fetch and launch one cuttlefish
-    # device (cvd-01). Ideally we should use a separate tool to manage CVD
-    # devices on the host instance and not through metadata.
-    # TODO(b/77626419): Remove these metadata once the cuttlefish-google.service
-    # is
-    # turned off on the host instance.
-    metadata = self._metadata.copy()
-    resolution = self._resolution.split("x")
-    metadata["cvd_01_dpi"] = resolution[3]
-    metadata["cvd_01_fetch_android_build_target"] = build_target
-    metadata["cvd_01_fetch_android_bid"] = "{branch}/{build_id}".format(
-        branch=branch, build_id=build_id)
-    if kernel_branch and kernel_build_id:
-      metadata["cvd_01_fetch_kernel_bid"] = "{branch}/{build_id}".format(
-          branch=kernel_branch, build_id=kernel_build_id)
-    metadata["cvd_01_launch"] = "1"
-    metadata["cvd_01_x_res"] = resolution[0]
-    metadata["cvd_01_y_res"] = resolution[1]
-    if blank_data_disk_size_gb > 0:
-      # Policy 'create_if_missing' would create a blank userdata disk if
-      # missing. If already exist, reuse the disk.
-      metadata["cvd_01_data_policy"] = self.DATA_POLICY_CREATE_IF_MISSING
-      metadata["cvd_01_blank_data_disk_size"] = str(
-          blank_data_disk_size_gb * 1024)
-
-    # Add per-instance ssh key
-    if self._ssh_public_key_path:
-      rsa = self._LoadSshPublicKey(self._ssh_public_key_path)
-      logger.info("ssh_public_key_path is specified in config: %s, "
-                  "will add the key to the instance.",
-                  self._ssh_public_key_path)
-      metadata["sshKeys"] = "%s:%s" % (getpass.getuser(), rsa)
-    else:
-      logger.warning(
-          "ssh_public_key_path is not specified in config, "
-          "only project-wide key will be effective.")
-
-    gcompute_client.ComputeClient.CreateInstance(
-        self,
-        instance=instance,
-        image_name=image_name,
-        image_project=image_project,
-        disk_args=disk_args,
-        metadata=metadata,
-        machine_type=self._machine_type,
-        network=self._network,
-        zone=self._zone)
+        gcompute_client.ComputeClient.CreateInstance(
+            self,
+            instance=instance,
+            image_name=image_name,
+            image_project=image_project,
+            disk_args=disk_args,
+            metadata=metadata,
+            machine_type=self._machine_type,
+            network=self._network,
+            zone=self._zone)
diff --git a/internal/lib/cvd_compute_client_test.py b/internal/lib/cvd_compute_client_test.py
index 8b322ef..75005ed 100644
--- a/internal/lib/cvd_compute_client_test.py
+++ b/internal/lib/cvd_compute_client_test.py
@@ -16,102 +16,103 @@
 
 """Tests for acloud.internal.lib.cvd_compute_client."""
 
+import unittest
 import mock
 
-import unittest
 from acloud.internal.lib import cvd_compute_client
 from acloud.internal.lib import driver_test_lib
 from acloud.internal.lib import gcompute_client
 
 
 class CvdComputeClientTest(driver_test_lib.BaseDriverTest):
-  """Test CvdComputeClient."""
+    """Test CvdComputeClient."""
 
-  SSH_PUBLIC_KEY_PATH = ""
-  INSTANCE = "fake-instance"
-  IMAGE = "fake-image"
-  IMAGE_PROJECT = "fake-iamge-project"
-  MACHINE_TYPE = "fake-machine-type"
-  NETWORK = "fake-network"
-  ZONE = "fake-zone"
-  BRANCH = "fake-branch"
-  TARGET = "aosp_cf_x86_phone-userdebug"
-  BUILD_ID = "2263051"
-  KERNEL_BRANCH = "fake-kernel-branch"
-  KERNEL_BUILD_ID = "1234567"
-  DPI = 160
-  X_RES = 720
-  Y_RES = 1280
-  METADATA = {"metadata_key": "metadata_value"}
-  EXTRA_DATA_DISK_SIZE_GB = 4
-  BOOT_DISK_SIZE_GB = 10
+    SSH_PUBLIC_KEY_PATH = ""
+    INSTANCE = "fake-instance"
+    IMAGE = "fake-image"
+    IMAGE_PROJECT = "fake-iamge-project"
+    MACHINE_TYPE = "fake-machine-type"
+    NETWORK = "fake-network"
+    ZONE = "fake-zone"
+    BRANCH = "fake-branch"
+    TARGET = "aosp_cf_x86_phone-userdebug"
+    BUILD_ID = "2263051"
+    KERNEL_BRANCH = "fake-kernel-branch"
+    KERNEL_BUILD_ID = "1234567"
+    DPI = 160
+    X_RES = 720
+    Y_RES = 1280
+    METADATA = {"metadata_key": "metadata_value"}
+    EXTRA_DATA_DISK_SIZE_GB = 4
+    BOOT_DISK_SIZE_GB = 10
 
-  def _GetFakeConfig(self):
-    """Create a fake configuration object.
+    def _GetFakeConfig(self):
+        """Create a fake configuration object.
 
-    Returns:
-      A fake configuration mock object.
-    """
-    fake_cfg = mock.MagicMock()
-    fake_cfg.ssh_public_key_path = self.SSH_PUBLIC_KEY_PATH
-    fake_cfg.machine_type = self.MACHINE_TYPE
-    fake_cfg.network = self.NETWORK
-    fake_cfg.zone = self.ZONE
-    fake_cfg.resolution = "{x}x{y}x32x{dpi}".format(
-        x=self.X_RES, y=self.Y_RES, dpi=self.DPI)
-    fake_cfg.metadata_variable = self.METADATA
-    fake_cfg.extra_data_disk_size_gb = self.EXTRA_DATA_DISK_SIZE_GB
-    return fake_cfg
+        Returns:
+            A fake configuration mock object.
+        """
+        fake_cfg = mock.MagicMock()
+        fake_cfg.ssh_public_key_path = self.SSH_PUBLIC_KEY_PATH
+        fake_cfg.machine_type = self.MACHINE_TYPE
+        fake_cfg.network = self.NETWORK
+        fake_cfg.zone = self.ZONE
+        fake_cfg.resolution = "{x}x{y}x32x{dpi}".format(
+            x=self.X_RES, y=self.Y_RES, dpi=self.DPI)
+        fake_cfg.metadata_variable = self.METADATA
+        fake_cfg.extra_data_disk_size_gb = self.EXTRA_DATA_DISK_SIZE_GB
+        return fake_cfg
 
-  def setUp(self):
-    """Set up the test."""
-    super(CvdComputeClientTest, self).setUp()
-    self.Patch(cvd_compute_client.CvdComputeClient, "InitResourceHandle")
-    self.cvd_compute_client = cvd_compute_client.CvdComputeClient(
-        self._GetFakeConfig(), mock.MagicMock())
+    def setUp(self):
+        """Set up the test."""
+        super(CvdComputeClientTest, self).setUp()
+        self.Patch(cvd_compute_client.CvdComputeClient, "InitResourceHandle")
+        self.cvd_compute_client = cvd_compute_client.CvdComputeClient(
+            self._GetFakeConfig(), mock.MagicMock())
 
-  def testCreateInstance(self):
-    """Test CreateInstance."""
-    self.Patch(gcompute_client.ComputeClient, "CompareMachineSize",
-               return_value=1)
-    self.Patch(gcompute_client.ComputeClient, "GetImage",
-               return_value={"diskSizeGb": 10})
-    self.Patch(gcompute_client.ComputeClient, "CreateInstance")
-    self.Patch(cvd_compute_client.CvdComputeClient, "_GetDiskArgs",
-               return_value=[{"fake_arg": "fake_value"}])
+    @mock.patch.object(gcompute_client.ComputeClient, "CompareMachineSize",
+                       return_value=1)
+    @mock.patch.object(gcompute_client.ComputeClient, "GetImage",
+                       return_value={"diskSizeGb": 10})
+    @mock.patch.object(gcompute_client.ComputeClient, "CreateInstance")
+    @mock.patch.object(cvd_compute_client.CvdComputeClient, "_GetDiskArgs",
+                       return_value=[{"fake_arg": "fake_value"}])
+    def testCreateInstance(self, _get_disk_args, mock_create, _get_image,
+                           _compare_machine_size):
+        """Test CreateInstance."""
+        expected_metadata = {
+            "cvd_01_dpi": str(self.DPI),
+            "cvd_01_fetch_android_build_target": self.TARGET,
+            "cvd_01_fetch_android_bid": "{branch}/{build_id}".format(
+                branch=self.BRANCH, build_id=self.BUILD_ID),
+            "cvd_01_fetch_kernel_bid": "{branch}/{build_id}".format(
+                branch=self.KERNEL_BRANCH, build_id=self.KERNEL_BUILD_ID),
+            "cvd_01_launch": "1",
+            "cvd_01_x_res": str(self.X_RES),
+            "cvd_01_y_res": str(self.Y_RES),
+            "cvd_01_data_policy":
+                self.cvd_compute_client.DATA_POLICY_CREATE_IF_MISSING,
+            "cvd_01_blank_data_disk_size": str(self.EXTRA_DATA_DISK_SIZE_GB * 1024),
+        }
+        expected_metadata.update(self.METADATA)
+        expected_disk_args = [{"fake_arg": "fake_value"}]
 
-    expected_metadata = {
-        "cvd_01_dpi": str(self.DPI),
-        "cvd_01_fetch_android_build_target": self.TARGET,
-        "cvd_01_fetch_android_bid": "{branch}/{build_id}".format(
-            branch=self.BRANCH, build_id=self.BUILD_ID),
-        "cvd_01_fetch_kernel_bid": "{branch}/{build_id}".format(
-            branch=self.KERNEL_BRANCH, build_id=self.KERNEL_BUILD_ID),
-        "cvd_01_launch": "1",
-        "cvd_01_x_res": str(self.X_RES),
-        "cvd_01_y_res": str(self.Y_RES),
-        "cvd_01_data_policy":
-            self.cvd_compute_client.DATA_POLICY_CREATE_IF_MISSING,
-        "cvd_01_blank_data_disk_size": str(self.EXTRA_DATA_DISK_SIZE_GB * 1024),
-    }
-    expected_metadata.update(self.METADATA)
-    expected_disk_args = [{"fake_arg": "fake_value"}]
-
-    self.cvd_compute_client.CreateInstance(
-        self.INSTANCE, self.IMAGE, self.IMAGE_PROJECT, self.TARGET, self.BRANCH,
-        self.BUILD_ID, self.KERNEL_BRANCH, self.KERNEL_BUILD_ID,
-        self.EXTRA_DATA_DISK_SIZE_GB)
-    gcompute_client.ComputeClient.CreateInstance.assert_called_with(
-        self.cvd_compute_client,
-        instance=self.INSTANCE,
-        image_name=self.IMAGE,
-        image_project=self.IMAGE_PROJECT,
-        disk_args=expected_disk_args,
-        metadata=expected_metadata,
-        machine_type=self.MACHINE_TYPE,
-        network=self.NETWORK,
-        zone=self.ZONE)
+        self.cvd_compute_client.CreateInstance(
+            self.INSTANCE, self.IMAGE, self.IMAGE_PROJECT, self.TARGET,
+            self.BRANCH, self.BUILD_ID, self.KERNEL_BRANCH,
+            self.KERNEL_BUILD_ID, self.EXTRA_DATA_DISK_SIZE_GB)
+        mock_create.assert_called_with(
+            self.cvd_compute_client,
+            instance=self.INSTANCE,
+            image_name=self.IMAGE,
+            image_project=self.IMAGE_PROJECT,
+            disk_args=expected_disk_args,
+            metadata=expected_metadata,
+            machine_type=self.MACHINE_TYPE,
+            network=self.NETWORK,
+            zone=self.ZONE)
 
 
 if __name__ == "__main__":
-  unittest.main()
+    unittest.main()
diff --git a/internal/lib/driver_test_lib.py b/internal/lib/driver_test_lib.py
index 4ca1bfc..a9f212c 100644
--- a/internal/lib/driver_test_lib.py
+++ b/internal/lib/driver_test_lib.py
@@ -13,11 +13,9 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Driver test library."""
-
-import mock
 import unittest
+import mock
 
 
 class BaseDriverTest(unittest.TestCase):
diff --git a/internal/lib/gcompute_client.py b/internal/lib/gcompute_client.py
index cfda687..a215d7a 100755
--- a/internal/lib/gcompute_client.py
+++ b/internal/lib/gcompute_client.py
@@ -26,6 +26,7 @@
 and it only keeps states about authentication. ComputeClient should be very
 generic, and only knows how to talk to Compute Engine APIs.
 """
+# pylint: disable=too-many-lines
 import copy
 import functools
 import logging
@@ -39,6 +40,14 @@
 
 _MAX_RETRIES_ON_FINGERPRINT_CONFLICT = 10
 
+BASE_DISK_ARGS = {
+    "type": "PERSISTENT",
+    "boot": True,
+    "mode": "READ_WRITE",
+    "autoDelete": True,
+    "initializeParams": {},
+}
+
 
 class OperationScope(object):
     """Represents operation scope enum."""
@@ -76,6 +85,7 @@
     return isinstance(exc, errors.HttpError) and exc.code == 412
 
 
+# pylint: disable=too-many-public-methods
 class ComputeClient(base_cloud_client.BaseCloudApiClient):
     """Client that manages GCE."""
 
@@ -980,53 +990,51 @@
                                "type": "ONE_TO_ONE_NAT"}]
         }
 
-    def _GetDiskArgs(self, disk_name, image_name, image_project=None):
+    def _GetDiskArgs(self, disk_name, image_name, image_project=None,
+                     disk_size_gb=None):
         """Helper to generate disk args that is used to create an instance.
 
         Args:
             disk_name: A string
             image_name: A string
             image_project: A string
+            disk_size_gb: An integer
 
         Returns:
-            A dictionary representing disk args.
+            List holding dict of disk args.
         """
-        return [{
-            "type": "PERSISTENT",
-            "boot": True,
-            "mode": "READ_WRITE",
-            "autoDelete": True,
-            "initializeParams": {
-                "diskName": disk_name,
-                "sourceImage": self.GetImage(image_name, image_project)["selfLink"],
-            },
-        }]
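+        # Start from the shared BASE_DISK_ARGS skeleton; only initializeParams
+        # (and optionally diskSizeGb) vary per call.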
+        args = copy.deepcopy(BASE_DISK_ARGS)
+        args["initializeParams"] = {
+            "diskName": disk_name,
+            "sourceImage": self.GetImage(image_name, image_project)["selfLink"],
+        }
+        # TODO: Remove this check once it's validated that we can either pass in
+        # a None diskSizeGb or we find an appropriate default val.
+        if disk_size_gb:
+            args["diskSizeGb"] = disk_size_gb
+        return [args]
 
-    def CreateInstance(self,
-                       instance,
-                       image_name,
-                       machine_type,
-                       metadata,
-                       network,
-                       zone,
-                       disk_args=None,
-                       image_project=None,
+    # pylint: disable=too-many-locals
+    def CreateInstance(self, instance, image_name, machine_type, metadata,
+                       network, zone, disk_args=None, image_project=None,
                        gpu=None):
         """Create a gce instance with a gce image.
 
         Args:
-            instance: instance name.
-            image_name: A source image used to create this disk.
-            machine_type: A string, representing machine_type, e.g. "n1-standard-1"
-            metadata: A dictionary that maps a metadata name to its value.
+            instance: A string, instance name.
+            image_name: A string, source image used to create this disk.
+            machine_type: A string, representing machine_type,
+                          e.g. "n1-standard-1"
+            metadata: A dict, maps a metadata name to its value.
             network: A string, representing network name, e.g. "default"
             zone: A string, representing zone name, e.g. "us-central1-f"
-            disk_args: A list of extra disk args, see _GetDiskArgs for example,
-                       if None, will create a disk using the given image.
-            image_project: A string, name of the project where the image belongs.
-                           Assume the default project if None.
-            gpu: The type of gpu to attach. e.g. "nvidia-tesla-k80", if None no
-                 gpus will be attached. For more details see:
+            disk_args: A list of extra disk args (dicts), see _GetDiskArgs
+                       for example, if None, will create a disk using the given
+                       image.
+            image_project: A string, name of the project where the image
+                           belongs. Assume the default project if None.
+            gpu: A string, type of gpu to attach. e.g. "nvidia-tesla-k80", if
+                 None no gpus will be attached. For more details see:
                  https://cloud.google.com/compute/docs/gpus/add-gpus
         """
         disk_args = (disk_args or self._GetDiskArgs(instance, image_name,
@@ -1055,7 +1063,7 @@
                              for key, val in metadata.iteritems()]
             body["metadata"] = {"items": metadata_list}
         logger.info("Creating instance: project %s, zone %s, body:%s",
-            	    self._project, zone, body)
+                    self._project, zone, body)
         api = self.service.instances().insert(project=self._project,
                                               zone=zone,
                                               body=body)
@@ -1319,8 +1327,8 @@
 
         entry = "%s:%s" % (user, rsa)
         logger.debug("New RSA entry: %s", entry)
-        sshkey_item["value"] = "\n".join([sshkey_item["value"].strip(), entry
-                                          ]).strip()
+        sshkey_item["value"] = "\n".join([sshkey_item["value"].strip(),
+                                          entry]).strip()
         self.SetCommonInstanceMetadata(metadata)
 
     def CheckAccess(self):
diff --git a/internal/lib/gcompute_client_test.py b/internal/lib/gcompute_client_test.py
index 01b0a9e..7535a72 100644
--- a/internal/lib/gcompute_client_test.py
+++ b/internal/lib/gcompute_client_test.py
@@ -13,16 +13,19 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Tests for acloud.internal.lib.gcompute_client."""
+# pylint: disable=too-many-lines
 
+import copy
 import os
 
-import apiclient.http
+import unittest
 import mock
+
+# pylint: disable=import-error
+import apiclient.http
 from absl.testing import parameterized
 
-import unittest
 from acloud.internal.lib import driver_test_lib
 from acloud.internal.lib import gcompute_client
 from acloud.internal.lib import utils
@@ -34,7 +37,7 @@
     "us-east1-d/disks/fake-disk")
 PROJECT = "fake-project"
 
-
+# pylint: disable=protected-access, too-many-public-methods
 class ComputeClientTest(driver_test_lib.BaseDriverTest, parameterized.TestCase):
     """Test ComputeClient."""
 
@@ -65,6 +68,11 @@
             fake_cfg, mock.MagicMock())
         self.compute_client._service = mock.MagicMock()
 
+        self._disk_args = copy.deepcopy(gcompute_client.BASE_DISK_ARGS)
+        self._disk_args["initializeParams"] = {"diskName": self.INSTANCE,
+                                               "sourceImage": self.IMAGE_URL}
+
+    # pylint: disable=invalid-name
     def _SetupMocksForGetOperationStatus(self, mock_result, operation_scope):
         """A helper class for setting up mocks for testGetOperationStatus*.
 
@@ -135,16 +143,17 @@
                                 {"name": self.OPERATION_NAME},
                                 gcompute_client.OperationScope.GLOBAL)
 
-    def testWaitOnOperation(self):
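+    # Note: mock.patch.object decorators are applied bottom-up, so the mock
+    # arguments below are in reverse order of the decorator list.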
+    @mock.patch.object(errors, "GceOperationTimeoutError")
+    @mock.patch.object(utils, "PollAndWait")
+    def testWaitOnOperation(self, mock_poll, mock_gce_operation_timeout_error):
         """Test WaitOnOperation."""
         mock_error = mock.MagicMock()
-        self.Patch(utils, "PollAndWait")
-        self.Patch(errors, "GceOperationTimeoutError", return_value=mock_error)
+        mock_gce_operation_timeout_error.return_value = mock_error
         self.compute_client.WaitOnOperation(
             operation={"name": self.OPERATION_NAME},
             operation_scope=gcompute_client.OperationScope.REGION,
             scope_name=self.REGION)
-        utils.PollAndWait.assert_called_with(
+        mock_poll.assert_called_with(
             func=self.compute_client._GetOperationStatus,
             expected_return="DONE",
             timeout_exception=mock_error,
@@ -196,7 +205,7 @@
     # pyformat: enable
     def testCreateImage(self, source_uri, source_disk, labels, expected_body):
         """Test CreateImage."""
-        self.Patch(gcompute_client.ComputeClient, "WaitOnOperation")
+        mock_wait = self.Patch(gcompute_client.ComputeClient, "WaitOnOperation")
         resource_mock = mock.MagicMock()
         self.compute_client._service.images = mock.MagicMock(
             return_value=resource_mock)
@@ -205,34 +214,35 @@
             image_name=self.IMAGE, source_uri=source_uri,
             source_disk=source_disk, labels=labels)
         resource_mock.insert.assert_called_with(
-                project=PROJECT, body=expected_body)
-        self.compute_client.WaitOnOperation.assert_called_with(
+            project=PROJECT, body=expected_body)
+        mock_wait.assert_called_with(
             operation=mock.ANY,
             operation_scope=gcompute_client.OperationScope.GLOBAL)
 
-    @unittest.skip("Needs to use mock lib for proper mocking.")
-    def testSetImageLabel(self):
-        # TODO: AddMockObject() needs to be converted to use mock.
-        # self.AddMockObject(self.compute_client._service, "images",
-        #                    return_value=mock.MagicMock(setLabels=mock.MagicMock()))
-        image = {"name": self.IMAGE,
-                 "sourceDisk": GS_IMAGE_SOURCE_DISK,
-                 "labelFingerprint": self.IMAGE_FINGERPRINT,
-                 "labels": {"a": "aaa", "b": "bbb"}}
-        # self.AddMockObject(self.compute_client, "GetImage", return_value=image)
-        new_labels = {"a": "xxx", "c": "ccc"}
-        # Test
-        self.compute_client.SetImageLabels(
-            self.IMAGE, new_labels)
-        # Check result
-        expected_labels = {"a": "xxx", "b": "bbb", "c": "ccc"}
-        self.compute_client._service.images().setLabels.assert_called_with(
-            project=PROJECT,
-            resource=self.IMAGE,
-            body={
-                "labels": expected_labels,
-                "labelFingerprint": self.IMAGE_FINGERPRINT
-            })
+    @mock.patch.object(gcompute_client.ComputeClient, "GetImage")
+    def testSetImageLabel(self, mock_get_image):
+        """Test SetImageLabel."""
+        with mock.patch.object(self.compute_client._service, "images",
+                               return_value=mock.MagicMock(
+                                   setLabels=mock.MagicMock())) as _:
+            image = {"name": self.IMAGE,
+                     "sourceDisk": GS_IMAGE_SOURCE_DISK,
+                     "labelFingerprint": self.IMAGE_FINGERPRINT,
+                     "labels": {"a": "aaa", "b": "bbb"}}
+            mock_get_image.return_value = image
+            new_labels = {"a": "xxx", "c": "ccc"}
+            # Test
+            self.compute_client.SetImageLabels(
+                self.IMAGE, new_labels)
+            # Check result
+            expected_labels = {"a": "xxx", "b": "bbb", "c": "ccc"}
+            self.compute_client._service.images().setLabels.assert_called_with(
+                project=PROJECT,
+                resource=self.IMAGE,
+                body={
+                    "labels": expected_labels,
+                    "labelFingerprint": self.IMAGE_FINGERPRINT
+                })
 
     @parameterized.parameters(
         (GS_IMAGE_SOURCE_URI, GS_IMAGE_SOURCE_DISK),
@@ -243,18 +253,14 @@
                           image_name=self.IMAGE, source_uri=source_uri,
                           source_disk=source_disk)
 
-    def testCreateImageFail(self):
-        """Test CreateImage fails."""
-        self.Patch(
-            gcompute_client.ComputeClient,
-            "WaitOnOperation",
-            side_effect=errors.DriverError("Expected fake error"))
-        self.Patch(
-            gcompute_client.ComputeClient,
-            "CheckImageExists",
-            return_value=True)
-        self.Patch(gcompute_client.ComputeClient, "DeleteImage")
 
+    @mock.patch.object(gcompute_client.ComputeClient, "DeleteImage")
+    @mock.patch.object(gcompute_client.ComputeClient, "CheckImageExists",
+                       return_value=True)
+    @mock.patch.object(gcompute_client.ComputeClient, "WaitOnOperation",
+                       side_effect=errors.DriverError("Expected fake error"))
+    def testCreateImageFail(self, mock_wait, mock_check, mock_delete):
+        """Test CreateImage fails."""
         resource_mock = mock.MagicMock()
         self.compute_client._service.images = mock.MagicMock(
             return_value=resource_mock)
@@ -274,11 +280,11 @@
             source_uri=GS_IMAGE_SOURCE_URI)
         resource_mock.insert.assert_called_with(
             project=PROJECT, body=expected_body)
-        self.compute_client.WaitOnOperation.assert_called_with(
+        mock_wait.assert_called_with(
             operation=mock.ANY,
             operation_scope=gcompute_client.OperationScope.GLOBAL)
-        self.compute_client.CheckImageExists.assert_called_with(self.IMAGE)
-        self.compute_client.DeleteImage.assert_called_with(self.IMAGE)
+        mock_check.assert_called_with(self.IMAGE)
+        mock_delete.assert_called_with(self.IMAGE)
 
     def testCheckImageExistsTrue(self):
         """Test CheckImageExists return True."""
@@ -301,9 +307,9 @@
             side_effect=errors.ResourceNotFoundError(404, "no image"))
         self.assertFalse(self.compute_client.CheckImageExists(self.IMAGE))
 
-    def testDeleteImage(self):
+    @mock.patch.object(gcompute_client.ComputeClient, "WaitOnOperation")
+    def testDeleteImage(self, mock_wait):
         """Test DeleteImage."""
-        self.Patch(gcompute_client.ComputeClient, "WaitOnOperation")
         resource_mock = mock.MagicMock()
         self.compute_client._service.images = mock.MagicMock(
             return_value=resource_mock)
@@ -311,7 +317,7 @@
         self.compute_client.DeleteImage(self.IMAGE)
         resource_mock.delete.assert_called_with(
             project=PROJECT, image=self.IMAGE)
-        self.assertTrue(self.compute_client.WaitOnOperation.called)
+        self.assertTrue(mock_wait.called)
 
     def _SetupBatchHttpRequestMock(self):
         """Setup BatchHttpRequest mock."""
@@ -330,10 +336,10 @@
         mock_batch.execute = _Execute
         self.Patch(apiclient.http, "BatchHttpRequest", return_value=mock_batch)
 
-    def testDeleteImages(self):
+    @mock.patch.object(gcompute_client.ComputeClient, "WaitOnOperation")
+    def testDeleteImages(self, mock_wait):
         """Test DeleteImages."""
         self._SetupBatchHttpRequestMock()
-        self.Patch(gcompute_client.ComputeClient, "WaitOnOperation")
         fake_images = ["fake_image_1", "fake_image_2"]
         mock_api = mock.MagicMock()
         resource_mock = mock.MagicMock()
@@ -349,8 +355,7 @@
             mock.call(project=PROJECT, image="fake_image_2")
         ]
         resource_mock.delete.assert_has_calls(calls, any_order=True)
-        self.assertEqual(
-            gcompute_client.ComputeClient.WaitOnOperation.call_count, 2)
+        self.assertEqual(mock_wait.call_count, 2)
         self.assertEqual(error_msgs, [])
         self.assertEqual(failed, [])
         self.assertEqual(set(deleted), set(fake_images))
@@ -438,22 +443,16 @@
         resource_mock.list.assert_has_calls(calls)
         self.assertEqual(instances, [instance_1, instance_2])
 
-    def testCreateInstance(self):
+    @mock.patch.object(gcompute_client.ComputeClient, "GetImage")
+    @mock.patch.object(gcompute_client.ComputeClient, "GetNetworkUrl")
+    @mock.patch.object(gcompute_client.ComputeClient, "GetMachineType")
+    @mock.patch.object(gcompute_client.ComputeClient, "WaitOnOperation")
+    def testCreateInstance(self, mock_wait, mock_get_mach_type,
+                           mock_get_network_url, mock_get_image):
         """Test CreateInstance."""
-        self.Patch(gcompute_client.ComputeClient, "WaitOnOperation")
-        self.Patch(
-            gcompute_client.ComputeClient,
-            "GetMachineType",
-            return_value={"selfLink": self.MACHINE_TYPE_URL})
-        self.Patch(
-            gcompute_client.ComputeClient,
-            "GetNetworkUrl",
-            return_value=self.NETWORK_URL)
-        self.Patch(
-            gcompute_client.ComputeClient,
-            "GetImage",
-            return_value={"selfLink": self.IMAGE_URL})
-
+        mock_get_mach_type.return_value = {"selfLink": self.MACHINE_TYPE_URL}
+        mock_get_network_url.return_value = self.NETWORK_URL
+        mock_get_image.return_value = {"selfLink": self.IMAGE_URL}
         resource_mock = mock.MagicMock()
         self.compute_client._service.instances = mock.MagicMock(
             return_value=resource_mock)
@@ -471,18 +470,7 @@
                     ],
                 }
             ],
-            "disks": [
-                {
-                    "type": "PERSISTENT",
-                    "boot": True,
-                    "mode": "READ_WRITE",
-                    "autoDelete": True,
-                    "initializeParams": {
-                        "diskName": self.INSTANCE,
-                        "sourceImage": self.IMAGE_URL,
-                    },
-                }
-            ],
+            "disks": [self._disk_args],
             "serviceAccounts": [
                 {"email": "default",
                  "scopes": self.compute_client.DEFAULT_INSTANCE_SCOPE}
@@ -503,30 +491,24 @@
 
         resource_mock.insert.assert_called_with(
             project=PROJECT, zone=self.ZONE, body=expected_body)
-        self.compute_client.WaitOnOperation.assert_called_with(
+        mock_wait.assert_called_with(
             mock.ANY,
             operation_scope=gcompute_client.OperationScope.ZONE,
             scope_name=self.ZONE)
 
-    def testCreateInstanceWithGpu(self):
+    @mock.patch.object(gcompute_client.ComputeClient, "GetAcceleratorUrl")
+    @mock.patch.object(gcompute_client.ComputeClient, "GetImage")
+    @mock.patch.object(gcompute_client.ComputeClient, "GetNetworkUrl")
+    @mock.patch.object(gcompute_client.ComputeClient, "GetMachineType")
+    @mock.patch.object(gcompute_client.ComputeClient, "WaitOnOperation")
+    def testCreateInstanceWithGpu(self, mock_wait, mock_get_mach,
+                                  mock_get_network, mock_get_image,
+                                  mock_get_accel):
         """Test CreateInstance with a GPU parameter not set to None."""
-        self.Patch(gcompute_client.ComputeClient, "WaitOnOperation")
-        self.Patch(
-            gcompute_client.ComputeClient,
-            "GetMachineType",
-            return_value={"selfLink": self.MACHINE_TYPE_URL})
-        self.Patch(
-            gcompute_client.ComputeClient,
-            "GetNetworkUrl",
-            return_value=self.NETWORK_URL)
-        self.Patch(
-            gcompute_client.ComputeClient,
-            "GetAcceleratorUrl",
-            return_value=self.ACCELERATOR_URL)
-        self.Patch(
-            gcompute_client.ComputeClient,
-            "GetImage",
-            return_value={"selfLink": self.IMAGE_URL})
+        mock_get_mach.return_value = {"selfLink": self.MACHINE_TYPE_URL}
+        mock_get_network.return_value = self.NETWORK_URL
+        mock_get_accel.return_value = self.ACCELERATOR_URL
+        mock_get_image.return_value = {"selfLink": self.IMAGE_URL}
 
         resource_mock = mock.MagicMock()
         self.compute_client._service.instances = mock.MagicMock(
@@ -546,16 +528,7 @@
                     "type": "ONE_TO_ONE_NAT"
                 }],
             }],
-            "disks": [{
-                "type": "PERSISTENT",
-                "boot": True,
-                "mode": "READ_WRITE",
-                "autoDelete": True,
-                "initializeParams": {
-                    "diskName": self.INSTANCE,
-                    "sourceImage": self.IMAGE_URL,
-                },
-            }],
+            "disks": [self._disk_args],
             "serviceAccounts": [{
                 "email": "default",
                 "scopes": self.compute_client.DEFAULT_INSTANCE_SCOPE
@@ -586,13 +559,13 @@
 
         resource_mock.insert.assert_called_with(
             project=PROJECT, zone=self.ZONE, body=expected_body)
-        self.compute_client.WaitOnOperation.assert_called_with(
+        mock_wait.assert_called_with(
             mock.ANY, operation_scope=gcompute_client.OperationScope.ZONE,
             scope_name=self.ZONE)
 
-    def testDeleteInstance(self):
+    @mock.patch.object(gcompute_client.ComputeClient, "WaitOnOperation")
+    def testDeleteInstance(self, mock_wait):
         """Test DeleteInstance."""
-        self.Patch(gcompute_client.ComputeClient, "WaitOnOperation")
         resource_mock = mock.MagicMock()
         self.compute_client._service.instances = mock.MagicMock(
             return_value=resource_mock)
@@ -601,15 +574,15 @@
             instance=self.INSTANCE, zone=self.ZONE)
         resource_mock.delete.assert_called_with(
             project=PROJECT, zone=self.ZONE, instance=self.INSTANCE)
-        self.compute_client.WaitOnOperation.assert_called_with(
+        mock_wait.assert_called_with(
             mock.ANY,
             operation_scope=gcompute_client.OperationScope.ZONE,
             scope_name=self.ZONE)
 
-    def testDeleteInstances(self):
+    @mock.patch.object(gcompute_client.ComputeClient, "WaitOnOperation")
+    def testDeleteInstances(self, mock_wait):
         """Test DeleteInstances."""
         self._SetupBatchHttpRequestMock()
-        self.Patch(gcompute_client.ComputeClient, "WaitOnOperation")
         fake_instances = ["fake_instance_1", "fake_instance_2"]
         mock_api = mock.MagicMock()
         resource_mock = mock.MagicMock()
@@ -629,8 +602,7 @@
                 zone=self.ZONE)
         ]
         resource_mock.delete.assert_has_calls(calls, any_order=True)
-        self.assertEqual(
-            gcompute_client.ComputeClient.WaitOnOperation.call_count, 2)
+        self.assertEqual(mock_wait.call_count, 2)
         self.assertEqual(error_msgs, [])
         self.assertEqual(failed, [])
         self.assertEqual(set(deleted), set(fake_instances))
@@ -639,7 +611,7 @@
                               (None, PROJECT))
     def testCreateDisk(self, source_project, expected_project_to_use):
         """Test CreateDisk with images."""
-        self.Patch(gcompute_client.ComputeClient, "WaitOnOperation")
+        mock_wait = self.Patch(gcompute_client.ComputeClient, "WaitOnOperation")
         resource_mock = mock.MagicMock()
         self.compute_client._service.disks = mock.MagicMock(
             return_value=resource_mock)
@@ -660,14 +632,14 @@
                     "projects/%s/zones/%s/diskTypes/pd-standard" % (PROJECT,
                                                                     self.ZONE)
             })
-        self.assertTrue(self.compute_client.WaitOnOperation.called)
+        self.assertTrue(mock_wait.called)
 
     @parameterized.parameters(
         (gcompute_client.PersistentDiskType.STANDARD, "pd-standard"),
         (gcompute_client.PersistentDiskType.SSD, "pd-ssd"))
     def testCreateDiskWithType(self, disk_type, expected_disk_type_string):
         """Test CreateDisk with images."""
-        self.Patch(gcompute_client.ComputeClient, "WaitOnOperation")
+        mock_wait = self.Patch(gcompute_client.ComputeClient, "WaitOnOperation")
         resource_mock = mock.MagicMock()
         self.compute_client._service.disks = mock.MagicMock(
             return_value=resource_mock)
@@ -692,11 +664,11 @@
                     "projects/%s/zones/%s/diskTypes/%s" %
                     (PROJECT, self.ZONE, expected_disk_type_string)
             })
-        self.assertTrue(self.compute_client.WaitOnOperation.called)
+        self.assertTrue(mock_wait.called)
 
-    def testAttachDisk(self):
+    @mock.patch.object(gcompute_client.ComputeClient, "WaitOnOperation")
+    def testAttachDisk(self, mock_wait):
         """Test AttachDisk."""
-        self.Patch(gcompute_client.ComputeClient, "WaitOnOperation")
         resource_mock = mock.MagicMock()
         self.compute_client._service.instances = mock.MagicMock(
             return_value=resource_mock)
@@ -712,11 +684,11 @@
                 "deviceName": "fake_disk",
                 "source": "fake-selfLink"
             })
-        self.assertTrue(self.compute_client.WaitOnOperation.called)
+        self.assertTrue(mock_wait.called)
 
-    def testDetachDisk(self):
+    @mock.patch.object(gcompute_client.ComputeClient, "WaitOnOperation")
+    def testDetachDisk(self, mock_wait):
         """Test DetachDisk."""
-        self.Patch(gcompute_client.ComputeClient, "WaitOnOperation")
         resource_mock = mock.MagicMock()
         self.compute_client._service.instances = mock.MagicMock(
             return_value=resource_mock)
@@ -727,15 +699,13 @@
             zone=self.ZONE,
             instance="fake_instance_1",
             deviceName="fake_disk")
-        self.assertTrue(self.compute_client.WaitOnOperation.called)
+        self.assertTrue(mock_wait.called)
 
-    def testAttachAccelerator(self):
+    @mock.patch.object(gcompute_client.ComputeClient, "GetAcceleratorUrl")
+    @mock.patch.object(gcompute_client.ComputeClient, "WaitOnOperation")
+    def testAttachAccelerator(self, mock_wait, mock_get_accel):
         """Test AttachAccelerator."""
-        self.Patch(gcompute_client.ComputeClient, "WaitOnOperation")
-        self.Patch(
-            gcompute_client.ComputeClient,
-            "GetAcceleratorUrl",
-            return_value=self.ACCELERATOR_URL)
+        mock_get_accel.return_value = self.ACCELERATOR_URL
         resource_mock = mock.MagicMock()
         self.compute_client._service.instances = mock.MagicMock(
             return_value=resource_mock)
@@ -752,11 +722,12 @@
                     "acceleratorCount": 1
                 }]
             })
-        self.assertTrue(self.compute_client.WaitOnOperation.called)
+        self.assertTrue(mock_wait.called)
 
-    def testBatchExecuteOnInstances(self):
+    @mock.patch.object(gcompute_client.ComputeClient, "WaitOnOperation")
+    def testBatchExecuteOnInstances(self, mock_wait):
+        """Test BatchExecuteOnInstances."""
         self._SetupBatchHttpRequestMock()
-        self.Patch(gcompute_client.ComputeClient, "WaitOnOperation")
         action = mock.MagicMock(return_value=mock.MagicMock())
         fake_instances = ["fake_instance_1", "fake_instance_2"]
         done, failed, error_msgs = self.compute_client._BatchExecuteOnInstances(
@@ -764,15 +735,14 @@
         calls = [mock.call(instance="fake_instance_1"),
                  mock.call(instance="fake_instance_2")]
         action.assert_has_calls(calls, any_order=True)
-        self.assertEqual(
-            gcompute_client.ComputeClient.WaitOnOperation.call_count, 2)
+        self.assertEqual(mock_wait.call_count, 2)
         self.assertEqual(set(done), set(fake_instances))
         self.assertEqual(error_msgs, [])
         self.assertEqual(failed, [])
 
-    def testResetInstance(self):
+    @mock.patch.object(gcompute_client.ComputeClient, "WaitOnOperation")
+    def testResetInstance(self, mock_wait):
         """Test ResetInstance."""
-        self.Patch(gcompute_client.ComputeClient, "WaitOnOperation")
         resource_mock = mock.MagicMock()
         self.compute_client._service.instances = mock.MagicMock(
             return_value=resource_mock)
@@ -781,7 +751,7 @@
             instance=self.INSTANCE, zone=self.ZONE)
         resource_mock.reset.assert_called_with(
             project=PROJECT, zone=self.ZONE, instance=self.INSTANCE)
-        self.compute_client.WaitOnOperation.assert_called_with(
+        mock_wait.assert_called_with(
             mock.ANY,
             operation_scope=gcompute_client.OperationScope.ZONE,
             scope_name=self.ZONE)
@@ -799,7 +769,7 @@
             expected_result: An integer, 0, 1 or -1, or None if not set.
             expected_error_type: An exception type, if set will check for exception.
         """
-        self.Patch(
+        mock_get_mach_type = self.Patch(
             gcompute_client.ComputeClient,
             "GetMachineType",
             side_effect=[machine_info_1, machine_info_2])
@@ -812,7 +782,7 @@
                                                             self.ZONE)
             self.assertEqual(result, expected_result)
 
-        self.compute_client.GetMachineType.assert_has_calls(
+        mock_get_mach_type.assert_has_calls(
             [mock.call("name1", self.ZONE), mock.call("name2", self.ZONE)])
 
     def testCompareMachineSizeSmall(self):
@@ -887,10 +857,12 @@
             port=1)
 
     def testGetSerialPortOutput(self):
+        """Test GetSerialPortOutput."""
         response = {"contents": "fake contents"}
         self._GetSerialPortOutputTestHelper(response)
 
     def testGetSerialPortOutputFail(self):
+        """Test GetSerialPortOutputFail."""
         response = {"malformed": "fake contents"}
         self._GetSerialPortOutputTestHelper(response)
 
@@ -991,10 +963,10 @@
                                 self.compute_client.AddSshRsa, fake_user,
                                 "/path/to/test_rsa.pub")
 
-    def testDeleteDisks(self):
+    @mock.patch.object(gcompute_client.ComputeClient, "WaitOnOperation")
+    def testDeleteDisks(self, mock_wait):
         """Test DeleteDisks."""
         self._SetupBatchHttpRequestMock()
-        self.Patch(gcompute_client.ComputeClient, "WaitOnOperation")
         fake_disks = ["fake_disk_1", "fake_disk_2"]
         mock_api = mock.MagicMock()
         resource_mock = mock.MagicMock()
@@ -1010,19 +982,20 @@
             mock.call(project=PROJECT, disk="fake_disk_2", zone=self.ZONE)
         ]
         resource_mock.delete.assert_has_calls(calls, any_order=True)
-        self.assertEqual(
-            gcompute_client.ComputeClient.WaitOnOperation.call_count, 2)
+        self.assertEqual(mock_wait.call_count, 2)
         self.assertEqual(error_msgs, [])
         self.assertEqual(failed, [])
         self.assertEqual(set(deleted), set(fake_disks))
 
     def testRetryOnFingerPrintError(self):
+        """Test RetryOnFingerPrintError."""
         @utils.RetryOnException(gcompute_client._IsFingerPrintError, 10)
         def Raise412(sentinel):
-          if not sentinel.hitFingerPrintConflict.called:
-            sentinel.hitFingerPrintConflict()
-            raise errors.HttpError(412, "resource labels have changed")
-          return "Passed"
+            """Raise 412 HTTP exception."""
+            if not sentinel.hitFingerPrintConflict.called:
+                sentinel.hitFingerPrintConflict()
+                raise errors.HttpError(412, "resource labels have changed")
+            return "Passed"
 
         sentinel = mock.MagicMock()
         result = Raise412(sentinel)
diff --git a/internal/lib/goldfish_compute_client.py b/internal/lib/goldfish_compute_client.py
index 01278fc..9697d8e 100644
--- a/internal/lib/goldfish_compute_client.py
+++ b/internal/lib/goldfish_compute_client.py
@@ -13,7 +13,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """A client that manages Goldfish Virtual Device on compute engine.
 
 ** GoldfishComputeClient **
@@ -37,12 +36,11 @@
        goldfish_compute_client.GoldfishComputeClient
 
 
-TODO(jansen): This class should likely be merged with CvdComputeClient
+TODO: This class should likely be merged with CvdComputeClient
 """
 
 import getpass
 import logging
-import os
 
 from acloud.internal.lib import android_compute_client
 from acloud.internal.lib import gcompute_client
@@ -52,150 +50,151 @@
 
 
 class GoldfishComputeClient(android_compute_client.AndroidComputeClient):
-  """Client that manages Goldfish based Android Virtual Device."""
+    """Client that manages Goldfish based Android Virtual Device.
 
-  # To determine if the boot failed
-  BOOT_FAILED_MSG = "VIRTUAL_DEVICE_FAILED"
-
-  # To determine the failure reason
-  # If the emulator build is not available
-  EMULATOR_FETCH_FAILED_MSG = "EMULATOR_FETCH_FAILED"
-  # If the system image build is not available
-  ANDROID_FETCH_FAILED_MSG = "ANDROID_FETCH_FAILED"
-  # If the emulator could not boot in time
-  BOOT_TIMEOUT_MSG = "VIRTUAL_DEVICE_BOOT_FAILED"
-
-  def __init__(self, acloud_config, oauth2_credentials):
-    """Initialize.
-
-    Args:
-      acloud_config: An AcloudConfig object.
-      oauth2_credentials: An oauth2client.OAuth2Credentials instance.
+    Attributes:
+        acloud_config: An AcloudConfig object.
+        oauth2_credentials: An oauth2client.OAuth2Credentials instance.
     """
-    super(GoldfishComputeClient, self).__init__(acloud_config,
-                                                oauth2_credentials)
 
-  def _GetDiskArgs(self, disk_name, image_name, image_project, disk_size_gb):
-    """Helper to generate disk args that is used to create an instance.
+    # To determine if the boot failed
+    BOOT_FAILED_MSG = "VIRTUAL_DEVICE_FAILED"
 
-    Args:
-      disk_name: A string
-      image_name: A string
-      image_project: A string
-      disk_size_gb: An integer
+    # To determine the failure reason
+    # If the emulator build is not available
+    EMULATOR_FETCH_FAILED_MSG = "EMULATOR_FETCH_FAILED"
+    # If the system image build is not available
+    ANDROID_FETCH_FAILED_MSG = "ANDROID_FETCH_FAILED"
+    # If the emulator could not boot in time
+    BOOT_TIMEOUT_MSG = "VIRTUAL_DEVICE_BOOT_FAILED"
 
-    Returns:
-      A dictionary representing disk args.
-    """
-    return [{
-        "type": "PERSISTENT",
-        "boot": True,
-        "mode": "READ_WRITE",
-        "autoDelete": True,
-        "initializeParams": {
-            "diskName":
+    #pylint: disable=signature-differs
+    def _GetDiskArgs(self, disk_name, image_name, image_project, disk_size_gb):
+        """Helper to generate disk args that is used to create an instance.
+
+        Args:
+            disk_name: String, the name of the disk.
+            image_name: String, the name of the system image.
+            image_project: String, the name of the project where the image belongs.
+            disk_size_gb: Integer, size of the blank data disk in GB.
+
+        Returns:
+            A dictionary representing disk args.
+        """
+        return [{
+            "type": "PERSISTENT",
+            "boot": True,
+            "mode": "READ_WRITE",
+            "autoDelete": True,
+            "initializeParams": {
+                "diskName":
                 disk_name,
-            "sourceImage":
+                "sourceImage":
                 self.GetImage(image_name, image_project)["selfLink"],
-            "diskSizeGb":
+                "diskSizeGb":
                 disk_size_gb
-        },
-    }]
+            },
+        }]
+    #pylint: enable=signature-differs
 
-  def CheckBootFailure(self, serial_out, instance):
-    """Overriding method from the parent class.
+    def CheckBootFailure(self, serial_out, instance):
+        """Overriding method from the parent class.
 
-    Args:
-      serial_out: A string
-      instance: A string
+        Args:
+            serial_out: String, serial port output.
+            instance: String, instance name.
 
-    Raises:
-      Raises an errors.DeviceBootError exception if a failure is detected.
-    """
-    if self.BOOT_FAILED_MSG in serial_out:
-      if self.EMULATOR_FETCH_FAILED_MSG in serial_out:
-        raise errors.DeviceBootError(
-            "Failed to download emulator build. Re-run with a newer build.")
-      if self.ANDROID_FETCH_FAILED_MSG in serial_out:
-        raise errors.DeviceBootError(
-            "Failed to download system image build. Re-run with a newer build.")
-      if self.BOOT_TIMEOUT_MSG in serial_out:
-        raise errors.DeviceBootError(
-            "Emulator timed out while booting.")
+        Raises:
+            errors.DeviceBootError: If a failure is detected.
+        """
+        if self.BOOT_FAILED_MSG in serial_out:
+            if self.EMULATOR_FETCH_FAILED_MSG in serial_out:
+                raise errors.DeviceBootError(
+                    "Failed to download emulator build. Re-run with a newer build."
+                )
+            if self.ANDROID_FETCH_FAILED_MSG in serial_out:
+                raise errors.DeviceBootError(
+                    "Failed to download system image build. Re-run with a newer build."
+                )
+            if self.BOOT_TIMEOUT_MSG in serial_out:
+                raise errors.DeviceBootError(
+                    "Emulator timed out while booting.")
 
-  def CreateInstance(self,
-                     instance,
-                     image_name,
-                     image_project,
-                     build_target,
-                     branch,
-                     build_id,
-                     emulator_branch=None,
-                     emulator_build_id=None,
-                     blank_data_disk_size_gb=None,
-                     gpu=None):
-    """Create a goldfish instance given a stable host image and a build id.
+    # pylint: disable=too-many-locals,arguments-differ
+    # TODO: Refactor CreateInstance to pass in an object instead of all these args.
+    def CreateInstance(self,
+                       instance,
+                       image_name,
+                       image_project,
+                       build_target,
+                       branch,
+                       build_id,
+                       emulator_branch=None,
+                       emulator_build_id=None,
+                       blank_data_disk_size_gb=None,
+                       gpu=None):
+        """Create a goldfish instance given a stable host image and a build id.
 
-    Args:
-      instance: instance name.
-      image_name: A string, the name of the system image.
-      image_project: A string, name of the project where the image belongs.
-                     Assume the default project if None.
-      build_target: Target name, e.g. "sdk_phone_x86_64-sdk"
-      branch: Branch name, e.g. "git_pi-dev"
-      build_id: Build id, a string, e.g. "2263051", "P2804227"
-      emulator_branch: emulator branch name, e.g.
-        "aosp-emu-master-dev"
-      emulator_build_id: emulator build id, a string, e.g. "2263051", "P2804227"
-      blank_data_disk_size_gb: Size of the blank data disk in GB.
-      gpu: GPU that should be attached to the instance, or None of no
-        acceleration is needed. e.g. "nvidia-tesla-k80"
-    """
-    self._CheckMachineSize()
+        Args:
+            instance: String, instance name.
+            image_name: String, the name of the system image.
+            image_project: String, name of the project where the image belongs.
+                           Assume the default project if None.
+            build_target: String, target name, e.g. "sdk_phone_x86_64-sdk"
+            branch: String, branch name, e.g. "git_pi-dev"
+            build_id: String, build id, e.g. "2263051", "P2804227"
+            emulator_branch: String, emulator branch name, e.g. "aosp-emu-master-dev"
+            emulator_build_id: String, emulator build id, e.g. "2263051", "P2804227"
+            blank_data_disk_size_gb: Integer, size of the blank data disk in GB.
+            gpu: String, GPU that should be attached to the instance, or None if no
+                 acceleration is needed. e.g. "nvidia-tesla-k80"
+        """
+        self._CheckMachineSize()
 
-    # Add space for possible data partition.
-    boot_disk_size_gb = (
-        int(self.GetImage(image_name, image_project)["diskSizeGb"]) +
-        blank_data_disk_size_gb)
-    disk_args = self._GetDiskArgs(instance, image_name, image_project,
-                                  boot_disk_size_gb)
+        # Add space for possible data partition.
+        boot_disk_size_gb = (
+            int(self.GetImage(image_name, image_project)["diskSizeGb"]) +
+            blank_data_disk_size_gb)
+        disk_args = self._GetDiskArgs(instance, image_name, image_project,
+                                      boot_disk_size_gb)
 
-    # Goldfish instances are metadata compatible with cuttlefish devices.
-    # See details goto/goldfish-deployment
-    metadata = self._metadata.copy()
-    resolution = self._resolution.split("x")
+        # Goldfish instances are metadata compatible with cuttlefish devices.
+        # See details goto/goldfish-deployment
+        metadata = self._metadata.copy()
+        resolution = self._resolution.split("x")
 
-    # Note that we use the same metadata naming conventions as cuttlefish
-    metadata["cvd_01_dpi"] = resolution[3]
-    metadata["cvd_01_fetch_android_build_target"] = build_target
-    metadata["cvd_01_fetch_android_bid"] = "{branch}/{build_id}".format(
-        branch=branch, build_id=build_id)
-    if emulator_branch and emulator_build_id:
-      metadata["cvd_01_fetch_emulator_bid"] = "{branch}/{build_id}".format(
-          branch=emulator_branch, build_id=emulator_build_id)
-    metadata["cvd_01_launch"] = "1"
-    metadata["cvd_01_x_res"] = resolution[0]
-    metadata["cvd_01_y_res"] = resolution[1]
+        # Note that we use the same metadata naming conventions as cuttlefish
+        metadata["cvd_01_dpi"] = resolution[3]
+        metadata["cvd_01_fetch_android_build_target"] = build_target
+        metadata["cvd_01_fetch_android_bid"] = "{branch}/{build_id}".format(
+            branch=branch, build_id=build_id)
+        if emulator_branch and emulator_build_id:
+            metadata[
+                "cvd_01_fetch_emulator_bid"] = "{branch}/{build_id}".format(
+                    branch=emulator_branch, build_id=emulator_build_id)
+        metadata["cvd_01_launch"] = "1"
+        metadata["cvd_01_x_res"] = resolution[0]
+        metadata["cvd_01_y_res"] = resolution[1]
 
-    # Add per-instance ssh key
-    if self._ssh_public_key_path:
-      rsa = self._LoadSshPublicKey(self._ssh_public_key_path)
-      logger.info("ssh_public_key_path is specified in config: %s, "
-                  "will add the key to the instance.",
-                  self._ssh_public_key_path)
-      metadata["sshKeys"] = "%s:%s" % (getpass.getuser(), rsa)
-    else:
-      logger.warning("ssh_public_key_path is not specified in config, "
-                     "only project-wide key will be effective.")
+        # Add per-instance ssh key
+        if self._ssh_public_key_path:
+            rsa = self._LoadSshPublicKey(self._ssh_public_key_path)
+            logger.info(
+                "ssh_public_key_path is specified in config: %s, "
+                "will add the key to the instance.", self._ssh_public_key_path)
+            metadata["sshKeys"] = "%s:%s" % (getpass.getuser(), rsa)
+        else:
+            logger.warning("ssh_public_key_path is not specified in config, "
+                           "only project-wide key will be effective.")
 
-    gcompute_client.ComputeClient.CreateInstance(
-        self,
-        instance=instance,
-        image_name=image_name,
-        image_project=image_project,
-        disk_args=disk_args,
-        metadata=metadata,
-        machine_type=self._machine_type,
-        network=self._network,
-        zone=self._zone,
-        gpu=gpu)
+        gcompute_client.ComputeClient.CreateInstance(
+            self,
+            instance=instance,
+            image_name=image_name,
+            image_project=image_project,
+            disk_args=disk_args,
+            metadata=metadata,
+            machine_type=self._machine_type,
+            network=self._network,
+            zone=self._zone,
+            gpu=gpu)
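
A minimal sketch (illustrative only, not part of this change) of how CreateInstance
above turns the configured "WIDTHxHEIGHTx32xDPI" resolution string and build
information into the cuttlefish-compatible cvd_01_* metadata; the helper name
build_goldfish_metadata is hypothetical:

    def build_goldfish_metadata(resolution, build_target, branch, build_id,
                                emulator_branch=None, emulator_build_id=None):
        """Hypothetical helper mirroring the metadata assembly above."""
        width, height, _depth, dpi = resolution.split("x")
        metadata = {
            "cvd_01_dpi": dpi,
            "cvd_01_fetch_android_build_target": build_target,
            "cvd_01_fetch_android_bid": "%s/%s" % (branch, build_id),
            "cvd_01_launch": "1",
            "cvd_01_x_res": width,
            "cvd_01_y_res": height,
        }
        if emulator_branch and emulator_build_id:
            metadata["cvd_01_fetch_emulator_bid"] = "%s/%s" % (
                emulator_branch, emulator_build_id)
        return metadata

    # e.g. build_goldfish_metadata("720x1280x32x160", "sdk_phone_x86_64-sdk",
    #                              "git_pi-dev", "2263051")["cvd_01_dpi"] == "160"
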
diff --git a/internal/lib/goldfish_compute_client_test.py b/internal/lib/goldfish_compute_client_test.py
index 8e116d2..461fa48 100644
--- a/internal/lib/goldfish_compute_client_test.py
+++ b/internal/lib/goldfish_compute_client_test.py
@@ -13,117 +13,121 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Tests for acloud.internal.lib.goldfish_compute_client."""
+import unittest
 import mock
 
-import unittest
 from acloud.internal.lib import driver_test_lib
 from acloud.internal.lib import gcompute_client
 from acloud.internal.lib import goldfish_compute_client
 
 
 class GoldfishComputeClientTest(driver_test_lib.BaseDriverTest):
-  """Test GoldfishComputeClient."""
+    """Test GoldfishComputeClient."""
 
-  SSH_PUBLIC_KEY_PATH = ""
-  INSTANCE = "fake-instance"
-  IMAGE = "fake-image"
-  IMAGE_PROJECT = "fake-iamge-project"
-  MACHINE_TYPE = "fake-machine-type"
-  NETWORK = "fake-network"
-  ZONE = "fake-zone"
-  BRANCH = "fake-branch"
-  TARGET = "sdk_phone_x86_64-sdk"
-  BUILD_ID = "2263051"
-  EMULATOR_BRANCH = "aosp-emu-master-dev"
-  EMULATOR_BUILD_ID = "1234567"
-  DPI = 160
-  X_RES = 720
-  Y_RES = 1280
-  METADATA = {"metadata_key": "metadata_value"}
-  EXTRA_DATA_DISK_SIZE_GB = 4
-  BOOT_DISK_SIZE_GB = 10
-  GPU = "nvidia-tesla-k80"
+    SSH_PUBLIC_KEY_PATH = ""
+    INSTANCE = "fake-instance"
+    IMAGE = "fake-image"
+    IMAGE_PROJECT = "fake-iamge-project"
+    MACHINE_TYPE = "fake-machine-type"
+    NETWORK = "fake-network"
+    ZONE = "fake-zone"
+    BRANCH = "fake-branch"
+    TARGET = "sdk_phone_x86_64-sdk"
+    BUILD_ID = "2263051"
+    EMULATOR_BRANCH = "aosp-emu-master-dev"
+    EMULATOR_BUILD_ID = "1234567"
+    DPI = 160
+    X_RES = 720
+    Y_RES = 1280
+    METADATA = {"metadata_key": "metadata_value"}
+    EXTRA_DATA_DISK_SIZE_GB = 4
+    BOOT_DISK_SIZE_GB = 10
+    GPU = "nvidia-tesla-k80"
 
-  def _GetFakeConfig(self):
-    """Create a fake configuration object.
+    def _GetFakeConfig(self):
+        """Create a fake configuration object.
 
-    Returns:
-      A fake configuration mock object.
-    """
-    fake_cfg = mock.MagicMock()
-    fake_cfg.ssh_public_key_path = self.SSH_PUBLIC_KEY_PATH
-    fake_cfg.machine_type = self.MACHINE_TYPE
-    fake_cfg.network = self.NETWORK
-    fake_cfg.zone = self.ZONE
-    fake_cfg.resolution = "{x}x{y}x32x{dpi}".format(
-        x=self.X_RES, y=self.Y_RES, dpi=self.DPI)
-    fake_cfg.metadata_variable = self.METADATA
-    fake_cfg.extra_data_disk_size_gb = self.EXTRA_DATA_DISK_SIZE_GB
-    return fake_cfg
+        Returns:
+            A fake configuration mock object.
+        """
+        fake_cfg = mock.MagicMock()
+        fake_cfg.ssh_public_key_path = self.SSH_PUBLIC_KEY_PATH
+        fake_cfg.machine_type = self.MACHINE_TYPE
+        fake_cfg.network = self.NETWORK
+        fake_cfg.zone = self.ZONE
+        fake_cfg.resolution = "{x}x{y}x32x{dpi}".format(
+            x=self.X_RES, y=self.Y_RES, dpi=self.DPI)
+        fake_cfg.metadata_variable = self.METADATA
+        fake_cfg.extra_data_disk_size_gb = self.EXTRA_DATA_DISK_SIZE_GB
+        return fake_cfg
 
-  def setUp(self):
-    """Set up the test."""
-    super(GoldfishComputeClientTest, self).setUp()
-    self.Patch(goldfish_compute_client.GoldfishComputeClient,
-               "InitResourceHandle")
-    self.goldfish_compute_client = (
-        goldfish_compute_client.GoldfishComputeClient(self._GetFakeConfig(),
-                                                      mock.MagicMock()))
-    self.Patch(
-        gcompute_client.ComputeClient, "CompareMachineSize", return_value=1)
-    self.Patch(
-        gcompute_client.ComputeClient,
-        "GetImage",
-        return_value={"diskSizeGb": 10})
-    self.Patch(gcompute_client.ComputeClient, "CreateInstance")
-    self.Patch(
-        goldfish_compute_client.GoldfishComputeClient,
-        "_GetDiskArgs",
-        return_value=[{
-            "fake_arg": "fake_value"
-        }])
+    def setUp(self):
+        """Set up the test."""
+        super(GoldfishComputeClientTest, self).setUp()
+        self.Patch(goldfish_compute_client.GoldfishComputeClient,
+                   "InitResourceHandle")
+        self.goldfish_compute_client = (
+            goldfish_compute_client.GoldfishComputeClient(
+                self._GetFakeConfig(), mock.MagicMock()))
+        self.Patch(
+            gcompute_client.ComputeClient,
+            "CompareMachineSize",
+            return_value=1)
+        self.Patch(
+            gcompute_client.ComputeClient,
+            "GetImage",
+            return_value={"diskSizeGb": 10})
+        self.Patch(gcompute_client.ComputeClient, "CreateInstance")
+        self.Patch(
+            goldfish_compute_client.GoldfishComputeClient,
+            "_GetDiskArgs",
+            return_value=[{
+                "fake_arg": "fake_value"
+            }])
 
-  def testCreateInstance(self):
-    """Test CreateInstance."""
+    def testCreateInstance(self):
+        """Test CreateInstance."""
 
-    expected_metadata = {
-        "cvd_01_dpi":
+        expected_metadata = {
+            "cvd_01_dpi":
             str(self.DPI),
-        "cvd_01_fetch_android_build_target":
+            "cvd_01_fetch_android_build_target":
             self.TARGET,
-        "cvd_01_fetch_android_bid":
+            "cvd_01_fetch_android_bid":
             "{branch}/{build_id}".format(
                 branch=self.BRANCH, build_id=self.BUILD_ID),
-        "cvd_01_fetch_emulator_bid":
+            "cvd_01_fetch_emulator_bid":
             "{branch}/{build_id}".format(
                 branch=self.EMULATOR_BRANCH, build_id=self.EMULATOR_BUILD_ID),
-        "cvd_01_launch":
+            "cvd_01_launch":
             "1",
-        "cvd_01_x_res":
+            "cvd_01_x_res":
             str(self.X_RES),
-        "cvd_01_y_res":
+            "cvd_01_y_res":
             str(self.Y_RES),
-    }
-    expected_metadata.update(self.METADATA)
-    expected_disk_args = [{"fake_arg": "fake_value"}]
+        }
+        expected_metadata.update(self.METADATA)
+        expected_disk_args = [{"fake_arg": "fake_value"}]
 
-    self.goldfish_compute_client.CreateInstance(
-        self.INSTANCE, self.IMAGE, self.IMAGE_PROJECT, self.TARGET, self.BRANCH,
-        self.BUILD_ID, self.EMULATOR_BRANCH, self.EMULATOR_BUILD_ID,
-        self.EXTRA_DATA_DISK_SIZE_GB, self.GPU)
-    gcompute_client.ComputeClient.CreateInstance.assert_called_with(
-        self.goldfish_compute_client,
-        instance=self.INSTANCE,
-        image_name=self.IMAGE,
-        image_project=self.IMAGE_PROJECT,
-        disk_args=expected_disk_args,
-        metadata=expected_metadata,
-        machine_type=self.MACHINE_TYPE,
-        network=self.NETWORK,
-        zone=self.ZONE,
-        gpu=self.GPU)
+        self.goldfish_compute_client.CreateInstance(
+            self.INSTANCE, self.IMAGE, self.IMAGE_PROJECT, self.TARGET,
+            self.BRANCH, self.BUILD_ID, self.EMULATOR_BRANCH,
+            self.EMULATOR_BUILD_ID, self.EXTRA_DATA_DISK_SIZE_GB, self.GPU)
+
+        # pylint: disable=no-member
+        gcompute_client.ComputeClient.CreateInstance.assert_called_with(
+            self.goldfish_compute_client,
+            instance=self.INSTANCE,
+            image_name=self.IMAGE,
+            image_project=self.IMAGE_PROJECT,
+            disk_args=expected_disk_args,
+            metadata=expected_metadata,
+            machine_type=self.MACHINE_TYPE,
+            network=self.NETWORK,
+            zone=self.ZONE,
+            gpu=self.GPU)
+
 
 if __name__ == "__main__":
-  unittest.main()
+    unittest.main()
diff --git a/internal/lib/utils.py b/internal/lib/utils.py
index 875e183..1c9ac9c 100755
--- a/internal/lib/utils.py
+++ b/internal/lib/utils.py
@@ -13,7 +13,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Common Utilities."""
 
 import base64
@@ -33,10 +32,8 @@
 
 from acloud.public import errors
 
-
 logger = logging.getLogger(__name__)
 
-
 SSH_KEYGEN_CMD = ["ssh-keygen", "-t", "rsa", "-b", "4096"]
 
 
@@ -83,101 +80,112 @@
         except Exception as e:  # pylint: disable=W0703
             if exc_type:
                 logger.error(
-                        "Encountered error while deleting %s: %s",
-                        self.path, str(e), exc_info=True)
+                    "Encountered error while deleting %s: %s",
+                    self.path,
+                    str(e),
+                    exc_info=True)
             else:
                 raise
 
 
-def RetryOnException(retry_checker, max_retries, sleep_multiplier=0,
+def RetryOnException(retry_checker,
+                     max_retries,
+                     sleep_multiplier=0,
                      retry_backoff_factor=1):
-  """Decorater which retries the function call if |retry_checker| returns true.
+    """Decorater which retries the function call if |retry_checker| returns true.
 
-  Args:
-    retry_checker: A callback function which should take an exception instance
-                   and return True if functor(*args, **kwargs) should be retried
-                   when such exception is raised, and return False if it should
-                   not be retried.
-    max_retries: Maximum number of retries allowed.
-    sleep_multiplier: Will sleep sleep_multiplier * attempt_count seconds if
-                      retry_backoff_factor is 1.  Will sleep
-                      sleep_multiplier * (
-                          retry_backoff_factor ** (attempt_count -  1))
-                      if retry_backoff_factor != 1.
-    retry_backoff_factor: See explanation of sleep_multiplier.
+    Args:
+        retry_checker: A callback function which should take an exception instance
+                       and return True if functor(*args, **kwargs) should be retried
+                       when such exception is raised, and return False if it should
+                       not be retried.
+        max_retries: Maximum number of retries allowed.
+        sleep_multiplier: Will sleep sleep_multiplier * attempt_count seconds if
+                          retry_backoff_factor is 1.  Will sleep
+                          sleep_multiplier * (
+                              retry_backoff_factor ** (attempt_count -  1))
+                          if retry_backoff_factor != 1.
+        retry_backoff_factor: See explanation of sleep_multiplier.
 
-  Returns:
-    The function wrapper.
-  """
-  def _Wrapper(func):
-    def _FunctionWrapper(*args, **kwargs):
-      return Retry(retry_checker, max_retries, func, sleep_multiplier,
-                   retry_backoff_factor,
-                   *args, **kwargs)
-    return _FunctionWrapper
-  return _Wrapper
+    Returns:
+        The function wrapper.
+    """
+
+    def _Wrapper(func):
+        def _FunctionWrapper(*args, **kwargs):
+            return Retry(retry_checker, max_retries, func, sleep_multiplier,
+                         retry_backoff_factor, *args, **kwargs)
+
+        return _FunctionWrapper
+
+    return _Wrapper
 
 
-def Retry(retry_checker, max_retries, functor, sleep_multiplier=0,
-          retry_backoff_factor=1, *args, **kwargs):
-  """Conditionally retry a function.
+def Retry(retry_checker,
+          max_retries,
+          functor,
+          sleep_multiplier,
+          retry_backoff_factor,
+          *args,
+          **kwargs):
+    """Conditionally retry a function.
 
-  Args:
-    retry_checker: A callback function which should take an exception instance
-                   and return True if functor(*args, **kwargs) should be retried
-                   when such exception is raised, and return False if it should
-                   not be retried.
-    max_retries: Maximum number of retries allowed.
-    functor: The function to call, will call functor(*args, **kwargs).
-    sleep_multiplier: Will sleep sleep_multiplier * attempt_count seconds if
-                      retry_backoff_factor is 1.  Will sleep
-                      sleep_multiplier * (
-                          retry_backoff_factor ** (attempt_count -  1))
-                      if retry_backoff_factor != 1.
-    retry_backoff_factor: See explanation of sleep_multiplier.
-    *args: Arguments to pass to the functor.
-    **kwargs: Key-val based arguments to pass to the functor.
+    Args:
+        retry_checker: A callback function which should take an exception instance
+                       and return True if functor(*args, **kwargs) should be retried
+                       when such exception is raised, and return False if it should
+                       not be retried.
+        max_retries: Maximum number of retries allowed.
+        functor: The function to call, will call functor(*args, **kwargs).
+        sleep_multiplier: Will sleep sleep_multiplier * attempt_count seconds if
+                          retry_backoff_factor is 1.  Will sleep
+                          sleep_multiplier * (
+                              retry_backoff_factor ** (attempt_count -  1))
+                          if retry_backoff_factor != 1.
+        retry_backoff_factor: See explanation of sleep_multiplier.
+        *args: Arguments to pass to the functor.
+        **kwargs: Key-val based arguments to pass to the functor.
 
-  Returns:
-    The return value of the functor.
+    Returns:
+        The return value of the functor.
 
-  Raises:
-    Exception: The exception that functor(*args, **kwargs) throws.
-  """
-  attempt_count = 0
-  while attempt_count <= max_retries:
-    try:
-      attempt_count += 1
-      return_value = functor(*args, **kwargs)
-      return return_value
-    except Exception as e:  # pylint: disable=W0703
-      if retry_checker(e) and attempt_count <= max_retries:
-        if retry_backoff_factor != 1:
-          sleep = sleep_multiplier * (
-              retry_backoff_factor ** (attempt_count -  1))
-        else:
-          sleep = sleep_multiplier * attempt_count
-        time.sleep(sleep)
-      else:
-        raise
+    Raises:
+        Exception: The exception that functor(*args, **kwargs) throws.
+    """
+    attempt_count = 0
+    while attempt_count <= max_retries:
+        try:
+            attempt_count += 1
+            return_value = functor(*args, **kwargs)
+            return return_value
+        except Exception as e:  # pylint: disable=W0703
+            if retry_checker(e) and attempt_count <= max_retries:
+                if retry_backoff_factor != 1:
+                    sleep = sleep_multiplier * (retry_backoff_factor**
+                                                (attempt_count - 1))
+                else:
+                    sleep = sleep_multiplier * attempt_count
+                time.sleep(sleep)
+            else:
+                raise
 
 
 def RetryExceptionType(exception_types, max_retries, functor, *args, **kwargs):
-  """Retry exception if it is one of the given types.
+    """Retry exception if it is one of the given types.
 
-  Args:
-    exception_types: A tuple of exception types, e.g. (ValueError, KeyError)
-    max_retries: Max number of retries allowed.
-    functor: The function to call. Will be retried if exception is raised and
-             the exception is one of the exception_types.
-    *args: Arguments to pass to Retry function.
-    **kwargs: Key-val based arguments to pass to Retry functions.
+    Args:
+        exception_types: A tuple of exception types, e.g. (ValueError, KeyError)
+        max_retries: Max number of retries allowed.
+        functor: The function to call. Will be retried if exception is raised and
+                 the exception is one of the exception_types.
+        *args: Arguments to pass to Retry function.
+        **kwargs: Key-val based arguments to pass to Retry functions.
 
-  Returns:
-    The value returned by calling functor.
-  """
-  return Retry(lambda e: isinstance(e, exception_types), max_retries,
-               functor, *args, **kwargs)
+    Returns:
+        The value returned by calling functor.
+    """
+    return Retry(lambda e: isinstance(e, exception_types), max_retries,
+                 functor, *args, **kwargs)
 
 
 def PollAndWait(func, expected_return, timeout_exception, timeout_secs,
@@ -261,27 +269,27 @@
     """
     public_key_path = os.path.expanduser(public_key_path)
     private_key_path = os.path.expanduser(private_key_path)
-    create_key = (
-            not os.path.exists(public_key_path) and
-            not os.path.exists(private_key_path))
+    create_key = (not os.path.exists(public_key_path)
+                  and not os.path.exists(private_key_path))
     if not create_key:
-        logger.debug("The ssh private key (%s) or public key (%s) already exist,"
-                     "will not automatically create the key pairs.",
-                     private_key_path, public_key_path)
+        logger.debug(
+            "The ssh private key (%s) or public key (%s) already exist,"
+            "will not automatically create the key pairs.", private_key_path,
+            public_key_path)
         return
     cmd = SSH_KEYGEN_CMD + ["-C", getpass.getuser(), "-f", private_key_path]
-    logger.info("The ssh private key (%s) and public key (%s) do not exist, "
-                "automatically creating key pair, calling: %s",
-                private_key_path, public_key_path, " ".join(cmd))
+    logger.info(
+        "The ssh private key (%s) and public key (%s) do not exist, "
+        "automatically creating key pair, calling: %s", private_key_path,
+        public_key_path, " ".join(cmd))
     try:
         subprocess.check_call(cmd, stdout=sys.stderr, stderr=sys.stdout)
     except subprocess.CalledProcessError as e:
-        raise errors.DriverError(
-                "Failed to create ssh key pair: %s" % str(e))
+        raise errors.DriverError("Failed to create ssh key pair: %s" % str(e))
     except OSError as e:
         raise errors.DriverError(
-                "Failed to create ssh key pair, please make sure "
-                "'ssh-keygen' is installed: %s" % str(e))
+            "Failed to create ssh key pair, please make sure "
+            "'ssh-keygen' is installed: %s" % str(e))
 
     # By default ssh-keygen will create a public key file
     # by append .pub to the private key file name. Rename it
@@ -292,8 +300,8 @@
             os.rename(default_pub_key_path, public_key_path)
     except OSError as e:
         raise errors.DriverError(
-                "Failed to rename %s to %s: %s" %
-                (default_pub_key_path, public_key_path, str(e)))
+            "Failed to rename %s to %s: %s" % (default_pub_key_path,
+                                               public_key_path, str(e)))
 
     logger.info("Created ssh private key (%s) and public key (%s)",
                 private_key_path, public_key_path)
@@ -332,8 +340,8 @@
         if binary_data[int_length:int_length + type_length] != key_type:
             raise errors.DriverError("rsa key is invalid: %s" % rsa)
     except (struct.error, binascii.Error) as e:
-        raise errors.DriverError("rsa key is invalid: %s, error: %s" %
-                                 (rsa, str(e)))
+        raise errors.DriverError(
+            "rsa key is invalid: %s, error: %s" % (rsa, str(e)))
 
 
 class BatchHttpRequestExecutor(object):
@@ -385,8 +393,8 @@
         if isinstance(exception, self._other_retriable_errors):
             return True
 
-        if (isinstance(exception, errors.HttpError) and
-                exception.code in self._retry_http_codes):
+        if (isinstance(exception, errors.HttpError)
+                and exception.code in self._retry_http_codes):
             return True
         return False
 
@@ -410,14 +418,15 @@
             # If there is still retriable requests pending, raise an error
             # so that Retry will retry this function with pending_requests.
             raise errors.HasRetriableRequestsError(
-                "Retriable errors: %s" % [str(results[rid][1])
-                                          for rid in self._pending_requests])
+                "Retriable errors: %s" %
+                [str(results[rid][1]) for rid in self._pending_requests])
 
     def Execute(self):
         """Executes the requests and retry if necessary.
 
         Will populate self._final_results.
         """
+
         def _ShouldRetryHandler(exc):
             """Check if |exc| is a retriable exception.
 
@@ -436,7 +445,8 @@
         try:
             self._pending_requests = self._requests.copy()
             Retry(
-                _ShouldRetryHandler, max_retries=self._max_retry,
+                _ShouldRetryHandler,
+                max_retries=self._max_retry,
                 functor=self._ExecuteOnce,
                 sleep_multiplier=self._sleep,
                 retry_backoff_factor=self._backoff_factor)
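
For reference, a small sketch (not part of the patch) of the sleep schedule that the
Retry/RetryOnException docstrings above describe; with sleep_multiplier=1 and
retry_backoff_factor=2 this yields 1, 2, 4, 8, 16 seconds, which is what testRetry
in utils_test.py asserts:

    def backoff_schedule(max_retries, sleep_multiplier, retry_backoff_factor):
        """Sleep (in seconds) applied before each retry, per the formula above."""
        sleeps = []
        for attempt_count in range(1, max_retries + 1):
            if retry_backoff_factor != 1:
                sleeps.append(sleep_multiplier *
                              retry_backoff_factor ** (attempt_count - 1))
            else:
                sleeps.append(sleep_multiplier * attempt_count)
        return sleeps

    # backoff_schedule(5, 1, 2) -> [1, 2, 4, 8, 16]
    # backoff_schedule(3, 2, 1) -> [2, 4, 6]
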
diff --git a/internal/lib/utils_test.py b/internal/lib/utils_test.py
index a450933..a5e845c 100644
--- a/internal/lib/utils_test.py
+++ b/internal/lib/utils_test.py
@@ -13,7 +13,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Tests for acloud.internal.lib.utils."""
 
 import errno
@@ -24,163 +23,190 @@
 import tempfile
 import time
 
+import unittest
 import mock
 
-import unittest
 from acloud.internal.lib import driver_test_lib
 from acloud.internal.lib import utils
 
 
 class UtilsTest(driver_test_lib.BaseDriverTest):
+    """Test Utils."""
 
-  def testTempDir_Success(self):
-    """Test create a temp dir."""
-    self.Patch(os, "chmod")
-    self.Patch(tempfile, "mkdtemp", return_value="/tmp/tempdir")
-    self.Patch(shutil, "rmtree")
-    with utils.TempDir():
-      pass
-    # Verify.
-    tempfile.mkdtemp.assert_called_once()
-    shutil.rmtree.assert_called_with("/tmp/tempdir")
+    def testTempDirSuccess(self):
+        """Test create a temp dir."""
+        self.Patch(os, "chmod")
+        self.Patch(tempfile, "mkdtemp", return_value="/tmp/tempdir")
+        self.Patch(shutil, "rmtree")
+        with utils.TempDir():
+            pass
+        # Verify.
+        tempfile.mkdtemp.assert_called_once()  # pylint: disable=no-member
+        shutil.rmtree.assert_called_with("/tmp/tempdir")  # pylint: disable=no-member
 
-  def testTempDir_ExceptionRaised(self):
-    """Test create a temp dir and exception is raised within with-clause."""
-    self.Patch(os, "chmod")
-    self.Patch(tempfile, "mkdtemp", return_value="/tmp/tempdir")
-    self.Patch(shutil, "rmtree")
+    def testTempDirExceptionRaised(self):
+        """Test create a temp dir and exception is raised within with-clause."""
+        self.Patch(os, "chmod")
+        self.Patch(tempfile, "mkdtemp", return_value="/tmp/tempdir")
+        self.Patch(shutil, "rmtree")
 
-    class ExpectedException(Exception):
-      pass
+        class ExpectedException(Exception):
+            """Expected exception."""
+            pass
 
-    def _Call():
-      with utils.TempDir():
-        raise ExpectedException("Expected exception.")
-    # Verify. ExpectedException should be raised.
-    self.assertRaises(ExpectedException, _Call)
-    tempfile.mkdtemp.assert_called_once()
-    shutil.rmtree.assert_called_with("/tmp/tempdir")
+        def _Call():
+            with utils.TempDir():
+                raise ExpectedException("Expected exception.")
 
-  def testTempDir_WhenDeleteTempDirNoLongerExist(self):
-    """Test create a temp dir and dir no longer exists during deletion."""
-    self.Patch(os, "chmod")
-    self.Patch(tempfile, "mkdtemp", return_value="/tmp/tempdir")
-    expected_error = EnvironmentError()
-    expected_error.errno = errno.ENOENT
-    self.Patch(shutil, "rmtree", side_effect=expected_error)
-    def _Call():
-      with utils.TempDir():
-        pass
-    # Verify no exception should be raised when rmtree raises
-    # EnvironmentError with errno.ENOENT, i.e.
-    # directory no longer exists.
-    _Call()
-    tempfile.mkdtemp.assert_called_once()
-    shutil.rmtree.assert_called_with("/tmp/tempdir")
+        # Verify. ExpectedException should be raised.
+        self.assertRaises(ExpectedException, _Call)
+        tempfile.mkdtemp.assert_called_once()  # pylint: disable=no-member
+        shutil.rmtree.assert_called_with("/tmp/tempdir")  #pylint: disable=no-member
 
-  def testTempDir_WhenDeleteEncounterError(self):
-    """Test create a temp dir and encoutered error during deletion."""
-    self.Patch(os, "chmod")
-    self.Patch(tempfile, "mkdtemp", return_value="/tmp/tempdir")
-    expected_error = OSError("Expected OS Error")
-    self.Patch(shutil, "rmtree", side_effect=expected_error)
-    def _Call():
-      with utils.TempDir():
-        pass
+    def testTempDirWhenDeleteTempDirNoLongerExist(self):  # pylint: disable=invalid-name
+        """Test create a temp dir and dir no longer exists during deletion."""
+        self.Patch(os, "chmod")
+        self.Patch(tempfile, "mkdtemp", return_value="/tmp/tempdir")
+        expected_error = EnvironmentError()
+        expected_error.errno = errno.ENOENT
+        self.Patch(shutil, "rmtree", side_effect=expected_error)
 
-    # Verify OSError should be raised.
-    self.assertRaises(OSError, _Call)
-    tempfile.mkdtemp.assert_called_once()
-    shutil.rmtree.assert_called_with("/tmp/tempdir")
+        def _Call():
+            with utils.TempDir():
+                pass
 
-  def testTempDir_OrininalErrorRaised(self):
-    """Test original error is raised even if tmp dir deletion failed."""
-    self.Patch(os, "chmod")
-    self.Patch(tempfile, "mkdtemp", return_value="/tmp/tempdir")
-    expected_error = OSError("Expected OS Error")
-    self.Patch(shutil, "rmtree", side_effect=expected_error)
+        # Verify no exception should be raised when rmtree raises
+        # EnvironmentError with errno.ENOENT, i.e.
+        # directory no longer exists.
+        _Call()
+        tempfile.mkdtemp.assert_called_once()  #pylint: disable=no-member
+        shutil.rmtree.assert_called_with("/tmp/tempdir")  #pylint: disable=no-member
 
-    class ExpectedException(Exception):
-      pass
+    def testTempDirWhenDeleteEncounterError(self):
+        """Test create a temp dir and encoutered error during deletion."""
+        self.Patch(os, "chmod")
+        self.Patch(tempfile, "mkdtemp", return_value="/tmp/tempdir")
+        expected_error = OSError("Expected OS Error")
+        self.Patch(shutil, "rmtree", side_effect=expected_error)
 
-    def _Call():
-      with utils.TempDir():
-        raise ExpectedException("Expected Exception")
+        def _Call():
+            with utils.TempDir():
+                pass
 
-    # Verify.
-    # ExpectedException should be raised, and OSError
-    # should not be raised.
-    self.assertRaises(ExpectedException, _Call)
-    tempfile.mkdtemp.assert_called_once()
-    shutil.rmtree.assert_called_with("/tmp/tempdir")
+        # Verify OSError should be raised.
+        self.assertRaises(OSError, _Call)
+        tempfile.mkdtemp.assert_called_once()  #pylint: disable=no-member
+        shutil.rmtree.assert_called_with("/tmp/tempdir")  #pylint: disable=no-member
 
-  def testCreateSshKeyPair_KeyAlreadyExists(self):
-    """Test when the key pair already exists."""
-    public_key = "/fake/public_key"
-    private_key = "/fake/private_key"
-    self.Patch(os.path, "exists", side_effect=lambda path: path == public_key)
-    self.Patch(subprocess, "check_call")
-    utils.CreateSshKeyPairIfNotExist(private_key, public_key)
-    self.assertEqual(subprocess.check_call.call_count, 0)
+    def testTempDirOriginalErrorRaised(self):
+        """Test original error is raised even if tmp dir deletion failed."""
+        self.Patch(os, "chmod")
+        self.Patch(tempfile, "mkdtemp", return_value="/tmp/tempdir")
+        expected_error = OSError("Expected OS Error")
+        self.Patch(shutil, "rmtree", side_effect=expected_error)
 
-  def testCreateSshKeyPair_KeyAreCreated(self):
-    """Test when the key pair created."""
-    public_key = "/fake/public_key"
-    private_key = "/fake/private_key"
-    self.Patch(os.path, "exists", return_value=False)
-    self.Patch(subprocess, "check_call")
-    self.Patch(os, "rename")
-    utils.CreateSshKeyPairIfNotExist(private_key, public_key)
-    self.assertEqual(subprocess.check_call.call_count, 1)
-    subprocess.check_call.assert_called_with(
-        utils.SSH_KEYGEN_CMD + ["-C", getpass.getuser(), "-f", private_key],
-        stdout=mock.ANY, stderr=mock.ANY)
+        class ExpectedException(Exception):
+            """Expected exception."""
+            pass
 
-  def testRetryOnException(self):
-    def _IsValueError(exc):
-      return isinstance(exc, ValueError)
-    num_retry = 5
+        def _Call():
+            with utils.TempDir():
+                raise ExpectedException("Expected Exception")
 
-    @utils.RetryOnException(_IsValueError, num_retry)
-    def _RaiseAndRetry(sentinel):
-      sentinel.alert()
-      raise ValueError("Fake error.")
+        # Verify.
+        # ExpectedException should be raised, and OSError
+        # should not be raised.
+        self.assertRaises(ExpectedException, _Call)
+        tempfile.mkdtemp.assert_called_once()  #pylint: disable=no-member
+        shutil.rmtree.assert_called_with("/tmp/tempdir")  #pylint: disable=no-member
 
-    sentinel = mock.MagicMock()
-    self.assertRaises(ValueError, _RaiseAndRetry, sentinel)
-    self.assertEqual(1 + num_retry, sentinel.alert.call_count)
+    def testCreateSshKeyPairKeyAlreadyExists(self):  #pylint: disable=invalid-name
+        """Test when the key pair already exists."""
+        public_key = "/fake/public_key"
+        private_key = "/fake/private_key"
+        self.Patch(
+            os.path, "exists", side_effect=lambda path: path == public_key)
+        self.Patch(subprocess, "check_call")
+        utils.CreateSshKeyPairIfNotExist(private_key, public_key)
+        self.assertEqual(subprocess.check_call.call_count, 0)  #pylint: disable=no-member
 
-  def testRetryExceptionType(self):
-    """Test RetryExceptionType function."""
-    def _RaiseAndRetry(sentinel):
-      sentinel.alert()
-      raise ValueError("Fake error.")
+    def testCreateSshKeyPairKeyAreCreated(self):
+        """Test when the key pair created."""
+        public_key = "/fake/public_key"
+        private_key = "/fake/private_key"
+        self.Patch(os.path, "exists", return_value=False)
+        self.Patch(subprocess, "check_call")
+        self.Patch(os, "rename")
+        utils.CreateSshKeyPairIfNotExist(private_key, public_key)
+        self.assertEqual(subprocess.check_call.call_count, 1)  #pylint: disable=no-member
+        subprocess.check_call.assert_called_with(  #pylint: disable=no-member
+            utils.SSH_KEYGEN_CMD +
+            ["-C", getpass.getuser(), "-f", private_key],
+            stdout=mock.ANY,
+            stderr=mock.ANY)
 
-    num_retry = 5
-    sentinel = mock.MagicMock()
-    self.assertRaises(ValueError, utils.RetryExceptionType,
-                      (KeyError, ValueError), num_retry, _RaiseAndRetry,
-                      sentinel=sentinel)
-    self.assertEqual(1 + num_retry, sentinel.alert.call_count)
+    def testRetryOnException(self):
+        """Test RetryOnException."""
 
-  def testRetry(self):
-    """Test Retry."""
-    self.Patch(time, "sleep")
-    def _RaiseAndRetry(sentinel):
-      sentinel.alert()
-      raise ValueError("Fake error.")
+        def _IsValueError(exc):
+            return isinstance(exc, ValueError)
 
-    num_retry = 5
-    sentinel = mock.MagicMock()
-    self.assertRaises(ValueError, utils.RetryExceptionType,
-                      (ValueError, KeyError), num_retry, _RaiseAndRetry,
-                      sleep_multiplier=1,
-                      retry_backoff_factor=2,
-                      sentinel=sentinel)
+        num_retry = 5
 
-    self.assertEqual(1 + num_retry, sentinel.alert.call_count)
-    time.sleep.assert_has_calls(
-        [mock.call(1), mock.call(2), mock.call(4), mock.call(8), mock.call(16)])
+        @utils.RetryOnException(_IsValueError, num_retry)
+        def _RaiseAndRetry(sentinel):
+            sentinel.alert()
+            raise ValueError("Fake error.")
+
+        sentinel = mock.MagicMock()
+        self.assertRaises(ValueError, _RaiseAndRetry, sentinel)
+        self.assertEqual(1 + num_retry, sentinel.alert.call_count)
+
+    def testRetryExceptionType(self):
+        """Test RetryExceptionType function."""
+
+        def _RaiseAndRetry(sentinel):
+            sentinel.alert()
+            raise ValueError("Fake error.")
+
+        num_retry = 5
+        sentinel = mock.MagicMock()
+        self.assertRaises(
+            ValueError,
+            utils.RetryExceptionType, (KeyError, ValueError),
+            num_retry,
+            _RaiseAndRetry,
+            sentinel=sentinel)
+        self.assertEqual(1 + num_retry, sentinel.alert.call_count)
+
+    def testRetry(self):
+        """Test Retry."""
+        self.Patch(time, "sleep")
+
+        def _RaiseAndRetry(sentinel):
+            sentinel.alert()
+            raise ValueError("Fake error.")
+
+        num_retry = 5
+        sentinel = mock.MagicMock()
+        self.assertRaises(
+            ValueError,
+            utils.RetryExceptionType, (ValueError, KeyError),
+            num_retry,
+            _RaiseAndRetry,
+            sleep_multiplier=1,
+            retry_backoff_factor=2,
+            sentinel=sentinel)
+
+        self.assertEqual(1 + num_retry, sentinel.alert.call_count)
+        time.sleep.assert_has_calls(  #pylint: disable=no-member
+            [
+                mock.call(1),
+                mock.call(2),
+                mock.call(4),
+                mock.call(8),
+                mock.call(16)
+            ])
 
 
 if __name__ == "__main__":
diff --git a/public/acloud_kernel/acloud_kernel.py b/public/acloud_kernel/acloud_kernel.py
index 39361c4..5fd51b8 100755
--- a/public/acloud_kernel/acloud_kernel.py
+++ b/public/acloud_kernel/acloud_kernel.py
@@ -13,13 +13,11 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Acloud Kernel Utility.
 
 This CLI implements additional functionality to acloud CLI.
 """
 import argparse
-import os
 import sys
 
 from acloud.public import acloud_common
diff --git a/public/acloud_kernel/kernel_swapper.py b/public/acloud_kernel/kernel_swapper.py
index caf3d9a..35fc744 100755
--- a/public/acloud_kernel/kernel_swapper.py
+++ b/public/acloud_kernel/kernel_swapper.py
@@ -13,12 +13,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Kernel Swapper.
 
 This class manages swapping kernel images for a Cloud Android instance.
 """
-import os
 import subprocess
 
 from acloud.public import errors
@@ -29,9 +27,11 @@
 from acloud.internal.lib import gstorage_client
 from acloud.internal.lib import utils
 
-ALL_SCOPES = ' '.join([android_build_client.AndroidBuildClient.SCOPE,
-                       gstorage_client.StorageClient.SCOPE,
-                       android_compute_client.AndroidComputeClient.SCOPE])
+ALL_SCOPES = ' '.join([
+    android_build_client.AndroidBuildClient.SCOPE,
+    gstorage_client.StorageClient.SCOPE,
+    android_compute_client.AndroidComputeClient.SCOPE
+])
 
 # ssh flags used to communicate with the Cloud Android instance.
 SSH_FLAGS = [
@@ -50,7 +50,7 @@
 
     Attributes:
         _compute_client: AndroidCopmuteClient object, manages AVD.
-        _instance_name: string, name of Cloud Android Instance.
+        _instance_name: string, name of Cloud Android Instance.
         _target_ip: string, IP address of Cloud Android instance.
         _ssh_flags: string list, flags to be used with ssh and scp.
     """
@@ -83,22 +83,22 @@
         Returns:
             A Report instance.
         """
-        r = report.Report(command='swap_kernel')
+        swap_report = report.Report(command='swap_kernel')
         try:
             self._ShellCmdOnTarget(MOUNT_CMD)
             self.PushFile(local_kernel_image, '/boot')
             self.RebootTarget()
         except subprocess.CalledProcessError as e:
-            r.AddError(str(e))
-            r.SetStatus(report.Status.FAIL)
-            return r
+            swap_report.AddError(str(e))
+            swap_report.SetStatus(report.Status.FAIL)
+            return swap_report
         except errors.DeviceBootError as e:
-            r.AddError(str(e))
-            r.SetStatus(report.Status.BOOT_FAIL)
-            return r
+            swap_report.AddError(str(e))
+            swap_report.SetStatus(report.Status.BOOT_FAIL)
+            return swap_report
 
-        r.SetStatus(report.Status.SUCCESS)
-        return r
+        swap_report.SetStatus(report.Status.SUCCESS)
+        return swap_report
 
     def PushFile(self, src_path, dest_path):
         """Pushes local file to target Cloud Android instance.
@@ -137,7 +137,8 @@
         host_cmd = ' '.join([ssh_cmd, '"%s"' % target_cmd])
         self._ShellCmd(host_cmd)
 
-    def _ShellCmd(self, host_cmd):
+    @staticmethod
+    def _ShellCmd(host_cmd):
         """Runs a shell command on host device.
 
         Args:
@@ -148,7 +149,10 @@
                                            host_cmd.
         """
         utils.Retry(
-            retry_checker=lambda e: isinstance(e, subprocess.CalledProcessError),
+            retry_checker=
+                lambda e: isinstance(e, subprocess.CalledProcessError),
             max_retries=2,
             functor=lambda cmd: subprocess.check_call(cmd, shell=True),
+            sleep_multiplier=0,
+            retry_backoff_factor=1,
             cmd=host_cmd)
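
Because Retry no longer defaults sleep_multiplier and retry_backoff_factor, callers
such as _ShellCmd above must pass them explicitly. A hedged usage sketch (the
run_with_retries helper is illustrative, not part of the patch):

    import subprocess

    from acloud.internal.lib import utils

    def run_with_retries(host_cmd):
        """Run host_cmd in a shell, retrying failed calls up to two more times."""
        utils.Retry(
            retry_checker=lambda e: isinstance(e, subprocess.CalledProcessError),
            max_retries=2,
            functor=lambda cmd: subprocess.check_call(cmd, shell=True),
            sleep_multiplier=0,
            retry_backoff_factor=1,
            cmd=host_cmd)
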
diff --git a/public/acloud_kernel/kernel_swapper_test.py b/public/acloud_kernel/kernel_swapper_test.py
index bddb722..dabe4a9 100644
--- a/public/acloud_kernel/kernel_swapper_test.py
+++ b/public/acloud_kernel/kernel_swapper_test.py
@@ -16,15 +16,17 @@
 """Tests acloud.public.acloud_kernel.kernel_swapper."""
 
 import subprocess
-import mock
 
 import unittest
+import mock
+
 from acloud.internal.lib import android_compute_client
 from acloud.internal.lib import auth
 from acloud.internal.lib import driver_test_lib
 from acloud.public.acloud_kernel import kernel_swapper
 
 
+#pylint: disable=too-many-instance-attributes
 class KernelSwapperTest(driver_test_lib.BaseDriverTest):
     """Test kernel_swapper."""
 
@@ -47,16 +49,18 @@
 
         self.kswapper = kernel_swapper.KernelSwapper(self.cfg,
                                                      self.fake_instance)
-        self.ssh_cmd_prefix = 'ssh %s root@%s' % (
-            ' '.join(kernel_swapper.SSH_FLAGS), self.fake_ip)
+        self.ssh_cmd_prefix = 'ssh %s root@%s' % (' '.join(
+            kernel_swapper.SSH_FLAGS), self.fake_ip)
         self.scp_cmd_prefix = 'scp %s' % ' '.join(kernel_swapper.SSH_FLAGS)
 
     def testPushFile(self):
         """Test RebootTarget."""
         fake_src_path = 'fake-src'
         fake_dest_path = 'fake-dest'
-        scp_cmd = ' '.join([self.scp_cmd_prefix, '%s root@%s:%s' %
-                            (fake_src_path, self.fake_ip, fake_dest_path)])
+        scp_cmd = ' '.join([
+            self.scp_cmd_prefix,
+            '%s root@%s:%s' % (fake_src_path, self.fake_ip, fake_dest_path)
+        ])
 
         self.kswapper.PushFile(fake_src_path, fake_dest_path)
         self.subprocess_call.assert_called_once_with(scp_cmd, shell=True)
@@ -64,9 +68,9 @@
     def testRebootTarget(self):
         """Test RebootTarget."""
         self.kswapper.RebootTarget()
-        reboot_cmd = ' '.join([
-            self.ssh_cmd_prefix, '"%s"' % kernel_swapper.REBOOT_CMD
-        ])
+        reboot_cmd = ' '.join(
+            [self.ssh_cmd_prefix,
+             '"%s"' % kernel_swapper.REBOOT_CMD])
 
         self.subprocess_call.assert_called_once_with(reboot_cmd, shell=True)
         self.compute_client.WaitForBoot.assert_called_once_with(
@@ -75,21 +79,22 @@
     def testSwapKernel(self):
         """Test SwapKernel."""
         fake_local_kernel_image = 'fake-kernel'
-        mount_cmd = ' '.join([
-            self.ssh_cmd_prefix, '"%s"' % kernel_swapper.MOUNT_CMD
+        mount_cmd = ' '.join(
+            [self.ssh_cmd_prefix,
+             '"%s"' % kernel_swapper.MOUNT_CMD])
+        scp_cmd = ' '.join([
+            self.scp_cmd_prefix,
+            '%s root@%s:%s' % (fake_local_kernel_image, self.fake_ip, '/boot')
         ])
-        scp_cmd = ' '.join([self.scp_cmd_prefix, '%s root@%s:%s' %
-                            (fake_local_kernel_image, self.fake_ip, '/boot')])
-        reboot_cmd = ' '.join([
-            self.ssh_cmd_prefix, '"%s"' % kernel_swapper.REBOOT_CMD
-        ])
+        reboot_cmd = ' '.join(
+            [self.ssh_cmd_prefix,
+             '"%s"' % kernel_swapper.REBOOT_CMD])
 
         self.kswapper.SwapKernel(fake_local_kernel_image)
         self.subprocess_call.assert_has_calls([
-            mock.call(
-                mount_cmd, shell=True), mock.call(
-                    scp_cmd, shell=True), mock.call(
-                        reboot_cmd, shell=True)
+            mock.call(mount_cmd, shell=True),
+            mock.call(scp_cmd, shell=True),
+            mock.call(reboot_cmd, shell=True)
         ])
 
 
diff --git a/public/acloud_main.py b/public/acloud_main.py
index cdf37eb..49894fe 100644
--- a/public/acloud_main.py
+++ b/public/acloud_main.py
@@ -13,7 +13,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 r"""Cloud Android Driver.
 
 This CLI manages google compute engine project for android devices.
@@ -65,11 +64,12 @@
 import argparse
 import getpass
 import logging
-import os
 import sys
 
-# TODO: Find a better way to handling this import logic.
+# Needed to silence oauth2client.
+logging.basicConfig(level=logging.CRITICAL)
 
+# pylint: disable=wrong-import-position
 from acloud.internal import constants
 from acloud.public import acloud_common
 from acloud.public import config
@@ -90,6 +90,7 @@
 CMD_SSHKEY = "project_sshkey"
 
 
+# pylint: disable=too-many-statements
 def _ParseArgs(args):
     """Parse args.
 
@@ -99,8 +100,9 @@
     Returns:
         Parsed args.
     """
-    usage = ",".join([CMD_CREATE, CMD_CREATE_CUTTLEFISH, CMD_DELETE,
-                      CMD_CLEANUP, CMD_SSHKEY])
+    usage = ",".join([
+        CMD_CREATE, CMD_CREATE_CUTTLEFISH, CMD_DELETE, CMD_CLEANUP, CMD_SSHKEY
+    ])
     parser = argparse.ArgumentParser(
         description=__doc__,
         formatter_class=argparse.RawDescriptionHelpFormatter,
@@ -124,10 +126,11 @@
         dest="branch",
         help="Android branch, e.g. mnc-dev or git_mnc-dev")
     # TODO: Support HEAD (the latest build)
-    create_parser.add_argument("--build_id",
-                               type=str,
-                               dest="build_id",
-                               help="Android build id, e.g. 2145099, P2804227")
+    create_parser.add_argument(
+        "--build_id",
+        type=str,
+        dest="build_id",
+        help="Android build id, e.g. 2145099, P2804227")
     create_parser.add_argument(
         "--spec",
         type=str,
@@ -135,24 +138,26 @@
         required=False,
         help="The name of a pre-configured device spec that we are "
         "going to use. Choose from: %s" % ", ".join(constants.SPEC_NAMES))
-    create_parser.add_argument("--num",
-                               type=int,
-                               dest="num",
-                               required=False,
-                               default=1,
-                               help="Number of instances to create.")
+    create_parser.add_argument(
+        "--num",
+        type=int,
+        dest="num",
+        required=False,
+        default=1,
+        help="Number of instances to create.")
     create_parser.add_argument(
         "--gce_image",
         type=str,
         dest="gce_image",
         required=False,
         help="Name of an existing compute engine image to reuse.")
-    create_parser.add_argument("--local_disk_image",
-                               type=str,
-                               dest="local_disk_image",
-                               required=False,
-                               help="Path to a local disk image to use, "
-                               "e.g /tmp/avd-system.tar.gz")
+    create_parser.add_argument(
+        "--local_disk_image",
+        type=str,
+        dest="local_disk_image",
+        required=False,
+        help="Path to a local disk image to use, "
+        "e.g /tmp/avd-system.tar.gz")
     create_parser.add_argument(
         "--no_cleanup",
         dest="no_cleanup",
@@ -179,7 +184,8 @@
         action="store_true",
         dest="autoconnect",
         required=False,
-        help="For each instance created, we will automatically creates both 2 ssh"
+        help=
+        "For each instance created, we will automatically creates both 2 ssh"
         " tunnels forwarding both adb & vnc. Then add the device to adb.")
 
     subparser_list.append(create_parser)
@@ -238,7 +244,8 @@
         action="store_true",
         dest="autoconnect",
         required=False,
-        help="For each instance created, we will automatically creates both 2 ssh"
+        help=
+        "For each instance created, we will automatically creates both 2 ssh"
         " tunnels forwarding both adb & vnc. Then add the device to adb.")
 
     subparser_list.append(create_cf_parser)
@@ -307,7 +314,8 @@
         action="store_true",
         dest="autoconnect",
         required=False,
-        help="For each instance created, we will automatically creates both 2 ssh"
+        help=
+        "For each instance created, we will automatically creates both 2 ssh"
         " tunnels forwarding both adb & vnc. Then add the device to adb.")
 
     subparser_list.append(create_gf_parser)
@@ -355,12 +363,12 @@
         dest="ssh_rsa_path",
         required=True,
         help="Absolute path to the file that contains the public rsa key "
-             "that will be added as project-wide ssh key.")
+        "that will be added as project-wide ssh key.")
     subparser_list.append(sshkey_parser)
 
     # Add common arguments.
-    for p in subparser_list:
-        acloud_common.AddCommonArguments(p)
+    for parser in subparser_list:
+        acloud_common.AddCommonArguments(parser)
 
     return parser.parse_args(args)
 
@@ -402,8 +410,8 @@
             raise errors.CommandArgError(
                 "%s is not valid. Choose from: %s" %
                 (parsed_args.spec, ", ".join(constants.SPEC_NAMES)))
-        if not ((parsed_args.build_id and parsed_args.build_target) or
-                parsed_args.gce_image or parsed_args.local_disk_image):
+        if not ((parsed_args.build_id and parsed_args.build_target)
+                or parsed_args.gce_image or parsed_args.local_disk_image):
             raise errors.CommandArgError(
                 "At least one of the following should be specified: "
                 "--build_id and --build_target, or --gce_image, or "
@@ -414,21 +422,22 @@
 
     if parsed_args.which in [CMD_CREATE_CUTTLEFISH, CMD_CREATE_GOLDFISH]:
         if not parsed_args.build_id or not parsed_args.build_target:
-          raise errors.CommandArgError("Must specify --build_id and --build_target")
+            raise errors.CommandArgError(
+                "Must specify --build_id and --build_target")
 
     if parsed_args.which == CMD_CREATE_GOLDFISH:
         if not parsed_args.emulator_build_id:
-          raise errors.CommandArgError("Must specify --emulator_build_id")
+            raise errors.CommandArgError("Must specify --emulator_build_id")
 
     if parsed_args.which in [
-        CMD_CREATE, CMD_CREATE_CUTTLEFISH, CMD_CREATE_GOLDFISH
+            CMD_CREATE, CMD_CREATE_CUTTLEFISH, CMD_CREATE_GOLDFISH
     ]:
-        if (parsed_args.serial_log_file and
-                not parsed_args.serial_log_file.endswith(".tar.gz")):
+        if (parsed_args.serial_log_file
+                and not parsed_args.serial_log_file.endswith(".tar.gz")):
             raise errors.CommandArgError(
                 "--serial_log_file must ends with .tar.gz")
-        if (parsed_args.logcat_file and
-                not parsed_args.logcat_file.endswith(".tar.gz")):
+        if (parsed_args.logcat_file
+                and not parsed_args.logcat_file.endswith(".tar.gz")):
             raise errors.CommandArgError(
                 "--logcat_file must ends with .tar.gz")
 
@@ -514,8 +523,8 @@
             logcat_file=args.logcat_file,
             autoconnect=args.autoconnect)
     elif args.which == CMD_DELETE:
-        report = device_driver.DeleteAndroidVirtualDevices(cfg,
-                                                           args.instance_names)
+        report = device_driver.DeleteAndroidVirtualDevices(
+            cfg, args.instance_names)
     elif args.which == CMD_CLEANUP:
         report = device_driver.Cleanup(cfg, args.expiration_mins)
     elif args.which == CMD_SSHKEY:
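
The argument checks above boil down to a few invariants: cuttlefish and goldfish creation need both --build_id and --build_target, goldfish additionally needs --emulator_build_id, and --serial_log_file / --logcat_file must name .tar.gz archives. A self-contained sketch of the same rules, useful as a mental model (the helper name and the sample values are illustrative, not part of acloud):

    import argparse


    def _CheckCreateFlags(args):
        """Illustrative restatement of the checks above; not an acloud helper."""
        if not (args.build_id and args.build_target):
            raise ValueError("Must specify --build_id and --build_target")
        for flag, value in (("--serial_log_file", args.serial_log_file),
                            ("--logcat_file", args.logcat_file)):
            if value and not value.endswith(".tar.gz"):
                raise ValueError("%s must end with .tar.gz" % flag)


    # Passes: both build flags are set and the serial log uses a .tar.gz suffix.
    _CheckCreateFlags(argparse.Namespace(
        build_id="2145099", build_target="fake-build-target",
        serial_log_file="serial.tar.gz", logcat_file=None))
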
diff --git a/public/actions/common_operations.py b/public/actions/common_operations.py
index 74ee50c..5a6d1d2 100644
--- a/public/actions/common_operations.py
+++ b/public/actions/common_operations.py
@@ -13,7 +13,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Common operations between managing GCE and Cuttlefish devices.
 
 This module provides the common operations between managing GCE (device_driver)
@@ -22,7 +21,6 @@
 """
 
 import logging
-import os
 
 from acloud.public import avd
 from acloud.public import errors
@@ -33,127 +31,133 @@
 
 
 def CreateSshKeyPairIfNecessary(cfg):
-  """Create ssh key pair if necessary.
+    """Create ssh key pair if necessary.
 
-  Args:
-    cfg: An Acloudconfig instance.
+    Args:
+        cfg: An Acloudconfig instance.
 
-  Raises:
-    error.DriverError: If it falls into an unexpected condition.
-  """
-  if not cfg.ssh_public_key_path:
-    logger.warning("ssh_public_key_path is not specified in acloud config. "
-                   "Project-wide public key will "
-                   "be used when creating AVD instances. "
-                   "Please ensure you have the correct private half of "
-                   "a project-wide public key if you want to ssh into the "
-                   "instances after creation.")
-  elif cfg.ssh_public_key_path and not cfg.ssh_private_key_path:
-    logger.warning("Only ssh_public_key_path is specified in acloud config, "
-                   "but ssh_private_key_path is missing. "
-                   "Please ensure you have the correct private half "
-                   "if you want to ssh into the instances after creation.")
-  elif cfg.ssh_public_key_path and cfg.ssh_private_key_path:
-    utils.CreateSshKeyPairIfNotExist(cfg.ssh_private_key_path,
-                                     cfg.ssh_public_key_path)
-  else:
-    # Should never reach here.
-    raise errors.DriverError("Unexpected error in CreateSshKeyPairIfNecessary")
+    Raises:
+        errors.DriverError: If the ssh key settings fall into an unexpected
+            condition.
+    """
+    if not cfg.ssh_public_key_path:
+        logger.warning(
+            "ssh_public_key_path is not specified in acloud config. "
+            "Project-wide public key will "
+            "be used when creating AVD instances. "
+            "Please ensure you have the correct private half of "
+            "a project-wide public key if you want to ssh into the "
+            "instances after creation.")
+    elif cfg.ssh_public_key_path and not cfg.ssh_private_key_path:
+        logger.warning(
+            "Only ssh_public_key_path is specified in acloud config, "
+            "but ssh_private_key_path is missing. "
+            "Please ensure you have the correct private half "
+            "if you want to ssh into the instances after creation.")
+    elif cfg.ssh_public_key_path and cfg.ssh_private_key_path:
+        utils.CreateSshKeyPairIfNotExist(cfg.ssh_private_key_path,
+                                         cfg.ssh_public_key_path)
+    else:
+        # Should never reach here.
+        raise errors.DriverError(
+            "Unexpected error in CreateSshKeyPairIfNecessary")
 
 
 class DevicePool(object):
-  """A class that manages a pool of virtual devices.
+    """A class that manages a pool of virtual devices.
 
-  Attributes:
-    devices: A list of devices in the pool.
-  """
-
-  def __init__(self, device_factory, devices=None):
-    """Constructs a new DevicePool.
-
-    Args:
-      device_factory: A device factory capable of producing a goldfish or
-        cuttlefish device. The device factory must expose an attribute with
-        the credentials that can be used to retrieve information from the
-        constructed device.
-      devices: List of devices managed by this pool.
-    """
-    self._devices = devices or []
-    self._device_factory = device_factory
-    self._compute_client = device_factory.GetComputeClient()
-
-  def CreateDevices(self, num):
-    """Creates |num| devices for given build_target and build_id.
-
-    Args:
-      num: Number of devices to create.
+    Attributes:
+        devices: A list of devices in the pool.
     """
 
-    # Create host instances for cuttlefish/goldfish device.
-    # Currently one instance supports only 1 device.
-    for _ in range(num):
-      instance = self._device_factory.CreateInstance()
-      ip = self._compute_client.GetInstanceIP(instance)
-      self.devices.append(
-          avd.AndroidVirtualDevice(ip=ip, instance_name=instance))
+    def __init__(self, device_factory, devices=None):
+        """Constructs a new DevicePool.
 
-  def WaitForBoot(self):
-    """Waits for all devices to boot up.
+        Args:
+            device_factory: A device factory capable of producing a goldfish or
+                cuttlefish device. The device factory must expose an attribute with
+                the credentials that can be used to retrieve information from the
+                constructed device.
+            devices: List of devices managed by this pool.
+        """
+        self._devices = devices or []
+        self._device_factory = device_factory
+        self._compute_client = device_factory.GetComputeClient()
 
-    Returns:
-      A dictionary that contains all the failures.
-      The key is the name of the instance that fails to boot,
-      and the value is an errors.DeviceBootError object.
-    """
-    failures = {}
-    for device in self._devices:
-      try:
-        self._compute_client.WaitForBoot(device.instance_name)
-      except errors.DeviceBootError as e:
-        failures[device.instance_name] = e
-    return failures
+    def CreateDevices(self, num):
+        """Creates |num| devices for given build_target and build_id.
 
-  @property
-  def devices(self):
-    """Returns a list of devices in the pool.
+        Args:
+            num: Number of devices to create.
+        """
 
-    Returns:
-      A list of devices in the pool.
-    """
-    return self._devices
+        # Create host instances for cuttlefish/goldfish device.
+        # Currently one instance supports only 1 device.
+        for _ in range(num):
+            instance = self._device_factory.CreateInstance()
+            ip = self._compute_client.GetInstanceIP(instance)
+            self.devices.append(
+                avd.AndroidVirtualDevice(ip=ip, instance_name=instance))
+
+    def WaitForBoot(self):
+        """Waits for all devices to boot up.
+
+        Returns:
+            A dictionary that contains all the failures.
+            The key is the name of the instance that fails to boot,
+            and the value is an errors.DeviceBootError object.
+        """
+        failures = {}
+        for device in self._devices:
+            try:
+                self._compute_client.WaitForBoot(device.instance_name)
+            except errors.DeviceBootError as e:
+                failures[device.instance_name] = e
+        return failures
+
+    @property
+    def devices(self):
+        """Returns a list of devices in the pool.
+
+        Returns:
+            A list of devices in the pool.
+        """
+        return self._devices
 
 
 def CreateDevices(command, cfg, device_factory, num):
-  """Create a set of devices using the given factory.
+    """Create a set of devices using the given factory.
 
-  Args:
-    command: The name of the command, used for reporting.
-    cfg: An AcloudConfig instance.
-    device_factory: A factory capable of producing a single device.
-    num: The number of devices to create.
+    Args:
+        command: The name of the command, used for reporting.
+        cfg: An AcloudConfig instance.
+        device_factory: A factory capable of producing a single device.
+        num: The number of devices to create.
 
-  Returns:
-    A Report instance.
-  """
-  reporter = report.Report(command=command)
-  try:
-    CreateSshKeyPairIfNecessary(cfg)
-    device_pool = DevicePool(device_factory)
-    device_pool.CreateDevices(num)
-    failures = device_pool.WaitForBoot()
-    # Write result to report.
-    for device in device_pool.devices:
-      device_dict = {"ip": device.ip, "instance_name": device.instance_name}
-      if device.instance_name in failures:
-        reporter.AddData(key="devices_failing_boot", value=device_dict)
-        reporter.AddError(str(failures[device.instance_name]))
-      else:
-        reporter.AddData(key="devices", value=device_dict)
-    if failures:
-      reporter.SetStatus(report.Status.BOOT_FAIL)
-    else:
-      reporter.SetStatus(report.Status.SUCCESS)
-  except errors.DriverError as e:
-    reporter.AddError(str(e))
-    reporter.SetStatus(report.Status.FAIL)
-  return reporter
+    Returns:
+        A Report instance.
+    """
+    reporter = report.Report(command=command)
+    try:
+        CreateSshKeyPairIfNecessary(cfg)
+        device_pool = DevicePool(device_factory)
+        device_pool.CreateDevices(num)
+        failures = device_pool.WaitForBoot()
+        # Write result to report.
+        for device in device_pool.devices:
+            device_dict = {
+                "ip": device.ip,
+                "instance_name": device.instance_name
+            }
+            if device.instance_name in failures:
+                reporter.AddData(key="devices_failing_boot", value=device_dict)
+                reporter.AddError(str(failures[device.instance_name]))
+            else:
+                reporter.AddData(key="devices", value=device_dict)
+        if failures:
+            reporter.SetStatus(report.Status.BOOT_FAIL)
+        else:
+            reporter.SetStatus(report.Status.SUCCESS)
+    except errors.DriverError as e:
+        reporter.AddError(str(e))
+        reporter.SetStatus(report.Status.FAIL)
+    return reporter
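
The docstrings above spell out the contract a device factory must satisfy: CreateDevices only needs the factory to expose CreateInstance() and GetComputeClient(), and the returned compute client to answer GetInstanceIP() and WaitForBoot(). A minimal sketch wiring a fake factory through CreateDevices, assuming a MagicMock config like the unit tests use (the fake classes and the "create" command label are illustrative):

    import mock

    from acloud.public.actions import common_operations


    class FakeComputeClient(object):
        """Illustrative stand-in for the compute client a factory returns."""

        def GetInstanceIP(self, instance):
            """Pretend every instance answers on localhost."""
            return "127.0.0.1"

        def WaitForBoot(self, instance_name):
            """Pretend the instance booted; a real client polls GCE here."""


    class FakeDeviceFactory(object):
        """Illustrative factory honoring the contract described above."""

        def __init__(self):
            self._compute_client = FakeComputeClient()

        def GetComputeClient(self):
            return self._compute_client

        def CreateInstance(self):
            return "fake-instance"


    cfg = mock.MagicMock()
    cfg.ssh_public_key_path = ""  # skip ssh key setup, as the unit tests do
    reporter = common_operations.CreateDevices("create", cfg,
                                               FakeDeviceFactory(), num=1)
    print(reporter.status)  # SUCCESS when WaitForBoot records no failures
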
diff --git a/public/actions/common_operations_test.py b/public/actions/common_operations_test.py
index 87a1227..91fcbd6 100644
--- a/public/actions/common_operations_test.py
+++ b/public/actions/common_operations_test.py
@@ -13,16 +13,15 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Tests for acloud.public.actions.common_operations."""
 
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
+import unittest
 import mock
 
-import unittest
 from acloud.internal.lib import android_build_client
 from acloud.internal.lib import android_compute_client
 from acloud.internal.lib import auth
@@ -32,61 +31,67 @@
 
 
 class CommonOperationsTest(driver_test_lib.BaseDriverTest):
+    """Test Common Operations."""
+    IP = "127.0.0.1"
+    INSTANCE = "fake-instance"
+    CMD = "test-cmd"
 
-  IP = "127.0.0.1"
-  INSTANCE = "fake-instance"
-  CMD = "test-cmd"
+    def setUp(self):
+        """Set up the test."""
+        super(CommonOperationsTest, self).setUp()
+        self.build_client = mock.MagicMock()
+        self.device_factory = mock.MagicMock()
+        self.Patch(
+            android_build_client,
+            "AndroidBuildClient",
+            return_value=self.build_client)
+        self.compute_client = mock.MagicMock()
+        self.Patch(
+            android_compute_client,
+            "AndroidComputeClient",
+            return_value=self.compute_client)
+        self.Patch(auth, "CreateCredentials", return_value=mock.MagicMock())
+        self.Patch(self.compute_client, "GetInstanceIP", return_value=self.IP)
+        self.Patch(
+            self.device_factory, "CreateInstance", return_value=self.INSTANCE)
+        self.Patch(
+            self.device_factory,
+            "GetComputeClient",
+            return_value=self.compute_client)
 
-  def setUp(self):
-    """Set up the test."""
-    super(CommonOperationsTest, self).setUp()
-    self.build_client = mock.MagicMock()
-    self.device_factory = mock.MagicMock()
-    self.Patch(
-        android_build_client,
-        "AndroidBuildClient",
-        return_value=self.build_client)
-    self.compute_client = mock.MagicMock()
-    self.Patch(
-        android_compute_client,
-        "AndroidComputeClient",
-        return_value=self.compute_client)
-    self.Patch(auth, "CreateCredentials", return_value=mock.MagicMock())
-    self.Patch(self.compute_client, "GetInstanceIP", return_value=self.IP)
-    self.Patch(
-        self.device_factory, "CreateInstance", return_value=self.INSTANCE)
-    self.Patch(self.device_factory, "GetComputeClient",
-               return_value=self.compute_client)
+    @staticmethod
+    def _CreateCfg():
+        """A helper method that creates a mock configuration object."""
+        cfg = mock.MagicMock()
+        cfg.service_account_name = "fake@service.com"
+        cfg.service_account_private_key_path = "/fake/path/to/key"
+        cfg.zone = "fake_zone"
+        cfg.disk_image_name = "fake_image.tar.gz"
+        cfg.disk_image_mime_type = "fake/type"
+        cfg.ssh_private_key_path = ""
+        cfg.ssh_public_key_path = ""
+        return cfg
 
-  def _CreateCfg(self):
-    """A helper method that creates a mock configuration object."""
-    cfg = mock.MagicMock()
-    cfg.service_account_name = "fake@service.com"
-    cfg.service_account_private_key_path = "/fake/path/to/key"
-    cfg.zone = "fake_zone"
-    cfg.disk_image_name = "fake_image.tar.gz"
-    cfg.disk_image_mime_type = "fake/type"
-    cfg.ssh_private_key_path = ""
-    cfg.ssh_public_key_path = ""
-    return cfg
+    def testDevicePoolCreateDevices(self):
+        """Test Device Pool Create Devices."""
+        pool = common_operations.DevicePool(self.device_factory)
+        pool.CreateDevices(5)
+        self.assertEqual(self.device_factory.CreateInstance.call_count, 5)
+        self.assertEqual(len(pool.devices), 5)
 
-  def testDevicePoolCreateDevices(self):
-    pool = common_operations.DevicePool(self.device_factory)
-    pool.CreateDevices(5)
-    self.assertEqual(self.device_factory.CreateInstance.call_count, 5)
-    self.assertEqual(len(pool.devices), 5)
-
-  def testCreateDevices(self):
-    cfg = self._CreateCfg()
-    r = common_operations.CreateDevices(self.CMD, cfg, self.device_factory, 1)
-    self.assertEqual(r.command, self.CMD)
-    self.assertEqual(r.status, report.Status.SUCCESS)
-    self.assertEqual(
-        r.data, {"devices": [{
-            "ip": self.IP,
-            "instance_name": self.INSTANCE
-        }]})
+    def testCreateDevices(self):
+        """Test Create Devices."""
+        cfg = self._CreateCfg()
+        _report = common_operations.CreateDevices(self.CMD, cfg, self.device_factory, 1)
+        self.assertEqual(_report.command, self.CMD)
+        self.assertEqual(_report.status, report.Status.SUCCESS)
+        self.assertEqual(
+            _report.data,
+            {"devices": [{
+                "ip": self.IP,
+                "instance_name": self.INSTANCE
+            }]})
 
 
 if __name__ == "__main__":
-  unittest.main()
+    unittest.main()
diff --git a/public/avd.py b/public/avd.py
index d2e9c8d..9df3df0 100755
--- a/public/avd.py
+++ b/public/avd.py
@@ -13,10 +13,9 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """This module defines an AVD instance.
 
-TODO(fdeng):
+TODO:
   The current implementation only initialize an object
   with IP and instance name. A complete implementation
   will include the following features.
@@ -50,9 +49,10 @@
 
     @property
     def ip(self):
+        """Getter of _ip."""
         if not self._ip:
-            raise ValueError("IP of instance %s is unknown yet." %
-                             self._instance_name)
+            raise ValueError(
+                "IP of instance %s is unknown yet." % self._instance_name)
         return self._ip
 
     @ip.setter
@@ -61,6 +61,7 @@
 
     @property
     def instance_name(self):
+        """Getter of _instance_name."""
         return self._instance_name
 
     def __str__(self):
diff --git a/public/device_driver.py b/public/device_driver.py
index 810a3a8..31cbb12 100755
--- a/public/device_driver.py
+++ b/public/device_driver.py
@@ -62,6 +62,7 @@
 ADB_CONNECT_CMD = "adb connect 127.0.0.1:%(adb_port)d"
 
 
+# pylint: disable=invalid-name
 class AndroidVirtualDevicePool(object):
     """A class that manages a pool of devices."""
 
@@ -375,6 +376,7 @@
                       vnc_port)
 
 
+# pylint: disable=too-many-locals
 def CreateAndroidVirtualDevices(cfg,
                                 build_target=None,
                                 build_id=None,
@@ -631,7 +633,7 @@
     """
     credentials = auth.CreateCredentials(cfg, ALL_SCOPES)
     compute_client = android_compute_client.AndroidComputeClient(
-            cfg, credentials)
+        cfg, credentials)
     logger.info("Checking if user has access to project %s", cfg.project)
     if not compute_client.CheckAccess():
         logger.error("User does not have access to project %s", cfg.project)
diff --git a/public/device_driver_test.py b/public/device_driver_test.py
index 321a87b..41853ba 100644
--- a/public/device_driver_test.py
+++ b/public/device_driver_test.py
@@ -19,10 +19,11 @@
 import datetime
 import uuid
 
-import dateutil.parser
+import unittest
 import mock
 
-import unittest
+import dateutil.parser
+
 from acloud.internal.lib import auth
 from acloud.internal.lib import android_build_client
 from acloud.internal.lib import android_compute_client
@@ -31,6 +32,26 @@
 from acloud.public import device_driver
 
 
+def _CreateCfg():
+    """A helper method that creates a mock configuration object."""
+    cfg = mock.MagicMock()
+    cfg.service_account_name = "fake@service.com"
+    cfg.service_account_private_key_path = "/fake/path/to/key"
+    cfg.zone = "fake_zone"
+    cfg.disk_image_name = "fake_image.tar.gz"
+    cfg.disk_image_mime_type = "fake/type"
+    cfg.storage_bucket_name = "fake_bucket"
+    cfg.extra_data_disk_size_gb = 4
+    cfg.precreated_data_image_map = {
+        4: "extradisk-image-4gb",
+        10: "extradisk-image-10gb"
+    }
+    cfg.ssh_private_key_path = ""
+    cfg.ssh_public_key_path = ""
+
+    return cfg
+
+
 class DeviceDriverTest(driver_test_lib.BaseDriverTest):
     """Test device_driver."""
 
@@ -39,7 +60,7 @@
         super(DeviceDriverTest, self).setUp()
         self.build_client = mock.MagicMock()
         self.Patch(android_build_client, "AndroidBuildClient",
-            return_value=self.build_client)
+                   return_value=self.build_client)
         self.storage_client = mock.MagicMock()
         self.Patch(
             gstorage_client, "StorageClient", return_value=self.storage_client)
@@ -50,28 +71,9 @@
             return_value=self.compute_client)
         self.Patch(auth, "CreateCredentials", return_value=mock.MagicMock())
 
-    def _CreateCfg(self):
-        """A helper method that creates a mock configuration object."""
-        cfg = mock.MagicMock()
-        cfg.service_account_name = "fake@service.com"
-        cfg.service_account_private_key_path = "/fake/path/to/key"
-        cfg.zone = "fake_zone"
-        cfg.disk_image_name = "fake_image.tar.gz"
-        cfg.disk_image_mime_type = "fake/type"
-        cfg.storage_bucket_name = "fake_bucket"
-        cfg.extra_data_disk_size_gb = 4
-        cfg.precreated_data_image_map = {
-            4: "extradisk-image-4gb",
-            10: "extradisk-image-10gb"
-        }
-        cfg.ssh_private_key_path = ""
-        cfg.ssh_public_key_path = ""
-
-        return cfg
-
     def testCreateAndroidVirtualDevices(self):
         """Test CreateAndroidVirtualDevices."""
-        cfg = self._CreateCfg()
+        cfg = _CreateCfg()
         fake_gs_url = "fake_gs_url"
         fake_ip = "140.1.1.1"
         fake_instance = "fake-instance"
@@ -93,14 +95,14 @@
         self.compute_client.GetDataDiskName.return_value = disk_name
 
         # Verify
-        r = device_driver.CreateAndroidVirtualDevices(
-        cfg, fake_build_target, fake_build_id)
+        report = device_driver.CreateAndroidVirtualDevices(
+            cfg, fake_build_target, fake_build_id)
         self.build_client.CopyTo.assert_called_with(
-        fake_build_target, fake_build_id, artifact_name=cfg.disk_image_name,
-        destination_bucket=cfg.storage_bucket_name,
-        destination_path=fake_gs_object)
+            fake_build_target, fake_build_id, artifact_name=cfg.disk_image_name,
+            destination_bucket=cfg.storage_bucket_name,
+            destination_path=fake_gs_object)
         self.compute_client.CreateImage.assert_called_with(
-        image_name=fake_image, source_uri=fake_gs_url)
+            image_name=fake_image, source_uri=fake_gs_url)
         self.compute_client.CreateInstance.assert_called_with(
             instance=fake_instance,
             image_name=fake_image,
@@ -109,7 +111,7 @@
         self.storage_client.Delete(cfg.storage_bucket_name, fake_gs_object)
 
         self.assertEquals(
-            r.data,
+            report.data,
             {
                 "devices": [
                     {
@@ -119,8 +121,8 @@
                 ],
             }
         )
-        self.assertEquals(r.command, "create")
-        self.assertEquals(r.status, "SUCCESS")
+        self.assertEquals(report.command, "create")
+        self.assertEquals(report.status, "SUCCESS")
 
 
     def testDeleteAndroidVirtualDevices(self):
@@ -128,11 +130,11 @@
         instance_names = ["fake-instance-1", "fake-instance-2"]
         self.compute_client.DeleteInstances.return_value = (instance_names, [],
                                                             [])
-        cfg = self._CreateCfg()
-        r = device_driver.DeleteAndroidVirtualDevices(cfg, instance_names)
+        cfg = _CreateCfg()
+        report = device_driver.DeleteAndroidVirtualDevices(cfg, instance_names)
         self.compute_client.DeleteInstances.assert_called_once_with(
             instance_names, cfg.zone)
-        self.assertEquals(r.data, {
+        self.assertEquals(report.data, {
             "deleted": [
                 {
                     "name": instance_names[0],
@@ -144,10 +146,11 @@
                 },
             ],
         })
-        self.assertEquals(r.command, "delete")
-        self.assertEquals(r.status, "SUCCESS")
+        self.assertEquals(report.command, "delete")
+        self.assertEquals(report.status, "SUCCESS")
 
     def testCleanup(self):
+        """Test Cleanup."""
         expiration_mins = 30
         before_deadline = "2015-10-29T12:00:30.018-07:00"
         after_deadline = "2015-10-29T12:45:30.018-07:00"
@@ -212,9 +215,9 @@
                                                         [])
         self.storage_client.DeleteFiles.return_value = (["fake_object_1"], [],
                                                         [])
-        cfg = self._CreateCfg()
-        r = device_driver.Cleanup(cfg, expiration_mins)
-        self.assertEqual(r.errors, [])
+        cfg = _CreateCfg()
+        report = device_driver.Cleanup(cfg, expiration_mins)
+        self.assertEqual(report.errors, [])
         expected_report_data = {
             "deleted": [
                 {"name": "fake_instance_1",
@@ -227,7 +230,7 @@
                  "type": "cached_build_artifact"},
             ]
         }
-        self.assertEqual(r.data, expected_report_data)
+        self.assertEqual(report.data, expected_report_data)
 
         self.compute_client.ListInstances.assert_called_once_with(
             zone=cfg.zone)
diff --git a/pylintrc b/pylintrc
new file mode 100644
index 0000000..b7c5763
--- /dev/null
+++ b/pylintrc
@@ -0,0 +1,18 @@
+[MESSAGES CONTROL]
+
+disable=
+    relative-import,
+    too-few-public-methods,
+    fixme,
+    too-many-instance-attributes,
+    too-many-arguments
+
+[BASIC]
+
+# Acloud uses PascalCase for functions/methods except for test methods, which
+# camelCase.
+method-rgx=[A-Z_][a-zA-Z0-9]{2,30}$|(__[a-z][a-zA-Z0-9_]+__)$|test[A-Z_][a-zA-Z0-9]{2,30}$
+function-rgx=[A-Z_][a-zA-Z0-9]{2,30}$|(__[a-z][a-zA-Z0-9_]+__)$
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=e, f, logger, ip, main
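
For readers squinting at the regexes above: the first alternative accepts PascalCase names (optionally with a leading underscore), the second accepts dunder methods, and the extra branch in method-rgx accepts test methods prefixed with "test". A quick sanity check, with the pattern copied verbatim from the method-rgx line and arbitrary example names:

    from __future__ import print_function
    import re

    METHOD_RGX = re.compile(
        r"[A-Z_][a-zA-Z0-9]{2,30}$|(__[a-z][a-zA-Z0-9_]+__)$"
        r"|test[A-Z_][a-zA-Z0-9]{2,30}$")

    for name in ("CreateDevices", "_CreateCfg", "testCreateDevices",
                 "__init__", "create_devices"):
        # pylint anchors at the start of the name, so re.match mirrors it.
        print(name, bool(METHOD_RGX.match(name)))
    # Only "create_devices" fails: snake_case is rejected outside dunder names.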