docs: update generated docs (#981)

diff --git a/docs/dyn/ml_v1.projects.models.html b/docs/dyn/ml_v1.projects.models.html
index 4687f5e..b7dd28e 100644
--- a/docs/dyn/ml_v1.projects.models.html
+++ b/docs/dyn/ml_v1.projects.models.html
@@ -92,7 +92,7 @@
   <code><a href="#getIamPolicy">getIamPolicy(resource, options_requestedPolicyVersion=None, x__xgafv=None)</a></code></p>
 <p class="firstline">Gets the access control policy for a resource.</p>
 <p class="toc_element">
-  <code><a href="#list">list(parent, filter=None, pageToken=None, pageSize=None, x__xgafv=None)</a></code></p>
+  <code><a href="#list">list(parent, pageSize=None, pageToken=None, filter=None, x__xgafv=None)</a></code></p>
 <p class="firstline">Lists the models in a project.</p>
 <p class="toc_element">
   <code><a href="#list_next">list_next(previous_request, previous_response)</a></code></p>
@@ -121,410 +121,53 @@
     The object takes the form of:
 
 { # Represents a machine learning solution.
-    # 
-    # A model can have multiple versions, each of which is a deployed, trained
-    # model ready to receive prediction requests. The model itself is just a
-    # container.
-  &quot;defaultVersion&quot;: { # Represents a version of the model. # Output only. The default version of the model. This version will be used to
-      # handle prediction requests that do not specify a version.
       # 
-      # You can change the default version by calling
-      # projects.models.versions.setDefault.
-      #
-      # Each version is a trained model deployed in the cloud, ready to handle
-      # prediction requests. A model can have multiple versions. You can get
-      # information about all of the versions of a given model by calling
-      # projects.models.versions.list.
-    &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
-        # Only specify this field if you have specified a Compute Engine (N1) machine
-        # type in the `machineType` field. Learn more about [using GPUs for online
-        # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-        # Note that the AcceleratorConfig can be used in both Jobs and Versions.
-        # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
-        # [accelerators for online
-        # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-      &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
-      &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
-    },
-    &quot;isDefault&quot;: True or False, # Output only. If true, this version will be used to handle prediction
-        # requests that do not specify a version.
-        #
-        # You can change the default version by calling
-        # projects.methods.versions.setDefault.
-    &quot;manualScaling&quot;: { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
-        # model. You should generally use `auto_scaling` with an appropriate
-        # `min_nodes` instead, but this option is available if you want more
-        # predictable billing. Beware that latency and error rates will increase
-        # if the traffic exceeds that capability of the system to serve it based
-        # on the selected number of nodes.
-      &quot;nodes&quot;: 42, # The number of nodes to allocate for this model. These nodes are always up,
-          # starting from the time the model is deployed, so the cost of operating
-          # this model will be proportional to `nodes` * number of hours since
-          # last billing cycle plus the cost for each prediction performed.
-    },
-    &quot;state&quot;: &quot;A String&quot;, # Output only. The state of a version.
-    &quot;name&quot;: &quot;A String&quot;, # Required. The name specified for the version when it was created.
-        #
-        # The version name must be unique within the model it is created in.
-    &quot;serviceAccount&quot;: &quot;A String&quot;, # Optional. Specifies the service account for resource access control.
-    &quot;pythonVersion&quot;: &quot;A String&quot;, # Required. The version of Python used in prediction.
-        #
-        # The following Python versions are available:
-        #
-        # * Python &#x27;3.7&#x27; is available when `runtime_version` is set to &#x27;1.15&#x27; or
-        #   later.
-        # * Python &#x27;3.5&#x27; is available when `runtime_version` is set to a version
-        #   from &#x27;1.4&#x27; to &#x27;1.14&#x27;.
-        # * Python &#x27;2.7&#x27; is available when `runtime_version` is set to &#x27;1.15&#x27; or
-        #   earlier.
-        #
-        # Read more about the Python versions available for [each runtime
-        # version](/ml-engine/docs/runtime-version-list).
-    &quot;lastUseTime&quot;: &quot;A String&quot;, # Output only. The time the version was last used for prediction.
-    &quot;predictionClass&quot;: &quot;A String&quot;, # Optional. The fully qualified name
-        # (&lt;var&gt;module_name&lt;/var&gt;.&lt;var&gt;class_name&lt;/var&gt;) of a class that implements
-        # the Predictor interface described in this reference field. The module
-        # containing this class should be included in a package provided to the
-        # [`packageUris` field](#Version.FIELDS.package_uris).
-        #
-        # Specify this field if and only if you are deploying a [custom prediction
-        # routine (beta)](/ml-engine/docs/tensorflow/custom-prediction-routines).
-        # If you specify this field, you must set
-        # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater and
-        # you must set `machineType` to a [legacy (MLS1)
-        # machine type](/ml-engine/docs/machine-types-online-prediction).
-        #
-        # The following code sample provides the Predictor interface:
-        #
-        # &lt;pre style=&quot;max-width: 626px;&quot;&gt;
-        # class Predictor(object):
-        # &quot;&quot;&quot;Interface for constructing custom predictors.&quot;&quot;&quot;
-        #
-        # def predict(self, instances, **kwargs):
-        #     &quot;&quot;&quot;Performs custom prediction.
-        #
-        #     Instances are the decoded values from the request. They have already
-        #     been deserialized from JSON.
-        #
-        #     Args:
-        #         instances: A list of prediction input instances.
-        #         **kwargs: A dictionary of keyword args provided as additional
-        #             fields on the predict request body.
-        #
-        #     Returns:
-        #         A list of outputs containing the prediction results. This list must
-        #         be JSON serializable.
-        #     &quot;&quot;&quot;
-        #     raise NotImplementedError()
-        #
-        # @classmethod
-        # def from_path(cls, model_dir):
-        #     &quot;&quot;&quot;Creates an instance of Predictor using the given path.
-        #
-        #     Loading of the predictor should be done in this method.
-        #
-        #     Args:
-        #         model_dir: The local directory that contains the exported model
-        #             file along with any additional files uploaded when creating the
-        #             version resource.
-        #
-        #     Returns:
-        #         An instance implementing this Predictor class.
-        #     &quot;&quot;&quot;
-        #     raise NotImplementedError()
-        # &lt;/pre&gt;
-        #
-        # Learn more about [the Predictor interface and custom prediction
-        # routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
-    &quot;deploymentUri&quot;: &quot;A String&quot;, # Required. The Cloud Storage location of the trained model used to
-        # create the version. See the
-        # [guide to model
-        # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
-        # information.
-        #
-        # When passing Version to
-        # projects.models.versions.create
-        # the model service uses the specified location as the source of the model.
-        # Once deployed, the model version is hosted by the prediction service, so
-        # this location is useful only as a historical record.
-        # The total number of model files can&#x27;t exceed 1000.
-    &quot;packageUris&quot;: [ # Optional. Cloud Storage paths (`gs://…`) of packages for [custom
-        # prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines)
-        # or [scikit-learn pipelines with custom
-        # code](/ml-engine/docs/scikit/exporting-for-prediction#custom-pipeline-code).
-        #
-        # For a custom prediction routine, one of these packages must contain your
-        # Predictor class (see
-        # [`predictionClass`](#Version.FIELDS.prediction_class)). Additionally,
-        # include any dependencies used by your Predictor or scikit-learn pipeline
-        # uses that are not already included in your selected [runtime
-        # version](/ml-engine/docs/tensorflow/runtime-version-list).
-        #
-        # If you specify this field, you must also set
-        # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater.
+      # A model can have multiple versions, each of which is a deployed, trained
+      # model ready to receive prediction requests. The model itself is just a
+      # container.
+    &quot;description&quot;: &quot;A String&quot;, # Optional. The description specified for the model when it was created.
+    &quot;regions&quot;: [ # Optional. The list of regions where the model is going to be deployed.
+        # Only one region per model is supported.
+        # Defaults to &#x27;us-central1&#x27; if nothing is set.
+        # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
+        # for AI Platform services.
+        # Note:
+        # *   No matter where a model is deployed, it can always be accessed by
+        #     users from anywhere, both for online and batch prediction.
+        # *   The region for a batch prediction job is set by the region field when
+        #     submitting the batch prediction job and does not take its value from
+        #     this field.
       &quot;A String&quot;,
     ],
-    &quot;explanationConfig&quot;: { # Message holding configuration options for explaining model predictions. # Optional. Configures explainability features on the model&#x27;s version.
-        # Some explanation features require additional metadata to be loaded
-        # as part of the model payload.
-        # There are two feature attribution methods supported for TensorFlow models:
-        # integrated gradients and sampled Shapley.
-        # [Learn more about feature
-        # attributions.](/ai-platform/prediction/docs/ai-explanations/overview)
-      &quot;integratedGradientsAttribution&quot;: { # Attributes credit by computing the Aumann-Shapley value taking advantage # Attributes credit by computing the Aumann-Shapley value taking advantage
-          # of the model&#x27;s fully differentiable structure. Refer to this paper for
-          # more details: http://proceedings.mlr.press/v70/sundararajan17a.html
-          # of the model&#x27;s fully differentiable structure. Refer to this paper for
-          # more details: https://arxiv.org/abs/1703.01365
-        &quot;numIntegralSteps&quot;: 42, # Number of steps for approximating the path integral.
-            # A good value to start is 50 and gradually increase until the
-            # sum to diff property is met within the desired error range.
-      },
-      &quot;sampledShapleyAttribution&quot;: { # An attribution method that approximates Shapley values for features that # An attribution method that approximates Shapley values for features that
-          # contribute to the label being predicted. A sampling strategy is used to
-          # approximate the value rather than considering all subsets of features.
-          # contribute to the label being predicted. A sampling strategy is used to
-          # approximate the value rather than considering all subsets of features.
-        &quot;numPaths&quot;: 42, # The number of feature permutations to consider when approximating the
-            # Shapley values.
-      },
-      &quot;xraiAttribution&quot;: { # Attributes credit by computing the XRAI taking advantage # Attributes credit by computing the XRAI taking advantage
-          # of the model&#x27;s fully differentiable structure. Refer to this paper for
-          # more details: https://arxiv.org/abs/1906.02825
-          # Currently only implemented for models with natural image inputs.
-          # of the model&#x27;s fully differentiable structure. Refer to this paper for
-          # more details: https://arxiv.org/abs/1906.02825
-          # Currently only implemented for models with natural image inputs.
-        &quot;numIntegralSteps&quot;: 42, # Number of steps for approximating the path integral.
-            # A good value to start is 50 and gradually increase until the
-            # sum to diff property is met within the desired error range.
-      },
-    },
-    &quot;autoScaling&quot;: { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
-        # response to increases and decreases in traffic. Care should be
-        # taken to ramp up traffic according to the model&#x27;s ability to scale
-        # or you will start seeing increases in latency and 429 response codes.
-        #
-        # Note that you cannot use AutoScaling if your version uses
-        # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use specify
-        # `manual_scaling`.
-      &quot;minNodes&quot;: 42, # Optional. The minimum number of nodes to allocate for this model. These
-          # nodes are always up, starting from the time the model is deployed.
-          # Therefore, the cost of operating this model will be at least
-          # `rate` * `min_nodes` * number of hours since last billing cycle,
-          # where `rate` is the cost per node-hour as documented in the
-          # [pricing guide](/ml-engine/docs/pricing),
-          # even if no predictions are performed. There is additional cost for each
-          # prediction performed.
-          #
-          # Unlike manual scaling, if the load gets too heavy for the nodes
-          # that are up, the service will automatically add nodes to handle the
-          # increased load as well as scale back as traffic drops, always maintaining
-          # at least `min_nodes`. You will be charged for the time in which additional
-          # nodes are used.
-          #
-          # If `min_nodes` is not specified and AutoScaling is used with a [legacy
-          # (MLS1) machine type](/ml-engine/docs/machine-types-online-prediction),
-          # `min_nodes` defaults to 0, in which case, when traffic to a model stops
-          # (and after a cool-down period), nodes will be shut down and no charges will
-          # be incurred until traffic to the model resumes.
-          #
-          # If `min_nodes` is not specified and AutoScaling is used with a [Compute
-          # Engine (N1) machine type](/ml-engine/docs/machine-types-online-prediction),
-          # `min_nodes` defaults to 1. `min_nodes` must be at least 1 for use with a
-          # Compute Engine machine type.
-          #
-          # Note that you cannot use AutoScaling if your version uses
-          # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use
-          # ManualScaling.
-          #
-          # You can set `min_nodes` when creating the model version, and you can also
-          # update `min_nodes` for an existing version:
-          # &lt;pre&gt;
-          # update_body.json:
-          # {
-          #   &#x27;autoScaling&#x27;: {
-          #     &#x27;minNodes&#x27;: 5
-          #   }
-          # }
-          # &lt;/pre&gt;
-          # HTTP request:
-          # &lt;pre style=&quot;max-width: 626px;&quot;&gt;
-          # PATCH
-          # https://ml.googleapis.com/v1/{name=projects/*/models/*/versions/*}?update_mask=autoScaling.minNodes
-          # -d @./update_body.json
-          # &lt;/pre&gt;
-    },
-    &quot;createTime&quot;: &quot;A String&quot;, # Output only. The time the version was created.
-    &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your model
-        # versions. Each label is a key-value pair, where both the key and the value
-        # are arbitrary strings that you supply.
-        # For more information, see the documentation on
-        # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
-      &quot;a_key&quot;: &quot;A String&quot;,
-    },
-    &quot;requestLoggingConfig&quot;: { # Configuration for logging request-response pairs to a BigQuery table. # Optional. *Only* specify this field in a
-        # projects.models.versions.patch
-        # request. Specifying it in a
-        # projects.models.versions.create
-        # request has no effect.
-        #
-        # Configures the request-response pair logging on predictions from this
-        # Version.
-        # Online prediction requests to a model version and the responses to these
-        # requests are converted to raw strings and saved to the specified BigQuery
-        # table. Logging is constrained by [BigQuery quotas and
-        # limits](/bigquery/quotas). If your project exceeds BigQuery quotas or limits,
-        # AI Platform Prediction does not log request-response pairs, but it continues
-        # to serve predictions.
-        #
-        # If you are using [continuous
-        # evaluation](/ml-engine/docs/continuous-evaluation/), you do not need to
-        # specify this configuration manually. Setting up continuous evaluation
-        # automatically enables logging of request-response pairs.
-      &quot;bigqueryTableName&quot;: &quot;A String&quot;, # Required. Fully qualified BigQuery table name in the following format:
-          # &quot;&lt;var&gt;project_id&lt;/var&gt;.&lt;var&gt;dataset_name&lt;/var&gt;.&lt;var&gt;table_name&lt;/var&gt;&quot;
-          #
-          # The specified table must already exist, and the &quot;Cloud ML Service Agent&quot;
-          # for your project must have permission to write to it. The table must have
-          # the following [schema](/bigquery/docs/schemas):
-          #
-          # &lt;table&gt;
-          #   &lt;tr&gt;&lt;th&gt;Field name&lt;/th&gt;&lt;th style=&quot;display: table-cell&quot;&gt;Type&lt;/th&gt;
-          #     &lt;th style=&quot;display: table-cell&quot;&gt;Mode&lt;/th&gt;&lt;/tr&gt;
-          #   &lt;tr&gt;&lt;td&gt;model&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
-          #   &lt;tr&gt;&lt;td&gt;model_version&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
-          #   &lt;tr&gt;&lt;td&gt;time&lt;/td&gt;&lt;td&gt;TIMESTAMP&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
-          #   &lt;tr&gt;&lt;td&gt;raw_data&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
-          #   &lt;tr&gt;&lt;td&gt;raw_prediction&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;NULLABLE&lt;/td&gt;&lt;/tr&gt;
-          #   &lt;tr&gt;&lt;td&gt;groundtruth&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;NULLABLE&lt;/td&gt;&lt;/tr&gt;
-          # &lt;/table&gt;
-      &quot;samplingPercentage&quot;: 3.14, # Percentage of requests to be logged, expressed as a fraction from 0 to 1.
-          # For example, if you want to log 10% of requests, enter `0.1`. The sampling
-          # window is the lifetime of the model version. Defaults to 0.
-    },
-    &quot;errorMessage&quot;: &quot;A String&quot;, # Output only. The details of a failure or a cancellation.
-    &quot;machineType&quot;: &quot;A String&quot;, # Optional. The type of machine on which to serve the model. Currently only
-        # applies to online prediction service. If this field is not specified, it
-        # defaults to `mls1-c1-m2`.
-        #
-        # Online prediction supports the following machine types:
-        #
-        # * `mls1-c1-m2`
-        # * `mls1-c4-m2`
-        # * `n1-standard-2`
-        # * `n1-standard-4`
-        # * `n1-standard-8`
-        # * `n1-standard-16`
-        # * `n1-standard-32`
-        # * `n1-highmem-2`
-        # * `n1-highmem-4`
-        # * `n1-highmem-8`
-        # * `n1-highmem-16`
-        # * `n1-highmem-32`
-        # * `n1-highcpu-2`
-        # * `n1-highcpu-4`
-        # * `n1-highcpu-8`
-        # * `n1-highcpu-16`
-        # * `n1-highcpu-32`
-        #
-        # `mls1-c1-m2` is generally available. All other machine types are available
-        # in beta. Learn more about the [differences between machine
-        # types](/ml-engine/docs/machine-types-online-prediction).
-    &quot;runtimeVersion&quot;: &quot;A String&quot;, # Required. The AI Platform runtime version to use for this deployment.
-        #
-        # For more information, see the
-        # [runtime version list](/ml-engine/docs/runtime-version-list) and
-        # [how to manage runtime versions](/ml-engine/docs/versioning).
-    &quot;description&quot;: &quot;A String&quot;, # Optional. The description specified for the version when it was created.
-    &quot;framework&quot;: &quot;A String&quot;, # Optional. The machine learning framework AI Platform uses to train
-        # this version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,
-        # `XGBOOST`. If you do not specify a framework, AI Platform
-        # will analyze files in the deployment_uri to determine a framework. If you
-        # choose `SCIKIT_LEARN` or `XGBOOST`, you must also set the runtime version
-        # of the model to 1.4 or greater.
-        #
-        # Do **not** specify a framework if you&#x27;re deploying a [custom
-        # prediction routine](/ml-engine/docs/tensorflow/custom-prediction-routines).
-        #
-        # If you specify a [Compute Engine (N1) machine
-        # type](/ml-engine/docs/machine-types-online-prediction) in the
-        # `machineType` field, you must specify `TENSORFLOW`
-        # for the framework.
+    &quot;name&quot;: &quot;A String&quot;, # Required. The name specified for the model when it was created.
+        # 
+        # The model name must be unique within the project it is created in.
+    &quot;onlinePredictionConsoleLogging&quot;: True or False, # Optional. If true, online prediction nodes send `stderr` and `stdout`
+        # streams to Stackdriver Logging. These can be more verbose than the standard
+        # access logs (see `onlinePredictionLogging`) and can incur higher cost.
+        # However, they are helpful for debugging. Note that
+        # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
+        # your project receives prediction requests at a high QPS. Estimate your
+        # costs before enabling this option.
+        # 
+        # Default is false.
     &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
         # prevent simultaneous updates of a model from overwriting each other.
         # It is strongly suggested that systems make use of the `etag` in the
         # read-modify-write cycle to perform model updates in order to avoid race
-        # conditions: An `etag` is returned in the response to `GetVersion`, and
-        # systems are expected to put that etag in the request to `UpdateVersion` to
+        # conditions: An `etag` is returned in the response to `GetModel`, and
+        # systems are expected to put that etag in the request to `UpdateModel` to
         # ensure that their change will be applied to the model as intended.
-  },
-  &quot;onlinePredictionConsoleLogging&quot;: True or False, # Optional. If true, online prediction nodes send `stderr` and `stdout`
-      # streams to Stackdriver Logging. These can be more verbose than the standard
-      # access logs (see `onlinePredictionLogging`) and can incur higher cost.
-      # However, they are helpful for debugging. Note that
-      # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
-      # your project receives prediction requests at a high QPS. Estimate your
-      # costs before enabling this option.
-      # 
-      # Default is false.
-  &quot;regions&quot;: [ # Optional. The list of regions where the model is going to be deployed.
-      # Only one region per model is supported.
-      # Defaults to &#x27;us-central1&#x27; if nothing is set.
-      # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
-      # for AI Platform services.
-      # Note:
-      # *   No matter where a model is deployed, it can always be accessed by
-      #     users from anywhere, both for online and batch prediction.
-      # *   The region for a batch prediction job is set by the region field when
-      #     submitting the batch prediction job and does not take its value from
-      #     this field.
-    &quot;A String&quot;,
-  ],
-  &quot;description&quot;: &quot;A String&quot;, # Optional. The description specified for the model when it was created.
-  &quot;onlinePredictionLogging&quot;: True or False, # Optional. If true, online prediction access logs are sent to StackDriver
-      # Logging. These logs are like standard server access logs, containing
-      # information like timestamp and latency for each request. Note that
-      # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
-      # your project receives prediction requests at a high queries per second rate
-      # (QPS). Estimate your costs before enabling this option.
-      # 
-      # Default is false.
-  &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
-      # prevent simultaneous updates of a model from overwriting each other.
-      # It is strongly suggested that systems make use of the `etag` in the
-      # read-modify-write cycle to perform model updates in order to avoid race
-      # conditions: An `etag` is returned in the response to `GetModel`, and
-      # systems are expected to put that etag in the request to `UpdateModel` to
-      # ensure that their change will be applied to the model as intended.
-  &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your models.
-      # Each label is a key-value pair, where both the key and the value are
-      # arbitrary strings that you supply.
-      # For more information, see the documentation on
-      # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
-    &quot;a_key&quot;: &quot;A String&quot;,
-  },
-  &quot;name&quot;: &quot;A String&quot;, # Required. The name specified for the model when it was created.
-      # 
-      # The model name must be unique within the project it is created in.
-}
-
-  x__xgafv: string, V1 error format.
-    Allowed values
-      1 - v1 error format
-      2 - v2 error format
-
-Returns:
-  An object of the form:
-
-    { # Represents a machine learning solution.
-      #
-      # A model can have multiple versions, each of which is a deployed, trained
-      # model ready to receive prediction requests. The model itself is just a
-      # container.
+    &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your models.
+        # Each label is a key-value pair, where both the key and the value are
+        # arbitrary strings that you supply.
+        # For more information, see the documentation on
+        # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
+      &quot;a_key&quot;: &quot;A String&quot;,
+    },
     &quot;defaultVersion&quot;: { # Represents a version of the model. # Output only. The default version of the model. This version will be used to
         # handle prediction requests that do not specify a version.
-        #
+        # 
         # You can change the default version by calling
         # projects.models.versions.setDefault.
         #
@@ -532,119 +175,40 @@
         # prediction requests. A model can have multiple versions. You can get
         # information about all of the versions of a given model by calling
         # projects.models.versions.list.
-      &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
-          # Only specify this field if you have specified a Compute Engine (N1) machine
-          # type in the `machineType` field. Learn more about [using GPUs for online
-          # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-          # Note that the AcceleratorConfig can be used in both Jobs and Versions.
-          # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
-          # [accelerators for online
-          # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-        &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
-        &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
+      &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your model
+          # versions. Each label is a key-value pair, where both the key and the value
+          # are arbitrary strings that you supply.
+          # For more information, see the documentation on
+          # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
+        &quot;a_key&quot;: &quot;A String&quot;,
       },
-      &quot;isDefault&quot;: True or False, # Output only. If true, this version will be used to handle prediction
-          # requests that do not specify a version.
+      &quot;machineType&quot;: &quot;A String&quot;, # Optional. The type of machine on which to serve the model. Currently only
+          # applies to online prediction service. If this field is not specified, it
+          # defaults to `mls1-c1-m2`.
           #
-          # You can change the default version by calling
-          # projects.methods.versions.setDefault.
-      &quot;manualScaling&quot;: { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
-          # model. You should generally use `auto_scaling` with an appropriate
-          # `min_nodes` instead, but this option is available if you want more
-          # predictable billing. Beware that latency and error rates will increase
-          # if the traffic exceeds that capability of the system to serve it based
-          # on the selected number of nodes.
-        &quot;nodes&quot;: 42, # The number of nodes to allocate for this model. These nodes are always up,
-            # starting from the time the model is deployed, so the cost of operating
-            # this model will be proportional to `nodes` * number of hours since
-            # last billing cycle plus the cost for each prediction performed.
-      },
-      &quot;state&quot;: &quot;A String&quot;, # Output only. The state of a version.
-      &quot;name&quot;: &quot;A String&quot;, # Required. The name specified for the version when it was created.
+          # Online prediction supports the following machine types:
           #
-          # The version name must be unique within the model it is created in.
-      &quot;serviceAccount&quot;: &quot;A String&quot;, # Optional. Specifies the service account for resource access control.
-      &quot;pythonVersion&quot;: &quot;A String&quot;, # Required. The version of Python used in prediction.
+          # * `mls1-c1-m2`
+          # * `mls1-c4-m2`
+          # * `n1-standard-2`
+          # * `n1-standard-4`
+          # * `n1-standard-8`
+          # * `n1-standard-16`
+          # * `n1-standard-32`
+          # * `n1-highmem-2`
+          # * `n1-highmem-4`
+          # * `n1-highmem-8`
+          # * `n1-highmem-16`
+          # * `n1-highmem-32`
+          # * `n1-highcpu-2`
+          # * `n1-highcpu-4`
+          # * `n1-highcpu-8`
+          # * `n1-highcpu-16`
+          # * `n1-highcpu-32`
           #
-          # The following Python versions are available:
-          #
-          # * Python &#x27;3.7&#x27; is available when `runtime_version` is set to &#x27;1.15&#x27; or
-          #   later.
-          # * Python &#x27;3.5&#x27; is available when `runtime_version` is set to a version
-          #   from &#x27;1.4&#x27; to &#x27;1.14&#x27;.
-          # * Python &#x27;2.7&#x27; is available when `runtime_version` is set to &#x27;1.15&#x27; or
-          #   earlier.
-          #
-          # Read more about the Python versions available for [each runtime
-          # version](/ml-engine/docs/runtime-version-list).
-      &quot;lastUseTime&quot;: &quot;A String&quot;, # Output only. The time the version was last used for prediction.
-      &quot;predictionClass&quot;: &quot;A String&quot;, # Optional. The fully qualified name
-          # (&lt;var&gt;module_name&lt;/var&gt;.&lt;var&gt;class_name&lt;/var&gt;) of a class that implements
-          # the Predictor interface described in this reference field. The module
-          # containing this class should be included in a package provided to the
-          # [`packageUris` field](#Version.FIELDS.package_uris).
-          #
-          # Specify this field if and only if you are deploying a [custom prediction
-          # routine (beta)](/ml-engine/docs/tensorflow/custom-prediction-routines).
-          # If you specify this field, you must set
-          # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater and
-          # you must set `machineType` to a [legacy (MLS1)
-          # machine type](/ml-engine/docs/machine-types-online-prediction).
-          #
-          # The following code sample provides the Predictor interface:
-          #
-          # &lt;pre style=&quot;max-width: 626px;&quot;&gt;
-          # class Predictor(object):
-          # &quot;&quot;&quot;Interface for constructing custom predictors.&quot;&quot;&quot;
-          #
-          # def predict(self, instances, **kwargs):
-          #     &quot;&quot;&quot;Performs custom prediction.
-          #
-          #     Instances are the decoded values from the request. They have already
-          #     been deserialized from JSON.
-          #
-          #     Args:
-          #         instances: A list of prediction input instances.
-          #         **kwargs: A dictionary of keyword args provided as additional
-          #             fields on the predict request body.
-          #
-          #     Returns:
-          #         A list of outputs containing the prediction results. This list must
-          #         be JSON serializable.
-          #     &quot;&quot;&quot;
-          #     raise NotImplementedError()
-          #
-          # @classmethod
-          # def from_path(cls, model_dir):
-          #     &quot;&quot;&quot;Creates an instance of Predictor using the given path.
-          #
-          #     Loading of the predictor should be done in this method.
-          #
-          #     Args:
-          #         model_dir: The local directory that contains the exported model
-          #             file along with any additional files uploaded when creating the
-          #             version resource.
-          #
-          #     Returns:
-          #         An instance implementing this Predictor class.
-          #     &quot;&quot;&quot;
-          #     raise NotImplementedError()
-          # &lt;/pre&gt;
-          #
-          # Learn more about [the Predictor interface and custom prediction
-          # routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
-      &quot;deploymentUri&quot;: &quot;A String&quot;, # Required. The Cloud Storage location of the trained model used to
-          # create the version. See the
-          # [guide to model
-          # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
-          # information.
-          #
-          # When passing Version to
-          # projects.models.versions.create
-          # the model service uses the specified location as the source of the model.
-          # Once deployed, the model version is hosted by the prediction service, so
-          # this location is useful only as a historical record.
-          # The total number of model files can&#x27;t exceed 1000.
+          # `mls1-c1-m2` is generally available. All other machine types are available
+          # in beta. Learn more about the [differences between machine
+          # types](/ml-engine/docs/machine-types-online-prediction).
       &quot;packageUris&quot;: [ # Optional. Cloud Storage paths (`gs://…`) of packages for [custom
           # prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines)
           # or [scikit-learn pipelines with custom
@@ -661,42 +225,21 @@
           # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater.
         &quot;A String&quot;,
       ],
-      &quot;explanationConfig&quot;: { # Message holding configuration options for explaining model predictions. # Optional. Configures explainability features on the model&#x27;s version.
-          # Some explanation features require additional metadata to be loaded
-          # as part of the model payload.
-          # There are two feature attribution methods supported for TensorFlow models:
-          # integrated gradients and sampled Shapley.
-          # [Learn more about feature
-          # attributions.](/ai-platform/prediction/docs/ai-explanations/overview)
-        &quot;integratedGradientsAttribution&quot;: { # Attributes credit by computing the Aumann-Shapley value taking advantage # Attributes credit by computing the Aumann-Shapley value taking advantage
-            # of the model&#x27;s fully differentiable structure. Refer to this paper for
-            # more details: http://proceedings.mlr.press/v70/sundararajan17a.html
-            # of the model&#x27;s fully differentiable structure. Refer to this paper for
-            # more details: https://arxiv.org/abs/1703.01365
-          &quot;numIntegralSteps&quot;: 42, # Number of steps for approximating the path integral.
-              # A good value to start is 50 and gradually increase until the
-              # sum to diff property is met within the desired error range.
-        },
-        &quot;sampledShapleyAttribution&quot;: { # An attribution method that approximates Shapley values for features that # An attribution method that approximates Shapley values for features that
-            # contribute to the label being predicted. A sampling strategy is used to
-            # approximate the value rather than considering all subsets of features.
-            # contribute to the label being predicted. A sampling strategy is used to
-            # approximate the value rather than considering all subsets of features.
-          &quot;numPaths&quot;: 42, # The number of feature permutations to consider when approximating the
-              # Shapley values.
-        },
-        &quot;xraiAttribution&quot;: { # Attributes credit by computing the XRAI taking advantage # Attributes credit by computing the XRAI taking advantage
-            # of the model&#x27;s fully differentiable structure. Refer to this paper for
-            # more details: https://arxiv.org/abs/1906.02825
-            # Currently only implemented for models with natural image inputs.
-            # of the model&#x27;s fully differentiable structure. Refer to this paper for
-            # more details: https://arxiv.org/abs/1906.02825
-            # Currently only implemented for models with natural image inputs.
-          &quot;numIntegralSteps&quot;: 42, # Number of steps for approximating the path integral.
-              # A good value to start is 50 and gradually increase until the
-              # sum to diff property is met within the desired error range.
-        },
+      &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
+          # Only specify this field if you have specified a Compute Engine (N1) machine
+          # type in the `machineType` field. Learn more about [using GPUs for online
+          # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+          # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+          # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+          # [accelerators for online
+          # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+        &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
+        &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
       },
+      &quot;state&quot;: &quot;A String&quot;, # Output only. The state of a version.
+      &quot;name&quot;: &quot;A String&quot;, # Required. The name specified for the version when it was created.
+          #
+          # The version name must be unique within the model it is created in.
       &quot;autoScaling&quot;: { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
           # response to increases and decreases in traffic. Care should be
           # taken to ramp up traffic according to the model&#x27;s ability to scale
@@ -752,14 +295,55 @@
             # -d @./update_body.json
             # &lt;/pre&gt;
       },
-      &quot;createTime&quot;: &quot;A String&quot;, # Output only. The time the version was created.
-      &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your model
-          # versions. Each label is a key-value pair, where both the key and the value
-          # are arbitrary strings that you supply.
-          # For more information, see the documentation on
-          # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
-        &quot;a_key&quot;: &quot;A String&quot;,
+      &quot;explanationConfig&quot;: { # Message holding configuration options for explaining model predictions. # Optional. Configures explainability features on the model&#x27;s version.
+          # Some explanation features require additional metadata to be loaded
+          # as part of the model payload.
+          # There are two feature attribution methods supported for TensorFlow models:
+          # integrated gradients and sampled Shapley.
+          # [Learn more about feature
+          # attributions.](/ai-platform/prediction/docs/ai-explanations/overview)
+        &quot;integratedGradientsAttribution&quot;: { # Attributes credit by computing the Aumann-Shapley value taking advantage # Attributes credit by computing the Aumann-Shapley value taking advantage
+            # of the model&#x27;s fully differentiable structure. Refer to this paper for
+            # more details: http://proceedings.mlr.press/v70/sundararajan17a.html
+            # of the model&#x27;s fully differentiable structure. Refer to this paper for
+            # more details: https://arxiv.org/abs/1703.01365
+          &quot;numIntegralSteps&quot;: 42, # Number of steps for approximating the path integral.
+              # A good value to start is 50 and gradually increase until the
+              # sum to diff property is met within the desired error range.
+        },
+        &quot;xraiAttribution&quot;: { # Attributes credit by computing the XRAI taking advantage # Attributes credit by computing the XRAI taking advantage
+            # of the model&#x27;s fully differentiable structure. Refer to this paper for
+            # more details: https://arxiv.org/abs/1906.02825
+            # Currently only implemented for models with natural image inputs.
+            # of the model&#x27;s fully differentiable structure. Refer to this paper for
+            # more details: https://arxiv.org/abs/1906.02825
+            # Currently only implemented for models with natural image inputs.
+          &quot;numIntegralSteps&quot;: 42, # Number of steps for approximating the path integral.
+              # A good value to start is 50 and gradually increase until the
+              # sum to diff property is met within the desired error range.
+        },
+        &quot;sampledShapleyAttribution&quot;: { # An attribution method that approximates Shapley values for features that # An attribution method that approximates Shapley values for features that
+            # contribute to the label being predicted. A sampling strategy is used to
+            # approximate the value rather than considering all subsets of features.
+            # contribute to the label being predicted. A sampling strategy is used to
+            # approximate the value rather than considering all subsets of features.
+          &quot;numPaths&quot;: 42, # The number of feature permutations to consider when approximating the
+              # Shapley values.
+        },
       },
+      &quot;pythonVersion&quot;: &quot;A String&quot;, # Required. The version of Python used in prediction.
+          #
+          # The following Python versions are available:
+          #
+          # * Python &#x27;3.7&#x27; is available when `runtime_version` is set to &#x27;1.15&#x27; or
+          #   later.
+          # * Python &#x27;3.5&#x27; is available when `runtime_version` is set to a version
+          #   from &#x27;1.4&#x27; to &#x27;1.14&#x27;.
+          # * Python &#x27;2.7&#x27; is available when `runtime_version` is set to &#x27;1.15&#x27; or
+          #   earlier.
+          #
+          # Read more about the Python versions available for [each runtime
+          # version](/ml-engine/docs/runtime-version-list).
       &quot;requestLoggingConfig&quot;: { # Configuration for logging request-response pairs to a BigQuery table. # Optional. *Only* specify this field in a
           # projects.models.versions.patch
           # request. Specifying it in a
@@ -779,6 +363,9 @@
           # evaluation](/ml-engine/docs/continuous-evaluation/), you do not need to
           # specify this configuration manually. Setting up continuous evaluation
           # automatically enables logging of request-response pairs.
+        &quot;samplingPercentage&quot;: 3.14, # Percentage of requests to be logged, expressed as a fraction from 0 to 1.
+            # For example, if you want to log 10% of requests, enter `0.1`. The sampling
+            # window is the lifetime of the model version. Defaults to 0.
         &quot;bigqueryTableName&quot;: &quot;A String&quot;, # Required. Fully qualified BigQuery table name in the following format:
             # &quot;&lt;var&gt;project_id&lt;/var&gt;.&lt;var&gt;dataset_name&lt;/var&gt;.&lt;var&gt;table_name&lt;/var&gt;&quot;
             #
@@ -796,44 +383,20 @@
             #   &lt;tr&gt;&lt;td&gt;raw_prediction&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;NULLABLE&lt;/td&gt;&lt;/tr&gt;
             #   &lt;tr&gt;&lt;td&gt;groundtruth&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;NULLABLE&lt;/td&gt;&lt;/tr&gt;
             # &lt;/table&gt;
-        &quot;samplingPercentage&quot;: 3.14, # Percentage of requests to be logged, expressed as a fraction from 0 to 1.
-            # For example, if you want to log 10% of requests, enter `0.1`. The sampling
-            # window is the lifetime of the model version. Defaults to 0.
       },
-      &quot;errorMessage&quot;: &quot;A String&quot;, # Output only. The details of a failure or a cancellation.
-      &quot;machineType&quot;: &quot;A String&quot;, # Optional. The type of machine on which to serve the model. Currently only
-          # applies to online prediction service. If this field is not specified, it
-          # defaults to `mls1-c1-m2`.
-          #
-          # Online prediction supports the following machine types:
-          #
-          # * `mls1-c1-m2`
-          # * `mls1-c4-m2`
-          # * `n1-standard-2`
-          # * `n1-standard-4`
-          # * `n1-standard-8`
-          # * `n1-standard-16`
-          # * `n1-standard-32`
-          # * `n1-highmem-2`
-          # * `n1-highmem-4`
-          # * `n1-highmem-8`
-          # * `n1-highmem-16`
-          # * `n1-highmem-32`
-          # * `n1-highcpu-2`
-          # * `n1-highcpu-4`
-          # * `n1-highcpu-8`
-          # * `n1-highcpu-16`
-          # * `n1-highcpu-32`
-          #
-          # `mls1-c1-m2` is generally available. All other machine types are available
-          # in beta. Learn more about the [differences between machine
-          # types](/ml-engine/docs/machine-types-online-prediction).
-      &quot;runtimeVersion&quot;: &quot;A String&quot;, # Required. The AI Platform runtime version to use for this deployment.
-          #
-          # For more information, see the
-          # [runtime version list](/ml-engine/docs/runtime-version-list) and
-          # [how to manage runtime versions](/ml-engine/docs/versioning).
-      &quot;description&quot;: &quot;A String&quot;, # Optional. The description specified for the version when it was created.
+      &quot;manualScaling&quot;: { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
+          # model. You should generally use `auto_scaling` with an appropriate
+          # `min_nodes` instead, but this option is available if you want more
+          # predictable billing. Beware that latency and error rates will increase
+          # if the traffic exceeds that capability of the system to serve it based
+          # on the selected number of nodes.
+        &quot;nodes&quot;: 42, # The number of nodes to allocate for this model. These nodes are always up,
+            # starting from the time the model is deployed, so the cost of operating
+            # this model will be proportional to `nodes` * number of hours since
+            # last billing cycle plus the cost for each prediction performed.
+      },
+      &quot;createTime&quot;: &quot;A String&quot;, # Output only. The time the version was created.
+      &quot;lastUseTime&quot;: &quot;A String&quot;, # Output only. The time the version was last used for prediction.
       &quot;framework&quot;: &quot;A String&quot;, # Optional. The machine learning framework AI Platform uses to train
           # this version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,
           # `XGBOOST`. If you do not specify a framework, AI Platform
@@ -848,6 +411,66 @@
           # type](/ml-engine/docs/machine-types-online-prediction) in the
           # `machineType` field, you must specify `TENSORFLOW`
           # for the framework.
+      &quot;predictionClass&quot;: &quot;A String&quot;, # Optional. The fully qualified name
+          # (&lt;var&gt;module_name&lt;/var&gt;.&lt;var&gt;class_name&lt;/var&gt;) of a class that implements
+          # the Predictor interface described in this reference field. The module
+          # containing this class should be included in a package provided to the
+          # [`packageUris` field](#Version.FIELDS.package_uris).
+          #
+          # Specify this field if and only if you are deploying a [custom prediction
+          # routine (beta)](/ml-engine/docs/tensorflow/custom-prediction-routines).
+          # If you specify this field, you must set
+          # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater and
+          # you must set `machineType` to a [legacy (MLS1)
+          # machine type](/ml-engine/docs/machine-types-online-prediction).
+          #
+          # The following code sample provides the Predictor interface:
+          #
+          # &lt;pre style=&quot;max-width: 626px;&quot;&gt;
+          # class Predictor(object):
+          # &quot;&quot;&quot;Interface for constructing custom predictors.&quot;&quot;&quot;
+          #
+          # def predict(self, instances, **kwargs):
+          #     &quot;&quot;&quot;Performs custom prediction.
+          #
+          #     Instances are the decoded values from the request. They have already
+          #     been deserialized from JSON.
+          #
+          #     Args:
+          #         instances: A list of prediction input instances.
+          #         **kwargs: A dictionary of keyword args provided as additional
+          #             fields on the predict request body.
+          #
+          #     Returns:
+          #         A list of outputs containing the prediction results. This list must
+          #         be JSON serializable.
+          #     &quot;&quot;&quot;
+          #     raise NotImplementedError()
+          #
+          # @classmethod
+          # def from_path(cls, model_dir):
+          #     &quot;&quot;&quot;Creates an instance of Predictor using the given path.
+          #
+          #     Loading of the predictor should be done in this method.
+          #
+          #     Args:
+          #         model_dir: The local directory that contains the exported model
+          #             file along with any additional files uploaded when creating the
+          #             version resource.
+          #
+          #     Returns:
+          #         An instance implementing this Predictor class.
+          #     &quot;&quot;&quot;
+          #     raise NotImplementedError()
+          # &lt;/pre&gt;
+          #
+          # Learn more about [the Predictor interface and custom prediction
+          # routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
+      &quot;isDefault&quot;: True or False, # Output only. If true, this version will be used to handle prediction
+          # requests that do not specify a version.
+          #
+          # You can change the default version by calling
+          # projects.methods.versions.setDefault.
       &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
           # prevent simultaneous updates of a model from overwriting each other.
           # It is strongly suggested that systems make use of the `etag` in the
@@ -855,56 +478,433 @@
           # conditions: An `etag` is returned in the response to `GetVersion`, and
           # systems are expected to put that etag in the request to `UpdateVersion` to
           # ensure that their change will be applied to the model as intended.
+      &quot;serviceAccount&quot;: &quot;A String&quot;, # Optional. Specifies the service account for resource access control.
+      &quot;errorMessage&quot;: &quot;A String&quot;, # Output only. The details of a failure or a cancellation.
+      &quot;deploymentUri&quot;: &quot;A String&quot;, # Required. The Cloud Storage location of the trained model used to
+          # create the version. See the
+          # [guide to model
+          # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
+          # information.
+          #
+          # When passing Version to
+          # projects.models.versions.create
+          # the model service uses the specified location as the source of the model.
+          # Once deployed, the model version is hosted by the prediction service, so
+          # this location is useful only as a historical record.
+          # The total number of model files can&#x27;t exceed 1000.
+      &quot;runtimeVersion&quot;: &quot;A String&quot;, # Required. The AI Platform runtime version to use for this deployment.
+          #
+          # For more information, see the
+          # [runtime version list](/ml-engine/docs/runtime-version-list) and
+          # [how to manage runtime versions](/ml-engine/docs/versioning).
+      &quot;description&quot;: &quot;A String&quot;, # Optional. The description specified for the version when it was created.
     },
-    &quot;onlinePredictionConsoleLogging&quot;: True or False, # Optional. If true, online prediction nodes send `stderr` and `stdout`
-        # streams to Stackdriver Logging. These can be more verbose than the standard
-        # access logs (see `onlinePredictionLogging`) and can incur higher cost.
-        # However, they are helpful for debugging. Note that
-        # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
-        # your project receives prediction requests at a high QPS. Estimate your
-        # costs before enabling this option.
-        #
-        # Default is false.
-    &quot;regions&quot;: [ # Optional. The list of regions where the model is going to be deployed.
-        # Only one region per model is supported.
-        # Defaults to &#x27;us-central1&#x27; if nothing is set.
-        # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
-        # for AI Platform services.
-        # Note:
-        # *   No matter where a model is deployed, it can always be accessed by
-        #     users from anywhere, both for online and batch prediction.
-        # *   The region for a batch prediction job is set by the region field when
-        #     submitting the batch prediction job and does not take its value from
-        #     this field.
-      &quot;A String&quot;,
-    ],
-    &quot;description&quot;: &quot;A String&quot;, # Optional. The description specified for the model when it was created.
     &quot;onlinePredictionLogging&quot;: True or False, # Optional. If true, online prediction access logs are sent to StackDriver
         # Logging. These logs are like standard server access logs, containing
         # information like timestamp and latency for each request. Note that
         # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
         # your project receives prediction requests at a high queries per second rate
         # (QPS). Estimate your costs before enabling this option.
-        #
+        # 
         # Default is false.
-    &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
-        # prevent simultaneous updates of a model from overwriting each other.
-        # It is strongly suggested that systems make use of the `etag` in the
-        # read-modify-write cycle to perform model updates in order to avoid race
-        # conditions: An `etag` is returned in the response to `GetModel`, and
-        # systems are expected to put that etag in the request to `UpdateModel` to
-        # ensure that their change will be applied to the model as intended.
-    &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your models.
-        # Each label is a key-value pair, where both the key and the value are
-        # arbitrary strings that you supply.
-        # For more information, see the documentation on
-        # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
-      &quot;a_key&quot;: &quot;A String&quot;,
-    },
-    &quot;name&quot;: &quot;A String&quot;, # Required. The name specified for the model when it was created.
+  }
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Represents a machine learning solution.
         #
-        # The model name must be unique within the project it is created in.
-  }</pre>
+        # A model can have multiple versions, each of which is a deployed, trained
+        # model ready to receive prediction requests. The model itself is just a
+        # container.
+      &quot;description&quot;: &quot;A String&quot;, # Optional. The description specified for the model when it was created.
+      &quot;regions&quot;: [ # Optional. The list of regions where the model is going to be deployed.
+          # Only one region per model is supported.
+          # Defaults to &#x27;us-central1&#x27; if nothing is set.
+          # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
+          # for AI Platform services.
+          # Note:
+          # *   No matter where a model is deployed, it can always be accessed by
+          #     users from anywhere, both for online and batch prediction.
+          # *   The region for a batch prediction job is set by the region field when
+          #     submitting the batch prediction job and does not take its value from
+          #     this field.
+        &quot;A String&quot;,
+      ],
+      &quot;name&quot;: &quot;A String&quot;, # Required. The name specified for the model when it was created.
+          #
+          # The model name must be unique within the project it is created in.
+      &quot;onlinePredictionConsoleLogging&quot;: True or False, # Optional. If true, online prediction nodes send `stderr` and `stdout`
+          # streams to Stackdriver Logging. These can be more verbose than the standard
+          # access logs (see `onlinePredictionLogging`) and can incur higher cost.
+          # However, they are helpful for debugging. Note that
+          # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
+          # your project receives prediction requests at a high QPS. Estimate your
+          # costs before enabling this option.
+          #
+          # Default is false.
+      &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
+          # prevent simultaneous updates of a model from overwriting each other.
+          # It is strongly suggested that systems make use of the `etag` in the
+          # read-modify-write cycle to perform model updates in order to avoid race
+          # conditions: An `etag` is returned in the response to `GetModel`, and
+          # systems are expected to put that etag in the request to `UpdateModel` to
+          # ensure that their change will be applied to the model as intended.
+      &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your models.
+          # Each label is a key-value pair, where both the key and the value are
+          # arbitrary strings that you supply.
+          # For more information, see the documentation on
+          # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
+        &quot;a_key&quot;: &quot;A String&quot;,
+      },
+      &quot;defaultVersion&quot;: { # Represents a version of the model. # Output only. The default version of the model. This version will be used to
+          # handle prediction requests that do not specify a version.
+          #
+          # You can change the default version by calling
+          # projects.models.versions.setDefault.
+          #
+          # Each version is a trained model deployed in the cloud, ready to handle
+          # prediction requests. A model can have multiple versions. You can get
+          # information about all of the versions of a given model by calling
+          # projects.models.versions.list.
+        &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your model
+            # versions. Each label is a key-value pair, where both the key and the value
+            # are arbitrary strings that you supply.
+            # For more information, see the documentation on
+            # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
+          &quot;a_key&quot;: &quot;A String&quot;,
+        },
+        &quot;machineType&quot;: &quot;A String&quot;, # Optional. The type of machine on which to serve the model. Currently only
+            # applies to online prediction service. If this field is not specified, it
+            # defaults to `mls1-c1-m2`.
+            #
+            # Online prediction supports the following machine types:
+            #
+            # * `mls1-c1-m2`
+            # * `mls1-c4-m2`
+            # * `n1-standard-2`
+            # * `n1-standard-4`
+            # * `n1-standard-8`
+            # * `n1-standard-16`
+            # * `n1-standard-32`
+            # * `n1-highmem-2`
+            # * `n1-highmem-4`
+            # * `n1-highmem-8`
+            # * `n1-highmem-16`
+            # * `n1-highmem-32`
+            # * `n1-highcpu-2`
+            # * `n1-highcpu-4`
+            # * `n1-highcpu-8`
+            # * `n1-highcpu-16`
+            # * `n1-highcpu-32`
+            #
+            # `mls1-c1-m2` is generally available. All other machine types are available
+            # in beta. Learn more about the [differences between machine
+            # types](/ml-engine/docs/machine-types-online-prediction).
+        &quot;packageUris&quot;: [ # Optional. Cloud Storage paths (`gs://…`) of packages for [custom
+            # prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines)
+            # or [scikit-learn pipelines with custom
+            # code](/ml-engine/docs/scikit/exporting-for-prediction#custom-pipeline-code).
+            #
+            # For a custom prediction routine, one of these packages must contain your
+            # Predictor class (see
+            # [`predictionClass`](#Version.FIELDS.prediction_class)). Additionally,
+            # include any dependencies your Predictor or scikit-learn pipeline
+            # uses that are not already included in your selected [runtime
+            # version](/ml-engine/docs/tensorflow/runtime-version-list).
+            #
+            # If you specify this field, you must also set
+            # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater.
+          &quot;A String&quot;,
+        ],
+        &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
+            # Only specify this field if you have specified a Compute Engine (N1) machine
+            # type in the `machineType` field. Learn more about [using GPUs for online
+            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+            # [accelerators for online
+            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+          &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
+          &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
+        },
+        &quot;state&quot;: &quot;A String&quot;, # Output only. The state of a version.
+        &quot;name&quot;: &quot;A String&quot;, # Required. The name specified for the version when it was created.
+            #
+            # The version name must be unique within the model it is created in.
+        &quot;autoScaling&quot;: { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
+            # response to increases and decreases in traffic. Care should be
+            # taken to ramp up traffic according to the model&#x27;s ability to scale
+            # or you will start seeing increases in latency and 429 response codes.
+            #
+            # Note that you cannot use AutoScaling if your version uses
+            # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use
+            # `manual_scaling`.
+          &quot;minNodes&quot;: 42, # Optional. The minimum number of nodes to allocate for this model. These
+              # nodes are always up, starting from the time the model is deployed.
+              # Therefore, the cost of operating this model will be at least
+              # `rate` * `min_nodes` * number of hours since last billing cycle,
+              # where `rate` is the cost per node-hour as documented in the
+              # [pricing guide](/ml-engine/docs/pricing),
+              # even if no predictions are performed. There is additional cost for each
+              # prediction performed.
+              #
+              # Unlike manual scaling, if the load gets too heavy for the nodes
+              # that are up, the service will automatically add nodes to handle the
+              # increased load as well as scale back as traffic drops, always maintaining
+              # at least `min_nodes`. You will be charged for the time in which additional
+              # nodes are used.
+              #
+              # If `min_nodes` is not specified and AutoScaling is used with a [legacy
+              # (MLS1) machine type](/ml-engine/docs/machine-types-online-prediction),
+              # `min_nodes` defaults to 0, in which case, when traffic to a model stops
+              # (and after a cool-down period), nodes will be shut down and no charges will
+              # be incurred until traffic to the model resumes.
+              #
+              # If `min_nodes` is not specified and AutoScaling is used with a [Compute
+              # Engine (N1) machine type](/ml-engine/docs/machine-types-online-prediction),
+              # `min_nodes` defaults to 1. `min_nodes` must be at least 1 for use with a
+              # Compute Engine machine type.
+              #
+              # Note that you cannot use AutoScaling if your version uses
+              # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use
+              # ManualScaling.
+              #
+              # You can set `min_nodes` when creating the model version, and you can also
+              # update `min_nodes` for an existing version:
+              # &lt;pre&gt;
+              # update_body.json:
+              # {
+              #   &#x27;autoScaling&#x27;: {
+              #     &#x27;minNodes&#x27;: 5
+              #   }
+              # }
+              # &lt;/pre&gt;
+              # HTTP request:
+              # &lt;pre style=&quot;max-width: 626px;&quot;&gt;
+              # PATCH
+              # https://ml.googleapis.com/v1/{name=projects/*/models/*/versions/*}?update_mask=autoScaling.minNodes
+              # -d @./update_body.json
+              # &lt;/pre&gt;
+        },
+        &quot;explanationConfig&quot;: { # Message holding configuration options for explaining model predictions. # Optional. Configures explainability features on the model&#x27;s version.
+            # Some explanation features require additional metadata to be loaded
+            # as part of the model payload.
+            # There are two feature attribution methods supported for TensorFlow models:
+            # integrated gradients and sampled Shapley.
+            # [Learn more about feature
+            # attributions.](/ai-platform/prediction/docs/ai-explanations/overview)
+          &quot;integratedGradientsAttribution&quot;: { # Attributes credit by computing the Aumann-Shapley value taking advantage # Attributes credit by computing the Aumann-Shapley value taking advantage
+              # of the model&#x27;s fully differentiable structure. Refer to this paper for
+              # more details: http://proceedings.mlr.press/v70/sundararajan17a.html
+              # of the model&#x27;s fully differentiable structure. Refer to this paper for
+              # more details: https://arxiv.org/abs/1703.01365
+            &quot;numIntegralSteps&quot;: 42, # Number of steps for approximating the path integral.
+                # A good value to start is 50 and gradually increase until the
+                # sum to diff property is met within the desired error range.
+          },
+          &quot;xraiAttribution&quot;: { # Attributes credit by computing the XRAI taking advantage # Attributes credit by computing the XRAI taking advantage
+              # of the model&#x27;s fully differentiable structure. Refer to this paper for
+              # more details: https://arxiv.org/abs/1906.02825
+              # Currently only implemented for models with natural image inputs.
+              # of the model&#x27;s fully differentiable structure. Refer to this paper for
+              # more details: https://arxiv.org/abs/1906.02825
+              # Currently only implemented for models with natural image inputs.
+            &quot;numIntegralSteps&quot;: 42, # Number of steps for approximating the path integral.
+                # A good value to start is 50 and gradually increase until the
+                # sum to diff property is met within the desired error range.
+          },
+          &quot;sampledShapleyAttribution&quot;: { # An attribution method that approximates Shapley values for features that # An attribution method that approximates Shapley values for features that
+              # contribute to the label being predicted. A sampling strategy is used to
+              # approximate the value rather than considering all subsets of features.
+              # contribute to the label being predicted. A sampling strategy is used to
+              # approximate the value rather than considering all subsets of features.
+            &quot;numPaths&quot;: 42, # The number of feature permutations to consider when approximating the
+                # Shapley values.
+          },
+        },
+        &quot;pythonVersion&quot;: &quot;A String&quot;, # Required. The version of Python used in prediction.
+            #
+            # The following Python versions are available:
+            #
+            # * Python &#x27;3.7&#x27; is available when `runtime_version` is set to &#x27;1.15&#x27; or
+            #   later.
+            # * Python &#x27;3.5&#x27; is available when `runtime_version` is set to a version
+            #   from &#x27;1.4&#x27; to &#x27;1.14&#x27;.
+            # * Python &#x27;2.7&#x27; is available when `runtime_version` is set to &#x27;1.15&#x27; or
+            #   earlier.
+            #
+            # Read more about the Python versions available for [each runtime
+            # version](/ml-engine/docs/runtime-version-list).
+        &quot;requestLoggingConfig&quot;: { # Configuration for logging request-response pairs to a BigQuery table. # Optional. *Only* specify this field in a
+            # projects.models.versions.patch
+            # request. Specifying it in a
+            # projects.models.versions.create
+            # request has no effect.
+            #
+            # Configures the request-response pair logging on predictions from this
+            # Version.
+            # Online prediction requests to a model version and the responses to these
+            # requests are converted to raw strings and saved to the specified BigQuery
+            # table. Logging is constrained by [BigQuery quotas and
+            # limits](/bigquery/quotas). If your project exceeds BigQuery quotas or limits,
+            # AI Platform Prediction does not log request-response pairs, but it continues
+            # to serve predictions.
+            #
+            # If you are using [continuous
+            # evaluation](/ml-engine/docs/continuous-evaluation/), you do not need to
+            # specify this configuration manually. Setting up continuous evaluation
+            # automatically enables logging of request-response pairs.
+          &quot;samplingPercentage&quot;: 3.14, # Percentage of requests to be logged, expressed as a fraction from 0 to 1.
+              # For example, if you want to log 10% of requests, enter `0.1`. The sampling
+              # window is the lifetime of the model version. Defaults to 0.
+          &quot;bigqueryTableName&quot;: &quot;A String&quot;, # Required. Fully qualified BigQuery table name in the following format:
+              # &quot;&lt;var&gt;project_id&lt;/var&gt;.&lt;var&gt;dataset_name&lt;/var&gt;.&lt;var&gt;table_name&lt;/var&gt;&quot;
+              #
+              # The specified table must already exist, and the &quot;Cloud ML Service Agent&quot;
+              # for your project must have permission to write to it. The table must have
+              # the following [schema](/bigquery/docs/schemas):
+              #
+              # &lt;table&gt;
+              #   &lt;tr&gt;&lt;th&gt;Field name&lt;/th&gt;&lt;th style=&quot;display: table-cell&quot;&gt;Type&lt;/th&gt;
+              #     &lt;th style=&quot;display: table-cell&quot;&gt;Mode&lt;/th&gt;&lt;/tr&gt;
+              #   &lt;tr&gt;&lt;td&gt;model&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+              #   &lt;tr&gt;&lt;td&gt;model_version&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+              #   &lt;tr&gt;&lt;td&gt;time&lt;/td&gt;&lt;td&gt;TIMESTAMP&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+              #   &lt;tr&gt;&lt;td&gt;raw_data&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+              #   &lt;tr&gt;&lt;td&gt;raw_prediction&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;NULLABLE&lt;/td&gt;&lt;/tr&gt;
+              #   &lt;tr&gt;&lt;td&gt;groundtruth&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;NULLABLE&lt;/td&gt;&lt;/tr&gt;
+              # &lt;/table&gt;
+        },
+        &quot;manualScaling&quot;: { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
+            # model. You should generally use `auto_scaling` with an appropriate
+            # `min_nodes` instead, but this option is available if you want more
+            # predictable billing. Beware that latency and error rates will increase
+            # if the traffic exceeds that capability of the system to serve it based
+            # on the selected number of nodes.
+          &quot;nodes&quot;: 42, # The number of nodes to allocate for this model. These nodes are always up,
+              # starting from the time the model is deployed, so the cost of operating
+              # this model will be proportional to `nodes` * number of hours since
+              # last billing cycle plus the cost for each prediction performed.
+        },
+        &quot;createTime&quot;: &quot;A String&quot;, # Output only. The time the version was created.
+        &quot;lastUseTime&quot;: &quot;A String&quot;, # Output only. The time the version was last used for prediction.
+        &quot;framework&quot;: &quot;A String&quot;, # Optional. The machine learning framework AI Platform uses to train
+            # this version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,
+            # `XGBOOST`. If you do not specify a framework, AI Platform
+            # will analyze files in the deployment_uri to determine a framework. If you
+            # choose `SCIKIT_LEARN` or `XGBOOST`, you must also set the runtime version
+            # of the model to 1.4 or greater.
+            #
+            # Do **not** specify a framework if you&#x27;re deploying a [custom
+            # prediction routine](/ml-engine/docs/tensorflow/custom-prediction-routines).
+            #
+            # If you specify a [Compute Engine (N1) machine
+            # type](/ml-engine/docs/machine-types-online-prediction) in the
+            # `machineType` field, you must specify `TENSORFLOW`
+            # for the framework.
+        &quot;predictionClass&quot;: &quot;A String&quot;, # Optional. The fully qualified name
+            # (&lt;var&gt;module_name&lt;/var&gt;.&lt;var&gt;class_name&lt;/var&gt;) of a class that implements
+            # the Predictor interface described in this reference field. The module
+            # containing this class should be included in a package provided to the
+            # [`packageUris` field](#Version.FIELDS.package_uris).
+            #
+            # Specify this field if and only if you are deploying a [custom prediction
+            # routine (beta)](/ml-engine/docs/tensorflow/custom-prediction-routines).
+            # If you specify this field, you must set
+            # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater and
+            # you must set `machineType` to a [legacy (MLS1)
+            # machine type](/ml-engine/docs/machine-types-online-prediction).
+            #
+            # The following code sample provides the Predictor interface:
+            #
+            # &lt;pre style=&quot;max-width: 626px;&quot;&gt;
+            # class Predictor(object):
+            # &quot;&quot;&quot;Interface for constructing custom predictors.&quot;&quot;&quot;
+            #
+            # def predict(self, instances, **kwargs):
+            #     &quot;&quot;&quot;Performs custom prediction.
+            #
+            #     Instances are the decoded values from the request. They have already
+            #     been deserialized from JSON.
+            #
+            #     Args:
+            #         instances: A list of prediction input instances.
+            #         **kwargs: A dictionary of keyword args provided as additional
+            #             fields on the predict request body.
+            #
+            #     Returns:
+            #         A list of outputs containing the prediction results. This list must
+            #         be JSON serializable.
+            #     &quot;&quot;&quot;
+            #     raise NotImplementedError()
+            #
+            # @classmethod
+            # def from_path(cls, model_dir):
+            #     &quot;&quot;&quot;Creates an instance of Predictor using the given path.
+            #
+            #     Loading of the predictor should be done in this method.
+            #
+            #     Args:
+            #         model_dir: The local directory that contains the exported model
+            #             file along with any additional files uploaded when creating the
+            #             version resource.
+            #
+            #     Returns:
+            #         An instance implementing this Predictor class.
+            #     &quot;&quot;&quot;
+            #     raise NotImplementedError()
+            # &lt;/pre&gt;
+            #
+            # Learn more about [the Predictor interface and custom prediction
+            # routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
+        &quot;isDefault&quot;: True or False, # Output only. If true, this version will be used to handle prediction
+            # requests that do not specify a version.
+            #
+            # You can change the default version by calling
+            # projects.models.versions.setDefault.
+        &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
+            # prevent simultaneous updates of a model from overwriting each other.
+            # It is strongly suggested that systems make use of the `etag` in the
+            # read-modify-write cycle to perform model updates in order to avoid race
+            # conditions: An `etag` is returned in the response to `GetVersion`, and
+            # systems are expected to put that etag in the request to `UpdateVersion` to
+            # ensure that their change will be applied to the model as intended.
+        &quot;serviceAccount&quot;: &quot;A String&quot;, # Optional. Specifies the service account for resource access control.
+        &quot;errorMessage&quot;: &quot;A String&quot;, # Output only. The details of a failure or a cancellation.
+        &quot;deploymentUri&quot;: &quot;A String&quot;, # Required. The Cloud Storage location of the trained model used to
+            # create the version. See the
+            # [guide to model
+            # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
+            # information.
+            #
+            # When passing Version to
+            # projects.models.versions.create
+            # the model service uses the specified location as the source of the model.
+            # Once deployed, the model version is hosted by the prediction service, so
+            # this location is useful only as a historical record.
+            # The total number of model files can&#x27;t exceed 1000.
+        &quot;runtimeVersion&quot;: &quot;A String&quot;, # Required. The AI Platform runtime version to use for this deployment.
+            #
+            # For more information, see the
+            # [runtime version list](/ml-engine/docs/runtime-version-list) and
+            # [how to manage runtime versions](/ml-engine/docs/versioning).
+        &quot;description&quot;: &quot;A String&quot;, # Optional. The description specified for the version when it was created.
+      },
+      &quot;onlinePredictionLogging&quot;: True or False, # Optional. If true, online prediction access logs are sent to StackDriver
+          # Logging. These logs are like standard server access logs, containing
+          # information like timestamp and latency for each request. Note that
+          # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
+          # your project receives prediction requests at a high queries per second rate
+          # (QPS). Estimate your costs before enabling this option.
+          #
+          # Default is false.
+    }</pre>
 </div>
 
 <div class="method">
@@ -927,9 +927,6 @@
 
     { # This resource represents a long-running operation that is the result of a
       # network API call.
-    &quot;name&quot;: &quot;A String&quot;, # The server-assigned name, which is only unique within the same service that
-        # originally returns it. If you use the default HTTP mapping, the
-        # `name` should be a resource name ending with `operations/{unique_id}`.
     &quot;error&quot;: { # The `Status` type defines a logical error model that is suitable for # The error result of the operation in case of failure or cancellation.
         # different programming environments, including REST APIs and RPC APIs. It is
         # used by [gRPC](https://github.com/grpc). Each `Status` message contains
@@ -937,22 +934,16 @@
         #
         # You can find out more about this error model and how to work with it in the
         # [API Design Guide](https://cloud.google.com/apis/design/errors).
-      &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
-      &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any
-          # user-facing error message should be localized and sent in the
-          # google.rpc.Status.details field, or localized by the client.
       &quot;details&quot;: [ # A list of messages that carry the error details.  There is a common set of
           # message types for APIs to use.
         {
           &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
         },
       ],
-    },
-    &quot;metadata&quot;: { # Service-specific metadata associated with the operation.  It typically
-        # contains progress information and common metadata such as create time.
-        # Some services might not provide such metadata.  Any method that returns a
-        # long-running operation should document the metadata type, if any.
-      &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
+      &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any
+          # user-facing error message should be localized and sent in the
+          # google.rpc.Status.details field, or localized by the client.
+      &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
     },
     &quot;done&quot;: True or False, # If the value is `false`, it means the operation is still in progress.
         # If `true`, the operation is completed, and either `error` or `response` is
@@ -967,6 +958,15 @@
         # `TakeSnapshotResponse`.
       &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
     },
+    &quot;metadata&quot;: { # Service-specific metadata associated with the operation.  It typically
+        # contains progress information and common metadata such as create time.
+        # Some services might not provide such metadata.  Any method that returns a
+        # long-running operation should document the metadata type, if any.
+      &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
+    },
+    &quot;name&quot;: &quot;A String&quot;, # The server-assigned name, which is only unique within the same service that
+        # originally returns it. If you use the default HTTP mapping, the
+        # `name` should be a resource name ending with `operations/{unique_id}`.
   }</pre>
 </div>
 
@@ -987,393 +987,393 @@
   An object of the form:
 
     { # Represents a machine learning solution.
-      #
-      # A model can have multiple versions, each of which is a deployed, trained
-      # model ready to receive prediction requests. The model itself is just a
-      # container.
-    &quot;defaultVersion&quot;: { # Represents a version of the model. # Output only. The default version of the model. This version will be used to
-        # handle prediction requests that do not specify a version.
         #
-        # You can change the default version by calling
-        # projects.models.versions.setDefault.
-        #
-        # Each version is a trained model deployed in the cloud, ready to handle
-        # prediction requests. A model can have multiple versions. You can get
-        # information about all of the versions of a given model by calling
-        # projects.models.versions.list.
-      &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
-          # Only specify this field if you have specified a Compute Engine (N1) machine
-          # type in the `machineType` field. Learn more about [using GPUs for online
-          # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-          # Note that the AcceleratorConfig can be used in both Jobs and Versions.
-          # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
-          # [accelerators for online
-          # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-        &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
-        &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
-      },
-      &quot;isDefault&quot;: True or False, # Output only. If true, this version will be used to handle prediction
-          # requests that do not specify a version.
-          #
-          # You can change the default version by calling
-          # projects.methods.versions.setDefault.
-      &quot;manualScaling&quot;: { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
-          # model. You should generally use `auto_scaling` with an appropriate
-          # `min_nodes` instead, but this option is available if you want more
-          # predictable billing. Beware that latency and error rates will increase
-          # if the traffic exceeds that capability of the system to serve it based
-          # on the selected number of nodes.
-        &quot;nodes&quot;: 42, # The number of nodes to allocate for this model. These nodes are always up,
-            # starting from the time the model is deployed, so the cost of operating
-            # this model will be proportional to `nodes` * number of hours since
-            # last billing cycle plus the cost for each prediction performed.
-      },
-      &quot;state&quot;: &quot;A String&quot;, # Output only. The state of a version.
-      &quot;name&quot;: &quot;A String&quot;, # Required. The name specified for the version when it was created.
-          #
-          # The version name must be unique within the model it is created in.
-      &quot;serviceAccount&quot;: &quot;A String&quot;, # Optional. Specifies the service account for resource access control.
-      &quot;pythonVersion&quot;: &quot;A String&quot;, # Required. The version of Python used in prediction.
-          #
-          # The following Python versions are available:
-          #
-          # * Python &#x27;3.7&#x27; is available when `runtime_version` is set to &#x27;1.15&#x27; or
-          #   later.
-          # * Python &#x27;3.5&#x27; is available when `runtime_version` is set to a version
-          #   from &#x27;1.4&#x27; to &#x27;1.14&#x27;.
-          # * Python &#x27;2.7&#x27; is available when `runtime_version` is set to &#x27;1.15&#x27; or
-          #   earlier.
-          #
-          # Read more about the Python versions available for [each runtime
-          # version](/ml-engine/docs/runtime-version-list).
-      &quot;lastUseTime&quot;: &quot;A String&quot;, # Output only. The time the version was last used for prediction.
-      &quot;predictionClass&quot;: &quot;A String&quot;, # Optional. The fully qualified name
-          # (&lt;var&gt;module_name&lt;/var&gt;.&lt;var&gt;class_name&lt;/var&gt;) of a class that implements
-          # the Predictor interface described in this reference field. The module
-          # containing this class should be included in a package provided to the
-          # [`packageUris` field](#Version.FIELDS.package_uris).
-          #
-          # Specify this field if and only if you are deploying a [custom prediction
-          # routine (beta)](/ml-engine/docs/tensorflow/custom-prediction-routines).
-          # If you specify this field, you must set
-          # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater and
-          # you must set `machineType` to a [legacy (MLS1)
-          # machine type](/ml-engine/docs/machine-types-online-prediction).
-          #
-          # The following code sample provides the Predictor interface:
-          #
-          # &lt;pre style=&quot;max-width: 626px;&quot;&gt;
-          # class Predictor(object):
-          # &quot;&quot;&quot;Interface for constructing custom predictors.&quot;&quot;&quot;
-          #
-          # def predict(self, instances, **kwargs):
-          #     &quot;&quot;&quot;Performs custom prediction.
-          #
-          #     Instances are the decoded values from the request. They have already
-          #     been deserialized from JSON.
-          #
-          #     Args:
-          #         instances: A list of prediction input instances.
-          #         **kwargs: A dictionary of keyword args provided as additional
-          #             fields on the predict request body.
-          #
-          #     Returns:
-          #         A list of outputs containing the prediction results. This list must
-          #         be JSON serializable.
-          #     &quot;&quot;&quot;
-          #     raise NotImplementedError()
-          #
-          # @classmethod
-          # def from_path(cls, model_dir):
-          #     &quot;&quot;&quot;Creates an instance of Predictor using the given path.
-          #
-          #     Loading of the predictor should be done in this method.
-          #
-          #     Args:
-          #         model_dir: The local directory that contains the exported model
-          #             file along with any additional files uploaded when creating the
-          #             version resource.
-          #
-          #     Returns:
-          #         An instance implementing this Predictor class.
-          #     &quot;&quot;&quot;
-          #     raise NotImplementedError()
-          # &lt;/pre&gt;
-          #
-          # Learn more about [the Predictor interface and custom prediction
-          # routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
-      &quot;deploymentUri&quot;: &quot;A String&quot;, # Required. The Cloud Storage location of the trained model used to
-          # create the version. See the
-          # [guide to model
-          # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
-          # information.
-          #
-          # When passing Version to
-          # projects.models.versions.create
-          # the model service uses the specified location as the source of the model.
-          # Once deployed, the model version is hosted by the prediction service, so
-          # this location is useful only as a historical record.
-          # The total number of model files can&#x27;t exceed 1000.
-      &quot;packageUris&quot;: [ # Optional. Cloud Storage paths (`gs://…`) of packages for [custom
-          # prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines)
-          # or [scikit-learn pipelines with custom
-          # code](/ml-engine/docs/scikit/exporting-for-prediction#custom-pipeline-code).
-          #
-          # For a custom prediction routine, one of these packages must contain your
-          # Predictor class (see
-          # [`predictionClass`](#Version.FIELDS.prediction_class)). Additionally,
-          # include any dependencies used by your Predictor or scikit-learn pipeline
-          # uses that are not already included in your selected [runtime
-          # version](/ml-engine/docs/tensorflow/runtime-version-list).
-          #
-          # If you specify this field, you must also set
-          # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater.
+        # A model can have multiple versions, each of which is a deployed, trained
+        # model ready to receive prediction requests. The model itself is just a
+        # container.
+      &quot;description&quot;: &quot;A String&quot;, # Optional. The description specified for the model when it was created.
+      &quot;regions&quot;: [ # Optional. The list of regions where the model is going to be deployed.
+          # Only one region per model is supported.
+          # Defaults to &#x27;us-central1&#x27; if nothing is set.
+          # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
+          # for AI Platform services.
+          # Note:
+          # *   No matter where a model is deployed, it can always be accessed by
+          #     users from anywhere, both for online and batch prediction.
+          # *   The region for a batch prediction job is set by the region field when
+          #     submitting the batch prediction job and does not take its value from
+          #     this field.
         &quot;A String&quot;,
       ],
-      &quot;explanationConfig&quot;: { # Message holding configuration options for explaining model predictions. # Optional. Configures explainability features on the model&#x27;s version.
-          # Some explanation features require additional metadata to be loaded
-          # as part of the model payload.
-          # There are two feature attribution methods supported for TensorFlow models:
-          # integrated gradients and sampled Shapley.
-          # [Learn more about feature
-          # attributions.](/ai-platform/prediction/docs/ai-explanations/overview)
-        &quot;integratedGradientsAttribution&quot;: { # Attributes credit by computing the Aumann-Shapley value taking advantage # Attributes credit by computing the Aumann-Shapley value taking advantage
-            # of the model&#x27;s fully differentiable structure. Refer to this paper for
-            # more details: http://proceedings.mlr.press/v70/sundararajan17a.html
-            # of the model&#x27;s fully differentiable structure. Refer to this paper for
-            # more details: https://arxiv.org/abs/1703.01365
-          &quot;numIntegralSteps&quot;: 42, # Number of steps for approximating the path integral.
-              # A good value to start is 50 and gradually increase until the
-              # sum to diff property is met within the desired error range.
-        },
-        &quot;sampledShapleyAttribution&quot;: { # An attribution method that approximates Shapley values for features that # An attribution method that approximates Shapley values for features that
-            # contribute to the label being predicted. A sampling strategy is used to
-            # approximate the value rather than considering all subsets of features.
-            # contribute to the label being predicted. A sampling strategy is used to
-            # approximate the value rather than considering all subsets of features.
-          &quot;numPaths&quot;: 42, # The number of feature permutations to consider when approximating the
-              # Shapley values.
-        },
-        &quot;xraiAttribution&quot;: { # Attributes credit by computing the XRAI taking advantage # Attributes credit by computing the XRAI taking advantage
-            # of the model&#x27;s fully differentiable structure. Refer to this paper for
-            # more details: https://arxiv.org/abs/1906.02825
-            # Currently only implemented for models with natural image inputs.
-            # of the model&#x27;s fully differentiable structure. Refer to this paper for
-            # more details: https://arxiv.org/abs/1906.02825
-            # Currently only implemented for models with natural image inputs.
-          &quot;numIntegralSteps&quot;: 42, # Number of steps for approximating the path integral.
-              # A good value to start is 50 and gradually increase until the
-              # sum to diff property is met within the desired error range.
-        },
-      },
-      &quot;autoScaling&quot;: { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
-          # response to increases and decreases in traffic. Care should be
-          # taken to ramp up traffic according to the model&#x27;s ability to scale
-          # or you will start seeing increases in latency and 429 response codes.
+      &quot;name&quot;: &quot;A String&quot;, # Required. The name specified for the model when it was created.
           #
-          # Note that you cannot use AutoScaling if your version uses
-          # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use specify
-          # `manual_scaling`.
-        &quot;minNodes&quot;: 42, # Optional. The minimum number of nodes to allocate for this model. These
-            # nodes are always up, starting from the time the model is deployed.
-            # Therefore, the cost of operating this model will be at least
-            # `rate` * `min_nodes` * number of hours since last billing cycle,
-            # where `rate` is the cost per node-hour as documented in the
-            # [pricing guide](/ml-engine/docs/pricing),
-            # even if no predictions are performed. There is additional cost for each
-            # prediction performed.
-            #
-            # Unlike manual scaling, if the load gets too heavy for the nodes
-            # that are up, the service will automatically add nodes to handle the
-            # increased load as well as scale back as traffic drops, always maintaining
-            # at least `min_nodes`. You will be charged for the time in which additional
-            # nodes are used.
-            #
-            # If `min_nodes` is not specified and AutoScaling is used with a [legacy
-            # (MLS1) machine type](/ml-engine/docs/machine-types-online-prediction),
-            # `min_nodes` defaults to 0, in which case, when traffic to a model stops
-            # (and after a cool-down period), nodes will be shut down and no charges will
-            # be incurred until traffic to the model resumes.
-            #
-            # If `min_nodes` is not specified and AutoScaling is used with a [Compute
-            # Engine (N1) machine type](/ml-engine/docs/machine-types-online-prediction),
-            # `min_nodes` defaults to 1. `min_nodes` must be at least 1 for use with a
-            # Compute Engine machine type.
-            #
-            # Note that you cannot use AutoScaling if your version uses
-            # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use
-            # ManualScaling.
-            #
-            # You can set `min_nodes` when creating the model version, and you can also
-            # update `min_nodes` for an existing version:
-            # &lt;pre&gt;
-            # update_body.json:
-            # {
-            #   &#x27;autoScaling&#x27;: {
-            #     &#x27;minNodes&#x27;: 5
-            #   }
-            # }
-            # &lt;/pre&gt;
-            # HTTP request:
-            # &lt;pre style=&quot;max-width: 626px;&quot;&gt;
-            # PATCH
-            # https://ml.googleapis.com/v1/{name=projects/*/models/*/versions/*}?update_mask=autoScaling.minNodes
-            # -d @./update_body.json
-            # &lt;/pre&gt;
-      },
-      &quot;createTime&quot;: &quot;A String&quot;, # Output only. The time the version was created.
-      &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your model
-          # versions. Each label is a key-value pair, where both the key and the value
-          # are arbitrary strings that you supply.
-          # For more information, see the documentation on
-          # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
-        &quot;a_key&quot;: &quot;A String&quot;,
-      },
-      &quot;requestLoggingConfig&quot;: { # Configuration for logging request-response pairs to a BigQuery table. # Optional. *Only* specify this field in a
-          # projects.models.versions.patch
-          # request. Specifying it in a
-          # projects.models.versions.create
-          # request has no effect.
+          # The model name must be unique within the project it is created in.
+      &quot;onlinePredictionConsoleLogging&quot;: True or False, # Optional. If true, online prediction nodes send `stderr` and `stdout`
+          # streams to Stackdriver Logging. These can be more verbose than the standard
+          # access logs (see `onlinePredictionLogging`) and can incur higher cost.
+          # However, they are helpful for debugging. Note that
+          # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
+          # your project receives prediction requests at a high QPS. Estimate your
+          # costs before enabling this option.
           #
-          # Configures the request-response pair logging on predictions from this
-          # Version.
-          # Online prediction requests to a model version and the responses to these
-          # requests are converted to raw strings and saved to the specified BigQuery
-          # table. Logging is constrained by [BigQuery quotas and
-          # limits](/bigquery/quotas). If your project exceeds BigQuery quotas or limits,
-          # AI Platform Prediction does not log request-response pairs, but it continues
-          # to serve predictions.
-          #
-          # If you are using [continuous
-          # evaluation](/ml-engine/docs/continuous-evaluation/), you do not need to
-          # specify this configuration manually. Setting up continuous evaluation
-          # automatically enables logging of request-response pairs.
-        &quot;bigqueryTableName&quot;: &quot;A String&quot;, # Required. Fully qualified BigQuery table name in the following format:
-            # &quot;&lt;var&gt;project_id&lt;/var&gt;.&lt;var&gt;dataset_name&lt;/var&gt;.&lt;var&gt;table_name&lt;/var&gt;&quot;
-            #
-            # The specified table must already exist, and the &quot;Cloud ML Service Agent&quot;
-            # for your project must have permission to write to it. The table must have
-            # the following [schema](/bigquery/docs/schemas):
-            #
-            # &lt;table&gt;
-            #   &lt;tr&gt;&lt;th&gt;Field name&lt;/th&gt;&lt;th style=&quot;display: table-cell&quot;&gt;Type&lt;/th&gt;
-            #     &lt;th style=&quot;display: table-cell&quot;&gt;Mode&lt;/th&gt;&lt;/tr&gt;
-            #   &lt;tr&gt;&lt;td&gt;model&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
-            #   &lt;tr&gt;&lt;td&gt;model_version&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
-            #   &lt;tr&gt;&lt;td&gt;time&lt;/td&gt;&lt;td&gt;TIMESTAMP&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
-            #   &lt;tr&gt;&lt;td&gt;raw_data&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
-            #   &lt;tr&gt;&lt;td&gt;raw_prediction&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;NULLABLE&lt;/td&gt;&lt;/tr&gt;
-            #   &lt;tr&gt;&lt;td&gt;groundtruth&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;NULLABLE&lt;/td&gt;&lt;/tr&gt;
-            # &lt;/table&gt;
-        &quot;samplingPercentage&quot;: 3.14, # Percentage of requests to be logged, expressed as a fraction from 0 to 1.
-            # For example, if you want to log 10% of requests, enter `0.1`. The sampling
-            # window is the lifetime of the model version. Defaults to 0.
-      },
-      &quot;errorMessage&quot;: &quot;A String&quot;, # Output only. The details of a failure or a cancellation.
-      &quot;machineType&quot;: &quot;A String&quot;, # Optional. The type of machine on which to serve the model. Currently only
-          # applies to online prediction service. If this field is not specified, it
-          # defaults to `mls1-c1-m2`.
-          #
-          # Online prediction supports the following machine types:
-          #
-          # * `mls1-c1-m2`
-          # * `mls1-c4-m2`
-          # * `n1-standard-2`
-          # * `n1-standard-4`
-          # * `n1-standard-8`
-          # * `n1-standard-16`
-          # * `n1-standard-32`
-          # * `n1-highmem-2`
-          # * `n1-highmem-4`
-          # * `n1-highmem-8`
-          # * `n1-highmem-16`
-          # * `n1-highmem-32`
-          # * `n1-highcpu-2`
-          # * `n1-highcpu-4`
-          # * `n1-highcpu-8`
-          # * `n1-highcpu-16`
-          # * `n1-highcpu-32`
-          #
-          # `mls1-c1-m2` is generally available. All other machine types are available
-          # in beta. Learn more about the [differences between machine
-          # types](/ml-engine/docs/machine-types-online-prediction).
-      &quot;runtimeVersion&quot;: &quot;A String&quot;, # Required. The AI Platform runtime version to use for this deployment.
-          #
-          # For more information, see the
-          # [runtime version list](/ml-engine/docs/runtime-version-list) and
-          # [how to manage runtime versions](/ml-engine/docs/versioning).
-      &quot;description&quot;: &quot;A String&quot;, # Optional. The description specified for the version when it was created.
-      &quot;framework&quot;: &quot;A String&quot;, # Optional. The machine learning framework AI Platform uses to train
-          # this version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,
-          # `XGBOOST`. If you do not specify a framework, AI Platform
-          # will analyze files in the deployment_uri to determine a framework. If you
-          # choose `SCIKIT_LEARN` or `XGBOOST`, you must also set the runtime version
-          # of the model to 1.4 or greater.
-          #
-          # Do **not** specify a framework if you&#x27;re deploying a [custom
-          # prediction routine](/ml-engine/docs/tensorflow/custom-prediction-routines).
-          #
-          # If you specify a [Compute Engine (N1) machine
-          # type](/ml-engine/docs/machine-types-online-prediction) in the
-          # `machineType` field, you must specify `TENSORFLOW`
-          # for the framework.
+          # Default is false.
       &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
           # prevent simultaneous updates of a model from overwriting each other.
           # It is strongly suggested that systems make use of the `etag` in the
           # read-modify-write cycle to perform model updates in order to avoid race
-          # conditions: An `etag` is returned in the response to `GetVersion`, and
-          # systems are expected to put that etag in the request to `UpdateVersion` to
+          # conditions: An `etag` is returned in the response to `GetModel`, and
+          # systems are expected to put that etag in the request to `UpdateModel` to
           # ensure that their change will be applied to the model as intended.
-    },
-    &quot;onlinePredictionConsoleLogging&quot;: True or False, # Optional. If true, online prediction nodes send `stderr` and `stdout`
-        # streams to Stackdriver Logging. These can be more verbose than the standard
-        # access logs (see `onlinePredictionLogging`) and can incur higher cost.
-        # However, they are helpful for debugging. Note that
-        # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
-        # your project receives prediction requests at a high QPS. Estimate your
-        # costs before enabling this option.
-        #
-        # Default is false.
-    &quot;regions&quot;: [ # Optional. The list of regions where the model is going to be deployed.
-        # Only one region per model is supported.
-        # Defaults to &#x27;us-central1&#x27; if nothing is set.
-        # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
-        # for AI Platform services.
-        # Note:
-        # *   No matter where a model is deployed, it can always be accessed by
-        #     users from anywhere, both for online and batch prediction.
-        # *   The region for a batch prediction job is set by the region field when
-        #     submitting the batch prediction job and does not take its value from
-        #     this field.
-      &quot;A String&quot;,
-    ],
-    &quot;description&quot;: &quot;A String&quot;, # Optional. The description specified for the model when it was created.
-    &quot;onlinePredictionLogging&quot;: True or False, # Optional. If true, online prediction access logs are sent to StackDriver
-        # Logging. These logs are like standard server access logs, containing
-        # information like timestamp and latency for each request. Note that
-        # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
-        # your project receives prediction requests at a high queries per second rate
-        # (QPS). Estimate your costs before enabling this option.
-        #
-        # Default is false.
-    &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
-        # prevent simultaneous updates of a model from overwriting each other.
-        # It is strongly suggested that systems make use of the `etag` in the
-        # read-modify-write cycle to perform model updates in order to avoid race
-        # conditions: An `etag` is returned in the response to `GetModel`, and
-        # systems are expected to put that etag in the request to `UpdateModel` to
-        # ensure that their change will be applied to the model as intended.
-    &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your models.
-        # Each label is a key-value pair, where both the key and the value are
-        # arbitrary strings that you supply.
-        # For more information, see the documentation on
-        # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
-      &quot;a_key&quot;: &quot;A String&quot;,
-    },
-    &quot;name&quot;: &quot;A String&quot;, # Required. The name specified for the model when it was created.
-        #
-        # The model name must be unique within the project it is created in.
-  }</pre>
+      &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your models.
+          # Each label is a key-value pair, where both the key and the value are
+          # arbitrary strings that you supply.
+          # For more information, see the documentation on
+          # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
+        &quot;a_key&quot;: &quot;A String&quot;,
+      },
+      &quot;defaultVersion&quot;: { # Represents a version of the model. # Output only. The default version of the model. This version will be used to
+          # handle prediction requests that do not specify a version.
+          #
+          # You can change the default version by calling
+          # projects.models.versions.setDefault.
+          #
+          # Each version is a trained model deployed in the cloud, ready to handle
+          # prediction requests. A model can have multiple versions. You can get
+          # information about all of the versions of a given model by calling
+          # projects.models.versions.list.
+        &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your model
+            # versions. Each label is a key-value pair, where both the key and the value
+            # are arbitrary strings that you supply.
+            # For more information, see the documentation on
+            # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
+          &quot;a_key&quot;: &quot;A String&quot;,
+        },
+        &quot;machineType&quot;: &quot;A String&quot;, # Optional. The type of machine on which to serve the model. Currently only
+            # applies to online prediction service. If this field is not specified, it
+            # defaults to `mls1-c1-m2`.
+            #
+            # Online prediction supports the following machine types:
+            #
+            # * `mls1-c1-m2`
+            # * `mls1-c4-m2`
+            # * `n1-standard-2`
+            # * `n1-standard-4`
+            # * `n1-standard-8`
+            # * `n1-standard-16`
+            # * `n1-standard-32`
+            # * `n1-highmem-2`
+            # * `n1-highmem-4`
+            # * `n1-highmem-8`
+            # * `n1-highmem-16`
+            # * `n1-highmem-32`
+            # * `n1-highcpu-2`
+            # * `n1-highcpu-4`
+            # * `n1-highcpu-8`
+            # * `n1-highcpu-16`
+            # * `n1-highcpu-32`
+            #
+            # `mls1-c1-m2` is generally available. All other machine types are available
+            # in beta. Learn more about the [differences between machine
+            # types](/ml-engine/docs/machine-types-online-prediction).
+        &quot;packageUris&quot;: [ # Optional. Cloud Storage paths (`gs://…`) of packages for [custom
+            # prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines)
+            # or [scikit-learn pipelines with custom
+            # code](/ml-engine/docs/scikit/exporting-for-prediction#custom-pipeline-code).
+            #
+            # For a custom prediction routine, one of these packages must contain your
+            # Predictor class (see
+            # [`predictionClass`](#Version.FIELDS.prediction_class)). Additionally,
+            # include any dependencies used by your Predictor or scikit-learn pipeline
+            # uses that are not already included in your selected [runtime
+            # version](/ml-engine/docs/tensorflow/runtime-version-list).
+            #
+            # If you specify this field, you must also set
+            # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater.
+          &quot;A String&quot;,
+        ],
+        &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
+            # Only specify this field if you have specified a Compute Engine (N1) machine
+            # type in the `machineType` field. Learn more about [using GPUs for online
+            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+            # [accelerators for online
+            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+          &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
+          &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
+        },
+        &quot;state&quot;: &quot;A String&quot;, # Output only. The state of a version.
+        &quot;name&quot;: &quot;A String&quot;, # Required. The name specified for the version when it was created.
+            #
+            # The version name must be unique within the model it is created in.
+        &quot;autoScaling&quot;: { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
+            # response to increases and decreases in traffic. Care should be
+            # taken to ramp up traffic according to the model&#x27;s ability to scale
+            # or you will start seeing increases in latency and 429 response codes.
+            #
+            # Note that you cannot use AutoScaling if your version uses
+            # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use specify
+            # `manual_scaling`.
+          &quot;minNodes&quot;: 42, # Optional. The minimum number of nodes to allocate for this model. These
+              # nodes are always up, starting from the time the model is deployed.
+              # Therefore, the cost of operating this model will be at least
+              # `rate` * `min_nodes` * number of hours since last billing cycle,
+              # where `rate` is the cost per node-hour as documented in the
+              # [pricing guide](/ml-engine/docs/pricing),
+              # even if no predictions are performed. There is additional cost for each
+              # prediction performed.
+              #
+              # Unlike manual scaling, if the load gets too heavy for the nodes
+              # that are up, the service will automatically add nodes to handle the
+              # increased load as well as scale back as traffic drops, always maintaining
+              # at least `min_nodes`. You will be charged for the time in which additional
+              # nodes are used.
+              #
+              # If `min_nodes` is not specified and AutoScaling is used with a [legacy
+              # (MLS1) machine type](/ml-engine/docs/machine-types-online-prediction),
+              # `min_nodes` defaults to 0, in which case, when traffic to a model stops
+              # (and after a cool-down period), nodes will be shut down and no charges will
+              # be incurred until traffic to the model resumes.
+              #
+              # If `min_nodes` is not specified and AutoScaling is used with a [Compute
+              # Engine (N1) machine type](/ml-engine/docs/machine-types-online-prediction),
+              # `min_nodes` defaults to 1. `min_nodes` must be at least 1 for use with a
+              # Compute Engine machine type.
+              #
+              # Note that you cannot use AutoScaling if your version uses
+              # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use
+              # ManualScaling.
+              #
+              # You can set `min_nodes` when creating the model version, and you can also
+              # update `min_nodes` for an existing version:
+              # &lt;pre&gt;
+              # update_body.json:
+              # {
+              #   &#x27;autoScaling&#x27;: {
+              #     &#x27;minNodes&#x27;: 5
+              #   }
+              # }
+              # &lt;/pre&gt;
+              # HTTP request:
+              # &lt;pre style=&quot;max-width: 626px;&quot;&gt;
+              # PATCH
+              # https://ml.googleapis.com/v1/{name=projects/*/models/*/versions/*}?update_mask=autoScaling.minNodes
+              # -d @./update_body.json
+              # &lt;/pre&gt;
+        },
+        &quot;explanationConfig&quot;: { # Message holding configuration options for explaining model predictions. # Optional. Configures explainability features on the model&#x27;s version.
+            # Some explanation features require additional metadata to be loaded
+            # as part of the model payload.
+            # There are two feature attribution methods supported for TensorFlow models:
+            # integrated gradients and sampled Shapley.
+            # [Learn more about feature
+            # attributions.](/ai-platform/prediction/docs/ai-explanations/overview)
+          &quot;integratedGradientsAttribution&quot;: { # Attributes credit by computing the Aumann-Shapley value taking advantage # Attributes credit by computing the Aumann-Shapley value taking advantage
+              # of the model&#x27;s fully differentiable structure. Refer to this paper for
+              # more details: http://proceedings.mlr.press/v70/sundararajan17a.html
+              # of the model&#x27;s fully differentiable structure. Refer to this paper for
+              # more details: https://arxiv.org/abs/1703.01365
+            &quot;numIntegralSteps&quot;: 42, # Number of steps for approximating the path integral.
+                # A good value to start is 50 and gradually increase until the
+                # sum to diff property is met within the desired error range.
+          },
+          &quot;xraiAttribution&quot;: { # Attributes credit by computing the XRAI taking advantage # Attributes credit by computing the XRAI taking advantage
+              # of the model&#x27;s fully differentiable structure. Refer to this paper for
+              # more details: https://arxiv.org/abs/1906.02825
+              # Currently only implemented for models with natural image inputs.
+              # of the model&#x27;s fully differentiable structure. Refer to this paper for
+              # more details: https://arxiv.org/abs/1906.02825
+              # Currently only implemented for models with natural image inputs.
+            &quot;numIntegralSteps&quot;: 42, # Number of steps for approximating the path integral.
+                # A good value to start is 50 and gradually increase until the
+                # sum to diff property is met within the desired error range.
+          },
+          &quot;sampledShapleyAttribution&quot;: { # An attribution method that approximates Shapley values for features that # An attribution method that approximates Shapley values for features that
+              # contribute to the label being predicted. A sampling strategy is used to
+              # approximate the value rather than considering all subsets of features.
+              # contribute to the label being predicted. A sampling strategy is used to
+              # approximate the value rather than considering all subsets of features.
+            &quot;numPaths&quot;: 42, # The number of feature permutations to consider when approximating the
+                # Shapley values.
+          },
+        },
+        &quot;pythonVersion&quot;: &quot;A String&quot;, # Required. The version of Python used in prediction.
+            #
+            # The following Python versions are available:
+            #
+            # * Python &#x27;3.7&#x27; is available when `runtime_version` is set to &#x27;1.15&#x27; or
+            #   later.
+            # * Python &#x27;3.5&#x27; is available when `runtime_version` is set to a version
+            #   from &#x27;1.4&#x27; to &#x27;1.14&#x27;.
+            # * Python &#x27;2.7&#x27; is available when `runtime_version` is set to &#x27;1.15&#x27; or
+            #   earlier.
+            #
+            # Read more about the Python versions available for [each runtime
+            # version](/ml-engine/docs/runtime-version-list).
+        &quot;requestLoggingConfig&quot;: { # Configuration for logging request-response pairs to a BigQuery table. # Optional. *Only* specify this field in a
+            # projects.models.versions.patch
+            # request. Specifying it in a
+            # projects.models.versions.create
+            # request has no effect.
+            #
+            # Configures the request-response pair logging on predictions from this
+            # Version.
+            # Online prediction requests to a model version and the responses to these
+            # requests are converted to raw strings and saved to the specified BigQuery
+            # table. Logging is constrained by [BigQuery quotas and
+            # limits](/bigquery/quotas). If your project exceeds BigQuery quotas or limits,
+            # AI Platform Prediction does not log request-response pairs, but it continues
+            # to serve predictions.
+            #
+            # If you are using [continuous
+            # evaluation](/ml-engine/docs/continuous-evaluation/), you do not need to
+            # specify this configuration manually. Setting up continuous evaluation
+            # automatically enables logging of request-response pairs.
+          &quot;samplingPercentage&quot;: 3.14, # Percentage of requests to be logged, expressed as a fraction from 0 to 1.
+              # For example, if you want to log 10% of requests, enter `0.1`. The sampling
+              # window is the lifetime of the model version. Defaults to 0.
+          &quot;bigqueryTableName&quot;: &quot;A String&quot;, # Required. Fully qualified BigQuery table name in the following format:
+              # &quot;&lt;var&gt;project_id&lt;/var&gt;.&lt;var&gt;dataset_name&lt;/var&gt;.&lt;var&gt;table_name&lt;/var&gt;&quot;
+              #
+              # The specified table must already exist, and the &quot;Cloud ML Service Agent&quot;
+              # for your project must have permission to write to it. The table must have
+              # the following [schema](/bigquery/docs/schemas):
+              #
+              # &lt;table&gt;
+              #   &lt;tr&gt;&lt;th&gt;Field name&lt;/th&gt;&lt;th style=&quot;display: table-cell&quot;&gt;Type&lt;/th&gt;
+              #     &lt;th style=&quot;display: table-cell&quot;&gt;Mode&lt;/th&gt;&lt;/tr&gt;
+              #   &lt;tr&gt;&lt;td&gt;model&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+              #   &lt;tr&gt;&lt;td&gt;model_version&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+              #   &lt;tr&gt;&lt;td&gt;time&lt;/td&gt;&lt;td&gt;TIMESTAMP&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+              #   &lt;tr&gt;&lt;td&gt;raw_data&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+              #   &lt;tr&gt;&lt;td&gt;raw_prediction&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;NULLABLE&lt;/td&gt;&lt;/tr&gt;
+              #   &lt;tr&gt;&lt;td&gt;groundtruth&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;NULLABLE&lt;/td&gt;&lt;/tr&gt;
+              # &lt;/table&gt;
+        },
+        &quot;manualScaling&quot;: { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
+            # model. You should generally use `auto_scaling` with an appropriate
+            # `min_nodes` instead, but this option is available if you want more
+            # predictable billing. Beware that latency and error rates will increase
+            # if the traffic exceeds that capability of the system to serve it based
+            # on the selected number of nodes.
+          &quot;nodes&quot;: 42, # The number of nodes to allocate for this model. These nodes are always up,
+              # starting from the time the model is deployed, so the cost of operating
+              # this model will be proportional to `nodes` * number of hours since
+              # last billing cycle plus the cost for each prediction performed.
+        },
+        &quot;createTime&quot;: &quot;A String&quot;, # Output only. The time the version was created.
+        &quot;lastUseTime&quot;: &quot;A String&quot;, # Output only. The time the version was last used for prediction.
+        &quot;framework&quot;: &quot;A String&quot;, # Optional. The machine learning framework AI Platform uses to train
+            # this version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,
+            # `XGBOOST`. If you do not specify a framework, AI Platform
+            # will analyze files in the deployment_uri to determine a framework. If you
+            # choose `SCIKIT_LEARN` or `XGBOOST`, you must also set the runtime version
+            # of the model to 1.4 or greater.
+            #
+            # Do **not** specify a framework if you&#x27;re deploying a [custom
+            # prediction routine](/ml-engine/docs/tensorflow/custom-prediction-routines).
+            #
+            # If you specify a [Compute Engine (N1) machine
+            # type](/ml-engine/docs/machine-types-online-prediction) in the
+            # `machineType` field, you must specify `TENSORFLOW`
+            # for the framework.
+        &quot;predictionClass&quot;: &quot;A String&quot;, # Optional. The fully qualified name
+            # (&lt;var&gt;module_name&lt;/var&gt;.&lt;var&gt;class_name&lt;/var&gt;) of a class that implements
+            # the Predictor interface described in this reference field. The module
+            # containing this class should be included in a package provided to the
+            # [`packageUris` field](#Version.FIELDS.package_uris).
+            #
+            # Specify this field if and only if you are deploying a [custom prediction
+            # routine (beta)](/ml-engine/docs/tensorflow/custom-prediction-routines).
+            # If you specify this field, you must set
+            # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater and
+            # you must set `machineType` to a [legacy (MLS1)
+            # machine type](/ml-engine/docs/machine-types-online-prediction).
+            #
+            # The following code sample provides the Predictor interface:
+            #
+            # &lt;pre style=&quot;max-width: 626px;&quot;&gt;
+            # class Predictor(object):
+            # &quot;&quot;&quot;Interface for constructing custom predictors.&quot;&quot;&quot;
+            #
+            # def predict(self, instances, **kwargs):
+            #     &quot;&quot;&quot;Performs custom prediction.
+            #
+            #     Instances are the decoded values from the request. They have already
+            #     been deserialized from JSON.
+            #
+            #     Args:
+            #         instances: A list of prediction input instances.
+            #         **kwargs: A dictionary of keyword args provided as additional
+            #             fields on the predict request body.
+            #
+            #     Returns:
+            #         A list of outputs containing the prediction results. This list must
+            #         be JSON serializable.
+            #     &quot;&quot;&quot;
+            #     raise NotImplementedError()
+            #
+            # @classmethod
+            # def from_path(cls, model_dir):
+            #     &quot;&quot;&quot;Creates an instance of Predictor using the given path.
+            #
+            #     Loading of the predictor should be done in this method.
+            #
+            #     Args:
+            #         model_dir: The local directory that contains the exported model
+            #             file along with any additional files uploaded when creating the
+            #             version resource.
+            #
+            #     Returns:
+            #         An instance implementing this Predictor class.
+            #     &quot;&quot;&quot;
+            #     raise NotImplementedError()
+            # &lt;/pre&gt;
+            #
+            # Learn more about [the Predictor interface and custom prediction
+            # routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
+        &quot;isDefault&quot;: True or False, # Output only. If true, this version will be used to handle prediction
+            # requests that do not specify a version.
+            #
+            # You can change the default version by calling
+            # projects.methods.versions.setDefault.
+        &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
+            # prevent simultaneous updates of a model from overwriting each other.
+            # It is strongly suggested that systems make use of the `etag` in the
+            # read-modify-write cycle to perform model updates in order to avoid race
+            # conditions: An `etag` is returned in the response to `GetVersion`, and
+            # systems are expected to put that etag in the request to `UpdateVersion` to
+            # ensure that their change will be applied to the model as intended.
+        &quot;serviceAccount&quot;: &quot;A String&quot;, # Optional. Specifies the service account for resource access control.
+        &quot;errorMessage&quot;: &quot;A String&quot;, # Output only. The details of a failure or a cancellation.
+        &quot;deploymentUri&quot;: &quot;A String&quot;, # Required. The Cloud Storage location of the trained model used to
+            # create the version. See the
+            # [guide to model
+            # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
+            # information.
+            #
+            # When passing Version to
+            # projects.models.versions.create
+            # the model service uses the specified location as the source of the model.
+            # Once deployed, the model version is hosted by the prediction service, so
+            # this location is useful only as a historical record.
+            # The total number of model files can&#x27;t exceed 1000.
+        &quot;runtimeVersion&quot;: &quot;A String&quot;, # Required. The AI Platform runtime version to use for this deployment.
+            #
+            # For more information, see the
+            # [runtime version list](/ml-engine/docs/runtime-version-list) and
+            # [how to manage runtime versions](/ml-engine/docs/versioning).
+        &quot;description&quot;: &quot;A String&quot;, # Optional. The description specified for the version when it was created.
+      },
+      &quot;onlinePredictionLogging&quot;: True or False, # Optional. If true, online prediction access logs are sent to StackDriver
+          # Logging. These logs are like standard server access logs, containing
+          # information like timestamp and latency for each request. Note that
+          # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
+          # your project receives prediction requests at a high queries per second rate
+          # (QPS). Estimate your costs before enabling this option.
+          #
+          # Default is false.
+    }</pre>
 </div>
 
 <div class="method">
@@ -1472,30 +1472,18 @@
       #
       # For a description of IAM and its features, see the
       # [IAM documentation](https://cloud.google.com/iam/docs/).
-    &quot;version&quot;: 42, # Specifies the format of the policy.
-        #
-        # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
-        # are rejected.
-        #
-        # Any operation that affects conditional role bindings must specify version
-        # `3`. This requirement applies to the following operations:
-        #
-        # * Getting a policy that includes a conditional role binding
-        # * Adding a conditional role binding to a policy
-        # * Changing a conditional role binding in a policy
-        # * Removing any role binding, with or without a condition, from a policy
-        #   that includes conditions
+    &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
+        # prevent simultaneous updates of a policy from overwriting each other.
+        # It is strongly suggested that systems make use of the `etag` in the
+        # read-modify-write cycle to perform policy updates in order to avoid race
+        # conditions: An `etag` is returned in the response to `getIamPolicy`, and
+        # systems are expected to put that etag in the request to `setIamPolicy` to
+        # ensure that their change will be applied to the same version of the policy.
         #
         # **Important:** If you use IAM Conditions, you must include the `etag` field
         # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
         # you to overwrite a version `3` policy with a version `1` policy, and all of
         # the conditions in the version `3` policy are lost.
-        #
-        # If a policy does not include any conditions, operations on that policy may
-        # specify any valid version or leave the field unset.
-        #
-        # To learn which resources support conditions in their IAM policies, see the
-        # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
     &quot;auditConfigs&quot;: [ # Specifies cloud audit logging configuration for this policy.
       { # Specifies the audit configuration for a service.
           # The configuration determines which permission types are logged, and what
@@ -1512,7 +1500,7 @@
           #     {
           #       &quot;audit_configs&quot;: [
           #         {
-          #           &quot;service&quot;: &quot;allServices&quot;
+          #           &quot;service&quot;: &quot;allServices&quot;,
           #           &quot;audit_log_configs&quot;: [
           #             {
           #               &quot;log_type&quot;: &quot;DATA_READ&quot;,
@@ -1521,18 +1509,18 @@
           #               ]
           #             },
           #             {
-          #               &quot;log_type&quot;: &quot;DATA_WRITE&quot;,
+          #               &quot;log_type&quot;: &quot;DATA_WRITE&quot;
           #             },
           #             {
-          #               &quot;log_type&quot;: &quot;ADMIN_READ&quot;,
+          #               &quot;log_type&quot;: &quot;ADMIN_READ&quot;
           #             }
           #           ]
           #         },
           #         {
-          #           &quot;service&quot;: &quot;sampleservice.googleapis.com&quot;
+          #           &quot;service&quot;: &quot;sampleservice.googleapis.com&quot;,
           #           &quot;audit_log_configs&quot;: [
           #             {
-          #               &quot;log_type&quot;: &quot;DATA_READ&quot;,
+          #               &quot;log_type&quot;: &quot;DATA_READ&quot;
           #             },
           #             {
           #               &quot;log_type&quot;: &quot;DATA_WRITE&quot;,
@@ -1564,27 +1552,53 @@
               #           ]
               #         },
               #         {
-              #           &quot;log_type&quot;: &quot;DATA_WRITE&quot;,
+              #           &quot;log_type&quot;: &quot;DATA_WRITE&quot;
               #         }
               #       ]
               #     }
               #
               # This enables &#x27;DATA_READ&#x27; and &#x27;DATA_WRITE&#x27; logging, while exempting
               # jose@example.com from DATA_READ logging.
+            &quot;logType&quot;: &quot;A String&quot;, # The log type that this config enables.
             &quot;exemptedMembers&quot;: [ # Specifies the identities that do not cause logging for this type of
                 # permission.
                 # Follows the same format of Binding.members.
               &quot;A String&quot;,
             ],
-            &quot;logType&quot;: &quot;A String&quot;, # The log type that this config enables.
           },
         ],
       },
     ],
+    &quot;version&quot;: 42, # Specifies the format of the policy.
+        #
+        # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
+        # are rejected.
+        #
+        # Any operation that affects conditional role bindings must specify version
+        # `3`. This requirement applies to the following operations:
+        #
+        # * Getting a policy that includes a conditional role binding
+        # * Adding a conditional role binding to a policy
+        # * Changing a conditional role binding in a policy
+        # * Removing any role binding, with or without a condition, from a policy
+        #   that includes conditions
+        #
+        # **Important:** If you use IAM Conditions, you must include the `etag` field
+        # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+        # you to overwrite a version `3` policy with a version `1` policy, and all of
+        # the conditions in the version `3` policy are lost.
+        #
+        # If a policy does not include any conditions, operations on that policy may
+        # specify any valid version or leave the field unset.
+        #
+        # To learn which resources support conditions in their IAM policies, see the
+        # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
     &quot;bindings&quot;: [ # Associates a list of `members` to a `role`. Optionally, may specify a
         # `condition` that determines how and when the `bindings` are applied. Each
         # of the `bindings` must contain at least one member.
       { # Associates `members` with a `role`.
+        &quot;role&quot;: &quot;A String&quot;, # Role that is assigned to `members`.
+            # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
         &quot;condition&quot;: { # Represents a textual expression in the Common Expression Language (CEL) # The condition that is associated with this binding.
             #
             # If the condition evaluates to `true`, then this binding applies to the
@@ -1627,8 +1641,6 @@
             # The exact variables and functions that may be referenced within an expression
             # are determined by the service that evaluates it. See the service
             # documentation for additional information.
-          &quot;description&quot;: &quot;A String&quot;, # Optional. Description of the expression. This is a longer text which
-              # describes the expression, e.g. when hovered over it in a UI.
           &quot;expression&quot;: &quot;A String&quot;, # Textual representation of an expression in Common Expression Language
               # syntax.
           &quot;title&quot;: &quot;A String&quot;, # Optional. Title for the expression, i.e. a short string describing
@@ -1636,6 +1648,8 @@
               # expression.
           &quot;location&quot;: &quot;A String&quot;, # Optional. String indicating the location of the expression for error
               # reporting, e.g. a file name and a position in the file.
+          &quot;description&quot;: &quot;A String&quot;, # Optional. Description of the expression. This is a longer text which
+              # describes the expression, e.g. when hovered over it in a UI.
         },
         &quot;members&quot;: [ # Specifies the identities requesting access for a Cloud Platform resource.
             # `members` can have the following values:
@@ -1682,27 +1696,13 @@
             #
           &quot;A String&quot;,
         ],
-        &quot;role&quot;: &quot;A String&quot;, # Role that is assigned to `members`.
-            # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
       },
     ],
-    &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
-        # prevent simultaneous updates of a policy from overwriting each other.
-        # It is strongly suggested that systems make use of the `etag` in the
-        # read-modify-write cycle to perform policy updates in order to avoid race
-        # conditions: An `etag` is returned in the response to `getIamPolicy`, and
-        # systems are expected to put that etag in the request to `setIamPolicy` to
-        # ensure that their change will be applied to the same version of the policy.
-        #
-        # **Important:** If you use IAM Conditions, you must include the `etag` field
-        # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
-        # you to overwrite a version `3` policy with a version `1` policy, and all of
-        # the conditions in the version `3` policy are lost.
   }</pre>
 </div>
 
 <div class="method">
-    <code class="details" id="list">list(parent, filter=None, pageToken=None, pageSize=None, x__xgafv=None)</code>
+    <code class="details" id="list">list(parent, pageSize=None, pageToken=None, filter=None, x__xgafv=None)</code>
   <pre>Lists the models in a project.
 
 Each project can contain multiple models, and each model can have multiple
@@ -1713,16 +1713,16 @@
 
 Args:
   parent: string, Required. The name of the project whose models are to be listed. (required)
-  filter: string, Optional. Specifies the subset of models to retrieve.
-  pageToken: string, Optional. A page token to request the next page of results.
-
-You get the token from the `next_page_token` field of the response from
-the previous call.
   pageSize: integer, Optional. The number of models to retrieve per &quot;page&quot; of results. If there
 are more remaining results than this number, the response message will
 contain a valid value in the `next_page_token` field.
 
 The default value is 20, and the maximum page size is 100.
+  pageToken: string, Optional. A page token to request the next page of results.
+
+You get the token from the `next_page_token` field of the response from
+the previous call.
+  filter: string, Optional. Specifies the subset of models to retrieve.
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -1732,398 +1732,398 @@
   An object of the form:
 
     { # Response message for the ListModels method.
-    &quot;nextPageToken&quot;: &quot;A String&quot;, # Optional. Pass this token as the `page_token` field of the request for a
-        # subsequent call.
     &quot;models&quot;: [ # The list of models.
       { # Represents a machine learning solution.
-          #
-          # A model can have multiple versions, each of which is a deployed, trained
-          # model ready to receive prediction requests. The model itself is just a
-          # container.
-        &quot;defaultVersion&quot;: { # Represents a version of the model. # Output only. The default version of the model. This version will be used to
-            # handle prediction requests that do not specify a version.
             #
-            # You can change the default version by calling
-            # projects.models.versions.setDefault.
-            #
-            # Each version is a trained model deployed in the cloud, ready to handle
-            # prediction requests. A model can have multiple versions. You can get
-            # information about all of the versions of a given model by calling
-            # projects.models.versions.list.
-          &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
-              # Only specify this field if you have specified a Compute Engine (N1) machine
-              # type in the `machineType` field. Learn more about [using GPUs for online
-              # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-              # Note that the AcceleratorConfig can be used in both Jobs and Versions.
-              # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
-              # [accelerators for online
-              # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-            &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
-            &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
-          },
-          &quot;isDefault&quot;: True or False, # Output only. If true, this version will be used to handle prediction
-              # requests that do not specify a version.
-              #
-              # You can change the default version by calling
-              # projects.methods.versions.setDefault.
-          &quot;manualScaling&quot;: { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
-              # model. You should generally use `auto_scaling` with an appropriate
-              # `min_nodes` instead, but this option is available if you want more
-              # predictable billing. Beware that latency and error rates will increase
-              # if the traffic exceeds that capability of the system to serve it based
-              # on the selected number of nodes.
-            &quot;nodes&quot;: 42, # The number of nodes to allocate for this model. These nodes are always up,
-                # starting from the time the model is deployed, so the cost of operating
-                # this model will be proportional to `nodes` * number of hours since
-                # last billing cycle plus the cost for each prediction performed.
-          },
-          &quot;state&quot;: &quot;A String&quot;, # Output only. The state of a version.
-          &quot;name&quot;: &quot;A String&quot;, # Required. The name specified for the version when it was created.
-              #
-              # The version name must be unique within the model it is created in.
-          &quot;serviceAccount&quot;: &quot;A String&quot;, # Optional. Specifies the service account for resource access control.
-          &quot;pythonVersion&quot;: &quot;A String&quot;, # Required. The version of Python used in prediction.
-              #
-              # The following Python versions are available:
-              #
-              # * Python &#x27;3.7&#x27; is available when `runtime_version` is set to &#x27;1.15&#x27; or
-              #   later.
-              # * Python &#x27;3.5&#x27; is available when `runtime_version` is set to a version
-              #   from &#x27;1.4&#x27; to &#x27;1.14&#x27;.
-              # * Python &#x27;2.7&#x27; is available when `runtime_version` is set to &#x27;1.15&#x27; or
-              #   earlier.
-              #
-              # Read more about the Python versions available for [each runtime
-              # version](/ml-engine/docs/runtime-version-list).
-          &quot;lastUseTime&quot;: &quot;A String&quot;, # Output only. The time the version was last used for prediction.
-          &quot;predictionClass&quot;: &quot;A String&quot;, # Optional. The fully qualified name
-              # (&lt;var&gt;module_name&lt;/var&gt;.&lt;var&gt;class_name&lt;/var&gt;) of a class that implements
-              # the Predictor interface described in this reference field. The module
-              # containing this class should be included in a package provided to the
-              # [`packageUris` field](#Version.FIELDS.package_uris).
-              #
-              # Specify this field if and only if you are deploying a [custom prediction
-              # routine (beta)](/ml-engine/docs/tensorflow/custom-prediction-routines).
-              # If you specify this field, you must set
-              # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater and
-              # you must set `machineType` to a [legacy (MLS1)
-              # machine type](/ml-engine/docs/machine-types-online-prediction).
-              #
-              # The following code sample provides the Predictor interface:
-              #
-              # &lt;pre style=&quot;max-width: 626px;&quot;&gt;
-              # class Predictor(object):
-              # &quot;&quot;&quot;Interface for constructing custom predictors.&quot;&quot;&quot;
-              #
-              # def predict(self, instances, **kwargs):
-              #     &quot;&quot;&quot;Performs custom prediction.
-              #
-              #     Instances are the decoded values from the request. They have already
-              #     been deserialized from JSON.
-              #
-              #     Args:
-              #         instances: A list of prediction input instances.
-              #         **kwargs: A dictionary of keyword args provided as additional
-              #             fields on the predict request body.
-              #
-              #     Returns:
-              #         A list of outputs containing the prediction results. This list must
-              #         be JSON serializable.
-              #     &quot;&quot;&quot;
-              #     raise NotImplementedError()
-              #
-              # @classmethod
-              # def from_path(cls, model_dir):
-              #     &quot;&quot;&quot;Creates an instance of Predictor using the given path.
-              #
-              #     Loading of the predictor should be done in this method.
-              #
-              #     Args:
-              #         model_dir: The local directory that contains the exported model
-              #             file along with any additional files uploaded when creating the
-              #             version resource.
-              #
-              #     Returns:
-              #         An instance implementing this Predictor class.
-              #     &quot;&quot;&quot;
-              #     raise NotImplementedError()
-              # &lt;/pre&gt;
-              #
-              # Learn more about [the Predictor interface and custom prediction
-              # routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
-          &quot;deploymentUri&quot;: &quot;A String&quot;, # Required. The Cloud Storage location of the trained model used to
-              # create the version. See the
-              # [guide to model
-              # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
-              # information.
-              #
-              # When passing Version to
-              # projects.models.versions.create
-              # the model service uses the specified location as the source of the model.
-              # Once deployed, the model version is hosted by the prediction service, so
-              # this location is useful only as a historical record.
-              # The total number of model files can&#x27;t exceed 1000.
-          &quot;packageUris&quot;: [ # Optional. Cloud Storage paths (`gs://…`) of packages for [custom
-              # prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines)
-              # or [scikit-learn pipelines with custom
-              # code](/ml-engine/docs/scikit/exporting-for-prediction#custom-pipeline-code).
-              #
-              # For a custom prediction routine, one of these packages must contain your
-              # Predictor class (see
-              # [`predictionClass`](#Version.FIELDS.prediction_class)). Additionally,
-              # include any dependencies used by your Predictor or scikit-learn pipeline
-              # uses that are not already included in your selected [runtime
-              # version](/ml-engine/docs/tensorflow/runtime-version-list).
-              #
-              # If you specify this field, you must also set
-              # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater.
+            # A model can have multiple versions, each of which is a deployed, trained
+            # model ready to receive prediction requests. The model itself is just a
+            # container.
+          &quot;description&quot;: &quot;A String&quot;, # Optional. The description specified for the model when it was created.
+          &quot;regions&quot;: [ # Optional. The list of regions where the model is going to be deployed.
+              # Only one region per model is supported.
+              # Defaults to &#x27;us-central1&#x27; if nothing is set.
+              # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
+              # for AI Platform services.
+              # Note:
+              # *   No matter where a model is deployed, it can always be accessed by
+              #     users from anywhere, both for online and batch prediction.
+              # *   The region for a batch prediction job is set by the region field when
+              #     submitting the batch prediction job and does not take its value from
+              #     this field.
             &quot;A String&quot;,
           ],
-          &quot;explanationConfig&quot;: { # Message holding configuration options for explaining model predictions. # Optional. Configures explainability features on the model&#x27;s version.
-              # Some explanation features require additional metadata to be loaded
-              # as part of the model payload.
-              # There are two feature attribution methods supported for TensorFlow models:
-              # integrated gradients and sampled Shapley.
-              # [Learn more about feature
-              # attributions.](/ai-platform/prediction/docs/ai-explanations/overview)
-            &quot;integratedGradientsAttribution&quot;: { # Attributes credit by computing the Aumann-Shapley value taking advantage # Attributes credit by computing the Aumann-Shapley value taking advantage
-                # of the model&#x27;s fully differentiable structure. Refer to this paper for
-                # more details: http://proceedings.mlr.press/v70/sundararajan17a.html
-                # of the model&#x27;s fully differentiable structure. Refer to this paper for
-                # more details: https://arxiv.org/abs/1703.01365
-              &quot;numIntegralSteps&quot;: 42, # Number of steps for approximating the path integral.
-                  # A good value to start is 50 and gradually increase until the
-                  # sum to diff property is met within the desired error range.
-            },
-            &quot;sampledShapleyAttribution&quot;: { # An attribution method that approximates Shapley values for features that # An attribution method that approximates Shapley values for features that
-                # contribute to the label being predicted. A sampling strategy is used to
-                # approximate the value rather than considering all subsets of features.
-                # contribute to the label being predicted. A sampling strategy is used to
-                # approximate the value rather than considering all subsets of features.
-              &quot;numPaths&quot;: 42, # The number of feature permutations to consider when approximating the
-                  # Shapley values.
-            },
-            &quot;xraiAttribution&quot;: { # Attributes credit by computing the XRAI taking advantage # Attributes credit by computing the XRAI taking advantage
-                # of the model&#x27;s fully differentiable structure. Refer to this paper for
-                # more details: https://arxiv.org/abs/1906.02825
-                # Currently only implemented for models with natural image inputs.
-                # of the model&#x27;s fully differentiable structure. Refer to this paper for
-                # more details: https://arxiv.org/abs/1906.02825
-                # Currently only implemented for models with natural image inputs.
-              &quot;numIntegralSteps&quot;: 42, # Number of steps for approximating the path integral.
-                  # A good value to start is 50 and gradually increase until the
-                  # sum to diff property is met within the desired error range.
-            },
-          },
-          &quot;autoScaling&quot;: { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
-              # response to increases and decreases in traffic. Care should be
-              # taken to ramp up traffic according to the model&#x27;s ability to scale
-              # or you will start seeing increases in latency and 429 response codes.
+          &quot;name&quot;: &quot;A String&quot;, # Required. The name specified for the model when it was created.
               #
-              # Note that you cannot use AutoScaling if your version uses
-              # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use specify
-              # `manual_scaling`.
-            &quot;minNodes&quot;: 42, # Optional. The minimum number of nodes to allocate for this model. These
-                # nodes are always up, starting from the time the model is deployed.
-                # Therefore, the cost of operating this model will be at least
-                # `rate` * `min_nodes` * number of hours since last billing cycle,
-                # where `rate` is the cost per node-hour as documented in the
-                # [pricing guide](/ml-engine/docs/pricing),
-                # even if no predictions are performed. There is additional cost for each
-                # prediction performed.
-                #
-                # Unlike manual scaling, if the load gets too heavy for the nodes
-                # that are up, the service will automatically add nodes to handle the
-                # increased load as well as scale back as traffic drops, always maintaining
-                # at least `min_nodes`. You will be charged for the time in which additional
-                # nodes are used.
-                #
-                # If `min_nodes` is not specified and AutoScaling is used with a [legacy
-                # (MLS1) machine type](/ml-engine/docs/machine-types-online-prediction),
-                # `min_nodes` defaults to 0, in which case, when traffic to a model stops
-                # (and after a cool-down period), nodes will be shut down and no charges will
-                # be incurred until traffic to the model resumes.
-                #
-                # If `min_nodes` is not specified and AutoScaling is used with a [Compute
-                # Engine (N1) machine type](/ml-engine/docs/machine-types-online-prediction),
-                # `min_nodes` defaults to 1. `min_nodes` must be at least 1 for use with a
-                # Compute Engine machine type.
-                #
-                # Note that you cannot use AutoScaling if your version uses
-                # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use
-                # ManualScaling.
-                #
-                # You can set `min_nodes` when creating the model version, and you can also
-                # update `min_nodes` for an existing version:
-                # &lt;pre&gt;
-                # update_body.json:
-                # {
-                #   &#x27;autoScaling&#x27;: {
-                #     &#x27;minNodes&#x27;: 5
-                #   }
-                # }
-                # &lt;/pre&gt;
-                # HTTP request:
-                # &lt;pre style=&quot;max-width: 626px;&quot;&gt;
-                # PATCH
-                # https://ml.googleapis.com/v1/{name=projects/*/models/*/versions/*}?update_mask=autoScaling.minNodes
-                # -d @./update_body.json
-                # &lt;/pre&gt;
-          },
-          &quot;createTime&quot;: &quot;A String&quot;, # Output only. The time the version was created.
-          &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your model
-              # versions. Each label is a key-value pair, where both the key and the value
-              # are arbitrary strings that you supply.
-              # For more information, see the documentation on
-              # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
-            &quot;a_key&quot;: &quot;A String&quot;,
-          },
-          &quot;requestLoggingConfig&quot;: { # Configuration for logging request-response pairs to a BigQuery table. # Optional. *Only* specify this field in a
-              # projects.models.versions.patch
-              # request. Specifying it in a
-              # projects.models.versions.create
-              # request has no effect.
+              # The model name must be unique within the project it is created in.
+          &quot;onlinePredictionConsoleLogging&quot;: True or False, # Optional. If true, online prediction nodes send `stderr` and `stdout`
+              # streams to Stackdriver Logging. These can be more verbose than the standard
+              # access logs (see `onlinePredictionLogging`) and can incur higher cost.
+              # However, they are helpful for debugging. Note that
+              # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
+              # your project receives prediction requests at a high QPS. Estimate your
+              # costs before enabling this option.
               #
-              # Configures the request-response pair logging on predictions from this
-              # Version.
-              # Online prediction requests to a model version and the responses to these
-              # requests are converted to raw strings and saved to the specified BigQuery
-              # table. Logging is constrained by [BigQuery quotas and
-              # limits](/bigquery/quotas). If your project exceeds BigQuery quotas or limits,
-              # AI Platform Prediction does not log request-response pairs, but it continues
-              # to serve predictions.
-              #
-              # If you are using [continuous
-              # evaluation](/ml-engine/docs/continuous-evaluation/), you do not need to
-              # specify this configuration manually. Setting up continuous evaluation
-              # automatically enables logging of request-response pairs.
-            &quot;bigqueryTableName&quot;: &quot;A String&quot;, # Required. Fully qualified BigQuery table name in the following format:
-                # &quot;&lt;var&gt;project_id&lt;/var&gt;.&lt;var&gt;dataset_name&lt;/var&gt;.&lt;var&gt;table_name&lt;/var&gt;&quot;
-                #
-                # The specified table must already exist, and the &quot;Cloud ML Service Agent&quot;
-                # for your project must have permission to write to it. The table must have
-                # the following [schema](/bigquery/docs/schemas):
-                #
-                # &lt;table&gt;
-                #   &lt;tr&gt;&lt;th&gt;Field name&lt;/th&gt;&lt;th style=&quot;display: table-cell&quot;&gt;Type&lt;/th&gt;
-                #     &lt;th style=&quot;display: table-cell&quot;&gt;Mode&lt;/th&gt;&lt;/tr&gt;
-                #   &lt;tr&gt;&lt;td&gt;model&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
-                #   &lt;tr&gt;&lt;td&gt;model_version&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
-                #   &lt;tr&gt;&lt;td&gt;time&lt;/td&gt;&lt;td&gt;TIMESTAMP&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
-                #   &lt;tr&gt;&lt;td&gt;raw_data&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
-                #   &lt;tr&gt;&lt;td&gt;raw_prediction&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;NULLABLE&lt;/td&gt;&lt;/tr&gt;
-                #   &lt;tr&gt;&lt;td&gt;groundtruth&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;NULLABLE&lt;/td&gt;&lt;/tr&gt;
-                # &lt;/table&gt;
-            &quot;samplingPercentage&quot;: 3.14, # Percentage of requests to be logged, expressed as a fraction from 0 to 1.
-                # For example, if you want to log 10% of requests, enter `0.1`. The sampling
-                # window is the lifetime of the model version. Defaults to 0.
-          },
-          &quot;errorMessage&quot;: &quot;A String&quot;, # Output only. The details of a failure or a cancellation.
-          &quot;machineType&quot;: &quot;A String&quot;, # Optional. The type of machine on which to serve the model. Currently only
-              # applies to online prediction service. If this field is not specified, it
-              # defaults to `mls1-c1-m2`.
-              #
-              # Online prediction supports the following machine types:
-              #
-              # * `mls1-c1-m2`
-              # * `mls1-c4-m2`
-              # * `n1-standard-2`
-              # * `n1-standard-4`
-              # * `n1-standard-8`
-              # * `n1-standard-16`
-              # * `n1-standard-32`
-              # * `n1-highmem-2`
-              # * `n1-highmem-4`
-              # * `n1-highmem-8`
-              # * `n1-highmem-16`
-              # * `n1-highmem-32`
-              # * `n1-highcpu-2`
-              # * `n1-highcpu-4`
-              # * `n1-highcpu-8`
-              # * `n1-highcpu-16`
-              # * `n1-highcpu-32`
-              #
-              # `mls1-c1-m2` is generally available. All other machine types are available
-              # in beta. Learn more about the [differences between machine
-              # types](/ml-engine/docs/machine-types-online-prediction).
-          &quot;runtimeVersion&quot;: &quot;A String&quot;, # Required. The AI Platform runtime version to use for this deployment.
-              #
-              # For more information, see the
-              # [runtime version list](/ml-engine/docs/runtime-version-list) and
-              # [how to manage runtime versions](/ml-engine/docs/versioning).
-          &quot;description&quot;: &quot;A String&quot;, # Optional. The description specified for the version when it was created.
-          &quot;framework&quot;: &quot;A String&quot;, # Optional. The machine learning framework AI Platform uses to train
-              # this version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,
-              # `XGBOOST`. If you do not specify a framework, AI Platform
-              # will analyze files in the deployment_uri to determine a framework. If you
-              # choose `SCIKIT_LEARN` or `XGBOOST`, you must also set the runtime version
-              # of the model to 1.4 or greater.
-              #
-              # Do **not** specify a framework if you&#x27;re deploying a [custom
-              # prediction routine](/ml-engine/docs/tensorflow/custom-prediction-routines).
-              #
-              # If you specify a [Compute Engine (N1) machine
-              # type](/ml-engine/docs/machine-types-online-prediction) in the
-              # `machineType` field, you must specify `TENSORFLOW`
-              # for the framework.
+              # Default is false.
           &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
               # prevent simultaneous updates of a model from overwriting each other.
               # It is strongly suggested that systems make use of the `etag` in the
               # read-modify-write cycle to perform model updates in order to avoid race
-              # conditions: An `etag` is returned in the response to `GetVersion`, and
-              # systems are expected to put that etag in the request to `UpdateVersion` to
+              # conditions: An `etag` is returned in the response to `GetModel`, and
+              # systems are expected to put that etag in the request to `UpdateModel` to
               # ensure that their change will be applied to the model as intended.
+          &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your models.
+              # Each label is a key-value pair, where both the key and the value are
+              # arbitrary strings that you supply.
+              # For more information, see the documentation on
+              # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
+            &quot;a_key&quot;: &quot;A String&quot;,
+          },
+          &quot;defaultVersion&quot;: { # Represents a version of the model. # Output only. The default version of the model. This version will be used to
+              # handle prediction requests that do not specify a version.
+              #
+              # You can change the default version by calling
+              # projects.models.versions.setDefault.
+              #
+              # Each version is a trained model deployed in the cloud, ready to handle
+              # prediction requests. A model can have multiple versions. You can get
+              # information about all of the versions of a given model by calling
+              # projects.models.versions.list.
+            &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your model
+                # versions. Each label is a key-value pair, where both the key and the value
+                # are arbitrary strings that you supply.
+                # For more information, see the documentation on
+                # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
+              &quot;a_key&quot;: &quot;A String&quot;,
+            },
+            &quot;machineType&quot;: &quot;A String&quot;, # Optional. The type of machine on which to serve the model. Currently only
+                # applies to online prediction service. If this field is not specified, it
+                # defaults to `mls1-c1-m2`.
+                #
+                # Online prediction supports the following machine types:
+                #
+                # * `mls1-c1-m2`
+                # * `mls1-c4-m2`
+                # * `n1-standard-2`
+                # * `n1-standard-4`
+                # * `n1-standard-8`
+                # * `n1-standard-16`
+                # * `n1-standard-32`
+                # * `n1-highmem-2`
+                # * `n1-highmem-4`
+                # * `n1-highmem-8`
+                # * `n1-highmem-16`
+                # * `n1-highmem-32`
+                # * `n1-highcpu-2`
+                # * `n1-highcpu-4`
+                # * `n1-highcpu-8`
+                # * `n1-highcpu-16`
+                # * `n1-highcpu-32`
+                #
+                # `mls1-c1-m2` is generally available. All other machine types are available
+                # in beta. Learn more about the [differences between machine
+                # types](/ml-engine/docs/machine-types-online-prediction).
+            &quot;packageUris&quot;: [ # Optional. Cloud Storage paths (`gs://…`) of packages for [custom
+                # prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines)
+                # or [scikit-learn pipelines with custom
+                # code](/ml-engine/docs/scikit/exporting-for-prediction#custom-pipeline-code).
+                #
+                # For a custom prediction routine, one of these packages must contain your
+                # Predictor class (see
+                # [`predictionClass`](#Version.FIELDS.prediction_class)). Additionally,
+                # include any dependencies used by your Predictor or scikit-learn pipeline
+                # uses that are not already included in your selected [runtime
+                # version](/ml-engine/docs/tensorflow/runtime-version-list).
+                #
+                # If you specify this field, you must also set
+                # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater.
+              &quot;A String&quot;,
+            ],
+            &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
+                # Only specify this field if you have specified a Compute Engine (N1) machine
+                # type in the `machineType` field. Learn more about [using GPUs for online
+                # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+                # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+                # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+                # [accelerators for online
+                # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+              &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
+              &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
+            },
+            &quot;state&quot;: &quot;A String&quot;, # Output only. The state of a version.
+            &quot;name&quot;: &quot;A String&quot;, # Required. The name specified for the version when it was created.
+                #
+                # The version name must be unique within the model it is created in.
+            &quot;autoScaling&quot;: { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
+                # response to increases and decreases in traffic. Care should be
+                # taken to ramp up traffic according to the model&#x27;s ability to scale
+                # or you will start seeing increases in latency and 429 response codes.
+                #
+                # Note that you cannot use AutoScaling if your version uses
+                # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use specify
+                # `manual_scaling`.
+              &quot;minNodes&quot;: 42, # Optional. The minimum number of nodes to allocate for this model. These
+                  # nodes are always up, starting from the time the model is deployed.
+                  # Therefore, the cost of operating this model will be at least
+                  # `rate` * `min_nodes` * number of hours since last billing cycle,
+                  # where `rate` is the cost per node-hour as documented in the
+                  # [pricing guide](/ml-engine/docs/pricing),
+                  # even if no predictions are performed. There is additional cost for each
+                  # prediction performed.
+                  #
+                  # Unlike manual scaling, if the load gets too heavy for the nodes
+                  # that are up, the service will automatically add nodes to handle the
+                  # increased load as well as scale back as traffic drops, always maintaining
+                  # at least `min_nodes`. You will be charged for the time in which additional
+                  # nodes are used.
+                  #
+                  # If `min_nodes` is not specified and AutoScaling is used with a [legacy
+                  # (MLS1) machine type](/ml-engine/docs/machine-types-online-prediction),
+                  # `min_nodes` defaults to 0, in which case, when traffic to a model stops
+                  # (and after a cool-down period), nodes will be shut down and no charges will
+                  # be incurred until traffic to the model resumes.
+                  #
+                  # If `min_nodes` is not specified and AutoScaling is used with a [Compute
+                  # Engine (N1) machine type](/ml-engine/docs/machine-types-online-prediction),
+                  # `min_nodes` defaults to 1. `min_nodes` must be at least 1 for use with a
+                  # Compute Engine machine type.
+                  #
+                  # Note that you cannot use AutoScaling if your version uses
+                  # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use
+                  # ManualScaling.
+                  #
+                  # You can set `min_nodes` when creating the model version, and you can also
+                  # update `min_nodes` for an existing version:
+                  # &lt;pre&gt;
+                  # update_body.json:
+                  # {
+                  #   &#x27;autoScaling&#x27;: {
+                  #     &#x27;minNodes&#x27;: 5
+                  #   }
+                  # }
+                  # &lt;/pre&gt;
+                  # HTTP request:
+                  # &lt;pre style=&quot;max-width: 626px;&quot;&gt;
+                  # PATCH
+                  # https://ml.googleapis.com/v1/{name=projects/*/models/*/versions/*}?update_mask=autoScaling.minNodes
+                  # -d @./update_body.json
+                  # &lt;/pre&gt;
+            },
+            &quot;explanationConfig&quot;: { # Message holding configuration options for explaining model predictions. # Optional. Configures explainability features on the model&#x27;s version.
+                # Some explanation features require additional metadata to be loaded
+                # as part of the model payload.
+                # There are two feature attribution methods supported for TensorFlow models:
+                # integrated gradients and sampled Shapley.
+                # [Learn more about feature
+                # attributions.](/ai-platform/prediction/docs/ai-explanations/overview)
+              &quot;integratedGradientsAttribution&quot;: { # Attributes credit by computing the Aumann-Shapley value taking advantage # Attributes credit by computing the Aumann-Shapley value taking advantage
+                  # of the model&#x27;s fully differentiable structure. Refer to this paper for
+                  # more details: http://proceedings.mlr.press/v70/sundararajan17a.html
+                  # of the model&#x27;s fully differentiable structure. Refer to this paper for
+                  # more details: https://arxiv.org/abs/1703.01365
+                &quot;numIntegralSteps&quot;: 42, # Number of steps for approximating the path integral.
+                    # A good value to start is 50 and gradually increase until the
+                    # sum to diff property is met within the desired error range.
+              },
+              &quot;xraiAttribution&quot;: { # Attributes credit by computing the XRAI taking advantage # Attributes credit by computing the XRAI taking advantage
+                  # of the model&#x27;s fully differentiable structure. Refer to this paper for
+                  # more details: https://arxiv.org/abs/1906.02825
+                  # Currently only implemented for models with natural image inputs.
+                  # of the model&#x27;s fully differentiable structure. Refer to this paper for
+                  # more details: https://arxiv.org/abs/1906.02825
+                  # Currently only implemented for models with natural image inputs.
+                &quot;numIntegralSteps&quot;: 42, # Number of steps for approximating the path integral.
+                    # A good value to start is 50 and gradually increase until the
+                    # sum to diff property is met within the desired error range.
+              },
+              &quot;sampledShapleyAttribution&quot;: { # An attribution method that approximates Shapley values for features that # An attribution method that approximates Shapley values for features that
+                  # contribute to the label being predicted. A sampling strategy is used to
+                  # approximate the value rather than considering all subsets of features.
+                  # contribute to the label being predicted. A sampling strategy is used to
+                  # approximate the value rather than considering all subsets of features.
+                &quot;numPaths&quot;: 42, # The number of feature permutations to consider when approximating the
+                    # Shapley values.
+              },
+            },
+            &quot;pythonVersion&quot;: &quot;A String&quot;, # Required. The version of Python used in prediction.
+                #
+                # The following Python versions are available:
+                #
+                # * Python &#x27;3.7&#x27; is available when `runtime_version` is set to &#x27;1.15&#x27; or
+                #   later.
+                # * Python &#x27;3.5&#x27; is available when `runtime_version` is set to a version
+                #   from &#x27;1.4&#x27; to &#x27;1.14&#x27;.
+                # * Python &#x27;2.7&#x27; is available when `runtime_version` is set to &#x27;1.15&#x27; or
+                #   earlier.
+                #
+                # Read more about the Python versions available for [each runtime
+                # version](/ml-engine/docs/runtime-version-list).
+            &quot;requestLoggingConfig&quot;: { # Configuration for logging request-response pairs to a BigQuery table. # Optional. *Only* specify this field in a
+                # projects.models.versions.patch
+                # request. Specifying it in a
+                # projects.models.versions.create
+                # request has no effect.
+                #
+                # Configures the request-response pair logging on predictions from this
+                # Version.
+                # Online prediction requests to a model version and the responses to these
+                # requests are converted to raw strings and saved to the specified BigQuery
+                # table. Logging is constrained by [BigQuery quotas and
+                # limits](/bigquery/quotas). If your project exceeds BigQuery quotas or limits,
+                # AI Platform Prediction does not log request-response pairs, but it continues
+                # to serve predictions.
+                #
+                # If you are using [continuous
+                # evaluation](/ml-engine/docs/continuous-evaluation/), you do not need to
+                # specify this configuration manually. Setting up continuous evaluation
+                # automatically enables logging of request-response pairs.
+              &quot;samplingPercentage&quot;: 3.14, # Percentage of requests to be logged, expressed as a fraction from 0 to 1.
+                  # For example, if you want to log 10% of requests, enter `0.1`. The sampling
+                  # window is the lifetime of the model version. Defaults to 0.
+              &quot;bigqueryTableName&quot;: &quot;A String&quot;, # Required. Fully qualified BigQuery table name in the following format:
+                  # &quot;&lt;var&gt;project_id&lt;/var&gt;.&lt;var&gt;dataset_name&lt;/var&gt;.&lt;var&gt;table_name&lt;/var&gt;&quot;
+                  #
+                  # The specified table must already exist, and the &quot;Cloud ML Service Agent&quot;
+                  # for your project must have permission to write to it. The table must have
+                  # the following [schema](/bigquery/docs/schemas):
+                  #
+                  # &lt;table&gt;
+                  #   &lt;tr&gt;&lt;th&gt;Field name&lt;/th&gt;&lt;th style=&quot;display: table-cell&quot;&gt;Type&lt;/th&gt;
+                  #     &lt;th style=&quot;display: table-cell&quot;&gt;Mode&lt;/th&gt;&lt;/tr&gt;
+                  #   &lt;tr&gt;&lt;td&gt;model&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+                  #   &lt;tr&gt;&lt;td&gt;model_version&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+                  #   &lt;tr&gt;&lt;td&gt;time&lt;/td&gt;&lt;td&gt;TIMESTAMP&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+                  #   &lt;tr&gt;&lt;td&gt;raw_data&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+                  #   &lt;tr&gt;&lt;td&gt;raw_prediction&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;NULLABLE&lt;/td&gt;&lt;/tr&gt;
+                  #   &lt;tr&gt;&lt;td&gt;groundtruth&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;NULLABLE&lt;/td&gt;&lt;/tr&gt;
+                  # &lt;/table&gt;
+            },
+            &quot;manualScaling&quot;: { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
+                # model. You should generally use `auto_scaling` with an appropriate
+                # `min_nodes` instead, but this option is available if you want more
+                # predictable billing. Beware that latency and error rates will increase
+                # if the traffic exceeds the capability of the system to serve it based
+                # on the selected number of nodes.
+              &quot;nodes&quot;: 42, # The number of nodes to allocate for this model. These nodes are always up,
+                  # starting from the time the model is deployed, so the cost of operating
+                  # this model will be proportional to `nodes` * number of hours since
+                  # last billing cycle plus the cost for each prediction performed.
+            },
+            &quot;createTime&quot;: &quot;A String&quot;, # Output only. The time the version was created.
+            &quot;lastUseTime&quot;: &quot;A String&quot;, # Output only. The time the version was last used for prediction.
+            &quot;framework&quot;: &quot;A String&quot;, # Optional. The machine learning framework AI Platform uses to train
+                # this version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,
+                # `XGBOOST`. If you do not specify a framework, AI Platform
+                # will analyze files in the deployment_uri to determine a framework. If you
+                # choose `SCIKIT_LEARN` or `XGBOOST`, you must also set the runtime version
+                # of the model to 1.4 or greater.
+                #
+                # Do **not** specify a framework if you&#x27;re deploying a [custom
+                # prediction routine](/ml-engine/docs/tensorflow/custom-prediction-routines).
+                #
+                # If you specify a [Compute Engine (N1) machine
+                # type](/ml-engine/docs/machine-types-online-prediction) in the
+                # `machineType` field, you must specify `TENSORFLOW`
+                # for the framework.
+            &quot;predictionClass&quot;: &quot;A String&quot;, # Optional. The fully qualified name
+                # (&lt;var&gt;module_name&lt;/var&gt;.&lt;var&gt;class_name&lt;/var&gt;) of a class that implements
+                # the Predictor interface described in this reference field. The module
+                # containing this class should be included in a package provided to the
+                # [`packageUris` field](#Version.FIELDS.package_uris).
+                #
+                # Specify this field if and only if you are deploying a [custom prediction
+                # routine (beta)](/ml-engine/docs/tensorflow/custom-prediction-routines).
+                # If you specify this field, you must set
+                # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater and
+                # you must set `machineType` to a [legacy (MLS1)
+                # machine type](/ml-engine/docs/machine-types-online-prediction).
+                #
+                # The following code sample provides the Predictor interface:
+                #
+                # &lt;pre style=&quot;max-width: 626px;&quot;&gt;
+                # class Predictor(object):
+                # &quot;&quot;&quot;Interface for constructing custom predictors.&quot;&quot;&quot;
+                #
+                # def predict(self, instances, **kwargs):
+                #     &quot;&quot;&quot;Performs custom prediction.
+                #
+                #     Instances are the decoded values from the request. They have already
+                #     been deserialized from JSON.
+                #
+                #     Args:
+                #         instances: A list of prediction input instances.
+                #         **kwargs: A dictionary of keyword args provided as additional
+                #             fields on the predict request body.
+                #
+                #     Returns:
+                #         A list of outputs containing the prediction results. This list must
+                #         be JSON serializable.
+                #     &quot;&quot;&quot;
+                #     raise NotImplementedError()
+                #
+                # @classmethod
+                # def from_path(cls, model_dir):
+                #     &quot;&quot;&quot;Creates an instance of Predictor using the given path.
+                #
+                #     Loading of the predictor should be done in this method.
+                #
+                #     Args:
+                #         model_dir: The local directory that contains the exported model
+                #             file along with any additional files uploaded when creating the
+                #             version resource.
+                #
+                #     Returns:
+                #         An instance implementing this Predictor class.
+                #     &quot;&quot;&quot;
+                #     raise NotImplementedError()
+                # &lt;/pre&gt;
+                #
+                # Learn more about [the Predictor interface and custom prediction
+                # routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
+            &quot;isDefault&quot;: True or False, # Output only. If true, this version will be used to handle prediction
+                # requests that do not specify a version.
+                #
+                # You can change the default version by calling
+                # projects.models.versions.setDefault.
+            &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
+                # prevent simultaneous updates of a model from overwriting each other.
+                # It is strongly suggested that systems make use of the `etag` in the
+                # read-modify-write cycle to perform model updates in order to avoid race
+                # conditions: An `etag` is returned in the response to `GetVersion`, and
+                # systems are expected to put that etag in the request to `UpdateVersion` to
+                # ensure that their change will be applied to the model as intended.
+            &quot;serviceAccount&quot;: &quot;A String&quot;, # Optional. Specifies the service account for resource access control.
+            &quot;errorMessage&quot;: &quot;A String&quot;, # Output only. The details of a failure or a cancellation.
+            &quot;deploymentUri&quot;: &quot;A String&quot;, # Required. The Cloud Storage location of the trained model used to
+                # create the version. See the
+                # [guide to model
+                # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
+                # information.
+                #
+                # When passing Version to
+                # projects.models.versions.create
+                # the model service uses the specified location as the source of the model.
+                # Once deployed, the model version is hosted by the prediction service, so
+                # this location is useful only as a historical record.
+                # The total number of model files can&#x27;t exceed 1000.
+            &quot;runtimeVersion&quot;: &quot;A String&quot;, # Required. The AI Platform runtime version to use for this deployment.
+                #
+                # For more information, see the
+                # [runtime version list](/ml-engine/docs/runtime-version-list) and
+                # [how to manage runtime versions](/ml-engine/docs/versioning).
+            &quot;description&quot;: &quot;A String&quot;, # Optional. The description specified for the version when it was created.
+          },
+          &quot;onlinePredictionLogging&quot;: True or False, # Optional. If true, online prediction access logs are sent to StackDriver
+              # Logging. These logs are like standard server access logs, containing
+              # information like timestamp and latency for each request. Note that
+              # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
+              # your project receives prediction requests at a high queries per second rate
+              # (QPS). Estimate your costs before enabling this option.
+              #
+              # Default is false.
         },
-        &quot;onlinePredictionConsoleLogging&quot;: True or False, # Optional. If true, online prediction nodes send `stderr` and `stdout`
-            # streams to Stackdriver Logging. These can be more verbose than the standard
-            # access logs (see `onlinePredictionLogging`) and can incur higher cost.
-            # However, they are helpful for debugging. Note that
-            # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
-            # your project receives prediction requests at a high QPS. Estimate your
-            # costs before enabling this option.
-            #
-            # Default is false.
-        &quot;regions&quot;: [ # Optional. The list of regions where the model is going to be deployed.
-            # Only one region per model is supported.
-            # Defaults to &#x27;us-central1&#x27; if nothing is set.
-            # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
-            # for AI Platform services.
-            # Note:
-            # *   No matter where a model is deployed, it can always be accessed by
-            #     users from anywhere, both for online and batch prediction.
-            # *   The region for a batch prediction job is set by the region field when
-            #     submitting the batch prediction job and does not take its value from
-            #     this field.
-          &quot;A String&quot;,
-        ],
-        &quot;description&quot;: &quot;A String&quot;, # Optional. The description specified for the model when it was created.
-        &quot;onlinePredictionLogging&quot;: True or False, # Optional. If true, online prediction access logs are sent to StackDriver
-            # Logging. These logs are like standard server access logs, containing
-            # information like timestamp and latency for each request. Note that
-            # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
-            # your project receives prediction requests at a high queries per second rate
-            # (QPS). Estimate your costs before enabling this option.
-            #
-            # Default is false.
-        &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
-            # prevent simultaneous updates of a model from overwriting each other.
-            # It is strongly suggested that systems make use of the `etag` in the
-            # read-modify-write cycle to perform model updates in order to avoid race
-            # conditions: An `etag` is returned in the response to `GetModel`, and
-            # systems are expected to put that etag in the request to `UpdateModel` to
-            # ensure that their change will be applied to the model as intended.
-        &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your models.
-            # Each label is a key-value pair, where both the key and the value are
-            # arbitrary strings that you supply.
-            # For more information, see the documentation on
-            # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
-          &quot;a_key&quot;: &quot;A String&quot;,
-        },
-        &quot;name&quot;: &quot;A String&quot;, # Required. The name specified for the model when it was created.
-            #
-            # The model name must be unique within the project it is created in.
-      },
     ],
+    &quot;nextPageToken&quot;: &quot;A String&quot;, # Optional. Pass this token as the `page_token` field of the request for a
+        # subsequent call.
   }</pre>
 </div>
 
@@ -2154,393 +2154,393 @@
     The object takes the form of:
 
 { # Represents a machine learning solution.
-    # 
-    # A model can have multiple versions, each of which is a deployed, trained
-    # model ready to receive prediction requests. The model itself is just a
-    # container.
-  &quot;defaultVersion&quot;: { # Represents a version of the model. # Output only. The default version of the model. This version will be used to
-      # handle prediction requests that do not specify a version.
       # 
-      # You can change the default version by calling
-      # projects.models.versions.setDefault.
-      #
-      # Each version is a trained model deployed in the cloud, ready to handle
-      # prediction requests. A model can have multiple versions. You can get
-      # information about all of the versions of a given model by calling
-      # projects.models.versions.list.
-    &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
-        # Only specify this field if you have specified a Compute Engine (N1) machine
-        # type in the `machineType` field. Learn more about [using GPUs for online
-        # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-        # Note that the AcceleratorConfig can be used in both Jobs and Versions.
-        # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
-        # [accelerators for online
-        # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-      &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
-      &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
-    },
-    &quot;isDefault&quot;: True or False, # Output only. If true, this version will be used to handle prediction
-        # requests that do not specify a version.
-        #
-        # You can change the default version by calling
-        # projects.methods.versions.setDefault.
-    &quot;manualScaling&quot;: { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
-        # model. You should generally use `auto_scaling` with an appropriate
-        # `min_nodes` instead, but this option is available if you want more
-        # predictable billing. Beware that latency and error rates will increase
-        # if the traffic exceeds that capability of the system to serve it based
-        # on the selected number of nodes.
-      &quot;nodes&quot;: 42, # The number of nodes to allocate for this model. These nodes are always up,
-          # starting from the time the model is deployed, so the cost of operating
-          # this model will be proportional to `nodes` * number of hours since
-          # last billing cycle plus the cost for each prediction performed.
-    },
-    &quot;state&quot;: &quot;A String&quot;, # Output only. The state of a version.
-    &quot;name&quot;: &quot;A String&quot;, # Required. The name specified for the version when it was created.
-        #
-        # The version name must be unique within the model it is created in.
-    &quot;serviceAccount&quot;: &quot;A String&quot;, # Optional. Specifies the service account for resource access control.
-    &quot;pythonVersion&quot;: &quot;A String&quot;, # Required. The version of Python used in prediction.
-        #
-        # The following Python versions are available:
-        #
-        # * Python &#x27;3.7&#x27; is available when `runtime_version` is set to &#x27;1.15&#x27; or
-        #   later.
-        # * Python &#x27;3.5&#x27; is available when `runtime_version` is set to a version
-        #   from &#x27;1.4&#x27; to &#x27;1.14&#x27;.
-        # * Python &#x27;2.7&#x27; is available when `runtime_version` is set to &#x27;1.15&#x27; or
-        #   earlier.
-        #
-        # Read more about the Python versions available for [each runtime
-        # version](/ml-engine/docs/runtime-version-list).
-    &quot;lastUseTime&quot;: &quot;A String&quot;, # Output only. The time the version was last used for prediction.
-    &quot;predictionClass&quot;: &quot;A String&quot;, # Optional. The fully qualified name
-        # (&lt;var&gt;module_name&lt;/var&gt;.&lt;var&gt;class_name&lt;/var&gt;) of a class that implements
-        # the Predictor interface described in this reference field. The module
-        # containing this class should be included in a package provided to the
-        # [`packageUris` field](#Version.FIELDS.package_uris).
-        #
-        # Specify this field if and only if you are deploying a [custom prediction
-        # routine (beta)](/ml-engine/docs/tensorflow/custom-prediction-routines).
-        # If you specify this field, you must set
-        # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater and
-        # you must set `machineType` to a [legacy (MLS1)
-        # machine type](/ml-engine/docs/machine-types-online-prediction).
-        #
-        # The following code sample provides the Predictor interface:
-        #
-        # &lt;pre style=&quot;max-width: 626px;&quot;&gt;
-        # class Predictor(object):
-        # &quot;&quot;&quot;Interface for constructing custom predictors.&quot;&quot;&quot;
-        #
-        # def predict(self, instances, **kwargs):
-        #     &quot;&quot;&quot;Performs custom prediction.
-        #
-        #     Instances are the decoded values from the request. They have already
-        #     been deserialized from JSON.
-        #
-        #     Args:
-        #         instances: A list of prediction input instances.
-        #         **kwargs: A dictionary of keyword args provided as additional
-        #             fields on the predict request body.
-        #
-        #     Returns:
-        #         A list of outputs containing the prediction results. This list must
-        #         be JSON serializable.
-        #     &quot;&quot;&quot;
-        #     raise NotImplementedError()
-        #
-        # @classmethod
-        # def from_path(cls, model_dir):
-        #     &quot;&quot;&quot;Creates an instance of Predictor using the given path.
-        #
-        #     Loading of the predictor should be done in this method.
-        #
-        #     Args:
-        #         model_dir: The local directory that contains the exported model
-        #             file along with any additional files uploaded when creating the
-        #             version resource.
-        #
-        #     Returns:
-        #         An instance implementing this Predictor class.
-        #     &quot;&quot;&quot;
-        #     raise NotImplementedError()
-        # &lt;/pre&gt;
-        #
-        # Learn more about [the Predictor interface and custom prediction
-        # routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
-    &quot;deploymentUri&quot;: &quot;A String&quot;, # Required. The Cloud Storage location of the trained model used to
-        # create the version. See the
-        # [guide to model
-        # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
-        # information.
-        #
-        # When passing Version to
-        # projects.models.versions.create
-        # the model service uses the specified location as the source of the model.
-        # Once deployed, the model version is hosted by the prediction service, so
-        # this location is useful only as a historical record.
-        # The total number of model files can&#x27;t exceed 1000.
-    &quot;packageUris&quot;: [ # Optional. Cloud Storage paths (`gs://…`) of packages for [custom
-        # prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines)
-        # or [scikit-learn pipelines with custom
-        # code](/ml-engine/docs/scikit/exporting-for-prediction#custom-pipeline-code).
-        #
-        # For a custom prediction routine, one of these packages must contain your
-        # Predictor class (see
-        # [`predictionClass`](#Version.FIELDS.prediction_class)). Additionally,
-        # include any dependencies used by your Predictor or scikit-learn pipeline
-        # uses that are not already included in your selected [runtime
-        # version](/ml-engine/docs/tensorflow/runtime-version-list).
-        #
-        # If you specify this field, you must also set
-        # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater.
+      # A model can have multiple versions, each of which is a deployed, trained
+      # model ready to receive prediction requests. The model itself is just a
+      # container.
+    &quot;description&quot;: &quot;A String&quot;, # Optional. The description specified for the model when it was created.
+    &quot;regions&quot;: [ # Optional. The list of regions where the model is going to be deployed.
+        # Only one region per model is supported.
+        # Defaults to &#x27;us-central1&#x27; if nothing is set.
+        # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
+        # for AI Platform services.
+        # Note:
+        # *   No matter where a model is deployed, it can always be accessed by
+        #     users from anywhere, both for online and batch prediction.
+        # *   The region for a batch prediction job is set by the region field when
+        #     submitting the batch prediction job and does not take its value from
+        #     this field.
       &quot;A String&quot;,
     ],
-    &quot;explanationConfig&quot;: { # Message holding configuration options for explaining model predictions. # Optional. Configures explainability features on the model&#x27;s version.
-        # Some explanation features require additional metadata to be loaded
-        # as part of the model payload.
-        # There are two feature attribution methods supported for TensorFlow models:
-        # integrated gradients and sampled Shapley.
-        # [Learn more about feature
-        # attributions.](/ai-platform/prediction/docs/ai-explanations/overview)
-      &quot;integratedGradientsAttribution&quot;: { # Attributes credit by computing the Aumann-Shapley value taking advantage # Attributes credit by computing the Aumann-Shapley value taking advantage
-          # of the model&#x27;s fully differentiable structure. Refer to this paper for
-          # more details: http://proceedings.mlr.press/v70/sundararajan17a.html
-          # of the model&#x27;s fully differentiable structure. Refer to this paper for
-          # more details: https://arxiv.org/abs/1703.01365
-        &quot;numIntegralSteps&quot;: 42, # Number of steps for approximating the path integral.
-            # A good value to start is 50 and gradually increase until the
-            # sum to diff property is met within the desired error range.
-      },
-      &quot;sampledShapleyAttribution&quot;: { # An attribution method that approximates Shapley values for features that # An attribution method that approximates Shapley values for features that
-          # contribute to the label being predicted. A sampling strategy is used to
-          # approximate the value rather than considering all subsets of features.
-          # contribute to the label being predicted. A sampling strategy is used to
-          # approximate the value rather than considering all subsets of features.
-        &quot;numPaths&quot;: 42, # The number of feature permutations to consider when approximating the
-            # Shapley values.
-      },
-      &quot;xraiAttribution&quot;: { # Attributes credit by computing the XRAI taking advantage # Attributes credit by computing the XRAI taking advantage
-          # of the model&#x27;s fully differentiable structure. Refer to this paper for
-          # more details: https://arxiv.org/abs/1906.02825
-          # Currently only implemented for models with natural image inputs.
-          # of the model&#x27;s fully differentiable structure. Refer to this paper for
-          # more details: https://arxiv.org/abs/1906.02825
-          # Currently only implemented for models with natural image inputs.
-        &quot;numIntegralSteps&quot;: 42, # Number of steps for approximating the path integral.
-            # A good value to start is 50 and gradually increase until the
-            # sum to diff property is met within the desired error range.
-      },
-    },
-    &quot;autoScaling&quot;: { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
-        # response to increases and decreases in traffic. Care should be
-        # taken to ramp up traffic according to the model&#x27;s ability to scale
-        # or you will start seeing increases in latency and 429 response codes.
-        #
-        # Note that you cannot use AutoScaling if your version uses
-        # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use specify
-        # `manual_scaling`.
-      &quot;minNodes&quot;: 42, # Optional. The minimum number of nodes to allocate for this model. These
-          # nodes are always up, starting from the time the model is deployed.
-          # Therefore, the cost of operating this model will be at least
-          # `rate` * `min_nodes` * number of hours since last billing cycle,
-          # where `rate` is the cost per node-hour as documented in the
-          # [pricing guide](/ml-engine/docs/pricing),
-          # even if no predictions are performed. There is additional cost for each
-          # prediction performed.
-          #
-          # Unlike manual scaling, if the load gets too heavy for the nodes
-          # that are up, the service will automatically add nodes to handle the
-          # increased load as well as scale back as traffic drops, always maintaining
-          # at least `min_nodes`. You will be charged for the time in which additional
-          # nodes are used.
-          #
-          # If `min_nodes` is not specified and AutoScaling is used with a [legacy
-          # (MLS1) machine type](/ml-engine/docs/machine-types-online-prediction),
-          # `min_nodes` defaults to 0, in which case, when traffic to a model stops
-          # (and after a cool-down period), nodes will be shut down and no charges will
-          # be incurred until traffic to the model resumes.
-          #
-          # If `min_nodes` is not specified and AutoScaling is used with a [Compute
-          # Engine (N1) machine type](/ml-engine/docs/machine-types-online-prediction),
-          # `min_nodes` defaults to 1. `min_nodes` must be at least 1 for use with a
-          # Compute Engine machine type.
-          #
-          # Note that you cannot use AutoScaling if your version uses
-          # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use
-          # ManualScaling.
-          #
-          # You can set `min_nodes` when creating the model version, and you can also
-          # update `min_nodes` for an existing version:
-          # &lt;pre&gt;
-          # update_body.json:
-          # {
-          #   &#x27;autoScaling&#x27;: {
-          #     &#x27;minNodes&#x27;: 5
-          #   }
-          # }
-          # &lt;/pre&gt;
-          # HTTP request:
-          # &lt;pre style=&quot;max-width: 626px;&quot;&gt;
-          # PATCH
-          # https://ml.googleapis.com/v1/{name=projects/*/models/*/versions/*}?update_mask=autoScaling.minNodes
-          # -d @./update_body.json
-          # &lt;/pre&gt;
-    },
-    &quot;createTime&quot;: &quot;A String&quot;, # Output only. The time the version was created.
-    &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your model
-        # versions. Each label is a key-value pair, where both the key and the value
-        # are arbitrary strings that you supply.
-        # For more information, see the documentation on
-        # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
-      &quot;a_key&quot;: &quot;A String&quot;,
-    },
-    &quot;requestLoggingConfig&quot;: { # Configuration for logging request-response pairs to a BigQuery table. # Optional. *Only* specify this field in a
-        # projects.models.versions.patch
-        # request. Specifying it in a
-        # projects.models.versions.create
-        # request has no effect.
-        #
-        # Configures the request-response pair logging on predictions from this
-        # Version.
-        # Online prediction requests to a model version and the responses to these
-        # requests are converted to raw strings and saved to the specified BigQuery
-        # table. Logging is constrained by [BigQuery quotas and
-        # limits](/bigquery/quotas). If your project exceeds BigQuery quotas or limits,
-        # AI Platform Prediction does not log request-response pairs, but it continues
-        # to serve predictions.
-        #
-        # If you are using [continuous
-        # evaluation](/ml-engine/docs/continuous-evaluation/), you do not need to
-        # specify this configuration manually. Setting up continuous evaluation
-        # automatically enables logging of request-response pairs.
-      &quot;bigqueryTableName&quot;: &quot;A String&quot;, # Required. Fully qualified BigQuery table name in the following format:
-          # &quot;&lt;var&gt;project_id&lt;/var&gt;.&lt;var&gt;dataset_name&lt;/var&gt;.&lt;var&gt;table_name&lt;/var&gt;&quot;
-          #
-          # The specified table must already exist, and the &quot;Cloud ML Service Agent&quot;
-          # for your project must have permission to write to it. The table must have
-          # the following [schema](/bigquery/docs/schemas):
-          #
-          # &lt;table&gt;
-          #   &lt;tr&gt;&lt;th&gt;Field name&lt;/th&gt;&lt;th style=&quot;display: table-cell&quot;&gt;Type&lt;/th&gt;
-          #     &lt;th style=&quot;display: table-cell&quot;&gt;Mode&lt;/th&gt;&lt;/tr&gt;
-          #   &lt;tr&gt;&lt;td&gt;model&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
-          #   &lt;tr&gt;&lt;td&gt;model_version&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
-          #   &lt;tr&gt;&lt;td&gt;time&lt;/td&gt;&lt;td&gt;TIMESTAMP&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
-          #   &lt;tr&gt;&lt;td&gt;raw_data&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
-          #   &lt;tr&gt;&lt;td&gt;raw_prediction&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;NULLABLE&lt;/td&gt;&lt;/tr&gt;
-          #   &lt;tr&gt;&lt;td&gt;groundtruth&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;NULLABLE&lt;/td&gt;&lt;/tr&gt;
-          # &lt;/table&gt;
-      &quot;samplingPercentage&quot;: 3.14, # Percentage of requests to be logged, expressed as a fraction from 0 to 1.
-          # For example, if you want to log 10% of requests, enter `0.1`. The sampling
-          # window is the lifetime of the model version. Defaults to 0.
-    },
-    &quot;errorMessage&quot;: &quot;A String&quot;, # Output only. The details of a failure or a cancellation.
-    &quot;machineType&quot;: &quot;A String&quot;, # Optional. The type of machine on which to serve the model. Currently only
-        # applies to online prediction service. If this field is not specified, it
-        # defaults to `mls1-c1-m2`.
-        #
-        # Online prediction supports the following machine types:
-        #
-        # * `mls1-c1-m2`
-        # * `mls1-c4-m2`
-        # * `n1-standard-2`
-        # * `n1-standard-4`
-        # * `n1-standard-8`
-        # * `n1-standard-16`
-        # * `n1-standard-32`
-        # * `n1-highmem-2`
-        # * `n1-highmem-4`
-        # * `n1-highmem-8`
-        # * `n1-highmem-16`
-        # * `n1-highmem-32`
-        # * `n1-highcpu-2`
-        # * `n1-highcpu-4`
-        # * `n1-highcpu-8`
-        # * `n1-highcpu-16`
-        # * `n1-highcpu-32`
-        #
-        # `mls1-c1-m2` is generally available. All other machine types are available
-        # in beta. Learn more about the [differences between machine
-        # types](/ml-engine/docs/machine-types-online-prediction).
-    &quot;runtimeVersion&quot;: &quot;A String&quot;, # Required. The AI Platform runtime version to use for this deployment.
-        #
-        # For more information, see the
-        # [runtime version list](/ml-engine/docs/runtime-version-list) and
-        # [how to manage runtime versions](/ml-engine/docs/versioning).
-    &quot;description&quot;: &quot;A String&quot;, # Optional. The description specified for the version when it was created.
-    &quot;framework&quot;: &quot;A String&quot;, # Optional. The machine learning framework AI Platform uses to train
-        # this version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,
-        # `XGBOOST`. If you do not specify a framework, AI Platform
-        # will analyze files in the deployment_uri to determine a framework. If you
-        # choose `SCIKIT_LEARN` or `XGBOOST`, you must also set the runtime version
-        # of the model to 1.4 or greater.
-        #
-        # Do **not** specify a framework if you&#x27;re deploying a [custom
-        # prediction routine](/ml-engine/docs/tensorflow/custom-prediction-routines).
-        #
-        # If you specify a [Compute Engine (N1) machine
-        # type](/ml-engine/docs/machine-types-online-prediction) in the
-        # `machineType` field, you must specify `TENSORFLOW`
-        # for the framework.
+    &quot;name&quot;: &quot;A String&quot;, # Required. The name specified for the model when it was created.
+        # 
+        # The model name must be unique within the project it is created in.
+    &quot;onlinePredictionConsoleLogging&quot;: True or False, # Optional. If true, online prediction nodes send `stderr` and `stdout`
+        # streams to Stackdriver Logging. These can be more verbose than the standard
+        # access logs (see `onlinePredictionLogging`) and can incur higher cost.
+        # However, they are helpful for debugging. Note that
+        # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
+        # your project receives prediction requests at a high QPS. Estimate your
+        # costs before enabling this option.
+        # 
+        # Default is false.
     &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
         # prevent simultaneous updates of a model from overwriting each other.
         # It is strongly suggested that systems make use of the `etag` in the
         # read-modify-write cycle to perform model updates in order to avoid race
-        # conditions: An `etag` is returned in the response to `GetVersion`, and
-        # systems are expected to put that etag in the request to `UpdateVersion` to
+        # conditions: An `etag` is returned in the response to `GetModel`, and
+        # systems are expected to put that etag in the request to `UpdateModel` to
         # ensure that their change will be applied to the model as intended.
-  },
-  &quot;onlinePredictionConsoleLogging&quot;: True or False, # Optional. If true, online prediction nodes send `stderr` and `stdout`
-      # streams to Stackdriver Logging. These can be more verbose than the standard
-      # access logs (see `onlinePredictionLogging`) and can incur higher cost.
-      # However, they are helpful for debugging. Note that
-      # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
-      # your project receives prediction requests at a high QPS. Estimate your
-      # costs before enabling this option.
-      # 
-      # Default is false.
-  &quot;regions&quot;: [ # Optional. The list of regions where the model is going to be deployed.
-      # Only one region per model is supported.
-      # Defaults to &#x27;us-central1&#x27; if nothing is set.
-      # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
-      # for AI Platform services.
-      # Note:
-      # *   No matter where a model is deployed, it can always be accessed by
-      #     users from anywhere, both for online and batch prediction.
-      # *   The region for a batch prediction job is set by the region field when
-      #     submitting the batch prediction job and does not take its value from
-      #     this field.
-    &quot;A String&quot;,
-  ],
-  &quot;description&quot;: &quot;A String&quot;, # Optional. The description specified for the model when it was created.
-  &quot;onlinePredictionLogging&quot;: True or False, # Optional. If true, online prediction access logs are sent to StackDriver
-      # Logging. These logs are like standard server access logs, containing
-      # information like timestamp and latency for each request. Note that
-      # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
-      # your project receives prediction requests at a high queries per second rate
-      # (QPS). Estimate your costs before enabling this option.
-      # 
-      # Default is false.
-  &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
-      # prevent simultaneous updates of a model from overwriting each other.
-      # It is strongly suggested that systems make use of the `etag` in the
-      # read-modify-write cycle to perform model updates in order to avoid race
-      # conditions: An `etag` is returned in the response to `GetModel`, and
-      # systems are expected to put that etag in the request to `UpdateModel` to
-      # ensure that their change will be applied to the model as intended.
-  &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your models.
-      # Each label is a key-value pair, where both the key and the value are
-      # arbitrary strings that you supply.
-      # For more information, see the documentation on
-      # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
-    &quot;a_key&quot;: &quot;A String&quot;,
-  },
-  &quot;name&quot;: &quot;A String&quot;, # Required. The name specified for the model when it was created.
-      # 
-      # The model name must be unique within the project it is created in.
-}
+    &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your models.
+        # Each label is a key-value pair, where both the key and the value are
+        # arbitrary strings that you supply.
+        # For more information, see the documentation on
+        # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
+      &quot;a_key&quot;: &quot;A String&quot;,
+    },
+    &quot;defaultVersion&quot;: { # Represents a version of the model. # Output only. The default version of the model. This version will be used to
+        # handle prediction requests that do not specify a version.
+        # 
+        # You can change the default version by calling
+        # projects.models.versions.setDefault.
+        #
+        # Each version is a trained model deployed in the cloud, ready to handle
+        # prediction requests. A model can have multiple versions. You can get
+        # information about all of the versions of a given model by calling
+        # projects.models.versions.list.
+      &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your model
+          # versions. Each label is a key-value pair, where both the key and the value
+          # are arbitrary strings that you supply.
+          # For more information, see the documentation on
+          # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
+        &quot;a_key&quot;: &quot;A String&quot;,
+      },
+      &quot;machineType&quot;: &quot;A String&quot;, # Optional. The type of machine on which to serve the model. Currently only
+          # applies to online prediction service. If this field is not specified, it
+          # defaults to `mls1-c1-m2`.
+          #
+          # Online prediction supports the following machine types:
+          #
+          # * `mls1-c1-m2`
+          # * `mls1-c4-m2`
+          # * `n1-standard-2`
+          # * `n1-standard-4`
+          # * `n1-standard-8`
+          # * `n1-standard-16`
+          # * `n1-standard-32`
+          # * `n1-highmem-2`
+          # * `n1-highmem-4`
+          # * `n1-highmem-8`
+          # * `n1-highmem-16`
+          # * `n1-highmem-32`
+          # * `n1-highcpu-2`
+          # * `n1-highcpu-4`
+          # * `n1-highcpu-8`
+          # * `n1-highcpu-16`
+          # * `n1-highcpu-32`
+          #
+          # `mls1-c1-m2` is generally available. All other machine types are available
+          # in beta. Learn more about the [differences between machine
+          # types](/ml-engine/docs/machine-types-online-prediction).
+      &quot;packageUris&quot;: [ # Optional. Cloud Storage paths (`gs://…`) of packages for [custom
+          # prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines)
+          # or [scikit-learn pipelines with custom
+          # code](/ml-engine/docs/scikit/exporting-for-prediction#custom-pipeline-code).
+          #
+          # For a custom prediction routine, one of these packages must contain your
+          # Predictor class (see
+          # [`predictionClass`](#Version.FIELDS.prediction_class)). Additionally,
+          # include any dependencies used by your Predictor or scikit-learn pipeline
+          # uses that are not already included in your selected [runtime
+          # version](/ml-engine/docs/tensorflow/runtime-version-list).
+          #
+          # If you specify this field, you must also set
+          # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater.
+        &quot;A String&quot;,
+      ],
+      &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
+          # Only specify this field if you have specified a Compute Engine (N1) machine
+          # type in the `machineType` field. Learn more about [using GPUs for online
+          # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+          # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+          # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+          # [accelerators for online
+          # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+        &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
+        &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
+      },
+      &quot;state&quot;: &quot;A String&quot;, # Output only. The state of a version.
+      &quot;name&quot;: &quot;A String&quot;, # Required. The name specified for the version when it was created.
+          #
+          # The version name must be unique within the model it is created in.
+      &quot;autoScaling&quot;: { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
+          # response to increases and decreases in traffic. Care should be
+          # taken to ramp up traffic according to the model&#x27;s ability to scale
+          # or you will start seeing increases in latency and 429 response codes.
+          #
+          # Note that you cannot use AutoScaling if your version uses
+          # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must specify
+          # `manual_scaling`.
+        &quot;minNodes&quot;: 42, # Optional. The minimum number of nodes to allocate for this model. These
+            # nodes are always up, starting from the time the model is deployed.
+            # Therefore, the cost of operating this model will be at least
+            # `rate` * `min_nodes` * number of hours since last billing cycle,
+            # where `rate` is the cost per node-hour as documented in the
+            # [pricing guide](/ml-engine/docs/pricing),
+            # even if no predictions are performed. There is additional cost for each
+            # prediction performed.
+            #
+            # Unlike manual scaling, if the load gets too heavy for the nodes
+            # that are up, the service will automatically add nodes to handle the
+            # increased load as well as scale back as traffic drops, always maintaining
+            # at least `min_nodes`. You will be charged for the time in which additional
+            # nodes are used.
+            #
+            # If `min_nodes` is not specified and AutoScaling is used with a [legacy
+            # (MLS1) machine type](/ml-engine/docs/machine-types-online-prediction),
+            # `min_nodes` defaults to 0, in which case, when traffic to a model stops
+            # (and after a cool-down period), nodes will be shut down and no charges will
+            # be incurred until traffic to the model resumes.
+            #
+            # If `min_nodes` is not specified and AutoScaling is used with a [Compute
+            # Engine (N1) machine type](/ml-engine/docs/machine-types-online-prediction),
+            # `min_nodes` defaults to 1. `min_nodes` must be at least 1 for use with a
+            # Compute Engine machine type.
+            #
+            # Note that you cannot use AutoScaling if your version uses
+            # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use
+            # ManualScaling.
+            #
+            # You can set `min_nodes` when creating the model version, and you can also
+            # update `min_nodes` for an existing version:
+            # &lt;pre&gt;
+            # update_body.json:
+            # {
+            #   &#x27;autoScaling&#x27;: {
+            #     &#x27;minNodes&#x27;: 5
+            #   }
+            # }
+            # &lt;/pre&gt;
+            # HTTP request:
+            # &lt;pre style=&quot;max-width: 626px;&quot;&gt;
+            # PATCH
+            # https://ml.googleapis.com/v1/{name=projects/*/models/*/versions/*}?update_mask=autoScaling.minNodes
+            # -d @./update_body.json
+            # &lt;/pre&gt;
+      },
+      &quot;explanationConfig&quot;: { # Message holding configuration options for explaining model predictions. # Optional. Configures explainability features on the model&#x27;s version.
+          # Some explanation features require additional metadata to be loaded
+          # as part of the model payload.
+          # There are two feature attribution methods supported for TensorFlow models:
+          # integrated gradients and sampled Shapley.
+          # [Learn more about feature
+          # attributions.](/ai-platform/prediction/docs/ai-explanations/overview)
+        &quot;integratedGradientsAttribution&quot;: { # Attributes credit by computing the Aumann-Shapley value taking advantage # Attributes credit by computing the Aumann-Shapley value taking advantage
+            # of the model&#x27;s fully differentiable structure. Refer to this paper for
+            # more details: http://proceedings.mlr.press/v70/sundararajan17a.html
+            # of the model&#x27;s fully differentiable structure. Refer to this paper for
+            # more details: https://arxiv.org/abs/1703.01365
+          &quot;numIntegralSteps&quot;: 42, # Number of steps for approximating the path integral.
+              # A good value to start is 50 and gradually increase until the
+              # sum to diff property is met within the desired error range.
+        },
+        &quot;xraiAttribution&quot;: { # Attributes credit by computing the XRAI taking advantage # Attributes credit by computing the XRAI taking advantage
+            # of the model&#x27;s fully differentiable structure. Refer to this paper for
+            # more details: https://arxiv.org/abs/1906.02825
+            # Currently only implemented for models with natural image inputs.
+            # of the model&#x27;s fully differentiable structure. Refer to this paper for
+            # more details: https://arxiv.org/abs/1906.02825
+            # Currently only implemented for models with natural image inputs.
+          &quot;numIntegralSteps&quot;: 42, # Number of steps for approximating the path integral.
+              # A good value to start is 50 and gradually increase until the
+              # sum to diff property is met within the desired error range.
+        },
+        &quot;sampledShapleyAttribution&quot;: { # An attribution method that approximates Shapley values for features that # An attribution method that approximates Shapley values for features that
+            # contribute to the label being predicted. A sampling strategy is used to
+            # approximate the value rather than considering all subsets of features.
+            # contribute to the label being predicted. A sampling strategy is used to
+            # approximate the value rather than considering all subsets of features.
+          &quot;numPaths&quot;: 42, # The number of feature permutations to consider when approximating the
+              # Shapley values.
+        },
+      },
+      &quot;pythonVersion&quot;: &quot;A String&quot;, # Required. The version of Python used in prediction.
+          #
+          # The following Python versions are available:
+          #
+          # * Python &#x27;3.7&#x27; is available when `runtime_version` is set to &#x27;1.15&#x27; or
+          #   later.
+          # * Python &#x27;3.5&#x27; is available when `runtime_version` is set to a version
+          #   from &#x27;1.4&#x27; to &#x27;1.14&#x27;.
+          # * Python &#x27;2.7&#x27; is available when `runtime_version` is set to &#x27;1.15&#x27; or
+          #   earlier.
+          #
+          # Read more about the Python versions available for [each runtime
+          # version](/ml-engine/docs/runtime-version-list).
+      &quot;requestLoggingConfig&quot;: { # Configuration for logging request-response pairs to a BigQuery table. # Optional. *Only* specify this field in a
+          # projects.models.versions.patch
+          # request. Specifying it in a
+          # projects.models.versions.create
+          # request has no effect.
+          #
+          # Configures the request-response pair logging on predictions from this
+          # Version.
+          # Online prediction requests to a model version and the responses to these
+          # requests are converted to raw strings and saved to the specified BigQuery
+          # table. Logging is constrained by [BigQuery quotas and
+          # limits](/bigquery/quotas). If your project exceeds BigQuery quotas or limits,
+          # AI Platform Prediction does not log request-response pairs, but it continues
+          # to serve predictions.
+          #
+          # If you are using [continuous
+          # evaluation](/ml-engine/docs/continuous-evaluation/), you do not need to
+          # specify this configuration manually. Setting up continuous evaluation
+          # automatically enables logging of request-response pairs.
+        &quot;samplingPercentage&quot;: 3.14, # Percentage of requests to be logged, expressed as a fraction from 0 to 1.
+            # For example, if you want to log 10% of requests, enter `0.1`. The sampling
+            # window is the lifetime of the model version. Defaults to 0.
+        &quot;bigqueryTableName&quot;: &quot;A String&quot;, # Required. Fully qualified BigQuery table name in the following format:
+            # &quot;&lt;var&gt;project_id&lt;/var&gt;.&lt;var&gt;dataset_name&lt;/var&gt;.&lt;var&gt;table_name&lt;/var&gt;&quot;
+            #
+            # The specified table must already exist, and the &quot;Cloud ML Service Agent&quot;
+            # for your project must have permission to write to it. The table must have
+            # the following [schema](/bigquery/docs/schemas):
+            #
+            # &lt;table&gt;
+            #   &lt;tr&gt;&lt;th&gt;Field name&lt;/th&gt;&lt;th style=&quot;display: table-cell&quot;&gt;Type&lt;/th&gt;
+            #     &lt;th style=&quot;display: table-cell&quot;&gt;Mode&lt;/th&gt;&lt;/tr&gt;
+            #   &lt;tr&gt;&lt;td&gt;model&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+            #   &lt;tr&gt;&lt;td&gt;model_version&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+            #   &lt;tr&gt;&lt;td&gt;time&lt;/td&gt;&lt;td&gt;TIMESTAMP&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+            #   &lt;tr&gt;&lt;td&gt;raw_data&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+            #   &lt;tr&gt;&lt;td&gt;raw_prediction&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;NULLABLE&lt;/td&gt;&lt;/tr&gt;
+            #   &lt;tr&gt;&lt;td&gt;groundtruth&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;NULLABLE&lt;/td&gt;&lt;/tr&gt;
+            # &lt;/table&gt;
+      },
+      &quot;manualScaling&quot;: { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
+          # model. You should generally use `auto_scaling` with an appropriate
+          # `min_nodes` instead, but this option is available if you want more
+          # predictable billing. Beware that latency and error rates will increase
+          # if the traffic exceeds the capability of the system to serve it based
+          # on the selected number of nodes.
+        &quot;nodes&quot;: 42, # The number of nodes to allocate for this model. These nodes are always up,
+            # starting from the time the model is deployed, so the cost of operating
+            # this model will be proportional to `nodes` * number of hours since
+            # last billing cycle plus the cost for each prediction performed.
+      },
+      &quot;createTime&quot;: &quot;A String&quot;, # Output only. The time the version was created.
+      &quot;lastUseTime&quot;: &quot;A String&quot;, # Output only. The time the version was last used for prediction.
+      &quot;framework&quot;: &quot;A String&quot;, # Optional. The machine learning framework AI Platform uses to train
+          # this version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,
+          # `XGBOOST`. If you do not specify a framework, AI Platform
+          # will analyze files in the deployment_uri to determine a framework. If you
+          # choose `SCIKIT_LEARN` or `XGBOOST`, you must also set the runtime version
+          # of the model to 1.4 or greater.
+          #
+          # Do **not** specify a framework if you&#x27;re deploying a [custom
+          # prediction routine](/ml-engine/docs/tensorflow/custom-prediction-routines).
+          #
+          # If you specify a [Compute Engine (N1) machine
+          # type](/ml-engine/docs/machine-types-online-prediction) in the
+          # `machineType` field, you must specify `TENSORFLOW`
+          # for the framework.
+      &quot;predictionClass&quot;: &quot;A String&quot;, # Optional. The fully qualified name
+          # (&lt;var&gt;module_name&lt;/var&gt;.&lt;var&gt;class_name&lt;/var&gt;) of a class that implements
+          # the Predictor interface described in this reference field. The module
+          # containing this class should be included in a package provided to the
+          # [`packageUris` field](#Version.FIELDS.package_uris).
+          #
+          # Specify this field if and only if you are deploying a [custom prediction
+          # routine (beta)](/ml-engine/docs/tensorflow/custom-prediction-routines).
+          # If you specify this field, you must set
+          # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater and
+          # you must set `machineType` to a [legacy (MLS1)
+          # machine type](/ml-engine/docs/machine-types-online-prediction).
+          #
+          # The following code sample provides the Predictor interface:
+          #
+          # &lt;pre style=&quot;max-width: 626px;&quot;&gt;
+          # class Predictor(object):
+          # &quot;&quot;&quot;Interface for constructing custom predictors.&quot;&quot;&quot;
+          #
+          # def predict(self, instances, **kwargs):
+          #     &quot;&quot;&quot;Performs custom prediction.
+          #
+          #     Instances are the decoded values from the request. They have already
+          #     been deserialized from JSON.
+          #
+          #     Args:
+          #         instances: A list of prediction input instances.
+          #         **kwargs: A dictionary of keyword args provided as additional
+          #             fields on the predict request body.
+          #
+          #     Returns:
+          #         A list of outputs containing the prediction results. This list must
+          #         be JSON serializable.
+          #     &quot;&quot;&quot;
+          #     raise NotImplementedError()
+          #
+          # @classmethod
+          # def from_path(cls, model_dir):
+          #     &quot;&quot;&quot;Creates an instance of Predictor using the given path.
+          #
+          #     Loading of the predictor should be done in this method.
+          #
+          #     Args:
+          #         model_dir: The local directory that contains the exported model
+          #             file along with any additional files uploaded when creating the
+          #             version resource.
+          #
+          #     Returns:
+          #         An instance implementing this Predictor class.
+          #     &quot;&quot;&quot;
+          #     raise NotImplementedError()
+          # &lt;/pre&gt;
+          #
+          # Learn more about [the Predictor interface and custom prediction
+          # routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
+      &quot;isDefault&quot;: True or False, # Output only. If true, this version will be used to handle prediction
+          # requests that do not specify a version.
+          #
+          # You can change the default version by calling
+          # projects.methods.versions.setDefault.
+      &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
+          # prevent simultaneous updates of a model from overwriting each other.
+          # It is strongly suggested that systems make use of the `etag` in the
+          # read-modify-write cycle to perform model updates in order to avoid race
+          # conditions: An `etag` is returned in the response to `GetVersion`, and
+          # systems are expected to put that etag in the request to `UpdateVersion` to
+          # ensure that their change will be applied to the model as intended.
+      &quot;serviceAccount&quot;: &quot;A String&quot;, # Optional. Specifies the service account for resource access control.
+      &quot;errorMessage&quot;: &quot;A String&quot;, # Output only. The details of a failure or a cancellation.
+      &quot;deploymentUri&quot;: &quot;A String&quot;, # Required. The Cloud Storage location of the trained model used to
+          # create the version. See the
+          # [guide to model
+          # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
+          # information.
+          #
+          # When passing Version to
+          # projects.models.versions.create
+          # the model service uses the specified location as the source of the model.
+          # Once deployed, the model version is hosted by the prediction service, so
+          # this location is useful only as a historical record.
+          # The total number of model files can&#x27;t exceed 1000.
+      &quot;runtimeVersion&quot;: &quot;A String&quot;, # Required. The AI Platform runtime version to use for this deployment.
+          #
+          # For more information, see the
+          # [runtime version list](/ml-engine/docs/runtime-version-list) and
+          # [how to manage runtime versions](/ml-engine/docs/versioning).
+      &quot;description&quot;: &quot;A String&quot;, # Optional. The description specified for the version when it was created.
+    },
+    &quot;onlinePredictionLogging&quot;: True or False, # Optional. If true, online prediction access logs are sent to StackDriver
+        # Logging. These logs are like standard server access logs, containing
+        # information like timestamp and latency for each request. Note that
+        # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
+        # your project receives prediction requests at a high queries per second rate
+        # (QPS). Estimate your costs before enabling this option.
+        # 
+        # Default is false.
+  }
 
   updateMask: string, Required. Specifies the path, relative to `Model`, of the field to update.
 
@@ -2567,9 +2567,6 @@
 
     { # This resource represents a long-running operation that is the result of a
       # network API call.
-    &quot;name&quot;: &quot;A String&quot;, # The server-assigned name, which is only unique within the same service that
-        # originally returns it. If you use the default HTTP mapping, the
-        # `name` should be a resource name ending with `operations/{unique_id}`.
     &quot;error&quot;: { # The `Status` type defines a logical error model that is suitable for # The error result of the operation in case of failure or cancellation.
         # different programming environments, including REST APIs and RPC APIs. It is
         # used by [gRPC](https://github.com/grpc). Each `Status` message contains
@@ -2577,22 +2574,16 @@
         #
         # You can find out more about this error model and how to work with it in the
         # [API Design Guide](https://cloud.google.com/apis/design/errors).
-      &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
-      &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any
-          # user-facing error message should be localized and sent in the
-          # google.rpc.Status.details field, or localized by the client.
       &quot;details&quot;: [ # A list of messages that carry the error details.  There is a common set of
           # message types for APIs to use.
         {
           &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
         },
       ],
-    },
-    &quot;metadata&quot;: { # Service-specific metadata associated with the operation.  It typically
-        # contains progress information and common metadata such as create time.
-        # Some services might not provide such metadata.  Any method that returns a
-        # long-running operation should document the metadata type, if any.
-      &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
+      &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any
+          # user-facing error message should be localized and sent in the
+          # google.rpc.Status.details field, or localized by the client.
+      &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
     },
     &quot;done&quot;: True or False, # If the value is `false`, it means the operation is still in progress.
         # If `true`, the operation is completed, and either `error` or `response` is
@@ -2607,6 +2598,15 @@
         # `TakeSnapshotResponse`.
       &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
     },
+    &quot;metadata&quot;: { # Service-specific metadata associated with the operation.  It typically
+        # contains progress information and common metadata such as create time.
+        # Some services might not provide such metadata.  Any method that returns a
+        # long-running operation should document the metadata type, if any.
+      &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
+    },
+    &quot;name&quot;: &quot;A String&quot;, # The server-assigned name, which is only unique within the same service that
+        # originally returns it. If you use the default HTTP mapping, the
+        # `name` should be a resource name ending with `operations/{unique_id}`.
   }</pre>
 </div>
 
@@ -2624,11 +2624,6 @@
     The object takes the form of:
 
 { # Request message for `SetIamPolicy` method.
-    &quot;updateMask&quot;: &quot;A String&quot;, # OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only
-        # the fields in the mask will be modified. If no mask is provided, the
-        # following default mask is used:
-        # 
-        # `paths: &quot;bindings, etag&quot;`
     &quot;policy&quot;: { # An Identity and Access Management (IAM) policy, which specifies access # REQUIRED: The complete policy to be applied to the `resource`. The size of
         # the policy is limited to a few 10s of KB. An empty policy is a
         # valid policy but certain Cloud Platform services (such as Projects)
@@ -2699,30 +2694,18 @@
         #
         # For a description of IAM and its features, see the
         # [IAM documentation](https://cloud.google.com/iam/docs/).
-      &quot;version&quot;: 42, # Specifies the format of the policy.
-          #
-          # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
-          # are rejected.
-          #
-          # Any operation that affects conditional role bindings must specify version
-          # `3`. This requirement applies to the following operations:
-          #
-          # * Getting a policy that includes a conditional role binding
-          # * Adding a conditional role binding to a policy
-          # * Changing a conditional role binding in a policy
-          # * Removing any role binding, with or without a condition, from a policy
-          #   that includes conditions
+      &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
+          # prevent simultaneous updates of a policy from overwriting each other.
+          # It is strongly suggested that systems make use of the `etag` in the
+          # read-modify-write cycle to perform policy updates in order to avoid race
+          # conditions: An `etag` is returned in the response to `getIamPolicy`, and
+          # systems are expected to put that etag in the request to `setIamPolicy` to
+          # ensure that their change will be applied to the same version of the policy.
           #
           # **Important:** If you use IAM Conditions, you must include the `etag` field
           # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
           # you to overwrite a version `3` policy with a version `1` policy, and all of
           # the conditions in the version `3` policy are lost.
-          #
-          # If a policy does not include any conditions, operations on that policy may
-          # specify any valid version or leave the field unset.
-          #
-          # To learn which resources support conditions in their IAM policies, see the
-          # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
       &quot;auditConfigs&quot;: [ # Specifies cloud audit logging configuration for this policy.
         { # Specifies the audit configuration for a service.
             # The configuration determines which permission types are logged, and what
@@ -2739,7 +2722,7 @@
             #     {
             #       &quot;audit_configs&quot;: [
             #         {
-            #           &quot;service&quot;: &quot;allServices&quot;
+            #           &quot;service&quot;: &quot;allServices&quot;,
             #           &quot;audit_log_configs&quot;: [
             #             {
             #               &quot;log_type&quot;: &quot;DATA_READ&quot;,
@@ -2748,18 +2731,18 @@
             #               ]
             #             },
             #             {
-            #               &quot;log_type&quot;: &quot;DATA_WRITE&quot;,
+            #               &quot;log_type&quot;: &quot;DATA_WRITE&quot;
             #             },
             #             {
-            #               &quot;log_type&quot;: &quot;ADMIN_READ&quot;,
+            #               &quot;log_type&quot;: &quot;ADMIN_READ&quot;
             #             }
             #           ]
             #         },
             #         {
-            #           &quot;service&quot;: &quot;sampleservice.googleapis.com&quot;
+            #           &quot;service&quot;: &quot;sampleservice.googleapis.com&quot;,
             #           &quot;audit_log_configs&quot;: [
             #             {
-            #               &quot;log_type&quot;: &quot;DATA_READ&quot;,
+            #               &quot;log_type&quot;: &quot;DATA_READ&quot;
             #             },
             #             {
             #               &quot;log_type&quot;: &quot;DATA_WRITE&quot;,
@@ -2791,27 +2774,53 @@
                 #           ]
                 #         },
                 #         {
-                #           &quot;log_type&quot;: &quot;DATA_WRITE&quot;,
+                #           &quot;log_type&quot;: &quot;DATA_WRITE&quot;
                 #         }
                 #       ]
                 #     }
                 #
                 # This enables &#x27;DATA_READ&#x27; and &#x27;DATA_WRITE&#x27; logging, while exempting
                 # jose@example.com from DATA_READ logging.
+              &quot;logType&quot;: &quot;A String&quot;, # The log type that this config enables.
               &quot;exemptedMembers&quot;: [ # Specifies the identities that do not cause logging for this type of
                   # permission.
                   # Follows the same format of Binding.members.
                 &quot;A String&quot;,
               ],
-              &quot;logType&quot;: &quot;A String&quot;, # The log type that this config enables.
             },
           ],
         },
       ],
+      &quot;version&quot;: 42, # Specifies the format of the policy.
+          #
+          # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
+          # are rejected.
+          #
+          # Any operation that affects conditional role bindings must specify version
+          # `3`. This requirement applies to the following operations:
+          #
+          # * Getting a policy that includes a conditional role binding
+          # * Adding a conditional role binding to a policy
+          # * Changing a conditional role binding in a policy
+          # * Removing any role binding, with or without a condition, from a policy
+          #   that includes conditions
+          #
+          # **Important:** If you use IAM Conditions, you must include the `etag` field
+          # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+          # you to overwrite a version `3` policy with a version `1` policy, and all of
+          # the conditions in the version `3` policy are lost.
+          #
+          # If a policy does not include any conditions, operations on that policy may
+          # specify any valid version or leave the field unset.
+          #
+          # To learn which resources support conditions in their IAM policies, see the
+          # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
       &quot;bindings&quot;: [ # Associates a list of `members` to a `role`. Optionally, may specify a
           # `condition` that determines how and when the `bindings` are applied. Each
           # of the `bindings` must contain at least one member.
         { # Associates `members` with a `role`.
+          &quot;role&quot;: &quot;A String&quot;, # Role that is assigned to `members`.
+              # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
           &quot;condition&quot;: { # Represents a textual expression in the Common Expression Language (CEL) # The condition that is associated with this binding.
               #
               # If the condition evaluates to `true`, then this binding applies to the
@@ -2854,8 +2863,6 @@
               # The exact variables and functions that may be referenced within an expression
               # are determined by the service that evaluates it. See the service
               # documentation for additional information.
-            &quot;description&quot;: &quot;A String&quot;, # Optional. Description of the expression. This is a longer text which
-                # describes the expression, e.g. when hovered over it in a UI.
             &quot;expression&quot;: &quot;A String&quot;, # Textual representation of an expression in Common Expression Language
                 # syntax.
             &quot;title&quot;: &quot;A String&quot;, # Optional. Title for the expression, i.e. a short string describing
@@ -2863,6 +2870,8 @@
                 # expression.
             &quot;location&quot;: &quot;A String&quot;, # Optional. String indicating the location of the expression for error
                 # reporting, e.g. a file name and a position in the file.
+            &quot;description&quot;: &quot;A String&quot;, # Optional. Description of the expression. This is a longer text which
+                # describes the expression, e.g. when hovered over it in a UI.
           },
           &quot;members&quot;: [ # Specifies the identities requesting access for a Cloud Platform resource.
               # `members` can have the following values:
@@ -2909,23 +2918,14 @@
               #
             &quot;A String&quot;,
           ],
-          &quot;role&quot;: &quot;A String&quot;, # Role that is assigned to `members`.
-              # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
         },
       ],
-      &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
-          # prevent simultaneous updates of a policy from overwriting each other.
-          # It is strongly suggested that systems make use of the `etag` in the
-          # read-modify-write cycle to perform policy updates in order to avoid race
-          # conditions: An `etag` is returned in the response to `getIamPolicy`, and
-          # systems are expected to put that etag in the request to `setIamPolicy` to
-          # ensure that their change will be applied to the same version of the policy.
-          #
-          # **Important:** If you use IAM Conditions, you must include the `etag` field
-          # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
-          # you to overwrite a version `3` policy with a version `1` policy, and all of
-          # the conditions in the version `3` policy are lost.
     },
+    &quot;updateMask&quot;: &quot;A String&quot;, # OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only
+        # the fields in the mask will be modified. If no mask is provided, the
+        # following default mask is used:
+        # 
+        # `paths: &quot;bindings, etag&quot;`
   }
 
   x__xgafv: string, V1 error format.
@@ -3003,30 +3003,18 @@
       #
       # For a description of IAM and its features, see the
       # [IAM documentation](https://cloud.google.com/iam/docs/).
-    &quot;version&quot;: 42, # Specifies the format of the policy.
-        #
-        # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
-        # are rejected.
-        #
-        # Any operation that affects conditional role bindings must specify version
-        # `3`. This requirement applies to the following operations:
-        #
-        # * Getting a policy that includes a conditional role binding
-        # * Adding a conditional role binding to a policy
-        # * Changing a conditional role binding in a policy
-        # * Removing any role binding, with or without a condition, from a policy
-        #   that includes conditions
+    &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
+        # prevent simultaneous updates of a policy from overwriting each other.
+        # It is strongly suggested that systems make use of the `etag` in the
+        # read-modify-write cycle to perform policy updates in order to avoid race
+        # conditions: An `etag` is returned in the response to `getIamPolicy`, and
+        # systems are expected to put that etag in the request to `setIamPolicy` to
+        # ensure that their change will be applied to the same version of the policy.
         #
         # **Important:** If you use IAM Conditions, you must include the `etag` field
         # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
         # you to overwrite a version `3` policy with a version `1` policy, and all of
         # the conditions in the version `3` policy are lost.
-        #
-        # If a policy does not include any conditions, operations on that policy may
-        # specify any valid version or leave the field unset.
-        #
-        # To learn which resources support conditions in their IAM policies, see the
-        # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
     &quot;auditConfigs&quot;: [ # Specifies cloud audit logging configuration for this policy.
       { # Specifies the audit configuration for a service.
           # The configuration determines which permission types are logged, and what
@@ -3043,7 +3031,7 @@
           #     {
           #       &quot;audit_configs&quot;: [
           #         {
-          #           &quot;service&quot;: &quot;allServices&quot;
+          #           &quot;service&quot;: &quot;allServices&quot;,
           #           &quot;audit_log_configs&quot;: [
           #             {
           #               &quot;log_type&quot;: &quot;DATA_READ&quot;,
@@ -3052,18 +3040,18 @@
           #               ]
           #             },
           #             {
-          #               &quot;log_type&quot;: &quot;DATA_WRITE&quot;,
+          #               &quot;log_type&quot;: &quot;DATA_WRITE&quot;
           #             },
           #             {
-          #               &quot;log_type&quot;: &quot;ADMIN_READ&quot;,
+          #               &quot;log_type&quot;: &quot;ADMIN_READ&quot;
           #             }
           #           ]
           #         },
           #         {
-          #           &quot;service&quot;: &quot;sampleservice.googleapis.com&quot;
+          #           &quot;service&quot;: &quot;sampleservice.googleapis.com&quot;,
           #           &quot;audit_log_configs&quot;: [
           #             {
-          #               &quot;log_type&quot;: &quot;DATA_READ&quot;,
+          #               &quot;log_type&quot;: &quot;DATA_READ&quot;
           #             },
           #             {
           #               &quot;log_type&quot;: &quot;DATA_WRITE&quot;,
@@ -3095,27 +3083,53 @@
               #           ]
               #         },
               #         {
-              #           &quot;log_type&quot;: &quot;DATA_WRITE&quot;,
+              #           &quot;log_type&quot;: &quot;DATA_WRITE&quot;
               #         }
               #       ]
               #     }
               #
               # This enables &#x27;DATA_READ&#x27; and &#x27;DATA_WRITE&#x27; logging, while exempting
               # jose@example.com from DATA_READ logging.
+            &quot;logType&quot;: &quot;A String&quot;, # The log type that this config enables.
             &quot;exemptedMembers&quot;: [ # Specifies the identities that do not cause logging for this type of
                 # permission.
                 # Follows the same format of Binding.members.
               &quot;A String&quot;,
             ],
-            &quot;logType&quot;: &quot;A String&quot;, # The log type that this config enables.
           },
         ],
       },
     ],
+    &quot;version&quot;: 42, # Specifies the format of the policy.
+        #
+        # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
+        # are rejected.
+        #
+        # Any operation that affects conditional role bindings must specify version
+        # `3`. This requirement applies to the following operations:
+        #
+        # * Getting a policy that includes a conditional role binding
+        # * Adding a conditional role binding to a policy
+        # * Changing a conditional role binding in a policy
+        # * Removing any role binding, with or without a condition, from a policy
+        #   that includes conditions
+        #
+        # **Important:** If you use IAM Conditions, you must include the `etag` field
+        # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+        # you to overwrite a version `3` policy with a version `1` policy, and all of
+        # the conditions in the version `3` policy are lost.
+        #
+        # If a policy does not include any conditions, operations on that policy may
+        # specify any valid version or leave the field unset.
+        #
+        # To learn which resources support conditions in their IAM policies, see the
+        # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
     &quot;bindings&quot;: [ # Associates a list of `members` to a `role`. Optionally, may specify a
         # `condition` that determines how and when the `bindings` are applied. Each
         # of the `bindings` must contain at least one member.
       { # Associates `members` with a `role`.
+        &quot;role&quot;: &quot;A String&quot;, # Role that is assigned to `members`.
+            # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
         &quot;condition&quot;: { # Represents a textual expression in the Common Expression Language (CEL) # The condition that is associated with this binding.
             #
             # If the condition evaluates to `true`, then this binding applies to the
@@ -3158,8 +3172,6 @@
             # The exact variables and functions that may be referenced within an expression
             # are determined by the service that evaluates it. See the service
             # documentation for additional information.
-          &quot;description&quot;: &quot;A String&quot;, # Optional. Description of the expression. This is a longer text which
-              # describes the expression, e.g. when hovered over it in a UI.
           &quot;expression&quot;: &quot;A String&quot;, # Textual representation of an expression in Common Expression Language
               # syntax.
           &quot;title&quot;: &quot;A String&quot;, # Optional. Title for the expression, i.e. a short string describing
@@ -3167,6 +3179,8 @@
               # expression.
           &quot;location&quot;: &quot;A String&quot;, # Optional. String indicating the location of the expression for error
               # reporting, e.g. a file name and a position in the file.
+          &quot;description&quot;: &quot;A String&quot;, # Optional. Description of the expression. This is a longer text which
+              # describes the expression, e.g. when hovered over it in a UI.
         },
         &quot;members&quot;: [ # Specifies the identities requesting access for a Cloud Platform resource.
             # `members` can have the following values:
@@ -3213,22 +3227,8 @@
             #
           &quot;A String&quot;,
         ],
-        &quot;role&quot;: &quot;A String&quot;, # Role that is assigned to `members`.
-            # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
       },
     ],
-    &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
-        # prevent simultaneous updates of a policy from overwriting each other.
-        # It is strongly suggested that systems make use of the `etag` in the
-        # read-modify-write cycle to perform policy updates in order to avoid race
-        # conditions: An `etag` is returned in the response to `getIamPolicy`, and
-        # systems are expected to put that etag in the request to `setIamPolicy` to
-        # ensure that their change will be applied to the same version of the policy.
-        #
-        # **Important:** If you use IAM Conditions, you must include the `etag` field
-        # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
-        # you to overwrite a version `3` policy with a version `1` policy, and all of
-        # the conditions in the version `3` policy are lost.
   }</pre>
 </div>