chore: regens API reference docs (#889)

diff --git a/docs/dyn/ml_v1.projects.models.versions.html b/docs/dyn/ml_v1.projects.models.versions.html
index d77bf26..d948048 100644
--- a/docs/dyn/ml_v1.projects.models.versions.html
+++ b/docs/dyn/ml_v1.projects.models.versions.html
@@ -72,10 +72,10 @@
 
 </style>
 
-<h1><a href="ml_v1.html">Cloud Machine Learning Engine</a> . <a href="ml_v1.projects.html">projects</a> . <a href="ml_v1.projects.models.html">models</a> . <a href="ml_v1.projects.models.versions.html">versions</a></h1>
+<h1><a href="ml_v1.html">AI Platform Training & Prediction API</a> . <a href="ml_v1.projects.html">projects</a> . <a href="ml_v1.projects.models.html">models</a> . <a href="ml_v1.projects.models.versions.html">versions</a></h1>
 <h2>Instance Methods</h2>
 <p class="toc_element">
-  <code><a href="#create">create(parent, body, x__xgafv=None)</a></code></p>
+  <code><a href="#create">create(parent, body=None, x__xgafv=None)</a></code></p>
 <p class="firstline">Creates a new version of a model from a trained TensorFlow model.</p>
 <p class="toc_element">
   <code><a href="#delete">delete(name, x__xgafv=None)</a></code></p>
@@ -90,14 +90,14 @@
   <code><a href="#list_next">list_next(previous_request, previous_response)</a></code></p>
 <p class="firstline">Retrieves the next page of results.</p>
 <p class="toc_element">
-  <code><a href="#patch">patch(name, body, updateMask=None, x__xgafv=None)</a></code></p>
+  <code><a href="#patch">patch(name, body=None, updateMask=None, x__xgafv=None)</a></code></p>
 <p class="firstline">Updates the specified Version resource.</p>
 <p class="toc_element">
   <code><a href="#setDefault">setDefault(name, body=None, x__xgafv=None)</a></code></p>
 <p class="firstline">Designates a version to be the default for the model.</p>
 <h3>Method Details</h3>
 <div class="method">
-    <code class="details" id="create">create(parent, body, x__xgafv=None)</code>
+    <code class="details" id="create">create(parent, body=None, x__xgafv=None)</code>
   <pre>Creates a new version of a model from a trained TensorFlow model.
 
 If the version created in the cloud by this call is the first deployed
@@ -105,11 +105,11 @@
 model. When you add a version to a model that already has one or more
 versions, the default version does not automatically change. If you want a
 new version to be the default, you must call
-[projects.models.versions.setDefault](/ml-engine/reference/rest/v1/projects.models.versions/setDefault).
+projects.models.versions.setDefault.
 
 Args:
   parent: string, Required. The name of the model. (required)
-  body: object, The request body. (required)
+  body: object, The request body.
     The object takes the form of:
 
 { # Represents a version of the model.
@@ -117,48 +117,27 @@
     # Each version is a trained model deployed in the cloud, ready to handle
     # prediction requests. A model can have multiple versions. You can get
     # information about all of the versions of a given model by calling
-    # [projects.models.versions.list](/ml-engine/reference/rest/v1/projects.models.versions/list).
-  "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
+    # projects.models.versions.list.
+  "acceleratorConfig": { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
+      # Only specify this field if you have specified a Compute Engine (N1) machine
+      # type in the `machineType` field. Learn more about [using GPUs for online
+      # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+      # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+      # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+      # [accelerators for online
+      # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+    "count": "A String", # The number of accelerators to attach to each machine running the job.
+    "type": "A String", # The type of accelerator to use.
+  },
   "labels": { # Optional. One or more labels that you can add, to organize your model
       # versions. Each label is a key-value pair, where both the key and the value
       # are arbitrary strings that you supply.
       # For more information, see the documentation on
-      # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
+      # &lt;a href="/ml-engine/docs/tensorflow/resource-labels"&gt;using labels&lt;/a&gt;.
     "a_key": "A String",
   },
-  "machineType": "A String", # Optional. The type of machine on which to serve the model. Currently only
-      # applies to online prediction service.
-      # <dl>
-      #   <dt>mls1-c1-m2</dt>
-      #   <dd>
-      #   The <b>default</b> machine type, with 1 core and 2 GB RAM. The deprecated
-      #   name for this machine type is "mls1-highmem-1".
-      #   </dd>
-      #   <dt>mls1-c4-m2</dt>
-      #   <dd>
-      #   In <b>Beta</b>. This machine type has 4 cores and 2 GB RAM. The
-      #   deprecated name for this machine type is "mls1-highcpu-4".
-      #   </dd>
-      # </dl>
-  "description": "A String", # Optional. The description specified for the version when it was created.
-  "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this deployment.
-      # If not set, AI Platform uses the default stable version, 1.0. For more
-      # information, see the
-      # [runtime version list](/ml-engine/docs/runtime-version-list) and
-      # [how to manage runtime versions](/ml-engine/docs/versioning).
-  "manualScaling": { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
-      # model. You should generally use `auto_scaling` with an appropriate
-      # `min_nodes` instead, but this option is available if you want more
-      # predictable billing. Beware that latency and error rates will increase
-      # if the traffic exceeds that capability of the system to serve it based
-      # on the selected number of nodes.
-    "nodes": 42, # The number of nodes to allocate for this model. These nodes are always up,
-        # starting from the time the model is deployed, so the cost of operating
-        # this model will be proportional to `nodes` * number of hours since
-        # last billing cycle plus the cost for each prediction performed.
-  },
   "predictionClass": "A String", # Optional. The fully qualified name
-      # (<var>module_name</var>.<var>class_name</var>) of a class that implements
+      # (&lt;var&gt;module_name&lt;/var&gt;.&lt;var&gt;class_name&lt;/var&gt;) of a class that implements
       # the Predictor interface described in this reference field. The module
       # containing this class should be included in a package provided to the
       # [`packageUris` field](#Version.FIELDS.package_uris).
@@ -166,11 +145,13 @@
       # Specify this field if and only if you are deploying a [custom prediction
       # routine (beta)](/ml-engine/docs/tensorflow/custom-prediction-routines).
       # If you specify this field, you must set
-      # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater.
+      # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater and
+      # you must set `machineType` to a [legacy (MLS1)
+      # machine type](/ml-engine/docs/machine-types-online-prediction).
       # 
       # The following code sample provides the Predictor interface:
       # 
-      # ```py
+      # &lt;pre style="max-width: 626px;"&gt;
       # class Predictor(object):
       # """Interface for constructing custom predictors."""
       # 
@@ -206,64 +187,12 @@
       #         An instance implementing this Predictor class.
       #     """
       #     raise NotImplementedError()
-      # ```
+      # &lt;/pre&gt;
       # 
       # Learn more about [the Predictor interface and custom prediction
       # routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
-  "autoScaling": { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
-      # response to increases and decreases in traffic. Care should be
-      # taken to ramp up traffic according to the model's ability to scale
-      # or you will start seeing increases in latency and 429 response codes.
-    "minNodes": 42, # Optional. The minimum number of nodes to allocate for this model. These
-        # nodes are always up, starting from the time the model is deployed.
-        # Therefore, the cost of operating this model will be at least
-        # `rate` * `min_nodes` * number of hours since last billing cycle,
-        # where `rate` is the cost per node-hour as documented in the
-        # [pricing guide](/ml-engine/docs/pricing),
-        # even if no predictions are performed. There is additional cost for each
-        # prediction performed.
-        #
-        # Unlike manual scaling, if the load gets too heavy for the nodes
-        # that are up, the service will automatically add nodes to handle the
-        # increased load as well as scale back as traffic drops, always maintaining
-        # at least `min_nodes`. You will be charged for the time in which additional
-        # nodes are used.
-        #
-        # If not specified, `min_nodes` defaults to 0, in which case, when traffic
-        # to a model stops (and after a cool-down period), nodes will be shut down
-        # and no charges will be incurred until traffic to the model resumes.
-        #
-        # You can set `min_nodes` when creating the model version, and you can also
-        # update `min_nodes` for an existing version:
-        # <pre>
-        # update_body.json:
-        # {
-        #   'autoScaling': {
-        #     'minNodes': 5
-        #   }
-        # }
-        # </pre>
-        # HTTP request:
-        # <pre>
-        # PATCH
-        # https://ml.googleapis.com/v1/{name=projects/*/models/*/versions/*}?update_mask=autoScaling.minNodes
-        # -d @./update_body.json
-        # </pre>
-  },
   "serviceAccount": "A String", # Optional. Specifies the service account for resource access control.
   "state": "A String", # Output only. The state of a version.
-  "pythonVersion": "A String", # Optional. The version of Python used in prediction. If not set, the default
-      # version is '2.7'. Python '3.5' is available when `runtime_version` is set
-      # to '1.4' and above. Python '2.7' works with all supported runtime versions.
-  "framework": "A String", # Optional. The machine learning framework AI Platform uses to train
-      # this version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,
-      # `XGBOOST`. If you do not specify a framework, AI Platform
-      # will analyze files in the deployment_uri to determine a framework. If you
-      # choose `SCIKIT_LEARN` or `XGBOOST`, you must also set the runtime version
-      # of the model to 1.4 or greater.
-      # 
-      # Do **not** specify a framework if you're deploying a [custom
-      # prediction routine](/ml-engine/docs/tensorflow/custom-prediction-routines).
   "packageUris": [ # Optional. Cloud Storage paths (`gs://…`) of packages for [custom
       # prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines)
       # or [scikit-learn pipelines with custom
@@ -295,20 +224,223 @@
       # information.
       # 
       # When passing Version to
-      # [projects.models.versions.create](/ml-engine/reference/rest/v1/projects.models.versions/create)
+      # projects.models.versions.create
       # the model service uses the specified location as the source of the model.
       # Once deployed, the model version is hosted by the prediction service, so
       # this location is useful only as a historical record.
       # The total number of model files can't exceed 1000.
-  "createTime": "A String", # Output only. The time the version was created.
+  "explanationConfig": { # Message holding configuration options for explaining model predictions. # Optional. Configures explainability features on the model's version.
+      # Some explanation features require additional metadata to be loaded
+      # as part of the model payload.
+      # There are two feature attribution methods supported for TensorFlow models:
+      # integrated gradients and sampled Shapley.
+      # [Learn more about feature
+      # attributions.](/ml-engine/docs/ai-explanations/overview)
+    "xraiAttribution": { # Attributes credit by computing the XRAI taking advantage # Attributes credit by computing the XRAI taking advantage
+        # of the model's fully differentiable structure. Refer to this paper for
+        # more details: https://arxiv.org/abs/1906.02825
+        # Currently only implemented for models with natural image inputs.
+        # of the model's fully differentiable structure. Refer to this paper for
+        # more details: https://arxiv.org/abs/1906.02825
+        # Currently only implemented for models with natural image inputs.
+      "numIntegralSteps": 42, # Number of steps for approximating the path integral.
+          # A good value to start is 50 and gradually increase until the
+          # sum to diff property is met within the desired error range.
+    },
+    "sampledShapleyAttribution": { # An attribution method that approximates Shapley values for features that # An attribution method that approximates Shapley values for features that
+        # contribute to the label being predicted. A sampling strategy is used to
+        # approximate the value rather than considering all subsets of features.
+        # contribute to the label being predicted. A sampling strategy is used to
+        # approximate the value rather than considering all subsets of features.
+      "numPaths": 42, # The number of feature permutations to consider when approximating the
+          # Shapley values.
+    },
+    "integratedGradientsAttribution": { # Attributes credit by computing the Aumann-Shapley value taking advantage # Attributes credit by computing the Aumann-Shapley value taking advantage
+        # of the model's fully differentiable structure. Refer to this paper for
+        # more details: http://proceedings.mlr.press/v70/sundararajan17a.html
+        # of the model's fully differentiable structure. Refer to this paper for
+        # more details: http://proceedings.mlr.press/v70/sundararajan17a.html
+      "numIntegralSteps": 42, # Number of steps for approximating the path integral.
+          # A good value to start is 50 and gradually increase until the
+          # sum to diff property is met within the desired error range.
+    },
+  },
   "isDefault": True or False, # Output only. If true, this version will be used to handle prediction
       # requests that do not specify a version.
       # 
       # You can change the default version by calling
-      # [projects.methods.versions.setDefault](/ml-engine/reference/rest/v1/projects.models.versions/setDefault).
-  "name": "A String", # Required.The name specified for the version when it was created.
+      # projects.models.versions.setDefault.
+  "machineType": "A String", # Optional. The type of machine on which to serve the model. Currently only
+      # applies to online prediction service. If this field is not specified, it
+      # defaults to `mls1-c1-m2`.
+      # 
+      # Online prediction supports the following machine types:
+      # 
+      # * `mls1-c1-m2`
+      # * `mls1-c4-m2`
+      # * `n1-standard-2`
+      # * `n1-standard-4`
+      # * `n1-standard-8`
+      # * `n1-standard-16`
+      # * `n1-standard-32`
+      # * `n1-highmem-2`
+      # * `n1-highmem-4`
+      # * `n1-highmem-8`
+      # * `n1-highmem-16`
+      # * `n1-highmem-32`
+      # * `n1-highcpu-2`
+      # * `n1-highcpu-4`
+      # * `n1-highcpu-8`
+      # * `n1-highcpu-16`
+      # * `n1-highcpu-32`
+      # 
+      # `mls1-c1-m2` is generally available. All other machine types are available
+      # in beta. Learn more about the [differences between machine
+      # types](/ml-engine/docs/machine-types-online-prediction).
+  "description": "A String", # Optional. The description specified for the version when it was created.
+  "runtimeVersion": "A String", # Required. The AI Platform runtime version to use for this deployment.
+      # 
+      # For more information, see the
+      # [runtime version list](/ml-engine/docs/runtime-version-list) and
+      # [how to manage runtime versions](/ml-engine/docs/versioning).
+  "manualScaling": { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
+      # model. You should generally use `auto_scaling` with an appropriate
+      # `min_nodes` instead, but this option is available if you want more
+      # predictable billing. Beware that latency and error rates will increase
+      # if the traffic exceeds the capability of the system to serve it based
+      # on the selected number of nodes.
+    "nodes": 42, # The number of nodes to allocate for this model. These nodes are always up,
+        # starting from the time the model is deployed, so the cost of operating
+        # this model will be proportional to `nodes` * number of hours since
+        # last billing cycle plus the cost for each prediction performed.
+  },
+  "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
+  "framework": "A String", # Optional. The machine learning framework AI Platform uses to train
+      # this version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,
+      # `XGBOOST`. If you do not specify a framework, AI Platform
+      # will analyze files in the deployment_uri to determine a framework. If you
+      # choose `SCIKIT_LEARN` or `XGBOOST`, you must also set the runtime version
+      # of the model to 1.4 or greater.
+      # 
+      # Do **not** specify a framework if you're deploying a [custom
+      # prediction routine](/ml-engine/docs/tensorflow/custom-prediction-routines).
+      # 
+      # If you specify a [Compute Engine (N1) machine
+      # type](/ml-engine/docs/machine-types-online-prediction) in the
+      # `machineType` field, you must specify `TENSORFLOW`
+      # for the framework.
+  "createTime": "A String", # Output only. The time the version was created.
+  "name": "A String", # Required. The name specified for the version when it was created.
       # 
       # The version name must be unique within the model it is created in.
+  "autoScaling": { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
+      # response to increases and decreases in traffic. Care should be
+      # taken to ramp up traffic according to the model's ability to scale
+      # or you will start seeing increases in latency and 429 response codes.
+      # 
+      # Note that you cannot use AutoScaling if your version uses
+      # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must specify
+      # `manual_scaling`.
+    "minNodes": 42, # Optional. The minimum number of nodes to allocate for this model. These
+        # nodes are always up, starting from the time the model is deployed.
+        # Therefore, the cost of operating this model will be at least
+        # `rate` * `min_nodes` * number of hours since last billing cycle,
+        # where `rate` is the cost per node-hour as documented in the
+        # [pricing guide](/ml-engine/docs/pricing),
+        # even if no predictions are performed. There is additional cost for each
+        # prediction performed.
+        #
+        # Unlike manual scaling, if the load gets too heavy for the nodes
+        # that are up, the service will automatically add nodes to handle the
+        # increased load as well as scale back as traffic drops, always maintaining
+        # at least `min_nodes`. You will be charged for the time in which additional
+        # nodes are used.
+        #
+        # If `min_nodes` is not specified and AutoScaling is used with a [legacy
+        # (MLS1) machine type](/ml-engine/docs/machine-types-online-prediction),
+        # `min_nodes` defaults to 0, in which case, when traffic to a model stops
+        # (and after a cool-down period), nodes will be shut down and no charges will
+        # be incurred until traffic to the model resumes.
+        #
+        # If `min_nodes` is not specified and AutoScaling is used with a [Compute
+        # Engine (N1) machine type](/ml-engine/docs/machine-types-online-prediction),
+        # `min_nodes` defaults to 1. `min_nodes` must be at least 1 for use with a
+        # Compute Engine machine type.
+        #
+        # Note that you cannot use AutoScaling if your version uses
+        # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use
+        # ManualScaling.
+        #
+        # You can set `min_nodes` when creating the model version, and you can also
+        # update `min_nodes` for an existing version:
+        # &lt;pre&gt;
+        # update_body.json:
+        # {
+        #   'autoScaling': {
+        #     'minNodes': 5
+        #   }
+        # }
+        # &lt;/pre&gt;
+        # HTTP request:
+        # &lt;pre style="max-width: 626px;"&gt;
+        # PATCH
+        # https://ml.googleapis.com/v1/{name=projects/*/models/*/versions/*}?update_mask=autoScaling.minNodes
+        # -d @./update_body.json
+        # &lt;/pre&gt;
+  },
+  "pythonVersion": "A String", # Required. The version of Python used in prediction.
+      # 
+      # The following Python versions are available:
+      # 
+      # * Python '3.7' is available when `runtime_version` is set to '1.15' or
+      #   later.
+      # * Python '3.5' is available when `runtime_version` is set to a version
+      #   from '1.4' to '1.14'.
+      # * Python '2.7' is available when `runtime_version` is set to '1.15' or
+      #   earlier.
+      # 
+      # Read more about the Python versions available for [each runtime
+      # version](/ml-engine/docs/runtime-version-list).
+  "requestLoggingConfig": { # Configuration for logging request-response pairs to a BigQuery table. # Optional. *Only* specify this field in a
+      # projects.models.versions.patch
+      # request. Specifying it in a
+      # projects.models.versions.create
+      # request has no effect.
+      # 
+      # Configures the request-response pair logging on predictions from this
+      # Version.
+      # Online prediction requests to a model version and the responses to these
+      # requests are converted to raw strings and saved to the specified BigQuery
+      # table. Logging is constrained by [BigQuery quotas and
+      # limits](/bigquery/quotas). If your project exceeds BigQuery quotas or limits,
+      # AI Platform Prediction does not log request-response pairs, but it continues
+      # to serve predictions.
+      #
+      # If you are using [continuous
+      # evaluation](/ml-engine/docs/continuous-evaluation/), you do not need to
+      # specify this configuration manually. Setting up continuous evaluation
+      # automatically enables logging of request-response pairs.
+    "samplingPercentage": 3.14, # Percentage of requests to be logged, expressed as a fraction from 0 to 1.
+        # For example, if you want to log 10% of requests, enter `0.1`. The sampling
+        # window is the lifetime of the model version. Defaults to 0.
+    "bigqueryTableName": "A String", # Required. Fully qualified BigQuery table name in the following format:
+        # "&lt;var&gt;project_id&lt;/var&gt;.&lt;var&gt;dataset_name&lt;/var&gt;.&lt;var&gt;table_name&lt;/var&gt;"
+        #
+        # The specified table must already exist, and the "Cloud ML Service Agent"
+        # for your project must have permission to write to it. The table must have
+        # the following [schema](/bigquery/docs/schemas):
+        #
+        # &lt;table&gt;
+        #   &lt;tr&gt;&lt;th&gt;Field name&lt;/th&gt;&lt;th style="display: table-cell"&gt;Type&lt;/th&gt;
+        #     &lt;th style="display: table-cell"&gt;Mode&lt;/th&gt;&lt;/tr&gt;
+        #   &lt;tr&gt;&lt;td&gt;model&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+        #   &lt;tr&gt;&lt;td&gt;model_version&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+        #   &lt;tr&gt;&lt;td&gt;time&lt;/td&gt;&lt;td&gt;TIMESTAMP&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+        #   &lt;tr&gt;&lt;td&gt;raw_data&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+        #   &lt;tr&gt;&lt;td&gt;raw_prediction&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;NULLABLE&lt;/td&gt;&lt;/tr&gt;
+        #   &lt;tr&gt;&lt;td&gt;groundtruth&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;NULLABLE&lt;/td&gt;&lt;/tr&gt;
+        # &lt;/table&gt;
+  },
 }
 
   x__xgafv: string, V1 error format.
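
A minimal sketch of calling `create` through google-api-python-client, matching the signature above; the project, model, and `gs://` path are hypothetical placeholders, and Application Default Credentials are assumed:

```python
# Minimal sketch: deploy a new model version via the ml v1 surface.
# 'my-project', 'my_model', and the gs:// path are hypothetical placeholders.
from googleapiclient import discovery

ml = discovery.build('ml', 'v1')  # uses Application Default Credentials

body = {
    'name': 'v1',
    'deploymentUri': 'gs://my-bucket/model-dir',  # trained model location
    'runtimeVersion': '1.15',     # now documented as Required
    'pythonVersion': '3.7',       # available with runtime version 1.15 or later
    'machineType': 'mls1-c1-m2',  # the GA machine type; N1 types are beta
}

operation = ml.projects().models().versions().create(
    parent='projects/my-project/models/my_model',
    body=body,
).execute()
print(operation['name'])  # create is asynchronous and returns an Operation
```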
@@ -377,7 +509,7 @@
 Args:
   name: string, Required. The name of the version. You can get the names of all the
 versions of a model by calling
-[projects.models.versions.list](/ml-engine/reference/rest/v1/projects.models.versions/list). (required)
+projects.models.versions.list. (required)
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
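
A matching sketch for `delete`, under the same assumptions (hypothetical resource name, default credentials):

```python
# Sketch: delete a model version; the resource name is a placeholder.
from googleapiclient import discovery

ml = discovery.build('ml', 'v1')
operation = ml.projects().models().versions().delete(
    name='projects/my-project/models/my_model/versions/v1',
).execute()
# Deletion is asynchronous; poll the returned long-running Operation with
# ml.projects().operations().get(name=operation['name']) until it is done.
```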
@@ -436,7 +568,7 @@
   <pre>Gets information about a model version.
 
 Models can have multiple versions. You can call
-[projects.models.versions.list](/ml-engine/reference/rest/v1/projects.models.versions/list)
+projects.models.versions.list
 to get the same information that this method returns for all of the
 versions of a model.
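
Since this description leans on `projects.models.versions.list`, here is a short sketch of the `list`/`list_next` pagination pattern from the method list above (the model name is a placeholder):

```python
# Sketch: enumerate all versions of a model, following pagination.
from googleapiclient import discovery

ml = discovery.build('ml', 'v1')
versions = ml.projects().models().versions()

request = versions.list(parent='projects/my-project/models/my_model')
while request is not None:
    response = request.execute()
    for version in response.get('versions', []):
        print(version['name'], version.get('state'))
    # list_next returns None once no further pages remain.
    request = versions.list_next(previous_request=request,
                                 previous_response=response)
```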
 
@@ -455,48 +587,27 @@
       # Each version is a trained model deployed in the cloud, ready to handle
       # prediction requests. A model can have multiple versions. You can get
       # information about all of the versions of a given model by calling
-      # [projects.models.versions.list](/ml-engine/reference/rest/v1/projects.models.versions/list).
-    "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
+      # projects.models.versions.list.
+    "acceleratorConfig": { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
+        # Only specify this field if you have specified a Compute Engine (N1) machine
+        # type in the `machineType` field. Learn more about [using GPUs for online
+        # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+        # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+        # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+        # [accelerators for online
+        # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+      "count": "A String", # The number of accelerators to attach to each machine running the job.
+      "type": "A String", # The type of accelerator to use.
+    },
     "labels": { # Optional. One or more labels that you can add, to organize your model
         # versions. Each label is a key-value pair, where both the key and the value
         # are arbitrary strings that you supply.
         # For more information, see the documentation on
-        # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
+        # &lt;a href="/ml-engine/docs/tensorflow/resource-labels"&gt;using labels&lt;/a&gt;.
       "a_key": "A String",
     },
-    "machineType": "A String", # Optional. The type of machine on which to serve the model. Currently only
-        # applies to online prediction service.
-        # <dl>
-        #   <dt>mls1-c1-m2</dt>
-        #   <dd>
-        #   The <b>default</b> machine type, with 1 core and 2 GB RAM. The deprecated
-        #   name for this machine type is "mls1-highmem-1".
-        #   </dd>
-        #   <dt>mls1-c4-m2</dt>
-        #   <dd>
-        #   In <b>Beta</b>. This machine type has 4 cores and 2 GB RAM. The
-        #   deprecated name for this machine type is "mls1-highcpu-4".
-        #   </dd>
-        # </dl>
-    "description": "A String", # Optional. The description specified for the version when it was created.
-    "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this deployment.
-        # If not set, AI Platform uses the default stable version, 1.0. For more
-        # information, see the
-        # [runtime version list](/ml-engine/docs/runtime-version-list) and
-        # [how to manage runtime versions](/ml-engine/docs/versioning).
-    "manualScaling": { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
-        # model. You should generally use `auto_scaling` with an appropriate
-        # `min_nodes` instead, but this option is available if you want more
-        # predictable billing. Beware that latency and error rates will increase
-        # if the traffic exceeds that capability of the system to serve it based
-        # on the selected number of nodes.
-      "nodes": 42, # The number of nodes to allocate for this model. These nodes are always up,
-          # starting from the time the model is deployed, so the cost of operating
-          # this model will be proportional to `nodes` * number of hours since
-          # last billing cycle plus the cost for each prediction performed.
-    },
     "predictionClass": "A String", # Optional. The fully qualified name
-        # (<var>module_name</var>.<var>class_name</var>) of a class that implements
+        # (&lt;var&gt;module_name&lt;/var&gt;.&lt;var&gt;class_name&lt;/var&gt;) of a class that implements
         # the Predictor interface described in this reference field. The module
         # containing this class should be included in a package provided to the
         # [`packageUris` field](#Version.FIELDS.package_uris).
@@ -504,11 +615,13 @@
         # Specify this field if and only if you are deploying a [custom prediction
         # routine (beta)](/ml-engine/docs/tensorflow/custom-prediction-routines).
         # If you specify this field, you must set
-        # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater.
+        # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater and
+        # you must set `machineType` to a [legacy (MLS1)
+        # machine type](/ml-engine/docs/machine-types-online-prediction).
         #
         # The following code sample provides the Predictor interface:
         #
-        # ```py
+        # &lt;pre style="max-width: 626px;"&gt;
         # class Predictor(object):
         # """Interface for constructing custom predictors."""
         #
@@ -544,64 +657,12 @@
         #         An instance implementing this Predictor class.
         #     """
         #     raise NotImplementedError()
-        # ```
+        # &lt;/pre&gt;
         #
         # Learn more about [the Predictor interface and custom prediction
         # routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
-    "autoScaling": { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
-        # response to increases and decreases in traffic. Care should be
-        # taken to ramp up traffic according to the model's ability to scale
-        # or you will start seeing increases in latency and 429 response codes.
-      "minNodes": 42, # Optional. The minimum number of nodes to allocate for this model. These
-          # nodes are always up, starting from the time the model is deployed.
-          # Therefore, the cost of operating this model will be at least
-          # `rate` * `min_nodes` * number of hours since last billing cycle,
-          # where `rate` is the cost per node-hour as documented in the
-          # [pricing guide](/ml-engine/docs/pricing),
-          # even if no predictions are performed. There is additional cost for each
-          # prediction performed.
-          #
-          # Unlike manual scaling, if the load gets too heavy for the nodes
-          # that are up, the service will automatically add nodes to handle the
-          # increased load as well as scale back as traffic drops, always maintaining
-          # at least `min_nodes`. You will be charged for the time in which additional
-          # nodes are used.
-          #
-          # If not specified, `min_nodes` defaults to 0, in which case, when traffic
-          # to a model stops (and after a cool-down period), nodes will be shut down
-          # and no charges will be incurred until traffic to the model resumes.
-          #
-          # You can set `min_nodes` when creating the model version, and you can also
-          # update `min_nodes` for an existing version:
-          # <pre>
-          # update_body.json:
-          # {
-          #   'autoScaling': {
-          #     'minNodes': 5
-          #   }
-          # }
-          # </pre>
-          # HTTP request:
-          # <pre>
-          # PATCH
-          # https://ml.googleapis.com/v1/{name=projects/*/models/*/versions/*}?update_mask=autoScaling.minNodes
-          # -d @./update_body.json
-          # </pre>
-    },
     "serviceAccount": "A String", # Optional. Specifies the service account for resource access control.
     "state": "A String", # Output only. The state of a version.
-    "pythonVersion": "A String", # Optional. The version of Python used in prediction. If not set, the default
-        # version is '2.7'. Python '3.5' is available when `runtime_version` is set
-        # to '1.4' and above. Python '2.7' works with all supported runtime versions.
-    "framework": "A String", # Optional. The machine learning framework AI Platform uses to train
-        # this version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,
-        # `XGBOOST`. If you do not specify a framework, AI Platform
-        # will analyze files in the deployment_uri to determine a framework. If you
-        # choose `SCIKIT_LEARN` or `XGBOOST`, you must also set the runtime version
-        # of the model to 1.4 or greater.
-        #
-        # Do **not** specify a framework if you're deploying a [custom
-        # prediction routine](/ml-engine/docs/tensorflow/custom-prediction-routines).
     "packageUris": [ # Optional. Cloud Storage paths (`gs://…`) of packages for [custom
         # prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines)
         # or [scikit-learn pipelines with custom
@@ -633,20 +694,223 @@
         # information.
         #
         # When passing Version to
-        # [projects.models.versions.create](/ml-engine/reference/rest/v1/projects.models.versions/create)
+        # projects.models.versions.create
         # the model service uses the specified location as the source of the model.
         # Once deployed, the model version is hosted by the prediction service, so
         # this location is useful only as a historical record.
         # The total number of model files can't exceed 1000.
-    "createTime": "A String", # Output only. The time the version was created.
+    "explanationConfig": { # Message holding configuration options for explaining model predictions. # Optional. Configures explainability features on the model's version.
+        # Some explanation features require additional metadata to be loaded
+        # as part of the model payload.
+        # There are two feature attribution methods supported for TensorFlow models:
+        # integrated gradients and sampled Shapley.
+        # [Learn more about feature
+        # attributions.](/ml-engine/docs/ai-explanations/overview)
+      "xraiAttribution": { # Attributes credit by computing the XRAI taking advantage # Attributes credit by computing the XRAI taking advantage
+          # of the model's fully differentiable structure. Refer to this paper for
+          # more details: https://arxiv.org/abs/1906.02825
+          # Currently only implemented for models with natural image inputs.
+          # of the model's fully differentiable structure. Refer to this paper for
+          # more details: https://arxiv.org/abs/1906.02825
+          # Currently only implemented for models with natural image inputs.
+        "numIntegralSteps": 42, # Number of steps for approximating the path integral.
+            # A good value to start is 50 and gradually increase until the
+            # sum to diff property is met within the desired error range.
+      },
+      "sampledShapleyAttribution": { # An attribution method that approximates Shapley values for features that # An attribution method that approximates Shapley values for features that
+          # contribute to the label being predicted. A sampling strategy is used to
+          # approximate the value rather than considering all subsets of features.
+          # contribute to the label being predicted. A sampling strategy is used to
+          # approximate the value rather than considering all subsets of features.
+        "numPaths": 42, # The number of feature permutations to consider when approximating the
+            # Shapley values.
+      },
+      "integratedGradientsAttribution": { # Attributes credit by computing the Aumann-Shapley value taking advantage # Attributes credit by computing the Aumann-Shapley value taking advantage
+          # of the model's fully differentiable structure. Refer to this paper for
+          # more details: http://proceedings.mlr.press/v70/sundararajan17a.html
+          # of the model's fully differentiable structure. Refer to this paper for
+          # more details: http://proceedings.mlr.press/v70/sundararajan17a.html
+        "numIntegralSteps": 42, # Number of steps for approximating the path integral.
+            # A good value to start is 50 and gradually increase until the
+            # sum to diff property is met within the desired error range.
+      },
+    },
     "isDefault": True or False, # Output only. If true, this version will be used to handle prediction
         # requests that do not specify a version.
         #
         # You can change the default version by calling
-        # [projects.methods.versions.setDefault](/ml-engine/reference/rest/v1/projects.models.versions/setDefault).
-    "name": "A String", # Required.The name specified for the version when it was created.
+        # projects.models.versions.setDefault.
+    "machineType": "A String", # Optional. The type of machine on which to serve the model. Currently only
+        # applies to online prediction service. If this field is not specified, it
+        # defaults to `mls1-c1-m2`.
+        #
+        # Online prediction supports the following machine types:
+        #
+        # * `mls1-c1-m2`
+        # * `mls1-c4-m2`
+        # * `n1-standard-2`
+        # * `n1-standard-4`
+        # * `n1-standard-8`
+        # * `n1-standard-16`
+        # * `n1-standard-32`
+        # * `n1-highmem-2`
+        # * `n1-highmem-4`
+        # * `n1-highmem-8`
+        # * `n1-highmem-16`
+        # * `n1-highmem-32`
+        # * `n1-highcpu-2`
+        # * `n1-highcpu-4`
+        # * `n1-highcpu-8`
+        # * `n1-highcpu-16`
+        # * `n1-highcpu-32`
+        #
+        # `mls1-c1-m2` is generally available. All other machine types are available
+        # in beta. Learn more about the [differences between machine
+        # types](/ml-engine/docs/machine-types-online-prediction).
+    "description": "A String", # Optional. The description specified for the version when it was created.
+    "runtimeVersion": "A String", # Required. The AI Platform runtime version to use for this deployment.
+        #
+        # For more information, see the
+        # [runtime version list](/ml-engine/docs/runtime-version-list) and
+        # [how to manage runtime versions](/ml-engine/docs/versioning).
+    "manualScaling": { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
+        # model. You should generally use `auto_scaling` with an appropriate
+        # `min_nodes` instead, but this option is available if you want more
+        # predictable billing. Beware that latency and error rates will increase
+        # if the traffic exceeds the capability of the system to serve it based
+        # on the selected number of nodes.
+      "nodes": 42, # The number of nodes to allocate for this model. These nodes are always up,
+          # starting from the time the model is deployed, so the cost of operating
+          # this model will be proportional to `nodes` * number of hours since
+          # last billing cycle plus the cost for each prediction performed.
+    },
+    "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
+    "framework": "A String", # Optional. The machine learning framework AI Platform uses to train
+        # this version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,
+        # `XGBOOST`. If you do not specify a framework, AI Platform
+        # will analyze files in the deployment_uri to determine a framework. If you
+        # choose `SCIKIT_LEARN` or `XGBOOST`, you must also set the runtime version
+        # of the model to 1.4 or greater.
+        #
+        # Do **not** specify a framework if you're deploying a [custom
+        # prediction routine](/ml-engine/docs/tensorflow/custom-prediction-routines).
+        #
+        # If you specify a [Compute Engine (N1) machine
+        # type](/ml-engine/docs/machine-types-online-prediction) in the
+        # `machineType` field, you must specify `TENSORFLOW`
+        # for the framework.
+    "createTime": "A String", # Output only. The time the version was created.
+    "name": "A String", # Required. The name specified for the version when it was created.
         #
         # The version name must be unique within the model it is created in.
+    "autoScaling": { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
+        # response to increases and decreases in traffic. Care should be
+        # taken to ramp up traffic according to the model's ability to scale
+        # or you will start seeing increases in latency and 429 response codes.
+        #
+        # Note that you cannot use AutoScaling if your version uses
+        # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must specify
+        # `manual_scaling`.
+      "minNodes": 42, # Optional. The minimum number of nodes to allocate for this model. These
+          # nodes are always up, starting from the time the model is deployed.
+          # Therefore, the cost of operating this model will be at least
+          # `rate` * `min_nodes` * number of hours since last billing cycle,
+          # where `rate` is the cost per node-hour as documented in the
+          # [pricing guide](/ml-engine/docs/pricing),
+          # even if no predictions are performed. There is additional cost for each
+          # prediction performed.
+          #
+          # Unlike manual scaling, if the load gets too heavy for the nodes
+          # that are up, the service will automatically add nodes to handle the
+          # increased load as well as scale back as traffic drops, always maintaining
+          # at least `min_nodes`. You will be charged for the time in which additional
+          # nodes are used.
+          #
+          # If `min_nodes` is not specified and AutoScaling is used with a [legacy
+          # (MLS1) machine type](/ml-engine/docs/machine-types-online-prediction),
+          # `min_nodes` defaults to 0, in which case, when traffic to a model stops
+          # (and after a cool-down period), nodes will be shut down and no charges will
+          # be incurred until traffic to the model resumes.
+          #
+          # If `min_nodes` is not specified and AutoScaling is used with a [Compute
+          # Engine (N1) machine type](/ml-engine/docs/machine-types-online-prediction),
+          # `min_nodes` defaults to 1. `min_nodes` must be at least 1 for use with a
+          # Compute Engine machine type.
+          #
+          # Note that you cannot use AutoScaling if your version uses
+          # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use
+          # ManualScaling.
+          #
+          # You can set `min_nodes` when creating the model version, and you can also
+          # update `min_nodes` for an existing version:
+          # &lt;pre&gt;
+          # update_body.json:
+          # {
+          #   'autoScaling': {
+          #     'minNodes': 5
+          #   }
+          # }
+          # &lt;/pre&gt;
+          # HTTP request:
+          # &lt;pre style="max-width: 626px;"&gt;
+          # PATCH
+          # https://ml.googleapis.com/v1/{name=projects/*/models/*/versions/*}?update_mask=autoScaling.minNodes
+          # -d @./update_body.json
+          # &lt;/pre&gt;
+    },
+    "pythonVersion": "A String", # Required. The version of Python used in prediction.
+        #
+        # The following Python versions are available:
+        #
+        # * Python '3.7' is available when `runtime_version` is set to '1.15' or
+        #   later.
+        # * Python '3.5' is available when `runtime_version` is set to a version
+        #   from '1.4' to '1.14'.
+        # * Python '2.7' is available when `runtime_version` is set to '1.15' or
+        #   earlier.
+        #
+        # Read more about the Python versions available for [each runtime
+        # version](/ml-engine/docs/runtime-version-list).
+    "requestLoggingConfig": { # Configuration for logging request-response pairs to a BigQuery table. # Optional. *Only* specify this field in a
+        # projects.models.versions.patch
+        # request. Specifying it in a
+        # projects.models.versions.create
+        # request has no effect.
+        #
+        # Configures the request-response pair logging on predictions from this
+        # Version.
+        # Online prediction requests to a model version and the responses to these
+        # requests are converted to raw strings and saved to the specified BigQuery
+        # table. Logging is constrained by [BigQuery quotas and
+        # limits](/bigquery/quotas). If your project exceeds BigQuery quotas or limits,
+        # AI Platform Prediction does not log request-response pairs, but it continues
+        # to serve predictions.
+        #
+        # If you are using [continuous
+        # evaluation](/ml-engine/docs/continuous-evaluation/), you do not need to
+        # specify this configuration manually. Setting up continuous evaluation
+        # automatically enables logging of request-response pairs.
+      "samplingPercentage": 3.14, # Percentage of requests to be logged, expressed as a fraction from 0 to 1.
+          # For example, if you want to log 10% of requests, enter `0.1`. The sampling
+          # window is the lifetime of the model version. Defaults to 0.
+      "bigqueryTableName": "A String", # Required. Fully qualified BigQuery table name in the following format:
+          # "&lt;var&gt;project_id&lt;/var&gt;.&lt;var&gt;dataset_name&lt;/var&gt;.&lt;var&gt;table_name&lt;/var&gt;"
+          #
+          # The specified table must already exist, and the "Cloud ML Service Agent"
+          # for your project must have permission to write to it. The table must have
+          # the following [schema](/bigquery/docs/schemas):
+          #
+          # &lt;table&gt;
+          #   &lt;tr&gt;&lt;th&gt;Field name&lt;/th&gt;&lt;th style="display: table-cell"&gt;Type&lt;/th&gt;
+          #     &lt;th style="display: table-cell"&gt;Mode&lt;/th&gt;&lt;/tr&gt;
+          #   &lt;tr&gt;&lt;td&gt;model&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+          #   &lt;tr&gt;&lt;td&gt;model_version&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+          #   &lt;tr&gt;&lt;td&gt;time&lt;/td&gt;&lt;td&gt;TIMESTAMP&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+          #   &lt;tr&gt;&lt;td&gt;raw_data&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+          #   &lt;tr&gt;&lt;td&gt;raw_prediction&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;NULLABLE&lt;/td&gt;&lt;/tr&gt;
+          #   &lt;tr&gt;&lt;td&gt;groundtruth&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;NULLABLE&lt;/td&gt;&lt;/tr&gt;
+          # &lt;/table&gt;
+    },
   }</pre>
 </div>
 
@@ -690,48 +954,27 @@
           # Each version is a trained model deployed in the cloud, ready to handle
           # prediction requests. A model can have multiple versions. You can get
           # information about all of the versions of a given model by calling
-          # [projects.models.versions.list](/ml-engine/reference/rest/v1/projects.models.versions/list).
-        "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
+          # projects.models.versions.list.
+        "acceleratorConfig": { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
+            # Only specify this field if you have specified a Compute Engine (N1) machine
+            # type in the `machineType` field. Learn more about [using GPUs for online
+            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+            # [accelerators for online
+            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+          "count": "A String", # The number of accelerators to attach to each machine running the job.
+          "type": "A String", # The type of accelerator to use.
+        },
         "labels": { # Optional. One or more labels that you can add, to organize your model
             # versions. Each label is a key-value pair, where both the key and the value
             # are arbitrary strings that you supply.
             # For more information, see the documentation on
-            # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
+            # &lt;a href="/ml-engine/docs/tensorflow/resource-labels"&gt;using labels&lt;/a&gt;.
           "a_key": "A String",
         },
-        "machineType": "A String", # Optional. The type of machine on which to serve the model. Currently only
-            # applies to online prediction service.
-            # <dl>
-            #   <dt>mls1-c1-m2</dt>
-            #   <dd>
-            #   The <b>default</b> machine type, with 1 core and 2 GB RAM. The deprecated
-            #   name for this machine type is "mls1-highmem-1".
-            #   </dd>
-            #   <dt>mls1-c4-m2</dt>
-            #   <dd>
-            #   In <b>Beta</b>. This machine type has 4 cores and 2 GB RAM. The
-            #   deprecated name for this machine type is "mls1-highcpu-4".
-            #   </dd>
-            # </dl>
-        "description": "A String", # Optional. The description specified for the version when it was created.
-        "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this deployment.
-            # If not set, AI Platform uses the default stable version, 1.0. For more
-            # information, see the
-            # [runtime version list](/ml-engine/docs/runtime-version-list) and
-            # [how to manage runtime versions](/ml-engine/docs/versioning).
-        "manualScaling": { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
-            # model. You should generally use `auto_scaling` with an appropriate
-            # `min_nodes` instead, but this option is available if you want more
-            # predictable billing. Beware that latency and error rates will increase
-            # if the traffic exceeds that capability of the system to serve it based
-            # on the selected number of nodes.
-          "nodes": 42, # The number of nodes to allocate for this model. These nodes are always up,
-              # starting from the time the model is deployed, so the cost of operating
-              # this model will be proportional to `nodes` * number of hours since
-              # last billing cycle plus the cost for each prediction performed.
-        },
         "predictionClass": "A String", # Optional. The fully qualified name
-            # (<var>module_name</var>.<var>class_name</var>) of a class that implements
+            # (&lt;var&gt;module_name&lt;/var&gt;.&lt;var&gt;class_name&lt;/var&gt;) of a class that implements
             # the Predictor interface described in this reference field. The module
             # containing this class should be included in a package provided to the
             # [`packageUris` field](#Version.FIELDS.package_uris).
@@ -739,11 +982,13 @@
             # Specify this field if and only if you are deploying a [custom prediction
             # routine (beta)](/ml-engine/docs/tensorflow/custom-prediction-routines).
             # If you specify this field, you must set
-            # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater.
+            # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater and
+            # you must set `machineType` to a [legacy (MLS1)
+            # machine type](/ml-engine/docs/machine-types-online-prediction).
             #
             # The following code sample provides the Predictor interface:
             #
-            # ```py
+            # &lt;pre style="max-width: 626px;"&gt;
             # class Predictor(object):
             # """Interface for constructing custom predictors."""
             #
@@ -779,64 +1024,12 @@
             #         An instance implementing this Predictor class.
             #     """
             #     raise NotImplementedError()
-            # ```
+            # &lt;/pre&gt;
             #
             # Learn more about [the Predictor interface and custom prediction
             # routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
-        "autoScaling": { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
-            # response to increases and decreases in traffic. Care should be
-            # taken to ramp up traffic according to the model's ability to scale
-            # or you will start seeing increases in latency and 429 response codes.
-          "minNodes": 42, # Optional. The minimum number of nodes to allocate for this model. These
-              # nodes are always up, starting from the time the model is deployed.
-              # Therefore, the cost of operating this model will be at least
-              # `rate` * `min_nodes` * number of hours since last billing cycle,
-              # where `rate` is the cost per node-hour as documented in the
-              # [pricing guide](/ml-engine/docs/pricing),
-              # even if no predictions are performed. There is additional cost for each
-              # prediction performed.
-              #
-              # Unlike manual scaling, if the load gets too heavy for the nodes
-              # that are up, the service will automatically add nodes to handle the
-              # increased load as well as scale back as traffic drops, always maintaining
-              # at least `min_nodes`. You will be charged for the time in which additional
-              # nodes are used.
-              #
-              # If not specified, `min_nodes` defaults to 0, in which case, when traffic
-              # to a model stops (and after a cool-down period), nodes will be shut down
-              # and no charges will be incurred until traffic to the model resumes.
-              #
-              # You can set `min_nodes` when creating the model version, and you can also
-              # update `min_nodes` for an existing version:
-              # <pre>
-              # update_body.json:
-              # {
-              #   'autoScaling': {
-              #     'minNodes': 5
-              #   }
-              # }
-              # </pre>
-              # HTTP request:
-              # <pre>
-              # PATCH
-              # https://ml.googleapis.com/v1/{name=projects/*/models/*/versions/*}?update_mask=autoScaling.minNodes
-              # -d @./update_body.json
-              # </pre>
-        },
         "serviceAccount": "A String", # Optional. Specifies the service account for resource access control.
         "state": "A String", # Output only. The state of a version.
-        "pythonVersion": "A String", # Optional. The version of Python used in prediction. If not set, the default
-            # version is '2.7'. Python '3.5' is available when `runtime_version` is set
-            # to '1.4' and above. Python '2.7' works with all supported runtime versions.
-        "framework": "A String", # Optional. The machine learning framework AI Platform uses to train
-            # this version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,
-            # `XGBOOST`. If you do not specify a framework, AI Platform
-            # will analyze files in the deployment_uri to determine a framework. If you
-            # choose `SCIKIT_LEARN` or `XGBOOST`, you must also set the runtime version
-            # of the model to 1.4 or greater.
-            #
-            # Do **not** specify a framework if you're deploying a [custom
-            # prediction routine](/ml-engine/docs/tensorflow/custom-prediction-routines).
         "packageUris": [ # Optional. Cloud Storage paths (`gs://…`) of packages for [custom
             # prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines)
             # or [scikit-learn pipelines with custom
@@ -868,20 +1061,223 @@
             # information.
             #
             # When passing Version to
-            # [projects.models.versions.create](/ml-engine/reference/rest/v1/projects.models.versions/create)
+            # projects.models.versions.create
             # the model service uses the specified location as the source of the model.
             # Once deployed, the model version is hosted by the prediction service, so
             # this location is useful only as a historical record.
             # The total number of model files can't exceed 1000.
-        "createTime": "A String", # Output only. The time the version was created.
+        "explanationConfig": { # Message holding configuration options for explaining model predictions. # Optional. Configures explainability features on the model's version.
+            # Some explanation features require additional metadata to be loaded
+            # as part of the model payload.
+            # There are two feature attribution methods supported for TensorFlow models:
+            # integrated gradients and sampled Shapley.
+            # [Learn more about feature
+            # attributions.](/ml-engine/docs/ai-explanations/overview)
+          "xraiAttribution": { # Attributes credit by computing the XRAI taking advantage # Attributes credit by computing the XRAI taking advantage
+              # of the model's fully differentiable structure. Refer to this paper for
+              # more details: https://arxiv.org/abs/1906.02825
+              # Currently only implemented for models with natural image inputs.
+              # of the model's fully differentiable structure. Refer to this paper for
+              # more details: https://arxiv.org/abs/1906.02825
+              # Currently only implemented for models with natural image inputs.
+            "numIntegralSteps": 42, # Number of steps for approximating the path integral.
+                # A good value to start is 50 and gradually increase until the
+                # sum to diff property is met within the desired error range.
+          },
+          "sampledShapleyAttribution": { # An attribution method that approximates Shapley values for features that # An attribution method that approximates Shapley values for features that
+              # contribute to the label being predicted. A sampling strategy is used to
+              # approximate the value rather than considering all subsets of features.
+              # contribute to the label being predicted. A sampling strategy is used to
+              # approximate the value rather than considering all subsets of features.
+            "numPaths": 42, # The number of feature permutations to consider when approximating the
+                # Shapley values.
+          },
+          "integratedGradientsAttribution": { # Attributes credit by computing the Aumann-Shapley value taking advantage # Attributes credit by computing the Aumann-Shapley value taking advantage
+              # of the model's fully differentiable structure. Refer to this paper for
+              # more details: http://proceedings.mlr.press/v70/sundararajan17a.html
+              # of the model's fully differentiable structure. Refer to this paper for
+              # more details: http://proceedings.mlr.press/v70/sundararajan17a.html
+            "numIntegralSteps": 42, # Number of steps for approximating the path integral.
+                # A good value to start is 50 and gradually increase until the
+                # sum to diff property is met within the desired error range.
+          },
+        },
         "isDefault": True or False, # Output only. If true, this version will be used to handle prediction
             # requests that do not specify a version.
             #
             # You can change the default version by calling
-            # [projects.methods.versions.setDefault](/ml-engine/reference/rest/v1/projects.models.versions/setDefault).
-        "name": "A String", # Required.The name specified for the version when it was created.
+            # projects.models.versions.setDefault.
+        "machineType": "A String", # Optional. The type of machine on which to serve the model. Currently only
+            # applies to online prediction service. If this field is not specified, it
+            # defaults to `mls1-c1-m2`.
+            #
+            # Online prediction supports the following machine types:
+            #
+            # * `mls1-c1-m2`
+            # * `mls1-c4-m2`
+            # * `n1-standard-2`
+            # * `n1-standard-4`
+            # * `n1-standard-8`
+            # * `n1-standard-16`
+            # * `n1-standard-32`
+            # * `n1-highmem-2`
+            # * `n1-highmem-4`
+            # * `n1-highmem-8`
+            # * `n1-highmem-16`
+            # * `n1-highmem-32`
+            # * `n1-highcpu-2`
+            # * `n1-highcpu-4`
+            # * `n1-highcpu-8`
+            # * `n1-highcpu-16`
+            # * `n1-highcpu-32`
+            #
+            # `mls1-c1-m2` is generally available. All other machine types are available
+            # in beta. Learn more about the [differences between machine
+            # types](/ml-engine/docs/machine-types-online-prediction).
+        "description": "A String", # Optional. The description specified for the version when it was created.
+        "runtimeVersion": "A String", # Required. The AI Platform runtime version to use for this deployment.
+            #
+            # For more information, see the
+            # [runtime version list](/ml-engine/docs/runtime-version-list) and
+            # [how to manage runtime versions](/ml-engine/docs/versioning).
+        "manualScaling": { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
+            # model. You should generally use `auto_scaling` with an appropriate
+            # `min_nodes` instead, but this option is available if you want more
+            # predictable billing. Beware that latency and error rates will increase
+            # if the traffic exceeds the capacity of the system to serve it based
+            # on the selected number of nodes.
+          "nodes": 42, # The number of nodes to allocate for this model. These nodes are always up,
+              # starting from the time the model is deployed, so the cost of operating
+              # this model will be proportional to `nodes` * number of hours since
+              # last billing cycle plus the cost for each prediction performed.
+        },
+        "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
+        "framework": "A String", # Optional. The machine learning framework AI Platform uses to train
+            # this version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,
+            # `XGBOOST`. If you do not specify a framework, AI Platform
+            # will analyze files in the deployment_uri to determine a framework. If you
+            # choose `SCIKIT_LEARN` or `XGBOOST`, you must also set the runtime version
+            # of the model to 1.4 or greater.
+            #
+            # Do **not** specify a framework if you're deploying a [custom
+            # prediction routine](/ml-engine/docs/tensorflow/custom-prediction-routines).
+            #
+            # If you specify a [Compute Engine (N1) machine
+            # type](/ml-engine/docs/machine-types-online-prediction) in the
+            # `machineType` field, you must specify `TENSORFLOW`
+            # for the framework.
+        "createTime": "A String", # Output only. The time the version was created.
+        "name": "A String", # Required. The name specified for the version when it was created.
             #
             # The version name must be unique within the model it is created in.
+        "autoScaling": { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
+            # response to increases and decreases in traffic. Care should be
+            # taken to ramp up traffic according to the model's ability to scale
+            # or you will start seeing increases in latency and 429 response codes.
+            #
+            # Note that you cannot use AutoScaling if your version uses
+            # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must specify
+            # `manual_scaling`.
+          "minNodes": 42, # Optional. The minimum number of nodes to allocate for this model. These
+              # nodes are always up, starting from the time the model is deployed.
+              # Therefore, the cost of operating this model will be at least
+              # `rate` * `min_nodes` * number of hours since last billing cycle,
+              # where `rate` is the cost per node-hour as documented in the
+              # [pricing guide](/ml-engine/docs/pricing),
+              # even if no predictions are performed. There is additional cost for each
+              # prediction performed.
+              #
+              # Unlike manual scaling, if the load gets too heavy for the nodes
+              # that are up, the service will automatically add nodes to handle the
+              # increased load as well as scale back as traffic drops, always maintaining
+              # at least `min_nodes`. You will be charged for the time in which additional
+              # nodes are used.
+              #
+              # If `min_nodes` is not specified and AutoScaling is used with a [legacy
+              # (MLS1) machine type](/ml-engine/docs/machine-types-online-prediction),
+              # `min_nodes` defaults to 0, in which case, when traffic to a model stops
+              # (and after a cool-down period), nodes will be shut down and no charges will
+              # be incurred until traffic to the model resumes.
+              #
+              # If `min_nodes` is not specified and AutoScaling is used with a [Compute
+              # Engine (N1) machine type](/ml-engine/docs/machine-types-online-prediction),
+              # `min_nodes` defaults to 1. `min_nodes` must be at least 1 for use with a
+              # Compute Engine machine type.
+              #
+              # Note that you cannot use AutoScaling if your version uses
+              # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use
+              # ManualScaling.
+              #
+              # You can set `min_nodes` when creating the model version, and you can also
+              # update `min_nodes` for an existing version:
+              # &lt;pre&gt;
+              # update_body.json:
+              # {
+              #   'autoScaling': {
+              #     'minNodes': 5
+              #   }
+              # }
+              # &lt;/pre&gt;
+              # HTTP request:
+              # &lt;pre style="max-width: 626px;"&gt;
+              # PATCH
+              # https://ml.googleapis.com/v1/{name=projects/*/models/*/versions/*}?update_mask=autoScaling.minNodes
+              # -d @./update_body.json
+              # &lt;/pre&gt;
+        },
+        "pythonVersion": "A String", # Required. The version of Python used in prediction.
+            #
+            # The following Python versions are available:
+            #
+            # * Python '3.7' is available when `runtime_version` is set to '1.15' or
+            #   later.
+            # * Python '3.5' is available when `runtime_version` is set to a version
+            #   from '1.4' to '1.14'.
+            # * Python '2.7' is available when `runtime_version` is set to '1.15' or
+            #   earlier.
+            #
+            # Read more about the Python versions available for [each runtime
+            # version](/ml-engine/docs/runtime-version-list).
+        "requestLoggingConfig": { # Configuration for logging request-response pairs to a BigQuery table. # Optional. *Only* specify this field in a
+            # projects.models.versions.patch
+            # request. Specifying it in a
+            # projects.models.versions.create
+            # request has no effect.
+            #
+            # Configures the request-response pair logging on predictions from this
+            # Version.
+            # Online prediction requests to a model version and the responses to these
+            # requests are converted to raw strings and saved to the specified BigQuery
+            # table. Logging is constrained by [BigQuery quotas and
+            # limits](/bigquery/quotas). If your project exceeds BigQuery quotas or limits,
+            # AI Platform Prediction does not log request-response pairs, but it continues
+            # to serve predictions.
+            #
+            # If you are using [continuous
+            # evaluation](/ml-engine/docs/continuous-evaluation/), you do not need to
+            # specify this configuration manually. Setting up continuous evaluation
+            # automatically enables logging of request-response pairs.
+          "samplingPercentage": 3.14, # Percentage of requests to be logged, expressed as a fraction from 0 to 1.
+              # For example, if you want to log 10% of requests, enter `0.1`. The sampling
+              # window is the lifetime of the model version. Defaults to 0.
+          "bigqueryTableName": "A String", # Required. Fully qualified BigQuery table name in the following format:
+              # "&lt;var&gt;project_id&lt;/var&gt;.&lt;var&gt;dataset_name&lt;/var&gt;.&lt;var&gt;table_name&lt;/var&gt;"
+              #
+              # The specified table must already exist, and the "Cloud ML Service Agent"
+              # for your project must have permission to write to it. The table must have
+              # the following [schema](/bigquery/docs/schemas):
+              #
+              # &lt;table&gt;
+              #   &lt;tr&gt;&lt;th&gt;Field name&lt;/th&gt;&lt;th style="display: table-cell"&gt;Type&lt;/th&gt;
+              #     &lt;th style="display: table-cell"&gt;Mode&lt;/th&gt;&lt;/tr&gt;
+              #   &lt;tr&gt;&lt;td&gt;model&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+              #   &lt;tr&gt;&lt;td&gt;model_version&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+              #   &lt;tr&gt;&lt;td&gt;time&lt;/td&gt;&lt;td&gt;TIMESTAMP&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+              #   &lt;tr&gt;&lt;td&gt;raw_data&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+              #   &lt;tr&gt;&lt;td&gt;raw_prediction&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;NULLABLE&lt;/td&gt;&lt;/tr&gt;
+              #   &lt;tr&gt;&lt;td&gt;groundtruth&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;NULLABLE&lt;/td&gt;&lt;/tr&gt;
+              # &lt;/table&gt;
+        },
       },
     ],
   }</pre>
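
The `Version` object in the response above has the same shape as the body you send when deploying a new version. As a hedged sketch (an editorial illustration, not generated reference material), a deployment with the discovery-based Python client might look like the following; the project, model, bucket, and field values are placeholder assumptions, and the call returns a long-running operation.

```py
# A minimal sketch, assuming application-default credentials and an existing
# model resource; every name below is a placeholder, not a real resource.
from googleapiclient import discovery

ml = discovery.build('ml', 'v1')

version_body = {
    'name': 'v1',
    'deploymentUri': 'gs://my-bucket/model/',  # kept only as a historical record
    'runtimeVersion': '1.15',
    'pythonVersion': '3.7',       # Python 3.7 requires runtime 1.15 or later
    'machineType': 'mls1-c1-m2',  # the generally available machine type
    'framework': 'TENSORFLOW',
    'autoScaling': {'minNodes': 0},  # 0 is the MLS1 default; N1 requires >= 1
}

operation = ml.projects().models().versions().create(
    parent='projects/my-project/models/my_model',
    body=version_body,
).execute()
```
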
@@ -902,15 +1298,15 @@
 </div>
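
For the `predictionClass` field documented above, a custom prediction routine package must provide a class implementing the Predictor interface. The sketch below is one hypothetical implementation, assuming a pickled scikit-learn model stored as `model.pkl` among the deployed artifacts; the file name and model type are assumptions, not part of the API.

```py
# Hypothetical Predictor implementation; the model file name is an assumption.
import os
import pickle


class MyPredictor(object):
    """Serves predictions from a model loaded in from_path()."""

    def __init__(self, model):
        self._model = model

    def predict(self, instances, **kwargs):
        # instances is a list of prediction inputs; return a JSON-serializable
        # list with one prediction result per instance.
        return self._model.predict(instances).tolist()

    @classmethod
    def from_path(cls, model_dir):
        # model_dir is the local directory holding the version's artifacts.
        with open(os.path.join(model_dir, 'model.pkl'), 'rb') as f:
            model = pickle.load(f)
        return cls(model)
```
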
 
 <div class="method">
-    <code class="details" id="patch">patch(name, body, updateMask=None, x__xgafv=None)</code>
+    <code class="details" id="patch">patch(name, body=None, updateMask=None, x__xgafv=None)</code>
   <pre>Updates the specified Version resource.
 
-Currently the only update-able fields are `description` and
-`autoScaling.minNodes`.
+Currently the only updatable fields are `description`,
+`requestLoggingConfig`, `autoScaling.minNodes`, and `manualScaling.nodes`.
 
 Args:
   name: string, Required. The name of the model. (required)
-  body: object, The request body. (required)
+  body: object, The request body.
     The object takes the form of:
 
 { # Represents a version of the model.
@@ -918,48 +1314,27 @@
     # Each version is a trained model deployed in the cloud, ready to handle
     # prediction requests. A model can have multiple versions. You can get
     # information about all of the versions of a given model by calling
-    # [projects.models.versions.list](/ml-engine/reference/rest/v1/projects.models.versions/list).
-  "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
+    # projects.models.versions.list.
+  "acceleratorConfig": { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
+      # Only specify this field if you have specified a Compute Engine (N1) machine
+      # type in the `machineType` field. Learn more about [using GPUs for online
+      # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+      # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+      # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+      # [accelerators for online
+      # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+    "count": "A String", # The number of accelerators to attach to each machine running the job.
+    "type": "A String", # The type of accelerator to use.
+  },
   "labels": { # Optional. One or more labels that you can add, to organize your model
       # versions. Each label is a key-value pair, where both the key and the value
       # are arbitrary strings that you supply.
       # For more information, see the documentation on
-      # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
+      # &lt;a href="/ml-engine/docs/tensorflow/resource-labels"&gt;using labels&lt;/a&gt;.
     "a_key": "A String",
   },
-  "machineType": "A String", # Optional. The type of machine on which to serve the model. Currently only
-      # applies to online prediction service.
-      # <dl>
-      #   <dt>mls1-c1-m2</dt>
-      #   <dd>
-      #   The <b>default</b> machine type, with 1 core and 2 GB RAM. The deprecated
-      #   name for this machine type is "mls1-highmem-1".
-      #   </dd>
-      #   <dt>mls1-c4-m2</dt>
-      #   <dd>
-      #   In <b>Beta</b>. This machine type has 4 cores and 2 GB RAM. The
-      #   deprecated name for this machine type is "mls1-highcpu-4".
-      #   </dd>
-      # </dl>
-  "description": "A String", # Optional. The description specified for the version when it was created.
-  "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this deployment.
-      # If not set, AI Platform uses the default stable version, 1.0. For more
-      # information, see the
-      # [runtime version list](/ml-engine/docs/runtime-version-list) and
-      # [how to manage runtime versions](/ml-engine/docs/versioning).
-  "manualScaling": { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
-      # model. You should generally use `auto_scaling` with an appropriate
-      # `min_nodes` instead, but this option is available if you want more
-      # predictable billing. Beware that latency and error rates will increase
-      # if the traffic exceeds that capability of the system to serve it based
-      # on the selected number of nodes.
-    "nodes": 42, # The number of nodes to allocate for this model. These nodes are always up,
-        # starting from the time the model is deployed, so the cost of operating
-        # this model will be proportional to `nodes` * number of hours since
-        # last billing cycle plus the cost for each prediction performed.
-  },
   "predictionClass": "A String", # Optional. The fully qualified name
-      # (<var>module_name</var>.<var>class_name</var>) of a class that implements
+      # (&lt;var&gt;module_name&lt;/var&gt;.&lt;var&gt;class_name&lt;/var&gt;) of a class that implements
       # the Predictor interface described in this reference field. The module
       # containing this class should be included in a package provided to the
       # [`packageUris` field](#Version.FIELDS.package_uris).
@@ -967,11 +1342,13 @@
       # Specify this field if and only if you are deploying a [custom prediction
       # routine (beta)](/ml-engine/docs/tensorflow/custom-prediction-routines).
       # If you specify this field, you must set
-      # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater.
+      # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater and
+      # you must set `machineType` to a [legacy (MLS1)
+      # machine type](/ml-engine/docs/machine-types-online-prediction).
       # 
       # The following code sample provides the Predictor interface:
       # 
-      # ```py
+      # &lt;pre style="max-width: 626px;"&gt;
       # class Predictor(object):
       # """Interface for constructing custom predictors."""
       # 
@@ -1007,64 +1384,12 @@
       #         An instance implementing this Predictor class.
       #     """
       #     raise NotImplementedError()
-      # ```
+      # &lt;/pre&gt;
       # 
       # Learn more about [the Predictor interface and custom prediction
       # routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
-  "autoScaling": { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
-      # response to increases and decreases in traffic. Care should be
-      # taken to ramp up traffic according to the model's ability to scale
-      # or you will start seeing increases in latency and 429 response codes.
-    "minNodes": 42, # Optional. The minimum number of nodes to allocate for this model. These
-        # nodes are always up, starting from the time the model is deployed.
-        # Therefore, the cost of operating this model will be at least
-        # `rate` * `min_nodes` * number of hours since last billing cycle,
-        # where `rate` is the cost per node-hour as documented in the
-        # [pricing guide](/ml-engine/docs/pricing),
-        # even if no predictions are performed. There is additional cost for each
-        # prediction performed.
-        #
-        # Unlike manual scaling, if the load gets too heavy for the nodes
-        # that are up, the service will automatically add nodes to handle the
-        # increased load as well as scale back as traffic drops, always maintaining
-        # at least `min_nodes`. You will be charged for the time in which additional
-        # nodes are used.
-        #
-        # If not specified, `min_nodes` defaults to 0, in which case, when traffic
-        # to a model stops (and after a cool-down period), nodes will be shut down
-        # and no charges will be incurred until traffic to the model resumes.
-        #
-        # You can set `min_nodes` when creating the model version, and you can also
-        # update `min_nodes` for an existing version:
-        # <pre>
-        # update_body.json:
-        # {
-        #   'autoScaling': {
-        #     'minNodes': 5
-        #   }
-        # }
-        # </pre>
-        # HTTP request:
-        # <pre>
-        # PATCH
-        # https://ml.googleapis.com/v1/{name=projects/*/models/*/versions/*}?update_mask=autoScaling.minNodes
-        # -d @./update_body.json
-        # </pre>
-  },
   "serviceAccount": "A String", # Optional. Specifies the service account for resource access control.
   "state": "A String", # Output only. The state of a version.
-  "pythonVersion": "A String", # Optional. The version of Python used in prediction. If not set, the default
-      # version is '2.7'. Python '3.5' is available when `runtime_version` is set
-      # to '1.4' and above. Python '2.7' works with all supported runtime versions.
-  "framework": "A String", # Optional. The machine learning framework AI Platform uses to train
-      # this version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,
-      # `XGBOOST`. If you do not specify a framework, AI Platform
-      # will analyze files in the deployment_uri to determine a framework. If you
-      # choose `SCIKIT_LEARN` or `XGBOOST`, you must also set the runtime version
-      # of the model to 1.4 or greater.
-      # 
-      # Do **not** specify a framework if you're deploying a [custom
-      # prediction routine](/ml-engine/docs/tensorflow/custom-prediction-routines).
   "packageUris": [ # Optional. Cloud Storage paths (`gs://…`) of packages for [custom
       # prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines)
       # or [scikit-learn pipelines with custom
@@ -1096,20 +1421,223 @@
       # information.
       # 
       # When passing Version to
-      # [projects.models.versions.create](/ml-engine/reference/rest/v1/projects.models.versions/create)
+      # projects.models.versions.create
       # the model service uses the specified location as the source of the model.
       # Once deployed, the model version is hosted by the prediction service, so
       # this location is useful only as a historical record.
       # The total number of model files can't exceed 1000.
-  "createTime": "A String", # Output only. The time the version was created.
+  "explanationConfig": { # Message holding configuration options for explaining model predictions. # Optional. Configures explainability features on the model's version.
+      # Some explanation features require additional metadata to be loaded
+      # as part of the model payload.
+      # There are two feature attribution methods supported for TensorFlow models:
+      # integrated gradients and sampled Shapley.
+      # [Learn more about feature
+      # attributions.](/ml-engine/docs/ai-explanations/overview)
+    "xraiAttribution": { # Attributes credit by computing the XRAI taking advantage # Attributes credit by computing the XRAI taking advantage
+        # of the model's fully differentiable structure. Refer to this paper for
+        # more details: https://arxiv.org/abs/1906.02825
+        # Currently only implemented for models with natural image inputs.
+        # of the model's fully differentiable structure. Refer to this paper for
+        # more details: https://arxiv.org/abs/1906.02825
+        # Currently only implemented for models with natural image inputs.
+      "numIntegralSteps": 42, # Number of steps for approximating the path integral.
+          # A good value to start is 50 and gradually increase until the
+          # sum to diff property is met within the desired error range.
+    },
+    "sampledShapleyAttribution": { # An attribution method that approximates Shapley values for features that # An attribution method that approximates Shapley values for features that
+        # contribute to the label being predicted. A sampling strategy is used to
+        # approximate the value rather than considering all subsets of features.
+        # contribute to the label being predicted. A sampling strategy is used to
+        # approximate the value rather than considering all subsets of features.
+      "numPaths": 42, # The number of feature permutations to consider when approximating the
+          # Shapley values.
+    },
+    "integratedGradientsAttribution": { # Attributes credit by computing the Aumann-Shapley value taking advantage # Attributes credit by computing the Aumann-Shapley value taking advantage
+        # of the model's fully differentiable structure. Refer to this paper for
+        # more details: http://proceedings.mlr.press/v70/sundararajan17a.html
+        # of the model's fully differentiable structure. Refer to this paper for
+        # more details: http://proceedings.mlr.press/v70/sundararajan17a.html
+      "numIntegralSteps": 42, # Number of steps for approximating the path integral.
+          # A good value to start is 50 and gradually increase until the
+          # sum to diff property is met within the desired error range.
+    },
+  },
   "isDefault": True or False, # Output only. If true, this version will be used to handle prediction
       # requests that do not specify a version.
       # 
       # You can change the default version by calling
-      # [projects.methods.versions.setDefault](/ml-engine/reference/rest/v1/projects.models.versions/setDefault).
-  "name": "A String", # Required.The name specified for the version when it was created.
+      # projects.models.versions.setDefault.
+  "machineType": "A String", # Optional. The type of machine on which to serve the model. Currently only
+      # applies to online prediction service. If this field is not specified, it
+      # defaults to `mls1-c1-m2`.
+      # 
+      # Online prediction supports the following machine types:
+      # 
+      # * `mls1-c1-m2`
+      # * `mls1-c4-m2`
+      # * `n1-standard-2`
+      # * `n1-standard-4`
+      # * `n1-standard-8`
+      # * `n1-standard-16`
+      # * `n1-standard-32`
+      # * `n1-highmem-2`
+      # * `n1-highmem-4`
+      # * `n1-highmem-8`
+      # * `n1-highmem-16`
+      # * `n1-highmem-32`
+      # * `n1-highcpu-2`
+      # * `n1-highcpu-4`
+      # * `n1-highcpu-8`
+      # * `n1-highcpu-16`
+      # * `n1-highcpu-32`
+      # 
+      # `mls1-c1-m2` is generally available. All other machine types are available
+      # in beta. Learn more about the [differences between machine
+      # types](/ml-engine/docs/machine-types-online-prediction).
+  "description": "A String", # Optional. The description specified for the version when it was created.
+  "runtimeVersion": "A String", # Required. The AI Platform runtime version to use for this deployment.
+      # 
+      # For more information, see the
+      # [runtime version list](/ml-engine/docs/runtime-version-list) and
+      # [how to manage runtime versions](/ml-engine/docs/versioning).
+  "manualScaling": { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
+      # model. You should generally use `auto_scaling` with an appropriate
+      # `min_nodes` instead, but this option is available if you want more
+      # predictable billing. Beware that latency and error rates will increase
+      # if the traffic exceeds the capacity of the system to serve it based
+      # on the selected number of nodes.
+    "nodes": 42, # The number of nodes to allocate for this model. These nodes are always up,
+        # starting from the time the model is deployed, so the cost of operating
+        # this model will be proportional to `nodes` * number of hours since
+        # last billing cycle plus the cost for each prediction performed.
+  },
+  "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
+  "framework": "A String", # Optional. The machine learning framework AI Platform uses to train
+      # this version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,
+      # `XGBOOST`. If you do not specify a framework, AI Platform
+      # will analyze files in the deployment_uri to determine a framework. If you
+      # choose `SCIKIT_LEARN` or `XGBOOST`, you must also set the runtime version
+      # of the model to 1.4 or greater.
+      # 
+      # Do **not** specify a framework if you're deploying a [custom
+      # prediction routine](/ml-engine/docs/tensorflow/custom-prediction-routines).
+      # 
+      # If you specify a [Compute Engine (N1) machine
+      # type](/ml-engine/docs/machine-types-online-prediction) in the
+      # `machineType` field, you must specify `TENSORFLOW`
+      # for the framework.
+  "createTime": "A String", # Output only. The time the version was created.
+  "name": "A String", # Required. The name specified for the version when it was created.
       # 
       # The version name must be unique within the model it is created in.
+  "autoScaling": { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
+      # response to increases and decreases in traffic. Care should be
+      # taken to ramp up traffic according to the model's ability to scale
+      # or you will start seeing increases in latency and 429 response codes.
+      # 
+      # Note that you cannot use AutoScaling if your version uses
+      # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must specify
+      # `manual_scaling`.
+    "minNodes": 42, # Optional. The minimum number of nodes to allocate for this model. These
+        # nodes are always up, starting from the time the model is deployed.
+        # Therefore, the cost of operating this model will be at least
+        # `rate` * `min_nodes` * number of hours since last billing cycle,
+        # where `rate` is the cost per node-hour as documented in the
+        # [pricing guide](/ml-engine/docs/pricing),
+        # even if no predictions are performed. There is additional cost for each
+        # prediction performed.
+        #
+        # Unlike manual scaling, if the load gets too heavy for the nodes
+        # that are up, the service will automatically add nodes to handle the
+        # increased load as well as scale back as traffic drops, always maintaining
+        # at least `min_nodes`. You will be charged for the time in which additional
+        # nodes are used.
+        #
+        # If `min_nodes` is not specified and AutoScaling is used with a [legacy
+        # (MLS1) machine type](/ml-engine/docs/machine-types-online-prediction),
+        # `min_nodes` defaults to 0, in which case, when traffic to a model stops
+        # (and after a cool-down period), nodes will be shut down and no charges will
+        # be incurred until traffic to the model resumes.
+        #
+        # If `min_nodes` is not specified and AutoScaling is used with a [Compute
+        # Engine (N1) machine type](/ml-engine/docs/machine-types-online-prediction),
+        # `min_nodes` defaults to 1. `min_nodes` must be at least 1 for use with a
+        # Compute Engine machine type.
+        #
+        # Note that you cannot use AutoScaling if your version uses
+        # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use
+        # ManualScaling.
+        #
+        # You can set `min_nodes` when creating the model version, and you can also
+        # update `min_nodes` for an existing version:
+        # &lt;pre&gt;
+        # update_body.json:
+        # {
+        #   'autoScaling': {
+        #     'minNodes': 5
+        #   }
+        # }
+        # &lt;/pre&gt;
+        # HTTP request:
+        # &lt;pre style="max-width: 626px;"&gt;
+        # PATCH
+        # https://ml.googleapis.com/v1/{name=projects/*/models/*/versions/*}?update_mask=autoScaling.minNodes
+        # -d @./update_body.json
+        # &lt;/pre&gt;
+  },
+  "pythonVersion": "A String", # Required. The version of Python used in prediction.
+      # 
+      # The following Python versions are available:
+      # 
+      # * Python '3.7' is available when `runtime_version` is set to '1.15' or
+      #   later.
+      # * Python '3.5' is available when `runtime_version` is set to a version
+      #   from '1.4' to '1.14'.
+      # * Python '2.7' is available when `runtime_version` is set to '1.15' or
+      #   earlier.
+      # 
+      # Read more about the Python versions available for [each runtime
+      # version](/ml-engine/docs/runtime-version-list).
+  "requestLoggingConfig": { # Configuration for logging request-response pairs to a BigQuery table. # Optional. *Only* specify this field in a
+      # projects.models.versions.patch
+      # request. Specifying it in a
+      # projects.models.versions.create
+      # request has no effect.
+      # 
+      # Configures the request-response pair logging on predictions from this
+      # Version.
+      # Online prediction requests to a model version and the responses to these
+      # requests are converted to raw strings and saved to the specified BigQuery
+      # table. Logging is constrained by [BigQuery quotas and
+      # limits](/bigquery/quotas). If your project exceeds BigQuery quotas or limits,
+      # AI Platform Prediction does not log request-response pairs, but it continues
+      # to serve predictions.
+      #
+      # If you are using [continuous
+      # evaluation](/ml-engine/docs/continuous-evaluation/), you do not need to
+      # specify this configuration manually. Setting up continuous evaluation
+      # automatically enables logging of request-response pairs.
+    "samplingPercentage": 3.14, # Percentage of requests to be logged, expressed as a fraction from 0 to 1.
+        # For example, if you want to log 10% of requests, enter `0.1`. The sampling
+        # window is the lifetime of the model version. Defaults to 0.
+    "bigqueryTableName": "A String", # Required. Fully qualified BigQuery table name in the following format:
+        # "&lt;var&gt;project_id&lt;/var&gt;.&lt;var&gt;dataset_name&lt;/var&gt;.&lt;var&gt;table_name&lt;/var&gt;"
+        #
+        # The specified table must already exist, and the "Cloud ML Service Agent"
+        # for your project must have permission to write to it. The table must have
+        # the following [schema](/bigquery/docs/schemas):
+        #
+        # &lt;table&gt;
+        #   &lt;tr&gt;&lt;th&gt;Field name&lt;/th&gt;&lt;th style="display: table-cell"&gt;Type&lt;/th&gt;
+        #     &lt;th style="display: table-cell"&gt;Mode&lt;/th&gt;&lt;/tr&gt;
+        #   &lt;tr&gt;&lt;td&gt;model&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+        #   &lt;tr&gt;&lt;td&gt;model_version&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+        #   &lt;tr&gt;&lt;td&gt;time&lt;/td&gt;&lt;td&gt;TIMESTAMP&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+        #   &lt;tr&gt;&lt;td&gt;raw_data&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+        #   &lt;tr&gt;&lt;td&gt;raw_prediction&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;NULLABLE&lt;/td&gt;&lt;/tr&gt;
+        #   &lt;tr&gt;&lt;td&gt;groundtruth&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;NULLABLE&lt;/td&gt;&lt;/tr&gt;
+        # &lt;/table&gt;
+  },
 }
 
   updateMask: string, Required. Specifies the path, relative to `Version`, of the field to
@@ -1118,12 +1646,18 @@
 For example, to change the description of a version to "foo", the
 `update_mask` parameter would be specified as `description`, and the
 `PATCH` request body would specify the new value, as follows:
-    {
-      "description": "foo"
-    }
 
-Currently the only supported update mask fields are `description` and
-`autoScaling.minNodes`.
+```
+{
+  "description": "foo"
+}
+```
+
+Currently the only supported update mask fields are `description`,
+`requestLoggingConfig`, `autoScaling.minNodes`, and `manualScaling.nodes`.
+However, you can only update `manualScaling.nodes` if the version uses a
+[Compute Engine (N1)
+machine type](/ml-engine/docs/machine-types-online-prediction).
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
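
Putting the pieces above together, a patch call against one of the supported update-mask fields might look like the following hedged sketch (an editorial illustration, not generated reference material); the resource name and BigQuery table are placeholders, and the version is assumed to already exist.

```py
# A minimal sketch of versions.patch; all names are placeholders.
from googleapiclient import discovery

ml = discovery.build('ml', 'v1')
name = 'projects/my-project/models/my_model/versions/v1'
versions = ml.projects().models().versions()

# Raise the autoscaling floor, mirroring the update_body.json example above.
operation = versions.patch(
    name=name,
    body={'autoScaling': {'minNodes': 5}},
    updateMask='autoScaling.minNodes',
).execute()

# Enable request-response logging, which is honored only on patch (see the
# requestLoggingConfig notes above).
operation = versions.patch(
    name=name,
    body={'requestLoggingConfig': {
        'samplingPercentage': 0.1,  # log roughly 10% of requests
        'bigqueryTableName': 'my-project.my_dataset.my_table',
    }},
    updateMask='requestLoggingConfig',
).execute()
```
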
@@ -1191,7 +1725,7 @@
 Args:
   name: string, Required. The name of the version to make the default for the model. You
 can get the names of all the versions of a model by calling
-[projects.models.versions.list](/ml-engine/reference/rest/v1/projects.models.versions/list). (required)
+projects.models.versions.list. (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -1211,48 +1745,27 @@
       # Each version is a trained model deployed in the cloud, ready to handle
       # prediction requests. A model can have multiple versions. You can get
       # information about all of the versions of a given model by calling
-      # [projects.models.versions.list](/ml-engine/reference/rest/v1/projects.models.versions/list).
-    "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
+      # projects.models.versions.list.
+    "acceleratorConfig": { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
+        # Only specify this field if you have specified a Compute Engine (N1) machine
+        # type in the `machineType` field. Learn more about [using GPUs for online
+        # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+        # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+        # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+        # [accelerators for online
+        # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+      "count": "A String", # The number of accelerators to attach to each machine running the job.
+      "type": "A String", # The type of accelerator to use.
+    },
     "labels": { # Optional. One or more labels that you can add, to organize your model
         # versions. Each label is a key-value pair, where both the key and the value
         # are arbitrary strings that you supply.
         # For more information, see the documentation on
-        # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
+        # &lt;a href="/ml-engine/docs/tensorflow/resource-labels"&gt;using labels&lt;/a&gt;.
       "a_key": "A String",
     },
-    "machineType": "A String", # Optional. The type of machine on which to serve the model. Currently only
-        # applies to online prediction service.
-        # <dl>
-        #   <dt>mls1-c1-m2</dt>
-        #   <dd>
-        #   The <b>default</b> machine type, with 1 core and 2 GB RAM. The deprecated
-        #   name for this machine type is "mls1-highmem-1".
-        #   </dd>
-        #   <dt>mls1-c4-m2</dt>
-        #   <dd>
-        #   In <b>Beta</b>. This machine type has 4 cores and 2 GB RAM. The
-        #   deprecated name for this machine type is "mls1-highcpu-4".
-        #   </dd>
-        # </dl>
-    "description": "A String", # Optional. The description specified for the version when it was created.
-    "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this deployment.
-        # If not set, AI Platform uses the default stable version, 1.0. For more
-        # information, see the
-        # [runtime version list](/ml-engine/docs/runtime-version-list) and
-        # [how to manage runtime versions](/ml-engine/docs/versioning).
-    "manualScaling": { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
-        # model. You should generally use `auto_scaling` with an appropriate
-        # `min_nodes` instead, but this option is available if you want more
-        # predictable billing. Beware that latency and error rates will increase
-        # if the traffic exceeds that capability of the system to serve it based
-        # on the selected number of nodes.
-      "nodes": 42, # The number of nodes to allocate for this model. These nodes are always up,
-          # starting from the time the model is deployed, so the cost of operating
-          # this model will be proportional to `nodes` * number of hours since
-          # last billing cycle plus the cost for each prediction performed.
-    },
     "predictionClass": "A String", # Optional. The fully qualified name
-        # (<var>module_name</var>.<var>class_name</var>) of a class that implements
+        # (&lt;var&gt;module_name&lt;/var&gt;.&lt;var&gt;class_name&lt;/var&gt;) of a class that implements
         # the Predictor interface described in this reference field. The module
         # containing this class should be included in a package provided to the
         # [`packageUris` field](#Version.FIELDS.package_uris).
@@ -1260,11 +1773,13 @@
         # Specify this field if and only if you are deploying a [custom prediction
         # routine (beta)](/ml-engine/docs/tensorflow/custom-prediction-routines).
         # If you specify this field, you must set
-        # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater.
+        # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater and
+        # you must set `machineType` to a [legacy (MLS1)
+        # machine type](/ml-engine/docs/machine-types-online-prediction).
         #
         # The following code sample provides the Predictor interface:
         #
-        # ```py
+        # &lt;pre style="max-width: 626px;"&gt;
         # class Predictor(object):
         # """Interface for constructing custom predictors."""
         #
@@ -1300,64 +1815,12 @@
         #         An instance implementing this Predictor class.
         #     """
         #     raise NotImplementedError()
-        # ```
+        # &lt;/pre&gt;
         #
         # Learn more about [the Predictor interface and custom prediction
         # routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
-    "autoScaling": { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
-        # response to increases and decreases in traffic. Care should be
-        # taken to ramp up traffic according to the model's ability to scale
-        # or you will start seeing increases in latency and 429 response codes.
-      "minNodes": 42, # Optional. The minimum number of nodes to allocate for this model. These
-          # nodes are always up, starting from the time the model is deployed.
-          # Therefore, the cost of operating this model will be at least
-          # `rate` * `min_nodes` * number of hours since last billing cycle,
-          # where `rate` is the cost per node-hour as documented in the
-          # [pricing guide](/ml-engine/docs/pricing),
-          # even if no predictions are performed. There is additional cost for each
-          # prediction performed.
-          #
-          # Unlike manual scaling, if the load gets too heavy for the nodes
-          # that are up, the service will automatically add nodes to handle the
-          # increased load as well as scale back as traffic drops, always maintaining
-          # at least `min_nodes`. You will be charged for the time in which additional
-          # nodes are used.
-          #
-          # If not specified, `min_nodes` defaults to 0, in which case, when traffic
-          # to a model stops (and after a cool-down period), nodes will be shut down
-          # and no charges will be incurred until traffic to the model resumes.
-          #
-          # You can set `min_nodes` when creating the model version, and you can also
-          # update `min_nodes` for an existing version:
-          # <pre>
-          # update_body.json:
-          # {
-          #   'autoScaling': {
-          #     'minNodes': 5
-          #   }
-          # }
-          # </pre>
-          # HTTP request:
-          # <pre>
-          # PATCH
-          # https://ml.googleapis.com/v1/{name=projects/*/models/*/versions/*}?update_mask=autoScaling.minNodes
-          # -d @./update_body.json
-          # </pre>
-    },
     "serviceAccount": "A String", # Optional. Specifies the service account for resource access control.
     "state": "A String", # Output only. The state of a version.
-    "pythonVersion": "A String", # Optional. The version of Python used in prediction. If not set, the default
-        # version is '2.7'. Python '3.5' is available when `runtime_version` is set
-        # to '1.4' and above. Python '2.7' works with all supported runtime versions.
-    "framework": "A String", # Optional. The machine learning framework AI Platform uses to train
-        # this version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,
-        # `XGBOOST`. If you do not specify a framework, AI Platform
-        # will analyze files in the deployment_uri to determine a framework. If you
-        # choose `SCIKIT_LEARN` or `XGBOOST`, you must also set the runtime version
-        # of the model to 1.4 or greater.
-        #
-        # Do **not** specify a framework if you're deploying a [custom
-        # prediction routine](/ml-engine/docs/tensorflow/custom-prediction-routines).
     "packageUris": [ # Optional. Cloud Storage paths (`gs://…`) of packages for [custom
         # prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines)
         # or [scikit-learn pipelines with custom
@@ -1389,20 +1852,223 @@
         # information.
         #
         # When passing Version to
-        # [projects.models.versions.create](/ml-engine/reference/rest/v1/projects.models.versions/create)
+        # projects.models.versions.create
         # the model service uses the specified location as the source of the model.
         # Once deployed, the model version is hosted by the prediction service, so
         # this location is useful only as a historical record.
         # The total number of model files can't exceed 1000.
-    "createTime": "A String", # Output only. The time the version was created.
+    "explanationConfig": { # Message holding configuration options for explaining model predictions. # Optional. Configures explainability features on the model's version.
+        # Some explanation features require additional metadata to be loaded
+        # as part of the model payload.
+        # There are two feature attribution methods supported for TensorFlow models:
+        # integrated gradients and sampled Shapley.
+        # [Learn more about feature
+        # attributions.](/ml-engine/docs/ai-explanations/overview)
+      "xraiAttribution": { # Attributes credit by computing the XRAI taking advantage # Attributes credit by computing the XRAI taking advantage
+          # of the model's fully differentiable structure. Refer to this paper for
+          # more details: https://arxiv.org/abs/1906.02825
+          # Currently only implemented for models with natural image inputs.
+          # of the model's fully differentiable structure. Refer to this paper for
+          # more details: https://arxiv.org/abs/1906.02825
+          # Currently only implemented for models with natural image inputs.
+        "numIntegralSteps": 42, # Number of steps for approximating the path integral.
+            # A good value to start is 50 and gradually increase until the
+            # sum to diff property is met within the desired error range.
+      },
+      "sampledShapleyAttribution": { # An attribution method that approximates Shapley values for features that # An attribution method that approximates Shapley values for features that
+          # contribute to the label being predicted. A sampling strategy is used to
+          # approximate the value rather than considering all subsets of features.
+          # contribute to the label being predicted. A sampling strategy is used to
+          # approximate the value rather than considering all subsets of features.
+        "numPaths": 42, # The number of feature permutations to consider when approximating the
+            # Shapley values.
+      },
+      "integratedGradientsAttribution": { # Attributes credit by computing the Aumann-Shapley value taking advantage # Attributes credit by computing the Aumann-Shapley value taking advantage
+          # of the model's fully differentiable structure. Refer to this paper for
+          # more details: http://proceedings.mlr.press/v70/sundararajan17a.html
+          # of the model's fully differentiable structure. Refer to this paper for
+          # more details: http://proceedings.mlr.press/v70/sundararajan17a.html
+        "numIntegralSteps": 42, # Number of steps for approximating the path integral.
+            # A good value to start is 50 and gradually increase until the
+            # sum to diff property is met within the desired error range.
+      },
+    },
     "isDefault": True or False, # Output only. If true, this version will be used to handle prediction
         # requests that do not specify a version.
         #
         # You can change the default version by calling
-        # [projects.methods.versions.setDefault](/ml-engine/reference/rest/v1/projects.models.versions/setDefault).
-    "name": "A String", # Required.The name specified for the version when it was created.
+        # projects.models.versions.setDefault.
+    "machineType": "A String", # Optional. The type of machine on which to serve the model. Currently only
+        # applies to online prediction service. If this field is not specified, it
+        # defaults to `mls1-c1-m2`.
+        #
+        # Online prediction supports the following machine types:
+        #
+        # * `mls1-c1-m2`
+        # * `mls1-c4-m2`
+        # * `n1-standard-2`
+        # * `n1-standard-4`
+        # * `n1-standard-8`
+        # * `n1-standard-16`
+        # * `n1-standard-32`
+        # * `n1-highmem-2`
+        # * `n1-highmem-4`
+        # * `n1-highmem-8`
+        # * `n1-highmem-16`
+        # * `n1-highmem-32`
+        # * `n1-highcpu-2`
+        # * `n1-highcpu-4`
+        # * `n1-highcpu-8`
+        # * `n1-highcpu-16`
+        # * `n1-highcpu-32`
+        #
+        # `mls1-c1-m2` is generally available. All other machine types are available
+        # in beta. Learn more about the [differences between machine
+        # types](/ml-engine/docs/machine-types-online-prediction).
+    "description": "A String", # Optional. The description specified for the version when it was created.
+    "runtimeVersion": "A String", # Required. The AI Platform runtime version to use for this deployment.
+        #
+        # For more information, see the
+        # [runtime version list](/ml-engine/docs/runtime-version-list) and
+        # [how to manage runtime versions](/ml-engine/docs/versioning).
+    "manualScaling": { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
+        # model. You should generally use `auto_scaling` with an appropriate
+        # `min_nodes` instead, but this option is available if you want more
+        # predictable billing. Beware that latency and error rates will increase
+        # if the traffic exceeds the capacity of the system to serve it based
+        # on the selected number of nodes.
+      "nodes": 42, # The number of nodes to allocate for this model. These nodes are always up,
+          # starting from the time the model is deployed, so the cost of operating
+          # this model will be proportional to `nodes` * number of hours since
+          # last billing cycle plus the cost for each prediction performed.
+    },
+    "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
+    "framework": "A String", # Optional. The machine learning framework AI Platform uses to train
+        # this version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,
+        # `XGBOOST`. If you do not specify a framework, AI Platform
+        # will analyze files in the deployment_uri to determine a framework. If you
+        # choose `SCIKIT_LEARN` or `XGBOOST`, you must also set the runtime version
+        # of the model to 1.4 or greater.
+        #
+        # Do **not** specify a framework if you're deploying a [custom
+        # prediction routine](/ml-engine/docs/tensorflow/custom-prediction-routines).
+        #
+        # If you specify a [Compute Engine (N1) machine
+        # type](/ml-engine/docs/machine-types-online-prediction) in the
+        # `machineType` field, you must specify `TENSORFLOW`
+        # for the framework.
+    "createTime": "A String", # Output only. The time the version was created.
+    "name": "A String", # Required. The name specified for the version when it was created.
         #
         # The version name must be unique within the model it is created in.
+    "autoScaling": { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
+        # response to increases and decreases in traffic. Care should be
+        # taken to ramp up traffic according to the model's ability to scale,
+        # or you will start seeing increases in latency and 429 response codes.
+        #
+        # Note that you cannot use AutoScaling if your version uses
+        # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must specify
+        # `manual_scaling`.
+      "minNodes": 42, # Optional. The minimum number of nodes to allocate for this model. These
+          # nodes are always up, starting from the time the model is deployed.
+          # Therefore, the cost of operating this model will be at least
+          # `rate` * `min_nodes` * number of hours since last billing cycle,
+          # where `rate` is the cost per node-hour as documented in the
+          # [pricing guide](/ml-engine/docs/pricing),
+          # even if no predictions are performed. There is additional cost for each
+          # prediction performed.
+          #
+          # Unlike manual scaling, if the load gets too heavy for the nodes
+          # that are up, the service will automatically add nodes to handle the
+          # increased load as well as scale back as traffic drops, always maintaining
+          # at least `min_nodes`. You will be charged for the time in which additional
+          # nodes are used.
+          #
+          # If `min_nodes` is not specified and AutoScaling is used with a [legacy
+          # (MLS1) machine type](/ml-engine/docs/machine-types-online-prediction),
+          # `min_nodes` defaults to 0, in which case, when traffic to a model stops
+          # (and after a cool-down period), nodes will be shut down and no charges will
+          # be incurred until traffic to the model resumes.
+          #
+          # If `min_nodes` is not specified and AutoScaling is used with a [Compute
+          # Engine (N1) machine type](/ml-engine/docs/machine-types-online-prediction),
+          # `min_nodes` defaults to 1. `min_nodes` must be at least 1 for use with a
+          # Compute Engine machine type.
+          #
+          # Note that you cannot use AutoScaling if your version uses
+          # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use
+          # ManualScaling.
+          #
+          # You can set `min_nodes` when creating the model version, and you can also
+          # update `min_nodes` for an existing version:
+          # &lt;pre&gt;
+          # update_body.json:
+          # {
+          #   'autoScaling': {
+          #     'minNodes': 5
+          #   }
+          # }
+          # &lt;/pre&gt;
+          # HTTP request:
+          # &lt;pre style="max-width: 626px;"&gt;
+          # PATCH
+          # https://ml.googleapis.com/v1/{name=projects/*/models/*/versions/*}?update_mask=autoScaling.minNodes
+          # -d @./update_body.json
+          # &lt;/pre&gt;
+    },
+    "pythonVersion": "A String", # Required. The version of Python used in prediction.
+        #
+        # The following Python versions are available:
+        #
+        # * Python '3.7' is available when `runtime_version` is set to '1.15' or
+        #   later.
+        # * Python '3.5' is available when `runtime_version` is set to a version
+        #   from '1.4' to '1.14'.
+        # * Python '2.7' is available when `runtime_version` is set to '1.15' or
+        #   earlier.
+        #
+        # Read more about the Python versions available for [each runtime
+        # version](/ml-engine/docs/runtime-version-list).
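+        #
+        # For example, the following pairing is consistent with the rules
+        # above (illustrative only):
+        # &lt;pre&gt;
+        # {
+        #   'runtimeVersion': '1.15',
+        #   'pythonVersion': '3.7'
+        # }
+        # &lt;/pre&gt;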
+    "requestLoggingConfig": { # Configuration for logging request-response pairs to a BigQuery table. # Optional. *Only* specify this field in a
+        # projects.models.versions.patch
+        # request. Specifying it in a
+        # projects.models.versions.create
+        # request has no effect.
+        #
+        # Configures the request-response pair logging on predictions from this
+        # Version.
+        # Online prediction requests to a model version and the responses to these
+        # requests are converted to raw strings and saved to the specified BigQuery
+        # table. Logging is constrained by [BigQuery quotas and
+        # limits](/bigquery/quotas). If your project exceeds BigQuery quotas or limits,
+        # AI Platform Prediction does not log request-response pairs, but it continues
+        # to serve predictions.
+        #
+        # If you are using [continuous
+        # evaluation](/ml-engine/docs/continuous-evaluation/), you do not need to
+        # specify this configuration manually. Setting up continuous evaluation
+        # automatically enables logging of request-response pairs.
+      "samplingPercentage": 3.14, # Percentage of requests to be logged, expressed as a fraction from 0 to 1.
+          # For example, if you want to log 10% of requests, enter `0.1`. The sampling
+          # window is the lifetime of the model version. Defaults to 0.
+      "bigqueryTableName": "A String", # Required. Fully qualified BigQuery table name in the following format:
+          # "&lt;var&gt;project_id&lt;/var&gt;.&lt;var&gt;dataset_name&lt;/var&gt;.&lt;var&gt;table_name&lt;/var&gt;"
+          #
+          # The specified table must already exist, and the "Cloud ML Service Agent"
+          # for your project must have permission to write to it. The table must have
+          # the following [schema](/bigquery/docs/schemas):
+          #
+          # &lt;table&gt;
+          #   &lt;tr&gt;&lt;th&gt;Field name&lt;/th&gt;&lt;th style="display: table-cell"&gt;Type&lt;/th&gt;
+          #     &lt;th style="display: table-cell"&gt;Mode&lt;/th&gt;&lt;/tr&gt;
+          #   &lt;tr&gt;&lt;td&gt;model&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+          #   &lt;tr&gt;&lt;td&gt;model_version&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+          #   &lt;tr&gt;&lt;td&gt;time&lt;/td&gt;&lt;td&gt;TIMESTAMP&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+          #   &lt;tr&gt;&lt;td&gt;raw_data&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;REQUIRED&lt;/td&gt;&lt;/tr&gt;
+          #   &lt;tr&gt;&lt;td&gt;raw_prediction&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;NULLABLE&lt;/td&gt;&lt;/tr&gt;
+          #   &lt;tr&gt;&lt;td&gt;groundtruth&lt;/td&gt;&lt;td&gt;STRING&lt;/td&gt;&lt;td&gt;NULLABLE&lt;/td&gt;&lt;/tr&gt;
+          # &lt;/table&gt;
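+          #
+          # For example, modeled on the `minNodes` example above, the
+          # following sketch enables logging of 10% of requests (the project,
+          # dataset, and table names are placeholders, and it assumes
+          # `requestLoggingConfig` is accepted in `update_mask`):
+          # &lt;pre&gt;
+          # update_body.json:
+          # {
+          #   'requestLoggingConfig': {
+          #     'samplingPercentage': 0.1,
+          #     'bigqueryTableName': 'your_project.your_dataset.your_table'
+          #   }
+          # }
+          # &lt;/pre&gt;
+          # HTTP request:
+          # &lt;pre style="max-width: 626px;"&gt;
+          # PATCH
+          # https://ml.googleapis.com/v1/{name=projects/*/models/*/versions/*}?update_mask=requestLoggingConfig
+          # -d @./update_body.json
+          # &lt;/pre&gt;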
+    },
   }</pre>
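+<p>As a minimal end-to-end sketch, a Version like the one documented above
+can be deployed with this client library; the project, model, bucket, and
+version names below are all placeholders:</p>
+<pre>
+from googleapiclient import discovery
+
+# Build a client for the AI Platform Training and Prediction API.
+ml = discovery.build('ml', 'v1')
+
+# Deploy a new version under an existing model.
+request = ml.projects().models().versions().create(
+    parent='projects/your-project/models/your_model',
+    body={
+        'name': 'v1',
+        'deploymentUri': 'gs://your-bucket/your-model-dir/',
+        'runtimeVersion': '1.15',
+        'pythonVersion': '3.7',
+        'framework': 'TENSORFLOW',
+    })
+
+# create() returns a long-running Operation that tracks the deployment.
+response = request.execute()
+print(response)
+</pre>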
 </div>