docs: update generated docs (#981)
diff --git a/docs/dyn/ml_v1.projects.models.html b/docs/dyn/ml_v1.projects.models.html
index 4687f5e..b7dd28e 100644
--- a/docs/dyn/ml_v1.projects.models.html
+++ b/docs/dyn/ml_v1.projects.models.html
@@ -92,7 +92,7 @@
<code><a href="#getIamPolicy">getIamPolicy(resource, options_requestedPolicyVersion=None, x__xgafv=None)</a></code></p>
<p class="firstline">Gets the access control policy for a resource.</p>
<p class="toc_element">
- <code><a href="#list">list(parent, filter=None, pageToken=None, pageSize=None, x__xgafv=None)</a></code></p>
+ <code><a href="#list">list(parent, pageSize=None, pageToken=None, filter=None, x__xgafv=None)</a></code></p>
<p class="firstline">Lists the models in a project.</p>
<p class="toc_element">
<code><a href="#list_next">list_next(previous_request, previous_response)</a></code></p>
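For orientation, a minimal sketch of paging through models with the
documented list/list_next pair; the project ID 'my-project' is a
placeholder:

    from googleapiclient import discovery

    # Build a client for the AI Platform Training & Prediction API.
    ml = discovery.build('ml', 'v1')

    request = ml.projects().models().list(
        parent='projects/my-project', pageSize=50)
    while request is not None:
        response = request.execute()
        for model in response.get('models', []):
            print(model['name'])
        # list_next returns None once the last page has been consumed.
        request = ml.projects().models().list_next(
            previous_request=request, previous_response=response)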
@@ -121,410 +121,53 @@
The object takes the form of:
{ # Represents a machine learning solution.
- #
- # A model can have multiple versions, each of which is a deployed, trained
- # model ready to receive prediction requests. The model itself is just a
- # container.
- "defaultVersion": { # Represents a version of the model. # Output only. The default version of the model. This version will be used to
- # handle prediction requests that do not specify a version.
#
- # You can change the default version by calling
- # projects.models.versions.setDefault.
- #
- # Each version is a trained model deployed in the cloud, ready to handle
- # prediction requests. A model can have multiple versions. You can get
- # information about all of the versions of a given model by calling
- # projects.models.versions.list.
- "acceleratorConfig": { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
- # Only specify this field if you have specified a Compute Engine (N1) machine
- # type in the `machineType` field. Learn more about [using GPUs for online
- # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
- # Note that the AcceleratorConfig can be used in both Jobs and Versions.
- # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
- # [accelerators for online
- # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
- "count": "A String", # The number of accelerators to attach to each machine running the job.
- "type": "A String", # The type of accelerator to use.
- },
- "isDefault": True or False, # Output only. If true, this version will be used to handle prediction
- # requests that do not specify a version.
- #
- # You can change the default version by calling
- # projects.methods.versions.setDefault.
- "manualScaling": { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
- # model. You should generally use `auto_scaling` with an appropriate
- # `min_nodes` instead, but this option is available if you want more
- # predictable billing. Beware that latency and error rates will increase
- # if the traffic exceeds that capability of the system to serve it based
- # on the selected number of nodes.
- "nodes": 42, # The number of nodes to allocate for this model. These nodes are always up,
- # starting from the time the model is deployed, so the cost of operating
- # this model will be proportional to `nodes` * number of hours since
- # last billing cycle plus the cost for each prediction performed.
- },
- "state": "A String", # Output only. The state of a version.
- "name": "A String", # Required. The name specified for the version when it was created.
- #
- # The version name must be unique within the model it is created in.
- "serviceAccount": "A String", # Optional. Specifies the service account for resource access control.
- "pythonVersion": "A String", # Required. The version of Python used in prediction.
- #
- # The following Python versions are available:
- #
- # * Python '3.7' is available when `runtime_version` is set to '1.15' or
- # later.
- # * Python '3.5' is available when `runtime_version` is set to a version
- # from '1.4' to '1.14'.
- # * Python '2.7' is available when `runtime_version` is set to '1.15' or
- # earlier.
- #
- # Read more about the Python versions available for [each runtime
- # version](/ml-engine/docs/runtime-version-list).
- "lastUseTime": "A String", # Output only. The time the version was last used for prediction.
- "predictionClass": "A String", # Optional. The fully qualified name
- # (<var>module_name</var>.<var>class_name</var>) of a class that implements
- # the Predictor interface described in this reference field. The module
- # containing this class should be included in a package provided to the
- # [`packageUris` field](#Version.FIELDS.package_uris).
- #
- # Specify this field if and only if you are deploying a [custom prediction
- # routine (beta)](/ml-engine/docs/tensorflow/custom-prediction-routines).
- # If you specify this field, you must set
- # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater and
- # you must set `machineType` to a [legacy (MLS1)
- # machine type](/ml-engine/docs/machine-types-online-prediction).
- #
- # The following code sample provides the Predictor interface:
- #
- # <pre style="max-width: 626px;">
- # class Predictor(object):
- # """Interface for constructing custom predictors."""
- #
- # def predict(self, instances, **kwargs):
- # """Performs custom prediction.
- #
- # Instances are the decoded values from the request. They have already
- # been deserialized from JSON.
- #
- # Args:
- # instances: A list of prediction input instances.
- # **kwargs: A dictionary of keyword args provided as additional
- # fields on the predict request body.
- #
- # Returns:
- # A list of outputs containing the prediction results. This list must
- # be JSON serializable.
- # """
- # raise NotImplementedError()
- #
- # @classmethod
- # def from_path(cls, model_dir):
- # """Creates an instance of Predictor using the given path.
- #
- # Loading of the predictor should be done in this method.
- #
- # Args:
- # model_dir: The local directory that contains the exported model
- # file along with any additional files uploaded when creating the
- # version resource.
- #
- # Returns:
- # An instance implementing this Predictor class.
- # """
- # raise NotImplementedError()
- # </pre>
- #
- # Learn more about [the Predictor interface and custom prediction
- # routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
- "deploymentUri": "A String", # Required. The Cloud Storage location of the trained model used to
- # create the version. See the
- # [guide to model
- # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
- # information.
- #
- # When passing Version to
- # projects.models.versions.create
- # the model service uses the specified location as the source of the model.
- # Once deployed, the model version is hosted by the prediction service, so
- # this location is useful only as a historical record.
- # The total number of model files can't exceed 1000.
- "packageUris": [ # Optional. Cloud Storage paths (`gs://…`) of packages for [custom
- # prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines)
- # or [scikit-learn pipelines with custom
- # code](/ml-engine/docs/scikit/exporting-for-prediction#custom-pipeline-code).
- #
- # For a custom prediction routine, one of these packages must contain your
- # Predictor class (see
- # [`predictionClass`](#Version.FIELDS.prediction_class)). Additionally,
- # include any dependencies used by your Predictor or scikit-learn pipeline
- # uses that are not already included in your selected [runtime
- # version](/ml-engine/docs/tensorflow/runtime-version-list).
- #
- # If you specify this field, you must also set
- # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater.
+ # A model can have multiple versions, each of which is a deployed, trained
+ # model ready to receive prediction requests. The model itself is just a
+ # container.
+ "description": "A String", # Optional. The description specified for the model when it was created.
+ "regions": [ # Optional. The list of regions where the model is going to be deployed.
+ # Only one region per model is supported.
+ # Defaults to 'us-central1' if nothing is set.
+ # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
+ # for AI Platform services.
+ # Note:
+ # * No matter where a model is deployed, it can always be accessed by
+ # users from anywhere, both for online and batch prediction.
+ # * The region for a batch prediction job is set by the region field when
+ # submitting the batch prediction job and does not take its value from
+ # this field.
"A String",
],
- "explanationConfig": { # Message holding configuration options for explaining model predictions. # Optional. Configures explainability features on the model's version.
- # Some explanation features require additional metadata to be loaded
- # as part of the model payload.
- # There are two feature attribution methods supported for TensorFlow models:
- # integrated gradients and sampled Shapley.
- # [Learn more about feature
- # attributions.](/ai-platform/prediction/docs/ai-explanations/overview)
- "integratedGradientsAttribution": { # Attributes credit by computing the Aumann-Shapley value taking advantage # Attributes credit by computing the Aumann-Shapley value taking advantage
- # of the model's fully differentiable structure. Refer to this paper for
- # more details: http://proceedings.mlr.press/v70/sundararajan17a.html
- # of the model's fully differentiable structure. Refer to this paper for
- # more details: https://arxiv.org/abs/1703.01365
- "numIntegralSteps": 42, # Number of steps for approximating the path integral.
- # A good value to start is 50 and gradually increase until the
- # sum to diff property is met within the desired error range.
- },
- "sampledShapleyAttribution": { # An attribution method that approximates Shapley values for features that # An attribution method that approximates Shapley values for features that
- # contribute to the label being predicted. A sampling strategy is used to
- # approximate the value rather than considering all subsets of features.
- # contribute to the label being predicted. A sampling strategy is used to
- # approximate the value rather than considering all subsets of features.
- "numPaths": 42, # The number of feature permutations to consider when approximating the
- # Shapley values.
- },
- "xraiAttribution": { # Attributes credit by computing the XRAI taking advantage # Attributes credit by computing the XRAI taking advantage
- # of the model's fully differentiable structure. Refer to this paper for
- # more details: https://arxiv.org/abs/1906.02825
- # Currently only implemented for models with natural image inputs.
- # of the model's fully differentiable structure. Refer to this paper for
- # more details: https://arxiv.org/abs/1906.02825
- # Currently only implemented for models with natural image inputs.
- "numIntegralSteps": 42, # Number of steps for approximating the path integral.
- # A good value to start is 50 and gradually increase until the
- # sum to diff property is met within the desired error range.
- },
- },
- "autoScaling": { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
- # response to increases and decreases in traffic. Care should be
- # taken to ramp up traffic according to the model's ability to scale
- # or you will start seeing increases in latency and 429 response codes.
- #
- # Note that you cannot use AutoScaling if your version uses
- # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use specify
- # `manual_scaling`.
- "minNodes": 42, # Optional. The minimum number of nodes to allocate for this model. These
- # nodes are always up, starting from the time the model is deployed.
- # Therefore, the cost of operating this model will be at least
- # `rate` * `min_nodes` * number of hours since last billing cycle,
- # where `rate` is the cost per node-hour as documented in the
- # [pricing guide](/ml-engine/docs/pricing),
- # even if no predictions are performed. There is additional cost for each
- # prediction performed.
- #
- # Unlike manual scaling, if the load gets too heavy for the nodes
- # that are up, the service will automatically add nodes to handle the
- # increased load as well as scale back as traffic drops, always maintaining
- # at least `min_nodes`. You will be charged for the time in which additional
- # nodes are used.
- #
- # If `min_nodes` is not specified and AutoScaling is used with a [legacy
- # (MLS1) machine type](/ml-engine/docs/machine-types-online-prediction),
- # `min_nodes` defaults to 0, in which case, when traffic to a model stops
- # (and after a cool-down period), nodes will be shut down and no charges will
- # be incurred until traffic to the model resumes.
- #
- # If `min_nodes` is not specified and AutoScaling is used with a [Compute
- # Engine (N1) machine type](/ml-engine/docs/machine-types-online-prediction),
- # `min_nodes` defaults to 1. `min_nodes` must be at least 1 for use with a
- # Compute Engine machine type.
- #
- # Note that you cannot use AutoScaling if your version uses
- # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use
- # ManualScaling.
- #
- # You can set `min_nodes` when creating the model version, and you can also
- # update `min_nodes` for an existing version:
- # <pre>
- # update_body.json:
- # {
- # 'autoScaling': {
- # 'minNodes': 5
- # }
- # }
- # </pre>
- # HTTP request:
- # <pre style="max-width: 626px;">
- # PATCH
- # https://ml.googleapis.com/v1/{name=projects/*/models/*/versions/*}?update_mask=autoScaling.minNodes
- # -d @./update_body.json
- # </pre>
- },
- "createTime": "A String", # Output only. The time the version was created.
- "labels": { # Optional. One or more labels that you can add, to organize your model
- # versions. Each label is a key-value pair, where both the key and the value
- # are arbitrary strings that you supply.
- # For more information, see the documentation on
- # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
- "a_key": "A String",
- },
- "requestLoggingConfig": { # Configuration for logging request-response pairs to a BigQuery table. # Optional. *Only* specify this field in a
- # projects.models.versions.patch
- # request. Specifying it in a
- # projects.models.versions.create
- # request has no effect.
- #
- # Configures the request-response pair logging on predictions from this
- # Version.
- # Online prediction requests to a model version and the responses to these
- # requests are converted to raw strings and saved to the specified BigQuery
- # table. Logging is constrained by [BigQuery quotas and
- # limits](/bigquery/quotas). If your project exceeds BigQuery quotas or limits,
- # AI Platform Prediction does not log request-response pairs, but it continues
- # to serve predictions.
- #
- # If you are using [continuous
- # evaluation](/ml-engine/docs/continuous-evaluation/), you do not need to
- # specify this configuration manually. Setting up continuous evaluation
- # automatically enables logging of request-response pairs.
- "bigqueryTableName": "A String", # Required. Fully qualified BigQuery table name in the following format:
- # "<var>project_id</var>.<var>dataset_name</var>.<var>table_name</var>"
- #
- # The specified table must already exist, and the "Cloud ML Service Agent"
- # for your project must have permission to write to it. The table must have
- # the following [schema](/bigquery/docs/schemas):
- #
- # <table>
- # <tr><th>Field name</th><th style="display: table-cell">Type</th>
- # <th style="display: table-cell">Mode</th></tr>
- # <tr><td>model</td><td>STRING</td><td>REQUIRED</td></tr>
- # <tr><td>model_version</td><td>STRING</td><td>REQUIRED</td></tr>
- # <tr><td>time</td><td>TIMESTAMP</td><td>REQUIRED</td></tr>
- # <tr><td>raw_data</td><td>STRING</td><td>REQUIRED</td></tr>
- # <tr><td>raw_prediction</td><td>STRING</td><td>NULLABLE</td></tr>
- # <tr><td>groundtruth</td><td>STRING</td><td>NULLABLE</td></tr>
- # </table>
- "samplingPercentage": 3.14, # Percentage of requests to be logged, expressed as a fraction from 0 to 1.
- # For example, if you want to log 10% of requests, enter `0.1`. The sampling
- # window is the lifetime of the model version. Defaults to 0.
- },
- "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
- "machineType": "A String", # Optional. The type of machine on which to serve the model. Currently only
- # applies to online prediction service. If this field is not specified, it
- # defaults to `mls1-c1-m2`.
- #
- # Online prediction supports the following machine types:
- #
- # * `mls1-c1-m2`
- # * `mls1-c4-m2`
- # * `n1-standard-2`
- # * `n1-standard-4`
- # * `n1-standard-8`
- # * `n1-standard-16`
- # * `n1-standard-32`
- # * `n1-highmem-2`
- # * `n1-highmem-4`
- # * `n1-highmem-8`
- # * `n1-highmem-16`
- # * `n1-highmem-32`
- # * `n1-highcpu-2`
- # * `n1-highcpu-4`
- # * `n1-highcpu-8`
- # * `n1-highcpu-16`
- # * `n1-highcpu-32`
- #
- # `mls1-c1-m2` is generally available. All other machine types are available
- # in beta. Learn more about the [differences between machine
- # types](/ml-engine/docs/machine-types-online-prediction).
- "runtimeVersion": "A String", # Required. The AI Platform runtime version to use for this deployment.
- #
- # For more information, see the
- # [runtime version list](/ml-engine/docs/runtime-version-list) and
- # [how to manage runtime versions](/ml-engine/docs/versioning).
- "description": "A String", # Optional. The description specified for the version when it was created.
- "framework": "A String", # Optional. The machine learning framework AI Platform uses to train
- # this version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,
- # `XGBOOST`. If you do not specify a framework, AI Platform
- # will analyze files in the deployment_uri to determine a framework. If you
- # choose `SCIKIT_LEARN` or `XGBOOST`, you must also set the runtime version
- # of the model to 1.4 or greater.
- #
- # Do **not** specify a framework if you're deploying a [custom
- # prediction routine](/ml-engine/docs/tensorflow/custom-prediction-routines).
- #
- # If you specify a [Compute Engine (N1) machine
- # type](/ml-engine/docs/machine-types-online-prediction) in the
- # `machineType` field, you must specify `TENSORFLOW`
- # for the framework.
+ "name": "A String", # Required. The name specified for the model when it was created.
+ #
+ # The model name must be unique within the project it is created in.
+ "onlinePredictionConsoleLogging": True or False, # Optional. If true, online prediction nodes send `stderr` and `stdout`
+ # streams to Stackdriver Logging. These can be more verbose than the standard
+ # access logs (see `onlinePredictionLogging`) and can incur higher cost.
+ # However, they are helpful for debugging. Note that
+ # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
+ # your project receives prediction requests at a high QPS. Estimate your
+ # costs before enabling this option.
+ #
+ # Default is false.
"etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
# prevent simultaneous updates of a model from overwriting each other.
# It is strongly suggested that systems make use of the `etag` in the
# read-modify-write cycle to perform model updates in order to avoid race
- # conditions: An `etag` is returned in the response to `GetVersion`, and
- # systems are expected to put that etag in the request to `UpdateVersion` to
+ # conditions: An `etag` is returned in the response to `GetModel`, and
+ # systems are expected to put that etag in the request to `UpdateModel` to
# ensure that their change will be applied to the model as intended.
- },
- "onlinePredictionConsoleLogging": True or False, # Optional. If true, online prediction nodes send `stderr` and `stdout`
- # streams to Stackdriver Logging. These can be more verbose than the standard
- # access logs (see `onlinePredictionLogging`) and can incur higher cost.
- # However, they are helpful for debugging. Note that
- # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
- # your project receives prediction requests at a high QPS. Estimate your
- # costs before enabling this option.
- #
- # Default is false.
- "regions": [ # Optional. The list of regions where the model is going to be deployed.
- # Only one region per model is supported.
- # Defaults to 'us-central1' if nothing is set.
- # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
- # for AI Platform services.
- # Note:
- # * No matter where a model is deployed, it can always be accessed by
- # users from anywhere, both for online and batch prediction.
- # * The region for a batch prediction job is set by the region field when
- # submitting the batch prediction job and does not take its value from
- # this field.
- "A String",
- ],
- "description": "A String", # Optional. The description specified for the model when it was created.
- "onlinePredictionLogging": True or False, # Optional. If true, online prediction access logs are sent to StackDriver
- # Logging. These logs are like standard server access logs, containing
- # information like timestamp and latency for each request. Note that
- # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
- # your project receives prediction requests at a high queries per second rate
- # (QPS). Estimate your costs before enabling this option.
- #
- # Default is false.
- "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
- # prevent simultaneous updates of a model from overwriting each other.
- # It is strongly suggested that systems make use of the `etag` in the
- # read-modify-write cycle to perform model updates in order to avoid race
- # conditions: An `etag` is returned in the response to `GetModel`, and
- # systems are expected to put that etag in the request to `UpdateModel` to
- # ensure that their change will be applied to the model as intended.
- "labels": { # Optional. One or more labels that you can add, to organize your models.
- # Each label is a key-value pair, where both the key and the value are
- # arbitrary strings that you supply.
- # For more information, see the documentation on
- # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
- "a_key": "A String",
- },
- "name": "A String", # Required. The name specified for the model when it was created.
- #
- # The model name must be unique within the project it is created in.
-}
-
- x__xgafv: string, V1 error format.
- Allowed values
- 1 - v1 error format
- 2 - v2 error format
-
-Returns:
- An object of the form:
-
- { # Represents a machine learning solution.
- #
- # A model can have multiple versions, each of which is a deployed, trained
- # model ready to receive prediction requests. The model itself is just a
- # container.
+ "labels": { # Optional. One or more labels that you can add, to organize your models.
+ # Each label is a key-value pair, where both the key and the value are
+ # arbitrary strings that you supply.
+ # For more information, see the documentation on
+ # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
+ "a_key": "A String",
+ },
"defaultVersion": { # Represents a version of the model. # Output only. The default version of the model. This version will be used to
# handle prediction requests that do not specify a version.
- #
+ #
# You can change the default version by calling
# projects.models.versions.setDefault.
#
@@ -532,119 +175,40 @@
# prediction requests. A model can have multiple versions. You can get
# information about all of the versions of a given model by calling
# projects.models.versions.list.
- "acceleratorConfig": { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
- # Only specify this field if you have specified a Compute Engine (N1) machine
- # type in the `machineType` field. Learn more about [using GPUs for online
- # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
- # Note that the AcceleratorConfig can be used in both Jobs and Versions.
- # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
- # [accelerators for online
- # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
- "count": "A String", # The number of accelerators to attach to each machine running the job.
- "type": "A String", # The type of accelerator to use.
+ "labels": { # Optional. One or more labels that you can add, to organize your model
+ # versions. Each label is a key-value pair, where both the key and the value
+ # are arbitrary strings that you supply.
+ # For more information, see the documentation on
+ # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
+ "a_key": "A String",
},
- "isDefault": True or False, # Output only. If true, this version will be used to handle prediction
- # requests that do not specify a version.
+ "machineType": "A String", # Optional. The type of machine on which to serve the model. Currently only
+ # applies to online prediction service. If this field is not specified, it
+ # defaults to `mls1-c1-m2`.
#
- # You can change the default version by calling
- # projects.methods.versions.setDefault.
- "manualScaling": { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
- # model. You should generally use `auto_scaling` with an appropriate
- # `min_nodes` instead, but this option is available if you want more
- # predictable billing. Beware that latency and error rates will increase
- # if the traffic exceeds that capability of the system to serve it based
- # on the selected number of nodes.
- "nodes": 42, # The number of nodes to allocate for this model. These nodes are always up,
- # starting from the time the model is deployed, so the cost of operating
- # this model will be proportional to `nodes` * number of hours since
- # last billing cycle plus the cost for each prediction performed.
- },
- "state": "A String", # Output only. The state of a version.
- "name": "A String", # Required. The name specified for the version when it was created.
+ # Online prediction supports the following machine types:
#
- # The version name must be unique within the model it is created in.
- "serviceAccount": "A String", # Optional. Specifies the service account for resource access control.
- "pythonVersion": "A String", # Required. The version of Python used in prediction.
+ # * `mls1-c1-m2`
+ # * `mls1-c4-m2`
+ # * `n1-standard-2`
+ # * `n1-standard-4`
+ # * `n1-standard-8`
+ # * `n1-standard-16`
+ # * `n1-standard-32`
+ # * `n1-highmem-2`
+ # * `n1-highmem-4`
+ # * `n1-highmem-8`
+ # * `n1-highmem-16`
+ # * `n1-highmem-32`
+ # * `n1-highcpu-2`
+ # * `n1-highcpu-4`
+ # * `n1-highcpu-8`
+ # * `n1-highcpu-16`
+ # * `n1-highcpu-32`
#
- # The following Python versions are available:
- #
- # * Python '3.7' is available when `runtime_version` is set to '1.15' or
- # later.
- # * Python '3.5' is available when `runtime_version` is set to a version
- # from '1.4' to '1.14'.
- # * Python '2.7' is available when `runtime_version` is set to '1.15' or
- # earlier.
- #
- # Read more about the Python versions available for [each runtime
- # version](/ml-engine/docs/runtime-version-list).
- "lastUseTime": "A String", # Output only. The time the version was last used for prediction.
- "predictionClass": "A String", # Optional. The fully qualified name
- # (<var>module_name</var>.<var>class_name</var>) of a class that implements
- # the Predictor interface described in this reference field. The module
- # containing this class should be included in a package provided to the
- # [`packageUris` field](#Version.FIELDS.package_uris).
- #
- # Specify this field if and only if you are deploying a [custom prediction
- # routine (beta)](/ml-engine/docs/tensorflow/custom-prediction-routines).
- # If you specify this field, you must set
- # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater and
- # you must set `machineType` to a [legacy (MLS1)
- # machine type](/ml-engine/docs/machine-types-online-prediction).
- #
- # The following code sample provides the Predictor interface:
- #
- # <pre style="max-width: 626px;">
- # class Predictor(object):
- # """Interface for constructing custom predictors."""
- #
- # def predict(self, instances, **kwargs):
- # """Performs custom prediction.
- #
- # Instances are the decoded values from the request. They have already
- # been deserialized from JSON.
- #
- # Args:
- # instances: A list of prediction input instances.
- # **kwargs: A dictionary of keyword args provided as additional
- # fields on the predict request body.
- #
- # Returns:
- # A list of outputs containing the prediction results. This list must
- # be JSON serializable.
- # """
- # raise NotImplementedError()
- #
- # @classmethod
- # def from_path(cls, model_dir):
- # """Creates an instance of Predictor using the given path.
- #
- # Loading of the predictor should be done in this method.
- #
- # Args:
- # model_dir: The local directory that contains the exported model
- # file along with any additional files uploaded when creating the
- # version resource.
- #
- # Returns:
- # An instance implementing this Predictor class.
- # """
- # raise NotImplementedError()
- # </pre>
- #
- # Learn more about [the Predictor interface and custom prediction
- # routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
- "deploymentUri": "A String", # Required. The Cloud Storage location of the trained model used to
- # create the version. See the
- # [guide to model
- # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
- # information.
- #
- # When passing Version to
- # projects.models.versions.create
- # the model service uses the specified location as the source of the model.
- # Once deployed, the model version is hosted by the prediction service, so
- # this location is useful only as a historical record.
- # The total number of model files can't exceed 1000.
+ # `mls1-c1-m2` is generally available. All other machine types are available
+ # in beta. Learn more about the [differences between machine
+ # types](/ml-engine/docs/machine-types-online-prediction).
"packageUris": [ # Optional. Cloud Storage paths (`gs://…`) of packages for [custom
# prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines)
# or [scikit-learn pipelines with custom
@@ -661,42 +225,21 @@
# [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater.
"A String",
],
- "explanationConfig": { # Message holding configuration options for explaining model predictions. # Optional. Configures explainability features on the model's version.
- # Some explanation features require additional metadata to be loaded
- # as part of the model payload.
- # There are two feature attribution methods supported for TensorFlow models:
- # integrated gradients and sampled Shapley.
- # [Learn more about feature
- # attributions.](/ai-platform/prediction/docs/ai-explanations/overview)
- "integratedGradientsAttribution": { # Attributes credit by computing the Aumann-Shapley value taking advantage # Attributes credit by computing the Aumann-Shapley value taking advantage
- # of the model's fully differentiable structure. Refer to this paper for
- # more details: http://proceedings.mlr.press/v70/sundararajan17a.html
- # of the model's fully differentiable structure. Refer to this paper for
- # more details: https://arxiv.org/abs/1703.01365
- "numIntegralSteps": 42, # Number of steps for approximating the path integral.
- # A good value to start is 50 and gradually increase until the
- # sum to diff property is met within the desired error range.
- },
- "sampledShapleyAttribution": { # An attribution method that approximates Shapley values for features that # An attribution method that approximates Shapley values for features that
- # contribute to the label being predicted. A sampling strategy is used to
- # approximate the value rather than considering all subsets of features.
- # contribute to the label being predicted. A sampling strategy is used to
- # approximate the value rather than considering all subsets of features.
- "numPaths": 42, # The number of feature permutations to consider when approximating the
- # Shapley values.
- },
- "xraiAttribution": { # Attributes credit by computing the XRAI taking advantage # Attributes credit by computing the XRAI taking advantage
- # of the model's fully differentiable structure. Refer to this paper for
- # more details: https://arxiv.org/abs/1906.02825
- # Currently only implemented for models with natural image inputs.
- # of the model's fully differentiable structure. Refer to this paper for
- # more details: https://arxiv.org/abs/1906.02825
- # Currently only implemented for models with natural image inputs.
- "numIntegralSteps": 42, # Number of steps for approximating the path integral.
- # A good value to start is 50 and gradually increase until the
- # sum to diff property is met within the desired error range.
- },
+ "acceleratorConfig": { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
+ # Only specify this field if you have specified a Compute Engine (N1) machine
+ # type in the `machineType` field. Learn more about [using GPUs for online
+ # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+ # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+ # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+ # [accelerators for online
+ # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+ "type": "A String", # The type of accelerator to use.
+ "count": "A String", # The number of accelerators to attach to each machine running the job.
},
+ "state": "A String", # Output only. The state of a version.
+ "name": "A String", # Required. The name specified for the version when it was created.
+ #
+ # The version name must be unique within the model it is created in.
"autoScaling": { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
# response to increases and decreases in traffic. Care should be
# taken to ramp up traffic according to the model's ability to scale
@@ -752,14 +295,55 @@
# -d @./update_body.json
# </pre>
},
- "createTime": "A String", # Output only. The time the version was created.
- "labels": { # Optional. One or more labels that you can add, to organize your model
- # versions. Each label is a key-value pair, where both the key and the value
- # are arbitrary strings that you supply.
- # For more information, see the documentation on
- # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
- "a_key": "A String",
+ "explanationConfig": { # Message holding configuration options for explaining model predictions. # Optional. Configures explainability features on the model's version.
+ # Some explanation features require additional metadata to be loaded
+ # as part of the model payload.
+ # There are two feature attribution methods supported for TensorFlow models:
+ # integrated gradients and sampled Shapley.
+ # [Learn more about feature
+ # attributions.](/ai-platform/prediction/docs/ai-explanations/overview)
+      "integratedGradientsAttribution": { # Attributes credit by computing the Aumann-Shapley value taking advantage
+          # of the model's fully differentiable structure. Refer to these papers for
+          # more details: http://proceedings.mlr.press/v70/sundararajan17a.html and
+          # https://arxiv.org/abs/1703.01365
+ "numIntegralSteps": 42, # Number of steps for approximating the path integral.
+ # A good value to start is 50 and gradually increase until the
+ # sum to diff property is met within the desired error range.
+ },
+      "xraiAttribution": { # Attributes credit by computing the XRAI taking advantage
+          # of the model's fully differentiable structure. Refer to this paper for
+          # more details: https://arxiv.org/abs/1906.02825
+          # Currently only implemented for models with natural image inputs.
+ "numIntegralSteps": 42, # Number of steps for approximating the path integral.
+ # A good value to start is 50 and gradually increase until the
+ # sum to diff property is met within the desired error range.
+ },
+      "sampledShapleyAttribution": { # An attribution method that approximates Shapley values for features that
+          # contribute to the label being predicted. A sampling strategy is used to
+          # approximate the value rather than considering all subsets of features.
+ "numPaths": 42, # The number of feature permutations to consider when approximating the
+ # Shapley values.
+ },
},
+ "pythonVersion": "A String", # Required. The version of Python used in prediction.
+ #
+ # The following Python versions are available:
+ #
+ # * Python '3.7' is available when `runtime_version` is set to '1.15' or
+ # later.
+ # * Python '3.5' is available when `runtime_version` is set to a version
+ # from '1.4' to '1.14'.
+ # * Python '2.7' is available when `runtime_version` is set to '1.15' or
+ # earlier.
+ #
+ # Read more about the Python versions available for [each runtime
+ # version](/ml-engine/docs/runtime-version-list).
"requestLoggingConfig": { # Configuration for logging request-response pairs to a BigQuery table. # Optional. *Only* specify this field in a
# projects.models.versions.patch
# request. Specifying it in a
@@ -779,6 +363,9 @@
# evaluation](/ml-engine/docs/continuous-evaluation/), you do not need to
# specify this configuration manually. Setting up continuous evaluation
# automatically enables logging of request-response pairs.
+ "samplingPercentage": 3.14, # Percentage of requests to be logged, expressed as a fraction from 0 to 1.
+ # For example, if you want to log 10% of requests, enter `0.1`. The sampling
+ # window is the lifetime of the model version. Defaults to 0.
"bigqueryTableName": "A String", # Required. Fully qualified BigQuery table name in the following format:
# "<var>project_id</var>.<var>dataset_name</var>.<var>table_name</var>"
#
@@ -796,44 +383,20 @@
# <tr><td>raw_prediction</td><td>STRING</td><td>NULLABLE</td></tr>
# <tr><td>groundtruth</td><td>STRING</td><td>NULLABLE</td></tr>
# </table>
- "samplingPercentage": 3.14, # Percentage of requests to be logged, expressed as a fraction from 0 to 1.
- # For example, if you want to log 10% of requests, enter `0.1`. The sampling
- # window is the lifetime of the model version. Defaults to 0.
},
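Since requestLoggingConfig is honored only in a
projects.models.versions.patch request, a hedged sketch of enabling it on
an existing version; the project, model, version, and table names are
placeholders:

    from googleapiclient import discovery

    ml = discovery.build('ml', 'v1')
    name = 'projects/my-project/models/my_model/versions/v1'
    body = {
        'requestLoggingConfig': {
            # The table must already exist with the schema shown above.
            'bigqueryTableName': 'my-project.prediction_logs.my_model_v1',
            'samplingPercentage': 0.1,  # log roughly 10% of requests
        }
    }
    operation = ml.projects().models().versions().patch(
        name=name, updateMask='requestLoggingConfig', body=body).execute()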
- "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
- "machineType": "A String", # Optional. The type of machine on which to serve the model. Currently only
- # applies to online prediction service. If this field is not specified, it
- # defaults to `mls1-c1-m2`.
- #
- # Online prediction supports the following machine types:
- #
- # * `mls1-c1-m2`
- # * `mls1-c4-m2`
- # * `n1-standard-2`
- # * `n1-standard-4`
- # * `n1-standard-8`
- # * `n1-standard-16`
- # * `n1-standard-32`
- # * `n1-highmem-2`
- # * `n1-highmem-4`
- # * `n1-highmem-8`
- # * `n1-highmem-16`
- # * `n1-highmem-32`
- # * `n1-highcpu-2`
- # * `n1-highcpu-4`
- # * `n1-highcpu-8`
- # * `n1-highcpu-16`
- # * `n1-highcpu-32`
- #
- # `mls1-c1-m2` is generally available. All other machine types are available
- # in beta. Learn more about the [differences between machine
- # types](/ml-engine/docs/machine-types-online-prediction).
- "runtimeVersion": "A String", # Required. The AI Platform runtime version to use for this deployment.
- #
- # For more information, see the
- # [runtime version list](/ml-engine/docs/runtime-version-list) and
- # [how to manage runtime versions](/ml-engine/docs/versioning).
- "description": "A String", # Optional. The description specified for the version when it was created.
+ "manualScaling": { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
+ # model. You should generally use `auto_scaling` with an appropriate
+ # `min_nodes` instead, but this option is available if you want more
+ # predictable billing. Beware that latency and error rates will increase
+  #   if the traffic exceeds the capacity of the system to serve it based
+ # on the selected number of nodes.
+ "nodes": 42, # The number of nodes to allocate for this model. These nodes are always up,
+ # starting from the time the model is deployed, so the cost of operating
+ # this model will be proportional to `nodes` * number of hours since
+ # last billing cycle plus the cost for each prediction performed.
+ },
+ "createTime": "A String", # Output only. The time the version was created.
+ "lastUseTime": "A String", # Output only. The time the version was last used for prediction.
"framework": "A String", # Optional. The machine learning framework AI Platform uses to train
# this version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,
# `XGBOOST`. If you do not specify a framework, AI Platform
@@ -848,6 +411,66 @@
# type](/ml-engine/docs/machine-types-online-prediction) in the
# `machineType` field, you must specify `TENSORFLOW`
# for the framework.
+ "predictionClass": "A String", # Optional. The fully qualified name
+ # (<var>module_name</var>.<var>class_name</var>) of a class that implements
+ # the Predictor interface described in this reference field. The module
+ # containing this class should be included in a package provided to the
+ # [`packageUris` field](#Version.FIELDS.package_uris).
+ #
+ # Specify this field if and only if you are deploying a [custom prediction
+ # routine (beta)](/ml-engine/docs/tensorflow/custom-prediction-routines).
+ # If you specify this field, you must set
+ # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater and
+ # you must set `machineType` to a [legacy (MLS1)
+ # machine type](/ml-engine/docs/machine-types-online-prediction).
+ #
+ # The following code sample provides the Predictor interface:
+ #
+ # <pre style="max-width: 626px;">
+ # class Predictor(object):
+ # """Interface for constructing custom predictors."""
+ #
+ # def predict(self, instances, **kwargs):
+ # """Performs custom prediction.
+ #
+ # Instances are the decoded values from the request. They have already
+ # been deserialized from JSON.
+ #
+ # Args:
+ # instances: A list of prediction input instances.
+ # **kwargs: A dictionary of keyword args provided as additional
+ # fields on the predict request body.
+ #
+ # Returns:
+ # A list of outputs containing the prediction results. This list must
+ # be JSON serializable.
+ # """
+ # raise NotImplementedError()
+ #
+ # @classmethod
+ # def from_path(cls, model_dir):
+ # """Creates an instance of Predictor using the given path.
+ #
+ # Loading of the predictor should be done in this method.
+ #
+ # Args:
+ # model_dir: The local directory that contains the exported model
+ # file along with any additional files uploaded when creating the
+ # version resource.
+ #
+ # Returns:
+ # An instance implementing this Predictor class.
+ # """
+ # raise NotImplementedError()
+ # </pre>
+ #
+ # Learn more about [the Predictor interface and custom prediction
+ # routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
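To make the interface above concrete, a minimal sketch of a Predictor that
unpickles a scikit-learn model; the file name model.pkl is an assumption:

    import os
    import pickle

    class MyPredictor(object):
        """Sketch of a custom predictor wrapping a pickled model."""

        def __init__(self, model):
            self._model = model

        def predict(self, instances, **kwargs):
            # Outputs must be JSON serializable, hence tolist().
            return self._model.predict(instances).tolist()

        @classmethod
        def from_path(cls, model_dir):
            # model_dir holds the files uploaded with the version resource.
            with open(os.path.join(model_dir, 'model.pkl'), 'rb') as f:
                return cls(pickle.load(f))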
+ "isDefault": True or False, # Output only. If true, this version will be used to handle prediction
+ # requests that do not specify a version.
+ #
+ # You can change the default version by calling
+      # projects.models.versions.setDefault.
"etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
# prevent simultaneous updates of a model from overwriting each other.
# It is strongly suggested that systems make use of the `etag` in the
@@ -855,56 +478,433 @@
# conditions: An `etag` is returned in the response to `GetVersion`, and
# systems are expected to put that etag in the request to `UpdateVersion` to
# ensure that their change will be applied to the model as intended.
+ "serviceAccount": "A String", # Optional. Specifies the service account for resource access control.
+ "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
+ "deploymentUri": "A String", # Required. The Cloud Storage location of the trained model used to
+ # create the version. See the
+ # [guide to model
+ # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
+ # information.
+ #
+ # When passing Version to
+ # projects.models.versions.create
+ # the model service uses the specified location as the source of the model.
+ # Once deployed, the model version is hosted by the prediction service, so
+ # this location is useful only as a historical record.
+ # The total number of model files can't exceed 1000.
+ "runtimeVersion": "A String", # Required. The AI Platform runtime version to use for this deployment.
+ #
+ # For more information, see the
+ # [runtime version list](/ml-engine/docs/runtime-version-list) and
+ # [how to manage runtime versions](/ml-engine/docs/versioning).
+ "description": "A String", # Optional. The description specified for the version when it was created.
},
- "onlinePredictionConsoleLogging": True or False, # Optional. If true, online prediction nodes send `stderr` and `stdout`
- # streams to Stackdriver Logging. These can be more verbose than the standard
- # access logs (see `onlinePredictionLogging`) and can incur higher cost.
- # However, they are helpful for debugging. Note that
- # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
- # your project receives prediction requests at a high QPS. Estimate your
- # costs before enabling this option.
- #
- # Default is false.
- "regions": [ # Optional. The list of regions where the model is going to be deployed.
- # Only one region per model is supported.
- # Defaults to 'us-central1' if nothing is set.
- # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
- # for AI Platform services.
- # Note:
- # * No matter where a model is deployed, it can always be accessed by
- # users from anywhere, both for online and batch prediction.
- # * The region for a batch prediction job is set by the region field when
- # submitting the batch prediction job and does not take its value from
- # this field.
- "A String",
- ],
- "description": "A String", # Optional. The description specified for the model when it was created.
"onlinePredictionLogging": True or False, # Optional. If true, online prediction access logs are sent to StackDriver
# Logging. These logs are like standard server access logs, containing
# information like timestamp and latency for each request. Note that
# [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
# your project receives prediction requests at a high queries per second rate
# (QPS). Estimate your costs before enabling this option.
- #
+ #
# Default is false.
- "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
- # prevent simultaneous updates of a model from overwriting each other.
- # It is strongly suggested that systems make use of the `etag` in the
- # read-modify-write cycle to perform model updates in order to avoid race
- # conditions: An `etag` is returned in the response to `GetModel`, and
- # systems are expected to put that etag in the request to `UpdateModel` to
- # ensure that their change will be applied to the model as intended.
- "labels": { # Optional. One or more labels that you can add, to organize your models.
- # Each label is a key-value pair, where both the key and the value are
- # arbitrary strings that you supply.
- # For more information, see the documentation on
- # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
- "a_key": "A String",
- },
- "name": "A String", # Required. The name specified for the model when it was created.
+ }
+
+ x__xgafv: string, V1 error format.
+ Allowed values
+ 1 - v1 error format
+ 2 - v2 error format
+
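Tying the request body and parameters together, a hedged sketch of a
minimal create() call; the project and model names are placeholders:

    from googleapiclient import discovery

    ml = discovery.build('ml', 'v1')
    body = {
        'name': 'my_model',          # must be unique within the project
        'regions': ['us-central1'],  # only one region per model is supported
        'onlinePredictionLogging': True,
    }
    model = ml.projects().models().create(
        parent='projects/my-project', body=body).execute()
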
+Returns:
+ An object of the form:
+
+ { # Represents a machine learning solution.
#
- # The model name must be unique within the project it is created in.
- }</pre>
+ # A model can have multiple versions, each of which is a deployed, trained
+ # model ready to receive prediction requests. The model itself is just a
+ # container.
+ "description": "A String", # Optional. The description specified for the model when it was created.
+ "regions": [ # Optional. The list of regions where the model is going to be deployed.
+ # Only one region per model is supported.
+ # Defaults to 'us-central1' if nothing is set.
+ # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
+ # for AI Platform services.
+ # Note:
+ # * No matter where a model is deployed, it can always be accessed by
+ # users from anywhere, both for online and batch prediction.
+ # * The region for a batch prediction job is set by the region field when
+ # submitting the batch prediction job and does not take its value from
+ # this field.
+ "A String",
+ ],
+ "name": "A String", # Required. The name specified for the model when it was created.
+ #
+ # The model name must be unique within the project it is created in.
+ "onlinePredictionConsoleLogging": True or False, # Optional. If true, online prediction nodes send `stderr` and `stdout`
+ # streams to Stackdriver Logging. These can be more verbose than the standard
+ # access logs (see `onlinePredictionLogging`) and can incur higher cost.
+ # However, they are helpful for debugging. Note that
+ # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
+ # your project receives prediction requests at a high QPS. Estimate your
+ # costs before enabling this option.
+ #
+ # Default is false.
+ "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
+ # prevent simultaneous updates of a model from overwriting each other.
+ # It is strongly suggested that systems make use of the `etag` in the
+ # read-modify-write cycle to perform model updates in order to avoid race
+ # conditions: An `etag` is returned in the response to `GetModel`, and
+ # systems are expected to put that etag in the request to `UpdateModel` to
+ # ensure that their change will be applied to the model as intended.
+ "labels": { # Optional. One or more labels that you can add, to organize your models.
+ # Each label is a key-value pair, where both the key and the value are
+ # arbitrary strings that you supply.
+ # For more information, see the documentation on
+ # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
+ "a_key": "A String",
+ },
+ "defaultVersion": { # Represents a version of the model. # Output only. The default version of the model. This version will be used to
+ # handle prediction requests that do not specify a version.
+ #
+ # You can change the default version by calling
+ # projects.models.versions.setDefault.
+ #
+ # Each version is a trained model deployed in the cloud, ready to handle
+ # prediction requests. A model can have multiple versions. You can get
+ # information about all of the versions of a given model by calling
+ # projects.models.versions.list.
+ "labels": { # Optional. One or more labels that you can add, to organize your model
+ # versions. Each label is a key-value pair, where both the key and the value
+ # are arbitrary strings that you supply.
+ # For more information, see the documentation on
+ # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
+ "a_key": "A String",
+ },
+ "machineType": "A String", # Optional. The type of machine on which to serve the model. Currently only
+ # applies to online prediction service. If this field is not specified, it
+ # defaults to `mls1-c1-m2`.
+ #
+ # Online prediction supports the following machine types:
+ #
+ # * `mls1-c1-m2`
+ # * `mls1-c4-m2`
+ # * `n1-standard-2`
+ # * `n1-standard-4`
+ # * `n1-standard-8`
+ # * `n1-standard-16`
+ # * `n1-standard-32`
+ # * `n1-highmem-2`
+ # * `n1-highmem-4`
+ # * `n1-highmem-8`
+ # * `n1-highmem-16`
+ # * `n1-highmem-32`
+ # * `n1-highcpu-2`
+ # * `n1-highcpu-4`
+ # * `n1-highcpu-8`
+ # * `n1-highcpu-16`
+ # * `n1-highcpu-32`
+ #
+ # `mls1-c1-m2` is generally available. All other machine types are available
+ # in beta. Learn more about the [differences between machine
+ # types](/ml-engine/docs/machine-types-online-prediction).
+ "packageUris": [ # Optional. Cloud Storage paths (`gs://…`) of packages for [custom
+ # prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines)
+ # or [scikit-learn pipelines with custom
+ # code](/ml-engine/docs/scikit/exporting-for-prediction#custom-pipeline-code).
+ #
+ # For a custom prediction routine, one of these packages must contain your
+ # Predictor class (see
+ # [`predictionClass`](#Version.FIELDS.prediction_class)). Additionally,
+      # include any dependencies your Predictor or scikit-learn pipeline uses
+      # that are not already included in your selected [runtime
+ # version](/ml-engine/docs/tensorflow/runtime-version-list).
+ #
+ # If you specify this field, you must also set
+ # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater.
+ "A String",
+ ],
+ "acceleratorConfig": { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
+ # Only specify this field if you have specified a Compute Engine (N1) machine
+ # type in the `machineType` field. Learn more about [using GPUs for online
+ # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+ # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+ # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+ # [accelerators for online
+ # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+ "type": "A String", # The type of accelerator to use.
+ "count": "A String", # The number of accelerators to attach to each machine running the job.
+ },
+ "state": "A String", # Output only. The state of a version.
+ "name": "A String", # Required. The name specified for the version when it was created.
+ #
+ # The version name must be unique within the model it is created in.
+ "autoScaling": { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
+ # response to increases and decreases in traffic. Care should be
+ # taken to ramp up traffic according to the model's ability to scale
+ # or you will start seeing increases in latency and 429 response codes.
+ #
+ # Note that you cannot use AutoScaling if your version uses
+      # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must specify
+ # `manual_scaling`.
+ "minNodes": 42, # Optional. The minimum number of nodes to allocate for this model. These
+ # nodes are always up, starting from the time the model is deployed.
+ # Therefore, the cost of operating this model will be at least
+ # `rate` * `min_nodes` * number of hours since last billing cycle,
+ # where `rate` is the cost per node-hour as documented in the
+ # [pricing guide](/ml-engine/docs/pricing),
+ # even if no predictions are performed. There is additional cost for each
+ # prediction performed.
+ #
+ # Unlike manual scaling, if the load gets too heavy for the nodes
+ # that are up, the service will automatically add nodes to handle the
+ # increased load as well as scale back as traffic drops, always maintaining
+ # at least `min_nodes`. You will be charged for the time in which additional
+ # nodes are used.
+ #
+ # If `min_nodes` is not specified and AutoScaling is used with a [legacy
+ # (MLS1) machine type](/ml-engine/docs/machine-types-online-prediction),
+ # `min_nodes` defaults to 0, in which case, when traffic to a model stops
+ # (and after a cool-down period), nodes will be shut down and no charges will
+ # be incurred until traffic to the model resumes.
+ #
+ # If `min_nodes` is not specified and AutoScaling is used with a [Compute
+ # Engine (N1) machine type](/ml-engine/docs/machine-types-online-prediction),
+ # `min_nodes` defaults to 1. `min_nodes` must be at least 1 for use with a
+ # Compute Engine machine type.
+ #
+ # Note that you cannot use AutoScaling if your version uses
+ # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use
+ # ManualScaling.
+ #
+ # You can set `min_nodes` when creating the model version, and you can also
+ # update `min_nodes` for an existing version:
+ # <pre>
+ # update_body.json:
+ # {
+ # 'autoScaling': {
+ # 'minNodes': 5
+ # }
+ # }
+ # </pre>
+ # HTTP request:
+ # <pre style="max-width: 626px;">
+ # PATCH
+ # https://ml.googleapis.com/v1/{name=projects/*/models/*/versions/*}?update_mask=autoScaling.minNodes
+ # -d @./update_body.json
+ # </pre>
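+ # As a hedged client-library sketch of the same PATCH (the resource name
+ # and project ID are placeholders, not part of this reference):
+ # <pre>
+ # from googleapiclient import discovery
+ # ml = discovery.build('ml', 'v1')
+ # ml.projects().models().versions().patch(
+ #     name='projects/my-project/models/my_model/versions/v1',
+ #     updateMask='autoScaling.minNodes',
+ #     body={'autoScaling': {'minNodes': 5}}).execute()
+ # </pre>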
+ },
+ "explanationConfig": { # Message holding configuration options for explaining model predictions. # Optional. Configures explainability features on the model's version.
+ # Some explanation features require additional metadata to be loaded
+ # as part of the model payload.
+ # There are two feature attribution methods supported for TensorFlow models:
+ # integrated gradients and sampled Shapley.
+ # [Learn more about feature
+ # attributions.](/ai-platform/prediction/docs/ai-explanations/overview)
+ "integratedGradientsAttribution": { # Attributes credit by computing the Aumann-Shapley value taking advantage
+ # of the model's fully differentiable structure. Refer to this paper for
+ # more details: https://arxiv.org/abs/1703.01365
+ # (also published at http://proceedings.mlr.press/v70/sundararajan17a.html).
+ "numIntegralSteps": 42, # Number of steps for approximating the path integral.
+ # A good value to start is 50 and gradually increase until the
+ # sum to diff property is met within the desired error range.
+ },
+ "xraiAttribution": { # Attributes credit by computing the XRAI taking advantage
+ # of the model's fully differentiable structure. Refer to this paper for
+ # more details: https://arxiv.org/abs/1906.02825
+ # Currently only implemented for models with natural image inputs.
+ "numIntegralSteps": 42, # Number of steps for approximating the path integral.
+ # A good value to start is 50 and gradually increase until the
+ # sum to diff property is met within the desired error range.
+ },
+ "sampledShapleyAttribution": { # An attribution method that approximates Shapley values for features that
+ # contribute to the label being predicted. A sampling strategy is used to
+ # approximate the value rather than considering all subsets of features.
+ "numPaths": 42, # The number of feature permutations to consider when approximating the
+ # Shapley values.
+ },
+ },
+ "pythonVersion": "A String", # Required. The version of Python used in prediction.
+ #
+ # The following Python versions are available:
+ #
+ # * Python '3.7' is available when `runtime_version` is set to '1.15' or
+ # later.
+ # * Python '3.5' is available when `runtime_version` is set to a version
+ # from '1.4' to '1.14'.
+ # * Python '2.7' is available when `runtime_version` is set to '1.15' or
+ # earlier.
+ #
+ # Read more about the Python versions available for [each runtime
+ # version](/ml-engine/docs/runtime-version-list).
+ "requestLoggingConfig": { # Configuration for logging request-response pairs to a BigQuery table. # Optional. *Only* specify this field in a
+ # projects.models.versions.patch
+ # request. Specifying it in a
+ # projects.models.versions.create
+ # request has no effect.
+ #
+ # Configures the request-response pair logging on predictions from this
+ # Version.
+ # Online prediction requests to a model version and the responses to these
+ # requests are converted to raw strings and saved to the specified BigQuery
+ # table. Logging is constrained by [BigQuery quotas and
+ # limits](/bigquery/quotas). If your project exceeds BigQuery quotas or limits,
+ # AI Platform Prediction does not log request-response pairs, but it continues
+ # to serve predictions.
+ #
+ # If you are using [continuous
+ # evaluation](/ml-engine/docs/continuous-evaluation/), you do not need to
+ # specify this configuration manually. Setting up continuous evaluation
+ # automatically enables logging of request-response pairs.
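+ # A hedged sketch of enabling this logging on an existing version via
+ # projects.models.versions.patch (all identifiers are placeholders):
+ # <pre>
+ # from googleapiclient import discovery
+ # ml = discovery.build('ml', 'v1')
+ # ml.projects().models().versions().patch(
+ #     name='projects/my-project/models/my_model/versions/v1',
+ #     updateMask='requestLoggingConfig',
+ #     body={'requestLoggingConfig': {
+ #         'bigqueryTableName': 'my-project.my_dataset.my_table',
+ #         'samplingPercentage': 0.1}}).execute()
+ # </pre>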
+ "samplingPercentage": 3.14, # Percentage of requests to be logged, expressed as a fraction from 0 to 1.
+ # For example, if you want to log 10% of requests, enter `0.1`. The sampling
+ # window is the lifetime of the model version. Defaults to 0.
+ "bigqueryTableName": "A String", # Required. Fully qualified BigQuery table name in the following format:
+ # "<var>project_id</var>.<var>dataset_name</var>.<var>table_name</var>"
+ #
+ # The specified table must already exist, and the "Cloud ML Service Agent"
+ # for your project must have permission to write to it. The table must have
+ # the following [schema](/bigquery/docs/schemas):
+ #
+ # <table>
+ # <tr><th>Field name</th><th style="display: table-cell">Type</th>
+ # <th style="display: table-cell">Mode</th></tr>
+ # <tr><td>model</td><td>STRING</td><td>REQUIRED</td></tr>
+ # <tr><td>model_version</td><td>STRING</td><td>REQUIRED</td></tr>
+ # <tr><td>time</td><td>TIMESTAMP</td><td>REQUIRED</td></tr>
+ # <tr><td>raw_data</td><td>STRING</td><td>REQUIRED</td></tr>
+ # <tr><td>raw_prediction</td><td>STRING</td><td>NULLABLE</td></tr>
+ # <tr><td>groundtruth</td><td>STRING</td><td>NULLABLE</td></tr>
+ # </table>
+ },
+ "manualScaling": { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
+ # model. You should generally use `auto_scaling` with an appropriate
+ # `min_nodes` instead, but this option is available if you want more
+ # predictable billing. Beware that latency and error rates will increase
+ # if the traffic exceeds the capability of the system to serve it based
+ # on the selected number of nodes.
+ "nodes": 42, # The number of nodes to allocate for this model. These nodes are always up,
+ # starting from the time the model is deployed, so the cost of operating
+ # this model will be proportional to `nodes` * number of hours since
+ # last billing cycle plus the cost for each prediction performed.
+ },
+ "createTime": "A String", # Output only. The time the version was created.
+ "lastUseTime": "A String", # Output only. The time the version was last used for prediction.
+ "framework": "A String", # Optional. The machine learning framework AI Platform uses to train
+ # this version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,
+ # `XGBOOST`. If you do not specify a framework, AI Platform
+ # will analyze files in the deployment_uri to determine a framework. If you
+ # choose `SCIKIT_LEARN` or `XGBOOST`, you must also set the runtime version
+ # of the model to 1.4 or greater.
+ #
+ # Do **not** specify a framework if you're deploying a [custom
+ # prediction routine](/ml-engine/docs/tensorflow/custom-prediction-routines).
+ #
+ # If you specify a [Compute Engine (N1) machine
+ # type](/ml-engine/docs/machine-types-online-prediction) in the
+ # `machineType` field, you must specify `TENSORFLOW`
+ # for the framework.
+ "predictionClass": "A String", # Optional. The fully qualified name
+ # (<var>module_name</var>.<var>class_name</var>) of a class that implements
+ # the Predictor interface described in this reference field. The module
+ # containing this class should be included in a package provided to the
+ # [`packageUris` field](#Version.FIELDS.package_uris).
+ #
+ # Specify this field if and only if you are deploying a [custom prediction
+ # routine (beta)](/ml-engine/docs/tensorflow/custom-prediction-routines).
+ # If you specify this field, you must set
+ # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater and
+ # you must set `machineType` to a [legacy (MLS1)
+ # machine type](/ml-engine/docs/machine-types-online-prediction).
+ #
+ # The following code sample provides the Predictor interface:
+ #
+ # <pre style="max-width: 626px;">
+ # class Predictor(object):
+ # """Interface for constructing custom predictors."""
+ #
+ # def predict(self, instances, **kwargs):
+ # """Performs custom prediction.
+ #
+ # Instances are the decoded values from the request. They have already
+ # been deserialized from JSON.
+ #
+ # Args:
+ # instances: A list of prediction input instances.
+ # **kwargs: A dictionary of keyword args provided as additional
+ # fields on the predict request body.
+ #
+ # Returns:
+ # A list of outputs containing the prediction results. This list must
+ # be JSON serializable.
+ # """
+ # raise NotImplementedError()
+ #
+ # @classmethod
+ # def from_path(cls, model_dir):
+ # """Creates an instance of Predictor using the given path.
+ #
+ # Loading of the predictor should be done in this method.
+ #
+ # Args:
+ # model_dir: The local directory that contains the exported model
+ # file along with any additional files uploaded when creating the
+ # version resource.
+ #
+ # Returns:
+ # An instance implementing this Predictor class.
+ # """
+ # raise NotImplementedError()
+ # </pre>
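+ # A hedged sketch of a concrete Predictor, assuming a scikit-learn-style
+ # model pickled as model.pkl (the file and class names are placeholders):
+ # <pre>
+ # import os
+ # import pickle
+ #
+ # class MyPredictor(object):
+ #   def __init__(self, model):
+ #     self._model = model
+ #
+ #   def predict(self, instances, **kwargs):
+ #     # Delegate to the loaded model; .tolist() keeps the result
+ #     # JSON serializable.
+ #     return self._model.predict(instances).tolist()
+ #
+ #   @classmethod
+ #   def from_path(cls, model_dir):
+ #     with open(os.path.join(model_dir, 'model.pkl'), 'rb') as f:
+ #       return cls(pickle.load(f))
+ # </pre>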
+ #
+ # Learn more about [the Predictor interface and custom prediction
+ # routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
+ "isDefault": True or False, # Output only. If true, this version will be used to handle prediction
+ # requests that do not specify a version.
+ #
+ # You can change the default version by calling
+ # projects.models.versions.setDefault.
+ "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
+ # prevent simultaneous updates of a model from overwriting each other.
+ # It is strongly suggested that systems make use of the `etag` in the
+ # read-modify-write cycle to perform model updates in order to avoid race
+ # conditions: An `etag` is returned in the response to `GetVersion`, and
+ # systems are expected to put that etag in the request to `UpdateVersion` to
+ # ensure that their change will be applied to the model as intended.
+ "serviceAccount": "A String", # Optional. Specifies the service account for resource access control.
+ "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
+ "deploymentUri": "A String", # Required. The Cloud Storage location of the trained model used to
+ # create the version. See the
+ # [guide to model
+ # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
+ # information.
+ #
+ # When passing Version to
+ # projects.models.versions.create
+ # the model service uses the specified location as the source of the model.
+ # Once deployed, the model version is hosted by the prediction service, so
+ # this location is useful only as a historical record.
+ # The total number of model files can't exceed 1000.
+ "runtimeVersion": "A String", # Required. The AI Platform runtime version to use for this deployment.
+ #
+ # For more information, see the
+ # [runtime version list](/ml-engine/docs/runtime-version-list) and
+ # [how to manage runtime versions](/ml-engine/docs/versioning).
+ "description": "A String", # Optional. The description specified for the version when it was created.
+ },
+ "onlinePredictionLogging": True or False, # Optional. If true, online prediction access logs are sent to Stackdriver
+ # Logging. These logs are like standard server access logs, containing
+ # information like timestamp and latency for each request. Note that
+ # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
+ # your project receives prediction requests at a high queries per second rate
+ # (QPS). Estimate your costs before enabling this option.
+ #
+ # Default is false.
+ }</pre>
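+ <p>A minimal, hedged sketch (not part of the generated reference) of calling this method with the google-api-python-client; the project ID and model name below are placeholders:</p>
+ <pre>
+ from googleapiclient import discovery
+
+ # Build a client for the AI Platform Training and Prediction API.
+ ml = discovery.build('ml', 'v1')
+
+ # Create the model container; versions are deployed into it separately.
+ request = ml.projects().models().create(
+     parent='projects/my-project',        # placeholder project ID
+     body={'name': 'my_model',            # placeholder model name
+           'regions': ['us-central1']})
+ response = request.execute()
+ print(response['name'])
+ </pre>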
</div>
<div class="method">
@@ -927,9 +927,6 @@
{ # This resource represents a long-running operation that is the result of a
# network API call.
- "name": "A String", # The server-assigned name, which is only unique within the same service that
- # originally returns it. If you use the default HTTP mapping, the
- # `name` should be a resource name ending with `operations/{unique_id}`.
"error": { # The `Status` type defines a logical error model that is suitable for # The error result of the operation in case of failure or cancellation.
# different programming environments, including REST APIs and RPC APIs. It is
# used by [gRPC](https://github.com/grpc). Each `Status` message contains
@@ -937,22 +934,16 @@
#
# You can find out more about this error model and how to work with it in the
# [API Design Guide](https://cloud.google.com/apis/design/errors).
- "code": 42, # The status code, which should be an enum value of google.rpc.Code.
- "message": "A String", # A developer-facing error message, which should be in English. Any
- # user-facing error message should be localized and sent in the
- # google.rpc.Status.details field, or localized by the client.
"details": [ # A list of messages that carry the error details. There is a common set of
# message types for APIs to use.
{
"a_key": "", # Properties of the object. Contains field @type with type URL.
},
],
- },
- "metadata": { # Service-specific metadata associated with the operation. It typically
- # contains progress information and common metadata such as create time.
- # Some services might not provide such metadata. Any method that returns a
- # long-running operation should document the metadata type, if any.
- "a_key": "", # Properties of the object. Contains field @type with type URL.
+ "message": "A String", # A developer-facing error message, which should be in English. Any
+ # user-facing error message should be localized and sent in the
+ # google.rpc.Status.details field, or localized by the client.
+ "code": 42, # The status code, which should be an enum value of google.rpc.Code.
},
"done": True or False, # If the value is `false`, it means the operation is still in progress.
# If `true`, the operation is completed, and either `error` or `response` is
@@ -967,6 +958,15 @@
# `TakeSnapshotResponse`.
"a_key": "", # Properties of the object. Contains field @type with type URL.
},
+ "metadata": { # Service-specific metadata associated with the operation. It typically
+ # contains progress information and common metadata such as create time.
+ # Some services might not provide such metadata. Any method that returns a
+ # long-running operation should document the metadata type, if any.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
+ },
+ "name": "A String", # The server-assigned name, which is only unique within the same service that
+ # originally returns it. If you use the default HTTP mapping, the
+ # `name` should be a resource name ending with `operations/{unique_id}`.
}</pre>
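+ <p>A hedged sketch of polling the returned long-running operation until it completes, assuming it came from a call such as projects.models.delete; resource names are placeholders:</p>
+ <pre>
+ import time
+
+ from googleapiclient import discovery
+
+ ml = discovery.build('ml', 'v1')
+
+ # Start a long-running operation (here: deleting a model).
+ op = ml.projects().models().delete(
+     name='projects/my-project/models/my_model').execute()
+
+ # Poll projects.operations.get until `done` is true, then check `error`.
+ while not op.get('done'):
+     time.sleep(5)  # fixed delay purely for illustration
+     op = ml.projects().operations().get(name=op['name']).execute()
+ if 'error' in op:
+     raise RuntimeError(op['error'].get('message'))
+ </pre>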
</div>
@@ -987,393 +987,393 @@
An object of the form:
{ # Represents a machine learning solution.
- #
- # A model can have multiple versions, each of which is a deployed, trained
- # model ready to receive prediction requests. The model itself is just a
- # container.
- "defaultVersion": { # Represents a version of the model. # Output only. The default version of the model. This version will be used to
- # handle prediction requests that do not specify a version.
#
- # You can change the default version by calling
- # projects.models.versions.setDefault.
- #
- # Each version is a trained model deployed in the cloud, ready to handle
- # prediction requests. A model can have multiple versions. You can get
- # information about all of the versions of a given model by calling
- # projects.models.versions.list.
- "acceleratorConfig": { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
- # Only specify this field if you have specified a Compute Engine (N1) machine
- # type in the `machineType` field. Learn more about [using GPUs for online
- # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
- # Note that the AcceleratorConfig can be used in both Jobs and Versions.
- # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
- # [accelerators for online
- # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
- "count": "A String", # The number of accelerators to attach to each machine running the job.
- "type": "A String", # The type of accelerator to use.
- },
- "isDefault": True or False, # Output only. If true, this version will be used to handle prediction
- # requests that do not specify a version.
- #
- # You can change the default version by calling
- # projects.methods.versions.setDefault.
- "manualScaling": { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
- # model. You should generally use `auto_scaling` with an appropriate
- # `min_nodes` instead, but this option is available if you want more
- # predictable billing. Beware that latency and error rates will increase
- # if the traffic exceeds that capability of the system to serve it based
- # on the selected number of nodes.
- "nodes": 42, # The number of nodes to allocate for this model. These nodes are always up,
- # starting from the time the model is deployed, so the cost of operating
- # this model will be proportional to `nodes` * number of hours since
- # last billing cycle plus the cost for each prediction performed.
- },
- "state": "A String", # Output only. The state of a version.
- "name": "A String", # Required. The name specified for the version when it was created.
- #
- # The version name must be unique within the model it is created in.
- "serviceAccount": "A String", # Optional. Specifies the service account for resource access control.
- "pythonVersion": "A String", # Required. The version of Python used in prediction.
- #
- # The following Python versions are available:
- #
- # * Python '3.7' is available when `runtime_version` is set to '1.15' or
- # later.
- # * Python '3.5' is available when `runtime_version` is set to a version
- # from '1.4' to '1.14'.
- # * Python '2.7' is available when `runtime_version` is set to '1.15' or
- # earlier.
- #
- # Read more about the Python versions available for [each runtime
- # version](/ml-engine/docs/runtime-version-list).
- "lastUseTime": "A String", # Output only. The time the version was last used for prediction.
- "predictionClass": "A String", # Optional. The fully qualified name
- # (<var>module_name</var>.<var>class_name</var>) of a class that implements
- # the Predictor interface described in this reference field. The module
- # containing this class should be included in a package provided to the
- # [`packageUris` field](#Version.FIELDS.package_uris).
- #
- # Specify this field if and only if you are deploying a [custom prediction
- # routine (beta)](/ml-engine/docs/tensorflow/custom-prediction-routines).
- # If you specify this field, you must set
- # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater and
- # you must set `machineType` to a [legacy (MLS1)
- # machine type](/ml-engine/docs/machine-types-online-prediction).
- #
- # The following code sample provides the Predictor interface:
- #
- # <pre style="max-width: 626px;">
- # class Predictor(object):
- # """Interface for constructing custom predictors."""
- #
- # def predict(self, instances, **kwargs):
- # """Performs custom prediction.
- #
- # Instances are the decoded values from the request. They have already
- # been deserialized from JSON.
- #
- # Args:
- # instances: A list of prediction input instances.
- # **kwargs: A dictionary of keyword args provided as additional
- # fields on the predict request body.
- #
- # Returns:
- # A list of outputs containing the prediction results. This list must
- # be JSON serializable.
- # """
- # raise NotImplementedError()
- #
- # @classmethod
- # def from_path(cls, model_dir):
- # """Creates an instance of Predictor using the given path.
- #
- # Loading of the predictor should be done in this method.
- #
- # Args:
- # model_dir: The local directory that contains the exported model
- # file along with any additional files uploaded when creating the
- # version resource.
- #
- # Returns:
- # An instance implementing this Predictor class.
- # """
- # raise NotImplementedError()
- # </pre>
- #
- # Learn more about [the Predictor interface and custom prediction
- # routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
- "deploymentUri": "A String", # Required. The Cloud Storage location of the trained model used to
- # create the version. See the
- # [guide to model
- # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
- # information.
- #
- # When passing Version to
- # projects.models.versions.create
- # the model service uses the specified location as the source of the model.
- # Once deployed, the model version is hosted by the prediction service, so
- # this location is useful only as a historical record.
- # The total number of model files can't exceed 1000.
- "packageUris": [ # Optional. Cloud Storage paths (`gs://…`) of packages for [custom
- # prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines)
- # or [scikit-learn pipelines with custom
- # code](/ml-engine/docs/scikit/exporting-for-prediction#custom-pipeline-code).
- #
- # For a custom prediction routine, one of these packages must contain your
- # Predictor class (see
- # [`predictionClass`](#Version.FIELDS.prediction_class)). Additionally,
- # include any dependencies used by your Predictor or scikit-learn pipeline
- # uses that are not already included in your selected [runtime
- # version](/ml-engine/docs/tensorflow/runtime-version-list).
- #
- # If you specify this field, you must also set
- # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater.
+ # A model can have multiple versions, each of which is a deployed, trained
+ # model ready to receive prediction requests. The model itself is just a
+ # container.
+ "description": "A String", # Optional. The description specified for the model when it was created.
+ "regions": [ # Optional. The list of regions where the model is going to be deployed.
+ # Only one region per model is supported.
+ # Defaults to 'us-central1' if nothing is set.
+ # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
+ # for AI Platform services.
+ # Note:
+ # * No matter where a model is deployed, it can always be accessed by
+ # users from anywhere, both for online and batch prediction.
+ # * The region for a batch prediction job is set by the region field when
+ # submitting the batch prediction job and does not take its value from
+ # this field.
"A String",
],
- "explanationConfig": { # Message holding configuration options for explaining model predictions. # Optional. Configures explainability features on the model's version.
- # Some explanation features require additional metadata to be loaded
- # as part of the model payload.
- # There are two feature attribution methods supported for TensorFlow models:
- # integrated gradients and sampled Shapley.
- # [Learn more about feature
- # attributions.](/ai-platform/prediction/docs/ai-explanations/overview)
- "integratedGradientsAttribution": { # Attributes credit by computing the Aumann-Shapley value taking advantage # Attributes credit by computing the Aumann-Shapley value taking advantage
- # of the model's fully differentiable structure. Refer to this paper for
- # more details: http://proceedings.mlr.press/v70/sundararajan17a.html
- # of the model's fully differentiable structure. Refer to this paper for
- # more details: https://arxiv.org/abs/1703.01365
- "numIntegralSteps": 42, # Number of steps for approximating the path integral.
- # A good value to start is 50 and gradually increase until the
- # sum to diff property is met within the desired error range.
- },
- "sampledShapleyAttribution": { # An attribution method that approximates Shapley values for features that # An attribution method that approximates Shapley values for features that
- # contribute to the label being predicted. A sampling strategy is used to
- # approximate the value rather than considering all subsets of features.
- # contribute to the label being predicted. A sampling strategy is used to
- # approximate the value rather than considering all subsets of features.
- "numPaths": 42, # The number of feature permutations to consider when approximating the
- # Shapley values.
- },
- "xraiAttribution": { # Attributes credit by computing the XRAI taking advantage # Attributes credit by computing the XRAI taking advantage
- # of the model's fully differentiable structure. Refer to this paper for
- # more details: https://arxiv.org/abs/1906.02825
- # Currently only implemented for models with natural image inputs.
- # of the model's fully differentiable structure. Refer to this paper for
- # more details: https://arxiv.org/abs/1906.02825
- # Currently only implemented for models with natural image inputs.
- "numIntegralSteps": 42, # Number of steps for approximating the path integral.
- # A good value to start is 50 and gradually increase until the
- # sum to diff property is met within the desired error range.
- },
- },
- "autoScaling": { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
- # response to increases and decreases in traffic. Care should be
- # taken to ramp up traffic according to the model's ability to scale
- # or you will start seeing increases in latency and 429 response codes.
+ "name": "A String", # Required. The name specified for the model when it was created.
#
- # Note that you cannot use AutoScaling if your version uses
- # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use specify
- # `manual_scaling`.
- "minNodes": 42, # Optional. The minimum number of nodes to allocate for this model. These
- # nodes are always up, starting from the time the model is deployed.
- # Therefore, the cost of operating this model will be at least
- # `rate` * `min_nodes` * number of hours since last billing cycle,
- # where `rate` is the cost per node-hour as documented in the
- # [pricing guide](/ml-engine/docs/pricing),
- # even if no predictions are performed. There is additional cost for each
- # prediction performed.
- #
- # Unlike manual scaling, if the load gets too heavy for the nodes
- # that are up, the service will automatically add nodes to handle the
- # increased load as well as scale back as traffic drops, always maintaining
- # at least `min_nodes`. You will be charged for the time in which additional
- # nodes are used.
- #
- # If `min_nodes` is not specified and AutoScaling is used with a [legacy
- # (MLS1) machine type](/ml-engine/docs/machine-types-online-prediction),
- # `min_nodes` defaults to 0, in which case, when traffic to a model stops
- # (and after a cool-down period), nodes will be shut down and no charges will
- # be incurred until traffic to the model resumes.
- #
- # If `min_nodes` is not specified and AutoScaling is used with a [Compute
- # Engine (N1) machine type](/ml-engine/docs/machine-types-online-prediction),
- # `min_nodes` defaults to 1. `min_nodes` must be at least 1 for use with a
- # Compute Engine machine type.
- #
- # Note that you cannot use AutoScaling if your version uses
- # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use
- # ManualScaling.
- #
- # You can set `min_nodes` when creating the model version, and you can also
- # update `min_nodes` for an existing version:
- # <pre>
- # update_body.json:
- # {
- # 'autoScaling': {
- # 'minNodes': 5
- # }
- # }
- # </pre>
- # HTTP request:
- # <pre style="max-width: 626px;">
- # PATCH
- # https://ml.googleapis.com/v1/{name=projects/*/models/*/versions/*}?update_mask=autoScaling.minNodes
- # -d @./update_body.json
- # </pre>
- },
- "createTime": "A String", # Output only. The time the version was created.
- "labels": { # Optional. One or more labels that you can add, to organize your model
- # versions. Each label is a key-value pair, where both the key and the value
- # are arbitrary strings that you supply.
- # For more information, see the documentation on
- # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
- "a_key": "A String",
- },
- "requestLoggingConfig": { # Configuration for logging request-response pairs to a BigQuery table. # Optional. *Only* specify this field in a
- # projects.models.versions.patch
- # request. Specifying it in a
- # projects.models.versions.create
- # request has no effect.
+ # The model name must be unique within the project it is created in.
+ "onlinePredictionConsoleLogging": True or False, # Optional. If true, online prediction nodes send `stderr` and `stdout`
+ # streams to Stackdriver Logging. These can be more verbose than the standard
+ # access logs (see `onlinePredictionLogging`) and can incur higher cost.
+ # However, they are helpful for debugging. Note that
+ # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
+ # your project receives prediction requests at a high QPS. Estimate your
+ # costs before enabling this option.
#
- # Configures the request-response pair logging on predictions from this
- # Version.
- # Online prediction requests to a model version and the responses to these
- # requests are converted to raw strings and saved to the specified BigQuery
- # table. Logging is constrained by [BigQuery quotas and
- # limits](/bigquery/quotas). If your project exceeds BigQuery quotas or limits,
- # AI Platform Prediction does not log request-response pairs, but it continues
- # to serve predictions.
- #
- # If you are using [continuous
- # evaluation](/ml-engine/docs/continuous-evaluation/), you do not need to
- # specify this configuration manually. Setting up continuous evaluation
- # automatically enables logging of request-response pairs.
- "bigqueryTableName": "A String", # Required. Fully qualified BigQuery table name in the following format:
- # "<var>project_id</var>.<var>dataset_name</var>.<var>table_name</var>"
- #
- # The specified table must already exist, and the "Cloud ML Service Agent"
- # for your project must have permission to write to it. The table must have
- # the following [schema](/bigquery/docs/schemas):
- #
- # <table>
- # <tr><th>Field name</th><th style="display: table-cell">Type</th>
- # <th style="display: table-cell">Mode</th></tr>
- # <tr><td>model</td><td>STRING</td><td>REQUIRED</td></tr>
- # <tr><td>model_version</td><td>STRING</td><td>REQUIRED</td></tr>
- # <tr><td>time</td><td>TIMESTAMP</td><td>REQUIRED</td></tr>
- # <tr><td>raw_data</td><td>STRING</td><td>REQUIRED</td></tr>
- # <tr><td>raw_prediction</td><td>STRING</td><td>NULLABLE</td></tr>
- # <tr><td>groundtruth</td><td>STRING</td><td>NULLABLE</td></tr>
- # </table>
- "samplingPercentage": 3.14, # Percentage of requests to be logged, expressed as a fraction from 0 to 1.
- # For example, if you want to log 10% of requests, enter `0.1`. The sampling
- # window is the lifetime of the model version. Defaults to 0.
- },
- "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
- "machineType": "A String", # Optional. The type of machine on which to serve the model. Currently only
- # applies to online prediction service. If this field is not specified, it
- # defaults to `mls1-c1-m2`.
- #
- # Online prediction supports the following machine types:
- #
- # * `mls1-c1-m2`
- # * `mls1-c4-m2`
- # * `n1-standard-2`
- # * `n1-standard-4`
- # * `n1-standard-8`
- # * `n1-standard-16`
- # * `n1-standard-32`
- # * `n1-highmem-2`
- # * `n1-highmem-4`
- # * `n1-highmem-8`
- # * `n1-highmem-16`
- # * `n1-highmem-32`
- # * `n1-highcpu-2`
- # * `n1-highcpu-4`
- # * `n1-highcpu-8`
- # * `n1-highcpu-16`
- # * `n1-highcpu-32`
- #
- # `mls1-c1-m2` is generally available. All other machine types are available
- # in beta. Learn more about the [differences between machine
- # types](/ml-engine/docs/machine-types-online-prediction).
- "runtimeVersion": "A String", # Required. The AI Platform runtime version to use for this deployment.
- #
- # For more information, see the
- # [runtime version list](/ml-engine/docs/runtime-version-list) and
- # [how to manage runtime versions](/ml-engine/docs/versioning).
- "description": "A String", # Optional. The description specified for the version when it was created.
- "framework": "A String", # Optional. The machine learning framework AI Platform uses to train
- # this version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,
- # `XGBOOST`. If you do not specify a framework, AI Platform
- # will analyze files in the deployment_uri to determine a framework. If you
- # choose `SCIKIT_LEARN` or `XGBOOST`, you must also set the runtime version
- # of the model to 1.4 or greater.
- #
- # Do **not** specify a framework if you're deploying a [custom
- # prediction routine](/ml-engine/docs/tensorflow/custom-prediction-routines).
- #
- # If you specify a [Compute Engine (N1) machine
- # type](/ml-engine/docs/machine-types-online-prediction) in the
- # `machineType` field, you must specify `TENSORFLOW`
- # for the framework.
+ # Default is false.
"etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
# prevent simultaneous updates of a model from overwriting each other.
# It is strongly suggested that systems make use of the `etag` in the
# read-modify-write cycle to perform model updates in order to avoid race
- # conditions: An `etag` is returned in the response to `GetVersion`, and
- # systems are expected to put that etag in the request to `UpdateVersion` to
+ # conditions: An `etag` is returned in the response to `GetModel`, and
+ # systems are expected to put that etag in the request to `UpdateModel` to
# ensure that their change will be applied to the model as intended.
- },
- "onlinePredictionConsoleLogging": True or False, # Optional. If true, online prediction nodes send `stderr` and `stdout`
- # streams to Stackdriver Logging. These can be more verbose than the standard
- # access logs (see `onlinePredictionLogging`) and can incur higher cost.
- # However, they are helpful for debugging. Note that
- # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
- # your project receives prediction requests at a high QPS. Estimate your
- # costs before enabling this option.
- #
- # Default is false.
- "regions": [ # Optional. The list of regions where the model is going to be deployed.
- # Only one region per model is supported.
- # Defaults to 'us-central1' if nothing is set.
- # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
- # for AI Platform services.
- # Note:
- # * No matter where a model is deployed, it can always be accessed by
- # users from anywhere, both for online and batch prediction.
- # * The region for a batch prediction job is set by the region field when
- # submitting the batch prediction job and does not take its value from
- # this field.
- "A String",
- ],
- "description": "A String", # Optional. The description specified for the model when it was created.
- "onlinePredictionLogging": True or False, # Optional. If true, online prediction access logs are sent to StackDriver
- # Logging. These logs are like standard server access logs, containing
- # information like timestamp and latency for each request. Note that
- # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
- # your project receives prediction requests at a high queries per second rate
- # (QPS). Estimate your costs before enabling this option.
- #
- # Default is false.
- "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
- # prevent simultaneous updates of a model from overwriting each other.
- # It is strongly suggested that systems make use of the `etag` in the
- # read-modify-write cycle to perform model updates in order to avoid race
- # conditions: An `etag` is returned in the response to `GetModel`, and
- # systems are expected to put that etag in the request to `UpdateModel` to
- # ensure that their change will be applied to the model as intended.
- "labels": { # Optional. One or more labels that you can add, to organize your models.
- # Each label is a key-value pair, where both the key and the value are
- # arbitrary strings that you supply.
- # For more information, see the documentation on
- # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
- "a_key": "A String",
- },
- "name": "A String", # Required. The name specified for the model when it was created.
- #
- # The model name must be unique within the project it is created in.
- }</pre>
+ "labels": { # Optional. One or more labels that you can add to organize your models.
+ # Each label is a key-value pair, where both the key and the value are
+ # arbitrary strings that you supply.
+ # For more information, see the documentation on
+ # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
+ "a_key": "A String",
+ },
+ "defaultVersion": { # Represents a version of the model. # Output only. The default version of the model. This version will be used to
+ # handle prediction requests that do not specify a version.
+ #
+ # You can change the default version by calling
+ # projects.models.versions.setDefault.
+ #
+ # Each version is a trained model deployed in the cloud, ready to handle
+ # prediction requests. A model can have multiple versions. You can get
+ # information about all of the versions of a given model by calling
+ # projects.models.versions.list.
+ "labels": { # Optional. One or more labels that you can add to organize your model
+ # versions. Each label is a key-value pair, where both the key and the value
+ # are arbitrary strings that you supply.
+ # For more information, see the documentation on
+ # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
+ "a_key": "A String",
+ },
+ "machineType": "A String", # Optional. The type of machine on which to serve the model. Currently only
+ # applies to online prediction service. If this field is not specified, it
+ # defaults to `mls1-c1-m2`.
+ #
+ # Online prediction supports the following machine types:
+ #
+ # * `mls1-c1-m2`
+ # * `mls1-c4-m2`
+ # * `n1-standard-2`
+ # * `n1-standard-4`
+ # * `n1-standard-8`
+ # * `n1-standard-16`
+ # * `n1-standard-32`
+ # * `n1-highmem-2`
+ # * `n1-highmem-4`
+ # * `n1-highmem-8`
+ # * `n1-highmem-16`
+ # * `n1-highmem-32`
+ # * `n1-highcpu-2`
+ # * `n1-highcpu-4`
+ # * `n1-highcpu-8`
+ # * `n1-highcpu-16`
+ # * `n1-highcpu-32`
+ #
+ # `mls1-c1-m2` is generally available. All other machine types are available
+ # in beta. Learn more about the [differences between machine
+ # types](/ml-engine/docs/machine-types-online-prediction).
+ "packageUris": [ # Optional. Cloud Storage paths (`gs://…`) of packages for [custom
+ # prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines)
+ # or [scikit-learn pipelines with custom
+ # code](/ml-engine/docs/scikit/exporting-for-prediction#custom-pipeline-code).
+ #
+ # For a custom prediction routine, one of these packages must contain your
+ # Predictor class (see
+ # [`predictionClass`](#Version.FIELDS.prediction_class)). Additionally,
+ # include any dependencies that your Predictor or scikit-learn pipeline
+ # uses that are not already included in your selected [runtime
+ # version](/ml-engine/docs/tensorflow/runtime-version-list).
+ #
+ # If you specify this field, you must also set
+ # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater.
+ "A String",
+ ],
+ "acceleratorConfig": { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
+ # Only specify this field if you have specified a Compute Engine (N1) machine
+ # type in the `machineType` field. Learn more about [using GPUs for online
+ # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+ # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+ # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+ # [accelerators for online
+ # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
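+ # A hedged example of the related fields in a versions.create body
+ # (the accelerator type and bucket below are placeholder values):
+ # <pre>
+ # {
+ #   'name': 'v1',
+ #   'deploymentUri': 'gs://my-bucket/model/',
+ #   'machineType': 'n1-standard-4',
+ #   'acceleratorConfig': {'type': 'NVIDIA_TESLA_K80', 'count': '1'}
+ # }
+ # </pre>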
+ "type": "A String", # The type of accelerator to use.
+ "count": "A String", # The number of accelerators to attach to each machine running the job.
+ },
+ "state": "A String", # Output only. The state of a version.
+ "name": "A String", # Required. The name specified for the version when it was created.
+ #
+ # The version name must be unique within the model it is created in.
+ "autoScaling": { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
+ # response to increases and decreases in traffic. Care should be
+ # taken to ramp up traffic according to the model's ability to scale
+ # or you will start seeing increases in latency and 429 response codes.
+ #
+ # Note that you cannot use AutoScaling if your version uses
+ # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must specify
+ # `manual_scaling`.
+ "minNodes": 42, # Optional. The minimum number of nodes to allocate for this model. These
+ # nodes are always up, starting from the time the model is deployed.
+ # Therefore, the cost of operating this model will be at least
+ # `rate` * `min_nodes` * number of hours since last billing cycle,
+ # where `rate` is the cost per node-hour as documented in the
+ # [pricing guide](/ml-engine/docs/pricing),
+ # even if no predictions are performed. There is additional cost for each
+ # prediction performed.
+ #
+ # Unlike manual scaling, if the load gets too heavy for the nodes
+ # that are up, the service will automatically add nodes to handle the
+ # increased load as well as scale back as traffic drops, always maintaining
+ # at least `min_nodes`. You will be charged for the time in which additional
+ # nodes are used.
+ #
+ # If `min_nodes` is not specified and AutoScaling is used with a [legacy
+ # (MLS1) machine type](/ml-engine/docs/machine-types-online-prediction),
+ # `min_nodes` defaults to 0, in which case, when traffic to a model stops
+ # (and after a cool-down period), nodes will be shut down and no charges will
+ # be incurred until traffic to the model resumes.
+ #
+ # If `min_nodes` is not specified and AutoScaling is used with a [Compute
+ # Engine (N1) machine type](/ml-engine/docs/machine-types-online-prediction),
+ # `min_nodes` defaults to 1. `min_nodes` must be at least 1 for use with a
+ # Compute Engine machine type.
+ #
+ # Note that you cannot use AutoScaling if your version uses
+ # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use
+ # ManualScaling.
+ #
+ # You can set `min_nodes` when creating the model version, and you can also
+ # update `min_nodes` for an existing version:
+ # <pre>
+ # update_body.json:
+ # {
+ # 'autoScaling': {
+ # 'minNodes': 5
+ # }
+ # }
+ # </pre>
+ # HTTP request:
+ # <pre style="max-width: 626px;">
+ # PATCH
+ # https://ml.googleapis.com/v1/{name=projects/*/models/*/versions/*}?update_mask=autoScaling.minNodes
+ # -d @./update_body.json
+ # </pre>
+ },
+ "explanationConfig": { # Message holding configuration options for explaining model predictions. # Optional. Configures explainability features on the model's version.
+ # Some explanation features require additional metadata to be loaded
+ # as part of the model payload.
+ # There are two feature attribution methods supported for TensorFlow models:
+ # integrated gradients and sampled Shapley.
+ # [Learn more about feature
+ # attributions.](/ai-platform/prediction/docs/ai-explanations/overview)
+ "integratedGradientsAttribution": { # Attributes credit by computing the Aumann-Shapley value taking advantage
+ # of the model's fully differentiable structure. Refer to this paper for
+ # more details: https://arxiv.org/abs/1703.01365
+ # (also published at http://proceedings.mlr.press/v70/sundararajan17a.html).
+ "numIntegralSteps": 42, # Number of steps for approximating the path integral.
+ # A good value to start is 50 and gradually increase until the
+ # sum to diff property is met within the desired error range.
+ },
+ "xraiAttribution": { # Attributes credit by computing the XRAI taking advantage
+ # of the model's fully differentiable structure. Refer to this paper for
+ # more details: https://arxiv.org/abs/1906.02825
+ # Currently only implemented for models with natural image inputs.
+ "numIntegralSteps": 42, # Number of steps for approximating the path integral.
+ # A good value to start is 50 and gradually increase until the
+ # sum to diff property is met within the desired error range.
+ },
+ "sampledShapleyAttribution": { # An attribution method that approximates Shapley values for features that
+ # contribute to the label being predicted. A sampling strategy is used to
+ # approximate the value rather than considering all subsets of features.
+ "numPaths": 42, # The number of feature permutations to consider when approximating the
+ # Shapley values.
+ },
+ },
+ "pythonVersion": "A String", # Required. The version of Python used in prediction.
+ #
+ # The following Python versions are available:
+ #
+ # * Python '3.7' is available when `runtime_version` is set to '1.15' or
+ # later.
+ # * Python '3.5' is available when `runtime_version` is set to a version
+ # from '1.4' to '1.14'.
+ # * Python '2.7' is available when `runtime_version` is set to '1.15' or
+ # earlier.
+ #
+ # Read more about the Python versions available for [each runtime
+ # version](/ml-engine/docs/runtime-version-list).
+ "requestLoggingConfig": { # Configuration for logging request-response pairs to a BigQuery table. # Optional. *Only* specify this field in a
+ # projects.models.versions.patch
+ # request. Specifying it in a
+ # projects.models.versions.create
+ # request has no effect.
+ #
+ # Configures the request-response pair logging on predictions from this
+ # Version.
+ # Online prediction requests to a model version and the responses to these
+ # requests are converted to raw strings and saved to the specified BigQuery
+ # table. Logging is constrained by [BigQuery quotas and
+ # limits](/bigquery/quotas). If your project exceeds BigQuery quotas or limits,
+ # AI Platform Prediction does not log request-response pairs, but it continues
+ # to serve predictions.
+ #
+ # If you are using [continuous
+ # evaluation](/ml-engine/docs/continuous-evaluation/), you do not need to
+ # specify this configuration manually. Setting up continuous evaluation
+ # automatically enables logging of request-response pairs.
+ "samplingPercentage": 3.14, # Percentage of requests to be logged, expressed as a fraction from 0 to 1.
+ # For example, if you want to log 10% of requests, enter `0.1`. The sampling
+ # window is the lifetime of the model version. Defaults to 0.
+ "bigqueryTableName": "A String", # Required. Fully qualified BigQuery table name in the following format:
+ # "<var>project_id</var>.<var>dataset_name</var>.<var>table_name</var>"
+ #
+ # The specified table must already exist, and the "Cloud ML Service Agent"
+ # for your project must have permission to write to it. The table must have
+ # the following [schema](/bigquery/docs/schemas):
+ #
+ # <table>
+ # <tr><th>Field name</th><th style="display: table-cell">Type</th>
+ # <th style="display: table-cell">Mode</th></tr>
+ # <tr><td>model</td><td>STRING</td><td>REQUIRED</td></tr>
+ # <tr><td>model_version</td><td>STRING</td><td>REQUIRED</td></tr>
+ # <tr><td>time</td><td>TIMESTAMP</td><td>REQUIRED</td></tr>
+ # <tr><td>raw_data</td><td>STRING</td><td>REQUIRED</td></tr>
+ # <tr><td>raw_prediction</td><td>STRING</td><td>NULLABLE</td></tr>
+ # <tr><td>groundtruth</td><td>STRING</td><td>NULLABLE</td></tr>
+ # </table>
+ },
+ "manualScaling": { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
+ # model. You should generally use `auto_scaling` with an appropriate
+ # `min_nodes` instead, but this option is available if you want more
+ # predictable billing. Beware that latency and error rates will increase
+ # if the traffic exceeds the capability of the system to serve it based
+ # on the selected number of nodes.
+ "nodes": 42, # The number of nodes to allocate for this model. These nodes are always up,
+ # starting from the time the model is deployed, so the cost of operating
+ # this model will be proportional to `nodes` * number of hours since
+ # last billing cycle plus the cost for each prediction performed.
+ },
+ "createTime": "A String", # Output only. The time the version was created.
+ "lastUseTime": "A String", # Output only. The time the version was last used for prediction.
+ "framework": "A String", # Optional. The machine learning framework AI Platform uses to train
+ # this version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,
+ # `XGBOOST`. If you do not specify a framework, AI Platform
+ # will analyze files in the deployment_uri to determine a framework. If you
+ # choose `SCIKIT_LEARN` or `XGBOOST`, you must also set the runtime version
+ # of the model to 1.4 or greater.
+ #
+ # Do **not** specify a framework if you're deploying a [custom
+ # prediction routine](/ml-engine/docs/tensorflow/custom-prediction-routines).
+ #
+ # If you specify a [Compute Engine (N1) machine
+ # type](/ml-engine/docs/machine-types-online-prediction) in the
+ # `machineType` field, you must specify `TENSORFLOW`
+ # for the framework.
+ "predictionClass": "A String", # Optional. The fully qualified name
+ # (<var>module_name</var>.<var>class_name</var>) of a class that implements
+ # the Predictor interface described in this reference field. The module
+ # containing this class should be included in a package provided to the
+ # [`packageUris` field](#Version.FIELDS.package_uris).
+ #
+ # Specify this field if and only if you are deploying a [custom prediction
+ # routine (beta)](/ml-engine/docs/tensorflow/custom-prediction-routines).
+ # If you specify this field, you must set
+ # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater and
+ # you must set `machineType` to a [legacy (MLS1)
+ # machine type](/ml-engine/docs/machine-types-online-prediction).
+ #
+ # The following code sample provides the Predictor interface:
+ #
+ # <pre style="max-width: 626px;">
+ # class Predictor(object):
+ # """Interface for constructing custom predictors."""
+ #
+ # def predict(self, instances, **kwargs):
+ # """Performs custom prediction.
+ #
+ # Instances are the decoded values from the request. They have already
+ # been deserialized from JSON.
+ #
+ # Args:
+ # instances: A list of prediction input instances.
+ # **kwargs: A dictionary of keyword args provided as additional
+ # fields on the predict request body.
+ #
+ # Returns:
+ # A list of outputs containing the prediction results. This list must
+ # be JSON serializable.
+ # """
+ # raise NotImplementedError()
+ #
+ # @classmethod
+ # def from_path(cls, model_dir):
+ # """Creates an instance of Predictor using the given path.
+ #
+ # Loading of the predictor should be done in this method.
+ #
+ # Args:
+ # model_dir: The local directory that contains the exported model
+ # file along with any additional files uploaded when creating the
+ # version resource.
+ #
+ # Returns:
+ # An instance implementing this Predictor class.
+ # """
+ # raise NotImplementedError()
+ # </pre>
+ #
+ # Learn more about [the Predictor interface and custom prediction
+ # routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
+ "isDefault": True or False, # Output only. If true, this version will be used to handle prediction
+ # requests that do not specify a version.
+ #
+ # You can change the default version by calling
+ # projects.models.versions.setDefault.
+ "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
+ # prevent simultaneous updates of a model from overwriting each other.
+ # It is strongly suggested that systems make use of the `etag` in the
+ # read-modify-write cycle to perform model updates in order to avoid race
+ # conditions: An `etag` is returned in the response to `GetVersion`, and
+ # systems are expected to put that etag in the request to `UpdateVersion` to
+ # ensure that their change will be applied to the model as intended.
+ "serviceAccount": "A String", # Optional. Specifies the service account for resource access control.
+ "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
+ "deploymentUri": "A String", # Required. The Cloud Storage location of the trained model used to
+ # create the version. See the
+ # [guide to model
+ # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
+ # information.
+ #
+ # When passing Version to
+ # projects.models.versions.create
+ # the model service uses the specified location as the source of the model.
+ # Once deployed, the model version is hosted by the prediction service, so
+ # this location is useful only as a historical record.
+ # The total number of model files can't exceed 1000.
+ "runtimeVersion": "A String", # Required. The AI Platform runtime version to use for this deployment.
+ #
+ # For more information, see the
+ # [runtime version list](/ml-engine/docs/runtime-version-list) and
+ # [how to manage runtime versions](/ml-engine/docs/versioning).
+ "description": "A String", # Optional. The description specified for the version when it was created.
+ },
+ "onlinePredictionLogging": True or False, # Optional. If true, online prediction access logs are sent to Stackdriver
+ # Logging. These logs are like standard server access logs, containing
+ # information like timestamp and latency for each request. Note that
+ # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
+ # your project receives prediction requests at a high queries per second rate
+ # (QPS). Estimate your costs before enabling this option.
+ #
+ # Default is false.
+ }</pre>
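+ <p>A hedged sketch of reading a model and its output-only default version with the Python client; the resource name is a placeholder:</p>
+ <pre>
+ from googleapiclient import discovery
+
+ ml = discovery.build('ml', 'v1')
+ model = ml.projects().models().get(
+     name='projects/my-project/models/my_model').execute()
+
+ # defaultVersion is output-only and absent until a version is deployed.
+ default_version = model.get('defaultVersion', {})
+ print(model['name'], default_version.get('name'))
+ </pre>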
</div>
<div class="method">
@@ -1472,30 +1472,18 @@
#
# For a description of IAM and its features, see the
# [IAM documentation](https://cloud.google.com/iam/docs/).
- "version": 42, # Specifies the format of the policy.
- #
- # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
- # are rejected.
- #
- # Any operation that affects conditional role bindings must specify version
- # `3`. This requirement applies to the following operations:
- #
- # * Getting a policy that includes a conditional role binding
- # * Adding a conditional role binding to a policy
- # * Changing a conditional role binding in a policy
- # * Removing any role binding, with or without a condition, from a policy
- # that includes conditions
+ "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
+ # prevent simultaneous updates of a policy from overwriting each other.
+ # It is strongly suggested that systems make use of the `etag` in the
+ # read-modify-write cycle to perform policy updates in order to avoid race
+ # conditions: An `etag` is returned in the response to `getIamPolicy`, and
+ # systems are expected to put that etag in the request to `setIamPolicy` to
+ # ensure that their change will be applied to the same version of the policy.
#
# **Important:** If you use IAM Conditions, you must include the `etag` field
# whenever you call `setIamPolicy`. If you omit this field, then IAM allows
# you to overwrite a version `3` policy with a version `1` policy, and all of
# the conditions in the version `3` policy are lost.
- #
- # If a policy does not include any conditions, operations on that policy may
- # specify any valid version or leave the field unset.
- #
- # To learn which resources support conditions in their IAM policies, see the
- # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
"auditConfigs": [ # Specifies cloud audit logging configuration for this policy.
{ # Specifies the audit configuration for a service.
# The configuration determines which permission types are logged, and what
@@ -1512,7 +1500,7 @@
# {
# "audit_configs": [
# {
- # "service": "allServices"
+ # "service": "allServices",
# "audit_log_configs": [
# {
# "log_type": "DATA_READ",
@@ -1521,18 +1509,18 @@
# ]
# },
# {
- # "log_type": "DATA_WRITE",
+ # "log_type": "DATA_WRITE"
# },
# {
- # "log_type": "ADMIN_READ",
+ # "log_type": "ADMIN_READ"
# }
# ]
# },
# {
- # "service": "sampleservice.googleapis.com"
+ # "service": "sampleservice.googleapis.com",
# "audit_log_configs": [
# {
- # "log_type": "DATA_READ",
+ # "log_type": "DATA_READ"
# },
# {
# "log_type": "DATA_WRITE",
@@ -1564,27 +1552,53 @@
# ]
# },
# {
- # "log_type": "DATA_WRITE",
+ # "log_type": "DATA_WRITE"
# }
# ]
# }
#
# This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting
# jose@example.com from DATA_READ logging.
+ "logType": "A String", # The log type that this config enables.
"exemptedMembers": [ # Specifies the identities that do not cause logging for this type of
# permission.
# Follows the same format of Binding.members.
"A String",
],
- "logType": "A String", # The log type that this config enables.
},
],
},
],
+ "version": 42, # Specifies the format of the policy.
+ #
+ # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
+ # are rejected.
+ #
+ # Any operation that affects conditional role bindings must specify version
+ # `3`. This requirement applies to the following operations:
+ #
+ # * Getting a policy that includes a conditional role binding
+ # * Adding a conditional role binding to a policy
+ # * Changing a conditional role binding in a policy
+ # * Removing any role binding, with or without a condition, from a policy
+ # that includes conditions
+ #
+ # **Important:** If you use IAM Conditions, you must include the `etag` field
+ # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+ # you to overwrite a version `3` policy with a version `1` policy, and all of
+ # the conditions in the version `3` policy are lost.
+ #
+ # If a policy does not include any conditions, operations on that policy may
+ # specify any valid version or leave the field unset.
+ #
+ # To learn which resources support conditions in their IAM policies, see the
+ # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
"bindings": [ # Associates a list of `members` to a `role`. Optionally, may specify a
# `condition` that determines how and when the `bindings` are applied. Each
# of the `bindings` must contain at least one member.
{ # Associates `members` with a `role`.
+ "role": "A String", # Role that is assigned to `members`.
+ # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
"condition": { # Represents a textual expression in the Common Expression Language (CEL) # The condition that is associated with this binding.
#
# If the condition evaluates to `true`, then this binding applies to the
@@ -1627,8 +1641,6 @@
# The exact variables and functions that may be referenced within an expression
# are determined by the service that evaluates it. See the service
# documentation for additional information.
- "description": "A String", # Optional. Description of the expression. This is a longer text which
- # describes the expression, e.g. when hovered over it in a UI.
"expression": "A String", # Textual representation of an expression in Common Expression Language
# syntax.
"title": "A String", # Optional. Title for the expression, i.e. a short string describing
@@ -1636,6 +1648,8 @@
# expression.
"location": "A String", # Optional. String indicating the location of the expression for error
# reporting, e.g. a file name and a position in the file.
+ "description": "A String", # Optional. Description of the expression. This is a longer text which
+ # describes the expression, e.g. when hovered over it in a UI.
},
"members": [ # Specifies the identities requesting access for a Cloud Platform resource.
# `members` can have the following values:
@@ -1682,27 +1696,13 @@
#
"A String",
],
- "role": "A String", # Role that is assigned to `members`.
- # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
},
],
- "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
- # prevent simultaneous updates of a policy from overwriting each other.
- # It is strongly suggested that systems make use of the `etag` in the
- # read-modify-write cycle to perform policy updates in order to avoid race
- # conditions: An `etag` is returned in the response to `getIamPolicy`, and
- # systems are expected to put that etag in the request to `setIamPolicy` to
- # ensure that their change will be applied to the same version of the policy.
- #
- # **Important:** If you use IAM Conditions, you must include the `etag` field
- # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
- # you to overwrite a version `3` policy with a version `1` policy, and all of
- # the conditions in the version `3` policy are lost.
}</pre>
</div>
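Because the policy `etag` must round-trip through `setIamPolicy` (see the **Important** note above), IAM changes follow the same read-modify-write pattern. A hedged sketch against the generated client; the role and member below are illustrative only:

```python
from googleapiclient import discovery

ml = discovery.build("ml", "v1")
resource = "projects/my-project/models/my_model"  # hypothetical resource

policy = ml.projects().models().getIamPolicy(resource=resource).execute()

# Keep the etag from the read in the policy we send back, so a concurrent
# writer's changes are not silently overwritten.
policy.setdefault("bindings", []).append(
    {"role": "roles/ml.viewer", "members": ["user:jose@example.com"]}
)
ml.projects().models().setIamPolicy(
    resource=resource, body={"policy": policy}
).execute()
```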
<div class="method">
- <code class="details" id="list">list(parent, filter=None, pageToken=None, pageSize=None, x__xgafv=None)</code>
+ <code class="details" id="list">list(parent, pageSize=None, pageToken=None, filter=None, x__xgafv=None)</code>
<pre>Lists the models in a project.
Each project can contain multiple models, and each model can have multiple
@@ -1713,16 +1713,16 @@
Args:
parent: string, Required. The name of the project whose models are to be listed. (required)
- filter: string, Optional. Specifies the subset of models to retrieve.
- pageToken: string, Optional. A page token to request the next page of results.
-
-You get the token from the `next_page_token` field of the response from
-the previous call.
pageSize: integer, Optional. The number of models to retrieve per "page" of results. If there
are more remaining results than this number, the response message will
contain a valid value in the `next_page_token` field.
The default value is 20, and the maximum page size is 100.
+ pageToken: string, Optional. A page token to request the next page of results.
+
+You get the token from the `next_page_token` field of the response from
+the previous call.
+ filter: string, Optional. Specifies the subset of models to retrieve.
x__xgafv: string, V1 error format.
Allowed values
1 - v1 error format
@@ -1732,398 +1732,398 @@
An object of the form:
{ # Response message for the ListModels method.
- "nextPageToken": "A String", # Optional. Pass this token as the `page_token` field of the request for a
- # subsequent call.
"models": [ # The list of models.
{ # Represents a machine learning solution.
- #
- # A model can have multiple versions, each of which is a deployed, trained
- # model ready to receive prediction requests. The model itself is just a
- # container.
- "defaultVersion": { # Represents a version of the model. # Output only. The default version of the model. This version will be used to
- # handle prediction requests that do not specify a version.
#
- # You can change the default version by calling
- # projects.models.versions.setDefault.
- #
- # Each version is a trained model deployed in the cloud, ready to handle
- # prediction requests. A model can have multiple versions. You can get
- # information about all of the versions of a given model by calling
- # projects.models.versions.list.
- "acceleratorConfig": { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
- # Only specify this field if you have specified a Compute Engine (N1) machine
- # type in the `machineType` field. Learn more about [using GPUs for online
- # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
- # Note that the AcceleratorConfig can be used in both Jobs and Versions.
- # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
- # [accelerators for online
- # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
- "count": "A String", # The number of accelerators to attach to each machine running the job.
- "type": "A String", # The type of accelerator to use.
- },
- "isDefault": True or False, # Output only. If true, this version will be used to handle prediction
- # requests that do not specify a version.
- #
- # You can change the default version by calling
- # projects.methods.versions.setDefault.
- "manualScaling": { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
- # model. You should generally use `auto_scaling` with an appropriate
- # `min_nodes` instead, but this option is available if you want more
- # predictable billing. Beware that latency and error rates will increase
- # if the traffic exceeds that capability of the system to serve it based
- # on the selected number of nodes.
- "nodes": 42, # The number of nodes to allocate for this model. These nodes are always up,
- # starting from the time the model is deployed, so the cost of operating
- # this model will be proportional to `nodes` * number of hours since
- # last billing cycle plus the cost for each prediction performed.
- },
- "state": "A String", # Output only. The state of a version.
- "name": "A String", # Required. The name specified for the version when it was created.
- #
- # The version name must be unique within the model it is created in.
- "serviceAccount": "A String", # Optional. Specifies the service account for resource access control.
- "pythonVersion": "A String", # Required. The version of Python used in prediction.
- #
- # The following Python versions are available:
- #
- # * Python '3.7' is available when `runtime_version` is set to '1.15' or
- # later.
- # * Python '3.5' is available when `runtime_version` is set to a version
- # from '1.4' to '1.14'.
- # * Python '2.7' is available when `runtime_version` is set to '1.15' or
- # earlier.
- #
- # Read more about the Python versions available for [each runtime
- # version](/ml-engine/docs/runtime-version-list).
- "lastUseTime": "A String", # Output only. The time the version was last used for prediction.
- "predictionClass": "A String", # Optional. The fully qualified name
- # (<var>module_name</var>.<var>class_name</var>) of a class that implements
- # the Predictor interface described in this reference field. The module
- # containing this class should be included in a package provided to the
- # [`packageUris` field](#Version.FIELDS.package_uris).
- #
- # Specify this field if and only if you are deploying a [custom prediction
- # routine (beta)](/ml-engine/docs/tensorflow/custom-prediction-routines).
- # If you specify this field, you must set
- # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater and
- # you must set `machineType` to a [legacy (MLS1)
- # machine type](/ml-engine/docs/machine-types-online-prediction).
- #
- # The following code sample provides the Predictor interface:
- #
- # <pre style="max-width: 626px;">
- # class Predictor(object):
- # """Interface for constructing custom predictors."""
- #
- # def predict(self, instances, **kwargs):
- # """Performs custom prediction.
- #
- # Instances are the decoded values from the request. They have already
- # been deserialized from JSON.
- #
- # Args:
- # instances: A list of prediction input instances.
- # **kwargs: A dictionary of keyword args provided as additional
- # fields on the predict request body.
- #
- # Returns:
- # A list of outputs containing the prediction results. This list must
- # be JSON serializable.
- # """
- # raise NotImplementedError()
- #
- # @classmethod
- # def from_path(cls, model_dir):
- # """Creates an instance of Predictor using the given path.
- #
- # Loading of the predictor should be done in this method.
- #
- # Args:
- # model_dir: The local directory that contains the exported model
- # file along with any additional files uploaded when creating the
- # version resource.
- #
- # Returns:
- # An instance implementing this Predictor class.
- # """
- # raise NotImplementedError()
- # </pre>
- #
- # Learn more about [the Predictor interface and custom prediction
- # routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
- "deploymentUri": "A String", # Required. The Cloud Storage location of the trained model used to
- # create the version. See the
- # [guide to model
- # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
- # information.
- #
- # When passing Version to
- # projects.models.versions.create
- # the model service uses the specified location as the source of the model.
- # Once deployed, the model version is hosted by the prediction service, so
- # this location is useful only as a historical record.
- # The total number of model files can't exceed 1000.
- "packageUris": [ # Optional. Cloud Storage paths (`gs://…`) of packages for [custom
- # prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines)
- # or [scikit-learn pipelines with custom
- # code](/ml-engine/docs/scikit/exporting-for-prediction#custom-pipeline-code).
- #
- # For a custom prediction routine, one of these packages must contain your
- # Predictor class (see
- # [`predictionClass`](#Version.FIELDS.prediction_class)). Additionally,
- # include any dependencies used by your Predictor or scikit-learn pipeline
- # uses that are not already included in your selected [runtime
- # version](/ml-engine/docs/tensorflow/runtime-version-list).
- #
- # If you specify this field, you must also set
- # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater.
+ # A model can have multiple versions, each of which is a deployed, trained
+ # model ready to receive prediction requests. The model itself is just a
+ # container.
+ "description": "A String", # Optional. The description specified for the model when it was created.
+ "regions": [ # Optional. The list of regions where the model is going to be deployed.
+ # Only one region per model is supported.
+ # Defaults to 'us-central1' if nothing is set.
+ # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
+ # for AI Platform services.
+ # Note:
+ # * No matter where a model is deployed, it can always be accessed by
+ # users from anywhere, both for online and batch prediction.
+ # * The region for a batch prediction job is set by the region field when
+ # submitting the batch prediction job and does not take its value from
+ # this field.
"A String",
],
- "explanationConfig": { # Message holding configuration options for explaining model predictions. # Optional. Configures explainability features on the model's version.
- # Some explanation features require additional metadata to be loaded
- # as part of the model payload.
- # There are two feature attribution methods supported for TensorFlow models:
- # integrated gradients and sampled Shapley.
- # [Learn more about feature
- # attributions.](/ai-platform/prediction/docs/ai-explanations/overview)
- "integratedGradientsAttribution": { # Attributes credit by computing the Aumann-Shapley value taking advantage # Attributes credit by computing the Aumann-Shapley value taking advantage
- # of the model's fully differentiable structure. Refer to this paper for
- # more details: http://proceedings.mlr.press/v70/sundararajan17a.html
- # of the model's fully differentiable structure. Refer to this paper for
- # more details: https://arxiv.org/abs/1703.01365
- "numIntegralSteps": 42, # Number of steps for approximating the path integral.
- # A good value to start is 50 and gradually increase until the
- # sum to diff property is met within the desired error range.
- },
- "sampledShapleyAttribution": { # An attribution method that approximates Shapley values for features that # An attribution method that approximates Shapley values for features that
- # contribute to the label being predicted. A sampling strategy is used to
- # approximate the value rather than considering all subsets of features.
- # contribute to the label being predicted. A sampling strategy is used to
- # approximate the value rather than considering all subsets of features.
- "numPaths": 42, # The number of feature permutations to consider when approximating the
- # Shapley values.
- },
- "xraiAttribution": { # Attributes credit by computing the XRAI taking advantage # Attributes credit by computing the XRAI taking advantage
- # of the model's fully differentiable structure. Refer to this paper for
- # more details: https://arxiv.org/abs/1906.02825
- # Currently only implemented for models with natural image inputs.
- # of the model's fully differentiable structure. Refer to this paper for
- # more details: https://arxiv.org/abs/1906.02825
- # Currently only implemented for models with natural image inputs.
- "numIntegralSteps": 42, # Number of steps for approximating the path integral.
- # A good value to start is 50 and gradually increase until the
- # sum to diff property is met within the desired error range.
- },
- },
- "autoScaling": { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
- # response to increases and decreases in traffic. Care should be
- # taken to ramp up traffic according to the model's ability to scale
- # or you will start seeing increases in latency and 429 response codes.
+ "name": "A String", # Required. The name specified for the model when it was created.
#
- # Note that you cannot use AutoScaling if your version uses
- # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use specify
- # `manual_scaling`.
- "minNodes": 42, # Optional. The minimum number of nodes to allocate for this model. These
- # nodes are always up, starting from the time the model is deployed.
- # Therefore, the cost of operating this model will be at least
- # `rate` * `min_nodes` * number of hours since last billing cycle,
- # where `rate` is the cost per node-hour as documented in the
- # [pricing guide](/ml-engine/docs/pricing),
- # even if no predictions are performed. There is additional cost for each
- # prediction performed.
- #
- # Unlike manual scaling, if the load gets too heavy for the nodes
- # that are up, the service will automatically add nodes to handle the
- # increased load as well as scale back as traffic drops, always maintaining
- # at least `min_nodes`. You will be charged for the time in which additional
- # nodes are used.
- #
- # If `min_nodes` is not specified and AutoScaling is used with a [legacy
- # (MLS1) machine type](/ml-engine/docs/machine-types-online-prediction),
- # `min_nodes` defaults to 0, in which case, when traffic to a model stops
- # (and after a cool-down period), nodes will be shut down and no charges will
- # be incurred until traffic to the model resumes.
- #
- # If `min_nodes` is not specified and AutoScaling is used with a [Compute
- # Engine (N1) machine type](/ml-engine/docs/machine-types-online-prediction),
- # `min_nodes` defaults to 1. `min_nodes` must be at least 1 for use with a
- # Compute Engine machine type.
- #
- # Note that you cannot use AutoScaling if your version uses
- # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use
- # ManualScaling.
- #
- # You can set `min_nodes` when creating the model version, and you can also
- # update `min_nodes` for an existing version:
- # <pre>
- # update_body.json:
- # {
- # 'autoScaling': {
- # 'minNodes': 5
- # }
- # }
- # </pre>
- # HTTP request:
- # <pre style="max-width: 626px;">
- # PATCH
- # https://ml.googleapis.com/v1/{name=projects/*/models/*/versions/*}?update_mask=autoScaling.minNodes
- # -d @./update_body.json
- # </pre>
- },
- "createTime": "A String", # Output only. The time the version was created.
- "labels": { # Optional. One or more labels that you can add, to organize your model
- # versions. Each label is a key-value pair, where both the key and the value
- # are arbitrary strings that you supply.
- # For more information, see the documentation on
- # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
- "a_key": "A String",
- },
- "requestLoggingConfig": { # Configuration for logging request-response pairs to a BigQuery table. # Optional. *Only* specify this field in a
- # projects.models.versions.patch
- # request. Specifying it in a
- # projects.models.versions.create
- # request has no effect.
+ # The model name must be unique within the project it is created in.
+ "onlinePredictionConsoleLogging": True or False, # Optional. If true, online prediction nodes send `stderr` and `stdout`
+ # streams to Stackdriver Logging. These can be more verbose than the standard
+ # access logs (see `onlinePredictionLogging`) and can incur higher cost.
+ # However, they are helpful for debugging. Note that
+ # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
+ # your project receives prediction requests at a high QPS. Estimate your
+ # costs before enabling this option.
#
- # Configures the request-response pair logging on predictions from this
- # Version.
- # Online prediction requests to a model version and the responses to these
- # requests are converted to raw strings and saved to the specified BigQuery
- # table. Logging is constrained by [BigQuery quotas and
- # limits](/bigquery/quotas). If your project exceeds BigQuery quotas or limits,
- # AI Platform Prediction does not log request-response pairs, but it continues
- # to serve predictions.
- #
- # If you are using [continuous
- # evaluation](/ml-engine/docs/continuous-evaluation/), you do not need to
- # specify this configuration manually. Setting up continuous evaluation
- # automatically enables logging of request-response pairs.
- "bigqueryTableName": "A String", # Required. Fully qualified BigQuery table name in the following format:
- # "<var>project_id</var>.<var>dataset_name</var>.<var>table_name</var>"
- #
- # The specified table must already exist, and the "Cloud ML Service Agent"
- # for your project must have permission to write to it. The table must have
- # the following [schema](/bigquery/docs/schemas):
- #
- # <table>
- # <tr><th>Field name</th><th style="display: table-cell">Type</th>
- # <th style="display: table-cell">Mode</th></tr>
- # <tr><td>model</td><td>STRING</td><td>REQUIRED</td></tr>
- # <tr><td>model_version</td><td>STRING</td><td>REQUIRED</td></tr>
- # <tr><td>time</td><td>TIMESTAMP</td><td>REQUIRED</td></tr>
- # <tr><td>raw_data</td><td>STRING</td><td>REQUIRED</td></tr>
- # <tr><td>raw_prediction</td><td>STRING</td><td>NULLABLE</td></tr>
- # <tr><td>groundtruth</td><td>STRING</td><td>NULLABLE</td></tr>
- # </table>
- "samplingPercentage": 3.14, # Percentage of requests to be logged, expressed as a fraction from 0 to 1.
- # For example, if you want to log 10% of requests, enter `0.1`. The sampling
- # window is the lifetime of the model version. Defaults to 0.
- },
- "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
- "machineType": "A String", # Optional. The type of machine on which to serve the model. Currently only
- # applies to online prediction service. If this field is not specified, it
- # defaults to `mls1-c1-m2`.
- #
- # Online prediction supports the following machine types:
- #
- # * `mls1-c1-m2`
- # * `mls1-c4-m2`
- # * `n1-standard-2`
- # * `n1-standard-4`
- # * `n1-standard-8`
- # * `n1-standard-16`
- # * `n1-standard-32`
- # * `n1-highmem-2`
- # * `n1-highmem-4`
- # * `n1-highmem-8`
- # * `n1-highmem-16`
- # * `n1-highmem-32`
- # * `n1-highcpu-2`
- # * `n1-highcpu-4`
- # * `n1-highcpu-8`
- # * `n1-highcpu-16`
- # * `n1-highcpu-32`
- #
- # `mls1-c1-m2` is generally available. All other machine types are available
- # in beta. Learn more about the [differences between machine
- # types](/ml-engine/docs/machine-types-online-prediction).
- "runtimeVersion": "A String", # Required. The AI Platform runtime version to use for this deployment.
- #
- # For more information, see the
- # [runtime version list](/ml-engine/docs/runtime-version-list) and
- # [how to manage runtime versions](/ml-engine/docs/versioning).
- "description": "A String", # Optional. The description specified for the version when it was created.
- "framework": "A String", # Optional. The machine learning framework AI Platform uses to train
- # this version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,
- # `XGBOOST`. If you do not specify a framework, AI Platform
- # will analyze files in the deployment_uri to determine a framework. If you
- # choose `SCIKIT_LEARN` or `XGBOOST`, you must also set the runtime version
- # of the model to 1.4 or greater.
- #
- # Do **not** specify a framework if you're deploying a [custom
- # prediction routine](/ml-engine/docs/tensorflow/custom-prediction-routines).
- #
- # If you specify a [Compute Engine (N1) machine
- # type](/ml-engine/docs/machine-types-online-prediction) in the
- # `machineType` field, you must specify `TENSORFLOW`
- # for the framework.
+ # Default is false.
"etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
# prevent simultaneous updates of a model from overwriting each other.
# It is strongly suggested that systems make use of the `etag` in the
# read-modify-write cycle to perform model updates in order to avoid race
- # conditions: An `etag` is returned in the response to `GetVersion`, and
- # systems are expected to put that etag in the request to `UpdateVersion` to
+ # conditions: An `etag` is returned in the response to `GetModel`, and
+ # systems are expected to put that etag in the request to `UpdateModel` to
# ensure that their change will be applied to the model as intended.
+ "labels": { # Optional. One or more labels that you can add, to organize your models.
+ # Each label is a key-value pair, where both the key and the value are
+ # arbitrary strings that you supply.
+ # For more information, see the documentation on
+ # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
+ "a_key": "A String",
+ },
+ "defaultVersion": { # Represents a version of the model. # Output only. The default version of the model. This version will be used to
+ # handle prediction requests that do not specify a version.
+ #
+ # You can change the default version by calling
+ # projects.models.versions.setDefault.
+ #
+ # Each version is a trained model deployed in the cloud, ready to handle
+ # prediction requests. A model can have multiple versions. You can get
+ # information about all of the versions of a given model by calling
+ # projects.models.versions.list.
+ "labels": { # Optional. One or more labels that you can add, to organize your model
+ # versions. Each label is a key-value pair, where both the key and the value
+ # are arbitrary strings that you supply.
+ # For more information, see the documentation on
+ # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
+ "a_key": "A String",
+ },
+ "machineType": "A String", # Optional. The type of machine on which to serve the model. Currently only
+ # applies to online prediction service. If this field is not specified, it
+ # defaults to `mls1-c1-m2`.
+ #
+ # Online prediction supports the following machine types:
+ #
+ # * `mls1-c1-m2`
+ # * `mls1-c4-m2`
+ # * `n1-standard-2`
+ # * `n1-standard-4`
+ # * `n1-standard-8`
+ # * `n1-standard-16`
+ # * `n1-standard-32`
+ # * `n1-highmem-2`
+ # * `n1-highmem-4`
+ # * `n1-highmem-8`
+ # * `n1-highmem-16`
+ # * `n1-highmem-32`
+ # * `n1-highcpu-2`
+ # * `n1-highcpu-4`
+ # * `n1-highcpu-8`
+ # * `n1-highcpu-16`
+ # * `n1-highcpu-32`
+ #
+ # `mls1-c1-m2` is generally available. All other machine types are available
+ # in beta. Learn more about the [differences between machine
+ # types](/ml-engine/docs/machine-types-online-prediction).
+ "packageUris": [ # Optional. Cloud Storage paths (`gs://…`) of packages for [custom
+ # prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines)
+ # or [scikit-learn pipelines with custom
+ # code](/ml-engine/docs/scikit/exporting-for-prediction#custom-pipeline-code).
+ #
+ # For a custom prediction routine, one of these packages must contain your
+ # Predictor class (see
+ # [`predictionClass`](#Version.FIELDS.prediction_class)). Additionally,
+              # include any dependencies that your Predictor or scikit-learn pipeline
+              # uses and that are not already included in your selected [runtime
+ # version](/ml-engine/docs/tensorflow/runtime-version-list).
+ #
+ # If you specify this field, you must also set
+ # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater.
+ "A String",
+ ],
+ "acceleratorConfig": { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
+ # Only specify this field if you have specified a Compute Engine (N1) machine
+ # type in the `machineType` field. Learn more about [using GPUs for online
+ # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+ # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+ # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+ # [accelerators for online
+ # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+ "type": "A String", # The type of accelerator to use.
+ "count": "A String", # The number of accelerators to attach to each machine running the job.
+ },
+ "state": "A String", # Output only. The state of a version.
+ "name": "A String", # Required. The name specified for the version when it was created.
+ #
+ # The version name must be unique within the model it is created in.
+ "autoScaling": { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
+ # response to increases and decreases in traffic. Care should be
+ # taken to ramp up traffic according to the model's ability to scale
+ # or you will start seeing increases in latency and 429 response codes.
+ #
+ # Note that you cannot use AutoScaling if your version uses
+              # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must specify
+ # `manual_scaling`.
+ "minNodes": 42, # Optional. The minimum number of nodes to allocate for this model. These
+ # nodes are always up, starting from the time the model is deployed.
+ # Therefore, the cost of operating this model will be at least
+ # `rate` * `min_nodes` * number of hours since last billing cycle,
+ # where `rate` is the cost per node-hour as documented in the
+ # [pricing guide](/ml-engine/docs/pricing),
+ # even if no predictions are performed. There is additional cost for each
+ # prediction performed.
+ #
+ # Unlike manual scaling, if the load gets too heavy for the nodes
+ # that are up, the service will automatically add nodes to handle the
+ # increased load as well as scale back as traffic drops, always maintaining
+ # at least `min_nodes`. You will be charged for the time in which additional
+ # nodes are used.
+ #
+ # If `min_nodes` is not specified and AutoScaling is used with a [legacy
+ # (MLS1) machine type](/ml-engine/docs/machine-types-online-prediction),
+ # `min_nodes` defaults to 0, in which case, when traffic to a model stops
+ # (and after a cool-down period), nodes will be shut down and no charges will
+ # be incurred until traffic to the model resumes.
+ #
+ # If `min_nodes` is not specified and AutoScaling is used with a [Compute
+ # Engine (N1) machine type](/ml-engine/docs/machine-types-online-prediction),
+ # `min_nodes` defaults to 1. `min_nodes` must be at least 1 for use with a
+ # Compute Engine machine type.
+ #
+ # Note that you cannot use AutoScaling if your version uses
+ # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use
+ # ManualScaling.
+ #
+ # You can set `min_nodes` when creating the model version, and you can also
+ # update `min_nodes` for an existing version:
+ # <pre>
+ # update_body.json:
+ # {
+              #   "autoScaling": {
+              #     "minNodes": 5
+ # }
+ # }
+ # </pre>
+ # HTTP request:
+ # <pre style="max-width: 626px;">
+ # PATCH
+ # https://ml.googleapis.com/v1/{name=projects/*/models/*/versions/*}?update_mask=autoScaling.minNodes
+ # -d @./update_body.json
+ # </pre>
+ },
+ "explanationConfig": { # Message holding configuration options for explaining model predictions. # Optional. Configures explainability features on the model's version.
+ # Some explanation features require additional metadata to be loaded
+ # as part of the model payload.
+ # There are two feature attribution methods supported for TensorFlow models:
+ # integrated gradients and sampled Shapley.
+ # [Learn more about feature
+ # attributions.](/ai-platform/prediction/docs/ai-explanations/overview)
+          "integratedGradientsAttribution": { # Attributes credit by computing the Aumann-Shapley value taking advantage
+              # of the model's fully differentiable structure. Refer to this paper for
+              # more details: https://arxiv.org/abs/1703.01365 (also published at
+              # http://proceedings.mlr.press/v70/sundararajan17a.html).
+ "numIntegralSteps": 42, # Number of steps for approximating the path integral.
+ # A good value to start is 50 and gradually increase until the
+ # sum to diff property is met within the desired error range.
+ },
+          "xraiAttribution": { # Attributes credit by computing the XRAI taking advantage
+              # of the model's fully differentiable structure. Refer to this paper for
+              # more details: https://arxiv.org/abs/1906.02825
+              # Currently only implemented for models with natural image inputs.
+ "numIntegralSteps": 42, # Number of steps for approximating the path integral.
+ # A good value to start is 50 and gradually increase until the
+ # sum to diff property is met within the desired error range.
+ },
+          "sampledShapleyAttribution": { # An attribution method that approximates Shapley values for features that
+              # contribute to the label being predicted. A sampling strategy is used to
+              # approximate the value rather than considering all subsets of features.
+ "numPaths": 42, # The number of feature permutations to consider when approximating the
+ # Shapley values.
+ },
+ },
+ "pythonVersion": "A String", # Required. The version of Python used in prediction.
+ #
+ # The following Python versions are available:
+ #
+ # * Python '3.7' is available when `runtime_version` is set to '1.15' or
+ # later.
+ # * Python '3.5' is available when `runtime_version` is set to a version
+ # from '1.4' to '1.14'.
+ # * Python '2.7' is available when `runtime_version` is set to '1.15' or
+ # earlier.
+ #
+ # Read more about the Python versions available for [each runtime
+ # version](/ml-engine/docs/runtime-version-list).
+ "requestLoggingConfig": { # Configuration for logging request-response pairs to a BigQuery table. # Optional. *Only* specify this field in a
+ # projects.models.versions.patch
+ # request. Specifying it in a
+ # projects.models.versions.create
+ # request has no effect.
+ #
+ # Configures the request-response pair logging on predictions from this
+ # Version.
+ # Online prediction requests to a model version and the responses to these
+ # requests are converted to raw strings and saved to the specified BigQuery
+ # table. Logging is constrained by [BigQuery quotas and
+ # limits](/bigquery/quotas). If your project exceeds BigQuery quotas or limits,
+ # AI Platform Prediction does not log request-response pairs, but it continues
+ # to serve predictions.
+ #
+ # If you are using [continuous
+ # evaluation](/ml-engine/docs/continuous-evaluation/), you do not need to
+ # specify this configuration manually. Setting up continuous evaluation
+ # automatically enables logging of request-response pairs.
+ "samplingPercentage": 3.14, # Percentage of requests to be logged, expressed as a fraction from 0 to 1.
+ # For example, if you want to log 10% of requests, enter `0.1`. The sampling
+ # window is the lifetime of the model version. Defaults to 0.
+ "bigqueryTableName": "A String", # Required. Fully qualified BigQuery table name in the following format:
+ # "<var>project_id</var>.<var>dataset_name</var>.<var>table_name</var>"
+ #
+ # The specified table must already exist, and the "Cloud ML Service Agent"
+ # for your project must have permission to write to it. The table must have
+ # the following [schema](/bigquery/docs/schemas):
+ #
+ # <table>
+ # <tr><th>Field name</th><th style="display: table-cell">Type</th>
+ # <th style="display: table-cell">Mode</th></tr>
+ # <tr><td>model</td><td>STRING</td><td>REQUIRED</td></tr>
+ # <tr><td>model_version</td><td>STRING</td><td>REQUIRED</td></tr>
+ # <tr><td>time</td><td>TIMESTAMP</td><td>REQUIRED</td></tr>
+ # <tr><td>raw_data</td><td>STRING</td><td>REQUIRED</td></tr>
+ # <tr><td>raw_prediction</td><td>STRING</td><td>NULLABLE</td></tr>
+ # <tr><td>groundtruth</td><td>STRING</td><td>NULLABLE</td></tr>
+ # </table>
+ },
+ "manualScaling": { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
+ # model. You should generally use `auto_scaling` with an appropriate
+ # `min_nodes` instead, but this option is available if you want more
+ # predictable billing. Beware that latency and error rates will increase
+              # if the traffic exceeds the capability of the system to serve it based
+ # on the selected number of nodes.
+ "nodes": 42, # The number of nodes to allocate for this model. These nodes are always up,
+ # starting from the time the model is deployed, so the cost of operating
+ # this model will be proportional to `nodes` * number of hours since
+ # last billing cycle plus the cost for each prediction performed.
+ },
+ "createTime": "A String", # Output only. The time the version was created.
+ "lastUseTime": "A String", # Output only. The time the version was last used for prediction.
+ "framework": "A String", # Optional. The machine learning framework AI Platform uses to train
+ # this version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,
+ # `XGBOOST`. If you do not specify a framework, AI Platform
+ # will analyze files in the deployment_uri to determine a framework. If you
+ # choose `SCIKIT_LEARN` or `XGBOOST`, you must also set the runtime version
+ # of the model to 1.4 or greater.
+ #
+ # Do **not** specify a framework if you're deploying a [custom
+ # prediction routine](/ml-engine/docs/tensorflow/custom-prediction-routines).
+ #
+ # If you specify a [Compute Engine (N1) machine
+ # type](/ml-engine/docs/machine-types-online-prediction) in the
+ # `machineType` field, you must specify `TENSORFLOW`
+ # for the framework.
+ "predictionClass": "A String", # Optional. The fully qualified name
+ # (<var>module_name</var>.<var>class_name</var>) of a class that implements
+ # the Predictor interface described in this reference field. The module
+ # containing this class should be included in a package provided to the
+ # [`packageUris` field](#Version.FIELDS.package_uris).
+ #
+ # Specify this field if and only if you are deploying a [custom prediction
+ # routine (beta)](/ml-engine/docs/tensorflow/custom-prediction-routines).
+ # If you specify this field, you must set
+ # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater and
+ # you must set `machineType` to a [legacy (MLS1)
+ # machine type](/ml-engine/docs/machine-types-online-prediction).
+ #
+ # The following code sample provides the Predictor interface:
+ #
+ # <pre style="max-width: 626px;">
+ # class Predictor(object):
+ # """Interface for constructing custom predictors."""
+ #
+ # def predict(self, instances, **kwargs):
+ # """Performs custom prediction.
+ #
+ # Instances are the decoded values from the request. They have already
+ # been deserialized from JSON.
+ #
+ # Args:
+ # instances: A list of prediction input instances.
+ # **kwargs: A dictionary of keyword args provided as additional
+ # fields on the predict request body.
+ #
+ # Returns:
+ # A list of outputs containing the prediction results. This list must
+ # be JSON serializable.
+ # """
+ # raise NotImplementedError()
+ #
+ # @classmethod
+ # def from_path(cls, model_dir):
+ # """Creates an instance of Predictor using the given path.
+ #
+ # Loading of the predictor should be done in this method.
+ #
+ # Args:
+ # model_dir: The local directory that contains the exported model
+ # file along with any additional files uploaded when creating the
+ # version resource.
+ #
+ # Returns:
+ # An instance implementing this Predictor class.
+ # """
+ # raise NotImplementedError()
+ # </pre>
+ #
+ # Learn more about [the Predictor interface and custom prediction
+ # routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
+ "isDefault": True or False, # Output only. If true, this version will be used to handle prediction
+ # requests that do not specify a version.
+ #
+ # You can change the default version by calling
+              # projects.models.versions.setDefault.
+          "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
+              # prevent simultaneous updates of a version from overwriting each other.
+              # It is strongly suggested that systems make use of the `etag` in the
+              # read-modify-write cycle to perform version updates in order to avoid race
+              # conditions: An `etag` is returned in the response to `GetVersion`, and
+              # systems are expected to put that etag in the request to `UpdateVersion` to
+              # ensure that their change will be applied to the version as intended.
+ "serviceAccount": "A String", # Optional. Specifies the service account for resource access control.
+ "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
+ "deploymentUri": "A String", # Required. The Cloud Storage location of the trained model used to
+ # create the version. See the
+ # [guide to model
+ # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
+ # information.
+ #
+ # When passing Version to
+ # projects.models.versions.create
+ # the model service uses the specified location as the source of the model.
+ # Once deployed, the model version is hosted by the prediction service, so
+ # this location is useful only as a historical record.
+ # The total number of model files can't exceed 1000.
+ "runtimeVersion": "A String", # Required. The AI Platform runtime version to use for this deployment.
+ #
+ # For more information, see the
+ # [runtime version list](/ml-engine/docs/runtime-version-list) and
+ # [how to manage runtime versions](/ml-engine/docs/versioning).
+ "description": "A String", # Optional. The description specified for the version when it was created.
+ },
+        "onlinePredictionLogging": True or False, # Optional. If true, online prediction access logs are sent to Stackdriver
+            # Logging. These logs are like standard server access logs, containing
+            # information like timestamp and latency for each request. Note that
+            # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
+            # your project receives prediction requests at a high rate of queries per
+            # second (QPS). Estimate your costs before enabling this option.
+ #
+ # Default is false.
},
- "onlinePredictionConsoleLogging": True or False, # Optional. If true, online prediction nodes send `stderr` and `stdout`
- # streams to Stackdriver Logging. These can be more verbose than the standard
- # access logs (see `onlinePredictionLogging`) and can incur higher cost.
- # However, they are helpful for debugging. Note that
- # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
- # your project receives prediction requests at a high QPS. Estimate your
- # costs before enabling this option.
- #
- # Default is false.
- "regions": [ # Optional. The list of regions where the model is going to be deployed.
- # Only one region per model is supported.
- # Defaults to 'us-central1' if nothing is set.
- # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
- # for AI Platform services.
- # Note:
- # * No matter where a model is deployed, it can always be accessed by
- # users from anywhere, both for online and batch prediction.
- # * The region for a batch prediction job is set by the region field when
- # submitting the batch prediction job and does not take its value from
- # this field.
- "A String",
- ],
- "description": "A String", # Optional. The description specified for the model when it was created.
- "onlinePredictionLogging": True or False, # Optional. If true, online prediction access logs are sent to StackDriver
- # Logging. These logs are like standard server access logs, containing
- # information like timestamp and latency for each request. Note that
- # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
- # your project receives prediction requests at a high queries per second rate
- # (QPS). Estimate your costs before enabling this option.
- #
- # Default is false.
- "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
- # prevent simultaneous updates of a model from overwriting each other.
- # It is strongly suggested that systems make use of the `etag` in the
- # read-modify-write cycle to perform model updates in order to avoid race
- # conditions: An `etag` is returned in the response to `GetModel`, and
- # systems are expected to put that etag in the request to `UpdateModel` to
- # ensure that their change will be applied to the model as intended.
- "labels": { # Optional. One or more labels that you can add, to organize your models.
- # Each label is a key-value pair, where both the key and the value are
- # arbitrary strings that you supply.
- # For more information, see the documentation on
- # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
- "a_key": "A String",
- },
- "name": "A String", # Required. The name specified for the model when it was created.
- #
- # The model name must be unique within the project it is created in.
- },
],
+ "nextPageToken": "A String", # Optional. Pass this token as the `page_token` field of the request for a
+ # subsequent call.
}</pre>
</div>
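The `nextPageToken`/`pageToken` pair above drives pagination, and the generated client also ships a `list_next` helper that re-issues the request with the token filled in. A minimal sketch under the same placeholder assumptions as the earlier examples:

```python
from googleapiclient import discovery

ml = discovery.build("ml", "v1")
request = ml.projects().models().list(
    parent="projects/my-project", pageSize=20  # hypothetical project
)

while request is not None:
    response = request.execute()
    for model in response.get("models", []):
        print(model["name"])
    # list_next returns None once the response carries no next_page_token.
    request = ml.projects().models().list_next(
        previous_request=request, previous_response=response
    )
```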
@@ -2154,393 +2154,393 @@
The object takes the form of:
{ # Represents a machine learning solution.
- #
- # A model can have multiple versions, each of which is a deployed, trained
- # model ready to receive prediction requests. The model itself is just a
- # container.
- "defaultVersion": { # Represents a version of the model. # Output only. The default version of the model. This version will be used to
- # handle prediction requests that do not specify a version.
#
- # You can change the default version by calling
- # projects.models.versions.setDefault.
- #
- # Each version is a trained model deployed in the cloud, ready to handle
- # prediction requests. A model can have multiple versions. You can get
- # information about all of the versions of a given model by calling
- # projects.models.versions.list.
- "acceleratorConfig": { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
- # Only specify this field if you have specified a Compute Engine (N1) machine
- # type in the `machineType` field. Learn more about [using GPUs for online
- # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
- # Note that the AcceleratorConfig can be used in both Jobs and Versions.
- # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
- # [accelerators for online
- # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
- "count": "A String", # The number of accelerators to attach to each machine running the job.
- "type": "A String", # The type of accelerator to use.
- },
- "isDefault": True or False, # Output only. If true, this version will be used to handle prediction
- # requests that do not specify a version.
- #
- # You can change the default version by calling
- # projects.methods.versions.setDefault.
- "manualScaling": { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
- # model. You should generally use `auto_scaling` with an appropriate
- # `min_nodes` instead, but this option is available if you want more
- # predictable billing. Beware that latency and error rates will increase
- # if the traffic exceeds that capability of the system to serve it based
- # on the selected number of nodes.
- "nodes": 42, # The number of nodes to allocate for this model. These nodes are always up,
- # starting from the time the model is deployed, so the cost of operating
- # this model will be proportional to `nodes` * number of hours since
- # last billing cycle plus the cost for each prediction performed.
- },
- "state": "A String", # Output only. The state of a version.
- "name": "A String", # Required. The name specified for the version when it was created.
- #
- # The version name must be unique within the model it is created in.
- "serviceAccount": "A String", # Optional. Specifies the service account for resource access control.
- "pythonVersion": "A String", # Required. The version of Python used in prediction.
- #
- # The following Python versions are available:
- #
- # * Python '3.7' is available when `runtime_version` is set to '1.15' or
- # later.
- # * Python '3.5' is available when `runtime_version` is set to a version
- # from '1.4' to '1.14'.
- # * Python '2.7' is available when `runtime_version` is set to '1.15' or
- # earlier.
- #
- # Read more about the Python versions available for [each runtime
- # version](/ml-engine/docs/runtime-version-list).
- "lastUseTime": "A String", # Output only. The time the version was last used for prediction.
- "predictionClass": "A String", # Optional. The fully qualified name
- # (<var>module_name</var>.<var>class_name</var>) of a class that implements
- # the Predictor interface described in this reference field. The module
- # containing this class should be included in a package provided to the
- # [`packageUris` field](#Version.FIELDS.package_uris).
- #
- # Specify this field if and only if you are deploying a [custom prediction
- # routine (beta)](/ml-engine/docs/tensorflow/custom-prediction-routines).
- # If you specify this field, you must set
- # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater and
- # you must set `machineType` to a [legacy (MLS1)
- # machine type](/ml-engine/docs/machine-types-online-prediction).
- #
- # The following code sample provides the Predictor interface:
- #
- # <pre style="max-width: 626px;">
- # class Predictor(object):
- # """Interface for constructing custom predictors."""
- #
- # def predict(self, instances, **kwargs):
- # """Performs custom prediction.
- #
- # Instances are the decoded values from the request. They have already
- # been deserialized from JSON.
- #
- # Args:
- # instances: A list of prediction input instances.
- # **kwargs: A dictionary of keyword args provided as additional
- # fields on the predict request body.
- #
- # Returns:
- # A list of outputs containing the prediction results. This list must
- # be JSON serializable.
- # """
- # raise NotImplementedError()
- #
- # @classmethod
- # def from_path(cls, model_dir):
- # """Creates an instance of Predictor using the given path.
- #
- # Loading of the predictor should be done in this method.
- #
- # Args:
- # model_dir: The local directory that contains the exported model
- # file along with any additional files uploaded when creating the
- # version resource.
- #
- # Returns:
- # An instance implementing this Predictor class.
- # """
- # raise NotImplementedError()
- # </pre>
- #
- # Learn more about [the Predictor interface and custom prediction
- # routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
- "deploymentUri": "A String", # Required. The Cloud Storage location of the trained model used to
- # create the version. See the
- # [guide to model
- # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
- # information.
- #
- # When passing Version to
- # projects.models.versions.create
- # the model service uses the specified location as the source of the model.
- # Once deployed, the model version is hosted by the prediction service, so
- # this location is useful only as a historical record.
- # The total number of model files can't exceed 1000.
- "packageUris": [ # Optional. Cloud Storage paths (`gs://…`) of packages for [custom
- # prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines)
- # or [scikit-learn pipelines with custom
- # code](/ml-engine/docs/scikit/exporting-for-prediction#custom-pipeline-code).
- #
- # For a custom prediction routine, one of these packages must contain your
- # Predictor class (see
- # [`predictionClass`](#Version.FIELDS.prediction_class)). Additionally,
- # include any dependencies used by your Predictor or scikit-learn pipeline
- # uses that are not already included in your selected [runtime
- # version](/ml-engine/docs/tensorflow/runtime-version-list).
- #
- # If you specify this field, you must also set
- # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater.
+ # A model can have multiple versions, each of which is a deployed, trained
+ # model ready to receive prediction requests. The model itself is just a
+ # container.
+ "description": "A String", # Optional. The description specified for the model when it was created.
+ "regions": [ # Optional. The list of regions where the model is going to be deployed.
+ # Only one region per model is supported.
+ # Defaults to 'us-central1' if nothing is set.
+ # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
+ # for AI Platform services.
+ # Note:
+ # * No matter where a model is deployed, it can always be accessed by
+ # users from anywhere, both for online and batch prediction.
+ # * The region for a batch prediction job is set by the region field when
+ # submitting the batch prediction job and does not take its value from
+ # this field.
"A String",
],
- "explanationConfig": { # Message holding configuration options for explaining model predictions. # Optional. Configures explainability features on the model's version.
- # Some explanation features require additional metadata to be loaded
- # as part of the model payload.
- # There are two feature attribution methods supported for TensorFlow models:
- # integrated gradients and sampled Shapley.
- # [Learn more about feature
- # attributions.](/ai-platform/prediction/docs/ai-explanations/overview)
- "integratedGradientsAttribution": { # Attributes credit by computing the Aumann-Shapley value taking advantage # Attributes credit by computing the Aumann-Shapley value taking advantage
- # of the model's fully differentiable structure. Refer to this paper for
- # more details: http://proceedings.mlr.press/v70/sundararajan17a.html
- # of the model's fully differentiable structure. Refer to this paper for
- # more details: https://arxiv.org/abs/1703.01365
- "numIntegralSteps": 42, # Number of steps for approximating the path integral.
- # A good value to start is 50 and gradually increase until the
- # sum to diff property is met within the desired error range.
- },
- "sampledShapleyAttribution": { # An attribution method that approximates Shapley values for features that # An attribution method that approximates Shapley values for features that
- # contribute to the label being predicted. A sampling strategy is used to
- # approximate the value rather than considering all subsets of features.
- # contribute to the label being predicted. A sampling strategy is used to
- # approximate the value rather than considering all subsets of features.
- "numPaths": 42, # The number of feature permutations to consider when approximating the
- # Shapley values.
- },
- "xraiAttribution": { # Attributes credit by computing the XRAI taking advantage # Attributes credit by computing the XRAI taking advantage
- # of the model's fully differentiable structure. Refer to this paper for
- # more details: https://arxiv.org/abs/1906.02825
- # Currently only implemented for models with natural image inputs.
- # of the model's fully differentiable structure. Refer to this paper for
- # more details: https://arxiv.org/abs/1906.02825
- # Currently only implemented for models with natural image inputs.
- "numIntegralSteps": 42, # Number of steps for approximating the path integral.
- # A good value to start is 50 and gradually increase until the
- # sum to diff property is met within the desired error range.
- },
- },
- "autoScaling": { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
- # response to increases and decreases in traffic. Care should be
- # taken to ramp up traffic according to the model's ability to scale
- # or you will start seeing increases in latency and 429 response codes.
- #
- # Note that you cannot use AutoScaling if your version uses
- # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use specify
- # `manual_scaling`.
- "minNodes": 42, # Optional. The minimum number of nodes to allocate for this model. These
- # nodes are always up, starting from the time the model is deployed.
- # Therefore, the cost of operating this model will be at least
- # `rate` * `min_nodes` * number of hours since last billing cycle,
- # where `rate` is the cost per node-hour as documented in the
- # [pricing guide](/ml-engine/docs/pricing),
- # even if no predictions are performed. There is additional cost for each
- # prediction performed.
- #
- # Unlike manual scaling, if the load gets too heavy for the nodes
- # that are up, the service will automatically add nodes to handle the
- # increased load as well as scale back as traffic drops, always maintaining
- # at least `min_nodes`. You will be charged for the time in which additional
- # nodes are used.
- #
- # If `min_nodes` is not specified and AutoScaling is used with a [legacy
- # (MLS1) machine type](/ml-engine/docs/machine-types-online-prediction),
- # `min_nodes` defaults to 0, in which case, when traffic to a model stops
- # (and after a cool-down period), nodes will be shut down and no charges will
- # be incurred until traffic to the model resumes.
- #
- # If `min_nodes` is not specified and AutoScaling is used with a [Compute
- # Engine (N1) machine type](/ml-engine/docs/machine-types-online-prediction),
- # `min_nodes` defaults to 1. `min_nodes` must be at least 1 for use with a
- # Compute Engine machine type.
- #
- # Note that you cannot use AutoScaling if your version uses
- # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must use
- # ManualScaling.
- #
- # You can set `min_nodes` when creating the model version, and you can also
- # update `min_nodes` for an existing version:
- # <pre>
- # update_body.json:
- # {
- # 'autoScaling': {
- # 'minNodes': 5
- # }
- # }
- # </pre>
- # HTTP request:
- # <pre style="max-width: 626px;">
- # PATCH
- # https://ml.googleapis.com/v1/{name=projects/*/models/*/versions/*}?update_mask=autoScaling.minNodes
- # -d @./update_body.json
- # </pre>
- },
- "createTime": "A String", # Output only. The time the version was created.
- "labels": { # Optional. One or more labels that you can add, to organize your model
- # versions. Each label is a key-value pair, where both the key and the value
- # are arbitrary strings that you supply.
- # For more information, see the documentation on
- # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
- "a_key": "A String",
- },
- "requestLoggingConfig": { # Configuration for logging request-response pairs to a BigQuery table. # Optional. *Only* specify this field in a
- # projects.models.versions.patch
- # request. Specifying it in a
- # projects.models.versions.create
- # request has no effect.
- #
- # Configures the request-response pair logging on predictions from this
- # Version.
- # Online prediction requests to a model version and the responses to these
- # requests are converted to raw strings and saved to the specified BigQuery
- # table. Logging is constrained by [BigQuery quotas and
- # limits](/bigquery/quotas). If your project exceeds BigQuery quotas or limits,
- # AI Platform Prediction does not log request-response pairs, but it continues
- # to serve predictions.
- #
- # If you are using [continuous
- # evaluation](/ml-engine/docs/continuous-evaluation/), you do not need to
- # specify this configuration manually. Setting up continuous evaluation
- # automatically enables logging of request-response pairs.
- "bigqueryTableName": "A String", # Required. Fully qualified BigQuery table name in the following format:
- # "<var>project_id</var>.<var>dataset_name</var>.<var>table_name</var>"
- #
- # The specified table must already exist, and the "Cloud ML Service Agent"
- # for your project must have permission to write to it. The table must have
- # the following [schema](/bigquery/docs/schemas):
- #
- # <table>
- # <tr><th>Field name</th><th style="display: table-cell">Type</th>
- # <th style="display: table-cell">Mode</th></tr>
- # <tr><td>model</td><td>STRING</td><td>REQUIRED</td></tr>
- # <tr><td>model_version</td><td>STRING</td><td>REQUIRED</td></tr>
- # <tr><td>time</td><td>TIMESTAMP</td><td>REQUIRED</td></tr>
- # <tr><td>raw_data</td><td>STRING</td><td>REQUIRED</td></tr>
- # <tr><td>raw_prediction</td><td>STRING</td><td>NULLABLE</td></tr>
- # <tr><td>groundtruth</td><td>STRING</td><td>NULLABLE</td></tr>
- # </table>
- "samplingPercentage": 3.14, # Percentage of requests to be logged, expressed as a fraction from 0 to 1.
- # For example, if you want to log 10% of requests, enter `0.1`. The sampling
- # window is the lifetime of the model version. Defaults to 0.
- },
- "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
- "machineType": "A String", # Optional. The type of machine on which to serve the model. Currently only
- # applies to online prediction service. If this field is not specified, it
- # defaults to `mls1-c1-m2`.
- #
- # Online prediction supports the following machine types:
- #
- # * `mls1-c1-m2`
- # * `mls1-c4-m2`
- # * `n1-standard-2`
- # * `n1-standard-4`
- # * `n1-standard-8`
- # * `n1-standard-16`
- # * `n1-standard-32`
- # * `n1-highmem-2`
- # * `n1-highmem-4`
- # * `n1-highmem-8`
- # * `n1-highmem-16`
- # * `n1-highmem-32`
- # * `n1-highcpu-2`
- # * `n1-highcpu-4`
- # * `n1-highcpu-8`
- # * `n1-highcpu-16`
- # * `n1-highcpu-32`
- #
- # `mls1-c1-m2` is generally available. All other machine types are available
- # in beta. Learn more about the [differences between machine
- # types](/ml-engine/docs/machine-types-online-prediction).
- "runtimeVersion": "A String", # Required. The AI Platform runtime version to use for this deployment.
- #
- # For more information, see the
- # [runtime version list](/ml-engine/docs/runtime-version-list) and
- # [how to manage runtime versions](/ml-engine/docs/versioning).
- "description": "A String", # Optional. The description specified for the version when it was created.
- "framework": "A String", # Optional. The machine learning framework AI Platform uses to train
- # this version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,
- # `XGBOOST`. If you do not specify a framework, AI Platform
- # will analyze files in the deployment_uri to determine a framework. If you
- # choose `SCIKIT_LEARN` or `XGBOOST`, you must also set the runtime version
- # of the model to 1.4 or greater.
- #
- # Do **not** specify a framework if you're deploying a [custom
- # prediction routine](/ml-engine/docs/tensorflow/custom-prediction-routines).
- #
- # If you specify a [Compute Engine (N1) machine
- # type](/ml-engine/docs/machine-types-online-prediction) in the
- # `machineType` field, you must specify `TENSORFLOW`
- # for the framework.
+ "name": "A String", # Required. The name specified for the model when it was created.
+ #
+ # The model name must be unique within the project it is created in.
+ "onlinePredictionConsoleLogging": True or False, # Optional. If true, online prediction nodes send `stderr` and `stdout`
+ # streams to Stackdriver Logging. These can be more verbose than the standard
+ # access logs (see `onlinePredictionLogging`) and can incur higher cost.
+ # However, they are helpful for debugging. Note that
+ # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
+ # your project receives prediction requests at a high QPS. Estimate your
+ # costs before enabling this option.
+ #
+ # Default is false.
"etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
# prevent simultaneous updates of a model from overwriting each other.
# It is strongly suggested that systems make use of the `etag` in the
# read-modify-write cycle to perform model updates in order to avoid race
- # conditions: An `etag` is returned in the response to `GetVersion`, and
- # systems are expected to put that etag in the request to `UpdateVersion` to
+ # conditions: An `etag` is returned in the response to `GetModel`, and
+ # systems are expected to put that etag in the request to `UpdateModel` to
# ensure that their change will be applied to the model as intended.
- },
- "onlinePredictionConsoleLogging": True or False, # Optional. If true, online prediction nodes send `stderr` and `stdout`
- # streams to Stackdriver Logging. These can be more verbose than the standard
- # access logs (see `onlinePredictionLogging`) and can incur higher cost.
- # However, they are helpful for debugging. Note that
- # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
- # your project receives prediction requests at a high QPS. Estimate your
- # costs before enabling this option.
- #
- # Default is false.
- "regions": [ # Optional. The list of regions where the model is going to be deployed.
- # Only one region per model is supported.
- # Defaults to 'us-central1' if nothing is set.
- # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
- # for AI Platform services.
- # Note:
- # * No matter where a model is deployed, it can always be accessed by
- # users from anywhere, both for online and batch prediction.
- # * The region for a batch prediction job is set by the region field when
- # submitting the batch prediction job and does not take its value from
- # this field.
- "A String",
- ],
- "description": "A String", # Optional. The description specified for the model when it was created.
- "onlinePredictionLogging": True or False, # Optional. If true, online prediction access logs are sent to StackDriver
- # Logging. These logs are like standard server access logs, containing
- # information like timestamp and latency for each request. Note that
- # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
- # your project receives prediction requests at a high queries per second rate
- # (QPS). Estimate your costs before enabling this option.
- #
- # Default is false.
- "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
- # prevent simultaneous updates of a model from overwriting each other.
- # It is strongly suggested that systems make use of the `etag` in the
- # read-modify-write cycle to perform model updates in order to avoid race
- # conditions: An `etag` is returned in the response to `GetModel`, and
- # systems are expected to put that etag in the request to `UpdateModel` to
- # ensure that their change will be applied to the model as intended.
- "labels": { # Optional. One or more labels that you can add, to organize your models.
- # Each label is a key-value pair, where both the key and the value are
- # arbitrary strings that you supply.
- # For more information, see the documentation on
- # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
- "a_key": "A String",
- },
- "name": "A String", # Required. The name specified for the model when it was created.
- #
- # The model name must be unique within the project it is created in.
-}
+    "labels": { # Optional. One or more labels that you can add to organize your models.
+ # Each label is a key-value pair, where both the key and the value are
+ # arbitrary strings that you supply.
+ # For more information, see the documentation on
+ # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
+ "a_key": "A String",
+ },
+ "defaultVersion": { # Represents a version of the model. # Output only. The default version of the model. This version will be used to
+ # handle prediction requests that do not specify a version.
+ #
+ # You can change the default version by calling
+ # projects.models.versions.setDefault.
+ #
+ # Each version is a trained model deployed in the cloud, ready to handle
+ # prediction requests. A model can have multiple versions. You can get
+ # information about all of the versions of a given model by calling
+ # projects.models.versions.list.
+      "labels": { # Optional. One or more labels that you can add to organize your model
+ # versions. Each label is a key-value pair, where both the key and the value
+ # are arbitrary strings that you supply.
+ # For more information, see the documentation on
+ # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
+ "a_key": "A String",
+ },
+ "machineType": "A String", # Optional. The type of machine on which to serve the model. Currently only
+ # applies to online prediction service. If this field is not specified, it
+ # defaults to `mls1-c1-m2`.
+ #
+ # Online prediction supports the following machine types:
+ #
+ # * `mls1-c1-m2`
+ # * `mls1-c4-m2`
+ # * `n1-standard-2`
+ # * `n1-standard-4`
+ # * `n1-standard-8`
+ # * `n1-standard-16`
+ # * `n1-standard-32`
+ # * `n1-highmem-2`
+ # * `n1-highmem-4`
+ # * `n1-highmem-8`
+ # * `n1-highmem-16`
+ # * `n1-highmem-32`
+ # * `n1-highcpu-2`
+ # * `n1-highcpu-4`
+ # * `n1-highcpu-8`
+ # * `n1-highcpu-16`
+ # * `n1-highcpu-32`
+ #
+ # `mls1-c1-m2` is generally available. All other machine types are available
+ # in beta. Learn more about the [differences between machine
+ # types](/ml-engine/docs/machine-types-online-prediction).
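+          #
+          # For example, a version served on dedicated four-vCPU nodes might
+          # set `"machineType": "n1-standard-4"` (a beta machine type).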
+ "packageUris": [ # Optional. Cloud Storage paths (`gs://…`) of packages for [custom
+ # prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines)
+ # or [scikit-learn pipelines with custom
+ # code](/ml-engine/docs/scikit/exporting-for-prediction#custom-pipeline-code).
+ #
+ # For a custom prediction routine, one of these packages must contain your
+ # Predictor class (see
+ # [`predictionClass`](#Version.FIELDS.prediction_class)). Additionally,
+          # include any dependencies that your Predictor or scikit-learn pipeline
+          # uses and that are not already included in your selected [runtime
+ # version](/ml-engine/docs/tensorflow/runtime-version-list).
+ #
+ # If you specify this field, you must also set
+ # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater.
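+          #
+          # For example (bucket and file names are placeholders):
+          # `"packageUris": ["gs://your_bucket/my_predictor-0.1.tar.gz"]`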
+ "A String",
+ ],
+ "acceleratorConfig": { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
+ # Only specify this field if you have specified a Compute Engine (N1) machine
+ # type in the `machineType` field. Learn more about [using GPUs for online
+ # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+ # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+ # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+ # [accelerators for online
+ # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
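+          #
+          # For example, to attach one NVIDIA Tesla K80 GPU to each node you
+          # might set `{"type": "NVIDIA_TESLA_K80", "count": "1"}`.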
+ "type": "A String", # The type of accelerator to use.
+ "count": "A String", # The number of accelerators to attach to each machine running the job.
+ },
+ "state": "A String", # Output only. The state of a version.
+ "name": "A String", # Required. The name specified for the version when it was created.
+ #
+ # The version name must be unique within the model it is created in.
+ "autoScaling": { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
+ # response to increases and decreases in traffic. Care should be
+          # taken to ramp up traffic according to the model's ability to scale,
+          # or you will start seeing increases in latency and 429 response codes.
+          #
+          # Note that you cannot use AutoScaling if your version uses
+          # [GPUs](#Version.FIELDS.accelerator_config). Instead, you must specify
+          # `manual_scaling`.
+ "minNodes": 42, # Optional. The minimum number of nodes to allocate for this model. These
+ # nodes are always up, starting from the time the model is deployed.
+ # Therefore, the cost of operating this model will be at least
+ # `rate` * `min_nodes` * number of hours since last billing cycle,
+ # where `rate` is the cost per node-hour as documented in the
+ # [pricing guide](/ml-engine/docs/pricing),
+ # even if no predictions are performed. There is additional cost for each
+ # prediction performed.
+ #
+ # Unlike manual scaling, if the load gets too heavy for the nodes
+ # that are up, the service will automatically add nodes to handle the
+ # increased load as well as scale back as traffic drops, always maintaining
+ # at least `min_nodes`. You will be charged for the time in which additional
+ # nodes are used.
+ #
+ # If `min_nodes` is not specified and AutoScaling is used with a [legacy
+ # (MLS1) machine type](/ml-engine/docs/machine-types-online-prediction),
+ # `min_nodes` defaults to 0, in which case, when traffic to a model stops
+ # (and after a cool-down period), nodes will be shut down and no charges will
+ # be incurred until traffic to the model resumes.
+ #
+ # If `min_nodes` is not specified and AutoScaling is used with a [Compute
+ # Engine (N1) machine type](/ml-engine/docs/machine-types-online-prediction),
+ # `min_nodes` defaults to 1. `min_nodes` must be at least 1 for use with a
+ # Compute Engine machine type.
+ #
+ # You can set `min_nodes` when creating the model version, and you can also
+ # update `min_nodes` for an existing version:
+ # <pre>
+ # update_body.json:
+ # {
+ # 'autoScaling': {
+ # 'minNodes': 5
+ # }
+ # }
+ # </pre>
+ # HTTP request:
+ # <pre style="max-width: 626px;">
+ # PATCH
+ # https://ml.googleapis.com/v1/{name=projects/*/models/*/versions/*}?update_mask=autoScaling.minNodes
+ # -d @./update_body.json
+ # </pre>
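+          #
+          # A minimal sketch of the same update using this Python client
+          # library (`project`, `model`, and `version` are placeholders):
+          # <pre style="max-width: 626px;">
+          # from googleapiclient import discovery
+          #
+          # ml = discovery.build('ml', 'v1')
+          # name = 'projects/{}/models/{}/versions/{}'.format(
+          #     project, model, version)
+          # ml.projects().models().versions().patch(
+          #     name=name,
+          #     body={'autoScaling': {'minNodes': 5}},
+          #     updateMask='autoScaling.minNodes').execute()
+          # </pre>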
+ },
+ "explanationConfig": { # Message holding configuration options for explaining model predictions. # Optional. Configures explainability features on the model's version.
+ # Some explanation features require additional metadata to be loaded
+ # as part of the model payload.
+ # There are two feature attribution methods supported for TensorFlow models:
+ # integrated gradients and sampled Shapley.
+ # [Learn more about feature
+ # attributions.](/ai-platform/prediction/docs/ai-explanations/overview)
+          "integratedGradientsAttribution": { # Attributes credit by computing the Aumann-Shapley value taking advantage
+              # of the model's fully differentiable structure. Refer to this paper for
+              # more details: https://arxiv.org/abs/1703.01365 (also published at
+              # http://proceedings.mlr.press/v70/sundararajan17a.html).
+ "numIntegralSteps": 42, # Number of steps for approximating the path integral.
+ # A good value to start is 50 and gradually increase until the
+ # sum to diff property is met within the desired error range.
+ },
+          "xraiAttribution": { # Attributes credit by computing the XRAI taking advantage
+              # of the model's fully differentiable structure. Refer to this paper for
+              # more details: https://arxiv.org/abs/1906.02825
+              # Currently only implemented for models with natural image inputs.
+ "numIntegralSteps": 42, # Number of steps for approximating the path integral.
+ # A good value to start is 50 and gradually increase until the
+ # sum to diff property is met within the desired error range.
+ },
+          "sampledShapleyAttribution": { # An attribution method that approximates Shapley values for features that
+              # contribute to the label being predicted. A sampling strategy is used to
+              # approximate the value rather than considering all subsets of features.
+ "numPaths": 42, # The number of feature permutations to consider when approximating the
+ # Shapley values.
+ },
+ },
+ "pythonVersion": "A String", # Required. The version of Python used in prediction.
+ #
+ # The following Python versions are available:
+ #
+ # * Python '3.7' is available when `runtime_version` is set to '1.15' or
+ # later.
+ # * Python '3.5' is available when `runtime_version` is set to a version
+ # from '1.4' to '1.14'.
+ # * Python '2.7' is available when `runtime_version` is set to '1.15' or
+ # earlier.
+ #
+ # Read more about the Python versions available for [each runtime
+ # version](/ml-engine/docs/runtime-version-list).
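+          #
+          # For example, a version running runtime version '1.15' might set
+          # `"pythonVersion": "3.7"`.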
+ "requestLoggingConfig": { # Configuration for logging request-response pairs to a BigQuery table. # Optional. *Only* specify this field in a
+ # projects.models.versions.patch
+ # request. Specifying it in a
+ # projects.models.versions.create
+ # request has no effect.
+ #
+ # Configures the request-response pair logging on predictions from this
+ # Version.
+ # Online prediction requests to a model version and the responses to these
+ # requests are converted to raw strings and saved to the specified BigQuery
+ # table. Logging is constrained by [BigQuery quotas and
+ # limits](/bigquery/quotas). If your project exceeds BigQuery quotas or limits,
+ # AI Platform Prediction does not log request-response pairs, but it continues
+ # to serve predictions.
+ #
+ # If you are using [continuous
+ # evaluation](/ml-engine/docs/continuous-evaluation/), you do not need to
+ # specify this configuration manually. Setting up continuous evaluation
+ # automatically enables logging of request-response pairs.
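+          #
+          # For example, a patch request body that logs 10% of requests to a
+          # BigQuery table (the table name is a placeholder) might look like:
+          # <pre>
+          # {
+          #   'requestLoggingConfig': {
+          #     'bigqueryTableName': 'your_project.your_dataset.your_table',
+          #     'samplingPercentage': 0.1
+          #   }
+          # }
+          # </pre>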
+ "samplingPercentage": 3.14, # Percentage of requests to be logged, expressed as a fraction from 0 to 1.
+ # For example, if you want to log 10% of requests, enter `0.1`. The sampling
+ # window is the lifetime of the model version. Defaults to 0.
+ "bigqueryTableName": "A String", # Required. Fully qualified BigQuery table name in the following format:
+ # "<var>project_id</var>.<var>dataset_name</var>.<var>table_name</var>"
+ #
+ # The specified table must already exist, and the "Cloud ML Service Agent"
+ # for your project must have permission to write to it. The table must have
+ # the following [schema](/bigquery/docs/schemas):
+ #
+ # <table>
+ # <tr><th>Field name</th><th style="display: table-cell">Type</th>
+ # <th style="display: table-cell">Mode</th></tr>
+ # <tr><td>model</td><td>STRING</td><td>REQUIRED</td></tr>
+ # <tr><td>model_version</td><td>STRING</td><td>REQUIRED</td></tr>
+ # <tr><td>time</td><td>TIMESTAMP</td><td>REQUIRED</td></tr>
+ # <tr><td>raw_data</td><td>STRING</td><td>REQUIRED</td></tr>
+ # <tr><td>raw_prediction</td><td>STRING</td><td>NULLABLE</td></tr>
+ # <tr><td>groundtruth</td><td>STRING</td><td>NULLABLE</td></tr>
+ # </table>
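+              #
+              # A table with this schema can be created ahead of time; for
+              # example, a JSON schema file for the `bq mk` command might
+              # contain:
+              # <pre>
+              # [
+              #   {"name": "model", "type": "STRING", "mode": "REQUIRED"},
+              #   {"name": "model_version", "type": "STRING", "mode": "REQUIRED"},
+              #   {"name": "time", "type": "TIMESTAMP", "mode": "REQUIRED"},
+              #   {"name": "raw_data", "type": "STRING", "mode": "REQUIRED"},
+              #   {"name": "raw_prediction", "type": "STRING", "mode": "NULLABLE"},
+              #   {"name": "groundtruth", "type": "STRING", "mode": "NULLABLE"}
+              # ]
+              # </pre>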
+ },
+ "manualScaling": { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
+ # model. You should generally use `auto_scaling` with an appropriate
+ # `min_nodes` instead, but this option is available if you want more
+ # predictable billing. Beware that latency and error rates will increase
+          # if the traffic exceeds the capacity of the system to serve it with
+          # the selected number of nodes.
+ "nodes": 42, # The number of nodes to allocate for this model. These nodes are always up,
+ # starting from the time the model is deployed, so the cost of operating
+ # this model will be proportional to `nodes` * number of hours since
+ # last billing cycle plus the cost for each prediction performed.
+ },
+ "createTime": "A String", # Output only. The time the version was created.
+ "lastUseTime": "A String", # Output only. The time the version was last used for prediction.
+ "framework": "A String", # Optional. The machine learning framework AI Platform uses to train
+ # this version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,
+ # `XGBOOST`. If you do not specify a framework, AI Platform
+ # will analyze files in the deployment_uri to determine a framework. If you
+ # choose `SCIKIT_LEARN` or `XGBOOST`, you must also set the runtime version
+ # of the model to 1.4 or greater.
+ #
+ # Do **not** specify a framework if you're deploying a [custom
+ # prediction routine](/ml-engine/docs/tensorflow/custom-prediction-routines).
+ #
+ # If you specify a [Compute Engine (N1) machine
+ # type](/ml-engine/docs/machine-types-online-prediction) in the
+ # `machineType` field, you must specify `TENSORFLOW`
+ # for the framework.
+ "predictionClass": "A String", # Optional. The fully qualified name
+ # (<var>module_name</var>.<var>class_name</var>) of a class that implements
+ # the Predictor interface described in this reference field. The module
+ # containing this class should be included in a package provided to the
+ # [`packageUris` field](#Version.FIELDS.package_uris).
+ #
+ # Specify this field if and only if you are deploying a [custom prediction
+ # routine (beta)](/ml-engine/docs/tensorflow/custom-prediction-routines).
+ # If you specify this field, you must set
+ # [`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater and
+ # you must set `machineType` to a [legacy (MLS1)
+ # machine type](/ml-engine/docs/machine-types-online-prediction).
+ #
+ # The following code sample provides the Predictor interface:
+ #
+ # <pre style="max-width: 626px;">
+ # class Predictor(object):
+ # """Interface for constructing custom predictors."""
+ #
+ # def predict(self, instances, **kwargs):
+ # """Performs custom prediction.
+ #
+ # Instances are the decoded values from the request. They have already
+ # been deserialized from JSON.
+ #
+ # Args:
+ # instances: A list of prediction input instances.
+ # **kwargs: A dictionary of keyword args provided as additional
+ # fields on the predict request body.
+ #
+ # Returns:
+ # A list of outputs containing the prediction results. This list must
+ # be JSON serializable.
+ # """
+ # raise NotImplementedError()
+ #
+ # @classmethod
+ # def from_path(cls, model_dir):
+ # """Creates an instance of Predictor using the given path.
+ #
+ # Loading of the predictor should be done in this method.
+ #
+ # Args:
+ # model_dir: The local directory that contains the exported model
+ # file along with any additional files uploaded when creating the
+ # version resource.
+ #
+ # Returns:
+ # An instance implementing this Predictor class.
+ # """
+ # raise NotImplementedError()
+ # </pre>
+ #
+ # Learn more about [the Predictor interface and custom prediction
+ # routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
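+          #
+          # A minimal hypothetical Predictor for a pickled scikit-learn model
+          # (the file name and class name are illustrative):
+          # <pre style="max-width: 626px;">
+          # import os
+          # import pickle
+          #
+          # class MyPredictor(object):
+          #   def __init__(self, model):
+          #     self._model = model
+          #
+          #   def predict(self, instances, **kwargs):
+          #     # Delegate to the loaded model; return a JSON-serializable list.
+          #     return self._model.predict(instances).tolist()
+          #
+          #   @classmethod
+          #   def from_path(cls, model_dir):
+          #     with open(os.path.join(model_dir, 'model.pkl'), 'rb') as f:
+          #       return cls(pickle.load(f))
+          # </pre>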
+ "isDefault": True or False, # Output only. If true, this version will be used to handle prediction
+ # requests that do not specify a version.
+ #
+ # You can change the default version by calling
+          # projects.models.versions.setDefault.
+ "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
+ # prevent simultaneous updates of a model from overwriting each other.
+ # It is strongly suggested that systems make use of the `etag` in the
+ # read-modify-write cycle to perform model updates in order to avoid race
+ # conditions: An `etag` is returned in the response to `GetVersion`, and
+ # systems are expected to put that etag in the request to `UpdateVersion` to
+ # ensure that their change will be applied to the model as intended.
+ "serviceAccount": "A String", # Optional. Specifies the service account for resource access control.
+ "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
+ "deploymentUri": "A String", # Required. The Cloud Storage location of the trained model used to
+ # create the version. See the
+ # [guide to model
+ # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
+ # information.
+ #
+ # When passing Version to
+ # projects.models.versions.create
+ # the model service uses the specified location as the source of the model.
+ # Once deployed, the model version is hosted by the prediction service, so
+ # this location is useful only as a historical record.
+ # The total number of model files can't exceed 1000.
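+          #
+          # For example: `"deploymentUri": "gs://your_bucket/model/"` (bucket
+          # and path are placeholders).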
+ "runtimeVersion": "A String", # Required. The AI Platform runtime version to use for this deployment.
+ #
+ # For more information, see the
+ # [runtime version list](/ml-engine/docs/runtime-version-list) and
+ # [how to manage runtime versions](/ml-engine/docs/versioning).
+ "description": "A String", # Optional. The description specified for the version when it was created.
+ },
+    "onlinePredictionLogging": True or False, # Optional. If true, online prediction access logs are sent to Stackdriver
+ # Logging. These logs are like standard server access logs, containing
+ # information like timestamp and latency for each request. Note that
+ # [Stackdriver logs may incur a cost](/stackdriver/pricing), especially if
+ # your project receives prediction requests at a high queries per second rate
+ # (QPS). Estimate your costs before enabling this option.
+ #
+ # Default is false.
+ }
updateMask: string, Required. Specifies the path, relative to `Model`, of the field to update.
@@ -2567,9 +2567,6 @@
{ # This resource represents a long-running operation that is the result of a
# network API call.
- "name": "A String", # The server-assigned name, which is only unique within the same service that
- # originally returns it. If you use the default HTTP mapping, the
- # `name` should be a resource name ending with `operations/{unique_id}`.
"error": { # The `Status` type defines a logical error model that is suitable for # The error result of the operation in case of failure or cancellation.
# different programming environments, including REST APIs and RPC APIs. It is
# used by [gRPC](https://github.com/grpc). Each `Status` message contains
@@ -2577,22 +2574,16 @@
#
# You can find out more about this error model and how to work with it in the
# [API Design Guide](https://cloud.google.com/apis/design/errors).
- "code": 42, # The status code, which should be an enum value of google.rpc.Code.
- "message": "A String", # A developer-facing error message, which should be in English. Any
- # user-facing error message should be localized and sent in the
- # google.rpc.Status.details field, or localized by the client.
"details": [ # A list of messages that carry the error details. There is a common set of
# message types for APIs to use.
{
"a_key": "", # Properties of the object. Contains field @type with type URL.
},
],
- },
- "metadata": { # Service-specific metadata associated with the operation. It typically
- # contains progress information and common metadata such as create time.
- # Some services might not provide such metadata. Any method that returns a
- # long-running operation should document the metadata type, if any.
- "a_key": "", # Properties of the object. Contains field @type with type URL.
+ "message": "A String", # A developer-facing error message, which should be in English. Any
+ # user-facing error message should be localized and sent in the
+ # google.rpc.Status.details field, or localized by the client.
+ "code": 42, # The status code, which should be an enum value of google.rpc.Code.
},
"done": True or False, # If the value is `false`, it means the operation is still in progress.
# If `true`, the operation is completed, and either `error` or `response` is
@@ -2607,6 +2598,15 @@
# `TakeSnapshotResponse`.
"a_key": "", # Properties of the object. Contains field @type with type URL.
},
+ "metadata": { # Service-specific metadata associated with the operation. It typically
+ # contains progress information and common metadata such as create time.
+ # Some services might not provide such metadata. Any method that returns a
+ # long-running operation should document the metadata type, if any.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
+ },
+ "name": "A String", # The server-assigned name, which is only unique within the same service that
+ # originally returns it. If you use the default HTTP mapping, the
+ # `name` should be a resource name ending with `operations/{unique_id}`.
}</pre>
</div>
@@ -2624,11 +2624,6 @@
The object takes the form of:
{ # Request message for `SetIamPolicy` method.
- "updateMask": "A String", # OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only
- # the fields in the mask will be modified. If no mask is provided, the
- # following default mask is used:
- #
- # `paths: "bindings, etag"`
"policy": { # An Identity and Access Management (IAM) policy, which specifies access # REQUIRED: The complete policy to be applied to the `resource`. The size of
# the policy is limited to a few 10s of KB. An empty policy is a
# valid policy but certain Cloud Platform services (such as Projects)
@@ -2699,30 +2694,18 @@
#
# For a description of IAM and its features, see the
# [IAM documentation](https://cloud.google.com/iam/docs/).
- "version": 42, # Specifies the format of the policy.
- #
- # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
- # are rejected.
- #
- # Any operation that affects conditional role bindings must specify version
- # `3`. This requirement applies to the following operations:
- #
- # * Getting a policy that includes a conditional role binding
- # * Adding a conditional role binding to a policy
- # * Changing a conditional role binding in a policy
- # * Removing any role binding, with or without a condition, from a policy
- # that includes conditions
+ "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
+ # prevent simultaneous updates of a policy from overwriting each other.
+ # It is strongly suggested that systems make use of the `etag` in the
+ # read-modify-write cycle to perform policy updates in order to avoid race
+ # conditions: An `etag` is returned in the response to `getIamPolicy`, and
+ # systems are expected to put that etag in the request to `setIamPolicy` to
+ # ensure that their change will be applied to the same version of the policy.
#
# **Important:** If you use IAM Conditions, you must include the `etag` field
# whenever you call `setIamPolicy`. If you omit this field, then IAM allows
# you to overwrite a version `3` policy with a version `1` policy, and all of
# the conditions in the version `3` policy are lost.
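+      #
+      # A minimal read-modify-write sketch with this client library
+      # (`resource` and `new_bindings` are placeholders):
+      # <pre>
+      # from googleapiclient import discovery
+      #
+      # ml = discovery.build('ml', 'v1')
+      # policy = ml.projects().models().getIamPolicy(
+      #     resource=resource).execute()
+      # policy['bindings'] = new_bindings
+      # ml.projects().models().setIamPolicy(
+      #     resource=resource, body={'policy': policy}).execute()
+      # </pre>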
- #
- # If a policy does not include any conditions, operations on that policy may
- # specify any valid version or leave the field unset.
- #
- # To learn which resources support conditions in their IAM policies, see the
- # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
"auditConfigs": [ # Specifies cloud audit logging configuration for this policy.
{ # Specifies the audit configuration for a service.
# The configuration determines which permission types are logged, and what
@@ -2739,7 +2722,7 @@
# {
# "audit_configs": [
# {
- # "service": "allServices"
+ # "service": "allServices",
# "audit_log_configs": [
# {
# "log_type": "DATA_READ",
@@ -2748,18 +2731,18 @@
# ]
# },
# {
- # "log_type": "DATA_WRITE",
+ # "log_type": "DATA_WRITE"
# },
# {
- # "log_type": "ADMIN_READ",
+ # "log_type": "ADMIN_READ"
# }
# ]
# },
# {
- # "service": "sampleservice.googleapis.com"
+ # "service": "sampleservice.googleapis.com",
# "audit_log_configs": [
# {
- # "log_type": "DATA_READ",
+ # "log_type": "DATA_READ"
# },
# {
# "log_type": "DATA_WRITE",
@@ -2791,27 +2774,53 @@
# ]
# },
# {
- # "log_type": "DATA_WRITE",
+ # "log_type": "DATA_WRITE"
# }
# ]
# }
#
# This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting
# jose@example.com from DATA_READ logging.
+ "logType": "A String", # The log type that this config enables.
"exemptedMembers": [ # Specifies the identities that do not cause logging for this type of
# permission.
# Follows the same format of Binding.members.
"A String",
],
- "logType": "A String", # The log type that this config enables.
},
],
},
],
+ "version": 42, # Specifies the format of the policy.
+ #
+ # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
+ # are rejected.
+ #
+ # Any operation that affects conditional role bindings must specify version
+ # `3`. This requirement applies to the following operations:
+ #
+ # * Getting a policy that includes a conditional role binding
+ # * Adding a conditional role binding to a policy
+ # * Changing a conditional role binding in a policy
+ # * Removing any role binding, with or without a condition, from a policy
+ # that includes conditions
+ #
+ # **Important:** If you use IAM Conditions, you must include the `etag` field
+ # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+ # you to overwrite a version `3` policy with a version `1` policy, and all of
+ # the conditions in the version `3` policy are lost.
+ #
+ # If a policy does not include any conditions, operations on that policy may
+ # specify any valid version or leave the field unset.
+ #
+ # To learn which resources support conditions in their IAM policies, see the
+ # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
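+      #
+      # For example, a version `3` policy with a single conditional role
+      # binding might look like (members, etag, and timestamps are
+      # illustrative):
+      # <pre>
+      # {
+      #   "version": 3,
+      #   "bindings": [
+      #     {
+      #       "role": "roles/ml.modelUser",
+      #       "members": ["user:alice@example.com"],
+      #       "condition": {
+      #         "title": "expirable access",
+      #         "expression":
+      #             "request.time < timestamp('2021-01-01T00:00:00Z')"
+      #       }
+      #     }
+      #   ],
+      #   "etag": "BwWWja0YfJA="
+      # }
+      # </pre>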
"bindings": [ # Associates a list of `members` to a `role`. Optionally, may specify a
# `condition` that determines how and when the `bindings` are applied. Each
# of the `bindings` must contain at least one member.
{ # Associates `members` with a `role`.
+ "role": "A String", # Role that is assigned to `members`.
+ # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
"condition": { # Represents a textual expression in the Common Expression Language (CEL) # The condition that is associated with this binding.
#
# If the condition evaluates to `true`, then this binding applies to the
@@ -2854,8 +2863,6 @@
# The exact variables and functions that may be referenced within an expression
# are determined by the service that evaluates it. See the service
# documentation for additional information.
- "description": "A String", # Optional. Description of the expression. This is a longer text which
- # describes the expression, e.g. when hovered over it in a UI.
"expression": "A String", # Textual representation of an expression in Common Expression Language
# syntax.
"title": "A String", # Optional. Title for the expression, i.e. a short string describing
@@ -2863,6 +2870,8 @@
# expression.
"location": "A String", # Optional. String indicating the location of the expression for error
# reporting, e.g. a file name and a position in the file.
+ "description": "A String", # Optional. Description of the expression. This is a longer text which
+ # describes the expression, e.g. when hovered over it in a UI.
},
"members": [ # Specifies the identities requesting access for a Cloud Platform resource.
# `members` can have the following values:
@@ -2909,23 +2918,14 @@
#
"A String",
],
- "role": "A String", # Role that is assigned to `members`.
- # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
},
],
- "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
- # prevent simultaneous updates of a policy from overwriting each other.
- # It is strongly suggested that systems make use of the `etag` in the
- # read-modify-write cycle to perform policy updates in order to avoid race
- # conditions: An `etag` is returned in the response to `getIamPolicy`, and
- # systems are expected to put that etag in the request to `setIamPolicy` to
- # ensure that their change will be applied to the same version of the policy.
- #
- # **Important:** If you use IAM Conditions, you must include the `etag` field
- # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
- # you to overwrite a version `3` policy with a version `1` policy, and all of
- # the conditions in the version `3` policy are lost.
},
+ "updateMask": "A String", # OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only
+ # the fields in the mask will be modified. If no mask is provided, the
+ # following default mask is used:
+ #
+ # `paths: "bindings, etag"`
}
x__xgafv: string, V1 error format.
@@ -3003,30 +3003,18 @@
#
# For a description of IAM and its features, see the
# [IAM documentation](https://cloud.google.com/iam/docs/).
- "version": 42, # Specifies the format of the policy.
- #
- # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
- # are rejected.
- #
- # Any operation that affects conditional role bindings must specify version
- # `3`. This requirement applies to the following operations:
- #
- # * Getting a policy that includes a conditional role binding
- # * Adding a conditional role binding to a policy
- # * Changing a conditional role binding in a policy
- # * Removing any role binding, with or without a condition, from a policy
- # that includes conditions
+ "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
+ # prevent simultaneous updates of a policy from overwriting each other.
+ # It is strongly suggested that systems make use of the `etag` in the
+ # read-modify-write cycle to perform policy updates in order to avoid race
+ # conditions: An `etag` is returned in the response to `getIamPolicy`, and
+ # systems are expected to put that etag in the request to `setIamPolicy` to
+ # ensure that their change will be applied to the same version of the policy.
#
# **Important:** If you use IAM Conditions, you must include the `etag` field
# whenever you call `setIamPolicy`. If you omit this field, then IAM allows
# you to overwrite a version `3` policy with a version `1` policy, and all of
# the conditions in the version `3` policy are lost.
- #
- # If a policy does not include any conditions, operations on that policy may
- # specify any valid version or leave the field unset.
- #
- # To learn which resources support conditions in their IAM policies, see the
- # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
"auditConfigs": [ # Specifies cloud audit logging configuration for this policy.
{ # Specifies the audit configuration for a service.
# The configuration determines which permission types are logged, and what
@@ -3043,7 +3031,7 @@
# {
# "audit_configs": [
# {
- # "service": "allServices"
+ # "service": "allServices",
# "audit_log_configs": [
# {
# "log_type": "DATA_READ",
@@ -3052,18 +3040,18 @@
# ]
# },
# {
- # "log_type": "DATA_WRITE",
+ # "log_type": "DATA_WRITE"
# },
# {
- # "log_type": "ADMIN_READ",
+ # "log_type": "ADMIN_READ"
# }
# ]
# },
# {
- # "service": "sampleservice.googleapis.com"
+ # "service": "sampleservice.googleapis.com",
# "audit_log_configs": [
# {
- # "log_type": "DATA_READ",
+ # "log_type": "DATA_READ"
# },
# {
# "log_type": "DATA_WRITE",
@@ -3095,27 +3083,53 @@
# ]
# },
# {
- # "log_type": "DATA_WRITE",
+ # "log_type": "DATA_WRITE"
# }
# ]
# }
#
# This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting
# jose@example.com from DATA_READ logging.
+ "logType": "A String", # The log type that this config enables.
"exemptedMembers": [ # Specifies the identities that do not cause logging for this type of
# permission.
# Follows the same format of Binding.members.
"A String",
],
- "logType": "A String", # The log type that this config enables.
},
],
},
],
+ "version": 42, # Specifies the format of the policy.
+ #
+ # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
+ # are rejected.
+ #
+ # Any operation that affects conditional role bindings must specify version
+ # `3`. This requirement applies to the following operations:
+ #
+ # * Getting a policy that includes a conditional role binding
+ # * Adding a conditional role binding to a policy
+ # * Changing a conditional role binding in a policy
+ # * Removing any role binding, with or without a condition, from a policy
+ # that includes conditions
+ #
+ # **Important:** If you use IAM Conditions, you must include the `etag` field
+ # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+ # you to overwrite a version `3` policy with a version `1` policy, and all of
+ # the conditions in the version `3` policy are lost.
+ #
+ # If a policy does not include any conditions, operations on that policy may
+ # specify any valid version or leave the field unset.
+ #
+ # To learn which resources support conditions in their IAM policies, see the
+ # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
"bindings": [ # Associates a list of `members` to a `role`. Optionally, may specify a
# `condition` that determines how and when the `bindings` are applied. Each
# of the `bindings` must contain at least one member.
{ # Associates `members` with a `role`.
+ "role": "A String", # Role that is assigned to `members`.
+ # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
"condition": { # Represents a textual expression in the Common Expression Language (CEL) # The condition that is associated with this binding.
#
# If the condition evaluates to `true`, then this binding applies to the
@@ -3158,8 +3172,6 @@
# The exact variables and functions that may be referenced within an expression
# are determined by the service that evaluates it. See the service
# documentation for additional information.
- "description": "A String", # Optional. Description of the expression. This is a longer text which
- # describes the expression, e.g. when hovered over it in a UI.
"expression": "A String", # Textual representation of an expression in Common Expression Language
# syntax.
"title": "A String", # Optional. Title for the expression, i.e. a short string describing
@@ -3167,6 +3179,8 @@
# expression.
"location": "A String", # Optional. String indicating the location of the expression for error
# reporting, e.g. a file name and a position in the file.
+ "description": "A String", # Optional. Description of the expression. This is a longer text which
+ # describes the expression, e.g. when hovered over it in a UI.
},
"members": [ # Specifies the identities requesting access for a Cloud Platform resource.
# `members` can have the following values:
@@ -3213,22 +3227,8 @@
#
"A String",
],
- "role": "A String", # Role that is assigned to `members`.
- # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
},
],
- "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
- # prevent simultaneous updates of a policy from overwriting each other.
- # It is strongly suggested that systems make use of the `etag` in the
- # read-modify-write cycle to perform policy updates in order to avoid race
- # conditions: An `etag` is returned in the response to `getIamPolicy`, and
- # systems are expected to put that etag in the request to `setIamPolicy` to
- # ensure that their change will be applied to the same version of the policy.
- #
- # **Important:** If you use IAM Conditions, you must include the `etag` field
- # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
- # you to overwrite a version `3` policy with a version `1` policy, and all of
- # the conditions in the version `3` policy are lost.
}</pre>
</div>