docs: update docs (#916)
* fix: re-run script
* test: fix noxfile
diff --git a/docs/dyn/ml_v1.projects.models.versions.html b/docs/dyn/ml_v1.projects.models.versions.html
index 9cc9ec9..cc2e449 100644
--- a/docs/dyn/ml_v1.projects.models.versions.html
+++ b/docs/dyn/ml_v1.projects.models.versions.html
@@ -118,7 +118,22 @@
# prediction requests. A model can have multiple versions. You can get
# information about all of the versions of a given model by calling
# projects.models.versions.list.
- "state": "A String", # Output only. The state of a version.
+ "acceleratorConfig": { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
+ # Only specify this field if you have specified a Compute Engine (N1) machine
+ # type in the `machineType` field. Learn more about [using GPUs for online
+ # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+ # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+ # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+ # [accelerators for online
+ # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+ "count": "A String", # The number of accelerators to attach to each machine running the job.
+ "type": "A String", # The type of accelerator to use.
+ },
+ "isDefault": True or False, # Output only. If true, this version will be used to handle prediction
+ # requests that do not specify a version.
+ #
+ # You can change the default version by calling
+ # projects.methods.versions.setDefault.
"manualScaling": { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
# model. You should generally use `auto_scaling` with an appropriate
# `min_nodes` instead, but this option is available if you want more
@@ -130,6 +145,7 @@
# this model will be proportional to `nodes` * number of hours since
# last billing cycle plus the cost for each prediction performed.
},
+ "state": "A String", # Output only. The state of a version.
"name": "A String", # Required. The name specified for the version when it was created.
#
# The version name must be unique within the model it is created in.
@@ -203,6 +219,18 @@
#
# Learn more about [the Predictor interface and custom prediction
# routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
+ "deploymentUri": "A String", # Required. The Cloud Storage location of the trained model used to
+ # create the version. See the
+ # [guide to model
+ # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
+ # information.
+ #
+ # When passing Version to
+ # projects.models.versions.create
+ # the model service uses the specified location as the source of the model.
+ # Once deployed, the model version is hosted by the prediction service, so
+ # this location is useful only as a historical record.
+ # The total number of model files can't exceed 1000.
"packageUris": [ # Optional. Cloud Storage paths (`gs://…`) of packages for [custom
# prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines)
# or [scikit-learn pipelines with custom
@@ -255,18 +283,6 @@
# sum to diff property is met within the desired error range.
},
},
- "deploymentUri": "A String", # Required. The Cloud Storage location of the trained model used to
- # create the version. See the
- # [guide to model
- # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
- # information.
- #
- # When passing Version to
- # projects.models.versions.create
- # the model service uses the specified location as the source of the model.
- # Once deployed, the model version is hosted by the prediction service, so
- # this location is useful only as a historical record.
- # The total number of model files can't exceed 1000.
"autoScaling": { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
# response to increases and decreases in traffic. Care should be
# taken to ramp up traffic according to the model's ability to scale
@@ -322,6 +338,7 @@
# -d @./update_body.json
# </pre>
},
+ "createTime": "A String", # Output only. The time the version was created.
"labels": { # Optional. One or more labels that you can add, to organize your model
# versions. Each label is a key-value pair, where both the key and the value
# are arbitrary strings that you supply.
@@ -329,7 +346,6 @@
# <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
"a_key": "A String",
},
- "createTime": "A String", # Output only. The time the version was created.
"requestLoggingConfig": { # Configuration for logging request-response pairs to a BigQuery table. # Optional. *Only* specify this field in a
# projects.models.versions.patch
# request. Specifying it in a
@@ -425,22 +441,6 @@
# conditions: An `etag` is returned in the response to `GetVersion`, and
# systems are expected to put that etag in the request to `UpdateVersion` to
# ensure that their change will be applied to the model as intended.
- "isDefault": True or False, # Output only. If true, this version will be used to handle prediction
- # requests that do not specify a version.
- #
- # You can change the default version by calling
- # projects.methods.versions.setDefault.
- "acceleratorConfig": { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
- # Only specify this field if you have specified a Compute Engine (N1) machine
- # type in the `machineType` field. Learn more about [using GPUs for online
- # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
- # Note that the AcceleratorConfig can be used in both Jobs and Versions.
- # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
- # [accelerators for online
- # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
- "count": "A String", # The number of accelerators to attach to each machine running the job.
- "type": "A String", # The type of accelerator to use.
- },
}
x__xgafv: string, V1 error format.
@@ -453,16 +453,6 @@
{ # This resource represents a long-running operation that is the result of a
# network API call.
- "response": { # The normal response of the operation in case of success. If the original
- # method returns no data on success, such as `Delete`, the response is
- # `google.protobuf.Empty`. If the original method is standard
- # `Get`/`Create`/`Update`, the response should be the resource. For other
- # methods, the response should have the type `XxxResponse`, where `Xxx`
- # is the original method name. For example, if the original method name
- # is `TakeSnapshot()`, the inferred response type is
- # `TakeSnapshotResponse`.
- "a_key": "", # Properties of the object. Contains field @type with type URL.
- },
"name": "A String", # The server-assigned name, which is only unique within the same service that
# originally returns it. If you use the default HTTP mapping, the
# `name` should be a resource name ending with `operations/{unique_id}`.
@@ -473,16 +463,16 @@
#
# You can find out more about this error model and how to work with it in the
# [API Design Guide](https://cloud.google.com/apis/design/errors).
+ "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+ "message": "A String", # A developer-facing error message, which should be in English. Any
+ # user-facing error message should be localized and sent in the
+ # google.rpc.Status.details field, or localized by the client.
"details": [ # A list of messages that carry the error details. There is a common set of
# message types for APIs to use.
{
"a_key": "", # Properties of the object. Contains field @type with type URL.
},
],
- "code": 42, # The status code, which should be an enum value of google.rpc.Code.
- "message": "A String", # A developer-facing error message, which should be in English. Any
- # user-facing error message should be localized and sent in the
- # google.rpc.Status.details field, or localized by the client.
},
"metadata": { # Service-specific metadata associated with the operation. It typically
# contains progress information and common metadata such as create time.
@@ -493,6 +483,16 @@
"done": True or False, # If the value is `false`, it means the operation is still in progress.
# If `true`, the operation is completed, and either `error` or `response` is
# available.
+ "response": { # The normal response of the operation in case of success. If the original
+ # method returns no data on success, such as `Delete`, the response is
+ # `google.protobuf.Empty`. If the original method is standard
+ # `Get`/`Create`/`Update`, the response should be the resource. For other
+ # methods, the response should have the type `XxxResponse`, where `Xxx`
+ # is the original method name. For example, if the original method name
+ # is `TakeSnapshot()`, the inferred response type is
+ # `TakeSnapshotResponse`.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
+ },
}</pre>
</div>
@@ -520,16 +520,6 @@
{ # This resource represents a long-running operation that is the result of a
# network API call.
- "response": { # The normal response of the operation in case of success. If the original
- # method returns no data on success, such as `Delete`, the response is
- # `google.protobuf.Empty`. If the original method is standard
- # `Get`/`Create`/`Update`, the response should be the resource. For other
- # methods, the response should have the type `XxxResponse`, where `Xxx`
- # is the original method name. For example, if the original method name
- # is `TakeSnapshot()`, the inferred response type is
- # `TakeSnapshotResponse`.
- "a_key": "", # Properties of the object. Contains field @type with type URL.
- },
"name": "A String", # The server-assigned name, which is only unique within the same service that
# originally returns it. If you use the default HTTP mapping, the
# `name` should be a resource name ending with `operations/{unique_id}`.
@@ -540,16 +530,16 @@
#
# You can find out more about this error model and how to work with it in the
# [API Design Guide](https://cloud.google.com/apis/design/errors).
+ "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+ "message": "A String", # A developer-facing error message, which should be in English. Any
+ # user-facing error message should be localized and sent in the
+ # google.rpc.Status.details field, or localized by the client.
"details": [ # A list of messages that carry the error details. There is a common set of
# message types for APIs to use.
{
"a_key": "", # Properties of the object. Contains field @type with type URL.
},
],
- "code": 42, # The status code, which should be an enum value of google.rpc.Code.
- "message": "A String", # A developer-facing error message, which should be in English. Any
- # user-facing error message should be localized and sent in the
- # google.rpc.Status.details field, or localized by the client.
},
"metadata": { # Service-specific metadata associated with the operation. It typically
# contains progress information and common metadata such as create time.
@@ -560,6 +550,16 @@
"done": True or False, # If the value is `false`, it means the operation is still in progress.
# If `true`, the operation is completed, and either `error` or `response` is
# available.
+ "response": { # The normal response of the operation in case of success. If the original
+ # method returns no data on success, such as `Delete`, the response is
+ # `google.protobuf.Empty`. If the original method is standard
+ # `Get`/`Create`/`Update`, the response should be the resource. For other
+ # methods, the response should have the type `XxxResponse`, where `Xxx`
+ # is the original method name. For example, if the original method name
+ # is `TakeSnapshot()`, the inferred response type is
+ # `TakeSnapshotResponse`.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
+ },
}</pre>
</div>
@@ -588,7 +588,22 @@
# prediction requests. A model can have multiple versions. You can get
# information about all of the versions of a given model by calling
# projects.models.versions.list.
- "state": "A String", # Output only. The state of a version.
+ "acceleratorConfig": { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
+ # Only specify this field if you have specified a Compute Engine (N1) machine
+ # type in the `machineType` field. Learn more about [using GPUs for online
+ # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+ # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+ # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+ # [accelerators for online
+ # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+ "count": "A String", # The number of accelerators to attach to each machine running the job.
+ "type": "A String", # The type of accelerator to use.
+ },
+ "isDefault": True or False, # Output only. If true, this version will be used to handle prediction
+ # requests that do not specify a version.
+ #
+ # You can change the default version by calling
+ # projects.methods.versions.setDefault.
"manualScaling": { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
# model. You should generally use `auto_scaling` with an appropriate
# `min_nodes` instead, but this option is available if you want more
@@ -600,6 +615,7 @@
# this model will be proportional to `nodes` * number of hours since
# last billing cycle plus the cost for each prediction performed.
},
+ "state": "A String", # Output only. The state of a version.
"name": "A String", # Required. The name specified for the version when it was created.
#
# The version name must be unique within the model it is created in.
@@ -673,6 +689,18 @@
#
# Learn more about [the Predictor interface and custom prediction
# routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
+ "deploymentUri": "A String", # Required. The Cloud Storage location of the trained model used to
+ # create the version. See the
+ # [guide to model
+ # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
+ # information.
+ #
+ # When passing Version to
+ # projects.models.versions.create
+ # the model service uses the specified location as the source of the model.
+ # Once deployed, the model version is hosted by the prediction service, so
+ # this location is useful only as a historical record.
+ # The total number of model files can't exceed 1000.
"packageUris": [ # Optional. Cloud Storage paths (`gs://…`) of packages for [custom
# prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines)
# or [scikit-learn pipelines with custom
@@ -725,18 +753,6 @@
# sum to diff property is met within the desired error range.
},
},
- "deploymentUri": "A String", # Required. The Cloud Storage location of the trained model used to
- # create the version. See the
- # [guide to model
- # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
- # information.
- #
- # When passing Version to
- # projects.models.versions.create
- # the model service uses the specified location as the source of the model.
- # Once deployed, the model version is hosted by the prediction service, so
- # this location is useful only as a historical record.
- # The total number of model files can't exceed 1000.
"autoScaling": { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
# response to increases and decreases in traffic. Care should be
# taken to ramp up traffic according to the model's ability to scale
@@ -792,6 +808,7 @@
# -d @./update_body.json
# </pre>
},
+ "createTime": "A String", # Output only. The time the version was created.
"labels": { # Optional. One or more labels that you can add, to organize your model
# versions. Each label is a key-value pair, where both the key and the value
# are arbitrary strings that you supply.
@@ -799,7 +816,6 @@
# <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
"a_key": "A String",
},
- "createTime": "A String", # Output only. The time the version was created.
"requestLoggingConfig": { # Configuration for logging request-response pairs to a BigQuery table. # Optional. *Only* specify this field in a
# projects.models.versions.patch
# request. Specifying it in a
@@ -895,22 +911,6 @@
# conditions: An `etag` is returned in the response to `GetVersion`, and
# systems are expected to put that etag in the request to `UpdateVersion` to
# ensure that their change will be applied to the model as intended.
- "isDefault": True or False, # Output only. If true, this version will be used to handle prediction
- # requests that do not specify a version.
- #
- # You can change the default version by calling
- # projects.methods.versions.setDefault.
- "acceleratorConfig": { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
- # Only specify this field if you have specified a Compute Engine (N1) machine
- # type in the `machineType` field. Learn more about [using GPUs for online
- # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
- # Note that the AcceleratorConfig can be used in both Jobs and Versions.
- # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
- # [accelerators for online
- # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
- "count": "A String", # The number of accelerators to attach to each machine running the job.
- "type": "A String", # The type of accelerator to use.
- },
}</pre>
</div>
@@ -955,7 +955,22 @@
# prediction requests. A model can have multiple versions. You can get
# information about all of the versions of a given model by calling
# projects.models.versions.list.
- "state": "A String", # Output only. The state of a version.
+ "acceleratorConfig": { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
+ # Only specify this field if you have specified a Compute Engine (N1) machine
+ # type in the `machineType` field. Learn more about [using GPUs for online
+ # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+ # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+ # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+ # [accelerators for online
+ # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+ "count": "A String", # The number of accelerators to attach to each machine running the job.
+ "type": "A String", # The type of accelerator to use.
+ },
+ "isDefault": True or False, # Output only. If true, this version will be used to handle prediction
+ # requests that do not specify a version.
+ #
+ # You can change the default version by calling
+ # projects.methods.versions.setDefault.
"manualScaling": { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
# model. You should generally use `auto_scaling` with an appropriate
# `min_nodes` instead, but this option is available if you want more
@@ -967,6 +982,7 @@
# this model will be proportional to `nodes` * number of hours since
# last billing cycle plus the cost for each prediction performed.
},
+ "state": "A String", # Output only. The state of a version.
"name": "A String", # Required. The name specified for the version when it was created.
#
# The version name must be unique within the model it is created in.
@@ -1040,6 +1056,18 @@
#
# Learn more about [the Predictor interface and custom prediction
# routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
+ "deploymentUri": "A String", # Required. The Cloud Storage location of the trained model used to
+ # create the version. See the
+ # [guide to model
+ # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
+ # information.
+ #
+ # When passing Version to
+ # projects.models.versions.create
+ # the model service uses the specified location as the source of the model.
+ # Once deployed, the model version is hosted by the prediction service, so
+ # this location is useful only as a historical record.
+ # The total number of model files can't exceed 1000.
"packageUris": [ # Optional. Cloud Storage paths (`gs://…`) of packages for [custom
# prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines)
# or [scikit-learn pipelines with custom
@@ -1092,18 +1120,6 @@
# sum to diff property is met within the desired error range.
},
},
- "deploymentUri": "A String", # Required. The Cloud Storage location of the trained model used to
- # create the version. See the
- # [guide to model
- # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
- # information.
- #
- # When passing Version to
- # projects.models.versions.create
- # the model service uses the specified location as the source of the model.
- # Once deployed, the model version is hosted by the prediction service, so
- # this location is useful only as a historical record.
- # The total number of model files can't exceed 1000.
"autoScaling": { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
# response to increases and decreases in traffic. Care should be
# taken to ramp up traffic according to the model's ability to scale
@@ -1159,6 +1175,7 @@
# -d @./update_body.json
# </pre>
},
+ "createTime": "A String", # Output only. The time the version was created.
"labels": { # Optional. One or more labels that you can add, to organize your model
# versions. Each label is a key-value pair, where both the key and the value
# are arbitrary strings that you supply.
@@ -1166,7 +1183,6 @@
# <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
"a_key": "A String",
},
- "createTime": "A String", # Output only. The time the version was created.
"requestLoggingConfig": { # Configuration for logging request-response pairs to a BigQuery table. # Optional. *Only* specify this field in a
# projects.models.versions.patch
# request. Specifying it in a
@@ -1262,22 +1278,6 @@
# conditions: An `etag` is returned in the response to `GetVersion`, and
# systems are expected to put that etag in the request to `UpdateVersion` to
# ensure that their change will be applied to the model as intended.
- "isDefault": True or False, # Output only. If true, this version will be used to handle prediction
- # requests that do not specify a version.
- #
- # You can change the default version by calling
- # projects.methods.versions.setDefault.
- "acceleratorConfig": { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
- # Only specify this field if you have specified a Compute Engine (N1) machine
- # type in the `machineType` field. Learn more about [using GPUs for online
- # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
- # Note that the AcceleratorConfig can be used in both Jobs and Versions.
- # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
- # [accelerators for online
- # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
- "count": "A String", # The number of accelerators to attach to each machine running the job.
- "type": "A String", # The type of accelerator to use.
- },
},
],
}</pre>
@@ -1315,7 +1315,22 @@
# prediction requests. A model can have multiple versions. You can get
# information about all of the versions of a given model by calling
# projects.models.versions.list.
- "state": "A String", # Output only. The state of a version.
+ "acceleratorConfig": { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
+ # Only specify this field if you have specified a Compute Engine (N1) machine
+ # type in the `machineType` field. Learn more about [using GPUs for online
+ # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+ # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+ # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+ # [accelerators for online
+ # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+ "count": "A String", # The number of accelerators to attach to each machine running the job.
+ "type": "A String", # The type of accelerator to use.
+ },
+ "isDefault": True or False, # Output only. If true, this version will be used to handle prediction
+ # requests that do not specify a version.
+ #
+ # You can change the default version by calling
+ # projects.methods.versions.setDefault.
"manualScaling": { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
# model. You should generally use `auto_scaling` with an appropriate
# `min_nodes` instead, but this option is available if you want more
@@ -1327,6 +1342,7 @@
# this model will be proportional to `nodes` * number of hours since
# last billing cycle plus the cost for each prediction performed.
},
+ "state": "A String", # Output only. The state of a version.
"name": "A String", # Required. The name specified for the version when it was created.
#
# The version name must be unique within the model it is created in.
@@ -1400,6 +1416,18 @@
#
# Learn more about [the Predictor interface and custom prediction
# routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
+ "deploymentUri": "A String", # Required. The Cloud Storage location of the trained model used to
+ # create the version. See the
+ # [guide to model
+ # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
+ # information.
+ #
+ # When passing Version to
+ # projects.models.versions.create
+ # the model service uses the specified location as the source of the model.
+ # Once deployed, the model version is hosted by the prediction service, so
+ # this location is useful only as a historical record.
+ # The total number of model files can't exceed 1000.
"packageUris": [ # Optional. Cloud Storage paths (`gs://…`) of packages for [custom
# prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines)
# or [scikit-learn pipelines with custom
@@ -1452,18 +1480,6 @@
# sum to diff property is met within the desired error range.
},
},
- "deploymentUri": "A String", # Required. The Cloud Storage location of the trained model used to
- # create the version. See the
- # [guide to model
- # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
- # information.
- #
- # When passing Version to
- # projects.models.versions.create
- # the model service uses the specified location as the source of the model.
- # Once deployed, the model version is hosted by the prediction service, so
- # this location is useful only as a historical record.
- # The total number of model files can't exceed 1000.
"autoScaling": { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
# response to increases and decreases in traffic. Care should be
# taken to ramp up traffic according to the model's ability to scale
@@ -1519,6 +1535,7 @@
# -d @./update_body.json
# </pre>
},
+ "createTime": "A String", # Output only. The time the version was created.
"labels": { # Optional. One or more labels that you can add, to organize your model
# versions. Each label is a key-value pair, where both the key and the value
# are arbitrary strings that you supply.
@@ -1526,7 +1543,6 @@
# <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
"a_key": "A String",
},
- "createTime": "A String", # Output only. The time the version was created.
"requestLoggingConfig": { # Configuration for logging request-response pairs to a BigQuery table. # Optional. *Only* specify this field in a
# projects.models.versions.patch
# request. Specifying it in a
@@ -1622,22 +1638,6 @@
# conditions: An `etag` is returned in the response to `GetVersion`, and
# systems are expected to put that etag in the request to `UpdateVersion` to
# ensure that their change will be applied to the model as intended.
- "isDefault": True or False, # Output only. If true, this version will be used to handle prediction
- # requests that do not specify a version.
- #
- # You can change the default version by calling
- # projects.methods.versions.setDefault.
- "acceleratorConfig": { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
- # Only specify this field if you have specified a Compute Engine (N1) machine
- # type in the `machineType` field. Learn more about [using GPUs for online
- # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
- # Note that the AcceleratorConfig can be used in both Jobs and Versions.
- # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
- # [accelerators for online
- # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
- "count": "A String", # The number of accelerators to attach to each machine running the job.
- "type": "A String", # The type of accelerator to use.
- },
}
updateMask: string, Required. Specifies the path, relative to `Version`, of the field to
@@ -1668,16 +1668,6 @@
{ # This resource represents a long-running operation that is the result of a
# network API call.
- "response": { # The normal response of the operation in case of success. If the original
- # method returns no data on success, such as `Delete`, the response is
- # `google.protobuf.Empty`. If the original method is standard
- # `Get`/`Create`/`Update`, the response should be the resource. For other
- # methods, the response should have the type `XxxResponse`, where `Xxx`
- # is the original method name. For example, if the original method name
- # is `TakeSnapshot()`, the inferred response type is
- # `TakeSnapshotResponse`.
- "a_key": "", # Properties of the object. Contains field @type with type URL.
- },
"name": "A String", # The server-assigned name, which is only unique within the same service that
# originally returns it. If you use the default HTTP mapping, the
# `name` should be a resource name ending with `operations/{unique_id}`.
@@ -1688,16 +1678,16 @@
#
# You can find out more about this error model and how to work with it in the
# [API Design Guide](https://cloud.google.com/apis/design/errors).
+ "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+ "message": "A String", # A developer-facing error message, which should be in English. Any
+ # user-facing error message should be localized and sent in the
+ # google.rpc.Status.details field, or localized by the client.
"details": [ # A list of messages that carry the error details. There is a common set of
# message types for APIs to use.
{
"a_key": "", # Properties of the object. Contains field @type with type URL.
},
],
- "code": 42, # The status code, which should be an enum value of google.rpc.Code.
- "message": "A String", # A developer-facing error message, which should be in English. Any
- # user-facing error message should be localized and sent in the
- # google.rpc.Status.details field, or localized by the client.
},
"metadata": { # Service-specific metadata associated with the operation. It typically
# contains progress information and common metadata such as create time.
@@ -1708,6 +1698,16 @@
"done": True or False, # If the value is `false`, it means the operation is still in progress.
# If `true`, the operation is completed, and either `error` or `response` is
# available.
+ "response": { # The normal response of the operation in case of success. If the original
+ # method returns no data on success, such as `Delete`, the response is
+ # `google.protobuf.Empty`. If the original method is standard
+ # `Get`/`Create`/`Update`, the response should be the resource. For other
+ # methods, the response should have the type `XxxResponse`, where `Xxx`
+ # is the original method name. For example, if the original method name
+ # is `TakeSnapshot()`, the inferred response type is
+ # `TakeSnapshotResponse`.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
+ },
}</pre>
</div>
@@ -1746,7 +1746,22 @@
# prediction requests. A model can have multiple versions. You can get
# information about all of the versions of a given model by calling
# projects.models.versions.list.
- "state": "A String", # Output only. The state of a version.
+ "acceleratorConfig": { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
+ # Only specify this field if you have specified a Compute Engine (N1) machine
+ # type in the `machineType` field. Learn more about [using GPUs for online
+ # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+ # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+ # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+ # [accelerators for online
+ # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+ "count": "A String", # The number of accelerators to attach to each machine running the job.
+ "type": "A String", # The type of accelerator to use.
+ },
+ "isDefault": True or False, # Output only. If true, this version will be used to handle prediction
+ # requests that do not specify a version.
+ #
+ # You can change the default version by calling
+              # projects.models.versions.setDefault.
"manualScaling": { # Options for manually scaling a model. # Manually select the number of nodes to use for serving the
# model. You should generally use `auto_scaling` with an appropriate
# `min_nodes` instead, but this option is available if you want more
@@ -1758,6 +1773,7 @@
# this model will be proportional to `nodes` * number of hours since
# last billing cycle plus the cost for each prediction performed.
},
+ "state": "A String", # Output only. The state of a version.
"name": "A String", # Required. The name specified for the version when it was created.
#
# The version name must be unique within the model it is created in.
@@ -1831,6 +1847,18 @@
#
# Learn more about [the Predictor interface and custom prediction
# routines](/ml-engine/docs/tensorflow/custom-prediction-routines).
+ "deploymentUri": "A String", # Required. The Cloud Storage location of the trained model used to
+ # create the version. See the
+ # [guide to model
+ # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
+ # information.
+ #
+ # When passing Version to
+ # projects.models.versions.create
+ # the model service uses the specified location as the source of the model.
+ # Once deployed, the model version is hosted by the prediction service, so
+ # this location is useful only as a historical record.
+ # The total number of model files can't exceed 1000.
"packageUris": [ # Optional. Cloud Storage paths (`gs://…`) of packages for [custom
# prediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines)
# or [scikit-learn pipelines with custom
@@ -1883,18 +1911,6 @@
# sum to diff property is met within the desired error range.
},
},
- "deploymentUri": "A String", # Required. The Cloud Storage location of the trained model used to
- # create the version. See the
- # [guide to model
- # deployment](/ml-engine/docs/tensorflow/deploying-models) for more
- # information.
- #
- # When passing Version to
- # projects.models.versions.create
- # the model service uses the specified location as the source of the model.
- # Once deployed, the model version is hosted by the prediction service, so
- # this location is useful only as a historical record.
- # The total number of model files can't exceed 1000.
"autoScaling": { # Options for automatically scaling a model. # Automatically scale the number of nodes used to serve the model in
# response to increases and decreases in traffic. Care should be
# taken to ramp up traffic according to the model's ability to scale
@@ -1950,6 +1966,7 @@
# -d @./update_body.json
# </pre>
},
+ "createTime": "A String", # Output only. The time the version was created.
"labels": { # Optional. One or more labels that you can add, to organize your model
# versions. Each label is a key-value pair, where both the key and the value
# are arbitrary strings that you supply.
@@ -1957,7 +1974,6 @@
# <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
"a_key": "A String",
},
- "createTime": "A String", # Output only. The time the version was created.
"requestLoggingConfig": { # Configuration for logging request-response pairs to a BigQuery table. # Optional. *Only* specify this field in a
# projects.models.versions.patch
# request. Specifying it in a
@@ -2053,22 +2069,6 @@
# conditions: An `etag` is returned in the response to `GetVersion`, and
# systems are expected to put that etag in the request to `UpdateVersion` to
# ensure that their change will be applied to the model as intended.
- "isDefault": True or False, # Output only. If true, this version will be used to handle prediction
- # requests that do not specify a version.
- #
- # You can change the default version by calling
- # projects.methods.versions.setDefault.
- "acceleratorConfig": { # Represents a hardware accelerator request config. # Optional. Accelerator config for using GPUs for online prediction (beta).
- # Only specify this field if you have specified a Compute Engine (N1) machine
- # type in the `machineType` field. Learn more about [using GPUs for online
- # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
- # Note that the AcceleratorConfig can be used in both Jobs and Versions.
- # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
- # [accelerators for online
- # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
- "count": "A String", # The number of accelerators to attach to each machine running the job.
- "type": "A String", # The type of accelerator to use.
- },
}</pre>
</div>