docs: update docs (#916)
* fix: re-run script
* test: fix noxfile
diff --git a/docs/dyn/ml_v1.projects.jobs.html b/docs/dyn/ml_v1.projects.jobs.html
index 979bc29..3952cf6 100644
--- a/docs/dyn/ml_v1.projects.jobs.html
+++ b/docs/dyn/ml_v1.projects.jobs.html
@@ -87,7 +87,7 @@
<code><a href="#getIamPolicy">getIamPolicy(resource, options_requestedPolicyVersion=None, x__xgafv=None)</a></code></p>
<p class="firstline">Gets the access control policy for a resource.</p>
<p class="toc_element">
- <code><a href="#list">list(parent, pageToken=None, pageSize=None, filter=None, x__xgafv=None)</a></code></p>
+ <code><a href="#list">list(parent, filter=None, pageToken=None, pageSize=None, x__xgafv=None)</a></code></p>
<p class="firstline">Lists the jobs in the project.</p>
<p class="toc_element">
<code><a href="#list_next">list_next(previous_request, previous_response)</a></code></p>
@@ -144,6 +144,49 @@
The object takes the form of:
{ # Represents a training or prediction job.
+ "predictionInput": { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
+ "versionName": "A String", # Use this field if you want to specify a version of the model to use. The
+ # string is formatted the same way as `model_version`, with the addition
+ # of the version information:
+ #
+ # `"projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION"`
+ "modelName": "A String", # Use this field if you want to use the default version for the specified
+ # model. The string must use the following format:
+ #
+ # `"projects/YOUR_PROJECT/models/YOUR_MODEL"`
+ "uri": "A String", # Use this field if you want to specify a Google Cloud Storage path for
+ # the model to use.
+ "maxWorkerCount": "A String", # Optional. The maximum number of workers to be used for parallel processing.
+ # Defaults to 10 if not specified.
+ "signatureName": "A String", # Optional. The name of the signature defined in the SavedModel to use for
+ # this job. Please refer to
+ # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
+ # for information about how to use signatures.
+ #
+ # Defaults to
+ # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
+ # , which is "serving_default".
+ "outputPath": "A String", # Required. The output Google Cloud Storage location.
+ "outputDataFormat": "A String", # Optional. Format of the output data files, defaults to JSON.
+ "dataFormat": "A String", # Required. The format of the input data files.
+ "batchSize": "A String", # Optional. Number of records per batch, defaults to 64.
+ # The service will buffer batch_size number of records in memory before
+ # invoking one Tensorflow prediction call internally. So take the record
+ # size and memory available into consideration when setting this parameter.
+ "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this batch
+ # prediction. If not set, AI Platform will pick the runtime version used
+ # during the CreateVersion request for this model version, or choose the
+ # latest stable version when model version information is not available
+ # such as when the model is specified by uri.
+ "inputPaths": [ # Required. The Cloud Storage location of the input data files. May contain
+ # <a href="/storage/docs/gsutil/addlhelp/WildcardNames">wildcards</a>.
+ "A String",
+ ],
+ "region": "A String", # Required. The Google Compute Engine region to run the prediction job in.
+ # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
+ # for AI Platform services.
+ },
+ "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
"etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
# prevent simultaneous updates of a job from overwriting each other.
# It is strongly suggested that systems make use of the `etag` in the
@@ -156,6 +199,21 @@
# command-line arguments and/or in a YAML configuration file referenced from
# the --config command-line argument. For details, see the guide to [submitting
# a training job](/ai-platform/training/docs/training-jobs).
+ "workerCount": "A String", # Optional. The number of worker replicas to use for the training job. Each
+ # replica in the cluster will be of the type specified in `worker_type`.
+ #
+ # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+ # set this value, you must also set `worker_type`.
+ #
+ # The default value is zero.
+ "pythonModule": "A String", # Required. The Python module name to run after installing the packages.
+ "args": [ # Optional. Command-line arguments passed to the training application when it
+ # starts. If your job uses a custom container, then the arguments are passed
+ # to the container's <a class="external" target="_blank"
+ # href="https://docs.docker.com/engine/reference/builder/#entrypoint">
+ # `ENTRYPOINT`</a> command.
+ "A String",
+ ],
"parameterServerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
#
# You should only set `parameterServerConfig.acceleratorConfig` if
@@ -167,29 +225,6 @@
# your parameter server. If `parameterServerConfig.imageUri` has not been
# set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
- "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
- # the one used in the custom container. This field is required if the replica
- # is a TPU worker that uses a custom container. Otherwise, do not specify
- # this field. This must be a [runtime version that currently supports
- # training with
- # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
- #
- # Note that the version of TensorFlow included in a runtime version may
- # differ from the numbering of the runtime version itself, because it may
- # have a different [patch
- # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
- # In this field, you must specify the runtime version (TensorFlow minor
- # version). For example, if your custom container runs TensorFlow `1.x.y`,
- # specify `1.x`.
- "containerCommand": [ # The command with which the replica's custom container is run.
- # If provided, it will override default ENTRYPOINT of the docker image.
- # If not provided, the docker image's ENTRYPOINT is used.
- # It cannot be set if custom container image is
- # not provided.
- # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
- # both cannot be set at the same time.
- "A String",
- ],
"imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
# Registry. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
@@ -222,6 +257,29 @@
"count": "A String", # The number of accelerators to attach to each machine running the job.
"type": "A String", # The type of accelerator to use.
},
+ "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+ # the one used in the custom container. This field is required if the replica
+ # is a TPU worker that uses a custom container. Otherwise, do not specify
+ # this field. This must be a [runtime version that currently supports
+ # training with
+ # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+ #
+ # Note that the version of TensorFlow included in a runtime version may
+ # differ from the numbering of the runtime version itself, because it may
+ # have a different [patch
+ # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+ # In this field, you must specify the runtime version (TensorFlow minor
+ # version). For example, if your custom container runs TensorFlow `1.x.y`,
+ # specify `1.x`.
+ "containerCommand": [ # The command with which the replica's custom container is run.
+ # If provided, it will override default ENTRYPOINT of the docker image.
+ # If not provided, the docker image's ENTRYPOINT is used.
+ # It cannot be set if custom container image is
+ # not provided.
+ # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+ # both cannot be set at the same time.
+ "A String",
+ ],
},
"encryptionConfig": { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
# protect resources created by a training job, instead of using Google's
@@ -238,14 +296,8 @@
# `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
},
"hyperparameters": { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
- "hyperparameterMetricTag": "A String", # Optional. The TensorFlow summary tag name to use for optimizing trials. For
- # current versions of TensorFlow, this tag name should exactly match what is
- # shown in TensorBoard, including all scopes. For versions of TensorFlow
- # prior to 0.12, this should be only the tag passed to tf.Summary.
- # By default, "training/hptuning/metric" will be used.
"params": [ # Required. The set of parameters to tune.
{ # Represents a single hyperparameter to optimize.
- "type": "A String", # Required. The type of the parameter.
"categoricalValues": [ # Required if type is `CATEGORICAL`. The list of possible categories.
"A String",
],
@@ -268,6 +320,7 @@
"maxValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
# should be unset if type is `CATEGORICAL`. This value should be integers if
# type is `INTEGER`.
+ "type": "A String", # Required. The type of the parameter.
},
],
"enableTrialEarlyStopping": True or False, # Optional. Indicates if the hyperparameter tuning job enables auto trial
@@ -303,6 +356,11 @@
# tuning job.
# Uses the default AI Platform hyperparameter tuning
# algorithm if unspecified.
+ "hyperparameterMetricTag": "A String", # Optional. The TensorFlow summary tag name to use for optimizing trials. For
+ # current versions of TensorFlow, this tag name should exactly match what is
+ # shown in TensorBoard, including all scopes. For versions of TensorFlow
+ # prior to 0.12, this should be only the tag passed to tf.Summary.
+ # By default, "training/hptuning/metric" will be used.
},
"workerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
#
@@ -315,29 +373,6 @@
# worker. If `workerConfig.imageUri` has not been set, AI Platform uses
# the value of `masterConfig.imageUri`. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
- "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
- # the one used in the custom container. This field is required if the replica
- # is a TPU worker that uses a custom container. Otherwise, do not specify
- # this field. This must be a [runtime version that currently supports
- # training with
- # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
- #
- # Note that the version of TensorFlow included in a runtime version may
- # differ from the numbering of the runtime version itself, because it may
- # have a different [patch
- # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
- # In this field, you must specify the runtime version (TensorFlow minor
- # version). For example, if your custom container runs TensorFlow `1.x.y`,
- # specify `1.x`.
- "containerCommand": [ # The command with which the replica's custom container is run.
- # If provided, it will override default ENTRYPOINT of the docker image.
- # If not provided, the docker image's ENTRYPOINT is used.
- # It cannot be set if custom container image is
- # not provided.
- # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
- # both cannot be set at the same time.
- "A String",
- ],
"imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
# Registry. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
@@ -370,6 +405,29 @@
"count": "A String", # The number of accelerators to attach to each machine running the job.
"type": "A String", # The type of accelerator to use.
},
+ "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+ # the one used in the custom container. This field is required if the replica
+ # is a TPU worker that uses a custom container. Otherwise, do not specify
+ # this field. This must be a [runtime version that currently supports
+ # training with
+ # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+ #
+ # Note that the version of TensorFlow included in a runtime version may
+ # differ from the numbering of the runtime version itself, because it may
+ # have a different [patch
+ # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+ # In this field, you must specify the runtime version (TensorFlow minor
+ # version). For example, if your custom container runs TensorFlow `1.x.y`,
+ # specify `1.x`.
+ "containerCommand": [ # The command with which the replica's custom container is run.
+ # If provided, it will override default ENTRYPOINT of the docker image.
+ # If not provided, the docker image's ENTRYPOINT is used.
+ # It cannot be set if custom container image is
+ # not provided.
+ # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+ # both cannot be set at the same time.
+ "A String",
+ ],
},
"parameterServerCount": "A String", # Optional. The number of parameter server replicas to use for the training
# job. Each replica in the cluster will be of the type specified in
@@ -462,8 +520,6 @@
#
# This value must be present when `scaleTier` is set to `CUSTOM` and
# `evaluatorCount` is greater than zero.
- "region": "A String", # Required. The region to run the training job in. See the [available
- # regions](/ai-platform/training/docs/regions) for AI Platform Training.
"workerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
# job's worker nodes.
#
@@ -480,6 +536,8 @@
#
# This value must be present when `scaleTier` is set to `CUSTOM` and
# `workerCount` is greater than zero.
+ "region": "A String", # Required. The region to run the training job in. See the [available
+ # regions](/ai-platform/training/docs/regions) for AI Platform Training.
"parameterServerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
# job's parameter server.
#
@@ -503,29 +561,6 @@
# `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
# about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
- "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
- # the one used in the custom container. This field is required if the replica
- # is a TPU worker that uses a custom container. Otherwise, do not specify
- # this field. This must be a [runtime version that currently supports
- # training with
- # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
- #
- # Note that the version of TensorFlow included in a runtime version may
- # differ from the numbering of the runtime version itself, because it may
- # have a different [patch
- # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
- # In this field, you must specify the runtime version (TensorFlow minor
- # version). For example, if your custom container runs TensorFlow `1.x.y`,
- # specify `1.x`.
- "containerCommand": [ # The command with which the replica's custom container is run.
- # If provided, it will override default ENTRYPOINT of the docker image.
- # If not provided, the docker image's ENTRYPOINT is used.
- # It cannot be set if custom container image is
- # not provided.
- # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
- # both cannot be set at the same time.
- "A String",
- ],
"imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
# Registry. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
@@ -558,6 +593,29 @@
"count": "A String", # The number of accelerators to attach to each machine running the job.
"type": "A String", # The type of accelerator to use.
},
+ "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+ # the one used in the custom container. This field is required if the replica
+ # is a TPU worker that uses a custom container. Otherwise, do not specify
+ # this field. This must be a [runtime version that currently supports
+ # training with
+ # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+ #
+ # Note that the version of TensorFlow included in a runtime version may
+ # differ from the numbering of the runtime version itself, because it may
+ # have a different [patch
+ # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+ # In this field, you must specify the runtime version (TensorFlow minor
+ # version). For example, if your custom container runs TensorFlow `1.x.y`,
+ # specify `1.x`.
+ "containerCommand": [ # The command with which the replica's custom container is run.
+ # If provided, it will override default ENTRYPOINT of the docker image.
+ # If not provided, the docker image's ENTRYPOINT is used.
+ # It cannot be set if custom container image is
+ # not provided.
+ # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+ # both cannot be set at the same time.
+ "A String",
+ ],
},
"scaleTier": "A String", # Required. Specifies the machine types, the number of replicas for workers
# and parameter servers.
@@ -579,16 +637,6 @@
#
# Read more about the Python versions available for [each runtime
# version](/ml-engine/docs/runtime-version-list).
- "network": "A String", # Optional. The full name of the Google Compute Engine
- # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
- # is peered. For example, projects/12345/global/networks/myVPC. Format is of
- # the form projects/{project}/global/networks/{network}. Where {project} is a
- # project number, as in '12345', and {network} is network name.".
- #
- # Private services access must already be configured for the network. If left
- # unspecified, the Job is not peered with any network. Learn more -
- # Connecting Job to user network over private
- # IP.
"scheduling": { # All parameters related to scheduling of training jobs. # Optional. Scheduling options for a training job.
"maxWaitTime": "A String",
"maxRunningTime": "A String", # Optional. The maximum job running time, expressed in seconds. The field can
@@ -615,6 +663,16 @@
# ...
# ```
},
+ "network": "A String", # Optional. The full name of the Google Compute Engine
+ # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
+ # is peered. For example, projects/12345/global/networks/myVPC. Format is of
+ # the form projects/{project}/global/networks/{network}. Where {project} is a
+ # project number, as in '12345', and {network} is network name.".
+ #
+ # Private services access must already be configured for the network. If left
+ # unspecified, the Job is not peered with any network. Learn more -
+ # Connecting Job to user network over private
+ # IP.
"evaluatorConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
#
# You should only set `evaluatorConfig.acceleratorConfig` if
@@ -626,29 +684,6 @@
# your evaluator. If `evaluatorConfig.imageUri` has not been
# set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
- "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
- # the one used in the custom container. This field is required if the replica
- # is a TPU worker that uses a custom container. Otherwise, do not specify
- # this field. This must be a [runtime version that currently supports
- # training with
- # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
- #
- # Note that the version of TensorFlow included in a runtime version may
- # differ from the numbering of the runtime version itself, because it may
- # have a different [patch
- # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
- # In this field, you must specify the runtime version (TensorFlow minor
- # version). For example, if your custom container runs TensorFlow `1.x.y`,
- # specify `1.x`.
- "containerCommand": [ # The command with which the replica's custom container is run.
- # If provided, it will override default ENTRYPOINT of the docker image.
- # If not provided, the docker image's ENTRYPOINT is used.
- # It cannot be set if custom container image is
- # not provided.
- # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
- # both cannot be set at the same time.
- "A String",
- ],
"imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
# Registry. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
@@ -681,6 +716,29 @@
"count": "A String", # The number of accelerators to attach to each machine running the job.
"type": "A String", # The type of accelerator to use.
},
+ "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+ # the one used in the custom container. This field is required if the replica
+ # is a TPU worker that uses a custom container. Otherwise, do not specify
+ # this field. This must be a [runtime version that currently supports
+ # training with
+ # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+ #
+ # Note that the version of TensorFlow included in a runtime version may
+ # differ from the numbering of the runtime version itself, because it may
+ # have a different [patch
+ # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+ # In this field, you must specify the runtime version (TensorFlow minor
+ # version). For example, if your custom container runs TensorFlow `1.x.y`,
+ # specify `1.x`.
+ "containerCommand": [ # The command with which the replica's custom container is run.
+ # If provided, it will override default ENTRYPOINT of the docker image.
+ # If not provided, the docker image's ENTRYPOINT is used.
+ # It cannot be set if custom container image is
+ # not provided.
+ # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+ # both cannot be set at the same time.
+ "A String",
+ ],
},
"useChiefInTfConfig": True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
# variable when training with a custom container. Defaults to `false`. [Learn
@@ -689,21 +747,6 @@
#
# This field has no effect for training jobs that don't use a custom
# container.
- "workerCount": "A String", # Optional. The number of worker replicas to use for the training job. Each
- # replica in the cluster will be of the type specified in `worker_type`.
- #
- # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
- # set this value, you must also set `worker_type`.
- #
- # The default value is zero.
- "pythonModule": "A String", # Required. The Python module name to run after installing the packages.
- "args": [ # Optional. Command-line arguments passed to the training application when it
- # starts. If your job uses a custom container, then the arguments are passed
- # to the container's <a class="external" target="_blank"
- # href="https://docs.docker.com/engine/reference/builder/#entrypoint">
- # `ENTRYPOINT`</a> command.
- "A String",
- ],
},
"state": "A String", # Output only. The detailed state of a job.
"jobId": "A String", # Required. The user-specified id of the job.
@@ -716,41 +759,52 @@
"predictionCount": "A String", # The number of generated predictions.
},
"trainingOutput": { # Represents results of a training job. Output only. # The current training job result.
+ "isBuiltInAlgorithmJob": True or False, # Whether this job is a built-in Algorithm job.
+ "builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
+ # Only set for built-in algorithms jobs.
+ "modelPath": "A String", # The Cloud Storage path to the `model/` directory where the training job
+ # saves the trained model. Only set for successful jobs that don't use
+ # hyperparameter tuning.
+ "pythonVersion": "A String", # Python version on which the built-in algorithm was trained.
+ "runtimeVersion": "A String", # AI Platform runtime version on which the built-in algorithm was
+ # trained.
+ "framework": "A String", # Framework on which the built-in algorithm was trained.
+ },
"trials": [ # Results for individual Hyperparameter trials.
# Only set for hyperparameter tuning jobs.
{ # Represents the result of a single hyperparameter tuning trial from a
# training job. The TrainingOutput object that is returned on successful
# completion of a training job with hyperparameter tuning includes a list
# of HyperparameterOutput objects, one for each successful trial.
- "allMetrics": [ # All recorded object metrics for this trial. This field is not currently
- # populated.
- { # An observed value of a metric.
- "objectiveValue": 3.14, # The objective value at this training step.
- "trainingStep": "A String", # The global training step for this metric.
- },
- ],
- "hyperparameters": { # The hyperparameters given to this trial.
- "a_key": "A String",
- },
"trialId": "A String", # The trial id for these results.
"endTime": "A String", # Output only. End time for the trial.
- "isTrialStoppedEarly": True or False, # True if the trial is stopped early.
"startTime": "A String", # Output only. Start time for the trial.
+ "isTrialStoppedEarly": True or False, # True if the trial is stopped early.
"finalMetric": { # An observed value of a metric. # The final objective metric seen for this trial.
- "objectiveValue": 3.14, # The objective value at this training step.
"trainingStep": "A String", # The global training step for this metric.
+ "objectiveValue": 3.14, # The objective value at this training step.
},
"builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
# Only set for trials of built-in algorithms jobs that have succeeded.
- "framework": "A String", # Framework on which the built-in algorithm was trained.
"modelPath": "A String", # The Cloud Storage path to the `model/` directory where the training job
# saves the trained model. Only set for successful jobs that don't use
# hyperparameter tuning.
"pythonVersion": "A String", # Python version on which the built-in algorithm was trained.
"runtimeVersion": "A String", # AI Platform runtime version on which the built-in algorithm was
# trained.
+ "framework": "A String", # Framework on which the built-in algorithm was trained.
},
"state": "A String", # Output only. The detailed state of the trial.
+ "allMetrics": [ # All recorded object metrics for this trial. This field is not currently
+ # populated.
+ { # An observed value of a metric.
+ "trainingStep": "A String", # The global training step for this metric.
+ "objectiveValue": 3.14, # The objective value at this training step.
+ },
+ ],
+ "hyperparameters": { # The hyperparameters given to this trial.
+ "a_key": "A String",
+ },
},
],
"hyperparameterMetricTag": "A String", # The TensorFlow summary tag name used for optimizing hyperparameter tuning
@@ -761,17 +815,6 @@
# Only set for hyperparameter tuning jobs.
"isHyperparameterTuningJob": True or False, # Whether this job is a hyperparameter tuning job.
"consumedMLUnits": 3.14, # The amount of ML units consumed by the job.
- "isBuiltInAlgorithmJob": True or False, # Whether this job is a built-in Algorithm job.
- "builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
- # Only set for built-in algorithms jobs.
- "framework": "A String", # Framework on which the built-in algorithm was trained.
- "modelPath": "A String", # The Cloud Storage path to the `model/` directory where the training job
- # saves the trained model. Only set for successful jobs that don't use
- # hyperparameter tuning.
- "pythonVersion": "A String", # Python version on which the built-in algorithm was trained.
- "runtimeVersion": "A String", # AI Platform runtime version on which the built-in algorithm was
- # trained.
- },
},
"createTime": "A String", # Output only. When the job was created.
"labels": { # Optional. One or more labels that you can add, to organize your jobs.
@@ -781,49 +824,6 @@
# <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
"a_key": "A String",
},
- "predictionInput": { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
- "outputPath": "A String", # Required. The output Google Cloud Storage location.
- "outputDataFormat": "A String", # Optional. Format of the output data files, defaults to JSON.
- "dataFormat": "A String", # Required. The format of the input data files.
- "batchSize": "A String", # Optional. Number of records per batch, defaults to 64.
- # The service will buffer batch_size number of records in memory before
- # invoking one Tensorflow prediction call internally. So take the record
- # size and memory available into consideration when setting this parameter.
- "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this batch
- # prediction. If not set, AI Platform will pick the runtime version used
- # during the CreateVersion request for this model version, or choose the
- # latest stable version when model version information is not available
- # such as when the model is specified by uri.
- "inputPaths": [ # Required. The Cloud Storage location of the input data files. May contain
- # <a href="/storage/docs/gsutil/addlhelp/WildcardNames">wildcards</a>.
- "A String",
- ],
- "region": "A String", # Required. The Google Compute Engine region to run the prediction job in.
- # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
- # for AI Platform services.
- "versionName": "A String", # Use this field if you want to specify a version of the model to use. The
- # string is formatted the same way as `model_version`, with the addition
- # of the version information:
- #
- # `"projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION"`
- "modelName": "A String", # Use this field if you want to use the default version for the specified
- # model. The string must use the following format:
- #
- # `"projects/YOUR_PROJECT/models/YOUR_MODEL"`
- "uri": "A String", # Use this field if you want to specify a Google Cloud Storage path for
- # the model to use.
- "maxWorkerCount": "A String", # Optional. The maximum number of workers to be used for parallel processing.
- # Defaults to 10 if not specified.
- "signatureName": "A String", # Optional. The name of the signature defined in the SavedModel to use for
- # this job. Please refer to
- # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
- # for information about how to use signatures.
- #
- # Defaults to
- # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
- # , which is "serving_default".
- },
- "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
}
x__xgafv: string, V1 error format.
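To put the request body above to work, here is a minimal sketch (not part of the generated reference) that submits a batch prediction job via `create()`. The project ID, model name, bucket paths, and job ID are placeholders, Application Default Credentials are assumed, and only the required `predictionInput` fields documented above are set:

    from googleapiclient import discovery

    ml = discovery.build("ml", "v1")  # uses Application Default Credentials

    job_body = {
        "jobId": "my_batch_prediction_001",  # placeholder; must be unique in the project
        "predictionInput": {
            "modelName": "projects/PROJECT_ID/models/MODEL_NAME",  # uses the model's default version
            "dataFormat": "JSON",
            "inputPaths": ["gs://BUCKET_NAME/prediction-input/*"],
            "outputPath": "gs://BUCKET_NAME/prediction-output/",
            "region": "us-central1",
        },
    }

    response = (
        ml.projects()
        .jobs()
        .create(parent="projects/PROJECT_ID", body=job_body)
        .execute()
    )
    print(response.get("jobId"), response.get("state"))

The returned resource has the shape of the response object described in the next hunk, with output-only fields such as `state` and `createTime` populated by the service.
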
@@ -835,6 +835,49 @@
An object of the form:
{ # Represents a training or prediction job.
+ "predictionInput": { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
+ "versionName": "A String", # Use this field if you want to specify a version of the model to use. The
+ # string is formatted the same way as `model_version`, with the addition
+ # of the version information:
+ #
+ # `"projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION"`
+ "modelName": "A String", # Use this field if you want to use the default version for the specified
+ # model. The string must use the following format:
+ #
+ # `"projects/YOUR_PROJECT/models/YOUR_MODEL"`
+ "uri": "A String", # Use this field if you want to specify a Google Cloud Storage path for
+ # the model to use.
+ "maxWorkerCount": "A String", # Optional. The maximum number of workers to be used for parallel processing.
+ # Defaults to 10 if not specified.
+ "signatureName": "A String", # Optional. The name of the signature defined in the SavedModel to use for
+ # this job. Please refer to
+ # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
+ # for information about how to use signatures.
+ #
+ # Defaults to
+ # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
+ # , which is "serving_default".
+ "outputPath": "A String", # Required. The output Google Cloud Storage location.
+ "outputDataFormat": "A String", # Optional. Format of the output data files, defaults to JSON.
+ "dataFormat": "A String", # Required. The format of the input data files.
+ "batchSize": "A String", # Optional. Number of records per batch, defaults to 64.
+ # The service will buffer batch_size number of records in memory before
+ # invoking one Tensorflow prediction call internally. So take the record
+ # size and memory available into consideration when setting this parameter.
+ "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this batch
+ # prediction. If not set, AI Platform will pick the runtime version used
+ # during the CreateVersion request for this model version, or choose the
+ # latest stable version when model version information is not available
+ # such as when the model is specified by uri.
+ "inputPaths": [ # Required. The Cloud Storage location of the input data files. May contain
+ # <a href="/storage/docs/gsutil/addlhelp/WildcardNames">wildcards</a>.
+ "A String",
+ ],
+ "region": "A String", # Required. The Google Compute Engine region to run the prediction job in.
+ # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
+ # for AI Platform services.
+ },
+ "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
"etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
# prevent simultaneous updates of a job from overwriting each other.
# It is strongly suggested that systems make use of the `etag` in the
@@ -847,6 +890,21 @@
# command-line arguments and/or in a YAML configuration file referenced from
# the --config command-line argument. For details, see the guide to [submitting
# a training job](/ai-platform/training/docs/training-jobs).
+ "workerCount": "A String", # Optional. The number of worker replicas to use for the training job. Each
+ # replica in the cluster will be of the type specified in `worker_type`.
+ #
+ # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+ # set this value, you must also set `worker_type`.
+ #
+ # The default value is zero.
+ "pythonModule": "A String", # Required. The Python module name to run after installing the packages.
+ "args": [ # Optional. Command-line arguments passed to the training application when it
+ # starts. If your job uses a custom container, then the arguments are passed
+ # to the container's <a class="external" target="_blank"
+ # href="https://docs.docker.com/engine/reference/builder/#entrypoint">
+ # `ENTRYPOINT`</a> command.
+ "A String",
+ ],
"parameterServerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
#
# You should only set `parameterServerConfig.acceleratorConfig` if
@@ -858,29 +916,6 @@
# your parameter server. If `parameterServerConfig.imageUri` has not been
# set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
- "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
- # the one used in the custom container. This field is required if the replica
- # is a TPU worker that uses a custom container. Otherwise, do not specify
- # this field. This must be a [runtime version that currently supports
- # training with
- # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
- #
- # Note that the version of TensorFlow included in a runtime version may
- # differ from the numbering of the runtime version itself, because it may
- # have a different [patch
- # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
- # In this field, you must specify the runtime version (TensorFlow minor
- # version). For example, if your custom container runs TensorFlow `1.x.y`,
- # specify `1.x`.
- "containerCommand": [ # The command with which the replica's custom container is run.
- # If provided, it will override default ENTRYPOINT of the docker image.
- # If not provided, the docker image's ENTRYPOINT is used.
- # It cannot be set if custom container image is
- # not provided.
- # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
- # both cannot be set at the same time.
- "A String",
- ],
"imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
# Registry. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
@@ -913,6 +948,29 @@
"count": "A String", # The number of accelerators to attach to each machine running the job.
"type": "A String", # The type of accelerator to use.
},
+ "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+ # the one used in the custom container. This field is required if the replica
+ # is a TPU worker that uses a custom container. Otherwise, do not specify
+ # this field. This must be a [runtime version that currently supports
+ # training with
+ # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+ #
+ # Note that the version of TensorFlow included in a runtime version may
+ # differ from the numbering of the runtime version itself, because it may
+ # have a different [patch
+ # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+ # In this field, you must specify the runtime version (TensorFlow minor
+ # version). For example, if your custom container runs TensorFlow `1.x.y`,
+ # specify `1.x`.
+ "containerCommand": [ # The command with which the replica's custom container is run.
+ # If provided, it will override default ENTRYPOINT of the docker image.
+ # If not provided, the docker image's ENTRYPOINT is used.
+ # It cannot be set if custom container image is
+ # not provided.
+ # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+ # both cannot be set at the same time.
+ "A String",
+ ],
},
"encryptionConfig": { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
# protect resources created by a training job, instead of using Google's
@@ -929,14 +987,8 @@
# `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
},
"hyperparameters": { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
- "hyperparameterMetricTag": "A String", # Optional. The TensorFlow summary tag name to use for optimizing trials. For
- # current versions of TensorFlow, this tag name should exactly match what is
- # shown in TensorBoard, including all scopes. For versions of TensorFlow
- # prior to 0.12, this should be only the tag passed to tf.Summary.
- # By default, "training/hptuning/metric" will be used.
"params": [ # Required. The set of parameters to tune.
{ # Represents a single hyperparameter to optimize.
- "type": "A String", # Required. The type of the parameter.
"categoricalValues": [ # Required if type is `CATEGORICAL`. The list of possible categories.
"A String",
],
@@ -959,6 +1011,7 @@
"maxValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
# should be unset if type is `CATEGORICAL`. This value should be integers if
# type is `INTEGER`.
+ "type": "A String", # Required. The type of the parameter.
},
],
"enableTrialEarlyStopping": True or False, # Optional. Indicates if the hyperparameter tuning job enables auto trial
@@ -994,6 +1047,11 @@
# tuning job.
# Uses the default AI Platform hyperparameter tuning
# algorithm if unspecified.
+ "hyperparameterMetricTag": "A String", # Optional. The TensorFlow summary tag name to use for optimizing trials. For
+ # current versions of TensorFlow, this tag name should exactly match what is
+ # shown in TensorBoard, including all scopes. For versions of TensorFlow
+ # prior to 0.12, this should be only the tag passed to tf.Summary.
+ # By default, "training/hptuning/metric" will be used.
},
"workerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
#
@@ -1006,29 +1064,6 @@
# worker. If `workerConfig.imageUri` has not been set, AI Platform uses
# the value of `masterConfig.imageUri`. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
- "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
- # the one used in the custom container. This field is required if the replica
- # is a TPU worker that uses a custom container. Otherwise, do not specify
- # this field. This must be a [runtime version that currently supports
- # training with
- # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
- #
- # Note that the version of TensorFlow included in a runtime version may
- # differ from the numbering of the runtime version itself, because it may
- # have a different [patch
- # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
- # In this field, you must specify the runtime version (TensorFlow minor
- # version). For example, if your custom container runs TensorFlow `1.x.y`,
- # specify `1.x`.
- "containerCommand": [ # The command with which the replica's custom container is run.
- # If provided, it will override default ENTRYPOINT of the docker image.
- # If not provided, the docker image's ENTRYPOINT is used.
- # It cannot be set if custom container image is
- # not provided.
- # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
- # both cannot be set at the same time.
- "A String",
- ],
"imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
# Registry. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
@@ -1061,6 +1096,29 @@
"count": "A String", # The number of accelerators to attach to each machine running the job.
"type": "A String", # The type of accelerator to use.
},
+ "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+ # the one used in the custom container. This field is required if the replica
+ # is a TPU worker that uses a custom container. Otherwise, do not specify
+ # this field. This must be a [runtime version that currently supports
+ # training with
+ # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+ #
+ # Note that the version of TensorFlow included in a runtime version may
+ # differ from the numbering of the runtime version itself, because it may
+ # have a different [patch
+ # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+ # In this field, you must specify the runtime version (TensorFlow minor
+ # version). For example, if your custom container runs TensorFlow `1.x.y`,
+ # specify `1.x`.
+ "containerCommand": [ # The command with which the replica's custom container is run.
+ # If provided, it will override default ENTRYPOINT of the docker image.
+ # If not provided, the docker image's ENTRYPOINT is used.
+ # It cannot be set if custom container image is
+ # not provided.
+ # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+ # both cannot be set at the same time.
+ "A String",
+ ],
},
"parameterServerCount": "A String", # Optional. The number of parameter server replicas to use for the training
# job. Each replica in the cluster will be of the type specified in
@@ -1153,8 +1211,6 @@
#
# This value must be present when `scaleTier` is set to `CUSTOM` and
# `evaluatorCount` is greater than zero.
- "region": "A String", # Required. The region to run the training job in. See the [available
- # regions](/ai-platform/training/docs/regions) for AI Platform Training.
"workerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
# job's worker nodes.
#
@@ -1171,6 +1227,8 @@
#
# This value must be present when `scaleTier` is set to `CUSTOM` and
# `workerCount` is greater than zero.
+ "region": "A String", # Required. The region to run the training job in. See the [available
+ # regions](/ai-platform/training/docs/regions) for AI Platform Training.
"parameterServerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
# job's parameter server.
#
@@ -1194,29 +1252,6 @@
# `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
# about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
- "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
- # the one used in the custom container. This field is required if the replica
- # is a TPU worker that uses a custom container. Otherwise, do not specify
- # this field. This must be a [runtime version that currently supports
- # training with
- # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
- #
- # Note that the version of TensorFlow included in a runtime version may
- # differ from the numbering of the runtime version itself, because it may
- # have a different [patch
- # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
- # In this field, you must specify the runtime version (TensorFlow minor
- # version). For example, if your custom container runs TensorFlow `1.x.y`,
- # specify `1.x`.
- "containerCommand": [ # The command with which the replica's custom container is run.
- # If provided, it will override default ENTRYPOINT of the docker image.
- # If not provided, the docker image's ENTRYPOINT is used.
- # It cannot be set if custom container image is
- # not provided.
- # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
- # both cannot be set at the same time.
- "A String",
- ],
"imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
# Registry. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
@@ -1249,6 +1284,29 @@
"count": "A String", # The number of accelerators to attach to each machine running the job.
"type": "A String", # The type of accelerator to use.
},
+ "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+ # the one used in the custom container. This field is required if the replica
+ # is a TPU worker that uses a custom container. Otherwise, do not specify
+ # this field. This must be a [runtime version that currently supports
+ # training with
+ # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+ #
+ # Note that the version of TensorFlow included in a runtime version may
+ # differ from the numbering of the runtime version itself, because it may
+ # have a different [patch
+ # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+ # In this field, you must specify the runtime version (TensorFlow minor
+ # version). For example, if your custom container runs TensorFlow `1.x.y`,
+ # specify `1.x`.
+ "containerCommand": [ # The command with which the replica's custom container is run.
+ # If provided, it will override default ENTRYPOINT of the docker image.
+ # If not provided, the docker image's ENTRYPOINT is used.
+ # It cannot be set if custom container image is
+ # not provided.
+ # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+ # both cannot be set at the same time.
+ "A String",
+ ],
},
"scaleTier": "A String", # Required. Specifies the machine types, the number of replicas for workers
# and parameter servers.
@@ -1270,16 +1328,6 @@
#
# Read more about the Python versions available for [each runtime
# version](/ml-engine/docs/runtime-version-list).
- "network": "A String", # Optional. The full name of the Google Compute Engine
- # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
- # is peered. For example, projects/12345/global/networks/myVPC. Format is of
- # the form projects/{project}/global/networks/{network}. Where {project} is a
- # project number, as in '12345', and {network} is network name.".
- #
- # Private services access must already be configured for the network. If left
- # unspecified, the Job is not peered with any network. Learn more -
- # Connecting Job to user network over private
- # IP.
"scheduling": { # All parameters related to scheduling of training jobs. # Optional. Scheduling options for a training job.
"maxWaitTime": "A String",
"maxRunningTime": "A String", # Optional. The maximum job running time, expressed in seconds. The field can
@@ -1306,6 +1354,16 @@
# ...
# ```
},
+ "network": "A String", # Optional. The full name of the Google Compute Engine
+ # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
+ # is peered. For example, projects/12345/global/networks/myVPC. Format is of
+ # the form projects/{project}/global/networks/{network}. Where {project} is a
+ # project number, as in '12345', and {network} is network name.".
+ #
+ # Private services access must already be configured for the network. If left
+ # unspecified, the Job is not peered with any network. Learn more -
+ # Connecting Job to user network over private
+ # IP.
"evaluatorConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
#
# You should only set `evaluatorConfig.acceleratorConfig` if
@@ -1317,29 +1375,6 @@
# your evaluator. If `evaluatorConfig.imageUri` has not been
# set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
- "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
- # the one used in the custom container. This field is required if the replica
- # is a TPU worker that uses a custom container. Otherwise, do not specify
- # this field. This must be a [runtime version that currently supports
- # training with
- # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
- #
- # Note that the version of TensorFlow included in a runtime version may
- # differ from the numbering of the runtime version itself, because it may
- # have a different [patch
- # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
- # In this field, you must specify the runtime version (TensorFlow minor
- # version). For example, if your custom container runs TensorFlow `1.x.y`,
- # specify `1.x`.
- "containerCommand": [ # The command with which the replica's custom container is run.
- # If provided, it will override default ENTRYPOINT of the docker image.
- # If not provided, the docker image's ENTRYPOINT is used.
- # It cannot be set if custom container image is
- # not provided.
- # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
- # both cannot be set at the same time.
- "A String",
- ],
"imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
# Registry. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
@@ -1372,6 +1407,29 @@
"count": "A String", # The number of accelerators to attach to each machine running the job.
"type": "A String", # The type of accelerator to use.
},
+ "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+ # the one used in the custom container. This field is required if the replica
+ # is a TPU worker that uses a custom container. Otherwise, do not specify
+ # this field. This must be a [runtime version that currently supports
+ # training with
+ # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+ #
+ # Note that the version of TensorFlow included in a runtime version may
+ # differ from the numbering of the runtime version itself, because it may
+ # have a different [patch
+ # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+ # In this field, you must specify the runtime version (TensorFlow minor
+ # version). For example, if your custom container runs TensorFlow `1.x.y`,
+ # specify `1.x`.
+ "containerCommand": [ # The command with which the replica's custom container is run.
+ # If provided, it will override default ENTRYPOINT of the docker image.
+ # If not provided, the docker image's ENTRYPOINT is used.
+ # It cannot be set if custom container image is
+ # not provided.
+ # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+ # both cannot be set at the same time.
+ "A String",
+ ],
},
"useChiefInTfConfig": True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
# variable when training with a custom container. Defaults to `false`. [Learn
@@ -1380,21 +1438,6 @@
#
# This field has no effect for training jobs that don't use a custom
# container.
- "workerCount": "A String", # Optional. The number of worker replicas to use for the training job. Each
- # replica in the cluster will be of the type specified in `worker_type`.
- #
- # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
- # set this value, you must also set `worker_type`.
- #
- # The default value is zero.
- "pythonModule": "A String", # Required. The Python module name to run after installing the packages.
- "args": [ # Optional. Command-line arguments passed to the training application when it
- # starts. If your job uses a custom container, then the arguments are passed
- # to the container's <a class="external" target="_blank"
- # href="https://docs.docker.com/engine/reference/builder/#entrypoint">
- # `ENTRYPOINT`</a> command.
- "A String",
- ],
},
"state": "A String", # Output only. The detailed state of a job.
"jobId": "A String", # Required. The user-specified id of the job.
@@ -1407,41 +1450,52 @@
"predictionCount": "A String", # The number of generated predictions.
},
"trainingOutput": { # Represents results of a training job. Output only. # The current training job result.
+ "isBuiltInAlgorithmJob": True or False, # Whether this job is a built-in Algorithm job.
+ "builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
+ # Only set for built-in algorithms jobs.
+ "modelPath": "A String", # The Cloud Storage path to the `model/` directory where the training job
+ # saves the trained model. Only set for successful jobs that don't use
+ # hyperparameter tuning.
+ "pythonVersion": "A String", # Python version on which the built-in algorithm was trained.
+ "runtimeVersion": "A String", # AI Platform runtime version on which the built-in algorithm was
+ # trained.
+ "framework": "A String", # Framework on which the built-in algorithm was trained.
+ },
"trials": [ # Results for individual Hyperparameter trials.
# Only set for hyperparameter tuning jobs.
{ # Represents the result of a single hyperparameter tuning trial from a
# training job. The TrainingOutput object that is returned on successful
# completion of a training job with hyperparameter tuning includes a list
# of HyperparameterOutput objects, one for each successful trial.
- "allMetrics": [ # All recorded object metrics for this trial. This field is not currently
- # populated.
- { # An observed value of a metric.
- "objectiveValue": 3.14, # The objective value at this training step.
- "trainingStep": "A String", # The global training step for this metric.
- },
- ],
- "hyperparameters": { # The hyperparameters given to this trial.
- "a_key": "A String",
- },
"trialId": "A String", # The trial id for these results.
"endTime": "A String", # Output only. End time for the trial.
- "isTrialStoppedEarly": True or False, # True if the trial is stopped early.
"startTime": "A String", # Output only. Start time for the trial.
+ "isTrialStoppedEarly": True or False, # True if the trial is stopped early.
"finalMetric": { # An observed value of a metric. # The final objective metric seen for this trial.
- "objectiveValue": 3.14, # The objective value at this training step.
"trainingStep": "A String", # The global training step for this metric.
+ "objectiveValue": 3.14, # The objective value at this training step.
},
"builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
# Only set for trials of built-in algorithms jobs that have succeeded.
- "framework": "A String", # Framework on which the built-in algorithm was trained.
"modelPath": "A String", # The Cloud Storage path to the `model/` directory where the training job
# saves the trained model. Only set for successful jobs that don't use
# hyperparameter tuning.
"pythonVersion": "A String", # Python version on which the built-in algorithm was trained.
"runtimeVersion": "A String", # AI Platform runtime version on which the built-in algorithm was
# trained.
+ "framework": "A String", # Framework on which the built-in algorithm was trained.
},
"state": "A String", # Output only. The detailed state of the trial.
+        "allMetrics": [ # All recorded objective metrics for this trial. This field is not currently
+ # populated.
+ { # An observed value of a metric.
+ "trainingStep": "A String", # The global training step for this metric.
+ "objectiveValue": 3.14, # The objective value at this training step.
+ },
+ ],
+ "hyperparameters": { # The hyperparameters given to this trial.
+ "a_key": "A String",
+ },
},
],
"hyperparameterMetricTag": "A String", # The TensorFlow summary tag name used for optimizing hyperparameter tuning
@@ -1452,17 +1506,6 @@
# Only set for hyperparameter tuning jobs.
"isHyperparameterTuningJob": True or False, # Whether this job is a hyperparameter tuning job.
"consumedMLUnits": 3.14, # The amount of ML units consumed by the job.
- "isBuiltInAlgorithmJob": True or False, # Whether this job is a built-in Algorithm job.
- "builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
- # Only set for built-in algorithms jobs.
- "framework": "A String", # Framework on which the built-in algorithm was trained.
- "modelPath": "A String", # The Cloud Storage path to the `model/` directory where the training job
- # saves the trained model. Only set for successful jobs that don't use
- # hyperparameter tuning.
- "pythonVersion": "A String", # Python version on which the built-in algorithm was trained.
- "runtimeVersion": "A String", # AI Platform runtime version on which the built-in algorithm was
- # trained.
- },
},
"createTime": "A String", # Output only. When the job was created.
"labels": { # Optional. One or more labels that you can add, to organize your jobs.
@@ -1472,49 +1515,6 @@
# <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
"a_key": "A String",
},
- "predictionInput": { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
- "outputPath": "A String", # Required. The output Google Cloud Storage location.
- "outputDataFormat": "A String", # Optional. Format of the output data files, defaults to JSON.
- "dataFormat": "A String", # Required. The format of the input data files.
- "batchSize": "A String", # Optional. Number of records per batch, defaults to 64.
- # The service will buffer batch_size number of records in memory before
- # invoking one Tensorflow prediction call internally. So take the record
- # size and memory available into consideration when setting this parameter.
- "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this batch
- # prediction. If not set, AI Platform will pick the runtime version used
- # during the CreateVersion request for this model version, or choose the
- # latest stable version when model version information is not available
- # such as when the model is specified by uri.
- "inputPaths": [ # Required. The Cloud Storage location of the input data files. May contain
- # <a href="/storage/docs/gsutil/addlhelp/WildcardNames">wildcards</a>.
- "A String",
- ],
- "region": "A String", # Required. The Google Compute Engine region to run the prediction job in.
- # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
- # for AI Platform services.
- "versionName": "A String", # Use this field if you want to specify a version of the model to use. The
- # string is formatted the same way as `model_version`, with the addition
- # of the version information:
- #
- # `"projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION"`
- "modelName": "A String", # Use this field if you want to use the default version for the specified
- # model. The string must use the following format:
- #
- # `"projects/YOUR_PROJECT/models/YOUR_MODEL"`
- "uri": "A String", # Use this field if you want to specify a Google Cloud Storage path for
- # the model to use.
- "maxWorkerCount": "A String", # Optional. The maximum number of workers to be used for parallel processing.
- # Defaults to 10 if not specified.
- "signatureName": "A String", # Optional. The name of the signature defined in the SavedModel to use for
- # this job. Please refer to
- # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
- # for information about how to use signatures.
- #
- # Defaults to
- # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
- # , which is "serving_default".
- },
- "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
}</pre>
</div>
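A minimal sketch of how the Job fields documented above might be assembled and submitted with the google-api-python-client. The project id, container image, and job id are placeholders, and `masterType` is assumed from the full TrainingInput message (it is required when `scaleTier` is `CUSTOM`); treat this as illustrative rather than part of the generated reference.

from googleapiclient import discovery

ml = discovery.build('ml', 'v1')

job_body = {
    'jobId': 'my_training_job_001',          # Required. User-specified job id.
    'trainingInput': {
        'region': 'us-central1',             # Required. Region to run the job in.
        'scaleTier': 'CUSTOM',               # Needed before workerType/workerCount can be set.
        'masterType': 'n1-standard-8',       # Assumed field; required with scaleTier CUSTOM.
        'masterConfig': {
            'imageUri': 'gcr.io/PROJECT_ID/my-trainer:latest',
        },
        'workerType': 'n1-standard-8',
        'workerCount': '2',                  # String-encoded int64, per the reference above.
        'args': ['--epochs=10'],             # Passed to the container's ENTRYPOINT.
    },
}

request = ml.projects().jobs().create(parent='projects/PROJECT_ID', body=job_body)
job = request.execute()
print(job['state'])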
@@ -1533,6 +1533,49 @@
An object of the form:
{ # Represents a training or prediction job.
+ "predictionInput": { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
+ "versionName": "A String", # Use this field if you want to specify a version of the model to use. The
+ # string is formatted the same way as `model_version`, with the addition
+ # of the version information:
+ #
+ # `"projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION"`
+ "modelName": "A String", # Use this field if you want to use the default version for the specified
+ # model. The string must use the following format:
+ #
+ # `"projects/YOUR_PROJECT/models/YOUR_MODEL"`
+ "uri": "A String", # Use this field if you want to specify a Google Cloud Storage path for
+ # the model to use.
+ "maxWorkerCount": "A String", # Optional. The maximum number of workers to be used for parallel processing.
+ # Defaults to 10 if not specified.
+ "signatureName": "A String", # Optional. The name of the signature defined in the SavedModel to use for
+ # this job. Please refer to
+ # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
+ # for information about how to use signatures.
+ #
+ # Defaults to
+ # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
+ # , which is "serving_default".
+ "outputPath": "A String", # Required. The output Google Cloud Storage location.
+ "outputDataFormat": "A String", # Optional. Format of the output data files, defaults to JSON.
+ "dataFormat": "A String", # Required. The format of the input data files.
+ "batchSize": "A String", # Optional. Number of records per batch, defaults to 64.
+          # The service buffers batch_size records in memory before
+          # invoking one TensorFlow prediction call internally, so take the record
+          # size and available memory into consideration when setting this parameter.
+ "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this batch
+ # prediction. If not set, AI Platform will pick the runtime version used
+ # during the CreateVersion request for this model version, or choose the
+ # latest stable version when model version information is not available
+ # such as when the model is specified by uri.
+ "inputPaths": [ # Required. The Cloud Storage location of the input data files. May contain
+ # <a href="/storage/docs/gsutil/addlhelp/WildcardNames">wildcards</a>.
+ "A String",
+ ],
+ "region": "A String", # Required. The Google Compute Engine region to run the prediction job in.
+ # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
+ # for AI Platform services.
+ },
+ "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
"etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
# prevent simultaneous updates of a job from overwriting each other.
# It is strongly suggested that systems make use of the `etag` in the
@@ -1545,6 +1588,21 @@
# command-line arguments and/or in a YAML configuration file referenced from
# the --config command-line argument. For details, see the guide to [submitting
# a training job](/ai-platform/training/docs/training-jobs).
+ "workerCount": "A String", # Optional. The number of worker replicas to use for the training job. Each
+ # replica in the cluster will be of the type specified in `worker_type`.
+ #
+ # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+ # set this value, you must also set `worker_type`.
+ #
+ # The default value is zero.
+ "pythonModule": "A String", # Required. The Python module name to run after installing the packages.
+ "args": [ # Optional. Command-line arguments passed to the training application when it
+ # starts. If your job uses a custom container, then the arguments are passed
+ # to the container's <a class="external" target="_blank"
+ # href="https://docs.docker.com/engine/reference/builder/#entrypoint">
+ # `ENTRYPOINT`</a> command.
+ "A String",
+ ],
"parameterServerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
#
# You should only set `parameterServerConfig.acceleratorConfig` if
@@ -1556,29 +1614,6 @@
# your parameter server. If `parameterServerConfig.imageUri` has not been
# set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
- "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
- # the one used in the custom container. This field is required if the replica
- # is a TPU worker that uses a custom container. Otherwise, do not specify
- # this field. This must be a [runtime version that currently supports
- # training with
- # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
- #
- # Note that the version of TensorFlow included in a runtime version may
- # differ from the numbering of the runtime version itself, because it may
- # have a different [patch
- # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
- # In this field, you must specify the runtime version (TensorFlow minor
- # version). For example, if your custom container runs TensorFlow `1.x.y`,
- # specify `1.x`.
- "containerCommand": [ # The command with which the replica's custom container is run.
- # If provided, it will override default ENTRYPOINT of the docker image.
- # If not provided, the docker image's ENTRYPOINT is used.
- # It cannot be set if custom container image is
- # not provided.
- # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
- # both cannot be set at the same time.
- "A String",
- ],
"imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
# Registry. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
@@ -1611,6 +1646,29 @@
"count": "A String", # The number of accelerators to attach to each machine running the job.
"type": "A String", # The type of accelerator to use.
},
+ "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+ # the one used in the custom container. This field is required if the replica
+ # is a TPU worker that uses a custom container. Otherwise, do not specify
+ # this field. This must be a [runtime version that currently supports
+ # training with
+ # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+ #
+ # Note that the version of TensorFlow included in a runtime version may
+ # differ from the numbering of the runtime version itself, because it may
+ # have a different [patch
+ # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+ # In this field, you must specify the runtime version (TensorFlow minor
+ # version). For example, if your custom container runs TensorFlow `1.x.y`,
+ # specify `1.x`.
+ "containerCommand": [ # The command with which the replica's custom container is run.
+          # If provided, it overrides the default ENTRYPOINT of the Docker image.
+          # If not provided, the Docker image's ENTRYPOINT is used.
+          # It cannot be set if a custom container image is
+          # not provided.
+ # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+ # both cannot be set at the same time.
+ "A String",
+ ],
},
"encryptionConfig": { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
# protect resources created by a training job, instead of using Google's
@@ -1627,14 +1685,8 @@
# `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
},
"hyperparameters": { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
- "hyperparameterMetricTag": "A String", # Optional. The TensorFlow summary tag name to use for optimizing trials. For
- # current versions of TensorFlow, this tag name should exactly match what is
- # shown in TensorBoard, including all scopes. For versions of TensorFlow
- # prior to 0.12, this should be only the tag passed to tf.Summary.
- # By default, "training/hptuning/metric" will be used.
"params": [ # Required. The set of parameters to tune.
{ # Represents a single hyperparameter to optimize.
- "type": "A String", # Required. The type of the parameter.
"categoricalValues": [ # Required if type is `CATEGORICAL`. The list of possible categories.
"A String",
],
@@ -1657,6 +1709,7 @@
"maxValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
# should be unset if type is `CATEGORICAL`. This value should be integers if
# type is `INTEGER`.
+ "type": "A String", # Required. The type of the parameter.
},
],
"enableTrialEarlyStopping": True or False, # Optional. Indicates if the hyperparameter tuning job enables auto trial
@@ -1692,6 +1745,11 @@
# tuning job.
# Uses the default AI Platform hyperparameter tuning
# algorithm if unspecified.
+ "hyperparameterMetricTag": "A String", # Optional. The TensorFlow summary tag name to use for optimizing trials. For
+ # current versions of TensorFlow, this tag name should exactly match what is
+ # shown in TensorBoard, including all scopes. For versions of TensorFlow
+ # prior to 0.12, this should be only the tag passed to tf.Summary.
+ # By default, "training/hptuning/metric" will be used.
},
"workerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
#
@@ -1704,29 +1762,6 @@
# worker. If `workerConfig.imageUri` has not been set, AI Platform uses
# the value of `masterConfig.imageUri`. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
- "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
- # the one used in the custom container. This field is required if the replica
- # is a TPU worker that uses a custom container. Otherwise, do not specify
- # this field. This must be a [runtime version that currently supports
- # training with
- # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
- #
- # Note that the version of TensorFlow included in a runtime version may
- # differ from the numbering of the runtime version itself, because it may
- # have a different [patch
- # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
- # In this field, you must specify the runtime version (TensorFlow minor
- # version). For example, if your custom container runs TensorFlow `1.x.y`,
- # specify `1.x`.
- "containerCommand": [ # The command with which the replica's custom container is run.
- # If provided, it will override default ENTRYPOINT of the docker image.
- # If not provided, the docker image's ENTRYPOINT is used.
- # It cannot be set if custom container image is
- # not provided.
- # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
- # both cannot be set at the same time.
- "A String",
- ],
"imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
# Registry. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
@@ -1759,6 +1794,29 @@
"count": "A String", # The number of accelerators to attach to each machine running the job.
"type": "A String", # The type of accelerator to use.
},
+ "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+ # the one used in the custom container. This field is required if the replica
+ # is a TPU worker that uses a custom container. Otherwise, do not specify
+ # this field. This must be a [runtime version that currently supports
+ # training with
+ # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+ #
+ # Note that the version of TensorFlow included in a runtime version may
+ # differ from the numbering of the runtime version itself, because it may
+ # have a different [patch
+ # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+ # In this field, you must specify the runtime version (TensorFlow minor
+ # version). For example, if your custom container runs TensorFlow `1.x.y`,
+ # specify `1.x`.
+ "containerCommand": [ # The command with which the replica's custom container is run.
+          # If provided, it overrides the default ENTRYPOINT of the Docker image.
+          # If not provided, the Docker image's ENTRYPOINT is used.
+          # It cannot be set if a custom container image is
+          # not provided.
+ # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+ # both cannot be set at the same time.
+ "A String",
+ ],
},
"parameterServerCount": "A String", # Optional. The number of parameter server replicas to use for the training
# job. Each replica in the cluster will be of the type specified in
@@ -1851,8 +1909,6 @@
#
# This value must be present when `scaleTier` is set to `CUSTOM` and
# `evaluatorCount` is greater than zero.
- "region": "A String", # Required. The region to run the training job in. See the [available
- # regions](/ai-platform/training/docs/regions) for AI Platform Training.
"workerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
# job's worker nodes.
#
@@ -1869,6 +1925,8 @@
#
# This value must be present when `scaleTier` is set to `CUSTOM` and
# `workerCount` is greater than zero.
+ "region": "A String", # Required. The region to run the training job in. See the [available
+ # regions](/ai-platform/training/docs/regions) for AI Platform Training.
"parameterServerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
# job's parameter server.
#
@@ -1892,29 +1950,6 @@
# `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
# about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
- "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
- # the one used in the custom container. This field is required if the replica
- # is a TPU worker that uses a custom container. Otherwise, do not specify
- # this field. This must be a [runtime version that currently supports
- # training with
- # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
- #
- # Note that the version of TensorFlow included in a runtime version may
- # differ from the numbering of the runtime version itself, because it may
- # have a different [patch
- # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
- # In this field, you must specify the runtime version (TensorFlow minor
- # version). For example, if your custom container runs TensorFlow `1.x.y`,
- # specify `1.x`.
- "containerCommand": [ # The command with which the replica's custom container is run.
- # If provided, it will override default ENTRYPOINT of the docker image.
- # If not provided, the docker image's ENTRYPOINT is used.
- # It cannot be set if custom container image is
- # not provided.
- # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
- # both cannot be set at the same time.
- "A String",
- ],
"imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
# Registry. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
@@ -1947,6 +1982,29 @@
"count": "A String", # The number of accelerators to attach to each machine running the job.
"type": "A String", # The type of accelerator to use.
},
+ "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+ # the one used in the custom container. This field is required if the replica
+ # is a TPU worker that uses a custom container. Otherwise, do not specify
+ # this field. This must be a [runtime version that currently supports
+ # training with
+ # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+ #
+ # Note that the version of TensorFlow included in a runtime version may
+ # differ from the numbering of the runtime version itself, because it may
+ # have a different [patch
+ # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+ # In this field, you must specify the runtime version (TensorFlow minor
+ # version). For example, if your custom container runs TensorFlow `1.x.y`,
+ # specify `1.x`.
+ "containerCommand": [ # The command with which the replica's custom container is run.
+          # If provided, it overrides the default ENTRYPOINT of the Docker image.
+          # If not provided, the Docker image's ENTRYPOINT is used.
+          # It cannot be set if a custom container image is
+          # not provided.
+ # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+ # both cannot be set at the same time.
+ "A String",
+ ],
},
"scaleTier": "A String", # Required. Specifies the machine types, the number of replicas for workers
# and parameter servers.
@@ -1968,16 +2026,6 @@
#
# Read more about the Python versions available for [each runtime
# version](/ml-engine/docs/runtime-version-list).
- "network": "A String", # Optional. The full name of the Google Compute Engine
- # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
- # is peered. For example, projects/12345/global/networks/myVPC. Format is of
- # the form projects/{project}/global/networks/{network}. Where {project} is a
- # project number, as in '12345', and {network} is network name.".
- #
- # Private services access must already be configured for the network. If left
- # unspecified, the Job is not peered with any network. Learn more -
- # Connecting Job to user network over private
- # IP.
"scheduling": { # All parameters related to scheduling of training jobs. # Optional. Scheduling options for a training job.
"maxWaitTime": "A String",
"maxRunningTime": "A String", # Optional. The maximum job running time, expressed in seconds. The field can
@@ -2004,6 +2052,16 @@
# ...
# ```
},
+ "network": "A String", # Optional. The full name of the Google Compute Engine
+ # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
+        # is peered. For example, projects/12345/global/networks/myVPC. The format
+        # is projects/{project}/global/networks/{network}, where {project} is a
+        # project number, as in '12345', and {network} is a network name.
+        #
+        # Private services access must already be configured for the network. If left
+        # unspecified, the Job is not peered with any network. Learn more about
+        # connecting a Job to a user network over private
+        # IP.
"evaluatorConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
#
# You should only set `evaluatorConfig.acceleratorConfig` if
@@ -2015,29 +2073,6 @@
# your evaluator. If `evaluatorConfig.imageUri` has not been
# set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
- "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
- # the one used in the custom container. This field is required if the replica
- # is a TPU worker that uses a custom container. Otherwise, do not specify
- # this field. This must be a [runtime version that currently supports
- # training with
- # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
- #
- # Note that the version of TensorFlow included in a runtime version may
- # differ from the numbering of the runtime version itself, because it may
- # have a different [patch
- # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
- # In this field, you must specify the runtime version (TensorFlow minor
- # version). For example, if your custom container runs TensorFlow `1.x.y`,
- # specify `1.x`.
- "containerCommand": [ # The command with which the replica's custom container is run.
- # If provided, it will override default ENTRYPOINT of the docker image.
- # If not provided, the docker image's ENTRYPOINT is used.
- # It cannot be set if custom container image is
- # not provided.
- # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
- # both cannot be set at the same time.
- "A String",
- ],
"imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
# Registry. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
@@ -2070,6 +2105,29 @@
"count": "A String", # The number of accelerators to attach to each machine running the job.
"type": "A String", # The type of accelerator to use.
},
+ "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+ # the one used in the custom container. This field is required if the replica
+ # is a TPU worker that uses a custom container. Otherwise, do not specify
+ # this field. This must be a [runtime version that currently supports
+ # training with
+ # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+ #
+ # Note that the version of TensorFlow included in a runtime version may
+ # differ from the numbering of the runtime version itself, because it may
+ # have a different [patch
+ # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+ # In this field, you must specify the runtime version (TensorFlow minor
+ # version). For example, if your custom container runs TensorFlow `1.x.y`,
+ # specify `1.x`.
+ "containerCommand": [ # The command with which the replica's custom container is run.
+          # If provided, it overrides the default ENTRYPOINT of the Docker image.
+          # If not provided, the Docker image's ENTRYPOINT is used.
+          # It cannot be set if a custom container image is
+          # not provided.
+ # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+ # both cannot be set at the same time.
+ "A String",
+ ],
},
"useChiefInTfConfig": True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
# variable when training with a custom container. Defaults to `false`. [Learn
@@ -2078,21 +2136,6 @@
#
# This field has no effect for training jobs that don't use a custom
# container.
- "workerCount": "A String", # Optional. The number of worker replicas to use for the training job. Each
- # replica in the cluster will be of the type specified in `worker_type`.
- #
- # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
- # set this value, you must also set `worker_type`.
- #
- # The default value is zero.
- "pythonModule": "A String", # Required. The Python module name to run after installing the packages.
- "args": [ # Optional. Command-line arguments passed to the training application when it
- # starts. If your job uses a custom container, then the arguments are passed
- # to the container's <a class="external" target="_blank"
- # href="https://docs.docker.com/engine/reference/builder/#entrypoint">
- # `ENTRYPOINT`</a> command.
- "A String",
- ],
},
"state": "A String", # Output only. The detailed state of a job.
"jobId": "A String", # Required. The user-specified id of the job.
@@ -2105,41 +2148,52 @@
"predictionCount": "A String", # The number of generated predictions.
},
"trainingOutput": { # Represents results of a training job. Output only. # The current training job result.
+      "isBuiltInAlgorithmJob": True or False, # Whether this job is a built-in algorithm job.
+ "builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
+ # Only set for built-in algorithms jobs.
+ "modelPath": "A String", # The Cloud Storage path to the `model/` directory where the training job
+ # saves the trained model. Only set for successful jobs that don't use
+ # hyperparameter tuning.
+ "pythonVersion": "A String", # Python version on which the built-in algorithm was trained.
+ "runtimeVersion": "A String", # AI Platform runtime version on which the built-in algorithm was
+ # trained.
+ "framework": "A String", # Framework on which the built-in algorithm was trained.
+ },
"trials": [ # Results for individual Hyperparameter trials.
# Only set for hyperparameter tuning jobs.
{ # Represents the result of a single hyperparameter tuning trial from a
# training job. The TrainingOutput object that is returned on successful
# completion of a training job with hyperparameter tuning includes a list
# of HyperparameterOutput objects, one for each successful trial.
- "allMetrics": [ # All recorded object metrics for this trial. This field is not currently
- # populated.
- { # An observed value of a metric.
- "objectiveValue": 3.14, # The objective value at this training step.
- "trainingStep": "A String", # The global training step for this metric.
- },
- ],
- "hyperparameters": { # The hyperparameters given to this trial.
- "a_key": "A String",
- },
"trialId": "A String", # The trial id for these results.
"endTime": "A String", # Output only. End time for the trial.
- "isTrialStoppedEarly": True or False, # True if the trial is stopped early.
"startTime": "A String", # Output only. Start time for the trial.
+ "isTrialStoppedEarly": True or False, # True if the trial is stopped early.
"finalMetric": { # An observed value of a metric. # The final objective metric seen for this trial.
- "objectiveValue": 3.14, # The objective value at this training step.
"trainingStep": "A String", # The global training step for this metric.
+ "objectiveValue": 3.14, # The objective value at this training step.
},
"builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
# Only set for trials of built-in algorithms jobs that have succeeded.
- "framework": "A String", # Framework on which the built-in algorithm was trained.
"modelPath": "A String", # The Cloud Storage path to the `model/` directory where the training job
# saves the trained model. Only set for successful jobs that don't use
# hyperparameter tuning.
"pythonVersion": "A String", # Python version on which the built-in algorithm was trained.
"runtimeVersion": "A String", # AI Platform runtime version on which the built-in algorithm was
# trained.
+ "framework": "A String", # Framework on which the built-in algorithm was trained.
},
"state": "A String", # Output only. The detailed state of the trial.
+        "allMetrics": [ # All recorded objective metrics for this trial. This field is not currently
+ # populated.
+ { # An observed value of a metric.
+ "trainingStep": "A String", # The global training step for this metric.
+ "objectiveValue": 3.14, # The objective value at this training step.
+ },
+ ],
+ "hyperparameters": { # The hyperparameters given to this trial.
+ "a_key": "A String",
+ },
},
],
"hyperparameterMetricTag": "A String", # The TensorFlow summary tag name used for optimizing hyperparameter tuning
@@ -2150,17 +2204,6 @@
# Only set for hyperparameter tuning jobs.
"isHyperparameterTuningJob": True or False, # Whether this job is a hyperparameter tuning job.
"consumedMLUnits": 3.14, # The amount of ML units consumed by the job.
- "isBuiltInAlgorithmJob": True or False, # Whether this job is a built-in Algorithm job.
- "builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
- # Only set for built-in algorithms jobs.
- "framework": "A String", # Framework on which the built-in algorithm was trained.
- "modelPath": "A String", # The Cloud Storage path to the `model/` directory where the training job
- # saves the trained model. Only set for successful jobs that don't use
- # hyperparameter tuning.
- "pythonVersion": "A String", # Python version on which the built-in algorithm was trained.
- "runtimeVersion": "A String", # AI Platform runtime version on which the built-in algorithm was
- # trained.
- },
},
"createTime": "A String", # Output only. When the job was created.
"labels": { # Optional. One or more labels that you can add, to organize your jobs.
@@ -2170,49 +2213,6 @@
# <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
"a_key": "A String",
},
- "predictionInput": { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
- "outputPath": "A String", # Required. The output Google Cloud Storage location.
- "outputDataFormat": "A String", # Optional. Format of the output data files, defaults to JSON.
- "dataFormat": "A String", # Required. The format of the input data files.
- "batchSize": "A String", # Optional. Number of records per batch, defaults to 64.
- # The service will buffer batch_size number of records in memory before
- # invoking one Tensorflow prediction call internally. So take the record
- # size and memory available into consideration when setting this parameter.
- "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this batch
- # prediction. If not set, AI Platform will pick the runtime version used
- # during the CreateVersion request for this model version, or choose the
- # latest stable version when model version information is not available
- # such as when the model is specified by uri.
- "inputPaths": [ # Required. The Cloud Storage location of the input data files. May contain
- # <a href="/storage/docs/gsutil/addlhelp/WildcardNames">wildcards</a>.
- "A String",
- ],
- "region": "A String", # Required. The Google Compute Engine region to run the prediction job in.
- # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
- # for AI Platform services.
- "versionName": "A String", # Use this field if you want to specify a version of the model to use. The
- # string is formatted the same way as `model_version`, with the addition
- # of the version information:
- #
- # `"projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION"`
- "modelName": "A String", # Use this field if you want to use the default version for the specified
- # model. The string must use the following format:
- #
- # `"projects/YOUR_PROJECT/models/YOUR_MODEL"`
- "uri": "A String", # Use this field if you want to specify a Google Cloud Storage path for
- # the model to use.
- "maxWorkerCount": "A String", # Optional. The maximum number of workers to be used for parallel processing.
- # Defaults to 10 if not specified.
- "signatureName": "A String", # Optional. The name of the signature defined in the SavedModel to use for
- # this job. Please refer to
- # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
- # for information about how to use signatures.
- #
- # Defaults to
- # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
- # , which is "serving_default".
- },
- "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
}</pre>
</div>
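A minimal sketch of reading back a Job like the object above and inspecting its output fields, assuming the standard terminal states SUCCEEDED, FAILED, and CANCELLED; the project and job ids are placeholders.

import time

from googleapiclient import discovery

ml = discovery.build('ml', 'v1')
name = 'projects/PROJECT_ID/jobs/my_training_job_001'

while True:
    job = ml.projects().jobs().get(name=name).execute()
    if job['state'] in ('SUCCEEDED', 'FAILED', 'CANCELLED'):
        break
    time.sleep(60)                            # Poll until the job reaches a terminal state.

if job['state'] == 'SUCCEEDED':
    output = job.get('trainingOutput', {})
    print('ML units consumed:', output.get('consumedMLUnits'))
    # `trials` is only set for hyperparameter tuning jobs.
    for trial in output.get('trials', []):
        metric = trial.get('finalMetric', {})
        print(trial['trialId'], metric.get('objectiveValue'))
else:
    print('Job did not succeed:', job.get('errorMessage'))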
@@ -2312,18 +2312,6 @@
#
# For a description of IAM and its features, see the
# [IAM documentation](https://cloud.google.com/iam/docs/).
- "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
- # prevent simultaneous updates of a policy from overwriting each other.
- # It is strongly suggested that systems make use of the `etag` in the
- # read-modify-write cycle to perform policy updates in order to avoid race
- # conditions: An `etag` is returned in the response to `getIamPolicy`, and
- # systems are expected to put that etag in the request to `setIamPolicy` to
- # ensure that their change will be applied to the same version of the policy.
- #
- # **Important:** If you use IAM Conditions, you must include the `etag` field
- # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
- # you to overwrite a version `3` policy with a version `1` policy, and all of
- # the conditions in the version `3` policy are lost.
"version": 42, # Specifies the format of the policy.
#
# Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
@@ -2479,15 +2467,15 @@
# The exact variables and functions that may be referenced within an expression
# are determined by the service that evaluates it. See the service
# documentation for additional information.
+ "description": "A String", # Optional. Description of the expression. This is a longer text which
+ # describes the expression, e.g. when hovered over it in a UI.
+ "expression": "A String", # Textual representation of an expression in Common Expression Language
+ # syntax.
"title": "A String", # Optional. Title for the expression, i.e. a short string describing
# its purpose. This can be used e.g. in UIs which allow to enter the
# expression.
"location": "A String", # Optional. String indicating the location of the expression for error
# reporting, e.g. a file name and a position in the file.
- "description": "A String", # Optional. Description of the expression. This is a longer text which
- # describes the expression, e.g. when hovered over it in a UI.
- "expression": "A String", # Textual representation of an expression in Common Expression Language
- # syntax.
},
"members": [ # Specifies the identities requesting access for a Cloud Platform resource.
# `members` can have the following values:
@@ -2538,11 +2526,23 @@
# For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
},
],
+ "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
+ # prevent simultaneous updates of a policy from overwriting each other.
+ # It is strongly suggested that systems make use of the `etag` in the
+ # read-modify-write cycle to perform policy updates in order to avoid race
+ # conditions: An `etag` is returned in the response to `getIamPolicy`, and
+ # systems are expected to put that etag in the request to `setIamPolicy` to
+ # ensure that their change will be applied to the same version of the policy.
+ #
+ # **Important:** If you use IAM Conditions, you must include the `etag` field
+ # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+ # you to overwrite a version `3` policy with a version `1` policy, and all of
+ # the conditions in the version `3` policy are lost.
}</pre>
</div>
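A minimal sketch of the etag read-modify-write cycle described above, using getIamPolicy together with setIamPolicy (the standard companion method for IAM-enabled resources); the resource name and member are placeholders.

from googleapiclient import discovery

ml = discovery.build('ml', 'v1')
resource = 'projects/PROJECT_ID/jobs/my_training_job_001'

# Read the current policy; the response includes the etag.
policy = ml.projects().jobs().getIamPolicy(resource=resource).execute()

# Modify the policy locally.
policy.setdefault('bindings', []).append({
    'role': 'roles/viewer',
    'members': ['user:someone@example.com'],
})

# Write it back. Because the etag from getIamPolicy is sent along with the
# policy, a concurrent update is rejected instead of silently overwritten.
updated = ml.projects().jobs().setIamPolicy(resource=resource, body={'policy': policy}).execute()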
<div class="method">
- <code class="details" id="list">list(parent, pageToken=None, pageSize=None, filter=None, x__xgafv=None)</code>
+ <code class="details" id="list">list(parent, filter=None, pageToken=None, pageSize=None, x__xgafv=None)</code>
<pre>Lists the jobs in the project.
If there are no jobs that match the request parameters, the list
@@ -2550,15 +2550,6 @@
Args:
parent: string, Required. The name of the project for which to list jobs. (required)
- pageToken: string, Optional. A page token to request the next page of results.
-
-You get the token from the `next_page_token` field of the response from
-the previous call.
- pageSize: integer, Optional. The number of jobs to retrieve per "page" of results. If there
-are more remaining results than this number, the response message will
-contain a valid value in the `next_page_token` field.
-
-The default value is 20, and the maximum page size is 100.
filter: string, Optional. Specifies the subset of jobs to retrieve.
You can filter on the value of one or more attributes of the job object.
For example, retrieve jobs with a job identifier that starts with 'census':
@@ -2568,6 +2559,15 @@
AND state:FAILED'</code>
<p>For more examples, see the guide to
<a href="/ml-engine/docs/tensorflow/monitor-training">monitoring jobs</a>.
+ pageToken: string, Optional. A page token to request the next page of results.
+
+You get the token from the `next_page_token` field of the response from
+the previous call.
+ pageSize: integer, Optional. The number of jobs to retrieve per "page" of results. If there
+are more remaining results than this number, the response message will
+contain a valid value in the `next_page_token` field.
+
+The default value is 20, and the maximum page size is 100.
x__xgafv: string, V1 error format.
Allowed values
1 - v1 error format
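A minimal sketch of listing jobs with the filter and paging parameters described above, walking every page with the generated list_next helper; PROJECT_ID and the filter string are placeholders (the filter follows the 'jobId:census*' style shown in the example).

from googleapiclient import discovery

ml = discovery.build('ml', 'v1')
jobs_api = ml.projects().jobs()

request = jobs_api.list(
    parent='projects/PROJECT_ID',
    filter='jobId:census*',   # Only jobs whose id starts with "census".
    pageSize=50,              # Default is 20, maximum is 100.
)
while request is not None:
    response = request.execute()
    for job in response.get('jobs', []):
        print(job['jobId'], job['state'])
    # Returns None when there is no next_page_token.
    request = jobs_api.list_next(previous_request=request, previous_response=response)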
@@ -2579,6 +2579,49 @@
{ # Response message for the ListJobs method.
"jobs": [ # The list of jobs.
{ # Represents a training or prediction job.
+ "predictionInput": { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
+ "versionName": "A String", # Use this field if you want to specify a version of the model to use. The
+ # string is formatted the same way as `model_version`, with the addition
+ # of the version information:
+ #
+ # `"projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION"`
+ "modelName": "A String", # Use this field if you want to use the default version for the specified
+ # model. The string must use the following format:
+ #
+ # `"projects/YOUR_PROJECT/models/YOUR_MODEL"`
+ "uri": "A String", # Use this field if you want to specify a Google Cloud Storage path for
+ # the model to use.
+ "maxWorkerCount": "A String", # Optional. The maximum number of workers to be used for parallel processing.
+ # Defaults to 10 if not specified.
+ "signatureName": "A String", # Optional. The name of the signature defined in the SavedModel to use for
+ # this job. Please refer to
+ # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
+ # for information about how to use signatures.
+ #
+ # Defaults to
+ # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
+ # , which is "serving_default".
+ "outputPath": "A String", # Required. The output Google Cloud Storage location.
+ "outputDataFormat": "A String", # Optional. Format of the output data files, defaults to JSON.
+ "dataFormat": "A String", # Required. The format of the input data files.
+ "batchSize": "A String", # Optional. Number of records per batch, defaults to 64.
+          # The service buffers batch_size records in memory before
+          # invoking one TensorFlow prediction call internally, so take the record
+          # size and available memory into consideration when setting this parameter.
+ "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this batch
+ # prediction. If not set, AI Platform will pick the runtime version used
+ # during the CreateVersion request for this model version, or choose the
+ # latest stable version when model version information is not available
+ # such as when the model is specified by uri.
+ "inputPaths": [ # Required. The Cloud Storage location of the input data files. May contain
+ # <a href="/storage/docs/gsutil/addlhelp/WildcardNames">wildcards</a>.
+ "A String",
+ ],
+ "region": "A String", # Required. The Google Compute Engine region to run the prediction job in.
+ # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
+ # for AI Platform services.
+ },
+ "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
"etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
# prevent simultaneous updates of a job from overwriting each other.
# It is strongly suggested that systems make use of the `etag` in the
@@ -2591,6 +2634,21 @@
# command-line arguments and/or in a YAML configuration file referenced from
# the --config command-line argument. For details, see the guide to [submitting
# a training job](/ai-platform/training/docs/training-jobs).
+ "workerCount": "A String", # Optional. The number of worker replicas to use for the training job. Each
+ # replica in the cluster will be of the type specified in `worker_type`.
+ #
+ # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+ # set this value, you must also set `worker_type`.
+ #
+ # The default value is zero.
+ "pythonModule": "A String", # Required. The Python module name to run after installing the packages.
+ "args": [ # Optional. Command-line arguments passed to the training application when it
+ # starts. If your job uses a custom container, then the arguments are passed
+ # to the container's <a class="external" target="_blank"
+ # href="https://docs.docker.com/engine/reference/builder/#entrypoint">
+ # `ENTRYPOINT`</a> command.
+ "A String",
+ ],
"parameterServerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
#
# You should only set `parameterServerConfig.acceleratorConfig` if
@@ -2602,29 +2660,6 @@
# your parameter server. If `parameterServerConfig.imageUri` has not been
# set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
- "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
- # the one used in the custom container. This field is required if the replica
- # is a TPU worker that uses a custom container. Otherwise, do not specify
- # this field. This must be a [runtime version that currently supports
- # training with
- # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
- #
- # Note that the version of TensorFlow included in a runtime version may
- # differ from the numbering of the runtime version itself, because it may
- # have a different [patch
- # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
- # In this field, you must specify the runtime version (TensorFlow minor
- # version). For example, if your custom container runs TensorFlow `1.x.y`,
- # specify `1.x`.
- "containerCommand": [ # The command with which the replica's custom container is run.
- # If provided, it will override default ENTRYPOINT of the docker image.
- # If not provided, the docker image's ENTRYPOINT is used.
- # It cannot be set if custom container image is
- # not provided.
- # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
- # both cannot be set at the same time.
- "A String",
- ],
"imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
# Registry. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
@@ -2657,6 +2692,29 @@
"count": "A String", # The number of accelerators to attach to each machine running the job.
"type": "A String", # The type of accelerator to use.
},
+ "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+ # the one used in the custom container. This field is required if the replica
+ # is a TPU worker that uses a custom container. Otherwise, do not specify
+ # this field. This must be a [runtime version that currently supports
+ # training with
+ # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+ #
+ # Note that the version of TensorFlow included in a runtime version may
+ # differ from the numbering of the runtime version itself, because it may
+ # have a different [patch
+ # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+ # In this field, you must specify the runtime version (TensorFlow minor
+ # version). For example, if your custom container runs TensorFlow `1.x.y`,
+ # specify `1.x`.
+ "containerCommand": [ # The command with which the replica's custom container is run.
+          # If provided, it overrides the default ENTRYPOINT of the Docker image.
+          # If not provided, the Docker image's ENTRYPOINT is used.
+          # It cannot be set if a custom container image is
+          # not provided.
+ # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+ # both cannot be set at the same time.
+ "A String",
+ ],
},
"encryptionConfig": { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
# protect resources created by a training job, instead of using Google's
@@ -2673,14 +2731,8 @@
# `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
},
"hyperparameters": { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
- "hyperparameterMetricTag": "A String", # Optional. The TensorFlow summary tag name to use for optimizing trials. For
- # current versions of TensorFlow, this tag name should exactly match what is
- # shown in TensorBoard, including all scopes. For versions of TensorFlow
- # prior to 0.12, this should be only the tag passed to tf.Summary.
- # By default, "training/hptuning/metric" will be used.
"params": [ # Required. The set of parameters to tune.
{ # Represents a single hyperparameter to optimize.
- "type": "A String", # Required. The type of the parameter.
"categoricalValues": [ # Required if type is `CATEGORICAL`. The list of possible categories.
"A String",
],
@@ -2703,6 +2755,7 @@
"maxValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
# should be unset if type is `CATEGORICAL`. This value should be integers if
# type is `INTEGER`.
+ "type": "A String", # Required. The type of the parameter.
},
],
"enableTrialEarlyStopping": True or False, # Optional. Indicates if the hyperparameter tuning job enables auto trial
@@ -2738,6 +2791,11 @@
# tuning job.
# Uses the default AI Platform hyperparameter tuning
# algorithm if unspecified.
+ "hyperparameterMetricTag": "A String", # Optional. The TensorFlow summary tag name to use for optimizing trials. For
+ # current versions of TensorFlow, this tag name should exactly match what is
+ # shown in TensorBoard, including all scopes. For versions of TensorFlow
+ # prior to 0.12, this should be only the tag passed to tf.Summary.
+ # By default, "training/hptuning/metric" will be used.
},
"workerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
#
@@ -2750,29 +2808,6 @@
# worker. If `workerConfig.imageUri` has not been set, AI Platform uses
# the value of `masterConfig.imageUri`. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
- "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
- # the one used in the custom container. This field is required if the replica
- # is a TPU worker that uses a custom container. Otherwise, do not specify
- # this field. This must be a [runtime version that currently supports
- # training with
- # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
- #
- # Note that the version of TensorFlow included in a runtime version may
- # differ from the numbering of the runtime version itself, because it may
- # have a different [patch
- # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
- # In this field, you must specify the runtime version (TensorFlow minor
- # version). For example, if your custom container runs TensorFlow `1.x.y`,
- # specify `1.x`.
- "containerCommand": [ # The command with which the replica's custom container is run.
- # If provided, it will override default ENTRYPOINT of the docker image.
- # If not provided, the docker image's ENTRYPOINT is used.
- # It cannot be set if custom container image is
- # not provided.
- # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
- # both cannot be set at the same time.
- "A String",
- ],
"imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
# Registry. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
@@ -2805,6 +2840,29 @@
"count": "A String", # The number of accelerators to attach to each machine running the job.
"type": "A String", # The type of accelerator to use.
},
+ "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+ # the one used in the custom container. This field is required if the replica
+ # is a TPU worker that uses a custom container. Otherwise, do not specify
+ # this field. This must be a [runtime version that currently supports
+ # training with
+ # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+ #
+ # Note that the version of TensorFlow included in a runtime version may
+ # differ from the numbering of the runtime version itself, because it may
+ # have a different [patch
+ # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+ # In this field, you must specify the runtime version (TensorFlow minor
+ # version). For example, if your custom container runs TensorFlow `1.x.y`,
+ # specify `1.x`.
+ "containerCommand": [ # The command with which the replica's custom container is run.
+          # If provided, it overrides the default ENTRYPOINT of the Docker image.
+          # If not provided, the Docker image's ENTRYPOINT is used.
+          # It cannot be set if a custom container image is
+          # not provided.
+ # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+ # both cannot be set at the same time.
+ "A String",
+ ],
},
"parameterServerCount": "A String", # Optional. The number of parameter server replicas to use for the training
# job. Each replica in the cluster will be of the type specified in
@@ -2897,8 +2955,6 @@
#
# This value must be present when `scaleTier` is set to `CUSTOM` and
# `evaluatorCount` is greater than zero.
- "region": "A String", # Required. The region to run the training job in. See the [available
- # regions](/ai-platform/training/docs/regions) for AI Platform Training.
"workerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
# job's worker nodes.
#
@@ -2915,6 +2971,8 @@
#
# This value must be present when `scaleTier` is set to `CUSTOM` and
# `workerCount` is greater than zero.
+ "region": "A String", # Required. The region to run the training job in. See the [available
+ # regions](/ai-platform/training/docs/regions) for AI Platform Training.
"parameterServerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
# job's parameter server.
#
@@ -2938,29 +2996,6 @@
# `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
# about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
- "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
- # the one used in the custom container. This field is required if the replica
- # is a TPU worker that uses a custom container. Otherwise, do not specify
- # this field. This must be a [runtime version that currently supports
- # training with
- # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
- #
- # Note that the version of TensorFlow included in a runtime version may
- # differ from the numbering of the runtime version itself, because it may
- # have a different [patch
- # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
- # In this field, you must specify the runtime version (TensorFlow minor
- # version). For example, if your custom container runs TensorFlow `1.x.y`,
- # specify `1.x`.
- "containerCommand": [ # The command with which the replica's custom container is run.
- # If provided, it will override default ENTRYPOINT of the docker image.
- # If not provided, the docker image's ENTRYPOINT is used.
- # It cannot be set if custom container image is
- # not provided.
- # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
- # both cannot be set at the same time.
- "A String",
- ],
"imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
# Registry. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
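The acceleratorConfig block that accompanies each of these replica configs (the count and type fields in the neighbouring hunks) attaches accelerators to every machine of that replica. A hedged sketch, with an illustrative GPU type:

```
# Sketch only: two GPUs per machine for one replica. The accelerator type is
# illustrative; check the accelerator docs for currently supported values.
replica_config = {
    "imageUri": "gcr.io/my-project/my-trainer:latest",  # placeholder image
    "acceleratorConfig": {
        "count": "2",                # number of accelerators per machine
        "type": "NVIDIA_TESLA_K80",  # illustrative accelerator type
    },
}
```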
@@ -2993,6 +3028,29 @@
"count": "A String", # The number of accelerators to attach to each machine running the job.
"type": "A String", # The type of accelerator to use.
},
+ "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+ # the one used in the custom container. This field is required if the replica
+ # is a TPU worker that uses a custom container. Otherwise, do not specify
+ # this field. This must be a [runtime version that currently supports
+ # training with
+ # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+ #
+ # Note that the version of TensorFlow included in a runtime version may
+ # differ from the numbering of the runtime version itself, because it may
+ # have a different [patch
+ # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+ # In this field, you must specify the runtime version (TensorFlow minor
+ # version). For example, if your custom container runs TensorFlow `1.x.y`,
+ # specify `1.x`.
+ "containerCommand": [ # The command with which the replica's custom container is run.
+ # If provided, it will override default ENTRYPOINT of the docker image.
+ # If not provided, the docker image's ENTRYPOINT is used.
+ # It cannot be set if custom container image is
+ # not provided.
+ # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+ # both cannot be set at the same time.
+ "A String",
+ ],
},
"scaleTier": "A String", # Required. Specifies the machine types, the number of replicas for workers
# and parameter servers.
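When scaleTier is CUSTOM, the machine-type and replica-count fields documented in the surrounding hunks take effect. A hedged sketch of a TrainingInput built that way (machine types, paths, and versions are placeholders; masterType and packageUris come from the wider TrainingInput schema rather than these hunks):

```
# Sketch only: a CUSTOM scale tier with explicit machine types and counts.
# All concrete values are illustrative placeholders.
training_input = {
    "scaleTier": "CUSTOM",
    "masterType": "n1-standard-8",           # required with CUSTOM (full schema)
    "workerType": "n1-standard-8",
    "workerCount": "4",                      # setting this requires workerType
    "parameterServerType": "n1-standard-4",
    "parameterServerCount": "2",
    "region": "us-central1",
    "pythonModule": "trainer.task",          # module run after installing packages
    "packageUris": ["gs://my-bucket/trainer-0.1.tar.gz"],  # placeholder package
    "runtimeVersion": "2.1",                 # illustrative runtime version
}
job = {"jobId": "my_training_job_001", "trainingInput": training_input}
```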
@@ -3014,16 +3072,6 @@
#
# Read more about the Python versions available for [each runtime
# version](/ml-engine/docs/runtime-version-list).
- "network": "A String", # Optional. The full name of the Google Compute Engine
- # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
- # is peered. For example, projects/12345/global/networks/myVPC. Format is of
- # the form projects/{project}/global/networks/{network}. Where {project} is a
- # project number, as in '12345', and {network} is network name.".
- #
- # Private services access must already be configured for the network. If left
- # unspecified, the Job is not peered with any network. Learn more -
- # Connecting Job to user network over private
- # IP.
"scheduling": { # All parameters related to scheduling of training jobs. # Optional. Scheduling options for a training job.
"maxWaitTime": "A String",
"maxRunningTime": "A String", # Optional. The maximum job running time, expressed in seconds. The field can
@@ -3050,6 +3098,16 @@
# ...
# ```
},
+ "network": "A String", # Optional. The full name of the Google Compute Engine
+ # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
+ # is peered. For example, projects/12345/global/networks/myVPC. Format is of
+ # the form projects/{project}/global/networks/{network}. Where {project} is a
+ # project number, as in '12345', and {network} is network name.".
+ #
+ # Private services access must already be configured for the network. If left
+ # unspecified, the Job is not peered with any network. Learn more -
+ # Connecting Job to user network over private
+ # IP.
"evaluatorConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
#
# You should only set `evaluatorConfig.acceleratorConfig` if
@@ -3061,29 +3119,6 @@
# your evaluator. If `evaluatorConfig.imageUri` has not been
# set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
- "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
- # the one used in the custom container. This field is required if the replica
- # is a TPU worker that uses a custom container. Otherwise, do not specify
- # this field. This must be a [runtime version that currently supports
- # training with
- # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
- #
- # Note that the version of TensorFlow included in a runtime version may
- # differ from the numbering of the runtime version itself, because it may
- # have a different [patch
- # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
- # In this field, you must specify the runtime version (TensorFlow minor
- # version). For example, if your custom container runs TensorFlow `1.x.y`,
- # specify `1.x`.
- "containerCommand": [ # The command with which the replica's custom container is run.
- # If provided, it will override default ENTRYPOINT of the docker image.
- # If not provided, the docker image's ENTRYPOINT is used.
- # It cannot be set if custom container image is
- # not provided.
- # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
- # both cannot be set at the same time.
- "A String",
- ],
"imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
# Registry. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
@@ -3116,6 +3151,29 @@
"count": "A String", # The number of accelerators to attach to each machine running the job.
"type": "A String", # The type of accelerator to use.
},
+ "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+ # the one used in the custom container. This field is required if the replica
+ # is a TPU worker that uses a custom container. Otherwise, do not specify
+ # this field. This must be a [runtime version that currently supports
+ # training with
+ # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+ #
+ # Note that the version of TensorFlow included in a runtime version may
+ # differ from the numbering of the runtime version itself, because it may
+ # have a different [patch
+ # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+ # In this field, you must specify the runtime version (TensorFlow minor
+ # version). For example, if your custom container runs TensorFlow `1.x.y`,
+ # specify `1.x`.
+ "containerCommand": [ # The command with which the replica's custom container is run.
+ # If provided, it will override default ENTRYPOINT of the docker image.
+ # If not provided, the docker image's ENTRYPOINT is used.
+ # It cannot be set if custom container image is
+ # not provided.
+ # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+ # both cannot be set at the same time.
+ "A String",
+ ],
},
"useChiefInTfConfig": True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
# variable when training with a custom container. Defaults to `false`. [Learn
@@ -3124,21 +3182,6 @@
#
# This field has no effect for training jobs that don't use a custom
# container.
- "workerCount": "A String", # Optional. The number of worker replicas to use for the training job. Each
- # replica in the cluster will be of the type specified in `worker_type`.
- #
- # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
- # set this value, you must also set `worker_type`.
- #
- # The default value is zero.
- "pythonModule": "A String", # Required. The Python module name to run after installing the packages.
- "args": [ # Optional. Command-line arguments passed to the training application when it
- # starts. If your job uses a custom container, then the arguments are passed
- # to the container's <a class="external" target="_blank"
- # href="https://docs.docker.com/engine/reference/builder/#entrypoint">
- # `ENTRYPOINT`</a> command.
- "A String",
- ],
},
"state": "A String", # Output only. The detailed state of a job.
"jobId": "A String", # Required. The user-specified id of the job.
@@ -3151,41 +3194,52 @@
"predictionCount": "A String", # The number of generated predictions.
},
"trainingOutput": { # Represents results of a training job. Output only. # The current training job result.
+ "isBuiltInAlgorithmJob": True or False, # Whether this job is a built-in Algorithm job.
+ "builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
+ # Only set for built-in algorithms jobs.
+ "modelPath": "A String", # The Cloud Storage path to the `model/` directory where the training job
+ # saves the trained model. Only set for successful jobs that don't use
+ # hyperparameter tuning.
+ "pythonVersion": "A String", # Python version on which the built-in algorithm was trained.
+ "runtimeVersion": "A String", # AI Platform runtime version on which the built-in algorithm was
+ # trained.
+ "framework": "A String", # Framework on which the built-in algorithm was trained.
+ },
"trials": [ # Results for individual Hyperparameter trials.
# Only set for hyperparameter tuning jobs.
{ # Represents the result of a single hyperparameter tuning trial from a
# training job. The TrainingOutput object that is returned on successful
# completion of a training job with hyperparameter tuning includes a list
# of HyperparameterOutput objects, one for each successful trial.
- "allMetrics": [ # All recorded object metrics for this trial. This field is not currently
- # populated.
- { # An observed value of a metric.
- "objectiveValue": 3.14, # The objective value at this training step.
- "trainingStep": "A String", # The global training step for this metric.
- },
- ],
- "hyperparameters": { # The hyperparameters given to this trial.
- "a_key": "A String",
- },
"trialId": "A String", # The trial id for these results.
"endTime": "A String", # Output only. End time for the trial.
- "isTrialStoppedEarly": True or False, # True if the trial is stopped early.
"startTime": "A String", # Output only. Start time for the trial.
+ "isTrialStoppedEarly": True or False, # True if the trial is stopped early.
"finalMetric": { # An observed value of a metric. # The final objective metric seen for this trial.
- "objectiveValue": 3.14, # The objective value at this training step.
"trainingStep": "A String", # The global training step for this metric.
+ "objectiveValue": 3.14, # The objective value at this training step.
},
"builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
# Only set for trials of built-in algorithms jobs that have succeeded.
- "framework": "A String", # Framework on which the built-in algorithm was trained.
"modelPath": "A String", # The Cloud Storage path to the `model/` directory where the training job
# saves the trained model. Only set for successful jobs that don't use
# hyperparameter tuning.
"pythonVersion": "A String", # Python version on which the built-in algorithm was trained.
"runtimeVersion": "A String", # AI Platform runtime version on which the built-in algorithm was
# trained.
+ "framework": "A String", # Framework on which the built-in algorithm was trained.
},
"state": "A String", # Output only. The detailed state of the trial.
+ "allMetrics": [ # All recorded object metrics for this trial. This field is not currently
+ # populated.
+ { # An observed value of a metric.
+ "trainingStep": "A String", # The global training step for this metric.
+ "objectiveValue": 3.14, # The objective value at this training step.
+ },
+ ],
+ "hyperparameters": { # The hyperparameters given to this trial.
+ "a_key": "A String",
+ },
},
],
"hyperparameterMetricTag": "A String", # The TensorFlow summary tag name used for optimizing hyperparameter tuning
@@ -3196,17 +3250,6 @@
# Only set for hyperparameter tuning jobs.
"isHyperparameterTuningJob": True or False, # Whether this job is a hyperparameter tuning job.
"consumedMLUnits": 3.14, # The amount of ML units consumed by the job.
- "isBuiltInAlgorithmJob": True or False, # Whether this job is a built-in Algorithm job.
- "builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
- # Only set for built-in algorithms jobs.
- "framework": "A String", # Framework on which the built-in algorithm was trained.
- "modelPath": "A String", # The Cloud Storage path to the `model/` directory where the training job
- # saves the trained model. Only set for successful jobs that don't use
- # hyperparameter tuning.
- "pythonVersion": "A String", # Python version on which the built-in algorithm was trained.
- "runtimeVersion": "A String", # AI Platform runtime version on which the built-in algorithm was
- # trained.
- },
},
"createTime": "A String", # Output only. When the job was created.
"labels": { # Optional. One or more labels that you can add, to organize your jobs.
@@ -3216,49 +3259,6 @@
# <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
"a_key": "A String",
},
- "predictionInput": { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
- "outputPath": "A String", # Required. The output Google Cloud Storage location.
- "outputDataFormat": "A String", # Optional. Format of the output data files, defaults to JSON.
- "dataFormat": "A String", # Required. The format of the input data files.
- "batchSize": "A String", # Optional. Number of records per batch, defaults to 64.
- # The service will buffer batch_size number of records in memory before
- # invoking one Tensorflow prediction call internally. So take the record
- # size and memory available into consideration when setting this parameter.
- "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this batch
- # prediction. If not set, AI Platform will pick the runtime version used
- # during the CreateVersion request for this model version, or choose the
- # latest stable version when model version information is not available
- # such as when the model is specified by uri.
- "inputPaths": [ # Required. The Cloud Storage location of the input data files. May contain
- # <a href="/storage/docs/gsutil/addlhelp/WildcardNames">wildcards</a>.
- "A String",
- ],
- "region": "A String", # Required. The Google Compute Engine region to run the prediction job in.
- # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
- # for AI Platform services.
- "versionName": "A String", # Use this field if you want to specify a version of the model to use. The
- # string is formatted the same way as `model_version`, with the addition
- # of the version information:
- #
- # `"projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION"`
- "modelName": "A String", # Use this field if you want to use the default version for the specified
- # model. The string must use the following format:
- #
- # `"projects/YOUR_PROJECT/models/YOUR_MODEL"`
- "uri": "A String", # Use this field if you want to specify a Google Cloud Storage path for
- # the model to use.
- "maxWorkerCount": "A String", # Optional. The maximum number of workers to be used for parallel processing.
- # Defaults to 10 if not specified.
- "signatureName": "A String", # Optional. The name of the signature defined in the SavedModel to use for
- # this job. Please refer to
- # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
- # for information about how to use signatures.
- #
- # Defaults to
- # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
- # , which is "serving_default".
- },
- "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
},
],
"nextPageToken": "A String", # Optional. Pass this token as the `page_token` field of the request for a
@@ -3292,6 +3292,49 @@
The object takes the form of:
{ # Represents a training or prediction job.
+ "predictionInput": { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
+ "versionName": "A String", # Use this field if you want to specify a version of the model to use. The
+ # string is formatted the same way as `model_version`, with the addition
+ # of the version information:
+ #
+ # `"projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION"`
+ "modelName": "A String", # Use this field if you want to use the default version for the specified
+ # model. The string must use the following format:
+ #
+ # `"projects/YOUR_PROJECT/models/YOUR_MODEL"`
+ "uri": "A String", # Use this field if you want to specify a Google Cloud Storage path for
+ # the model to use.
+ "maxWorkerCount": "A String", # Optional. The maximum number of workers to be used for parallel processing.
+ # Defaults to 10 if not specified.
+ "signatureName": "A String", # Optional. The name of the signature defined in the SavedModel to use for
+ # this job. Please refer to
+ # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
+ # for information about how to use signatures.
+ #
+ # Defaults to
+ # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
+ # , which is "serving_default".
+ "outputPath": "A String", # Required. The output Google Cloud Storage location.
+ "outputDataFormat": "A String", # Optional. Format of the output data files, defaults to JSON.
+ "dataFormat": "A String", # Required. The format of the input data files.
+ "batchSize": "A String", # Optional. Number of records per batch, defaults to 64.
+ # The service will buffer batch_size number of records in memory before
+ # invoking one Tensorflow prediction call internally. So take the record
+ # size and memory available into consideration when setting this parameter.
+ "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this batch
+ # prediction. If not set, AI Platform will pick the runtime version used
+ # during the CreateVersion request for this model version, or choose the
+ # latest stable version when model version information is not available
+ # such as when the model is specified by uri.
+ "inputPaths": [ # Required. The Cloud Storage location of the input data files. May contain
+ # <a href="/storage/docs/gsutil/addlhelp/WildcardNames">wildcards</a>.
+ "A String",
+ ],
+ "region": "A String", # Required. The Google Compute Engine region to run the prediction job in.
+ # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
+ # for AI Platform services.
+ },
+ "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
"etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
# prevent simultaneous updates of a job from overwriting each other.
# It is strongly suggested that systems make use of the `etag` in the
@@ -3304,6 +3347,21 @@
# command-line arguments and/or in a YAML configuration file referenced from
# the --config command-line argument. For details, see the guide to [submitting
# a training job](/ai-platform/training/docs/training-jobs).
+ "workerCount": "A String", # Optional. The number of worker replicas to use for the training job. Each
+ # replica in the cluster will be of the type specified in `worker_type`.
+ #
+ # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+ # set this value, you must also set `worker_type`.
+ #
+ # The default value is zero.
+ "pythonModule": "A String", # Required. The Python module name to run after installing the packages.
+ "args": [ # Optional. Command-line arguments passed to the training application when it
+ # starts. If your job uses a custom container, then the arguments are passed
+ # to the container's <a class="external" target="_blank"
+ # href="https://docs.docker.com/engine/reference/builder/#entrypoint">
+ # `ENTRYPOINT`</a> command.
+ "A String",
+ ],
"parameterServerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
#
# You should only set `parameterServerConfig.acceleratorConfig` if
@@ -3315,29 +3373,6 @@
# your parameter server. If `parameterServerConfig.imageUri` has not been
# set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
- "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
- # the one used in the custom container. This field is required if the replica
- # is a TPU worker that uses a custom container. Otherwise, do not specify
- # this field. This must be a [runtime version that currently supports
- # training with
- # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
- #
- # Note that the version of TensorFlow included in a runtime version may
- # differ from the numbering of the runtime version itself, because it may
- # have a different [patch
- # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
- # In this field, you must specify the runtime version (TensorFlow minor
- # version). For example, if your custom container runs TensorFlow `1.x.y`,
- # specify `1.x`.
- "containerCommand": [ # The command with which the replica's custom container is run.
- # If provided, it will override default ENTRYPOINT of the docker image.
- # If not provided, the docker image's ENTRYPOINT is used.
- # It cannot be set if custom container image is
- # not provided.
- # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
- # both cannot be set at the same time.
- "A String",
- ],
"imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
# Registry. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
@@ -3370,6 +3405,29 @@
"count": "A String", # The number of accelerators to attach to each machine running the job.
"type": "A String", # The type of accelerator to use.
},
+ "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+ # the one used in the custom container. This field is required if the replica
+ # is a TPU worker that uses a custom container. Otherwise, do not specify
+ # this field. This must be a [runtime version that currently supports
+ # training with
+ # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+ #
+ # Note that the version of TensorFlow included in a runtime version may
+ # differ from the numbering of the runtime version itself, because it may
+ # have a different [patch
+ # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+ # In this field, you must specify the runtime version (TensorFlow minor
+ # version). For example, if your custom container runs TensorFlow `1.x.y`,
+ # specify `1.x`.
+ "containerCommand": [ # The command with which the replica's custom container is run.
+ # If provided, it will override default ENTRYPOINT of the docker image.
+ # If not provided, the docker image's ENTRYPOINT is used.
+ # It cannot be set if custom container image is
+ # not provided.
+ # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+ # both cannot be set at the same time.
+ "A String",
+ ],
},
"encryptionConfig": { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
# protect resources created by a training job, instead of using Google's
@@ -3386,14 +3444,8 @@
# `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
},
"hyperparameters": { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
- "hyperparameterMetricTag": "A String", # Optional. The TensorFlow summary tag name to use for optimizing trials. For
- # current versions of TensorFlow, this tag name should exactly match what is
- # shown in TensorBoard, including all scopes. For versions of TensorFlow
- # prior to 0.12, this should be only the tag passed to tf.Summary.
- # By default, "training/hptuning/metric" will be used.
"params": [ # Required. The set of parameters to tune.
{ # Represents a single hyperparameter to optimize.
- "type": "A String", # Required. The type of the parameter.
"categoricalValues": [ # Required if type is `CATEGORICAL`. The list of possible categories.
"A String",
],
@@ -3416,6 +3468,7 @@
"maxValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
# should be unset if type is `CATEGORICAL`. This value should be integers if
# type is `INTEGER`.
+ "type": "A String", # Required. The type of the parameter.
},
],
"enableTrialEarlyStopping": True or False, # Optional. Indicates if the hyperparameter tuning job enables auto trial
@@ -3451,6 +3504,11 @@
# tuning job.
# Uses the default AI Platform hyperparameter tuning
# algorithm if unspecified.
+ "hyperparameterMetricTag": "A String", # Optional. The TensorFlow summary tag name to use for optimizing trials. For
+ # current versions of TensorFlow, this tag name should exactly match what is
+ # shown in TensorBoard, including all scopes. For versions of TensorFlow
+ # prior to 0.12, this should be only the tag passed to tf.Summary.
+ # By default, "training/hptuning/metric" will be used.
},
"workerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
#
@@ -3463,29 +3521,6 @@
# worker. If `workerConfig.imageUri` has not been set, AI Platform uses
# the value of `masterConfig.imageUri`. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
- "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
- # the one used in the custom container. This field is required if the replica
- # is a TPU worker that uses a custom container. Otherwise, do not specify
- # this field. This must be a [runtime version that currently supports
- # training with
- # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
- #
- # Note that the version of TensorFlow included in a runtime version may
- # differ from the numbering of the runtime version itself, because it may
- # have a different [patch
- # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
- # In this field, you must specify the runtime version (TensorFlow minor
- # version). For example, if your custom container runs TensorFlow `1.x.y`,
- # specify `1.x`.
- "containerCommand": [ # The command with which the replica's custom container is run.
- # If provided, it will override default ENTRYPOINT of the docker image.
- # If not provided, the docker image's ENTRYPOINT is used.
- # It cannot be set if custom container image is
- # not provided.
- # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
- # both cannot be set at the same time.
- "A String",
- ],
"imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
# Registry. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
@@ -3518,6 +3553,29 @@
"count": "A String", # The number of accelerators to attach to each machine running the job.
"type": "A String", # The type of accelerator to use.
},
+ "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+ # the one used in the custom container. This field is required if the replica
+ # is a TPU worker that uses a custom container. Otherwise, do not specify
+ # this field. This must be a [runtime version that currently supports
+ # training with
+ # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+ #
+ # Note that the version of TensorFlow included in a runtime version may
+ # differ from the numbering of the runtime version itself, because it may
+ # have a different [patch
+ # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+ # In this field, you must specify the runtime version (TensorFlow minor
+ # version). For example, if your custom container runs TensorFlow `1.x.y`,
+ # specify `1.x`.
+ "containerCommand": [ # The command with which the replica's custom container is run.
+ # If provided, it will override default ENTRYPOINT of the docker image.
+ # If not provided, the docker image's ENTRYPOINT is used.
+ # It cannot be set if custom container image is
+ # not provided.
+ # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+ # both cannot be set at the same time.
+ "A String",
+ ],
},
"parameterServerCount": "A String", # Optional. The number of parameter server replicas to use for the training
# job. Each replica in the cluster will be of the type specified in
@@ -3610,8 +3668,6 @@
#
# This value must be present when `scaleTier` is set to `CUSTOM` and
# `evaluatorCount` is greater than zero.
- "region": "A String", # Required. The region to run the training job in. See the [available
- # regions](/ai-platform/training/docs/regions) for AI Platform Training.
"workerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
# job's worker nodes.
#
@@ -3628,6 +3684,8 @@
#
# This value must be present when `scaleTier` is set to `CUSTOM` and
# `workerCount` is greater than zero.
+ "region": "A String", # Required. The region to run the training job in. See the [available
+ # regions](/ai-platform/training/docs/regions) for AI Platform Training.
"parameterServerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
# job's parameter server.
#
@@ -3651,29 +3709,6 @@
# `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
# about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
- "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
- # the one used in the custom container. This field is required if the replica
- # is a TPU worker that uses a custom container. Otherwise, do not specify
- # this field. This must be a [runtime version that currently supports
- # training with
- # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
- #
- # Note that the version of TensorFlow included in a runtime version may
- # differ from the numbering of the runtime version itself, because it may
- # have a different [patch
- # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
- # In this field, you must specify the runtime version (TensorFlow minor
- # version). For example, if your custom container runs TensorFlow `1.x.y`,
- # specify `1.x`.
- "containerCommand": [ # The command with which the replica's custom container is run.
- # If provided, it will override default ENTRYPOINT of the docker image.
- # If not provided, the docker image's ENTRYPOINT is used.
- # It cannot be set if custom container image is
- # not provided.
- # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
- # both cannot be set at the same time.
- "A String",
- ],
"imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
# Registry. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
@@ -3706,6 +3741,29 @@
"count": "A String", # The number of accelerators to attach to each machine running the job.
"type": "A String", # The type of accelerator to use.
},
+ "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+ # the one used in the custom container. This field is required if the replica
+ # is a TPU worker that uses a custom container. Otherwise, do not specify
+ # this field. This must be a [runtime version that currently supports
+ # training with
+ # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+ #
+ # Note that the version of TensorFlow included in a runtime version may
+ # differ from the numbering of the runtime version itself, because it may
+ # have a different [patch
+ # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+ # In this field, you must specify the runtime version (TensorFlow minor
+ # version). For example, if your custom container runs TensorFlow `1.x.y`,
+ # specify `1.x`.
+ "containerCommand": [ # The command with which the replica's custom container is run.
+ # If provided, it will override default ENTRYPOINT of the docker image.
+ # If not provided, the docker image's ENTRYPOINT is used.
+ # It cannot be set if custom container image is
+ # not provided.
+ # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+ # both cannot be set at the same time.
+ "A String",
+ ],
},
"scaleTier": "A String", # Required. Specifies the machine types, the number of replicas for workers
# and parameter servers.
@@ -3727,16 +3785,6 @@
#
# Read more about the Python versions available for [each runtime
# version](/ml-engine/docs/runtime-version-list).
- "network": "A String", # Optional. The full name of the Google Compute Engine
- # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
- # is peered. For example, projects/12345/global/networks/myVPC. Format is of
- # the form projects/{project}/global/networks/{network}. Where {project} is a
- # project number, as in '12345', and {network} is network name.".
- #
- # Private services access must already be configured for the network. If left
- # unspecified, the Job is not peered with any network. Learn more -
- # Connecting Job to user network over private
- # IP.
"scheduling": { # All parameters related to scheduling of training jobs. # Optional. Scheduling options for a training job.
"maxWaitTime": "A String",
"maxRunningTime": "A String", # Optional. The maximum job running time, expressed in seconds. The field can
@@ -3763,6 +3811,16 @@
# ...
# ```
},
+ "network": "A String", # Optional. The full name of the Google Compute Engine
+ # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
+ # is peered. For example, projects/12345/global/networks/myVPC. Format is of
+ # the form projects/{project}/global/networks/{network}. Where {project} is a
+ # project number, as in '12345', and {network} is network name.".
+ #
+ # Private services access must already be configured for the network. If left
+ # unspecified, the Job is not peered with any network. Learn more -
+ # Connecting Job to user network over private
+ # IP.
"evaluatorConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
#
# You should only set `evaluatorConfig.acceleratorConfig` if
@@ -3774,29 +3832,6 @@
# your evaluator. If `evaluatorConfig.imageUri` has not been
# set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
- "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
- # the one used in the custom container. This field is required if the replica
- # is a TPU worker that uses a custom container. Otherwise, do not specify
- # this field. This must be a [runtime version that currently supports
- # training with
- # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
- #
- # Note that the version of TensorFlow included in a runtime version may
- # differ from the numbering of the runtime version itself, because it may
- # have a different [patch
- # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
- # In this field, you must specify the runtime version (TensorFlow minor
- # version). For example, if your custom container runs TensorFlow `1.x.y`,
- # specify `1.x`.
- "containerCommand": [ # The command with which the replica's custom container is run.
- # If provided, it will override default ENTRYPOINT of the docker image.
- # If not provided, the docker image's ENTRYPOINT is used.
- # It cannot be set if custom container image is
- # not provided.
- # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
- # both cannot be set at the same time.
- "A String",
- ],
"imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
# Registry. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
@@ -3829,6 +3864,29 @@
"count": "A String", # The number of accelerators to attach to each machine running the job.
"type": "A String", # The type of accelerator to use.
},
+ "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+ # the one used in the custom container. This field is required if the replica
+ # is a TPU worker that uses a custom container. Otherwise, do not specify
+ # this field. This must be a [runtime version that currently supports
+ # training with
+ # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+ #
+ # Note that the version of TensorFlow included in a runtime version may
+ # differ from the numbering of the runtime version itself, because it may
+ # have a different [patch
+ # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+ # In this field, you must specify the runtime version (TensorFlow minor
+ # version). For example, if your custom container runs TensorFlow `1.x.y`,
+ # specify `1.x`.
+ "containerCommand": [ # The command with which the replica's custom container is run.
+ # If provided, it will override default ENTRYPOINT of the docker image.
+ # If not provided, the docker image's ENTRYPOINT is used.
+ # It cannot be set if custom container image is
+ # not provided.
+ # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+ # both cannot be set at the same time.
+ "A String",
+ ],
},
"useChiefInTfConfig": True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
# variable when training with a custom container. Defaults to `false`. [Learn
@@ -3837,21 +3895,6 @@
#
# This field has no effect for training jobs that don't use a custom
# container.
- "workerCount": "A String", # Optional. The number of worker replicas to use for the training job. Each
- # replica in the cluster will be of the type specified in `worker_type`.
- #
- # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
- # set this value, you must also set `worker_type`.
- #
- # The default value is zero.
- "pythonModule": "A String", # Required. The Python module name to run after installing the packages.
- "args": [ # Optional. Command-line arguments passed to the training application when it
- # starts. If your job uses a custom container, then the arguments are passed
- # to the container's <a class="external" target="_blank"
- # href="https://docs.docker.com/engine/reference/builder/#entrypoint">
- # `ENTRYPOINT`</a> command.
- "A String",
- ],
},
"state": "A String", # Output only. The detailed state of a job.
"jobId": "A String", # Required. The user-specified id of the job.
@@ -3864,41 +3907,52 @@
"predictionCount": "A String", # The number of generated predictions.
},
"trainingOutput": { # Represents results of a training job. Output only. # The current training job result.
+ "isBuiltInAlgorithmJob": True or False, # Whether this job is a built-in Algorithm job.
+ "builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
+ # Only set for built-in algorithms jobs.
+ "modelPath": "A String", # The Cloud Storage path to the `model/` directory where the training job
+ # saves the trained model. Only set for successful jobs that don't use
+ # hyperparameter tuning.
+ "pythonVersion": "A String", # Python version on which the built-in algorithm was trained.
+ "runtimeVersion": "A String", # AI Platform runtime version on which the built-in algorithm was
+ # trained.
+ "framework": "A String", # Framework on which the built-in algorithm was trained.
+ },
"trials": [ # Results for individual Hyperparameter trials.
# Only set for hyperparameter tuning jobs.
{ # Represents the result of a single hyperparameter tuning trial from a
# training job. The TrainingOutput object that is returned on successful
# completion of a training job with hyperparameter tuning includes a list
# of HyperparameterOutput objects, one for each successful trial.
- "allMetrics": [ # All recorded object metrics for this trial. This field is not currently
- # populated.
- { # An observed value of a metric.
- "objectiveValue": 3.14, # The objective value at this training step.
- "trainingStep": "A String", # The global training step for this metric.
- },
- ],
- "hyperparameters": { # The hyperparameters given to this trial.
- "a_key": "A String",
- },
"trialId": "A String", # The trial id for these results.
"endTime": "A String", # Output only. End time for the trial.
- "isTrialStoppedEarly": True or False, # True if the trial is stopped early.
"startTime": "A String", # Output only. Start time for the trial.
+ "isTrialStoppedEarly": True or False, # True if the trial is stopped early.
"finalMetric": { # An observed value of a metric. # The final objective metric seen for this trial.
- "objectiveValue": 3.14, # The objective value at this training step.
"trainingStep": "A String", # The global training step for this metric.
+ "objectiveValue": 3.14, # The objective value at this training step.
},
"builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
# Only set for trials of built-in algorithms jobs that have succeeded.
- "framework": "A String", # Framework on which the built-in algorithm was trained.
"modelPath": "A String", # The Cloud Storage path to the `model/` directory where the training job
# saves the trained model. Only set for successful jobs that don't use
# hyperparameter tuning.
"pythonVersion": "A String", # Python version on which the built-in algorithm was trained.
"runtimeVersion": "A String", # AI Platform runtime version on which the built-in algorithm was
# trained.
+ "framework": "A String", # Framework on which the built-in algorithm was trained.
},
"state": "A String", # Output only. The detailed state of the trial.
+ "allMetrics": [ # All recorded object metrics for this trial. This field is not currently
+ # populated.
+ { # An observed value of a metric.
+ "trainingStep": "A String", # The global training step for this metric.
+ "objectiveValue": 3.14, # The objective value at this training step.
+ },
+ ],
+ "hyperparameters": { # The hyperparameters given to this trial.
+ "a_key": "A String",
+ },
},
],
"hyperparameterMetricTag": "A String", # The TensorFlow summary tag name used for optimizing hyperparameter tuning
@@ -3909,17 +3963,6 @@
# Only set for hyperparameter tuning jobs.
"isHyperparameterTuningJob": True or False, # Whether this job is a hyperparameter tuning job.
"consumedMLUnits": 3.14, # The amount of ML units consumed by the job.
- "isBuiltInAlgorithmJob": True or False, # Whether this job is a built-in Algorithm job.
- "builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
- # Only set for built-in algorithms jobs.
- "framework": "A String", # Framework on which the built-in algorithm was trained.
- "modelPath": "A String", # The Cloud Storage path to the `model/` directory where the training job
- # saves the trained model. Only set for successful jobs that don't use
- # hyperparameter tuning.
- "pythonVersion": "A String", # Python version on which the built-in algorithm was trained.
- "runtimeVersion": "A String", # AI Platform runtime version on which the built-in algorithm was
- # trained.
- },
},
"createTime": "A String", # Output only. When the job was created.
"labels": { # Optional. One or more labels that you can add, to organize your jobs.
@@ -3929,49 +3972,6 @@
# <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
"a_key": "A String",
},
- "predictionInput": { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
- "outputPath": "A String", # Required. The output Google Cloud Storage location.
- "outputDataFormat": "A String", # Optional. Format of the output data files, defaults to JSON.
- "dataFormat": "A String", # Required. The format of the input data files.
- "batchSize": "A String", # Optional. Number of records per batch, defaults to 64.
- # The service will buffer batch_size number of records in memory before
- # invoking one Tensorflow prediction call internally. So take the record
- # size and memory available into consideration when setting this parameter.
- "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this batch
- # prediction. If not set, AI Platform will pick the runtime version used
- # during the CreateVersion request for this model version, or choose the
- # latest stable version when model version information is not available
- # such as when the model is specified by uri.
- "inputPaths": [ # Required. The Cloud Storage location of the input data files. May contain
- # <a href="/storage/docs/gsutil/addlhelp/WildcardNames">wildcards</a>.
- "A String",
- ],
- "region": "A String", # Required. The Google Compute Engine region to run the prediction job in.
- # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
- # for AI Platform services.
- "versionName": "A String", # Use this field if you want to specify a version of the model to use. The
- # string is formatted the same way as `model_version`, with the addition
- # of the version information:
- #
- # `"projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION"`
- "modelName": "A String", # Use this field if you want to use the default version for the specified
- # model. The string must use the following format:
- #
- # `"projects/YOUR_PROJECT/models/YOUR_MODEL"`
- "uri": "A String", # Use this field if you want to specify a Google Cloud Storage path for
- # the model to use.
- "maxWorkerCount": "A String", # Optional. The maximum number of workers to be used for parallel processing.
- # Defaults to 10 if not specified.
- "signatureName": "A String", # Optional. The name of the signature defined in the SavedModel to use for
- # this job. Please refer to
- # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
- # for information about how to use signatures.
- #
- # Defaults to
- # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
- # , which is "serving_default".
- },
- "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
}
updateMask: string, Required. Specifies the path, relative to `Job`, of the field to update.
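For example, updating only a job's labels with this method means sending the new labels in the body and naming the field in updateMask. A hedged sketch (project and job names are placeholders):

```
# Sketch only: change just the labels of an existing job via patch().
# The project and job names are hypothetical placeholders; including the
# current etag in the body is suggested for concurrency control.
from googleapiclient import discovery

ml = discovery.build("ml", "v1")

updated = ml.projects().jobs().patch(
    name="projects/my-project/jobs/my_job_001",
    updateMask="labels",
    body={"labels": {"team": "research", "stage": "experiment"}},
).execute()

print(updated.get("labels"))
```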
@@ -4002,6 +4002,49 @@
An object of the form:
{ # Represents a training or prediction job.
+ "predictionInput": { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
+ "versionName": "A String", # Use this field if you want to specify a version of the model to use. The
+ # string is formatted the same way as `model_version`, with the addition
+ # of the version information:
+ #
+ # `"projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION"`
+ "modelName": "A String", # Use this field if you want to use the default version for the specified
+ # model. The string must use the following format:
+ #
+ # `"projects/YOUR_PROJECT/models/YOUR_MODEL"`
+ "uri": "A String", # Use this field if you want to specify a Google Cloud Storage path for
+ # the model to use.
+ "maxWorkerCount": "A String", # Optional. The maximum number of workers to be used for parallel processing.
+ # Defaults to 10 if not specified.
+ "signatureName": "A String", # Optional. The name of the signature defined in the SavedModel to use for
+ # this job. Please refer to
+ # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
+ # for information about how to use signatures.
+ #
+ # Defaults to
+ # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
+ # , which is "serving_default".
+ "outputPath": "A String", # Required. The output Google Cloud Storage location.
+ "outputDataFormat": "A String", # Optional. Format of the output data files, defaults to JSON.
+ "dataFormat": "A String", # Required. The format of the input data files.
+ "batchSize": "A String", # Optional. Number of records per batch, defaults to 64.
+ # The service will buffer batch_size number of records in memory before
+ # invoking one Tensorflow prediction call internally. So take the record
+ # size and memory available into consideration when setting this parameter.
+ "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this batch
+ # prediction. If not set, AI Platform will pick the runtime version used
+ # during the CreateVersion request for this model version, or choose the
+ # latest stable version when model version information is not available
+ # such as when the model is specified by uri.
+ "inputPaths": [ # Required. The Cloud Storage location of the input data files. May contain
+ # <a href="/storage/docs/gsutil/addlhelp/WildcardNames">wildcards</a>.
+ "A String",
+ ],
+ "region": "A String", # Required. The Google Compute Engine region to run the prediction job in.
+ # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
+ # for AI Platform services.
+ },
+ "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
"etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
# prevent simultaneous updates of a job from overwriting each other.
# It is strongly suggested that systems make use of the `etag` in the
@@ -4014,6 +4057,21 @@
# command-line arguments and/or in a YAML configuration file referenced from
# the --config command-line argument. For details, see the guide to [submitting
# a training job](/ai-platform/training/docs/training-jobs).
+ "workerCount": "A String", # Optional. The number of worker replicas to use for the training job. Each
+ # replica in the cluster will be of the type specified in `worker_type`.
+ #
+ # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+ # set this value, you must also set `worker_type`.
+ #
+ # The default value is zero.
+ "pythonModule": "A String", # Required. The Python module name to run after installing the packages.
+ "args": [ # Optional. Command-line arguments passed to the training application when it
+ # starts. If your job uses a custom container, then the arguments are passed
+ # to the container's <a class="external" target="_blank"
+ # href="https://docs.docker.com/engine/reference/builder/#entrypoint">
+ # `ENTRYPOINT`</a> command.
+ "A String",
+ ],
"parameterServerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
#
# You should only set `parameterServerConfig.acceleratorConfig` if
@@ -4025,29 +4083,6 @@
# your parameter server. If `parameterServerConfig.imageUri` has not been
# set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
- "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
- # the one used in the custom container. This field is required if the replica
- # is a TPU worker that uses a custom container. Otherwise, do not specify
- # this field. This must be a [runtime version that currently supports
- # training with
- # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
- #
- # Note that the version of TensorFlow included in a runtime version may
- # differ from the numbering of the runtime version itself, because it may
- # have a different [patch
- # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
- # In this field, you must specify the runtime version (TensorFlow minor
- # version). For example, if your custom container runs TensorFlow `1.x.y`,
- # specify `1.x`.
- "containerCommand": [ # The command with which the replica's custom container is run.
- # If provided, it will override default ENTRYPOINT of the docker image.
- # If not provided, the docker image's ENTRYPOINT is used.
- # It cannot be set if custom container image is
- # not provided.
- # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
- # both cannot be set at the same time.
- "A String",
- ],
"imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
# Registry. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
@@ -4080,6 +4115,29 @@
"count": "A String", # The number of accelerators to attach to each machine running the job.
"type": "A String", # The type of accelerator to use.
},
+ "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+ # the one used in the custom container. This field is required if the replica
+ # is a TPU worker that uses a custom container. Otherwise, do not specify
+ # this field. This must be a [runtime version that currently supports
+ # training with
+ # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+ #
+ # Note that the version of TensorFlow included in a runtime version may
+ # differ from the numbering of the runtime version itself, because it may
+ # have a different [patch
+ # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+ # In this field, you must specify the runtime version (TensorFlow minor
+ # version). For example, if your custom container runs TensorFlow `1.x.y`,
+ # specify `1.x`.
+ "containerCommand": [ # The command with which the replica's custom container is run.
+ # If provided, it will override default ENTRYPOINT of the docker image.
+ # If not provided, the docker image's ENTRYPOINT is used.
+ # It cannot be set if custom container image is
+ # not provided.
+ # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+ # both cannot be set at the same time.
+ "A String",
+ ],
},
"encryptionConfig": { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
# protect resources created by a training job, instead of using Google's
@@ -4096,14 +4154,8 @@
# `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
},
"hyperparameters": { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
- "hyperparameterMetricTag": "A String", # Optional. The TensorFlow summary tag name to use for optimizing trials. For
- # current versions of TensorFlow, this tag name should exactly match what is
- # shown in TensorBoard, including all scopes. For versions of TensorFlow
- # prior to 0.12, this should be only the tag passed to tf.Summary.
- # By default, "training/hptuning/metric" will be used.
"params": [ # Required. The set of parameters to tune.
{ # Represents a single hyperparameter to optimize.
- "type": "A String", # Required. The type of the parameter.
"categoricalValues": [ # Required if type is `CATEGORICAL`. The list of possible categories.
"A String",
],
@@ -4126,6 +4178,7 @@
"maxValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
# should be unset if type is `CATEGORICAL`. This value should be integers if
# type is `INTEGER`.
+ "type": "A String", # Required. The type of the parameter.
},
],
"enableTrialEarlyStopping": True or False, # Optional. Indicates if the hyperparameter tuning job enables auto trial
@@ -4161,6 +4214,11 @@
# tuning job.
# Uses the default AI Platform hyperparameter tuning
# algorithm if unspecified.
+ "hyperparameterMetricTag": "A String", # Optional. The TensorFlow summary tag name to use for optimizing trials. For
+ # current versions of TensorFlow, this tag name should exactly match what is
+ # shown in TensorBoard, including all scopes. For versions of TensorFlow
+ # prior to 0.12, this should be only the tag passed to tf.Summary.
+ # By default, "training/hptuning/metric" will be used.
},
"workerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
#
@@ -4173,29 +4231,6 @@
# worker. If `workerConfig.imageUri` has not been set, AI Platform uses
# the value of `masterConfig.imageUri`. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
- "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
- # the one used in the custom container. This field is required if the replica
- # is a TPU worker that uses a custom container. Otherwise, do not specify
- # this field. This must be a [runtime version that currently supports
- # training with
- # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
- #
- # Note that the version of TensorFlow included in a runtime version may
- # differ from the numbering of the runtime version itself, because it may
- # have a different [patch
- # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
- # In this field, you must specify the runtime version (TensorFlow minor
- # version). For example, if your custom container runs TensorFlow `1.x.y`,
- # specify `1.x`.
- "containerCommand": [ # The command with which the replica's custom container is run.
- # If provided, it will override default ENTRYPOINT of the docker image.
- # If not provided, the docker image's ENTRYPOINT is used.
- # It cannot be set if custom container image is
- # not provided.
- # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
- # both cannot be set at the same time.
- "A String",
- ],
"imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
# Registry. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
@@ -4228,6 +4263,29 @@
"count": "A String", # The number of accelerators to attach to each machine running the job.
"type": "A String", # The type of accelerator to use.
},
+ "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+ # the one used in the custom container. This field is required if the replica
+ # is a TPU worker that uses a custom container. Otherwise, do not specify
+ # this field. This must be a [runtime version that currently supports
+ # training with
+ # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+ #
+ # Note that the version of TensorFlow included in a runtime version may
+ # differ from the numbering of the runtime version itself, because it may
+ # have a different [patch
+ # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+ # In this field, you must specify the runtime version (TensorFlow minor
+ # version). For example, if your custom container runs TensorFlow `1.x.y`,
+ # specify `1.x`.
+ "containerCommand": [ # The command with which the replica's custom container is run.
+ # If provided, it overrides the default ENTRYPOINT of the Docker image.
+ # If not provided, the Docker image's ENTRYPOINT is used.
+ # It cannot be set if a custom container image is
+ # not provided.
+ # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+ # both cannot be set at the same time.
+ "A String",
+ ],
},
"parameterServerCount": "A String", # Optional. The number of parameter server replicas to use for the training
# job. Each replica in the cluster will be of the type specified in
@@ -4320,8 +4378,6 @@
#
# This value must be present when `scaleTier` is set to `CUSTOM` and
# `evaluatorCount` is greater than zero.
- "region": "A String", # Required. The region to run the training job in. See the [available
- # regions](/ai-platform/training/docs/regions) for AI Platform Training.
"workerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
# job's worker nodes.
#
@@ -4338,6 +4394,8 @@
#
# This value must be present when `scaleTier` is set to `CUSTOM` and
# `workerCount` is greater than zero.
+ "region": "A String", # Required. The region to run the training job in. See the [available
+ # regions](/ai-platform/training/docs/regions) for AI Platform Training.
"parameterServerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
# job's parameter server.
#
@@ -4361,29 +4419,6 @@
# `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
# about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
- "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
- # the one used in the custom container. This field is required if the replica
- # is a TPU worker that uses a custom container. Otherwise, do not specify
- # this field. This must be a [runtime version that currently supports
- # training with
- # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
- #
- # Note that the version of TensorFlow included in a runtime version may
- # differ from the numbering of the runtime version itself, because it may
- # have a different [patch
- # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
- # In this field, you must specify the runtime version (TensorFlow minor
- # version). For example, if your custom container runs TensorFlow `1.x.y`,
- # specify `1.x`.
- "containerCommand": [ # The command with which the replica's custom container is run.
- # If provided, it will override default ENTRYPOINT of the docker image.
- # If not provided, the docker image's ENTRYPOINT is used.
- # It cannot be set if custom container image is
- # not provided.
- # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
- # both cannot be set at the same time.
- "A String",
- ],
"imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
# Registry. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
@@ -4416,6 +4451,29 @@
"count": "A String", # The number of accelerators to attach to each machine running the job.
"type": "A String", # The type of accelerator to use.
},
+ "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+ # the one used in the custom container. This field is required if the replica
+ # is a TPU worker that uses a custom container. Otherwise, do not specify
+ # this field. This must be a [runtime version that currently supports
+ # training with
+ # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+ #
+ # Note that the version of TensorFlow included in a runtime version may
+ # differ from the numbering of the runtime version itself, because it may
+ # have a different [patch
+ # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+ # In this field, you must specify the runtime version (TensorFlow minor
+ # version). For example, if your custom container runs TensorFlow `1.x.y`,
+ # specify `1.x`.
+ "containerCommand": [ # The command with which the replica's custom container is run.
+ # If provided, it overrides the default ENTRYPOINT of the Docker image.
+ # If not provided, the Docker image's ENTRYPOINT is used.
+ # It cannot be set if a custom container image is
+ # not provided.
+ # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+ # both cannot be set at the same time.
+ "A String",
+ ],
},
"scaleTier": "A String", # Required. Specifies the machine types, the number of replicas for workers
# and parameter servers.
@@ -4437,16 +4495,6 @@
#
# Read more about the Python versions available for [each runtime
# version](/ml-engine/docs/runtime-version-list).
- "network": "A String", # Optional. The full name of the Google Compute Engine
- # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
- # is peered. For example, projects/12345/global/networks/myVPC. Format is of
- # the form projects/{project}/global/networks/{network}. Where {project} is a
- # project number, as in '12345', and {network} is network name.".
- #
- # Private services access must already be configured for the network. If left
- # unspecified, the Job is not peered with any network. Learn more -
- # Connecting Job to user network over private
- # IP.
"scheduling": { # All parameters related to scheduling of training jobs. # Optional. Scheduling options for a training job.
"maxWaitTime": "A String",
"maxRunningTime": "A String", # Optional. The maximum job running time, expressed in seconds. The field can
@@ -4473,6 +4521,16 @@
# ...
# ```
},
+ "network": "A String", # Optional. The full name of the Google Compute Engine
+ # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
+ # is peered. For example, projects/12345/global/networks/myVPC. The format is
+ # projects/{project}/global/networks/{network}, where {project} is a
+ # project number, as in '12345', and {network} is the network name.
+ #
+ # Private services access must already be configured for the network. If left
+ # unspecified, the Job is not peered with any network. Learn more about
+ # connecting a Job to your network over private IP.
"evaluatorConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
#
# You should only set `evaluatorConfig.acceleratorConfig` if
@@ -4484,29 +4542,6 @@
# your evaluator. If `evaluatorConfig.imageUri` has not been
# set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
- "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
- # the one used in the custom container. This field is required if the replica
- # is a TPU worker that uses a custom container. Otherwise, do not specify
- # this field. This must be a [runtime version that currently supports
- # training with
- # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
- #
- # Note that the version of TensorFlow included in a runtime version may
- # differ from the numbering of the runtime version itself, because it may
- # have a different [patch
- # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
- # In this field, you must specify the runtime version (TensorFlow minor
- # version). For example, if your custom container runs TensorFlow `1.x.y`,
- # specify `1.x`.
- "containerCommand": [ # The command with which the replica's custom container is run.
- # If provided, it will override default ENTRYPOINT of the docker image.
- # If not provided, the docker image's ENTRYPOINT is used.
- # It cannot be set if custom container image is
- # not provided.
- # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
- # both cannot be set at the same time.
- "A String",
- ],
"imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
# Registry. Learn more about [configuring custom
# containers](/ai-platform/training/docs/distributed-training-containers).
@@ -4539,6 +4574,29 @@
"count": "A String", # The number of accelerators to attach to each machine running the job.
"type": "A String", # The type of accelerator to use.
},
+ "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+ # the one used in the custom container. This field is required if the replica
+ # is a TPU worker that uses a custom container. Otherwise, do not specify
+ # this field. This must be a [runtime version that currently supports
+ # training with
+ # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+ #
+ # Note that the version of TensorFlow included in a runtime version may
+ # differ from the numbering of the runtime version itself, because it may
+ # have a different [patch
+ # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+ # In this field, you must specify the runtime version (TensorFlow minor
+ # version). For example, if your custom container runs TensorFlow `1.x.y`,
+ # specify `1.x`.
+ "containerCommand": [ # The command with which the replica's custom container is run.
+ # If provided, it overrides the default ENTRYPOINT of the Docker image.
+ # If not provided, the Docker image's ENTRYPOINT is used.
+ # It cannot be set if a custom container image is
+ # not provided.
+ # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+ # both cannot be set at the same time.
+ "A String",
+ ],
},
"useChiefInTfConfig": True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
# variable when training with a custom container. Defaults to `false`. [Learn
@@ -4547,21 +4605,6 @@
#
# This field has no effect for training jobs that don't use a custom
# container.
- "workerCount": "A String", # Optional. The number of worker replicas to use for the training job. Each
- # replica in the cluster will be of the type specified in `worker_type`.
- #
- # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
- # set this value, you must also set `worker_type`.
- #
- # The default value is zero.
- "pythonModule": "A String", # Required. The Python module name to run after installing the packages.
- "args": [ # Optional. Command-line arguments passed to the training application when it
- # starts. If your job uses a custom container, then the arguments are passed
- # to the container's <a class="external" target="_blank"
- # href="https://docs.docker.com/engine/reference/builder/#entrypoint">
- # `ENTRYPOINT`</a> command.
- "A String",
- ],
},
"state": "A String", # Output only. The detailed state of a job.
"jobId": "A String", # Required. The user-specified id of the job.
@@ -4574,41 +4617,52 @@
"predictionCount": "A String", # The number of generated predictions.
},
"trainingOutput": { # Represents results of a training job. Output only. # The current training job result.
+ "isBuiltInAlgorithmJob": True or False, # Whether this job is a built-in Algorithm job.
+ "builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
+ # Only set for built-in algorithms jobs.
+ "modelPath": "A String", # The Cloud Storage path to the `model/` directory where the training job
+ # saves the trained model. Only set for successful jobs that don't use
+ # hyperparameter tuning.
+ "pythonVersion": "A String", # Python version on which the built-in algorithm was trained.
+ "runtimeVersion": "A String", # AI Platform runtime version on which the built-in algorithm was
+ # trained.
+ "framework": "A String", # Framework on which the built-in algorithm was trained.
+ },
"trials": [ # Results for individual Hyperparameter trials.
# Only set for hyperparameter tuning jobs.
{ # Represents the result of a single hyperparameter tuning trial from a
# training job. The TrainingOutput object that is returned on successful
# completion of a training job with hyperparameter tuning includes a list
# of HyperparameterOutput objects, one for each successful trial.
- "allMetrics": [ # All recorded object metrics for this trial. This field is not currently
- # populated.
- { # An observed value of a metric.
- "objectiveValue": 3.14, # The objective value at this training step.
- "trainingStep": "A String", # The global training step for this metric.
- },
- ],
- "hyperparameters": { # The hyperparameters given to this trial.
- "a_key": "A String",
- },
"trialId": "A String", # The trial id for these results.
"endTime": "A String", # Output only. End time for the trial.
- "isTrialStoppedEarly": True or False, # True if the trial is stopped early.
"startTime": "A String", # Output only. Start time for the trial.
+ "isTrialStoppedEarly": True or False, # True if the trial is stopped early.
"finalMetric": { # An observed value of a metric. # The final objective metric seen for this trial.
- "objectiveValue": 3.14, # The objective value at this training step.
"trainingStep": "A String", # The global training step for this metric.
+ "objectiveValue": 3.14, # The objective value at this training step.
},
"builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
# Only set for trials of built-in algorithms jobs that have succeeded.
- "framework": "A String", # Framework on which the built-in algorithm was trained.
"modelPath": "A String", # The Cloud Storage path to the `model/` directory where the training job
# saves the trained model. Only set for successful jobs that don't use
# hyperparameter tuning.
"pythonVersion": "A String", # Python version on which the built-in algorithm was trained.
"runtimeVersion": "A String", # AI Platform runtime version on which the built-in algorithm was
# trained.
+ "framework": "A String", # Framework on which the built-in algorithm was trained.
},
"state": "A String", # Output only. The detailed state of the trial.
+ "allMetrics": [ # All recorded object metrics for this trial. This field is not currently
+ # populated.
+ { # An observed value of a metric.
+ "trainingStep": "A String", # The global training step for this metric.
+ "objectiveValue": 3.14, # The objective value at this training step.
+ },
+ ],
+ "hyperparameters": { # The hyperparameters given to this trial.
+ "a_key": "A String",
+ },
},
],
"hyperparameterMetricTag": "A String", # The TensorFlow summary tag name used for optimizing hyperparameter tuning
@@ -4619,17 +4673,6 @@
# Only set for hyperparameter tuning jobs.
"isHyperparameterTuningJob": True or False, # Whether this job is a hyperparameter tuning job.
"consumedMLUnits": 3.14, # The amount of ML units consumed by the job.
- "isBuiltInAlgorithmJob": True or False, # Whether this job is a built-in Algorithm job.
- "builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
- # Only set for built-in algorithms jobs.
- "framework": "A String", # Framework on which the built-in algorithm was trained.
- "modelPath": "A String", # The Cloud Storage path to the `model/` directory where the training job
- # saves the trained model. Only set for successful jobs that don't use
- # hyperparameter tuning.
- "pythonVersion": "A String", # Python version on which the built-in algorithm was trained.
- "runtimeVersion": "A String", # AI Platform runtime version on which the built-in algorithm was
- # trained.
- },
},
"createTime": "A String", # Output only. When the job was created.
"labels": { # Optional. One or more labels that you can add, to organize your jobs.
@@ -4639,49 +4682,6 @@
# <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
"a_key": "A String",
},
- "predictionInput": { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
- "outputPath": "A String", # Required. The output Google Cloud Storage location.
- "outputDataFormat": "A String", # Optional. Format of the output data files, defaults to JSON.
- "dataFormat": "A String", # Required. The format of the input data files.
- "batchSize": "A String", # Optional. Number of records per batch, defaults to 64.
- # The service will buffer batch_size number of records in memory before
- # invoking one Tensorflow prediction call internally. So take the record
- # size and memory available into consideration when setting this parameter.
- "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this batch
- # prediction. If not set, AI Platform will pick the runtime version used
- # during the CreateVersion request for this model version, or choose the
- # latest stable version when model version information is not available
- # such as when the model is specified by uri.
- "inputPaths": [ # Required. The Cloud Storage location of the input data files. May contain
- # <a href="/storage/docs/gsutil/addlhelp/WildcardNames">wildcards</a>.
- "A String",
- ],
- "region": "A String", # Required. The Google Compute Engine region to run the prediction job in.
- # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
- # for AI Platform services.
- "versionName": "A String", # Use this field if you want to specify a version of the model to use. The
- # string is formatted the same way as `model_version`, with the addition
- # of the version information:
- #
- # `"projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION"`
- "modelName": "A String", # Use this field if you want to use the default version for the specified
- # model. The string must use the following format:
- #
- # `"projects/YOUR_PROJECT/models/YOUR_MODEL"`
- "uri": "A String", # Use this field if you want to specify a Google Cloud Storage path for
- # the model to use.
- "maxWorkerCount": "A String", # Optional. The maximum number of workers to be used for parallel processing.
- # Defaults to 10 if not specified.
- "signatureName": "A String", # Optional. The name of the signature defined in the SavedModel to use for
- # this job. Please refer to
- # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
- # for information about how to use signatures.
- #
- # Defaults to
- # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
- # , which is "serving_default".
- },
- "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
}</pre>
</div>
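
The generated reference above only shows the Job schema. As a rough companion, here is a minimal sketch (not produced by the doc generator) of submitting a custom-container training job with this client. It assumes discovery.build("ml", "v1") with application-default credentials; the project, job ID, region, machine type, and image URI are placeholders. Per the field notes above, containerCommand is set instead of trainingInput.args, and masterConfig.imageUri is used without runtimeVersion.

from googleapiclient import discovery

# Build the AI Platform Training and Prediction client (ml, v1); credentials
# come from the environment (application-default credentials).
ml = discovery.build("ml", "v1")

# Minimal Job body for a custom-container training job. All concrete values
# below are placeholders.
job_body = {
    "jobId": "example_custom_container_job",
    "trainingInput": {
        "scaleTier": "CUSTOM",
        "masterType": "n1-standard-8",
        "region": "us-central1",
        "masterConfig": {
            "imageUri": "gcr.io/YOUR_PROJECT/trainer:latest",
            # containerCommand overrides the image ENTRYPOINT; it is mutually
            # exclusive with trainingInput.args.
            "containerCommand": ["python3", "-m", "trainer.task"],
        },
    },
}

response = ml.projects().jobs().create(
    parent="projects/YOUR_PROJECT",
    body=job_body,
).execute()
print(response.get("state"))
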
@@ -4699,6 +4699,11 @@
The object takes the form of:
{ # Request message for `SetIamPolicy` method.
+ "updateMask": "A String", # OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only
+ # the fields in the mask will be modified. If no mask is provided, the
+ # following default mask is used:
+ #
+ # `paths: "bindings, etag"`
"policy": { # An Identity and Access Management (IAM) policy, which specifies access # REQUIRED: The complete policy to be applied to the `resource`. The size of
# the policy is limited to a few 10s of KB. An empty policy is a
# valid policy but certain Cloud Platform services (such as Projects)
@@ -4769,18 +4774,6 @@
#
# For a description of IAM and its features, see the
# [IAM documentation](https://cloud.google.com/iam/docs/).
- "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
- # prevent simultaneous updates of a policy from overwriting each other.
- # It is strongly suggested that systems make use of the `etag` in the
- # read-modify-write cycle to perform policy updates in order to avoid race
- # conditions: An `etag` is returned in the response to `getIamPolicy`, and
- # systems are expected to put that etag in the request to `setIamPolicy` to
- # ensure that their change will be applied to the same version of the policy.
- #
- # **Important:** If you use IAM Conditions, you must include the `etag` field
- # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
- # you to overwrite a version `3` policy with a version `1` policy, and all of
- # the conditions in the version `3` policy are lost.
"version": 42, # Specifies the format of the policy.
#
# Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
@@ -4936,15 +4929,15 @@
# The exact variables and functions that may be referenced within an expression
# are determined by the service that evaluates it. See the service
# documentation for additional information.
+ "description": "A String", # Optional. Description of the expression. This is a longer text which
+ # describes the expression, e.g. when hovered over it in a UI.
+ "expression": "A String", # Textual representation of an expression in Common Expression Language
+ # syntax.
"title": "A String", # Optional. Title for the expression, i.e. a short string describing
# its purpose. This can be used e.g. in UIs which allow to enter the
# expression.
"location": "A String", # Optional. String indicating the location of the expression for error
# reporting, e.g. a file name and a position in the file.
- "description": "A String", # Optional. Description of the expression. This is a longer text which
- # describes the expression, e.g. when hovered over it in a UI.
- "expression": "A String", # Textual representation of an expression in Common Expression Language
- # syntax.
},
"members": [ # Specifies the identities requesting access for a Cloud Platform resource.
# `members` can have the following values:
@@ -4995,12 +4988,19 @@
# For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
},
],
+ "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
+ # prevent simultaneous updates of a policy from overwriting each other.
+ # It is strongly suggested that systems make use of the `etag` in the
+ # read-modify-write cycle to perform policy updates in order to avoid race
+ # conditions: An `etag` is returned in the response to `getIamPolicy`, and
+ # systems are expected to put that etag in the request to `setIamPolicy` to
+ # ensure that their change will be applied to the same version of the policy.
+ #
+ # **Important:** If you use IAM Conditions, you must include the `etag` field
+ # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+ # you to overwrite a version `3` policy with a version `1` policy, and all of
+ # the conditions in the version `3` policy are lost.
},
- "updateMask": "A String", # OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only
- # the fields in the mask will be modified. If no mask is provided, the
- # following default mask is used:
- #
- # `paths: "bindings, etag"`
}
x__xgafv: string, V1 error format.
@@ -5078,18 +5078,6 @@
#
# For a description of IAM and its features, see the
# [IAM documentation](https://cloud.google.com/iam/docs/).
- "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
- # prevent simultaneous updates of a policy from overwriting each other.
- # It is strongly suggested that systems make use of the `etag` in the
- # read-modify-write cycle to perform policy updates in order to avoid race
- # conditions: An `etag` is returned in the response to `getIamPolicy`, and
- # systems are expected to put that etag in the request to `setIamPolicy` to
- # ensure that their change will be applied to the same version of the policy.
- #
- # **Important:** If you use IAM Conditions, you must include the `etag` field
- # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
- # you to overwrite a version `3` policy with a version `1` policy, and all of
- # the conditions in the version `3` policy are lost.
"version": 42, # Specifies the format of the policy.
#
# Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
@@ -5245,15 +5233,15 @@
# The exact variables and functions that may be referenced within an expression
# are determined by the service that evaluates it. See the service
# documentation for additional information.
+ "description": "A String", # Optional. Description of the expression. This is a longer text which
+ # describes the expression, e.g. when hovered over it in a UI.
+ "expression": "A String", # Textual representation of an expression in Common Expression Language
+ # syntax.
"title": "A String", # Optional. Title for the expression, i.e. a short string describing
# its purpose. This can be used e.g. in UIs which allow to enter the
# expression.
"location": "A String", # Optional. String indicating the location of the expression for error
# reporting, e.g. a file name and a position in the file.
- "description": "A String", # Optional. Description of the expression. This is a longer text which
- # describes the expression, e.g. when hovered over it in a UI.
- "expression": "A String", # Textual representation of an expression in Common Expression Language
- # syntax.
},
"members": [ # Specifies the identities requesting access for a Cloud Platform resource.
# `members` can have the following values:
@@ -5304,6 +5292,18 @@
# For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
},
],
+ "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
+ # prevent simultaneous updates of a policy from overwriting each other.
+ # It is strongly suggested that systems make use of the `etag` in the
+ # read-modify-write cycle to perform policy updates in order to avoid race
+ # conditions: An `etag` is returned in the response to `getIamPolicy`, and
+ # systems are expected to put that etag in the request to `setIamPolicy` to
+ # ensure that their change will be applied to the same version of the policy.
+ #
+ # **Important:** If you use IAM Conditions, you must include the `etag` field
+ # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+ # you to overwrite a version `3` policy with a version `1` policy, and all of
+ # the conditions in the version `3` policy are lost.
}</pre>
</div>
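
The etag notes above describe a read-modify-write cycle. A minimal sketch of that cycle with this client, assuming the same discovery.build("ml", "v1") setup; the resource name, role, and member are placeholders, and IAM Conditions (policy version 3) are not handled here.

from googleapiclient import discovery

ml = discovery.build("ml", "v1")
resource = "projects/YOUR_PROJECT/jobs/YOUR_JOB"  # placeholder resource name

# Read the current policy; the response includes the etag used for
# optimistic concurrency control.
policy = ml.projects().jobs().getIamPolicy(resource=resource).execute()

# Modify the policy locally (role and member are placeholders).
policy.setdefault("bindings", []).append({
    "role": "roles/ml.viewer",
    "members": ["user:alice@example.com"],
})

# Write it back. The body echoes the etag read above, so the update fails
# instead of silently overwriting a policy that changed in the meantime.
updated = ml.projects().jobs().setIamPolicy(
    resource=resource,
    body={"policy": policy},
).execute()
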