docs: update docs (#916)

* fix: re-run script

* test: fix noxfile

diff --git a/docs/dyn/ml_v1.projects.jobs.html b/docs/dyn/ml_v1.projects.jobs.html
index 979bc29..3952cf6 100644
--- a/docs/dyn/ml_v1.projects.jobs.html
+++ b/docs/dyn/ml_v1.projects.jobs.html
@@ -87,7 +87,7 @@
   <code><a href="#getIamPolicy">getIamPolicy(resource, options_requestedPolicyVersion=None, x__xgafv=None)</a></code></p>
 <p class="firstline">Gets the access control policy for a resource.</p>
 <p class="toc_element">
-  <code><a href="#list">list(parent, pageToken=None, pageSize=None, filter=None, x__xgafv=None)</a></code></p>
+  <code><a href="#list">list(parent, filter=None, pageToken=None, pageSize=None, x__xgafv=None)</a></code></p>
 <p class="firstline">Lists the jobs in the project.</p>
 <p class="toc_element">
   <code><a href="#list_next">list_next(previous_request, previous_response)</a></code></p>
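> Note: the hunk above only reorders the documented keyword parameters of `list`; calls through the Python client are unaffected because arguments are passed by name. A minimal sketch of listing jobs with pagination, assuming a hypothetical project `my-project`, Application Default Credentials, and an illustrative filter expression:
>
> ```python
> from googleapiclient import discovery
>
> # Build the ML Engine (AI Platform) v1 client with default credentials.
> ml = discovery.build('ml', 'v1')
>
> request = ml.projects().jobs().list(
>     parent='projects/my-project',   # hypothetical project
>     filter='state=SUCCEEDED',       # illustrative filter; see the API docs for syntax
>     pageSize=50,
> )
> while request is not None:
>     response = request.execute()
>     for job in response.get('jobs', []):
>         print(job.get('jobId'), job.get('state'))
>     # list_next returns None once there are no further pages.
>     request = ml.projects().jobs().list_next(
>         previous_request=request, previous_response=response)
> ```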
@@ -144,6 +144,49 @@
     The object takes the form of:
 
 { # Represents a training or prediction job.
+    &quot;predictionInput&quot;: { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
+      &quot;versionName&quot;: &quot;A String&quot;, # Use this field if you want to specify a version of the model to use. The
+          # string is formatted the same way as `model_version`, with the addition
+          # of the version information:
+          #
+          # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION&quot;`
+      &quot;modelName&quot;: &quot;A String&quot;, # Use this field if you want to use the default version for the specified
+          # model. The string must use the following format:
+          #
+          # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL&quot;`
+      &quot;uri&quot;: &quot;A String&quot;, # Use this field if you want to specify a Google Cloud Storage path for
+          # the model to use.
+      &quot;maxWorkerCount&quot;: &quot;A String&quot;, # Optional. The maximum number of workers to be used for parallel processing.
+          # Defaults to 10 if not specified.
+      &quot;signatureName&quot;: &quot;A String&quot;, # Optional. The name of the signature defined in the SavedModel to use for
+          # this job. Please refer to
+          # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
+          # for information about how to use signatures.
+          #
+          # Defaults to
+          # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
+          # , which is &quot;serving_default&quot;.
+      &quot;outputPath&quot;: &quot;A String&quot;, # Required. The output Google Cloud Storage location.
+      &quot;outputDataFormat&quot;: &quot;A String&quot;, # Optional. Format of the output data files, defaults to JSON.
+      &quot;dataFormat&quot;: &quot;A String&quot;, # Required. The format of the input data files.
+      &quot;batchSize&quot;: &quot;A String&quot;, # Optional. Number of records per batch, defaults to 64.
+          # The service will buffer batch_size number of records in memory before
+          # invoking one Tensorflow prediction call internally. So take the record
+          # size and memory available into consideration when setting this parameter.
+      &quot;runtimeVersion&quot;: &quot;A String&quot;, # Optional. The AI Platform runtime version to use for this batch
+          # prediction. If not set, AI Platform will pick the runtime version used
+          # during the CreateVersion request for this model version, or choose the
+          # latest stable version when model version information is not available
+          # such as when the model is specified by uri.
+      &quot;inputPaths&quot;: [ # Required. The Cloud Storage location of the input data files. May contain
+          # &lt;a href=&quot;/storage/docs/gsutil/addlhelp/WildcardNames&quot;&gt;wildcards&lt;/a&gt;.
+        &quot;A String&quot;,
+      ],
+      &quot;region&quot;: &quot;A String&quot;, # Required. The Google Compute Engine region to run the prediction job in.
+          # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
+          # for AI Platform services.
+    },
+    &quot;errorMessage&quot;: &quot;A String&quot;, # Output only. The details of a failure or a cancellation.
     &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
         # prevent simultaneous updates of a job from overwriting each other.
         # It is strongly suggested that systems make use of the `etag` in the
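> Note: the `predictionInput` block moved above is the request half of a batch prediction job. A hedged sketch of submitting one is below; it assumes a `create` method on `projects().jobs()` (documented elsewhere in this file) plus hypothetical project, model, bucket, and job names, and the `'JSON'` data format is illustrative.
>
> ```python
> from googleapiclient import discovery
>
> ml = discovery.build('ml', 'v1')
>
> job = {
>     'jobId': 'batch_predict_001',                    # hypothetical job id
>     'predictionInput': {
>         'modelName': 'projects/my-project/models/my_model',
>         'dataFormat': 'JSON',                        # format of the input files
>         'inputPaths': ['gs://my-bucket/inputs/*'],   # wildcards are allowed
>         'outputPath': 'gs://my-bucket/outputs/',
>         'region': 'us-central1',
>         'maxWorkerCount': '10',                      # int64 fields are JSON strings
>     },
> }
>
> response = ml.projects().jobs().create(
>     parent='projects/my-project', body=job).execute()
> ```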
@@ -156,6 +199,21 @@
         # command-line arguments and/or in a YAML configuration file referenced from
         # the --config command-line argument. For details, see the guide to [submitting
         # a training job](/ai-platform/training/docs/training-jobs).
+      &quot;workerCount&quot;: &quot;A String&quot;, # Optional. The number of worker replicas to use for the training job. Each
+          # replica in the cluster will be of the type specified in `worker_type`.
+          #
+          # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+          # set this value, you must also set `worker_type`.
+          #
+          # The default value is zero.
+      &quot;pythonModule&quot;: &quot;A String&quot;, # Required. The Python module name to run after installing the packages.
+      &quot;args&quot;: [ # Optional. Command-line arguments passed to the training application when it
+          # starts. If your job uses a custom container, then the arguments are passed
+          # to the container&#x27;s &lt;a class=&quot;external&quot; target=&quot;_blank&quot;
+          # href=&quot;https://docs.docker.com/engine/reference/builder/#entrypoint&quot;&gt;
+          # `ENTRYPOINT`&lt;/a&gt; command.
+        &quot;A String&quot;,
+      ],
       &quot;parameterServerConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
           #
           # You should only set `parameterServerConfig.acceleratorConfig` if
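> Note: the relocated `workerCount`, `pythonModule`, and `args` fields live inside `trainingInput`, not `predictionInput`. A hedged sketch of a training job body follows; the machine types, runtime/Python versions, and the `packageUris` field are assumptions not shown in this excerpt.
>
> ```python
> training_job = {
>     'jobId': 'train_001',                                   # hypothetical job id
>     'trainingInput': {
>         'scaleTier': 'CUSTOM',
>         'masterType': 'n1-standard-8',                      # assumed machine type
>         'workerType': 'n1-standard-8',
>         'workerCount': '2',          # only valid with scaleTier CUSTOM and workerType set
>         'pythonModule': 'trainer.task',
>         'packageUris': ['gs://my-bucket/packages/trainer-0.1.tar.gz'],  # assumed field
>         'args': ['--epochs', '10'],  # forwarded to the trainer / container ENTRYPOINT
>         'region': 'us-central1',
>         'runtimeVersion': '2.1',                            # illustrative versions
>         'pythonVersion': '3.7',
>     },
> }
> ```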
@@ -167,29 +225,6 @@
           # your parameter server. If `parameterServerConfig.imageUri` has not been
           # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
           # containers](/ai-platform/training/docs/distributed-training-containers).
-        &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-            # the one used in the custom container. This field is required if the replica
-            # is a TPU worker that uses a custom container. Otherwise, do not specify
-            # this field. This must be a [runtime version that currently supports
-            # training with
-            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-            #
-            # Note that the version of TensorFlow included in a runtime version may
-            # differ from the numbering of the runtime version itself, because it may
-            # have a different [patch
-            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-            # In this field, you must specify the runtime version (TensorFlow minor
-            # version). For example, if your custom container runs TensorFlow `1.x.y`,
-            # specify `1.x`.
-        &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-            # If provided, it will override default ENTRYPOINT of the docker image.
-            # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-            # It cannot be set if custom container image is
-            # not provided.
-            # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-            # both cannot be set at the same time.
-          &quot;A String&quot;,
-        ],
         &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
             # Registry. Learn more about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -222,6 +257,29 @@
           &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
           &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
         },
+        &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+            # the one used in the custom container. This field is required if the replica
+            # is a TPU worker that uses a custom container. Otherwise, do not specify
+            # this field. This must be a [runtime version that currently supports
+            # training with
+            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+            #
+            # Note that the version of TensorFlow included in a runtime version may
+            # differ from the numbering of the runtime version itself, because it may
+            # have a different [patch
+            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+            # In this field, you must specify the runtime version (TensorFlow minor
+            # version). For example, if your custom container runs TensorFlow `1.x.y`,
+            # specify `1.x`.
+        &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+            # If provided, it will override default ENTRYPOINT of the docker image.
+            # If not provided, the docker image&#x27;s ENTRYPOINT is used.
+            # It cannot be set if custom container image is
+            # not provided.
+            # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+            # both cannot be set at the same time.
+          &quot;A String&quot;,
+        ],
       },
       &quot;encryptionConfig&quot;: { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
           # protect resources created by a training job, instead of using Google&#x27;s
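> Note: the `tpuTfVersion` and `containerCommand` fields reordered in this hunk belong to the per-replica config objects (`masterConfig`, `workerConfig`, `parameterServerConfig`, `evaluatorConfig`). A sketch of one such block, with an assumed image name and an illustrative accelerator type:
>
> ```python
> worker_config = {
>     'imageUri': 'gcr.io/my-project/trainer:latest',   # custom container (assumed name)
>     'acceleratorConfig': {
>         'type': 'NVIDIA_TESLA_K80',                   # illustrative accelerator type
>         'count': '2',
>     },
>     # containerCommand overrides the image's ENTRYPOINT and is mutually
>     # exclusive with TrainingInput.args: set one or the other, not both.
>     'containerCommand': ['python', '-m', 'trainer.task'],
>     # tpuTfVersion is set only when this replica is a TPU worker running a
>     # custom container, e.g. '2.1' for a container built on TensorFlow 2.1.x.
> }
> ```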
@@ -238,14 +296,8 @@
             # `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
       },
       &quot;hyperparameters&quot;: { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
-        &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # Optional. The TensorFlow summary tag name to use for optimizing trials. For
-            # current versions of TensorFlow, this tag name should exactly match what is
-            # shown in TensorBoard, including all scopes.  For versions of TensorFlow
-            # prior to 0.12, this should be only the tag passed to tf.Summary.
-            # By default, &quot;training/hptuning/metric&quot; will be used.
         &quot;params&quot;: [ # Required. The set of parameters to tune.
           { # Represents a single hyperparameter to optimize.
-            &quot;type&quot;: &quot;A String&quot;, # Required. The type of the parameter.
             &quot;categoricalValues&quot;: [ # Required if type is `CATEGORICAL`. The list of possible categories.
               &quot;A String&quot;,
             ],
@@ -268,6 +320,7 @@
             &quot;maxValue&quot;: 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
                 # should be unset if type is `CATEGORICAL`. This value should be integers if
                 # type is `INTEGER`.
+            &quot;type&quot;: &quot;A String&quot;, # Required. The type of the parameter.
           },
         ],
         &quot;enableTrialEarlyStopping&quot;: True or False, # Optional. Indicates if the hyperparameter tuning job enables auto trial
@@ -303,6 +356,11 @@
             # tuning job.
             # Uses the default AI Platform hyperparameter tuning
             # algorithm if unspecified.
+        &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # Optional. The TensorFlow summary tag name to use for optimizing trials. For
+            # current versions of TensorFlow, this tag name should exactly match what is
+            # shown in TensorBoard, including all scopes.  For versions of TensorFlow
+            # prior to 0.12, this should be only the tag passed to tf.Summary.
+            # By default, &quot;training/hptuning/metric&quot; will be used.
       },
       &quot;workerConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
           #
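> Note: putting the reordered hyperparameter fields together, a tuning spec might look like the sketch below. `goal`, `maxTrials`, `maxParallelTrials`, `parameterName`, `minValue`, and `scaleType` do not appear in this excerpt and are assumed from the wider schema.
>
> ```python
> hyperparameters = {
>     'goal': 'MAXIMIZE',
>     'hyperparameterMetricTag': 'accuracy',     # must match the TensorBoard tag exactly
>     'maxTrials': 20,
>     'maxParallelTrials': 2,
>     'enableTrialEarlyStopping': True,
>     'params': [
>         {
>             'parameterName': 'learning_rate',
>             'type': 'DOUBLE',
>             'minValue': 0.0001,
>             'maxValue': 0.1,
>             'scaleType': 'UNIT_LOG_SCALE',
>         },
>         {
>             'parameterName': 'optimizer',
>             'type': 'CATEGORICAL',
>             'categoricalValues': ['adam', 'sgd'],
>         },
>     ],
> }
> ```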
@@ -315,29 +373,6 @@
           # worker. If `workerConfig.imageUri` has not been set, AI Platform uses
           # the value of `masterConfig.imageUri`. Learn more about [configuring custom
           # containers](/ai-platform/training/docs/distributed-training-containers).
-        &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-            # the one used in the custom container. This field is required if the replica
-            # is a TPU worker that uses a custom container. Otherwise, do not specify
-            # this field. This must be a [runtime version that currently supports
-            # training with
-            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-            #
-            # Note that the version of TensorFlow included in a runtime version may
-            # differ from the numbering of the runtime version itself, because it may
-            # have a different [patch
-            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-            # In this field, you must specify the runtime version (TensorFlow minor
-            # version). For example, if your custom container runs TensorFlow `1.x.y`,
-            # specify `1.x`.
-        &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-            # If provided, it will override default ENTRYPOINT of the docker image.
-            # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-            # It cannot be set if custom container image is
-            # not provided.
-            # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-            # both cannot be set at the same time.
-          &quot;A String&quot;,
-        ],
         &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
             # Registry. Learn more about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -370,6 +405,29 @@
           &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
           &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
         },
+        &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+            # the one used in the custom container. This field is required if the replica
+            # is a TPU worker that uses a custom container. Otherwise, do not specify
+            # this field. This must be a [runtime version that currently supports
+            # training with
+            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+            #
+            # Note that the version of TensorFlow included in a runtime version may
+            # differ from the numbering of the runtime version itself, because it may
+            # have a different [patch
+            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+            # In this field, you must specify the runtime version (TensorFlow minor
+            # version). For example, if your custom container runs TensorFlow `1.x.y`,
+            # specify `1.x`.
+        &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+            # If provided, it will override default ENTRYPOINT of the docker image.
+            # If not provided, the docker image&#x27;s ENTRYPOINT is used.
+            # It cannot be set if custom container image is
+            # not provided.
+            # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+            # both cannot be set at the same time.
+          &quot;A String&quot;,
+        ],
       },
       &quot;parameterServerCount&quot;: &quot;A String&quot;, # Optional. The number of parameter server replicas to use for the training
           # job. Each replica in the cluster will be of the type specified in
@@ -462,8 +520,6 @@
           #
           # This value must be present when `scaleTier` is set to `CUSTOM` and
           # `evaluatorCount` is greater than zero.
-      &quot;region&quot;: &quot;A String&quot;, # Required. The region to run the training job in. See the [available
-          # regions](/ai-platform/training/docs/regions) for AI Platform Training.
       &quot;workerType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
           # job&#x27;s worker nodes.
           #
@@ -480,6 +536,8 @@
           #
           # This value must be present when `scaleTier` is set to `CUSTOM` and
           # `workerCount` is greater than zero.
+      &quot;region&quot;: &quot;A String&quot;, # Required. The region to run the training job in. See the [available
+          # regions](/ai-platform/training/docs/regions) for AI Platform Training.
       &quot;parameterServerType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
           # job&#x27;s parameter server.
           #
@@ -503,29 +561,6 @@
           # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
           # about [configuring custom
           # containers](/ai-platform/training/docs/distributed-training-containers).
-        &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-            # the one used in the custom container. This field is required if the replica
-            # is a TPU worker that uses a custom container. Otherwise, do not specify
-            # this field. This must be a [runtime version that currently supports
-            # training with
-            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-            #
-            # Note that the version of TensorFlow included in a runtime version may
-            # differ from the numbering of the runtime version itself, because it may
-            # have a different [patch
-            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-            # In this field, you must specify the runtime version (TensorFlow minor
-            # version). For example, if your custom container runs TensorFlow `1.x.y`,
-            # specify `1.x`.
-        &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-            # If provided, it will override default ENTRYPOINT of the docker image.
-            # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-            # It cannot be set if custom container image is
-            # not provided.
-            # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-            # both cannot be set at the same time.
-          &quot;A String&quot;,
-        ],
         &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
             # Registry. Learn more about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -558,6 +593,29 @@
           &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
           &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
         },
+        &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+            # the one used in the custom container. This field is required if the replica
+            # is a TPU worker that uses a custom container. Otherwise, do not specify
+            # this field. This must be a [runtime version that currently supports
+            # training with
+            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+            #
+            # Note that the version of TensorFlow included in a runtime version may
+            # differ from the numbering of the runtime version itself, because it may
+            # have a different [patch
+            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+            # In this field, you must specify the runtime version (TensorFlow minor
+            # version). For example, if your custom container runs TensorFlow `1.x.y`,
+            # specify `1.x`.
+        &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+            # If provided, it will override default ENTRYPOINT of the docker image.
+            # If not provided, the docker image&#x27;s ENTRYPOINT is used.
+            # It cannot be set if custom container image is
+            # not provided.
+            # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+            # both cannot be set at the same time.
+          &quot;A String&quot;,
+        ],
       },
       &quot;scaleTier&quot;: &quot;A String&quot;, # Required. Specifies the machine types, the number of replicas for workers
           # and parameter servers.
@@ -579,16 +637,6 @@
           #
           # Read more about the Python versions available for [each runtime
           # version](/ml-engine/docs/runtime-version-list).
-      &quot;network&quot;: &quot;A String&quot;, # Optional. The full name of the Google Compute Engine
-          # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
-          # is peered. For example, projects/12345/global/networks/myVPC. Format is of
-          # the form projects/{project}/global/networks/{network}. Where {project} is a
-          # project number, as in &#x27;12345&#x27;, and {network} is network name.&quot;.
-          #
-          # Private services access must already be configured for the network. If left
-          # unspecified, the Job is not peered with any network. Learn more -
-          # Connecting Job to user network over private
-          # IP.
       &quot;scheduling&quot;: { # All parameters related to scheduling of training jobs. # Optional. Scheduling options for a training job.
         &quot;maxWaitTime&quot;: &quot;A String&quot;,
         &quot;maxRunningTime&quot;: &quot;A String&quot;, # Optional. The maximum job running time, expressed in seconds. The field can
@@ -615,6 +663,16 @@
             #   ...
             # ```
       },
+      &quot;network&quot;: &quot;A String&quot;, # Optional. The full name of the Google Compute Engine
+          # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
+          # is peered. For example, projects/12345/global/networks/myVPC. Format is of
+          # the form projects/{project}/global/networks/{network}. Where {project} is a
+          # project number, as in &#x27;12345&#x27;, and {network} is network name.&quot;.
+          #
+          # Private services access must already be configured for the network. If left
+          # unspecified, the Job is not peered with any network. Learn more -
+          # Connecting Job to user network over private
+          # IP.
       &quot;evaluatorConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
           #
           # You should only set `evaluatorConfig.acceleratorConfig` if
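> Note: the `scheduling` block and the relocated `network` field combine as in the sketch below; the duration encoding and the network name are assumptions based on the comments above.
>
> ```python
> training_input_extras = {
>     'scheduling': {
>         # Durations appear to be encoded as seconds with an 's' suffix (assumption).
>         'maxRunningTime': '7200s',
>     },
>     # The VPC must already have private services access configured;
>     # the project number and network name here are placeholders.
>     'network': 'projects/12345/global/networks/myVPC',
> }
> ```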
@@ -626,29 +684,6 @@
           # your evaluator. If `evaluatorConfig.imageUri` has not been
           # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
           # containers](/ai-platform/training/docs/distributed-training-containers).
-        &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-            # the one used in the custom container. This field is required if the replica
-            # is a TPU worker that uses a custom container. Otherwise, do not specify
-            # this field. This must be a [runtime version that currently supports
-            # training with
-            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-            #
-            # Note that the version of TensorFlow included in a runtime version may
-            # differ from the numbering of the runtime version itself, because it may
-            # have a different [patch
-            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-            # In this field, you must specify the runtime version (TensorFlow minor
-            # version). For example, if your custom container runs TensorFlow `1.x.y`,
-            # specify `1.x`.
-        &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-            # If provided, it will override default ENTRYPOINT of the docker image.
-            # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-            # It cannot be set if custom container image is
-            # not provided.
-            # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-            # both cannot be set at the same time.
-          &quot;A String&quot;,
-        ],
         &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
             # Registry. Learn more about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -681,6 +716,29 @@
           &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
           &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
         },
+        &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+            # the one used in the custom container. This field is required if the replica
+            # is a TPU worker that uses a custom container. Otherwise, do not specify
+            # this field. This must be a [runtime version that currently supports
+            # training with
+            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+            #
+            # Note that the version of TensorFlow included in a runtime version may
+            # differ from the numbering of the runtime version itself, because it may
+            # have a different [patch
+            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+            # In this field, you must specify the runtime version (TensorFlow minor
+            # version). For example, if your custom container runs TensorFlow `1.x.y`,
+            # specify `1.x`.
+        &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+            # If provided, it will override default ENTRYPOINT of the docker image.
+            # If not provided, the docker image&#x27;s ENTRYPOINT is used.
+            # It cannot be set if custom container image is
+            # not provided.
+            # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+            # both cannot be set at the same time.
+          &quot;A String&quot;,
+        ],
       },
       &quot;useChiefInTfConfig&quot;: True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
           # variable when training with a custom container. Defaults to `false`. [Learn
@@ -689,21 +747,6 @@
           #
           # This field has no effect for training jobs that don&#x27;t use a custom
           # container.
-      &quot;workerCount&quot;: &quot;A String&quot;, # Optional. The number of worker replicas to use for the training job. Each
-          # replica in the cluster will be of the type specified in `worker_type`.
-          #
-          # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
-          # set this value, you must also set `worker_type`.
-          #
-          # The default value is zero.
-      &quot;pythonModule&quot;: &quot;A String&quot;, # Required. The Python module name to run after installing the packages.
-      &quot;args&quot;: [ # Optional. Command-line arguments passed to the training application when it
-          # starts. If your job uses a custom container, then the arguments are passed
-          # to the container&#x27;s &lt;a class=&quot;external&quot; target=&quot;_blank&quot;
-          # href=&quot;https://docs.docker.com/engine/reference/builder/#entrypoint&quot;&gt;
-          # `ENTRYPOINT`&lt;/a&gt; command.
-        &quot;A String&quot;,
-      ],
     },
     &quot;state&quot;: &quot;A String&quot;, # Output only. The detailed state of a job.
     &quot;jobId&quot;: &quot;A String&quot;, # Required. The user-specified id of the job.
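> Note: since `state` and `errorMessage` are output-only, a caller typically polls the job until it reaches a terminal state. A minimal sketch, assuming a `get` method on `projects().jobs()` and the usual terminal state names:
>
> ```python
> import time
>
> from googleapiclient import discovery
>
> ml = discovery.build('ml', 'v1')
> job_name = 'projects/my-project/jobs/train_001'   # hypothetical job resource name
>
> while True:
>     job = ml.projects().jobs().get(name=job_name).execute()
>     if job.get('state') in ('SUCCEEDED', 'FAILED', 'CANCELLED'):
>         break
>     time.sleep(30)
>
> if job.get('state') != 'SUCCEEDED':
>     # errorMessage is populated only on failure or cancellation.
>     print(job.get('errorMessage'))
> ```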
@@ -716,41 +759,52 @@
       &quot;predictionCount&quot;: &quot;A String&quot;, # The number of generated predictions.
     },
     &quot;trainingOutput&quot;: { # Represents results of a training job. Output only. # The current training job result.
+      &quot;isBuiltInAlgorithmJob&quot;: True or False, # Whether this job is a built-in Algorithm job.
+      &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
+          # Only set for built-in algorithms jobs.
+        &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
+            # saves the trained model. Only set for successful jobs that don&#x27;t use
+            # hyperparameter tuning.
+        &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
+        &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
+            # trained.
+        &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
+      },
       &quot;trials&quot;: [ # Results for individual Hyperparameter trials.
           # Only set for hyperparameter tuning jobs.
         { # Represents the result of a single hyperparameter tuning trial from a
             # training job. The TrainingOutput object that is returned on successful
             # completion of a training job with hyperparameter tuning includes a list
             # of HyperparameterOutput objects, one for each successful trial.
-          &quot;allMetrics&quot;: [ # All recorded object metrics for this trial. This field is not currently
-              # populated.
-            { # An observed value of a metric.
-              &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
-              &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
-            },
-          ],
-          &quot;hyperparameters&quot;: { # The hyperparameters given to this trial.
-            &quot;a_key&quot;: &quot;A String&quot;,
-          },
           &quot;trialId&quot;: &quot;A String&quot;, # The trial id for these results.
           &quot;endTime&quot;: &quot;A String&quot;, # Output only. End time for the trial.
-          &quot;isTrialStoppedEarly&quot;: True or False, # True if the trial is stopped early.
           &quot;startTime&quot;: &quot;A String&quot;, # Output only. Start time for the trial.
+          &quot;isTrialStoppedEarly&quot;: True or False, # True if the trial is stopped early.
           &quot;finalMetric&quot;: { # An observed value of a metric. # The final objective metric seen for this trial.
-            &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
             &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
+            &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
           },
           &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
               # Only set for trials of built-in algorithms jobs that have succeeded.
-            &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
             &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
                 # saves the trained model. Only set for successful jobs that don&#x27;t use
                 # hyperparameter tuning.
             &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
             &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
                 # trained.
+            &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
           },
           &quot;state&quot;: &quot;A String&quot;, # Output only. The detailed state of the trial.
+          &quot;allMetrics&quot;: [ # All recorded object metrics for this trial. This field is not currently
+              # populated.
+            { # An observed value of a metric.
+              &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
+              &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
+            },
+          ],
+          &quot;hyperparameters&quot;: { # The hyperparameters given to this trial.
+            &quot;a_key&quot;: &quot;A String&quot;,
+          },
         },
       ],
       &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # The TensorFlow summary tag name used for optimizing hyperparameter tuning
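> Note: once a hyperparameter tuning job finishes, its per-trial results sit under `trainingOutput.trials`, reordered above. Continuing from the polled `job` dict in the earlier polling sketch:
>
> ```python
> output = job.get('trainingOutput', {})
> if output.get('isHyperparameterTuningJob'):
>     for trial in output.get('trials', []):
>         metric = trial.get('finalMetric', {})
>         print(trial.get('trialId'),
>               trial.get('state'),
>               'step', metric.get('trainingStep'),
>               'objective', metric.get('objectiveValue'),
>               trial.get('hyperparameters'))
> ```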
@@ -761,17 +815,6 @@
           # Only set for hyperparameter tuning jobs.
       &quot;isHyperparameterTuningJob&quot;: True or False, # Whether this job is a hyperparameter tuning job.
       &quot;consumedMLUnits&quot;: 3.14, # The amount of ML units consumed by the job.
-      &quot;isBuiltInAlgorithmJob&quot;: True or False, # Whether this job is a built-in Algorithm job.
-      &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
-          # Only set for built-in algorithms jobs.
-        &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
-        &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
-            # saves the trained model. Only set for successful jobs that don&#x27;t use
-            # hyperparameter tuning.
-        &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
-        &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
-            # trained.
-      },
     },
     &quot;createTime&quot;: &quot;A String&quot;, # Output only. When the job was created.
     &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your jobs.
@@ -781,49 +824,6 @@
         # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
       &quot;a_key&quot;: &quot;A String&quot;,
     },
-    &quot;predictionInput&quot;: { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
-      &quot;outputPath&quot;: &quot;A String&quot;, # Required. The output Google Cloud Storage location.
-      &quot;outputDataFormat&quot;: &quot;A String&quot;, # Optional. Format of the output data files, defaults to JSON.
-      &quot;dataFormat&quot;: &quot;A String&quot;, # Required. The format of the input data files.
-      &quot;batchSize&quot;: &quot;A String&quot;, # Optional. Number of records per batch, defaults to 64.
-          # The service will buffer batch_size number of records in memory before
-          # invoking one Tensorflow prediction call internally. So take the record
-          # size and memory available into consideration when setting this parameter.
-      &quot;runtimeVersion&quot;: &quot;A String&quot;, # Optional. The AI Platform runtime version to use for this batch
-          # prediction. If not set, AI Platform will pick the runtime version used
-          # during the CreateVersion request for this model version, or choose the
-          # latest stable version when model version information is not available
-          # such as when the model is specified by uri.
-      &quot;inputPaths&quot;: [ # Required. The Cloud Storage location of the input data files. May contain
-          # &lt;a href=&quot;/storage/docs/gsutil/addlhelp/WildcardNames&quot;&gt;wildcards&lt;/a&gt;.
-        &quot;A String&quot;,
-      ],
-      &quot;region&quot;: &quot;A String&quot;, # Required. The Google Compute Engine region to run the prediction job in.
-          # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
-          # for AI Platform services.
-      &quot;versionName&quot;: &quot;A String&quot;, # Use this field if you want to specify a version of the model to use. The
-          # string is formatted the same way as `model_version`, with the addition
-          # of the version information:
-          #
-          # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION&quot;`
-      &quot;modelName&quot;: &quot;A String&quot;, # Use this field if you want to use the default version for the specified
-          # model. The string must use the following format:
-          #
-          # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL&quot;`
-      &quot;uri&quot;: &quot;A String&quot;, # Use this field if you want to specify a Google Cloud Storage path for
-          # the model to use.
-      &quot;maxWorkerCount&quot;: &quot;A String&quot;, # Optional. The maximum number of workers to be used for parallel processing.
-          # Defaults to 10 if not specified.
-      &quot;signatureName&quot;: &quot;A String&quot;, # Optional. The name of the signature defined in the SavedModel to use for
-          # this job. Please refer to
-          # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
-          # for information about how to use signatures.
-          #
-          # Defaults to
-          # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
-          # , which is &quot;serving_default&quot;.
-    },
-    &quot;errorMessage&quot;: &quot;A String&quot;, # Output only. The details of a failure or a cancellation.
   }
 
   x__xgafv: string, V1 error format.
@@ -835,6 +835,49 @@
   An object of the form:
 
     { # Represents a training or prediction job.
+      &quot;predictionInput&quot;: { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
+        &quot;versionName&quot;: &quot;A String&quot;, # Use this field if you want to specify a version of the model to use. The
+            # string is formatted the same way as `model_version`, with the addition
+            # of the version information:
+            #
+            # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION&quot;`
+        &quot;modelName&quot;: &quot;A String&quot;, # Use this field if you want to use the default version for the specified
+            # model. The string must use the following format:
+            #
+            # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL&quot;`
+        &quot;uri&quot;: &quot;A String&quot;, # Use this field if you want to specify a Google Cloud Storage path for
+            # the model to use.
+        &quot;maxWorkerCount&quot;: &quot;A String&quot;, # Optional. The maximum number of workers to be used for parallel processing.
+            # Defaults to 10 if not specified.
+        &quot;signatureName&quot;: &quot;A String&quot;, # Optional. The name of the signature defined in the SavedModel to use for
+            # this job. Please refer to
+            # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
+            # for information about how to use signatures.
+            #
+            # Defaults to
+            # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
+            # , which is &quot;serving_default&quot;.
+        &quot;outputPath&quot;: &quot;A String&quot;, # Required. The output Google Cloud Storage location.
+        &quot;outputDataFormat&quot;: &quot;A String&quot;, # Optional. Format of the output data files, defaults to JSON.
+        &quot;dataFormat&quot;: &quot;A String&quot;, # Required. The format of the input data files.
+        &quot;batchSize&quot;: &quot;A String&quot;, # Optional. Number of records per batch, defaults to 64.
+            # The service will buffer batch_size number of records in memory before
+            # invoking one Tensorflow prediction call internally. So take the record
+            # size and memory available into consideration when setting this parameter.
+        &quot;runtimeVersion&quot;: &quot;A String&quot;, # Optional. The AI Platform runtime version to use for this batch
+            # prediction. If not set, AI Platform will pick the runtime version used
+            # during the CreateVersion request for this model version, or choose the
+            # latest stable version when model version information is not available
+            # such as when the model is specified by uri.
+        &quot;inputPaths&quot;: [ # Required. The Cloud Storage location of the input data files. May contain
+            # &lt;a href=&quot;/storage/docs/gsutil/addlhelp/WildcardNames&quot;&gt;wildcards&lt;/a&gt;.
+          &quot;A String&quot;,
+        ],
+        &quot;region&quot;: &quot;A String&quot;, # Required. The Google Compute Engine region to run the prediction job in.
+            # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
+            # for AI Platform services.
+      },
+      &quot;errorMessage&quot;: &quot;A String&quot;, # Output only. The details of a failure or a cancellation.
       &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
           # prevent simultaneous updates of a job from overwriting each other.
           # It is strongly suggested that systems make use of the `etag` in the
@@ -847,6 +890,21 @@
           # command-line arguments and/or in a YAML configuration file referenced from
           # the --config command-line argument. For details, see the guide to [submitting
           # a training job](/ai-platform/training/docs/training-jobs).
+        &quot;workerCount&quot;: &quot;A String&quot;, # Optional. The number of worker replicas to use for the training job. Each
+            # replica in the cluster will be of the type specified in `worker_type`.
+            #
+            # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+            # set this value, you must also set `worker_type`.
+            #
+            # The default value is zero.
+        &quot;pythonModule&quot;: &quot;A String&quot;, # Required. The Python module name to run after installing the packages.
+        &quot;args&quot;: [ # Optional. Command-line arguments passed to the training application when it
+            # starts. If your job uses a custom container, then the arguments are passed
+            # to the container&#x27;s &lt;a class=&quot;external&quot; target=&quot;_blank&quot;
+            # href=&quot;https://docs.docker.com/engine/reference/builder/#entrypoint&quot;&gt;
+            # `ENTRYPOINT`&lt;/a&gt; command.
+          &quot;A String&quot;,
+        ],
         &quot;parameterServerConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
             #
             # You should only set `parameterServerConfig.acceleratorConfig` if
@@ -858,29 +916,6 @@
             # your parameter server. If `parameterServerConfig.imageUri` has not been
             # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
-          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-              # the one used in the custom container. This field is required if the replica
-              # is a TPU worker that uses a custom container. Otherwise, do not specify
-              # this field. This must be a [runtime version that currently supports
-              # training with
-              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-              #
-              # Note that the version of TensorFlow included in a runtime version may
-              # differ from the numbering of the runtime version itself, because it may
-              # have a different [patch
-              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-              # In this field, you must specify the runtime version (TensorFlow minor
-              # version). For example, if your custom container runs TensorFlow `1.x.y`,
-              # specify `1.x`.
-          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-              # If provided, it will override default ENTRYPOINT of the docker image.
-              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-              # It cannot be set if custom container image is
-              # not provided.
-              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-              # both cannot be set at the same time.
-            &quot;A String&quot;,
-          ],
           &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
               # Registry. Learn more about [configuring custom
               # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -913,6 +948,29 @@
             &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
             &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
           },
+          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+              # the one used in the custom container. This field is required if the replica
+              # is a TPU worker that uses a custom container. Otherwise, do not specify
+              # this field. This must be a [runtime version that currently supports
+              # training with
+              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+              #
+              # Note that the version of TensorFlow included in a runtime version may
+              # differ from the numbering of the runtime version itself, because it may
+              # have a different [patch
+              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+              # In this field, you must specify the runtime version (TensorFlow minor
+              # version). For example, if your custom container runs TensorFlow `1.x.y`,
+              # specify `1.x`.
+          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+              # If provided, it will override default ENTRYPOINT of the docker image.
+              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
+              # It cannot be set if custom container image is
+              # not provided.
+              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+              # both cannot be set at the same time.
+            &quot;A String&quot;,
+          ],
         },
         &quot;encryptionConfig&quot;: { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
             # protect resources created by a training job, instead of using Google&#x27;s
@@ -929,14 +987,8 @@
               # `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
         },
         &quot;hyperparameters&quot;: { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
-          &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # Optional. The TensorFlow summary tag name to use for optimizing trials. For
-              # current versions of TensorFlow, this tag name should exactly match what is
-              # shown in TensorBoard, including all scopes.  For versions of TensorFlow
-              # prior to 0.12, this should be only the tag passed to tf.Summary.
-              # By default, &quot;training/hptuning/metric&quot; will be used.
           &quot;params&quot;: [ # Required. The set of parameters to tune.
             { # Represents a single hyperparameter to optimize.
-              &quot;type&quot;: &quot;A String&quot;, # Required. The type of the parameter.
               &quot;categoricalValues&quot;: [ # Required if type is `CATEGORICAL`. The list of possible categories.
                 &quot;A String&quot;,
               ],
@@ -959,6 +1011,7 @@
               &quot;maxValue&quot;: 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
                   # should be unset if type is `CATEGORICAL`. This value should be integers if
                   # type is `INTEGER`.
+              &quot;type&quot;: &quot;A String&quot;, # Required. The type of the parameter.
             },
           ],
           &quot;enableTrialEarlyStopping&quot;: True or False, # Optional. Indicates if the hyperparameter tuning job enables auto trial
@@ -994,6 +1047,11 @@
               # tuning job.
               # Uses the default AI Platform hyperparameter tuning
               # algorithm if unspecified.
+          &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # Optional. The TensorFlow summary tag name to use for optimizing trials. For
+              # current versions of TensorFlow, this tag name should exactly match what is
+              # shown in TensorBoard, including all scopes.  For versions of TensorFlow
+              # prior to 0.12, this should be only the tag passed to tf.Summary.
+              # By default, &quot;training/hptuning/metric&quot; will be used.
         },
         &quot;workerConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
             #
@@ -1006,29 +1064,6 @@
             # worker. If `workerConfig.imageUri` has not been set, AI Platform uses
             # the value of `masterConfig.imageUri`. Learn more about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
-          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-              # the one used in the custom container. This field is required if the replica
-              # is a TPU worker that uses a custom container. Otherwise, do not specify
-              # this field. This must be a [runtime version that currently supports
-              # training with
-              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-              #
-              # Note that the version of TensorFlow included in a runtime version may
-              # differ from the numbering of the runtime version itself, because it may
-              # have a different [patch
-              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-              # In this field, you must specify the runtime version (TensorFlow minor
-              # version). For example, if your custom container runs TensorFlow `1.x.y`,
-              # specify `1.x`.
-          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-              # If provided, it will override default ENTRYPOINT of the docker image.
-              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-              # It cannot be set if custom container image is
-              # not provided.
-              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-              # both cannot be set at the same time.
-            &quot;A String&quot;,
-          ],
           &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
               # Registry. Learn more about [configuring custom
               # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -1061,6 +1096,29 @@
             &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
             &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
           },
+          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+              # the one used in the custom container. This field is required if the replica
+              # is a TPU worker that uses a custom container. Otherwise, do not specify
+              # this field. This must be a [runtime version that currently supports
+              # training with
+              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+              #
+              # Note that the version of TensorFlow included in a runtime version may
+              # differ from the numbering of the runtime version itself, because it may
+              # have a different [patch
+              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+              # In this field, you must specify the runtime version (TensorFlow minor
+              # version). For example, if your custom container runs TensorFlow `1.x.y`,
+              # specify `1.x`.
+          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+              # If provided, it will override default ENTRYPOINT of the docker image.
+              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
+              # It cannot be set if custom container image is
+              # not provided.
+              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+              # both cannot be set at the same time.
+            &quot;A String&quot;,
+          ],
         },
         &quot;parameterServerCount&quot;: &quot;A String&quot;, # Optional. The number of parameter server replicas to use for the training
             # job. Each replica in the cluster will be of the type specified in
@@ -1153,8 +1211,6 @@
             #
             # This value must be present when `scaleTier` is set to `CUSTOM` and
             # `evaluatorCount` is greater than zero.
-        &quot;region&quot;: &quot;A String&quot;, # Required. The region to run the training job in. See the [available
-            # regions](/ai-platform/training/docs/regions) for AI Platform Training.
         &quot;workerType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
             # job&#x27;s worker nodes.
             #
@@ -1171,6 +1227,8 @@
             #
             # This value must be present when `scaleTier` is set to `CUSTOM` and
             # `workerCount` is greater than zero.
+        &quot;region&quot;: &quot;A String&quot;, # Required. The region to run the training job in. See the [available
+            # regions](/ai-platform/training/docs/regions) for AI Platform Training.
         &quot;parameterServerType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
             # job&#x27;s parameter server.
             #
@@ -1194,29 +1252,6 @@
             # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
             # about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
-          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-              # the one used in the custom container. This field is required if the replica
-              # is a TPU worker that uses a custom container. Otherwise, do not specify
-              # this field. This must be a [runtime version that currently supports
-              # training with
-              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-              #
-              # Note that the version of TensorFlow included in a runtime version may
-              # differ from the numbering of the runtime version itself, because it may
-              # have a different [patch
-              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-              # In this field, you must specify the runtime version (TensorFlow minor
-              # version). For example, if your custom container runs TensorFlow `1.x.y`,
-              # specify `1.x`.
-          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-              # If provided, it will override default ENTRYPOINT of the docker image.
-              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-              # It cannot be set if custom container image is
-              # not provided.
-              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-              # both cannot be set at the same time.
-            &quot;A String&quot;,
-          ],
           &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
               # Registry. Learn more about [configuring custom
               # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -1249,6 +1284,29 @@
             &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
             &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
           },
+          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+              # the one used in the custom container. This field is required if the replica
+              # is a TPU worker that uses a custom container. Otherwise, do not specify
+              # this field. This must be a [runtime version that currently supports
+              # training with
+              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+              #
+              # Note that the version of TensorFlow included in a runtime version may
+              # differ from the numbering of the runtime version itself, because it may
+              # have a different [patch
+              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+              # In this field, you must specify the runtime version (TensorFlow minor
+              # version). For example, if your custom container runs TensorFlow `1.x.y`,
+              # specify `1.x`.
+          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+              # If provided, it will override default ENTRYPOINT of the docker image.
+              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
+              # It cannot be set if custom container image is
+              # not provided.
+              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+              # both cannot be set at the same time.
+            &quot;A String&quot;,
+          ],
         },
         &quot;scaleTier&quot;: &quot;A String&quot;, # Required. Specifies the machine types, the number of replicas for workers
             # and parameter servers.
@@ -1270,16 +1328,6 @@
             #
             # Read more about the Python versions available for [each runtime
             # version](/ml-engine/docs/runtime-version-list).
-        &quot;network&quot;: &quot;A String&quot;, # Optional. The full name of the Google Compute Engine
-            # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
-            # is peered. For example, projects/12345/global/networks/myVPC. Format is of
-            # the form projects/{project}/global/networks/{network}. Where {project} is a
-            # project number, as in &#x27;12345&#x27;, and {network} is network name.&quot;.
-            #
-            # Private services access must already be configured for the network. If left
-            # unspecified, the Job is not peered with any network. Learn more -
-            # Connecting Job to user network over private
-            # IP.
         &quot;scheduling&quot;: { # All parameters related to scheduling of training jobs. # Optional. Scheduling options for a training job.
           &quot;maxWaitTime&quot;: &quot;A String&quot;,
           &quot;maxRunningTime&quot;: &quot;A String&quot;, # Optional. The maximum job running time, expressed in seconds. The field can
@@ -1306,6 +1354,16 @@
               #   ...
               # ```
         },
+        &quot;network&quot;: &quot;A String&quot;, # Optional. The full name of the Google Compute Engine
+            # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
+            # is peered. For example, projects/12345/global/networks/myVPC. Format is of
+            # the form projects/{project}/global/networks/{network}. Where {project} is a
+            # project number, as in &#x27;12345&#x27;, and {network} is network name.&quot;.
+            #
+            # Private services access must already be configured for the network. If left
+            # unspecified, the Job is not peered with any network. Learn more -
+            # Connecting Job to user network over private
+            # IP.
         &quot;evaluatorConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
             #
             # You should only set `evaluatorConfig.acceleratorConfig` if
@@ -1317,29 +1375,6 @@
             # your evaluator. If `evaluatorConfig.imageUri` has not been
             # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
-          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-              # the one used in the custom container. This field is required if the replica
-              # is a TPU worker that uses a custom container. Otherwise, do not specify
-              # this field. This must be a [runtime version that currently supports
-              # training with
-              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-              #
-              # Note that the version of TensorFlow included in a runtime version may
-              # differ from the numbering of the runtime version itself, because it may
-              # have a different [patch
-              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-              # In this field, you must specify the runtime version (TensorFlow minor
-              # version). For example, if your custom container runs TensorFlow `1.x.y`,
-              # specify `1.x`.
-          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-              # If provided, it will override default ENTRYPOINT of the docker image.
-              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-              # It cannot be set if custom container image is
-              # not provided.
-              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-              # both cannot be set at the same time.
-            &quot;A String&quot;,
-          ],
           &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
               # Registry. Learn more about [configuring custom
               # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -1372,6 +1407,29 @@
             &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
             &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
           },
+          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+              # the one used in the custom container. This field is required if the replica
+              # is a TPU worker that uses a custom container. Otherwise, do not specify
+              # this field. This must be a [runtime version that currently supports
+              # training with
+              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+              #
+              # Note that the version of TensorFlow included in a runtime version may
+              # differ from the numbering of the runtime version itself, because it may
+              # have a different [patch
+              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+              # In this field, you must specify the runtime version (TensorFlow minor
+              # version). For example, if your custom container runs TensorFlow `1.x.y`,
+              # specify `1.x`.
+          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+              # If provided, it will override default ENTRYPOINT of the docker image.
+              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
+              # It cannot be set if custom container image is
+              # not provided.
+              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+              # both cannot be set at the same time.
+            &quot;A String&quot;,
+          ],
         },
         &quot;useChiefInTfConfig&quot;: True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
             # variable when training with a custom container. Defaults to `false`. [Learn
@@ -1380,21 +1438,6 @@
             #
             # This field has no effect for training jobs that don&#x27;t use a custom
             # container.
-        &quot;workerCount&quot;: &quot;A String&quot;, # Optional. The number of worker replicas to use for the training job. Each
-            # replica in the cluster will be of the type specified in `worker_type`.
-            #
-            # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
-            # set this value, you must also set `worker_type`.
-            #
-            # The default value is zero.
-        &quot;pythonModule&quot;: &quot;A String&quot;, # Required. The Python module name to run after installing the packages.
-        &quot;args&quot;: [ # Optional. Command-line arguments passed to the training application when it
-            # starts. If your job uses a custom container, then the arguments are passed
-            # to the container&#x27;s &lt;a class=&quot;external&quot; target=&quot;_blank&quot;
-            # href=&quot;https://docs.docker.com/engine/reference/builder/#entrypoint&quot;&gt;
-            # `ENTRYPOINT`&lt;/a&gt; command.
-          &quot;A String&quot;,
-        ],
       },
       &quot;state&quot;: &quot;A String&quot;, # Output only. The detailed state of a job.
       &quot;jobId&quot;: &quot;A String&quot;, # Required. The user-specified id of the job.
@@ -1407,41 +1450,52 @@
         &quot;predictionCount&quot;: &quot;A String&quot;, # The number of generated predictions.
       },
       &quot;trainingOutput&quot;: { # Represents results of a training job. Output only. # The current training job result.
+        &quot;isBuiltInAlgorithmJob&quot;: True or False, # Whether this job is a built-in Algorithm job.
+        &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
+            # Only set for built-in algorithms jobs.
+          &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
+              # saves the trained model. Only set for successful jobs that don&#x27;t use
+              # hyperparameter tuning.
+          &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
+          &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
+              # trained.
+          &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
+        },
         &quot;trials&quot;: [ # Results for individual Hyperparameter trials.
             # Only set for hyperparameter tuning jobs.
           { # Represents the result of a single hyperparameter tuning trial from a
               # training job. The TrainingOutput object that is returned on successful
               # completion of a training job with hyperparameter tuning includes a list
               # of HyperparameterOutput objects, one for each successful trial.
-            &quot;allMetrics&quot;: [ # All recorded object metrics for this trial. This field is not currently
-                # populated.
-              { # An observed value of a metric.
-                &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
-                &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
-              },
-            ],
-            &quot;hyperparameters&quot;: { # The hyperparameters given to this trial.
-              &quot;a_key&quot;: &quot;A String&quot;,
-            },
             &quot;trialId&quot;: &quot;A String&quot;, # The trial id for these results.
             &quot;endTime&quot;: &quot;A String&quot;, # Output only. End time for the trial.
-            &quot;isTrialStoppedEarly&quot;: True or False, # True if the trial is stopped early.
             &quot;startTime&quot;: &quot;A String&quot;, # Output only. Start time for the trial.
+            &quot;isTrialStoppedEarly&quot;: True or False, # True if the trial is stopped early.
             &quot;finalMetric&quot;: { # An observed value of a metric. # The final objective metric seen for this trial.
-              &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
               &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
+              &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
             },
             &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
                 # Only set for trials of built-in algorithms jobs that have succeeded.
-              &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
               &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
                   # saves the trained model. Only set for successful jobs that don&#x27;t use
                   # hyperparameter tuning.
               &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
               &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
                   # trained.
+              &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
             },
             &quot;state&quot;: &quot;A String&quot;, # Output only. The detailed state of the trial.
+            &quot;allMetrics&quot;: [ # All recorded object metrics for this trial. This field is not currently
+                # populated.
+              { # An observed value of a metric.
+                &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
+                &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
+              },
+            ],
+            &quot;hyperparameters&quot;: { # The hyperparameters given to this trial.
+              &quot;a_key&quot;: &quot;A String&quot;,
+            },
           },
         ],
         &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # The TensorFlow summary tag name used for optimizing hyperparameter tuning
@@ -1452,17 +1506,6 @@
             # Only set for hyperparameter tuning jobs.
         &quot;isHyperparameterTuningJob&quot;: True or False, # Whether this job is a hyperparameter tuning job.
         &quot;consumedMLUnits&quot;: 3.14, # The amount of ML units consumed by the job.
-        &quot;isBuiltInAlgorithmJob&quot;: True or False, # Whether this job is a built-in Algorithm job.
-        &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
-            # Only set for built-in algorithms jobs.
-          &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
-          &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
-              # saves the trained model. Only set for successful jobs that don&#x27;t use
-              # hyperparameter tuning.
-          &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
-          &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
-              # trained.
-        },
       },
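The trainingOutput and per-trial fields reordered above are read back from a finished job rather than supplied by the caller. A minimal sketch of inspecting them, with a placeholder project and job id:

from googleapiclient import discovery

ml = discovery.build("ml", "v1")

# Fetch a finished job and walk its hyperparameter tuning trials.
job = (
    ml.projects()
    .jobs()
    .get(name="projects/YOUR_PROJECT/jobs/YOUR_JOB")  # placeholder resource name
    .execute()
)

output = job.get("trainingOutput", {})
print("consumed ML units:", output.get("consumedMLUnits"))
for trial in output.get("trials", []):
    final = trial.get("finalMetric", {})
    print(trial["trialId"], trial.get("state"),
          final.get("trainingStep"), final.get("objectiveValue"))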
       &quot;createTime&quot;: &quot;A String&quot;, # Output only. When the job was created.
       &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your jobs.
@@ -1472,49 +1515,6 @@
           # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
         &quot;a_key&quot;: &quot;A String&quot;,
       },
-      &quot;predictionInput&quot;: { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
-        &quot;outputPath&quot;: &quot;A String&quot;, # Required. The output Google Cloud Storage location.
-        &quot;outputDataFormat&quot;: &quot;A String&quot;, # Optional. Format of the output data files, defaults to JSON.
-        &quot;dataFormat&quot;: &quot;A String&quot;, # Required. The format of the input data files.
-        &quot;batchSize&quot;: &quot;A String&quot;, # Optional. Number of records per batch, defaults to 64.
-            # The service will buffer batch_size number of records in memory before
-            # invoking one Tensorflow prediction call internally. So take the record
-            # size and memory available into consideration when setting this parameter.
-        &quot;runtimeVersion&quot;: &quot;A String&quot;, # Optional. The AI Platform runtime version to use for this batch
-            # prediction. If not set, AI Platform will pick the runtime version used
-            # during the CreateVersion request for this model version, or choose the
-            # latest stable version when model version information is not available
-            # such as when the model is specified by uri.
-        &quot;inputPaths&quot;: [ # Required. The Cloud Storage location of the input data files. May contain
-            # &lt;a href=&quot;/storage/docs/gsutil/addlhelp/WildcardNames&quot;&gt;wildcards&lt;/a&gt;.
-          &quot;A String&quot;,
-        ],
-        &quot;region&quot;: &quot;A String&quot;, # Required. The Google Compute Engine region to run the prediction job in.
-            # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
-            # for AI Platform services.
-        &quot;versionName&quot;: &quot;A String&quot;, # Use this field if you want to specify a version of the model to use. The
-            # string is formatted the same way as `model_version`, with the addition
-            # of the version information:
-            #
-            # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION&quot;`
-        &quot;modelName&quot;: &quot;A String&quot;, # Use this field if you want to use the default version for the specified
-            # model. The string must use the following format:
-            #
-            # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL&quot;`
-        &quot;uri&quot;: &quot;A String&quot;, # Use this field if you want to specify a Google Cloud Storage path for
-            # the model to use.
-        &quot;maxWorkerCount&quot;: &quot;A String&quot;, # Optional. The maximum number of workers to be used for parallel processing.
-            # Defaults to 10 if not specified.
-        &quot;signatureName&quot;: &quot;A String&quot;, # Optional. The name of the signature defined in the SavedModel to use for
-            # this job. Please refer to
-            # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
-            # for information about how to use signatures.
-            #
-            # Defaults to
-            # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
-            # , which is &quot;serving_default&quot;.
-      },
-      &quot;errorMessage&quot;: &quot;A String&quot;, # Output only. The details of a failure or a cancellation.
     }</pre>
 </div>
 
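The predictionInput block that moves in the surrounding hunks is the body of a batch prediction request. A minimal sketch, using only fields documented above and placeholder project, model, and Cloud Storage names:

from googleapiclient import discovery

# Application default credentials are assumed to be configured.
ml = discovery.build("ml", "v1")

prediction_job = {
    "jobId": "batch_prediction_001",  # placeholder job id
    "predictionInput": {
        "dataFormat": "JSON",
        "inputPaths": ["gs://your-bucket/prediction-input/*"],  # may contain wildcards
        "outputPath": "gs://your-bucket/prediction-output/",
        "region": "us-central1",
        # Use exactly one of modelName, versionName, or uri.
        "modelName": "projects/YOUR_PROJECT/models/YOUR_MODEL",
    },
}

response = (
    ml.projects()
    .jobs()
    .create(parent="projects/YOUR_PROJECT", body=prediction_job)
    .execute()
)
print(response["jobId"], response["state"])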
@@ -1533,6 +1533,49 @@
   An object of the form:
 
     { # Represents a training or prediction job.
+      &quot;predictionInput&quot;: { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
+        &quot;versionName&quot;: &quot;A String&quot;, # Use this field if you want to specify a version of the model to use. The
+            # string is formatted the same way as `model_version`, with the addition
+            # of the version information:
+            #
+            # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION&quot;`
+        &quot;modelName&quot;: &quot;A String&quot;, # Use this field if you want to use the default version for the specified
+            # model. The string must use the following format:
+            #
+            # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL&quot;`
+        &quot;uri&quot;: &quot;A String&quot;, # Use this field if you want to specify a Google Cloud Storage path for
+            # the model to use.
+        &quot;maxWorkerCount&quot;: &quot;A String&quot;, # Optional. The maximum number of workers to be used for parallel processing.
+            # Defaults to 10 if not specified.
+        &quot;signatureName&quot;: &quot;A String&quot;, # Optional. The name of the signature defined in the SavedModel to use for
+            # this job. Please refer to
+            # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
+            # for information about how to use signatures.
+            #
+            # Defaults to
+            # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
+            # , which is &quot;serving_default&quot;.
+        &quot;outputPath&quot;: &quot;A String&quot;, # Required. The output Google Cloud Storage location.
+        &quot;outputDataFormat&quot;: &quot;A String&quot;, # Optional. Format of the output data files, defaults to JSON.
+        &quot;dataFormat&quot;: &quot;A String&quot;, # Required. The format of the input data files.
+        &quot;batchSize&quot;: &quot;A String&quot;, # Optional. Number of records per batch, defaults to 64.
+            # The service will buffer batch_size number of records in memory before
+            # invoking one Tensorflow prediction call internally. So take the record
+            # size and memory available into consideration when setting this parameter.
+        &quot;runtimeVersion&quot;: &quot;A String&quot;, # Optional. The AI Platform runtime version to use for this batch
+            # prediction. If not set, AI Platform will pick the runtime version used
+            # during the CreateVersion request for this model version, or choose the
+            # latest stable version when model version information is not available
+            # such as when the model is specified by uri.
+        &quot;inputPaths&quot;: [ # Required. The Cloud Storage location of the input data files. May contain
+            # &lt;a href=&quot;/storage/docs/gsutil/addlhelp/WildcardNames&quot;&gt;wildcards&lt;/a&gt;.
+          &quot;A String&quot;,
+        ],
+        &quot;region&quot;: &quot;A String&quot;, # Required. The Google Compute Engine region to run the prediction job in.
+            # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
+            # for AI Platform services.
+      },
+      &quot;errorMessage&quot;: &quot;A String&quot;, # Output only. The details of a failure or a cancellation.
       &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
           # prevent simultaneous updates of a job from overwriting each other.
           # It is strongly suggested that systems make use of the `etag` in the
@@ -1545,6 +1588,21 @@
           # command-line arguments and/or in a YAML configuration file referenced from
           # the --config command-line argument. For details, see the guide to [submitting
           # a training job](/ai-platform/training/docs/training-jobs).
+        &quot;workerCount&quot;: &quot;A String&quot;, # Optional. The number of worker replicas to use for the training job. Each
+            # replica in the cluster will be of the type specified in `worker_type`.
+            #
+            # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+            # set this value, you must also set `worker_type`.
+            #
+            # The default value is zero.
+        &quot;pythonModule&quot;: &quot;A String&quot;, # Required. The Python module name to run after installing the packages.
+        &quot;args&quot;: [ # Optional. Command-line arguments passed to the training application when it
+            # starts. If your job uses a custom container, then the arguments are passed
+            # to the container&#x27;s &lt;a class=&quot;external&quot; target=&quot;_blank&quot;
+            # href=&quot;https://docs.docker.com/engine/reference/builder/#entrypoint&quot;&gt;
+            # `ENTRYPOINT`&lt;/a&gt; command.
+          &quot;A String&quot;,
+        ],
         &quot;parameterServerConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
             #
             # You should only set `parameterServerConfig.acceleratorConfig` if
@@ -1556,29 +1614,6 @@
             # your parameter server. If `parameterServerConfig.imageUri` has not been
             # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
-          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-              # the one used in the custom container. This field is required if the replica
-              # is a TPU worker that uses a custom container. Otherwise, do not specify
-              # this field. This must be a [runtime version that currently supports
-              # training with
-              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-              #
-              # Note that the version of TensorFlow included in a runtime version may
-              # differ from the numbering of the runtime version itself, because it may
-              # have a different [patch
-              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-              # In this field, you must specify the runtime version (TensorFlow minor
-              # version). For example, if your custom container runs TensorFlow `1.x.y`,
-              # specify `1.x`.
-          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-              # If provided, it will override default ENTRYPOINT of the docker image.
-              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-              # It cannot be set if custom container image is
-              # not provided.
-              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-              # both cannot be set at the same time.
-            &quot;A String&quot;,
-          ],
           &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
               # Registry. Learn more about [configuring custom
               # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -1611,6 +1646,29 @@
             &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
             &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
           },
+          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+              # the one used in the custom container. This field is required if the replica
+              # is a TPU worker that uses a custom container. Otherwise, do not specify
+              # this field. This must be a [runtime version that currently supports
+              # training with
+              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+              #
+              # Note that the version of TensorFlow included in a runtime version may
+              # differ from the numbering of the runtime version itself, because it may
+              # have a different [patch
+              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+              # In this field, you must specify the runtime version (TensorFlow minor
+              # version). For example, if your custom container runs TensorFlow `1.x.y`,
+              # specify `1.x`.
+          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+              # If provided, it will override default ENTRYPOINT of the docker image.
+              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
+              # It cannot be set if custom container image is
+              # not provided.
+              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+              # both cannot be set at the same time.
+            &quot;A String&quot;,
+          ],
         },
         &quot;encryptionConfig&quot;: { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
             # protect resources created by a training job, instead of using Google&#x27;s
@@ -1627,14 +1685,8 @@
               # `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
         },
         &quot;hyperparameters&quot;: { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
-          &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # Optional. The TensorFlow summary tag name to use for optimizing trials. For
-              # current versions of TensorFlow, this tag name should exactly match what is
-              # shown in TensorBoard, including all scopes.  For versions of TensorFlow
-              # prior to 0.12, this should be only the tag passed to tf.Summary.
-              # By default, &quot;training/hptuning/metric&quot; will be used.
           &quot;params&quot;: [ # Required. The set of parameters to tune.
             { # Represents a single hyperparameter to optimize.
-              &quot;type&quot;: &quot;A String&quot;, # Required. The type of the parameter.
               &quot;categoricalValues&quot;: [ # Required if type is `CATEGORICAL`. The list of possible categories.
                 &quot;A String&quot;,
               ],
@@ -1657,6 +1709,7 @@
               &quot;maxValue&quot;: 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
                   # should be unset if type is `CATEGORICAL`. This value should be integers if
                   # type is `INTEGER`.
+              &quot;type&quot;: &quot;A String&quot;, # Required. The type of the parameter.
             },
           ],
           &quot;enableTrialEarlyStopping&quot;: True or False, # Optional. Indicates if the hyperparameter tuning job enables auto trial
@@ -1692,6 +1745,11 @@
               # tuning job.
               # Uses the default AI Platform hyperparameter tuning
               # algorithm if unspecified.
+          &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # Optional. The TensorFlow summary tag name to use for optimizing trials. For
+              # current versions of TensorFlow, this tag name should exactly match what is
+              # shown in TensorBoard, including all scopes.  For versions of TensorFlow
+              # prior to 0.12, this should be only the tag passed to tf.Summary.
+              # By default, &quot;training/hptuning/metric&quot; will be used.
         },
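The hyperparameters block closed just above configures hyperparameter tuning. A hedged sketch of such a spec, assuming a single DOUBLE parameter; parameterName, goal, and maxTrials are standard HyperparameterSpec fields that do not appear in these hunks and are treated here as assumptions:

# Hypothetical tuning spec; learning_rate is a placeholder parameter name.
hyperparameter_spec = {
    "hyperparameterMetricTag": "training/hptuning/metric",  # default tag
    "goal": "MAXIMIZE",           # assumed field, not shown in this diff
    "maxTrials": 10,              # assumed field, not shown in this diff
    "enableTrialEarlyStopping": True,
    "params": [
        {
            "parameterName": "learning_rate",
            "type": "DOUBLE",
            "minValue": 0.0001,
            "maxValue": 0.1,
        }
    ],
}

# This dict would sit under trainingInput["hyperparameters"] in the job body
# passed to projects().jobs().create(), as in the sketches above.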
         &quot;workerConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
             #
@@ -1704,29 +1762,6 @@
             # worker. If `workerConfig.imageUri` has not been set, AI Platform uses
             # the value of `masterConfig.imageUri`. Learn more about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
-          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-              # the one used in the custom container. This field is required if the replica
-              # is a TPU worker that uses a custom container. Otherwise, do not specify
-              # this field. This must be a [runtime version that currently supports
-              # training with
-              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-              #
-              # Note that the version of TensorFlow included in a runtime version may
-              # differ from the numbering of the runtime version itself, because it may
-              # have a different [patch
-              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-              # In this field, you must specify the runtime version (TensorFlow minor
-              # version). For example, if your custom container runs TensorFlow `1.x.y`,
-              # specify `1.x`.
-          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-              # If provided, it will override default ENTRYPOINT of the docker image.
-              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-              # It cannot be set if custom container image is
-              # not provided.
-              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-              # both cannot be set at the same time.
-            &quot;A String&quot;,
-          ],
           &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
               # Registry. Learn more about [configuring custom
               # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -1759,6 +1794,29 @@
             &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
             &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
           },
+          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+              # the one used in the custom container. This field is required if the replica
+              # is a TPU worker that uses a custom container. Otherwise, do not specify
+              # this field. This must be a [runtime version that currently supports
+              # training with
+              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+              #
+              # Note that the version of TensorFlow included in a runtime version may
+              # differ from the numbering of the runtime version itself, because it may
+              # have a different [patch
+              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+              # In this field, you must specify the runtime version (TensorFlow minor
+              # version). For example, if your custom container runs TensorFlow `1.x.y`,
+              # specify `1.x`.
+          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+              # If provided, it will override default ENTRYPOINT of the docker image.
+              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
+              # It cannot be set if custom container image is
+              # not provided.
+              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+              # both cannot be set at the same time.
+            &quot;A String&quot;,
+          ],
         },
         &quot;parameterServerCount&quot;: &quot;A String&quot;, # Optional. The number of parameter server replicas to use for the training
             # job. Each replica in the cluster will be of the type specified in
@@ -1851,8 +1909,6 @@
             #
             # This value must be present when `scaleTier` is set to `CUSTOM` and
             # `evaluatorCount` is greater than zero.
-        &quot;region&quot;: &quot;A String&quot;, # Required. The region to run the training job in. See the [available
-            # regions](/ai-platform/training/docs/regions) for AI Platform Training.
         &quot;workerType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
             # job&#x27;s worker nodes.
             #
@@ -1869,6 +1925,8 @@
             #
             # This value must be present when `scaleTier` is set to `CUSTOM` and
             # `workerCount` is greater than zero.
+        &quot;region&quot;: &quot;A String&quot;, # Required. The region to run the training job in. See the [available
+            # regions](/ai-platform/training/docs/regions) for AI Platform Training.
         &quot;parameterServerType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
             # job&#x27;s parameter server.
             #
@@ -1892,29 +1950,6 @@
             # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
             # about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
-          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-              # the one used in the custom container. This field is required if the replica
-              # is a TPU worker that uses a custom container. Otherwise, do not specify
-              # this field. This must be a [runtime version that currently supports
-              # training with
-              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-              #
-              # Note that the version of TensorFlow included in a runtime version may
-              # differ from the numbering of the runtime version itself, because it may
-              # have a different [patch
-              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-              # In this field, you must specify the runtime version (TensorFlow minor
-              # version). For example, if your custom container runs TensorFlow `1.x.y`,
-              # specify `1.x`.
-          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-              # If provided, it will override default ENTRYPOINT of the docker image.
-              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-              # It cannot be set if custom container image is
-              # not provided.
-              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-              # both cannot be set at the same time.
-            &quot;A String&quot;,
-          ],
           &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
               # Registry. Learn more about [configuring custom
               # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -1947,6 +1982,29 @@
             &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
             &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
           },
+          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+              # the one used in the custom container. This field is required if the replica
+              # is a TPU worker that uses a custom container. Otherwise, do not specify
+              # this field. This must be a [runtime version that currently supports
+              # training with
+              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+              #
+              # Note that the version of TensorFlow included in a runtime version may
+              # differ from the numbering of the runtime version itself, because it may
+              # have a different [patch
+              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+              # In this field, you must specify the runtime version (TensorFlow minor
+              # version). For example, if your custom container runs TensorFlow `1.x.y`,
+              # specify `1.x`.
+          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+              # If provided, it will override default ENTRYPOINT of the docker image.
+              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
+              # It cannot be set if custom container image is
+              # not provided.
+              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+              # both cannot be set at the same time.
+            &quot;A String&quot;,
+          ],
         },
         &quot;scaleTier&quot;: &quot;A String&quot;, # Required. Specifies the machine types, the number of replicas for workers
             # and parameter servers.
@@ -1968,16 +2026,6 @@
             #
             # Read more about the Python versions available for [each runtime
             # version](/ml-engine/docs/runtime-version-list).
-        &quot;network&quot;: &quot;A String&quot;, # Optional. The full name of the Google Compute Engine
-            # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
-            # is peered. For example, projects/12345/global/networks/myVPC. Format is of
-            # the form projects/{project}/global/networks/{network}. Where {project} is a
-            # project number, as in &#x27;12345&#x27;, and {network} is network name.&quot;.
-            #
-            # Private services access must already be configured for the network. If left
-            # unspecified, the Job is not peered with any network. Learn more -
-            # Connecting Job to user network over private
-            # IP.
         &quot;scheduling&quot;: { # All parameters related to scheduling of training jobs. # Optional. Scheduling options for a training job.
           &quot;maxWaitTime&quot;: &quot;A String&quot;,
           &quot;maxRunningTime&quot;: &quot;A String&quot;, # Optional. The maximum job running time, expressed in seconds. The field can
@@ -2004,6 +2052,16 @@
               #   ...
               # ```
         },
+        &quot;network&quot;: &quot;A String&quot;, # Optional. The full name of the Google Compute Engine
+            # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
+            # is peered. For example, projects/12345/global/networks/myVPC. Format is of
+            # the form projects/{project}/global/networks/{network}. Where {project} is a
+            # project number, as in &#x27;12345&#x27;, and {network} is network name.&quot;.
+            #
+            # Private services access must already be configured for the network. If left
+            # unspecified, the Job is not peered with any network. Learn more -
+            # Connecting Job to user network over private
+            # IP.
         &quot;evaluatorConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
             #
             # You should only set `evaluatorConfig.acceleratorConfig` if
@@ -2015,29 +2073,6 @@
             # your evaluator. If `evaluatorConfig.imageUri` has not been
             # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
-          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-              # the one used in the custom container. This field is required if the replica
-              # is a TPU worker that uses a custom container. Otherwise, do not specify
-              # this field. This must be a [runtime version that currently supports
-              # training with
-              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-              #
-              # Note that the version of TensorFlow included in a runtime version may
-              # differ from the numbering of the runtime version itself, because it may
-              # have a different [patch
-              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-              # In this field, you must specify the runtime version (TensorFlow minor
-              # version). For example, if your custom container runs TensorFlow `1.x.y`,
-              # specify `1.x`.
-          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-              # If provided, it will override default ENTRYPOINT of the docker image.
-              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-              # It cannot be set if custom container image is
-              # not provided.
-              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-              # both cannot be set at the same time.
-            &quot;A String&quot;,
-          ],
           &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
               # Registry. Learn more about [configuring custom
               # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -2070,6 +2105,29 @@
             &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
             &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
           },
+          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+              # the one used in the custom container. This field is required if the replica
+              # is a TPU worker that uses a custom container. Otherwise, do not specify
+              # this field. This must be a [runtime version that currently supports
+              # training with
+              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+              #
+              # Note that the version of TensorFlow included in a runtime version may
+              # differ from the numbering of the runtime version itself, because it may
+              # have a different [patch
+              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+              # In this field, you must specify the runtime version (TensorFlow minor
+              # version). For example, if your custom container runs TensorFlow `1.x.y`,
+              # specify `1.x`.
+          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+              # If provided, it will override default ENTRYPOINT of the docker image.
+              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
+              # It cannot be set if custom container image is
+              # not provided.
+              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+              # both cannot be set at the same time.
+            &quot;A String&quot;,
+          ],
         },
         &quot;useChiefInTfConfig&quot;: True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
             # variable when training with a custom container. Defaults to `false`. [Learn
@@ -2078,21 +2136,6 @@
             #
             # This field has no effect for training jobs that don&#x27;t use a custom
             # container.
-        &quot;workerCount&quot;: &quot;A String&quot;, # Optional. The number of worker replicas to use for the training job. Each
-            # replica in the cluster will be of the type specified in `worker_type`.
-            #
-            # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
-            # set this value, you must also set `worker_type`.
-            #
-            # The default value is zero.
-        &quot;pythonModule&quot;: &quot;A String&quot;, # Required. The Python module name to run after installing the packages.
-        &quot;args&quot;: [ # Optional. Command-line arguments passed to the training application when it
-            # starts. If your job uses a custom container, then the arguments are passed
-            # to the container&#x27;s &lt;a class=&quot;external&quot; target=&quot;_blank&quot;
-            # href=&quot;https://docs.docker.com/engine/reference/builder/#entrypoint&quot;&gt;
-            # `ENTRYPOINT`&lt;/a&gt; command.
-          &quot;A String&quot;,
-        ],
       },
       &quot;state&quot;: &quot;A String&quot;, # Output only. The detailed state of a job.
       &quot;jobId&quot;: &quot;A String&quot;, # Required. The user-specified id of the job.
@@ -2105,41 +2148,52 @@
         &quot;predictionCount&quot;: &quot;A String&quot;, # The number of generated predictions.
       },
       &quot;trainingOutput&quot;: { # Represents results of a training job. Output only. # The current training job result.
+        &quot;isBuiltInAlgorithmJob&quot;: True or False, # Whether this job is a built-in Algorithm job.
+        &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
+            # Only set for built-in algorithms jobs.
+          &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
+              # saves the trained model. Only set for successful jobs that don&#x27;t use
+              # hyperparameter tuning.
+          &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
+          &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
+              # trained.
+          &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
+        },
         &quot;trials&quot;: [ # Results for individual Hyperparameter trials.
             # Only set for hyperparameter tuning jobs.
           { # Represents the result of a single hyperparameter tuning trial from a
               # training job. The TrainingOutput object that is returned on successful
               # completion of a training job with hyperparameter tuning includes a list
               # of HyperparameterOutput objects, one for each successful trial.
-            &quot;allMetrics&quot;: [ # All recorded object metrics for this trial. This field is not currently
-                # populated.
-              { # An observed value of a metric.
-                &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
-                &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
-              },
-            ],
-            &quot;hyperparameters&quot;: { # The hyperparameters given to this trial.
-              &quot;a_key&quot;: &quot;A String&quot;,
-            },
             &quot;trialId&quot;: &quot;A String&quot;, # The trial id for these results.
             &quot;endTime&quot;: &quot;A String&quot;, # Output only. End time for the trial.
-            &quot;isTrialStoppedEarly&quot;: True or False, # True if the trial is stopped early.
             &quot;startTime&quot;: &quot;A String&quot;, # Output only. Start time for the trial.
+            &quot;isTrialStoppedEarly&quot;: True or False, # True if the trial is stopped early.
             &quot;finalMetric&quot;: { # An observed value of a metric. # The final objective metric seen for this trial.
-              &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
               &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
+              &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
             },
             &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
                 # Only set for trials of built-in algorithms jobs that have succeeded.
-              &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
               &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
                   # saves the trained model. Only set for successful jobs that don&#x27;t use
                   # hyperparameter tuning.
               &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
               &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
                   # trained.
+              &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
             },
             &quot;state&quot;: &quot;A String&quot;, # Output only. The detailed state of the trial.
+            &quot;allMetrics&quot;: [ # All recorded object metrics for this trial. This field is not currently
+                # populated.
+              { # An observed value of a metric.
+                &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
+                &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
+              },
+            ],
+            &quot;hyperparameters&quot;: { # The hyperparameters given to this trial.
+              &quot;a_key&quot;: &quot;A String&quot;,
+            },
           },
         ],
         &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # The TensorFlow summary tag name used for optimizing hyperparameter tuning
@@ -2150,17 +2204,6 @@
             # Only set for hyperparameter tuning jobs.
         &quot;isHyperparameterTuningJob&quot;: True or False, # Whether this job is a hyperparameter tuning job.
         &quot;consumedMLUnits&quot;: 3.14, # The amount of ML units consumed by the job.
-        &quot;isBuiltInAlgorithmJob&quot;: True or False, # Whether this job is a built-in Algorithm job.
-        &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
-            # Only set for built-in algorithms jobs.
-          &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
-          &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
-              # saves the trained model. Only set for successful jobs that don&#x27;t use
-              # hyperparameter tuning.
-          &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
-          &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
-              # trained.
-        },
       },
       &quot;createTime&quot;: &quot;A String&quot;, # Output only. When the job was created.
       &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your jobs.
@@ -2170,49 +2213,6 @@
           # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
         &quot;a_key&quot;: &quot;A String&quot;,
       },
-      &quot;predictionInput&quot;: { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
-        &quot;outputPath&quot;: &quot;A String&quot;, # Required. The output Google Cloud Storage location.
-        &quot;outputDataFormat&quot;: &quot;A String&quot;, # Optional. Format of the output data files, defaults to JSON.
-        &quot;dataFormat&quot;: &quot;A String&quot;, # Required. The format of the input data files.
-        &quot;batchSize&quot;: &quot;A String&quot;, # Optional. Number of records per batch, defaults to 64.
-            # The service will buffer batch_size number of records in memory before
-            # invoking one Tensorflow prediction call internally. So take the record
-            # size and memory available into consideration when setting this parameter.
-        &quot;runtimeVersion&quot;: &quot;A String&quot;, # Optional. The AI Platform runtime version to use for this batch
-            # prediction. If not set, AI Platform will pick the runtime version used
-            # during the CreateVersion request for this model version, or choose the
-            # latest stable version when model version information is not available
-            # such as when the model is specified by uri.
-        &quot;inputPaths&quot;: [ # Required. The Cloud Storage location of the input data files. May contain
-            # &lt;a href=&quot;/storage/docs/gsutil/addlhelp/WildcardNames&quot;&gt;wildcards&lt;/a&gt;.
-          &quot;A String&quot;,
-        ],
-        &quot;region&quot;: &quot;A String&quot;, # Required. The Google Compute Engine region to run the prediction job in.
-            # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
-            # for AI Platform services.
-        &quot;versionName&quot;: &quot;A String&quot;, # Use this field if you want to specify a version of the model to use. The
-            # string is formatted the same way as `model_version`, with the addition
-            # of the version information:
-            #
-            # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION&quot;`
-        &quot;modelName&quot;: &quot;A String&quot;, # Use this field if you want to use the default version for the specified
-            # model. The string must use the following format:
-            #
-            # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL&quot;`
-        &quot;uri&quot;: &quot;A String&quot;, # Use this field if you want to specify a Google Cloud Storage path for
-            # the model to use.
-        &quot;maxWorkerCount&quot;: &quot;A String&quot;, # Optional. The maximum number of workers to be used for parallel processing.
-            # Defaults to 10 if not specified.
-        &quot;signatureName&quot;: &quot;A String&quot;, # Optional. The name of the signature defined in the SavedModel to use for
-            # this job. Please refer to
-            # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
-            # for information about how to use signatures.
-            #
-            # Defaults to
-            # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
-            # , which is &quot;serving_default&quot;.
-      },
-      &quot;errorMessage&quot;: &quot;A String&quot;, # Output only. The details of a failure or a cancellation.
     }</pre>
 </div>
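
The Job schema above is easiest to read alongside a short client-side sketch. The snippet below is a minimal, hypothetical example of polling a job until it reaches a terminal state and then reading the `state`, `errorMessage`, and `trainingOutput` fields shown in the schema. The project and job ids are placeholders, it assumes the resource's `get` method (not fully shown in this excerpt), and it assumes Application Default Credentials are available to `googleapiclient.discovery.build`; treat it as an illustration rather than the canonical workflow.

    import time

    from googleapiclient import discovery

    ml = discovery.build('ml', 'v1')
    job_name = 'projects/YOUR_PROJECT/jobs/YOUR_JOB'  # placeholder resource name

    while True:
        # Fetch the Job resource whose fields are documented above.
        job = ml.projects().jobs().get(name=job_name).execute()
        if job.get('state') in ('SUCCEEDED', 'FAILED', 'CANCELLED'):  # terminal states
            break
        time.sleep(30)

    if job['state'] == 'SUCCEEDED':
        print('Consumed ML units:',
              job.get('trainingOutput', {}).get('consumedMLUnits'))
    else:
        print('Job ended in state %s: %s'
              % (job['state'], job.get('errorMessage')))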
 
@@ -2312,18 +2312,6 @@
       #
       # For a description of IAM and its features, see the
       # [IAM documentation](https://cloud.google.com/iam/docs/).
-    &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
-        # prevent simultaneous updates of a policy from overwriting each other.
-        # It is strongly suggested that systems make use of the `etag` in the
-        # read-modify-write cycle to perform policy updates in order to avoid race
-        # conditions: An `etag` is returned in the response to `getIamPolicy`, and
-        # systems are expected to put that etag in the request to `setIamPolicy` to
-        # ensure that their change will be applied to the same version of the policy.
-        #
-        # **Important:** If you use IAM Conditions, you must include the `etag` field
-        # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
-        # you to overwrite a version `3` policy with a version `1` policy, and all of
-        # the conditions in the version `3` policy are lost.
     &quot;version&quot;: 42, # Specifies the format of the policy.
         #
         # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
@@ -2479,15 +2467,15 @@
             # The exact variables and functions that may be referenced within an expression
             # are determined by the service that evaluates it. See the service
             # documentation for additional information.
+          &quot;description&quot;: &quot;A String&quot;, # Optional. Description of the expression. This is a longer text which
+              # describes the expression, e.g. when hovered over it in a UI.
+          &quot;expression&quot;: &quot;A String&quot;, # Textual representation of an expression in Common Expression Language
+              # syntax.
           &quot;title&quot;: &quot;A String&quot;, # Optional. Title for the expression, i.e. a short string describing
               # its purpose. This can be used e.g. in UIs which allow to enter the
               # expression.
           &quot;location&quot;: &quot;A String&quot;, # Optional. String indicating the location of the expression for error
               # reporting, e.g. a file name and a position in the file.
-          &quot;description&quot;: &quot;A String&quot;, # Optional. Description of the expression. This is a longer text which
-              # describes the expression, e.g. when hovered over it in a UI.
-          &quot;expression&quot;: &quot;A String&quot;, # Textual representation of an expression in Common Expression Language
-              # syntax.
         },
         &quot;members&quot;: [ # Specifies the identities requesting access for a Cloud Platform resource.
             # `members` can have the following values:
@@ -2538,11 +2526,23 @@
             # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
       },
     ],
+    &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
+        # prevent simultaneous updates of a policy from overwriting each other.
+        # It is strongly suggested that systems make use of the `etag` in the
+        # read-modify-write cycle to perform policy updates in order to avoid race
+        # conditions: An `etag` is returned in the response to `getIamPolicy`, and
+        # systems are expected to put that etag in the request to `setIamPolicy` to
+        # ensure that their change will be applied to the same version of the policy.
+        #
+        # **Important:** If you use IAM Conditions, you must include the `etag` field
+        # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+        # you to overwrite a version `3` policy with a version `1` policy, and all of
+        # the conditions in the version `3` policy are lost.
   }</pre>
 </div>
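
The `etag` guidance in the policy description above maps onto a read-modify-write loop in the generated client. The following is a hedged sketch, not the canonical recipe: the resource name and member are placeholders, and it assumes the resource's `getIamPolicy`/`setIamPolicy` methods with the flattened `options_requestedPolicyVersion` parameter style used by this client.

    from googleapiclient import discovery

    ml = discovery.build('ml', 'v1')
    resource = 'projects/YOUR_PROJECT/jobs/YOUR_JOB'  # placeholder resource name

    # Read: request policy version 3 so conditional bindings are returned intact.
    policy = ml.projects().jobs().getIamPolicy(
        resource=resource,
        options_requestedPolicyVersion=3,
    ).execute()

    # Modify locally; the returned policy already carries its `etag`.
    policy.setdefault('bindings', []).append({
        'role': 'roles/viewer',
        'members': ['user:alice@example.com'],  # placeholder member
    })

    # Write: echoing the policy (etag included) back lets the service reject a
    # concurrent update instead of silently overwriting it.
    ml.projects().jobs().setIamPolicy(
        resource=resource,
        body={'policy': policy},
    ).execute()

Because the policy returned by `getIamPolicy` carries its `etag`, sending it back unchanged inside the `setIamPolicy` body is what allows the service to detect and reject a write that races with another update.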
 
 <div class="method">
-    <code class="details" id="list">list(parent, pageToken=None, pageSize=None, filter=None, x__xgafv=None)</code>
+    <code class="details" id="list">list(parent, filter=None, pageToken=None, pageSize=None, x__xgafv=None)</code>
   <pre>Lists the jobs in the project.
 
 If there are no jobs that match the request parameters, the list
@@ -2550,15 +2550,6 @@
 
 Args:
   parent: string, Required. The name of the project for which to list jobs. (required)
-  pageToken: string, Optional. A page token to request the next page of results.
-
-You get the token from the `next_page_token` field of the response from
-the previous call.
-  pageSize: integer, Optional. The number of jobs to retrieve per &quot;page&quot; of results. If there
-are more remaining results than this number, the response message will
-contain a valid value in the `next_page_token` field.
-
-The default value is 20, and the maximum page size is 100.
   filter: string, Optional. Specifies the subset of jobs to retrieve.
 You can filter on the value of one or more attributes of the job object.
 For example, retrieve jobs with a job identifier that starts with &#x27;census&#x27;:
@@ -2568,6 +2559,15 @@
 AND state:FAILED&#x27;&lt;/code&gt;
 &lt;p&gt;For more examples, see the guide to
 &lt;a href=&quot;/ml-engine/docs/tensorflow/monitor-training&quot;&gt;monitoring jobs&lt;/a&gt;.
+  pageToken: string, Optional. A page token to request the next page of results.
+
+You get the token from the `next_page_token` field of the response from
+the previous call.
+  pageSize: integer, Optional. The number of jobs to retrieve per &quot;page&quot; of results. If there
+are more remaining results than this number, the response message will
+contain a valid value in the `next_page_token` field.
+
+The default value is 20, and the maximum page size is 100.
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
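
Before the response schema below, here is a hedged sketch of how the `filter`, `pageSize`, and `pageToken` parameters above are typically combined: keep re-issuing `list()` with the previous response's `nextPageToken` until it is absent. The project id and filter string are illustrative placeholders.

    from googleapiclient import discovery

    ml = discovery.build('ml', 'v1')
    parent = 'projects/YOUR_PROJECT'  # placeholder project

    page_token = None
    while True:
        response = ml.projects().jobs().list(
            parent=parent,
            filter='jobId:census*',  # illustrative filter in the syntax above
            pageSize=50,
            pageToken=page_token,
        ).execute()
        for job in response.get('jobs', []):
            print(job['jobId'], job.get('state'))
        page_token = response.get('nextPageToken')
        if not page_token:
            break

Passing `pageToken=None` on the first iteration is fine; the generated client omits parameters whose value is None.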
@@ -2579,6 +2579,49 @@
     { # Response message for the ListJobs method.
     &quot;jobs&quot;: [ # The list of jobs.
       { # Represents a training or prediction job.
+          &quot;predictionInput&quot;: { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
+            &quot;versionName&quot;: &quot;A String&quot;, # Use this field if you want to specify a version of the model to use. The
+                # string is formatted the same way as `model_version`, with the addition
+                # of the version information:
+                #
+                # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION&quot;`
+            &quot;modelName&quot;: &quot;A String&quot;, # Use this field if you want to use the default version for the specified
+                # model. The string must use the following format:
+                #
+                # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL&quot;`
+            &quot;uri&quot;: &quot;A String&quot;, # Use this field if you want to specify a Google Cloud Storage path for
+                # the model to use.
+            &quot;maxWorkerCount&quot;: &quot;A String&quot;, # Optional. The maximum number of workers to be used for parallel processing.
+                # Defaults to 10 if not specified.
+            &quot;signatureName&quot;: &quot;A String&quot;, # Optional. The name of the signature defined in the SavedModel to use for
+                # this job. Please refer to
+                # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
+                # for information about how to use signatures.
+                #
+                # Defaults to
+                # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
+                # , which is &quot;serving_default&quot;.
+            &quot;outputPath&quot;: &quot;A String&quot;, # Required. The output Google Cloud Storage location.
+            &quot;outputDataFormat&quot;: &quot;A String&quot;, # Optional. Format of the output data files, defaults to JSON.
+            &quot;dataFormat&quot;: &quot;A String&quot;, # Required. The format of the input data files.
+            &quot;batchSize&quot;: &quot;A String&quot;, # Optional. Number of records per batch, defaults to 64.
+                # The service will buffer batch_size number of records in memory before
+                # invoking one Tensorflow prediction call internally. So take the record
+                # size and memory available into consideration when setting this parameter.
+            &quot;runtimeVersion&quot;: &quot;A String&quot;, # Optional. The AI Platform runtime version to use for this batch
+                # prediction. If not set, AI Platform will pick the runtime version used
+                # during the CreateVersion request for this model version, or choose the
+                # latest stable version when model version information is not available
+                # such as when the model is specified by uri.
+            &quot;inputPaths&quot;: [ # Required. The Cloud Storage location of the input data files. May contain
+                # &lt;a href=&quot;/storage/docs/gsutil/addlhelp/WildcardNames&quot;&gt;wildcards&lt;/a&gt;.
+              &quot;A String&quot;,
+            ],
+            &quot;region&quot;: &quot;A String&quot;, # Required. The Google Compute Engine region to run the prediction job in.
+                # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
+                # for AI Platform services.
+          },
+          &quot;errorMessage&quot;: &quot;A String&quot;, # Output only. The details of a failure or a cancellation.
           &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
               # prevent simultaneous updates of a job from overwriting each other.
               # It is strongly suggested that systems make use of the `etag` in the
@@ -2591,6 +2634,21 @@
               # command-line arguments and/or in a YAML configuration file referenced from
               # the --config command-line argument. For details, see the guide to [submitting
               # a training job](/ai-platform/training/docs/training-jobs).
+            &quot;workerCount&quot;: &quot;A String&quot;, # Optional. The number of worker replicas to use for the training job. Each
+                # replica in the cluster will be of the type specified in `worker_type`.
+                #
+                # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+                # set this value, you must also set `worker_type`.
+                #
+                # The default value is zero.
+            &quot;pythonModule&quot;: &quot;A String&quot;, # Required. The Python module name to run after installing the packages.
+            &quot;args&quot;: [ # Optional. Command-line arguments passed to the training application when it
+                # starts. If your job uses a custom container, then the arguments are passed
+                # to the container&#x27;s &lt;a class=&quot;external&quot; target=&quot;_blank&quot;
+                # href=&quot;https://docs.docker.com/engine/reference/builder/#entrypoint&quot;&gt;
+                # `ENTRYPOINT`&lt;/a&gt; command.
+              &quot;A String&quot;,
+            ],
             &quot;parameterServerConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
                 #
                 # You should only set `parameterServerConfig.acceleratorConfig` if
@@ -2602,29 +2660,6 @@
                 # your parameter server. If `parameterServerConfig.imageUri` has not been
                 # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
                 # containers](/ai-platform/training/docs/distributed-training-containers).
-              &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-                  # the one used in the custom container. This field is required if the replica
-                  # is a TPU worker that uses a custom container. Otherwise, do not specify
-                  # this field. This must be a [runtime version that currently supports
-                  # training with
-                  # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-                  #
-                  # Note that the version of TensorFlow included in a runtime version may
-                  # differ from the numbering of the runtime version itself, because it may
-                  # have a different [patch
-                  # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-                  # In this field, you must specify the runtime version (TensorFlow minor
-                  # version). For example, if your custom container runs TensorFlow `1.x.y`,
-                  # specify `1.x`.
-              &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-                  # If provided, it will override default ENTRYPOINT of the docker image.
-                  # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-                  # It cannot be set if custom container image is
-                  # not provided.
-                  # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-                  # both cannot be set at the same time.
-                &quot;A String&quot;,
-              ],
               &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
                   # Registry. Learn more about [configuring custom
                   # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -2657,6 +2692,29 @@
                 &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
                 &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
               },
+              &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+                  # the one used in the custom container. This field is required if the replica
+                  # is a TPU worker that uses a custom container. Otherwise, do not specify
+                  # this field. This must be a [runtime version that currently supports
+                  # training with
+                  # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+                  #
+                  # Note that the version of TensorFlow included in a runtime version may
+                  # differ from the numbering of the runtime version itself, because it may
+                  # have a different [patch
+                  # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+                  # In this field, you must specify the runtime version (TensorFlow minor
+                  # version). For example, if your custom container runs TensorFlow `1.x.y`,
+                  # specify `1.x`.
+              &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+                  # If provided, it will override default ENTRYPOINT of the docker image.
+                  # If not provided, the docker image&#x27;s ENTRYPOINT is used.
+                  # It cannot be set if custom container image is
+                  # not provided.
+                  # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+                  # both cannot be set at the same time.
+                &quot;A String&quot;,
+              ],
             },
             &quot;encryptionConfig&quot;: { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
                 # protect resources created by a training job, instead of using Google&#x27;s
@@ -2673,14 +2731,8 @@
                   # `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
             },
             &quot;hyperparameters&quot;: { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
-              &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # Optional. The TensorFlow summary tag name to use for optimizing trials. For
-                  # current versions of TensorFlow, this tag name should exactly match what is
-                  # shown in TensorBoard, including all scopes.  For versions of TensorFlow
-                  # prior to 0.12, this should be only the tag passed to tf.Summary.
-                  # By default, &quot;training/hptuning/metric&quot; will be used.
               &quot;params&quot;: [ # Required. The set of parameters to tune.
                 { # Represents a single hyperparameter to optimize.
-                  &quot;type&quot;: &quot;A String&quot;, # Required. The type of the parameter.
                   &quot;categoricalValues&quot;: [ # Required if type is `CATEGORICAL`. The list of possible categories.
                     &quot;A String&quot;,
                   ],
@@ -2703,6 +2755,7 @@
                   &quot;maxValue&quot;: 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
                       # should be unset if type is `CATEGORICAL`. This value should be integers if
                       # type is `INTEGER`.
+                  &quot;type&quot;: &quot;A String&quot;, # Required. The type of the parameter.
                 },
               ],
               &quot;enableTrialEarlyStopping&quot;: True or False, # Optional. Indicates if the hyperparameter tuning job enables auto trial
@@ -2738,6 +2791,11 @@
                   # tuning job.
                   # Uses the default AI Platform hyperparameter tuning
                   # algorithm if unspecified.
+              &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # Optional. The TensorFlow summary tag name to use for optimizing trials. For
+                  # current versions of TensorFlow, this tag name should exactly match what is
+                  # shown in TensorBoard, including all scopes.  For versions of TensorFlow
+                  # prior to 0.12, this should be only the tag passed to tf.Summary.
+                  # By default, &quot;training/hptuning/metric&quot; will be used.
             },
             &quot;workerConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
                 #
@@ -2750,29 +2808,6 @@
                 # worker. If `workerConfig.imageUri` has not been set, AI Platform uses
                 # the value of `masterConfig.imageUri`. Learn more about [configuring custom
                 # containers](/ai-platform/training/docs/distributed-training-containers).
-              &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-                  # the one used in the custom container. This field is required if the replica
-                  # is a TPU worker that uses a custom container. Otherwise, do not specify
-                  # this field. This must be a [runtime version that currently supports
-                  # training with
-                  # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-                  #
-                  # Note that the version of TensorFlow included in a runtime version may
-                  # differ from the numbering of the runtime version itself, because it may
-                  # have a different [patch
-                  # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-                  # In this field, you must specify the runtime version (TensorFlow minor
-                  # version). For example, if your custom container runs TensorFlow `1.x.y`,
-                  # specify `1.x`.
-              &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-                  # If provided, it will override default ENTRYPOINT of the docker image.
-                  # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-                  # It cannot be set if custom container image is
-                  # not provided.
-                  # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-                  # both cannot be set at the same time.
-                &quot;A String&quot;,
-              ],
               &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
                   # Registry. Learn more about [configuring custom
                   # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -2805,6 +2840,29 @@
                 &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
                 &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
               },
+              &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+                  # the one used in the custom container. This field is required if the replica
+                  # is a TPU worker that uses a custom container. Otherwise, do not specify
+                  # this field. This must be a [runtime version that currently supports
+                  # training with
+                  # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+                  #
+                  # Note that the version of TensorFlow included in a runtime version may
+                  # differ from the numbering of the runtime version itself, because it may
+                  # have a different [patch
+                  # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+                  # In this field, you must specify the runtime version (TensorFlow minor
+                  # version). For example, if your custom container runs TensorFlow `1.x.y`,
+                  # specify `1.x`.
+              &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+                  # If provided, it will override default ENTRYPOINT of the docker image.
+                  # If not provided, the docker image&#x27;s ENTRYPOINT is used.
+                  # It cannot be set if custom container image is
+                  # not provided.
+                  # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+                  # both cannot be set at the same time.
+                &quot;A String&quot;,
+              ],
             },
             &quot;parameterServerCount&quot;: &quot;A String&quot;, # Optional. The number of parameter server replicas to use for the training
                 # job. Each replica in the cluster will be of the type specified in
@@ -2897,8 +2955,6 @@
                 #
                 # This value must be present when `scaleTier` is set to `CUSTOM` and
                 # `evaluatorCount` is greater than zero.
-            &quot;region&quot;: &quot;A String&quot;, # Required. The region to run the training job in. See the [available
-                # regions](/ai-platform/training/docs/regions) for AI Platform Training.
             &quot;workerType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
                 # job&#x27;s worker nodes.
                 #
@@ -2915,6 +2971,8 @@
                 #
                 # This value must be present when `scaleTier` is set to `CUSTOM` and
                 # `workerCount` is greater than zero.
+            &quot;region&quot;: &quot;A String&quot;, # Required. The region to run the training job in. See the [available
+                # regions](/ai-platform/training/docs/regions) for AI Platform Training.
             &quot;parameterServerType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
                 # job&#x27;s parameter server.
                 #
@@ -2938,29 +2996,6 @@
                 # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
                 # about [configuring custom
                 # containers](/ai-platform/training/docs/distributed-training-containers).
-              &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-                  # the one used in the custom container. This field is required if the replica
-                  # is a TPU worker that uses a custom container. Otherwise, do not specify
-                  # this field. This must be a [runtime version that currently supports
-                  # training with
-                  # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-                  #
-                  # Note that the version of TensorFlow included in a runtime version may
-                  # differ from the numbering of the runtime version itself, because it may
-                  # have a different [patch
-                  # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-                  # In this field, you must specify the runtime version (TensorFlow minor
-                  # version). For example, if your custom container runs TensorFlow `1.x.y`,
-                  # specify `1.x`.
-              &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-                  # If provided, it will override default ENTRYPOINT of the docker image.
-                  # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-                  # It cannot be set if custom container image is
-                  # not provided.
-                  # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-                  # both cannot be set at the same time.
-                &quot;A String&quot;,
-              ],
               &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
                   # Registry. Learn more about [configuring custom
                   # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -2993,6 +3028,29 @@
                 &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
                 &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
               },
+              &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+                  # the one used in the custom container. This field is required if the replica
+                  # is a TPU worker that uses a custom container. Otherwise, do not specify
+                  # this field. This must be a [runtime version that currently supports
+                  # training with
+                  # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+                  #
+                  # Note that the version of TensorFlow included in a runtime version may
+                  # differ from the numbering of the runtime version itself, because it may
+                  # have a different [patch
+                  # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+                  # In this field, you must specify the runtime version (TensorFlow minor
+                  # version). For example, if your custom container runs TensorFlow `1.x.y`,
+                  # specify `1.x`.
+              &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+                  # If provided, it will override default ENTRYPOINT of the docker image.
+                  # If not provided, the docker image&#x27;s ENTRYPOINT is used.
+                  # It cannot be set if custom container image is
+                  # not provided.
+                  # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+                  # both cannot be set at the same time.
+                &quot;A String&quot;,
+              ],
             },
             &quot;scaleTier&quot;: &quot;A String&quot;, # Required. Specifies the machine types, the number of replicas for workers
                 # and parameter servers.
@@ -3014,16 +3072,6 @@
                 #
                 # Read more about the Python versions available for [each runtime
                 # version](/ml-engine/docs/runtime-version-list).
-            &quot;network&quot;: &quot;A String&quot;, # Optional. The full name of the Google Compute Engine
-                # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
-                # is peered. For example, projects/12345/global/networks/myVPC. Format is of
-                # the form projects/{project}/global/networks/{network}. Where {project} is a
-                # project number, as in &#x27;12345&#x27;, and {network} is network name.&quot;.
-                #
-                # Private services access must already be configured for the network. If left
-                # unspecified, the Job is not peered with any network. Learn more -
-                # Connecting Job to user network over private
-                # IP.
             &quot;scheduling&quot;: { # All parameters related to scheduling of training jobs. # Optional. Scheduling options for a training job.
               &quot;maxWaitTime&quot;: &quot;A String&quot;,
               &quot;maxRunningTime&quot;: &quot;A String&quot;, # Optional. The maximum job running time, expressed in seconds. The field can
@@ -3050,6 +3098,16 @@
                   #   ...
                   # ```
             },
+            &quot;network&quot;: &quot;A String&quot;, # Optional. The full name of the Google Compute Engine
+                # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
+                # is peered. For example, projects/12345/global/networks/myVPC. The format is
+                # projects/{project}/global/networks/{network}, where {project} is a
+                # project number, as in &#x27;12345&#x27;, and {network} is the network name.
+                #
+                # Private services access must already be configured for the network. If left
+                # unspecified, the Job is not peered with any network. Learn more about
+                # connecting a Job to a user network over private IP.
             &quot;evaluatorConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
                 #
                 # You should only set `evaluatorConfig.acceleratorConfig` if
@@ -3061,29 +3119,6 @@
                 # your evaluator. If `evaluatorConfig.imageUri` has not been
                 # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
                 # containers](/ai-platform/training/docs/distributed-training-containers).
-              &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-                  # the one used in the custom container. This field is required if the replica
-                  # is a TPU worker that uses a custom container. Otherwise, do not specify
-                  # this field. This must be a [runtime version that currently supports
-                  # training with
-                  # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-                  #
-                  # Note that the version of TensorFlow included in a runtime version may
-                  # differ from the numbering of the runtime version itself, because it may
-                  # have a different [patch
-                  # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-                  # In this field, you must specify the runtime version (TensorFlow minor
-                  # version). For example, if your custom container runs TensorFlow `1.x.y`,
-                  # specify `1.x`.
-              &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-                  # If provided, it will override default ENTRYPOINT of the docker image.
-                  # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-                  # It cannot be set if custom container image is
-                  # not provided.
-                  # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-                  # both cannot be set at the same time.
-                &quot;A String&quot;,
-              ],
               &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
                   # Registry. Learn more about [configuring custom
                   # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -3116,6 +3151,29 @@
                 &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
                 &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
               },
+              &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+                  # the one used in the custom container. This field is required if the replica
+                  # is a TPU worker that uses a custom container. Otherwise, do not specify
+                  # this field. This must be a [runtime version that currently supports
+                  # training with
+                  # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+                  #
+                  # Note that the version of TensorFlow included in a runtime version may
+                  # differ from the numbering of the runtime version itself, because it may
+                  # have a different [patch
+                  # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+                  # In this field, you must specify the runtime version (TensorFlow minor
+                  # version). For example, if your custom container runs TensorFlow `1.x.y`,
+                  # specify `1.x`.
+              &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+                  # If provided, it will override default ENTRYPOINT of the docker image.
+                  # If not provided, the docker image&#x27;s ENTRYPOINT is used.
+                  # It cannot be set if custom container image is
+                  # not provided.
+                  # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+                  # both cannot be set at the same time.
+                &quot;A String&quot;,
+              ],
             },
             &quot;useChiefInTfConfig&quot;: True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
                 # variable when training with a custom container. Defaults to `false`. [Learn
@@ -3124,21 +3182,6 @@
                 #
                 # This field has no effect for training jobs that don&#x27;t use a custom
                 # container.
-            &quot;workerCount&quot;: &quot;A String&quot;, # Optional. The number of worker replicas to use for the training job. Each
-                # replica in the cluster will be of the type specified in `worker_type`.
-                #
-                # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
-                # set this value, you must also set `worker_type`.
-                #
-                # The default value is zero.
-            &quot;pythonModule&quot;: &quot;A String&quot;, # Required. The Python module name to run after installing the packages.
-            &quot;args&quot;: [ # Optional. Command-line arguments passed to the training application when it
-                # starts. If your job uses a custom container, then the arguments are passed
-                # to the container&#x27;s &lt;a class=&quot;external&quot; target=&quot;_blank&quot;
-                # href=&quot;https://docs.docker.com/engine/reference/builder/#entrypoint&quot;&gt;
-                # `ENTRYPOINT`&lt;/a&gt; command.
-              &quot;A String&quot;,
-            ],
           },
           &quot;state&quot;: &quot;A String&quot;, # Output only. The detailed state of a job.
           &quot;jobId&quot;: &quot;A String&quot;, # Required. The user-specified id of the job.
@@ -3151,41 +3194,52 @@
             &quot;predictionCount&quot;: &quot;A String&quot;, # The number of generated predictions.
           },
           &quot;trainingOutput&quot;: { # Represents results of a training job. Output only. # The current training job result.
+            &quot;isBuiltInAlgorithmJob&quot;: True or False, # Whether this job is a built-in Algorithm job.
+            &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
+                # Only set for built-in algorithms jobs.
+              &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
+                  # saves the trained model. Only set for successful jobs that don&#x27;t use
+                  # hyperparameter tuning.
+              &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
+              &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
+                  # trained.
+              &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
+            },
             &quot;trials&quot;: [ # Results for individual Hyperparameter trials.
                 # Only set for hyperparameter tuning jobs.
               { # Represents the result of a single hyperparameter tuning trial from a
                   # training job. The TrainingOutput object that is returned on successful
                   # completion of a training job with hyperparameter tuning includes a list
                   # of HyperparameterOutput objects, one for each successful trial.
-                &quot;allMetrics&quot;: [ # All recorded object metrics for this trial. This field is not currently
-                    # populated.
-                  { # An observed value of a metric.
-                    &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
-                    &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
-                  },
-                ],
-                &quot;hyperparameters&quot;: { # The hyperparameters given to this trial.
-                  &quot;a_key&quot;: &quot;A String&quot;,
-                },
                 &quot;trialId&quot;: &quot;A String&quot;, # The trial id for these results.
                 &quot;endTime&quot;: &quot;A String&quot;, # Output only. End time for the trial.
-                &quot;isTrialStoppedEarly&quot;: True or False, # True if the trial is stopped early.
                 &quot;startTime&quot;: &quot;A String&quot;, # Output only. Start time for the trial.
+                &quot;isTrialStoppedEarly&quot;: True or False, # True if the trial is stopped early.
                 &quot;finalMetric&quot;: { # An observed value of a metric. # The final objective metric seen for this trial.
-                  &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
                   &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
+                  &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
                 },
                 &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
                     # Only set for trials of built-in algorithms jobs that have succeeded.
-                  &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
                   &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
                       # saves the trained model. Only set for successful jobs that don&#x27;t use
                       # hyperparameter tuning.
                   &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
                   &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
                       # trained.
+                  &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
                 },
                 &quot;state&quot;: &quot;A String&quot;, # Output only. The detailed state of the trial.
+                &quot;allMetrics&quot;: [ # All recorded object metrics for this trial. This field is not currently
+                    # populated.
+                  { # An observed value of a metric.
+                    &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
+                    &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
+                  },
+                ],
+                &quot;hyperparameters&quot;: { # The hyperparameters given to this trial.
+                  &quot;a_key&quot;: &quot;A String&quot;,
+                },
               },
             ],
             &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # The TensorFlow summary tag name used for optimizing hyperparameter tuning
@@ -3196,17 +3250,6 @@
                 # Only set for hyperparameter tuning jobs.
             &quot;isHyperparameterTuningJob&quot;: True or False, # Whether this job is a hyperparameter tuning job.
             &quot;consumedMLUnits&quot;: 3.14, # The amount of ML units consumed by the job.
-            &quot;isBuiltInAlgorithmJob&quot;: True or False, # Whether this job is a built-in Algorithm job.
-            &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
-                # Only set for built-in algorithms jobs.
-              &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
-              &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
-                  # saves the trained model. Only set for successful jobs that don&#x27;t use
-                  # hyperparameter tuning.
-              &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
-              &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
-                  # trained.
-            },
           },
           &quot;createTime&quot;: &quot;A String&quot;, # Output only. When the job was created.
           &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your jobs.
@@ -3216,49 +3259,6 @@
               # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
             &quot;a_key&quot;: &quot;A String&quot;,
           },
-          &quot;predictionInput&quot;: { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
-            &quot;outputPath&quot;: &quot;A String&quot;, # Required. The output Google Cloud Storage location.
-            &quot;outputDataFormat&quot;: &quot;A String&quot;, # Optional. Format of the output data files, defaults to JSON.
-            &quot;dataFormat&quot;: &quot;A String&quot;, # Required. The format of the input data files.
-            &quot;batchSize&quot;: &quot;A String&quot;, # Optional. Number of records per batch, defaults to 64.
-                # The service will buffer batch_size number of records in memory before
-                # invoking one Tensorflow prediction call internally. So take the record
-                # size and memory available into consideration when setting this parameter.
-            &quot;runtimeVersion&quot;: &quot;A String&quot;, # Optional. The AI Platform runtime version to use for this batch
-                # prediction. If not set, AI Platform will pick the runtime version used
-                # during the CreateVersion request for this model version, or choose the
-                # latest stable version when model version information is not available
-                # such as when the model is specified by uri.
-            &quot;inputPaths&quot;: [ # Required. The Cloud Storage location of the input data files. May contain
-                # &lt;a href=&quot;/storage/docs/gsutil/addlhelp/WildcardNames&quot;&gt;wildcards&lt;/a&gt;.
-              &quot;A String&quot;,
-            ],
-            &quot;region&quot;: &quot;A String&quot;, # Required. The Google Compute Engine region to run the prediction job in.
-                # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
-                # for AI Platform services.
-            &quot;versionName&quot;: &quot;A String&quot;, # Use this field if you want to specify a version of the model to use. The
-                # string is formatted the same way as `model_version`, with the addition
-                # of the version information:
-                #
-                # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION&quot;`
-            &quot;modelName&quot;: &quot;A String&quot;, # Use this field if you want to use the default version for the specified
-                # model. The string must use the following format:
-                #
-                # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL&quot;`
-            &quot;uri&quot;: &quot;A String&quot;, # Use this field if you want to specify a Google Cloud Storage path for
-                # the model to use.
-            &quot;maxWorkerCount&quot;: &quot;A String&quot;, # Optional. The maximum number of workers to be used for parallel processing.
-                # Defaults to 10 if not specified.
-            &quot;signatureName&quot;: &quot;A String&quot;, # Optional. The name of the signature defined in the SavedModel to use for
-                # this job. Please refer to
-                # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
-                # for information about how to use signatures.
-                #
-                # Defaults to
-                # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
-                # , which is &quot;serving_default&quot;.
-          },
-          &quot;errorMessage&quot;: &quot;A String&quot;, # Output only. The details of a failure or a cancellation.
         },
     ],
     &quot;nextPageToken&quot;: &quot;A String&quot;, # Optional. Pass this token as the `page_token` field of the request for a
@@ -3292,6 +3292,49 @@
     The object takes the form of:
 
 { # Represents a training or prediction job.
+    &quot;predictionInput&quot;: { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
+      &quot;versionName&quot;: &quot;A String&quot;, # Use this field if you want to specify a version of the model to use. The
+          # string is formatted the same way as `model_version`, with the addition
+          # of the version information:
+          #
+          # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION&quot;`
+      &quot;modelName&quot;: &quot;A String&quot;, # Use this field if you want to use the default version for the specified
+          # model. The string must use the following format:
+          #
+          # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL&quot;`
+      &quot;uri&quot;: &quot;A String&quot;, # Use this field if you want to specify a Google Cloud Storage path for
+          # the model to use.
+      &quot;maxWorkerCount&quot;: &quot;A String&quot;, # Optional. The maximum number of workers to be used for parallel processing.
+          # Defaults to 10 if not specified.
+      &quot;signatureName&quot;: &quot;A String&quot;, # Optional. The name of the signature defined in the SavedModel to use for
+          # this job. Please refer to
+          # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
+          # for information about how to use signatures.
+          #
+          # Defaults to
+          # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
+          # , which is &quot;serving_default&quot;.
+      &quot;outputPath&quot;: &quot;A String&quot;, # Required. The output Google Cloud Storage location.
+      &quot;outputDataFormat&quot;: &quot;A String&quot;, # Optional. Format of the output data files, defaults to JSON.
+      &quot;dataFormat&quot;: &quot;A String&quot;, # Required. The format of the input data files.
+      &quot;batchSize&quot;: &quot;A String&quot;, # Optional. Number of records per batch, defaults to 64.
+          # The service buffers batch_size records in memory before invoking one
+          # TensorFlow prediction call internally, so take the record size and the
+          # available memory into account when setting this parameter.
+      &quot;runtimeVersion&quot;: &quot;A String&quot;, # Optional. The AI Platform runtime version to use for this batch
+          # prediction. If not set, AI Platform will pick the runtime version used
+          # during the CreateVersion request for this model version, or choose the
+          # latest stable version when model version information is not available
+          # such as when the model is specified by uri.
+      &quot;inputPaths&quot;: [ # Required. The Cloud Storage location of the input data files. May contain
+          # &lt;a href=&quot;/storage/docs/gsutil/addlhelp/WildcardNames&quot;&gt;wildcards&lt;/a&gt;.
+        &quot;A String&quot;,
+      ],
+      &quot;region&quot;: &quot;A String&quot;, # Required. The Google Compute Engine region to run the prediction job in.
+          # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
+          # for AI Platform services.
+    },
+    &quot;errorMessage&quot;: &quot;A String&quot;, # Output only. The details of a failure or a cancellation.
     &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
         # prevent simultaneous updates of a job from overwriting each other.
         # It is strongly suggested that systems make use of the `etag` in the
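As a concrete reading of the `predictionInput` fields documented above, here is an illustrative request body sketch. The project, model, and bucket names are placeholders, the job ID is hypothetical, and exactly one of `modelName`, `versionName`, or `uri` should be set.

```python
# Illustrative only: placeholder project, model, and bucket names.
prediction_input = {
    # Use exactly one of modelName, versionName, or uri.
    'modelName': 'projects/YOUR_PROJECT/models/YOUR_MODEL',
    'dataFormat': 'JSON',                         # required: format of the input files
    'inputPaths': ['gs://your-bucket/inputs/*'],  # required: may contain wildcards
    'outputPath': 'gs://your-bucket/outputs/',    # required: output location
    'region': 'us-central1',                      # required: region to run the job in
    'maxWorkerCount': '10',                       # optional: defaults to 10
    'batchSize': '64',                            # optional: defaults to 64
}

job_body = {
    'jobId': 'my_batch_prediction_job',  # hypothetical job id
    'predictionInput': prediction_input,
}
```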
@@ -3304,6 +3347,21 @@
         # command-line arguments and/or in a YAML configuration file referenced from
         # the --config command-line argument. For details, see the guide to [submitting
         # a training job](/ai-platform/training/docs/training-jobs).
+      &quot;workerCount&quot;: &quot;A String&quot;, # Optional. The number of worker replicas to use for the training job. Each
+          # replica in the cluster will be of the type specified in `worker_type`.
+          #
+          # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+          # set this value, you must also set `worker_type`.
+          #
+          # The default value is zero.
+      &quot;pythonModule&quot;: &quot;A String&quot;, # Required. The Python module name to run after installing the packages.
+      &quot;args&quot;: [ # Optional. Command-line arguments passed to the training application when it
+          # starts. If your job uses a custom container, then the arguments are passed
+          # to the container&#x27;s &lt;a class=&quot;external&quot; target=&quot;_blank&quot;
+          # href=&quot;https://docs.docker.com/engine/reference/builder/#entrypoint&quot;&gt;
+          # `ENTRYPOINT`&lt;/a&gt; command.
+        &quot;A String&quot;,
+      ],
       &quot;parameterServerConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
           #
           # You should only set `parameterServerConfig.acceleratorConfig` if
@@ -3315,29 +3373,6 @@
           # your parameter server. If `parameterServerConfig.imageUri` has not been
           # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
           # containers](/ai-platform/training/docs/distributed-training-containers).
-        &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-            # the one used in the custom container. This field is required if the replica
-            # is a TPU worker that uses a custom container. Otherwise, do not specify
-            # this field. This must be a [runtime version that currently supports
-            # training with
-            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-            #
-            # Note that the version of TensorFlow included in a runtime version may
-            # differ from the numbering of the runtime version itself, because it may
-            # have a different [patch
-            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-            # In this field, you must specify the runtime version (TensorFlow minor
-            # version). For example, if your custom container runs TensorFlow `1.x.y`,
-            # specify `1.x`.
-        &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-            # If provided, it will override default ENTRYPOINT of the docker image.
-            # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-            # It cannot be set if custom container image is
-            # not provided.
-            # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-            # both cannot be set at the same time.
-          &quot;A String&quot;,
-        ],
         &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
             # Registry. Learn more about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -3370,6 +3405,29 @@
           &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
           &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
         },
+        &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+            # the one used in the custom container. This field is required if the replica
+            # is a TPU worker that uses a custom container. Otherwise, do not specify
+            # this field. This must be a [runtime version that currently supports
+            # training with
+            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+            #
+            # Note that the version of TensorFlow included in a runtime version may
+            # differ from the numbering of the runtime version itself, because it may
+            # have a different [patch
+            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+            # In this field, you must specify the runtime version (TensorFlow minor
+            # version). For example, if your custom container runs TensorFlow `1.x.y`,
+            # specify `1.x`.
+        &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+            # If provided, it overrides the default ENTRYPOINT of the Docker image.
+            # If not provided, the Docker image&#x27;s ENTRYPOINT is used.
+            # It cannot be set if a custom container image is not provided.
+            # Note that this field and [TrainingInput.args] are mutually exclusive:
+            # both cannot be set at the same time.
+          &quot;A String&quot;,
+        ],
       },
       &quot;encryptionConfig&quot;: { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
           # protect resources created by a training job, instead of using Google&#x27;s
@@ -3386,14 +3444,8 @@
             # `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
       },
       &quot;hyperparameters&quot;: { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
-        &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # Optional. The TensorFlow summary tag name to use for optimizing trials. For
-            # current versions of TensorFlow, this tag name should exactly match what is
-            # shown in TensorBoard, including all scopes.  For versions of TensorFlow
-            # prior to 0.12, this should be only the tag passed to tf.Summary.
-            # By default, &quot;training/hptuning/metric&quot; will be used.
         &quot;params&quot;: [ # Required. The set of parameters to tune.
           { # Represents a single hyperparameter to optimize.
-            &quot;type&quot;: &quot;A String&quot;, # Required. The type of the parameter.
             &quot;categoricalValues&quot;: [ # Required if type is `CATEGORICAL`. The list of possible categories.
               &quot;A String&quot;,
             ],
@@ -3416,6 +3468,7 @@
             &quot;maxValue&quot;: 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
                 # should be unset if type is `CATEGORICAL`. This value should be integers if
                 # type is `INTEGER`.
+            &quot;type&quot;: &quot;A String&quot;, # Required. The type of the parameter.
           },
         ],
         &quot;enableTrialEarlyStopping&quot;: True or False, # Optional. Indicates if the hyperparameter tuning job enables auto trial
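To make the hyperparameter fields above concrete, here is a sketch of a `hyperparameters` block. `goal`, `maxTrials`, `parameterName`, and `minValue` are standard HyperparameterSpec/ParameterSpec fields that do not appear in this excerpt, so treat them as assumptions to verify against the full reference.

```python
# A sketch of a hyperparameter tuning spec. goal, maxTrials, parameterName and
# minValue are assumed from the full reference and not shown in the excerpt above.
hyperparameters = {
    'goal': 'MAXIMIZE',
    'hyperparameterMetricTag': 'training/hptuning/metric',  # the documented default tag
    'maxTrials': 20,
    'enableTrialEarlyStopping': True,
    'params': [
        {
            'parameterName': 'learning_rate',
            'type': 'DOUBLE',
            'minValue': 0.0001,
            'maxValue': 0.1,
        },
        {
            'parameterName': 'optimizer',
            'type': 'CATEGORICAL',
            'categoricalValues': ['adam', 'sgd'],
        },
    ],
}
```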
@@ -3451,6 +3504,11 @@
             # tuning job.
             # Uses the default AI Platform hyperparameter tuning
             # algorithm if unspecified.
+        &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # Optional. The TensorFlow summary tag name to use for optimizing trials. For
+            # current versions of TensorFlow, this tag name should exactly match what is
+            # shown in TensorBoard, including all scopes.  For versions of TensorFlow
+            # prior to 0.12, this should be only the tag passed to tf.Summary.
+            # By default, &quot;training/hptuning/metric&quot; will be used.
       },
       &quot;workerConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
           #
@@ -3463,29 +3521,6 @@
           # worker. If `workerConfig.imageUri` has not been set, AI Platform uses
           # the value of `masterConfig.imageUri`. Learn more about [configuring custom
           # containers](/ai-platform/training/docs/distributed-training-containers).
-        &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-            # the one used in the custom container. This field is required if the replica
-            # is a TPU worker that uses a custom container. Otherwise, do not specify
-            # this field. This must be a [runtime version that currently supports
-            # training with
-            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-            #
-            # Note that the version of TensorFlow included in a runtime version may
-            # differ from the numbering of the runtime version itself, because it may
-            # have a different [patch
-            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-            # In this field, you must specify the runtime version (TensorFlow minor
-            # version). For example, if your custom container runs TensorFlow `1.x.y`,
-            # specify `1.x`.
-        &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-            # If provided, it will override default ENTRYPOINT of the docker image.
-            # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-            # It cannot be set if custom container image is
-            # not provided.
-            # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-            # both cannot be set at the same time.
-          &quot;A String&quot;,
-        ],
         &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
             # Registry. Learn more about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -3518,6 +3553,29 @@
           &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
           &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
         },
+        &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+            # the one used in the custom container. This field is required if the replica
+            # is a TPU worker that uses a custom container. Otherwise, do not specify
+            # this field. This must be a [runtime version that currently supports
+            # training with
+            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+            #
+            # Note that the version of TensorFlow included in a runtime version may
+            # differ from the numbering of the runtime version itself, because it may
+            # have a different [patch
+            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+            # In this field, you must specify the runtime version (TensorFlow minor
+            # version). For example, if your custom container runs TensorFlow `1.x.y`,
+            # specify `1.x`.
+        &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+            # If provided, it overrides the default ENTRYPOINT of the Docker image.
+            # If not provided, the Docker image&#x27;s ENTRYPOINT is used.
+            # It cannot be set if a custom container image is not provided.
+            # Note that this field and [TrainingInput.args] are mutually exclusive:
+            # both cannot be set at the same time.
+          &quot;A String&quot;,
+        ],
       },
       &quot;parameterServerCount&quot;: &quot;A String&quot;, # Optional. The number of parameter server replicas to use for the training
           # job. Each replica in the cluster will be of the type specified in
@@ -3610,8 +3668,6 @@
           #
           # This value must be present when `scaleTier` is set to `CUSTOM` and
           # `evaluatorCount` is greater than zero.
-      &quot;region&quot;: &quot;A String&quot;, # Required. The region to run the training job in. See the [available
-          # regions](/ai-platform/training/docs/regions) for AI Platform Training.
       &quot;workerType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
           # job&#x27;s worker nodes.
           #
@@ -3628,6 +3684,8 @@
           #
           # This value must be present when `scaleTier` is set to `CUSTOM` and
           # `workerCount` is greater than zero.
+      &quot;region&quot;: &quot;A String&quot;, # Required. The region to run the training job in. See the [available
+          # regions](/ai-platform/training/docs/regions) for AI Platform Training.
       &quot;parameterServerType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
           # job&#x27;s parameter server.
           #
@@ -3651,29 +3709,6 @@
           # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
           # about [configuring custom
           # containers](/ai-platform/training/docs/distributed-training-containers).
-        &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-            # the one used in the custom container. This field is required if the replica
-            # is a TPU worker that uses a custom container. Otherwise, do not specify
-            # this field. This must be a [runtime version that currently supports
-            # training with
-            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-            #
-            # Note that the version of TensorFlow included in a runtime version may
-            # differ from the numbering of the runtime version itself, because it may
-            # have a different [patch
-            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-            # In this field, you must specify the runtime version (TensorFlow minor
-            # version). For example, if your custom container runs TensorFlow `1.x.y`,
-            # specify `1.x`.
-        &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-            # If provided, it will override default ENTRYPOINT of the docker image.
-            # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-            # It cannot be set if custom container image is
-            # not provided.
-            # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-            # both cannot be set at the same time.
-          &quot;A String&quot;,
-        ],
         &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
             # Registry. Learn more about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -3706,6 +3741,29 @@
           &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
           &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
         },
+        &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+            # the one used in the custom container. This field is required if the replica
+            # is a TPU worker that uses a custom container. Otherwise, do not specify
+            # this field. This must be a [runtime version that currently supports
+            # training with
+            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+            #
+            # Note that the version of TensorFlow included in a runtime version may
+            # differ from the numbering of the runtime version itself, because it may
+            # have a different [patch
+            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+            # In this field, you must specify the runtime version (TensorFlow minor
+            # version). For example, if your custom container runs TensorFlow `1.x.y`,
+            # specify `1.x`.
+        &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+            # If provided, it overrides the default ENTRYPOINT of the Docker image.
+            # If not provided, the Docker image&#x27;s ENTRYPOINT is used.
+            # It cannot be set if a custom container image is not provided.
+            # Note that this field and [TrainingInput.args] are mutually exclusive:
+            # both cannot be set at the same time.
+          &quot;A String&quot;,
+        ],
       },
       &quot;scaleTier&quot;: &quot;A String&quot;, # Required. Specifies the machine types, the number of replicas for workers
           # and parameter servers.
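As a sketch of the custom-container fields above (recall that `containerCommand` and `TrainingInput.args` are mutually exclusive), the following assembles a hypothetical `masterConfig`. The image, command, and machine type are placeholders, and `masterType` is assumed from the full TrainingInput reference since it does not appear in this excerpt.

```python
# A sketch of a custom-container master replica with placeholder image and command.
# containerCommand overrides the image ENTRYPOINT, so args is omitted here.
master_config = {
    'imageUri': 'gcr.io/YOUR_PROJECT/your-trainer:latest',   # placeholder image
    'containerCommand': ['python3', '-m', 'trainer.task'],   # replaces the image ENTRYPOINT
    'acceleratorConfig': {
        'type': 'NVIDIA_TESLA_K80',  # example accelerator type
        'count': '1',
    },
}

training_input = {
    'scaleTier': 'CUSTOM',
    'masterType': 'n1-standard-8',  # assumed field; verify against the full reference
    'masterConfig': master_config,
    'region': 'us-central1',
}
```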
@@ -3727,16 +3785,6 @@
           #
           # Read more about the Python versions available for [each runtime
           # version](/ml-engine/docs/runtime-version-list).
-      &quot;network&quot;: &quot;A String&quot;, # Optional. The full name of the Google Compute Engine
-          # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
-          # is peered. For example, projects/12345/global/networks/myVPC. Format is of
-          # the form projects/{project}/global/networks/{network}. Where {project} is a
-          # project number, as in &#x27;12345&#x27;, and {network} is network name.&quot;.
-          #
-          # Private services access must already be configured for the network. If left
-          # unspecified, the Job is not peered with any network. Learn more -
-          # Connecting Job to user network over private
-          # IP.
       &quot;scheduling&quot;: { # All parameters related to scheduling of training jobs. # Optional. Scheduling options for a training job.
         &quot;maxWaitTime&quot;: &quot;A String&quot;,
         &quot;maxRunningTime&quot;: &quot;A String&quot;, # Optional. The maximum job running time, expressed in seconds. The field can
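For the `scheduling` block above, a minimal sketch follows; the `s`-suffixed duration string is the JSON encoding assumed for these fields.

```python
# Assumed JSON encoding: Duration fields are "s"-suffixed second counts.
training_input = {
    'scheduling': {
        'maxRunningTime': '7200s',  # cancel the job if it runs longer than 2 hours
    },
    # ... remaining TrainingInput fields ...
}
```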
@@ -3763,6 +3811,16 @@
             #   ...
             # ```
       },
+      &quot;network&quot;: &quot;A String&quot;, # Optional. The full name of the Google Compute Engine
+          # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
+          # is peered. For example, projects/12345/global/networks/myVPC. The format
+          # is projects/{project}/global/networks/{network}, where {project} is a
+          # project number, as in &#x27;12345&#x27;, and {network} is a network name.
+          #
+          # Private services access must already be configured for the network. If left
+          # unspecified, the Job is not peered with any network. Learn more about
+          # connecting a Job to a user network over private IP.
       &quot;evaluatorConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
           #
           # You should only set `evaluatorConfig.acceleratorConfig` if
@@ -3774,29 +3832,6 @@
           # your evaluator. If `evaluatorConfig.imageUri` has not been
           # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
           # containers](/ai-platform/training/docs/distributed-training-containers).
-        &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-            # the one used in the custom container. This field is required if the replica
-            # is a TPU worker that uses a custom container. Otherwise, do not specify
-            # this field. This must be a [runtime version that currently supports
-            # training with
-            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-            #
-            # Note that the version of TensorFlow included in a runtime version may
-            # differ from the numbering of the runtime version itself, because it may
-            # have a different [patch
-            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-            # In this field, you must specify the runtime version (TensorFlow minor
-            # version). For example, if your custom container runs TensorFlow `1.x.y`,
-            # specify `1.x`.
-        &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-            # If provided, it will override default ENTRYPOINT of the docker image.
-            # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-            # It cannot be set if custom container image is
-            # not provided.
-            # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-            # both cannot be set at the same time.
-          &quot;A String&quot;,
-        ],
         &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
             # Registry. Learn more about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -3829,6 +3864,29 @@
           &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
           &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
         },
+        &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+            # the one used in the custom container. This field is required if the replica
+            # is a TPU worker that uses a custom container. Otherwise, do not specify
+            # this field. This must be a [runtime version that currently supports
+            # training with
+            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+            #
+            # Note that the version of TensorFlow included in a runtime version may
+            # differ from the numbering of the runtime version itself, because it may
+            # have a different [patch
+            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+            # In this field, you must specify the runtime version (TensorFlow minor
+            # version). For example, if your custom container runs TensorFlow `1.x.y`,
+            # specify `1.x`.
+        &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+            # If provided, it overrides the default ENTRYPOINT of the Docker image.
+            # If not provided, the Docker image&#x27;s ENTRYPOINT is used.
+            # It cannot be set if a custom container image is not provided.
+            # Note that this field and [TrainingInput.args] are mutually exclusive:
+            # both cannot be set at the same time.
+          &quot;A String&quot;,
+        ],
       },
       &quot;useChiefInTfConfig&quot;: True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
           # variable when training with a custom container. Defaults to `false`. [Learn
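Drawing on the `tpuTfVersion` notes above, here is a sketch of a TPU worker that uses a custom container. The image is a placeholder, and the `cloud_tpu` worker type is an assumption to verify against the machine-type documentation.

```python
# A sketch of a TPU worker replica using a custom container. tpuTfVersion names
# the runtime version (TensorFlow minor version) matching the TensorFlow build
# inside the container; the image name is a placeholder.
worker_config = {
    'imageUri': 'gcr.io/YOUR_PROJECT/tpu-trainer:latest',
    'tpuTfVersion': '1.15',  # container runs TensorFlow 1.15.x
}

training_input_fragment = {
    'scaleTier': 'CUSTOM',
    'workerType': 'cloud_tpu',  # assumed TPU worker type; verify against the full docs
    'workerCount': '1',
    'workerConfig': worker_config,
}
```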
@@ -3837,21 +3895,6 @@
           #
           # This field has no effect for training jobs that don&#x27;t use a custom
           # container.
-      &quot;workerCount&quot;: &quot;A String&quot;, # Optional. The number of worker replicas to use for the training job. Each
-          # replica in the cluster will be of the type specified in `worker_type`.
-          #
-          # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
-          # set this value, you must also set `worker_type`.
-          #
-          # The default value is zero.
-      &quot;pythonModule&quot;: &quot;A String&quot;, # Required. The Python module name to run after installing the packages.
-      &quot;args&quot;: [ # Optional. Command-line arguments passed to the training application when it
-          # starts. If your job uses a custom container, then the arguments are passed
-          # to the container&#x27;s &lt;a class=&quot;external&quot; target=&quot;_blank&quot;
-          # href=&quot;https://docs.docker.com/engine/reference/builder/#entrypoint&quot;&gt;
-          # `ENTRYPOINT`&lt;/a&gt; command.
-        &quot;A String&quot;,
-      ],
     },
     &quot;state&quot;: &quot;A String&quot;, # Output only. The detailed state of a job.
     &quot;jobId&quot;: &quot;A String&quot;, # Required. The user-specified id of the job.
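Pulling the TrainingInput fields above together, a hypothetical job submission could look like the following. `packageUris` and the `jobs().create` call do not appear in this excerpt and are assumed from the broader client surface; every name is a placeholder.

```python
from googleapiclient import discovery

# Hypothetical job: project, bucket, and module names are placeholders, and
# packageUris is assumed from the full TrainingInput reference.
job_body = {
    'jobId': 'my_training_job_001',
    'trainingInput': {
        'scaleTier': 'STANDARD_1',
        'region': 'us-central1',
        'pythonModule': 'trainer.task',
        'packageUris': ['gs://your-bucket/packages/trainer-0.1.tar.gz'],
        'args': ['--epochs', '10'],
    },
}

ml = discovery.build('ml', 'v1')  # application-default credentials assumed
request = ml.projects().jobs().create(parent='projects/YOUR_PROJECT', body=job_body)
response = request.execute()
print(response.get('state'))
```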
@@ -3864,41 +3907,52 @@
       &quot;predictionCount&quot;: &quot;A String&quot;, # The number of generated predictions.
     },
     &quot;trainingOutput&quot;: { # Represents results of a training job. Output only. # The current training job result.
+      &quot;isBuiltInAlgorithmJob&quot;: True or False, # Whether this job is a built-in Algorithm job.
+      &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
+          # Only set for built-in algorithms jobs.
+        &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
+            # saves the trained model. Only set for successful jobs that don&#x27;t use
+            # hyperparameter tuning.
+        &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
+        &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
+            # trained.
+        &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
+      },
       &quot;trials&quot;: [ # Results for individual Hyperparameter trials.
           # Only set for hyperparameter tuning jobs.
         { # Represents the result of a single hyperparameter tuning trial from a
             # training job. The TrainingOutput object that is returned on successful
             # completion of a training job with hyperparameter tuning includes a list
             # of HyperparameterOutput objects, one for each successful trial.
-          &quot;allMetrics&quot;: [ # All recorded object metrics for this trial. This field is not currently
-              # populated.
-            { # An observed value of a metric.
-              &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
-              &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
-            },
-          ],
-          &quot;hyperparameters&quot;: { # The hyperparameters given to this trial.
-            &quot;a_key&quot;: &quot;A String&quot;,
-          },
           &quot;trialId&quot;: &quot;A String&quot;, # The trial id for these results.
           &quot;endTime&quot;: &quot;A String&quot;, # Output only. End time for the trial.
-          &quot;isTrialStoppedEarly&quot;: True or False, # True if the trial is stopped early.
           &quot;startTime&quot;: &quot;A String&quot;, # Output only. Start time for the trial.
+          &quot;isTrialStoppedEarly&quot;: True or False, # True if the trial is stopped early.
           &quot;finalMetric&quot;: { # An observed value of a metric. # The final objective metric seen for this trial.
-            &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
             &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
+            &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
           },
           &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
               # Only set for trials of built-in algorithms jobs that have succeeded.
-            &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
             &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
                 # saves the trained model. Only set for successful jobs that don&#x27;t use
                 # hyperparameter tuning.
             &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
             &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
                 # trained.
+            &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
           },
           &quot;state&quot;: &quot;A String&quot;, # Output only. The detailed state of the trial.
+          &quot;allMetrics&quot;: [ # All recorded object metrics for this trial. This field is not currently
+              # populated.
+            { # An observed value of a metric.
+              &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
+              &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
+            },
+          ],
+          &quot;hyperparameters&quot;: { # The hyperparameters given to this trial.
+            &quot;a_key&quot;: &quot;A String&quot;,
+          },
         },
       ],
       &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # The TensorFlow summary tag name used for optimizing hyperparameter tuning
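Given a Job resource shaped like the `trainingOutput` documented above, a small sketch for summarizing hyperparameter tuning trials:

```python
# A sketch of reading hyperparameter tuning results from a Job resource shaped
# like the object documented above; `job` is assumed to be such a dict.
def summarize_trials(job):
    output = job.get('trainingOutput', {})
    if not output.get('isHyperparameterTuningJob'):
        print('Not a hyperparameter tuning job.')
        return
    for trial in output.get('trials', []):
        metric = trial.get('finalMetric', {})
        print('trial %s: objective=%s at step %s, hyperparameters=%s' % (
            trial.get('trialId'),
            metric.get('objectiveValue'),
            metric.get('trainingStep'),
            trial.get('hyperparameters', {}),
        ))
```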
@@ -3909,17 +3963,6 @@
           # Only set for hyperparameter tuning jobs.
       &quot;isHyperparameterTuningJob&quot;: True or False, # Whether this job is a hyperparameter tuning job.
       &quot;consumedMLUnits&quot;: 3.14, # The amount of ML units consumed by the job.
-      &quot;isBuiltInAlgorithmJob&quot;: True or False, # Whether this job is a built-in Algorithm job.
-      &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
-          # Only set for built-in algorithms jobs.
-        &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
-        &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
-            # saves the trained model. Only set for successful jobs that don&#x27;t use
-            # hyperparameter tuning.
-        &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
-        &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
-            # trained.
-      },
     },
     &quot;createTime&quot;: &quot;A String&quot;, # Output only. When the job was created.
     &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your jobs.
@@ -3929,49 +3972,6 @@
         # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
       &quot;a_key&quot;: &quot;A String&quot;,
     },
-    &quot;predictionInput&quot;: { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
-      &quot;outputPath&quot;: &quot;A String&quot;, # Required. The output Google Cloud Storage location.
-      &quot;outputDataFormat&quot;: &quot;A String&quot;, # Optional. Format of the output data files, defaults to JSON.
-      &quot;dataFormat&quot;: &quot;A String&quot;, # Required. The format of the input data files.
-      &quot;batchSize&quot;: &quot;A String&quot;, # Optional. Number of records per batch, defaults to 64.
-          # The service will buffer batch_size number of records in memory before
-          # invoking one Tensorflow prediction call internally. So take the record
-          # size and memory available into consideration when setting this parameter.
-      &quot;runtimeVersion&quot;: &quot;A String&quot;, # Optional. The AI Platform runtime version to use for this batch
-          # prediction. If not set, AI Platform will pick the runtime version used
-          # during the CreateVersion request for this model version, or choose the
-          # latest stable version when model version information is not available
-          # such as when the model is specified by uri.
-      &quot;inputPaths&quot;: [ # Required. The Cloud Storage location of the input data files. May contain
-          # &lt;a href=&quot;/storage/docs/gsutil/addlhelp/WildcardNames&quot;&gt;wildcards&lt;/a&gt;.
-        &quot;A String&quot;,
-      ],
-      &quot;region&quot;: &quot;A String&quot;, # Required. The Google Compute Engine region to run the prediction job in.
-          # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
-          # for AI Platform services.
-      &quot;versionName&quot;: &quot;A String&quot;, # Use this field if you want to specify a version of the model to use. The
-          # string is formatted the same way as `model_version`, with the addition
-          # of the version information:
-          #
-          # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION&quot;`
-      &quot;modelName&quot;: &quot;A String&quot;, # Use this field if you want to use the default version for the specified
-          # model. The string must use the following format:
-          #
-          # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL&quot;`
-      &quot;uri&quot;: &quot;A String&quot;, # Use this field if you want to specify a Google Cloud Storage path for
-          # the model to use.
-      &quot;maxWorkerCount&quot;: &quot;A String&quot;, # Optional. The maximum number of workers to be used for parallel processing.
-          # Defaults to 10 if not specified.
-      &quot;signatureName&quot;: &quot;A String&quot;, # Optional. The name of the signature defined in the SavedModel to use for
-          # this job. Please refer to
-          # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
-          # for information about how to use signatures.
-          #
-          # Defaults to
-          # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
-          # , which is &quot;serving_default&quot;.
-    },
-    &quot;errorMessage&quot;: &quot;A String&quot;, # Output only. The details of a failure or a cancellation.
   }
 
   updateMask: string, Required. Specifies the path, relative to `Job`, of the field to update.
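Tying the request body and `updateMask` together, here is a sketch of the patch call that updates only the job's labels. The project and job IDs are placeholders, and application-default credentials are assumed.

```python
from googleapiclient import discovery

# Sketch of the documented patch call, updating only the labels field.
ml = discovery.build('ml', 'v1')  # application-default credentials assumed
job_name = 'projects/YOUR_PROJECT/jobs/my_training_job_001'

updated = ml.projects().jobs().patch(
    name=job_name,
    body={'labels': {'team': 'research'}},
    updateMask='labels',
).execute()
print(updated.get('labels'))
```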
@@ -4002,6 +4002,49 @@
   An object of the form:
 
     { # Represents a training or prediction job.
+      &quot;predictionInput&quot;: { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
+        &quot;versionName&quot;: &quot;A String&quot;, # Use this field if you want to specify a version of the model to use. The
+            # string is formatted the same way as `model_version`, with the addition
+            # of the version information:
+            #
+            # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION&quot;`
+        &quot;modelName&quot;: &quot;A String&quot;, # Use this field if you want to use the default version for the specified
+            # model. The string must use the following format:
+            #
+            # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL&quot;`
+        &quot;uri&quot;: &quot;A String&quot;, # Use this field if you want to specify a Google Cloud Storage path for
+            # the model to use.
+        &quot;maxWorkerCount&quot;: &quot;A String&quot;, # Optional. The maximum number of workers to be used for parallel processing.
+            # Defaults to 10 if not specified.
+        &quot;signatureName&quot;: &quot;A String&quot;, # Optional. The name of the signature defined in the SavedModel to use for
+            # this job. Please refer to
+            # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
+            # for information about how to use signatures.
+            #
+            # Defaults to
+            # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
+            # , which is &quot;serving_default&quot;.
+        &quot;outputPath&quot;: &quot;A String&quot;, # Required. The output Google Cloud Storage location.
+        &quot;outputDataFormat&quot;: &quot;A String&quot;, # Optional. Format of the output data files, defaults to JSON.
+        &quot;dataFormat&quot;: &quot;A String&quot;, # Required. The format of the input data files.
+        &quot;batchSize&quot;: &quot;A String&quot;, # Optional. Number of records per batch, defaults to 64.
+            # The service buffers batch_size records in memory before invoking one
+            # TensorFlow prediction call internally, so take the record size and the
+            # available memory into account when setting this parameter.
+        &quot;runtimeVersion&quot;: &quot;A String&quot;, # Optional. The AI Platform runtime version to use for this batch
+            # prediction. If not set, AI Platform will pick the runtime version used
+            # during the CreateVersion request for this model version, or choose the
+            # latest stable version when model version information is not available
+            # such as when the model is specified by uri.
+        &quot;inputPaths&quot;: [ # Required. The Cloud Storage location of the input data files. May contain
+            # &lt;a href=&quot;/storage/docs/gsutil/addlhelp/WildcardNames&quot;&gt;wildcards&lt;/a&gt;.
+          &quot;A String&quot;,
+        ],
+        &quot;region&quot;: &quot;A String&quot;, # Required. The Google Compute Engine region to run the prediction job in.
+            # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
+            # for AI Platform services.
+      },
+      &quot;errorMessage&quot;: &quot;A String&quot;, # Output only. The details of a failure or a cancellation.
       &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
           # prevent simultaneous updates of a job from overwriting each other.
           # It is strongly suggested that systems make use of the `etag` in the
@@ -4014,6 +4057,21 @@
           # command-line arguments and/or in a YAML configuration file referenced from
           # the --config command-line argument. For details, see the guide to [submitting
           # a training job](/ai-platform/training/docs/training-jobs).
+        &quot;workerCount&quot;: &quot;A String&quot;, # Optional. The number of worker replicas to use for the training job. Each
+            # replica in the cluster will be of the type specified in `worker_type`.
+            #
+            # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+            # set this value, you must also set `worker_type`.
+            #
+            # The default value is zero.
+        &quot;pythonModule&quot;: &quot;A String&quot;, # Required. The Python module name to run after installing the packages.
+        &quot;args&quot;: [ # Optional. Command-line arguments passed to the training application when it
+            # starts. If your job uses a custom container, then the arguments are passed
+            # to the container&#x27;s &lt;a class=&quot;external&quot; target=&quot;_blank&quot;
+            # href=&quot;https://docs.docker.com/engine/reference/builder/#entrypoint&quot;&gt;
+            # `ENTRYPOINT`&lt;/a&gt; command.
+          &quot;A String&quot;,
+        ],
         &quot;parameterServerConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
             #
             # You should only set `parameterServerConfig.acceleratorConfig` if
@@ -4025,29 +4083,6 @@
             # your parameter server. If `parameterServerConfig.imageUri` has not been
             # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
-          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-              # the one used in the custom container. This field is required if the replica
-              # is a TPU worker that uses a custom container. Otherwise, do not specify
-              # this field. This must be a [runtime version that currently supports
-              # training with
-              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-              #
-              # Note that the version of TensorFlow included in a runtime version may
-              # differ from the numbering of the runtime version itself, because it may
-              # have a different [patch
-              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-              # In this field, you must specify the runtime version (TensorFlow minor
-              # version). For example, if your custom container runs TensorFlow `1.x.y`,
-              # specify `1.x`.
-          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-              # If provided, it will override default ENTRYPOINT of the docker image.
-              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-              # It cannot be set if custom container image is
-              # not provided.
-              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-              # both cannot be set at the same time.
-            &quot;A String&quot;,
-          ],
           &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
               # Registry. Learn more about [configuring custom
               # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -4080,6 +4115,29 @@
             &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
             &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
           },
+          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+              # the one used in the custom container. This field is required if the replica
+              # is a TPU worker that uses a custom container. Otherwise, do not specify
+              # this field. This must be a [runtime version that currently supports
+              # training with
+              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+              #
+              # Note that the version of TensorFlow included in a runtime version may
+              # differ from the numbering of the runtime version itself, because it may
+              # have a different [patch
+              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+              # In this field, you must specify the runtime version (TensorFlow minor
+              # version). For example, if your custom container runs TensorFlow `1.x.y`,
+              # specify `1.x`.
+          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+              # If provided, it overrides the default ENTRYPOINT of the Docker image.
+              # If not provided, the Docker image&#x27;s ENTRYPOINT is used.
+              # It cannot be set if a custom container image is not provided.
+              # Note that this field and [TrainingInput.args] are mutually exclusive:
+              # both cannot be set at the same time.
+            &quot;A String&quot;,
+          ],
         },
         &quot;encryptionConfig&quot;: { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
             # protect resources created by a training job, instead of using Google&#x27;s
@@ -4096,14 +4154,8 @@
               # `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
         },
         &quot;hyperparameters&quot;: { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
-          &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # Optional. The TensorFlow summary tag name to use for optimizing trials. For
-              # current versions of TensorFlow, this tag name should exactly match what is
-              # shown in TensorBoard, including all scopes.  For versions of TensorFlow
-              # prior to 0.12, this should be only the tag passed to tf.Summary.
-              # By default, &quot;training/hptuning/metric&quot; will be used.
           &quot;params&quot;: [ # Required. The set of parameters to tune.
             { # Represents a single hyperparameter to optimize.
-              &quot;type&quot;: &quot;A String&quot;, # Required. The type of the parameter.
               &quot;categoricalValues&quot;: [ # Required if type is `CATEGORICAL`. The list of possible categories.
                 &quot;A String&quot;,
               ],
@@ -4126,6 +4178,7 @@
               &quot;maxValue&quot;: 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
                   # should be unset if type is `CATEGORICAL`. This value should be integers if
                   # type is `INTEGER`.
+              &quot;type&quot;: &quot;A String&quot;, # Required. The type of the parameter.
             },
           ],
           &quot;enableTrialEarlyStopping&quot;: True or False, # Optional. Indicates if the hyperparameter tuning job enables auto trial
@@ -4161,6 +4214,11 @@
               # tuning job.
               # Uses the default AI Platform hyperparameter tuning
               # algorithm if unspecified.
+          &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # Optional. The TensorFlow summary tag name to use for optimizing trials. For
+              # current versions of TensorFlow, this tag name should exactly match what is
+              # shown in TensorBoard, including all scopes.  For versions of TensorFlow
+              # prior to 0.12, this should be only the tag passed to tf.Summary.
+              # By default, &quot;training/hptuning/metric&quot; will be used.
         },
         &quot;workerConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
             #
@@ -4173,29 +4231,6 @@
             # worker. If `workerConfig.imageUri` has not been set, AI Platform uses
             # the value of `masterConfig.imageUri`. Learn more about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
-          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-              # the one used in the custom container. This field is required if the replica
-              # is a TPU worker that uses a custom container. Otherwise, do not specify
-              # this field. This must be a [runtime version that currently supports
-              # training with
-              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-              #
-              # Note that the version of TensorFlow included in a runtime version may
-              # differ from the numbering of the runtime version itself, because it may
-              # have a different [patch
-              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-              # In this field, you must specify the runtime version (TensorFlow minor
-              # version). For example, if your custom container runs TensorFlow `1.x.y`,
-              # specify `1.x`.
-          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-              # If provided, it will override default ENTRYPOINT of the docker image.
-              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-              # It cannot be set if custom container image is
-              # not provided.
-              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-              # both cannot be set at the same time.
-            &quot;A String&quot;,
-          ],
           &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
               # Registry. Learn more about [configuring custom
               # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -4228,6 +4263,29 @@
             &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
             &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
           },
+          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+              # the one used in the custom container. This field is required if the replica
+              # is a TPU worker that uses a custom container. Otherwise, do not specify
+              # this field. This must be a [runtime version that currently supports
+              # training with
+              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+              #
+              # Note that the version of TensorFlow included in a runtime version may
+              # differ from the numbering of the runtime version itself, because it may
+              # have a different [patch
+              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+              # In this field, you must specify the runtime version (TensorFlow minor
+              # version). For example, if your custom container runs TensorFlow `1.x.y`,
+              # specify `1.x`.
+          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+              # If provided, it overrides the default ENTRYPOINT of the Docker image.
+              # If not provided, the Docker image&#x27;s ENTRYPOINT is used.
+              # It cannot be set if a custom container image is not provided.
+              # Note that this field and [TrainingInput.args] are mutually exclusive:
+              # both cannot be set at the same time.
+            &quot;A String&quot;,
+          ],
         },
         &quot;parameterServerCount&quot;: &quot;A String&quot;, # Optional. The number of parameter server replicas to use for the training
             # job. Each replica in the cluster will be of the type specified in
@@ -4320,8 +4378,6 @@
             #
             # This value must be present when `scaleTier` is set to `CUSTOM` and
             # `evaluatorCount` is greater than zero.
-        &quot;region&quot;: &quot;A String&quot;, # Required. The region to run the training job in. See the [available
-            # regions](/ai-platform/training/docs/regions) for AI Platform Training.
         &quot;workerType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
             # job&#x27;s worker nodes.
             #
@@ -4338,6 +4394,8 @@
             #
             # This value must be present when `scaleTier` is set to `CUSTOM` and
             # `workerCount` is greater than zero.
+        &quot;region&quot;: &quot;A String&quot;, # Required. The region to run the training job in. See the [available
+            # regions](/ai-platform/training/docs/regions) for AI Platform Training.
         &quot;parameterServerType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
             # job&#x27;s parameter server.
             #
@@ -4361,29 +4419,6 @@
             # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
             # about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
-          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-              # the one used in the custom container. This field is required if the replica
-              # is a TPU worker that uses a custom container. Otherwise, do not specify
-              # this field. This must be a [runtime version that currently supports
-              # training with
-              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-              #
-              # Note that the version of TensorFlow included in a runtime version may
-              # differ from the numbering of the runtime version itself, because it may
-              # have a different [patch
-              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-              # In this field, you must specify the runtime version (TensorFlow minor
-              # version). For example, if your custom container runs TensorFlow `1.x.y`,
-              # specify `1.x`.
-          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-              # If provided, it will override default ENTRYPOINT of the docker image.
-              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-              # It cannot be set if custom container image is
-              # not provided.
-              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-              # both cannot be set at the same time.
-            &quot;A String&quot;,
-          ],
           &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
               # Registry. Learn more about [configuring custom
               # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -4416,6 +4451,29 @@
             &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
             &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
           },
+          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+              # the one used in the custom container. This field is required if the replica
+              # is a TPU worker that uses a custom container. Otherwise, do not specify
+              # this field. This must be a [runtime version that currently supports
+              # training with
+              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+              #
+              # Note that the version of TensorFlow included in a runtime version may
+              # differ from the numbering of the runtime version itself, because it may
+              # have a different [patch
+              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+              # In this field, you must specify the runtime version (TensorFlow minor
+              # version). For example, if your custom container runs TensorFlow `1.x.y`,
+              # specify `1.x`.
+          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+              # If provided, it will override default ENTRYPOINT of the docker image.
+              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
+              # It cannot be set if custom container image is
+              # not provided.
+              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+              # both cannot be set at the same time.
+            &quot;A String&quot;,
+          ],
         },
         &quot;scaleTier&quot;: &quot;A String&quot;, # Required. Specifies the machine types, the number of replicas for workers
             # and parameter servers.
@@ -4437,16 +4495,6 @@
             #
             # Read more about the Python versions available for [each runtime
             # version](/ml-engine/docs/runtime-version-list).
-        &quot;network&quot;: &quot;A String&quot;, # Optional. The full name of the Google Compute Engine
-            # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
-            # is peered. For example, projects/12345/global/networks/myVPC. Format is of
-            # the form projects/{project}/global/networks/{network}. Where {project} is a
-            # project number, as in &#x27;12345&#x27;, and {network} is network name.&quot;.
-            #
-            # Private services access must already be configured for the network. If left
-            # unspecified, the Job is not peered with any network. Learn more -
-            # Connecting Job to user network over private
-            # IP.
         &quot;scheduling&quot;: { # All parameters related to scheduling of training jobs. # Optional. Scheduling options for a training job.
           &quot;maxWaitTime&quot;: &quot;A String&quot;,
           &quot;maxRunningTime&quot;: &quot;A String&quot;, # Optional. The maximum job running time, expressed in seconds. The field can
@@ -4473,6 +4521,16 @@
               #   ...
               # ```
         },
+        &quot;network&quot;: &quot;A String&quot;, # Optional. The full name of the Google Compute Engine
+            # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
+            # is peered. For example, projects/12345/global/networks/myVPC. Format is of
+            # the form projects/{project}/global/networks/{network}. Where {project} is a
+            # project number, as in &#x27;12345&#x27;, and {network} is network name.&quot;.
+            #
+            # Private services access must already be configured for the network. If left
+            # unspecified, the Job is not peered with any network. Learn more -
+            # Connecting Job to user network over private
+            # IP.
         &quot;evaluatorConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
             #
             # You should only set `evaluatorConfig.acceleratorConfig` if
@@ -4484,29 +4542,6 @@
             # your evaluator. If `evaluatorConfig.imageUri` has not been
             # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
-          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-              # the one used in the custom container. This field is required if the replica
-              # is a TPU worker that uses a custom container. Otherwise, do not specify
-              # this field. This must be a [runtime version that currently supports
-              # training with
-              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-              #
-              # Note that the version of TensorFlow included in a runtime version may
-              # differ from the numbering of the runtime version itself, because it may
-              # have a different [patch
-              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-              # In this field, you must specify the runtime version (TensorFlow minor
-              # version). For example, if your custom container runs TensorFlow `1.x.y`,
-              # specify `1.x`.
-          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-              # If provided, it will override default ENTRYPOINT of the docker image.
-              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-              # It cannot be set if custom container image is
-              # not provided.
-              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-              # both cannot be set at the same time.
-            &quot;A String&quot;,
-          ],
           &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
               # Registry. Learn more about [configuring custom
               # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -4539,6 +4574,29 @@
             &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
             &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
           },
+          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+              # the one used in the custom container. This field is required if the replica
+              # is a TPU worker that uses a custom container. Otherwise, do not specify
+              # this field. This must be a [runtime version that currently supports
+              # training with
+              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+              #
+              # Note that the version of TensorFlow included in a runtime version may
+              # differ from the numbering of the runtime version itself, because it may
+              # have a different [patch
+              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+              # In this field, you must specify the runtime version (TensorFlow minor
+              # version). For example, if your custom container runs TensorFlow `1.x.y`,
+              # specify `1.x`.
+          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+              # If provided, it will override default ENTRYPOINT of the docker image.
+              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
+              # It cannot be set if custom container image is
+              # not provided.
+              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+              # both cannot be set at the same time.
+            &quot;A String&quot;,
+          ],
         },
         &quot;useChiefInTfConfig&quot;: True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
             # variable when training with a custom container. Defaults to `false`. [Learn
@@ -4547,21 +4605,6 @@
             #
             # This field has no effect for training jobs that don&#x27;t use a custom
             # container.
-        &quot;workerCount&quot;: &quot;A String&quot;, # Optional. The number of worker replicas to use for the training job. Each
-            # replica in the cluster will be of the type specified in `worker_type`.
-            #
-            # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
-            # set this value, you must also set `worker_type`.
-            #
-            # The default value is zero.
-        &quot;pythonModule&quot;: &quot;A String&quot;, # Required. The Python module name to run after installing the packages.
-        &quot;args&quot;: [ # Optional. Command-line arguments passed to the training application when it
-            # starts. If your job uses a custom container, then the arguments are passed
-            # to the container&#x27;s &lt;a class=&quot;external&quot; target=&quot;_blank&quot;
-            # href=&quot;https://docs.docker.com/engine/reference/builder/#entrypoint&quot;&gt;
-            # `ENTRYPOINT`&lt;/a&gt; command.
-          &quot;A String&quot;,
-        ],
       },
       &quot;state&quot;: &quot;A String&quot;, # Output only. The detailed state of a job.
       &quot;jobId&quot;: &quot;A String&quot;, # Required. The user-specified id of the job.
@@ -4574,41 +4617,52 @@
         &quot;predictionCount&quot;: &quot;A String&quot;, # The number of generated predictions.
       },
       &quot;trainingOutput&quot;: { # Represents results of a training job. Output only. # The current training job result.
+        &quot;isBuiltInAlgorithmJob&quot;: True or False, # Whether this job is a built-in Algorithm job.
+        &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
+            # Only set for built-in algorithms jobs.
+          &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
+              # saves the trained model. Only set for successful jobs that don&#x27;t use
+              # hyperparameter tuning.
+          &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
+          &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
+              # trained.
+          &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
+        },
         &quot;trials&quot;: [ # Results for individual Hyperparameter trials.
             # Only set for hyperparameter tuning jobs.
           { # Represents the result of a single hyperparameter tuning trial from a
               # training job. The TrainingOutput object that is returned on successful
               # completion of a training job with hyperparameter tuning includes a list
               # of HyperparameterOutput objects, one for each successful trial.
-            &quot;allMetrics&quot;: [ # All recorded object metrics for this trial. This field is not currently
-                # populated.
-              { # An observed value of a metric.
-                &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
-                &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
-              },
-            ],
-            &quot;hyperparameters&quot;: { # The hyperparameters given to this trial.
-              &quot;a_key&quot;: &quot;A String&quot;,
-            },
             &quot;trialId&quot;: &quot;A String&quot;, # The trial id for these results.
             &quot;endTime&quot;: &quot;A String&quot;, # Output only. End time for the trial.
-            &quot;isTrialStoppedEarly&quot;: True or False, # True if the trial is stopped early.
             &quot;startTime&quot;: &quot;A String&quot;, # Output only. Start time for the trial.
+            &quot;isTrialStoppedEarly&quot;: True or False, # True if the trial is stopped early.
             &quot;finalMetric&quot;: { # An observed value of a metric. # The final objective metric seen for this trial.
-              &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
               &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
+              &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
             },
             &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
                 # Only set for trials of built-in algorithms jobs that have succeeded.
-              &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
               &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
                   # saves the trained model. Only set for successful jobs that don&#x27;t use
                   # hyperparameter tuning.
               &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
               &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
                   # trained.
+              &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
             },
             &quot;state&quot;: &quot;A String&quot;, # Output only. The detailed state of the trial.
+            &quot;allMetrics&quot;: [ # All recorded object metrics for this trial. This field is not currently
+                # populated.
+              { # An observed value of a metric.
+                &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
+                &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
+              },
+            ],
+            &quot;hyperparameters&quot;: { # The hyperparameters given to this trial.
+              &quot;a_key&quot;: &quot;A String&quot;,
+            },
           },
         ],
         &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # The TensorFlow summary tag name used for optimizing hyperparameter tuning
@@ -4619,17 +4673,6 @@
             # Only set for hyperparameter tuning jobs.
         &quot;isHyperparameterTuningJob&quot;: True or False, # Whether this job is a hyperparameter tuning job.
         &quot;consumedMLUnits&quot;: 3.14, # The amount of ML units consumed by the job.
-        &quot;isBuiltInAlgorithmJob&quot;: True or False, # Whether this job is a built-in Algorithm job.
-        &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
-            # Only set for built-in algorithms jobs.
-          &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
-          &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
-              # saves the trained model. Only set for successful jobs that don&#x27;t use
-              # hyperparameter tuning.
-          &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
-          &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
-              # trained.
-        },
       },
       &quot;createTime&quot;: &quot;A String&quot;, # Output only. When the job was created.
       &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your jobs.
@@ -4639,49 +4682,6 @@
           # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
         &quot;a_key&quot;: &quot;A String&quot;,
       },
-      &quot;predictionInput&quot;: { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
-        &quot;outputPath&quot;: &quot;A String&quot;, # Required. The output Google Cloud Storage location.
-        &quot;outputDataFormat&quot;: &quot;A String&quot;, # Optional. Format of the output data files, defaults to JSON.
-        &quot;dataFormat&quot;: &quot;A String&quot;, # Required. The format of the input data files.
-        &quot;batchSize&quot;: &quot;A String&quot;, # Optional. Number of records per batch, defaults to 64.
-            # The service will buffer batch_size number of records in memory before
-            # invoking one Tensorflow prediction call internally. So take the record
-            # size and memory available into consideration when setting this parameter.
-        &quot;runtimeVersion&quot;: &quot;A String&quot;, # Optional. The AI Platform runtime version to use for this batch
-            # prediction. If not set, AI Platform will pick the runtime version used
-            # during the CreateVersion request for this model version, or choose the
-            # latest stable version when model version information is not available
-            # such as when the model is specified by uri.
-        &quot;inputPaths&quot;: [ # Required. The Cloud Storage location of the input data files. May contain
-            # &lt;a href=&quot;/storage/docs/gsutil/addlhelp/WildcardNames&quot;&gt;wildcards&lt;/a&gt;.
-          &quot;A String&quot;,
-        ],
-        &quot;region&quot;: &quot;A String&quot;, # Required. The Google Compute Engine region to run the prediction job in.
-            # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
-            # for AI Platform services.
-        &quot;versionName&quot;: &quot;A String&quot;, # Use this field if you want to specify a version of the model to use. The
-            # string is formatted the same way as `model_version`, with the addition
-            # of the version information:
-            #
-            # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION&quot;`
-        &quot;modelName&quot;: &quot;A String&quot;, # Use this field if you want to use the default version for the specified
-            # model. The string must use the following format:
-            #
-            # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL&quot;`
-        &quot;uri&quot;: &quot;A String&quot;, # Use this field if you want to specify a Google Cloud Storage path for
-            # the model to use.
-        &quot;maxWorkerCount&quot;: &quot;A String&quot;, # Optional. The maximum number of workers to be used for parallel processing.
-            # Defaults to 10 if not specified.
-        &quot;signatureName&quot;: &quot;A String&quot;, # Optional. The name of the signature defined in the SavedModel to use for
-            # this job. Please refer to
-            # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
-            # for information about how to use signatures.
-            #
-            # Defaults to
-            # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
-            # , which is &quot;serving_default&quot;.
-      },
-      &quot;errorMessage&quot;: &quot;A String&quot;, # Output only. The details of a failure or a cancellation.
     }</pre>
 </div>
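For orientation only (not part of the generated diff): a minimal sketch of how the Job body documented above is submitted and read back through this client. The project ID, bucket path, trainer module, machine types, and version strings are hypothetical placeholders, and `create`/`get` are the job methods this file documents elsewhere.

```python
# Sketch, assuming default application credentials are available.
from googleapiclient import discovery

ml = discovery.build('ml', 'v1')
parent = 'projects/my-project'  # hypothetical project ID

job_body = {
    'jobId': 'my_training_job_001',  # hypothetical job id
    'trainingInput': {
        'scaleTier': 'CUSTOM',
        'masterType': 'n1-standard-8',   # illustrative machine types
        'workerType': 'n1-standard-8',
        'workerCount': '2',              # requires scaleTier CUSTOM, per the docs above
        'region': 'us-central1',
        'pythonModule': 'trainer.task',
        'args': ['--epochs', '10'],
        'packageUris': ['gs://my-bucket/trainer-0.1.tar.gz'],  # hypothetical bucket
        'runtimeVersion': '2.1',
        'pythonVersion': '3.7',
    },
}

# Submit the job, then read back its detailed state and training output
# (trials are only populated for hyperparameter tuning jobs).
ml.projects().jobs().create(parent=parent, body=job_body).execute()
job = ml.projects().jobs().get(
    name='{}/jobs/{}'.format(parent, job_body['jobId'])).execute()
print(job['state'])
for trial in job.get('trainingOutput', {}).get('trials', []):
    print(trial['trialId'], trial.get('finalMetric'))
```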
 
@@ -4699,6 +4699,11 @@
     The object takes the form of:
 
 { # Request message for `SetIamPolicy` method.
+    &quot;updateMask&quot;: &quot;A String&quot;, # OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only
+        # the fields in the mask will be modified. If no mask is provided, the
+        # following default mask is used:
+        # 
+        # `paths: &quot;bindings, etag&quot;`
     &quot;policy&quot;: { # An Identity and Access Management (IAM) policy, which specifies access # REQUIRED: The complete policy to be applied to the `resource`. The size of
         # the policy is limited to a few 10s of KB. An empty policy is a
         # valid policy but certain Cloud Platform services (such as Projects)
@@ -4769,18 +4774,6 @@
         #
         # For a description of IAM and its features, see the
         # [IAM documentation](https://cloud.google.com/iam/docs/).
-      &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
-          # prevent simultaneous updates of a policy from overwriting each other.
-          # It is strongly suggested that systems make use of the `etag` in the
-          # read-modify-write cycle to perform policy updates in order to avoid race
-          # conditions: An `etag` is returned in the response to `getIamPolicy`, and
-          # systems are expected to put that etag in the request to `setIamPolicy` to
-          # ensure that their change will be applied to the same version of the policy.
-          #
-          # **Important:** If you use IAM Conditions, you must include the `etag` field
-          # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
-          # you to overwrite a version `3` policy with a version `1` policy, and all of
-          # the conditions in the version `3` policy are lost.
       &quot;version&quot;: 42, # Specifies the format of the policy.
           #
           # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
@@ -4936,15 +4929,15 @@
               # The exact variables and functions that may be referenced within an expression
               # are determined by the service that evaluates it. See the service
               # documentation for additional information.
+            &quot;description&quot;: &quot;A String&quot;, # Optional. Description of the expression. This is a longer text which
+                # describes the expression, e.g. when hovered over it in a UI.
+            &quot;expression&quot;: &quot;A String&quot;, # Textual representation of an expression in Common Expression Language
+                # syntax.
             &quot;title&quot;: &quot;A String&quot;, # Optional. Title for the expression, i.e. a short string describing
                 # its purpose. This can be used e.g. in UIs which allow to enter the
                 # expression.
             &quot;location&quot;: &quot;A String&quot;, # Optional. String indicating the location of the expression for error
                 # reporting, e.g. a file name and a position in the file.
-            &quot;description&quot;: &quot;A String&quot;, # Optional. Description of the expression. This is a longer text which
-                # describes the expression, e.g. when hovered over it in a UI.
-            &quot;expression&quot;: &quot;A String&quot;, # Textual representation of an expression in Common Expression Language
-                # syntax.
           },
           &quot;members&quot;: [ # Specifies the identities requesting access for a Cloud Platform resource.
               # `members` can have the following values:
@@ -4995,12 +4988,19 @@
               # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
         },
       ],
+      &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
+          # prevent simultaneous updates of a policy from overwriting each other.
+          # It is strongly suggested that systems make use of the `etag` in the
+          # read-modify-write cycle to perform policy updates in order to avoid race
+          # conditions: An `etag` is returned in the response to `getIamPolicy`, and
+          # systems are expected to put that etag in the request to `setIamPolicy` to
+          # ensure that their change will be applied to the same version of the policy.
+          #
+          # **Important:** If you use IAM Conditions, you must include the `etag` field
+          # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+          # you to overwrite a version `3` policy with a version `1` policy, and all of
+          # the conditions in the version `3` policy are lost.
     },
-    &quot;updateMask&quot;: &quot;A String&quot;, # OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only
-        # the fields in the mask will be modified. If no mask is provided, the
-        # following default mask is used:
-        # 
-        # `paths: &quot;bindings, etag&quot;`
   }
 
   x__xgafv: string, V1 error format.
@@ -5078,18 +5078,6 @@
       #
       # For a description of IAM and its features, see the
       # [IAM documentation](https://cloud.google.com/iam/docs/).
-    &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
-        # prevent simultaneous updates of a policy from overwriting each other.
-        # It is strongly suggested that systems make use of the `etag` in the
-        # read-modify-write cycle to perform policy updates in order to avoid race
-        # conditions: An `etag` is returned in the response to `getIamPolicy`, and
-        # systems are expected to put that etag in the request to `setIamPolicy` to
-        # ensure that their change will be applied to the same version of the policy.
-        #
-        # **Important:** If you use IAM Conditions, you must include the `etag` field
-        # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
-        # you to overwrite a version `3` policy with a version `1` policy, and all of
-        # the conditions in the version `3` policy are lost.
     &quot;version&quot;: 42, # Specifies the format of the policy.
         #
         # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
@@ -5245,15 +5233,15 @@
             # The exact variables and functions that may be referenced within an expression
             # are determined by the service that evaluates it. See the service
             # documentation for additional information.
+          &quot;description&quot;: &quot;A String&quot;, # Optional. Description of the expression. This is a longer text which
+              # describes the expression, e.g. when hovered over it in a UI.
+          &quot;expression&quot;: &quot;A String&quot;, # Textual representation of an expression in Common Expression Language
+              # syntax.
           &quot;title&quot;: &quot;A String&quot;, # Optional. Title for the expression, i.e. a short string describing
               # its purpose. This can be used e.g. in UIs which allow to enter the
               # expression.
           &quot;location&quot;: &quot;A String&quot;, # Optional. String indicating the location of the expression for error
               # reporting, e.g. a file name and a position in the file.
-          &quot;description&quot;: &quot;A String&quot;, # Optional. Description of the expression. This is a longer text which
-              # describes the expression, e.g. when hovered over it in a UI.
-          &quot;expression&quot;: &quot;A String&quot;, # Textual representation of an expression in Common Expression Language
-              # syntax.
         },
         &quot;members&quot;: [ # Specifies the identities requesting access for a Cloud Platform resource.
             # `members` can have the following values:
@@ -5304,6 +5292,18 @@
             # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
       },
     ],
+    &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
+        # prevent simultaneous updates of a policy from overwriting each other.
+        # It is strongly suggested that systems make use of the `etag` in the
+        # read-modify-write cycle to perform policy updates in order to avoid race
+        # conditions: An `etag` is returned in the response to `getIamPolicy`, and
+        # systems are expected to put that etag in the request to `setIamPolicy` to
+        # ensure that their change will be applied to the same version of the policy.
+        #
+        # **Important:** If you use IAM Conditions, you must include the `etag` field
+        # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+        # you to overwrite a version `3` policy with a version `1` policy, and all of
+        # the conditions in the version `3` policy are lost.
   }</pre>
 </div>
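For orientation only (not part of the generated diff): a minimal sketch of the read-modify-write cycle the `etag` documentation above describes, using the `getIamPolicy`/`setIamPolicy` methods in this file. The project, job, and member are hypothetical placeholders; `roles/viewer` is the example role named in the docs.

```python
# Sketch, assuming default application credentials are available.
from googleapiclient import discovery

ml = discovery.build('ml', 'v1')
resource = 'projects/my-project/jobs/my_training_job_001'  # hypothetical

# Read the current policy; request version 3 so conditional bindings survive.
policy = ml.projects().jobs().getIamPolicy(
    resource=resource,
    options_requestedPolicyVersion=3).execute()

# Modify bindings locally.
policy.setdefault('bindings', []).append({
    'role': 'roles/viewer',
    'members': ['user:alice@example.com'],  # hypothetical member
})

# Write it back. The policy returned by getIamPolicy carries the etag, so
# sending it unchanged lets the service reject the update if the policy was
# modified concurrently, as the etag comment above warns.
ml.projects().jobs().setIamPolicy(
    resource=resource,
    body={'policy': policy}).execute()
```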