docs: update generated docs (#981)

diff --git a/docs/dyn/ml_v1.projects.jobs.html b/docs/dyn/ml_v1.projects.jobs.html
index 3952cf6..8916455 100644
--- a/docs/dyn/ml_v1.projects.jobs.html
+++ b/docs/dyn/ml_v1.projects.jobs.html
@@ -144,20 +144,34 @@
     The object takes the form of:
 
 { # Represents a training or prediction job.
+    "createTime": "A String", # Output only. When the job was created.
     "predictionInput": { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
+      "dataFormat": "A String", # Required. The format of the input data files.
+      "outputPath": "A String", # Required. The output Google Cloud Storage location.
+      "inputPaths": [ # Required. The Cloud Storage location of the input data files. May contain
+          # <a href="/storage/docs/gsutil/addlhelp/WildcardNames">wildcards</a>.
+        "A String",
+      ],
+      "region": "A String", # Required. The Google Compute Engine region to run the prediction job in.
+          # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
+          # for AI Platform services.
       "versionName": "A String", # Use this field if you want to specify a version of the model to use. The
           # string is formatted the same way as `model_version`, with the addition
           # of the version information:
           #
           # `"projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION"`
+      "uri": "A String", # Use this field if you want to specify a Google Cloud Storage path for
+          # the model to use.
+      "outputDataFormat": "A String", # Optional. Format of the output data files, defaults to JSON.
+      "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this batch
+          # prediction. If not set, AI Platform will pick the runtime version used
+          # during the CreateVersion request for this model version, or choose the
+          # latest stable version when model version information is not available
+          # such as when the model is specified by uri.
       "modelName": "A String", # Use this field if you want to use the default version for the specified
           # model. The string must use the following format:
           #
           # `"projects/YOUR_PROJECT/models/YOUR_MODEL"`
-      "uri": "A String", # Use this field if you want to specify a Google Cloud Storage path for
-          # the model to use.
-      "maxWorkerCount": "A String", # Optional. The maximum number of workers to be used for parallel processing.
-          # Defaults to 10 if not specified.
       "signatureName": "A String", # Optional. The name of the signature defined in the SavedModel to use for
           # this job. Please refer to
           # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
@@ -166,202 +180,96 @@
           # Defaults to
           # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
           # , which is "serving_default".
-      "outputPath": "A String", # Required. The output Google Cloud Storage location.
-      "outputDataFormat": "A String", # Optional. Format of the output data files, defaults to JSON.
-      "dataFormat": "A String", # Required. The format of the input data files.
       "batchSize": "A String", # Optional. Number of records per batch, defaults to 64.
           # The service will buffer batch_size number of records in memory before
           # invoking one Tensorflow prediction call internally. So take the record
           # size and memory available into consideration when setting this parameter.
-      "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this batch
-          # prediction. If not set, AI Platform will pick the runtime version used
-          # during the CreateVersion request for this model version, or choose the
-          # latest stable version when model version information is not available
-          # such as when the model is specified by uri.
-      "inputPaths": [ # Required. The Cloud Storage location of the input data files. May contain
-          # <a href="/storage/docs/gsutil/addlhelp/WildcardNames">wildcards</a>.
-        "A String",
+      "maxWorkerCount": "A String", # Optional. The maximum number of workers to be used for parallel processing.
+          # Defaults to 10 if not specified.
+    },
+    "labels": { # Optional. One or more labels that you can add, to organize your jobs.
+        # Each label is a key-value pair, where both the key and the value are
+        # arbitrary strings that you supply.
+        # For more information, see the documentation on
+        # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
+      "a_key": "A String",
+    },
+    "endTime": "A String", # Output only. When the job processing was completed.
+    "trainingOutput": { # Represents results of a training job. Output only. # The current training job result.
+      "trials": [ # Results for individual Hyperparameter trials.
+          # Only set for hyperparameter tuning jobs.
+        { # Represents the result of a single hyperparameter tuning trial from a
+            # training job. The TrainingOutput object that is returned on successful
+            # completion of a training job with hyperparameter tuning includes a list
+            # of HyperparameterOutput objects, one for each successful trial.
+          "endTime": "A String", # Output only. End time for the trial.
+          "finalMetric": { # An observed value of a metric. # The final objective metric seen for this trial.
+            "trainingStep": "A String", # The global training step for this metric.
+            "objectiveValue": 3.14, # The objective value at this training step.
+          },
+          "hyperparameters": { # The hyperparameters given to this trial.
+            "a_key": "A String",
+          },
+          "builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
+              # Only set for trials of built-in algorithms jobs that have succeeded.
+            "modelPath": "A String", # The Cloud Storage path to the `model/` directory where the training job
+                # saves the trained model. Only set for successful jobs that don't use
+                # hyperparameter tuning.
+            "framework": "A String", # Framework on which the built-in algorithm was trained.
+            "runtimeVersion": "A String", # AI Platform runtime version on which the built-in algorithm was
+                # trained.
+            "pythonVersion": "A String", # Python version on which the built-in algorithm was trained.
+          },
+          "startTime": "A String", # Output only. Start time for the trial.
+          "allMetrics": [ # All recorded object metrics for this trial. This field is not currently
+              # populated.
+            { # An observed value of a metric.
+              "trainingStep": "A String", # The global training step for this metric.
+              "objectiveValue": 3.14, # The objective value at this training step.
+            },
+          ],
+          "trialId": "A String", # The trial id for these results.
+          "isTrialStoppedEarly": True or False, # True if the trial is stopped early.
+          "state": "A String", # Output only. The detailed state of the trial.
+        },
       ],
-      "region": "A String", # Required. The Google Compute Engine region to run the prediction job in.
-          # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
-          # for AI Platform services.
+      "completedTrialCount": "A String", # The number of hyperparameter tuning trials that completed successfully.
+          # Only set for hyperparameter tuning jobs.
+      "isHyperparameterTuningJob": True or False, # Whether this job is a hyperparameter tuning job.
+      "isBuiltInAlgorithmJob": True or False, # Whether this job is a built-in Algorithm job.
+      "builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
+          # Only set for built-in algorithms jobs.
+        "modelPath": "A String", # The Cloud Storage path to the `model/` directory where the training job
+            # saves the trained model. Only set for successful jobs that don't use
+            # hyperparameter tuning.
+        "framework": "A String", # Framework on which the built-in algorithm was trained.
+        "runtimeVersion": "A String", # AI Platform runtime version on which the built-in algorithm was
+            # trained.
+        "pythonVersion": "A String", # Python version on which the built-in algorithm was trained.
+      },
+      "consumedMLUnits": 3.14, # The amount of ML units consumed by the job.
+      "hyperparameterMetricTag": "A String", # The TensorFlow summary tag name used for optimizing hyperparameter tuning
+          # trials. See
+          # [`HyperparameterSpec.hyperparameterMetricTag`](#HyperparameterSpec.FIELDS.hyperparameter_metric_tag)
+          # for more information. Only set for hyperparameter tuning jobs.
     },
     "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
-    "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
-        # prevent simultaneous updates of a job from overwriting each other.
-        # It is strongly suggested that systems make use of the `etag` in the
-        # read-modify-write cycle to perform job updates in order to avoid race
-        # conditions: An `etag` is returned in the response to `GetJob`, and
-        # systems are expected to put that etag in the request to `UpdateJob` to
-        # ensure that their change will be applied to the same version of the job.
+    "predictionOutput": { # Represents results of a prediction job. # The current prediction job result.
+      "errorCount": "A String", # The number of data instances which resulted in errors.
+      "nodeHours": 3.14, # Node hours used by the batch prediction job.
+      "outputPath": "A String", # The output Google Cloud Storage location provided at the job creation time.
+      "predictionCount": "A String", # The number of generated predictions.
+    },
+    "startTime": "A String", # Output only. When the job processing was started.
+    "state": "A String", # Output only. The detailed state of a job.
     "trainingInput": { # Represents input parameters for a training job. When using the gcloud command # Input parameters to create a training job.
         # to submit your training job, you can specify the input parameters as
         # command-line arguments and/or in a YAML configuration file referenced from
         # the --config command-line argument. For details, see the guide to [submitting
         # a training job](/ai-platform/training/docs/training-jobs).
-      "workerCount": "A String", # Optional. The number of worker replicas to use for the training job. Each
-          # replica in the cluster will be of the type specified in `worker_type`.
-          #
-          # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
-          # set this value, you must also set `worker_type`.
-          #
-          # The default value is zero.
-      "pythonModule": "A String", # Required. The Python module name to run after installing the packages.
-      "args": [ # Optional. Command-line arguments passed to the training application when it
-          # starts. If your job uses a custom container, then the arguments are passed
-          # to the container's <a class="external" target="_blank"
-          # href="https://docs.docker.com/engine/reference/builder/#entrypoint">
-          # `ENTRYPOINT`</a> command.
-        "A String",
-      ],
-      "parameterServerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
-          #
-          # You should only set `parameterServerConfig.acceleratorConfig` if
-          # `parameterServerType` is set to a Compute Engine machine type. [Learn
-          # about restrictions on accelerator configurations for
-          # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-          #
-          # Set `parameterServerConfig.imageUri` only if you build a custom image for
-          # your parameter server. If `parameterServerConfig.imageUri` has not been
-          # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
-          # containers](/ai-platform/training/docs/distributed-training-containers).
-        "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
-            # Registry. Learn more about [configuring custom
-            # containers](/ai-platform/training/docs/distributed-training-containers).
-        "containerArgs": [ # Arguments to the entrypoint command.
-            # The following rules apply for container_command and container_args:
-            # - If you do not supply command or args:
-            #   The defaults defined in the Docker image are used.
-            # - If you supply a command but no args:
-            #   The default EntryPoint and the default Cmd defined in the Docker image
-            #   are ignored. Your command is run without any arguments.
-            # - If you supply only args:
-            #   The default Entrypoint defined in the Docker image is run with the args
-            #   that you supplied.
-            # - If you supply a command and args:
-            #   The default Entrypoint and the default Cmd defined in the Docker image
-            #   are ignored. Your command is run with your args.
-            # It cannot be set if custom container image is
-            # not provided.
-            # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-            # both cannot be set at the same time.
-          "A String",
-        ],
-        "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-            # [Learn about restrictions on accelerator configurations for
-            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
-            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
-            # [accelerators for online
-            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-          "count": "A String", # The number of accelerators to attach to each machine running the job.
-          "type": "A String", # The type of accelerator to use.
-        },
-        "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
-            # the one used in the custom container. This field is required if the replica
-            # is a TPU worker that uses a custom container. Otherwise, do not specify
-            # this field. This must be a [runtime version that currently supports
-            # training with
-            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-            #
-            # Note that the version of TensorFlow included in a runtime version may
-            # differ from the numbering of the runtime version itself, because it may
-            # have a different [patch
-            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-            # In this field, you must specify the runtime version (TensorFlow minor
-            # version). For example, if your custom container runs TensorFlow `1.x.y`,
-            # specify `1.x`.
-        "containerCommand": [ # The command with which the replica's custom container is run.
-            # If provided, it will override default ENTRYPOINT of the docker image.
-            # If not provided, the docker image's ENTRYPOINT is used.
-            # It cannot be set if custom container image is
-            # not provided.
-            # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-            # both cannot be set at the same time.
-          "A String",
-        ],
-      },
-      "encryptionConfig": { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
-          # protect resources created by a training job, instead of using Google's
-          # default encryption. If this is set, then all resources created by the
-          # training job will be encrypted with the customer-managed encryption key
-          # that you specify.
-          #
-          # [Learn how and when to use CMEK with AI Platform
-          # Training](/ai-platform/training/docs/cmek).
-          # a resource.
-        "kmsKeyName": "A String", # The Cloud KMS resource identifier of the customer-managed encryption key
-            # used to protect a resource, such as a training job. It has the following
-            # format:
-            # `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
-      },
-      "hyperparameters": { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
-        "params": [ # Required. The set of parameters to tune.
-          { # Represents a single hyperparameter to optimize.
-            "categoricalValues": [ # Required if type is `CATEGORICAL`. The list of possible categories.
-              "A String",
-            ],
-            "parameterName": "A String", # Required. The parameter name must be unique amongst all ParameterConfigs in
-                # a HyperparameterSpec message. E.g., "learning_rate".
-            "minValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
-                # should be unset if type is `CATEGORICAL`. This value should be integers if
-                # type is INTEGER.
-            "discreteValues": [ # Required if type is `DISCRETE`.
-                # A list of feasible points.
-                # The list should be in strictly increasing order. For instance, this
-                # parameter might have possible settings of 1.5, 2.5, and 4.0. This list
-                # should not contain more than 1,000 values.
-              3.14,
-            ],
-            "scaleType": "A String", # Optional. How the parameter should be scaled to the hypercube.
-                # Leave unset for categorical parameters.
-                # Some kind of scaling is strongly recommended for real or integral
-                # parameters (e.g., `UNIT_LINEAR_SCALE`).
-            "maxValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
-                # should be unset if type is `CATEGORICAL`. This value should be integers if
-                # type is `INTEGER`.
-            "type": "A String", # Required. The type of the parameter.
-          },
-        ],
-        "enableTrialEarlyStopping": True or False, # Optional. Indicates if the hyperparameter tuning job enables auto trial
-            # early stopping.
-        "resumePreviousJobId": "A String", # Optional. The prior hyperparameter tuning job id that users hope to
-            # continue with. The job id will be used to find the corresponding vizier
-            # study guid and resume the study.
-        "maxParallelTrials": 42, # Optional. The number of training trials to run concurrently.
-            # You can reduce the time it takes to perform hyperparameter tuning by adding
-            # trials in parallel. However, each trail only benefits from the information
-            # gained in completed trials. That means that a trial does not get access to
-            # the results of trials running at the same time, which could reduce the
-            # quality of the overall optimization.
-            #
-            # Each trial will use the same scale tier and machine types.
-            #
-            # Defaults to one.
-        "maxFailedTrials": 42, # Optional. The number of failed trials that need to be seen before failing
-            # the hyperparameter tuning job. You can specify this field to override the
-            # default failing criteria for AI Platform hyperparameter tuning jobs.
-            #
-            # Defaults to zero, which means the service decides when a hyperparameter
-            # job should fail.
-        "goal": "A String", # Required. The type of goal to use for tuning. Available types are
-            # `MAXIMIZE` and `MINIMIZE`.
-            #
-            # Defaults to `MAXIMIZE`.
-        "maxTrials": 42, # Optional. How many training trials should be attempted to optimize
-            # the specified hyperparameters.
-            #
-            # Defaults to one.
-        "algorithm": "A String", # Optional. The search algorithm specified for the hyperparameter
-            # tuning job.
-            # Uses the default AI Platform hyperparameter tuning
-            # algorithm if unspecified.
-        "hyperparameterMetricTag": "A String", # Optional. The TensorFlow summary tag name to use for optimizing trials. For
-            # current versions of TensorFlow, this tag name should exactly match what is
-            # shown in TensorBoard, including all scopes.  For versions of TensorFlow
-            # prior to 0.12, this should be only the tag passed to tf.Summary.
-            # By default, "training/hptuning/metric" will be used.
-      },
+      "serviceAccount": "A String", # Optional. Specifies the service account for workload run-as account.
+          # Users submitting jobs must have act-as permission on this run-as account.
+          # If not specified, then CMLE P4SA will be used by default.
       "workerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
           #
           # You should only set `workerConfig.acceleratorConfig` if `workerType` is set
@@ -373,6 +281,16 @@
           # worker. If `workerConfig.imageUri` has not been set, AI Platform uses
           # the value of `masterConfig.imageUri`. Learn more about [configuring custom
           # containers](/ai-platform/training/docs/distributed-training-containers).
+        "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+            # [Learn about restrictions on accelerator configurations for
+            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+            # [accelerators for online
+            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+          "type": "A String", # The type of accelerator to use.
+          "count": "A String", # The number of accelerators to attach to each machine running the job.
+        },
         "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
             # Registry. Learn more about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -395,16 +313,6 @@
             # both cannot be set at the same time.
           "A String",
         ],
-        "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-            # [Learn about restrictions on accelerator configurations for
-            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
-            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
-            # [accelerators for online
-            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-          "count": "A String", # The number of accelerators to attach to each machine running the job.
-          "type": "A String", # The type of accelerator to use.
-        },
         "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
             # the one used in the custom container. This field is required if the replica
             # is a TPU worker that uses a custom container. Otherwise, do not specify
@@ -429,27 +337,13 @@
           "A String",
         ],
       },
-      "parameterServerCount": "A String", # Optional. The number of parameter server replicas to use for the training
-          # job. Each replica in the cluster will be of the type specified in
-          # `parameter_server_type`.
+      "useChiefInTfConfig": True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
+          # variable when training with a custom container. Defaults to `false`. [Learn
+          # more about this
+          # field.](/ai-platform/training/docs/distributed-training-details#chief-versus-master)
           #
-          # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
-          # set this value, you must also set `parameter_server_type`.
-          #
-          # The default value is zero.
-      "packageUris": [ # Required. The Google Cloud Storage location of the packages with
-          # the training program and any additional dependencies.
-          # The maximum number of package URIs is 100.
-        "A String",
-      ],
-      "evaluatorCount": "A String", # Optional. The number of evaluator replicas to use for the training job.
-          # Each replica in the cluster will be of the type specified in
-          # `evaluator_type`.
-          #
-          # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
-          # set this value, you must also set `evaluator_type`.
-          #
-          # The default value is zero.
+          # This field has no effect for training jobs that don't use a custom
+          # container.
       "masterType": "A String", # Optional. Specifies the type of virtual machine to use for your training
           # job's master worker. You must specify this field when `scaleTier` is set to
           # `CUSTOM`.
@@ -502,65 +396,27 @@
           # field. Learn more about the [special configuration options for training
           # with
           # TPUs](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
-      "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for training. You must
-          # either specify this field or specify `masterConfig.imageUri`.
+      "parameterServerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
           #
-          # For more information, see the [runtime version
-          # list](/ai-platform/training/docs/runtime-version-list) and learn [how to
-          # manage runtime versions](/ai-platform/training/docs/versioning).
-      "evaluatorType": "A String", # Optional. Specifies the type of virtual machine to use for your training
-          # job's evaluator nodes.
-          #
-          # The supported values are the same as those described in the entry for
-          # `masterType`.
-          #
-          # This value must be consistent with the category of machine type that
-          # `masterType` uses. In other words, both must be Compute Engine machine
-          # types or both must be legacy machine types.
-          #
-          # This value must be present when `scaleTier` is set to `CUSTOM` and
-          # `evaluatorCount` is greater than zero.
-      "workerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
-          # job's worker nodes.
-          #
-          # The supported values are the same as those described in the entry for
-          # `masterType`.
-          #
-          # This value must be consistent with the category of machine type that
-          # `masterType` uses. In other words, both must be Compute Engine machine
-          # types or both must be legacy machine types.
-          #
-          # If you use `cloud_tpu` for this value, see special instructions for
-          # [configuring a custom TPU
-          # machine](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
-          #
-          # This value must be present when `scaleTier` is set to `CUSTOM` and
-          # `workerCount` is greater than zero.
-      "region": "A String", # Required. The region to run the training job in. See the [available
-          # regions](/ai-platform/training/docs/regions) for AI Platform Training.
-      "parameterServerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
-          # job's parameter server.
-          #
-          # The supported values are the same as those described in the entry for
-          # `master_type`.
-          #
-          # This value must be consistent with the category of machine type that
-          # `masterType` uses. In other words, both must be Compute Engine machine
-          # types or both must be legacy machine types.
-          #
-          # This value must be present when `scaleTier` is set to `CUSTOM` and
-          # `parameter_server_count` is greater than zero.
-      "masterConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for your master worker.
-          #
-          # You should only set `masterConfig.acceleratorConfig` if `masterType` is set
-          # to a Compute Engine machine type. Learn about [restrictions on accelerator
-          # configurations for
+          # You should only set `parameterServerConfig.acceleratorConfig` if
+          # `parameterServerType` is set to a Compute Engine machine type. [Learn
+          # about restrictions on accelerator configurations for
           # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
           #
-          # Set `masterConfig.imageUri` only if you build a custom image. Only one of
-          # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
-          # about [configuring custom
+          # Set `parameterServerConfig.imageUri` only if you build a custom image for
+          # your parameter server. If `parameterServerConfig.imageUri` has not been
+          # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
           # containers](/ai-platform/training/docs/distributed-training-containers).
+        "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+            # [Learn about restrictions on accelerator configurations for
+            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+            # [accelerators for online
+            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+          "type": "A String", # The type of accelerator to use.
+          "count": "A String", # The number of accelerators to attach to each machine running the job.
+        },
         "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
             # Registry. Learn more about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -583,16 +439,6 @@
             # both cannot be set at the same time.
           "A String",
         ],
-        "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-            # [Learn about restrictions on accelerator configurations for
-            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
-            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
-            # [accelerators for online
-            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-          "count": "A String", # The number of accelerators to attach to each machine running the job.
-          "type": "A String", # The type of accelerator to use.
-        },
         "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
             # the one used in the custom container. This field is required if the replica
             # is a TPU worker that uses a custom container. Otherwise, do not specify
@@ -617,8 +463,8 @@
           "A String",
         ],
       },
-      "scaleTier": "A String", # Required. Specifies the machine types, the number of replicas for workers
-          # and parameter servers.
+      "region": "A String", # Required. The region to run the training job in. See the [available
+          # regions](/ai-platform/training/docs/regions) for AI Platform Training.
       "jobDir": "A String", # Optional. A Google Cloud Storage path in which to store training outputs
           # and other data needed for training. This path is passed to your TensorFlow
           # program as the '--job-dir' command-line argument. The benefit of specifying
@@ -637,6 +483,308 @@
           #
           # Read more about the Python versions available for [each runtime
           # version](/ml-engine/docs/runtime-version-list).
+      "hyperparameters": { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
+        "maxTrials": 42, # Optional. How many training trials should be attempted to optimize
+            # the specified hyperparameters.
+            #
+            # Defaults to one.
+        "enableTrialEarlyStopping": True or False, # Optional. Indicates if the hyperparameter tuning job enables auto trial
+            # early stopping.
+        "params": [ # Required. The set of parameters to tune.
+          { # Represents a single hyperparameter to optimize.
+            "minValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
+                # should be unset if type is `CATEGORICAL`. This value should be integers if
+                # type is `INTEGER`.
+            "categoricalValues": [ # Required if type is `CATEGORICAL`. The list of possible categories.
+              "A String",
+            ],
+            "scaleType": "A String", # Optional. How the parameter should be scaled to the hypercube.
+                # Leave unset for categorical parameters.
+                # Some kind of scaling is strongly recommended for real or integral
+                # parameters (e.g., `UNIT_LINEAR_SCALE`).
+            "discreteValues": [ # Required if type is `DISCRETE`.
+                # A list of feasible points.
+                # The list should be in strictly increasing order. For instance, this
+                # parameter might have possible settings of 1.5, 2.5, and 4.0. This list
+                # should not contain more than 1,000 values.
+              3.14,
+            ],
+            "type": "A String", # Required. The type of the parameter.
+            "maxValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
+                # should be unset if type is `CATEGORICAL`. This value should be integers if
+                # type is `INTEGER`.
+            "parameterName": "A String", # Required. The parameter name must be unique amongst all ParameterConfigs in
+                # a HyperparameterSpec message. E.g., "learning_rate".
+          },
+        ],
+        "maxFailedTrials": 42, # Optional. The number of failed trials that need to be seen before failing
+            # the hyperparameter tuning job. You can specify this field to override the
+            # default failing criteria for AI Platform hyperparameter tuning jobs.
+            #
+            # Defaults to zero, which means the service decides when a hyperparameter
+            # job should fail.
+        "hyperparameterMetricTag": "A String", # Optional. The TensorFlow summary tag name to use for optimizing trials. For
+            # current versions of TensorFlow, this tag name should exactly match what is
+            # shown in TensorBoard, including all scopes.  For versions of TensorFlow
+            # prior to 0.12, this should be only the tag passed to tf.Summary.
+            # By default, "training/hptuning/metric" will be used.
+        "resumePreviousJobId": "A String", # Optional. The prior hyperparameter tuning job id that users hope to
+            # continue with. The job id will be used to find the corresponding vizier
+            # study guid and resume the study.
+        "goal": "A String", # Required. The type of goal to use for tuning. Available types are
+            # `MAXIMIZE` and `MINIMIZE`.
+            #
+            # Defaults to `MAXIMIZE`.
+        "algorithm": "A String", # Optional. The search algorithm specified for the hyperparameter
+            # tuning job.
+            # Uses the default AI Platform hyperparameter tuning
+            # algorithm if unspecified.
+        "maxParallelTrials": 42, # Optional. The number of training trials to run concurrently.
+            # You can reduce the time it takes to perform hyperparameter tuning by adding
+            # trials in parallel. However, each trial only benefits from the information
+            # gained in completed trials. That means that a trial does not get access to
+            # the results of trials running at the same time, which could reduce the
+            # quality of the overall optimization.
+            #
+            # Each trial will use the same scale tier and machine types.
+            #
+            # Defaults to one.
+      },
+      "evaluatorType": "A String", # Optional. Specifies the type of virtual machine to use for your training
+          # job's evaluator nodes.
+          #
+          # The supported values are the same as those described in the entry for
+          # `masterType`.
+          #
+          # This value must be consistent with the category of machine type that
+          # `masterType` uses. In other words, both must be Compute Engine machine
+          # types or both must be legacy machine types.
+          #
+          # This value must be present when `scaleTier` is set to `CUSTOM` and
+          # `evaluatorCount` is greater than zero.
+      "network": "A String", # Optional. The full name of the Google Compute Engine
+          # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
+          # is peered. For example, projects/12345/global/networks/myVPC. Format is of
+          # the form projects/{project}/global/networks/{network}. Where {project} is a
+          # project number, as in '12345', and {network} is the network name.
+          #
+          # Private services access must already be configured for the network. If left
+          # unspecified, the Job is not peered with any network. Learn more -
+          # Connecting Job to user network over private
+          # IP.
+      "parameterServerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
+          # job's parameter server.
+          #
+          # The supported values are the same as those described in the entry for
+          # `master_type`.
+          #
+          # This value must be consistent with the category of machine type that
+          # `masterType` uses. In other words, both must be Compute Engine machine
+          # types or both must be legacy machine types.
+          #
+          # This value must be present when `scaleTier` is set to `CUSTOM` and
+          # `parameter_server_count` is greater than zero.
+      "workerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
+          # job's worker nodes.
+          #
+          # The supported values are the same as those described in the entry for
+          # `masterType`.
+          #
+          # This value must be consistent with the category of machine type that
+          # `masterType` uses. In other words, both must be Compute Engine machine
+          # types or both must be legacy machine types.
+          #
+          # If you use `cloud_tpu` for this value, see special instructions for
+          # [configuring a custom TPU
+          # machine](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
+          #
+          # This value must be present when `scaleTier` is set to `CUSTOM` and
+          # `workerCount` is greater than zero.
+      "masterConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for your master worker.
+          #
+          # You should only set `masterConfig.acceleratorConfig` if `masterType` is set
+          # to a Compute Engine machine type. Learn about [restrictions on accelerator
+          # configurations for
+          # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+          #
+          # Set `masterConfig.imageUri` only if you build a custom image. Only one of
+          # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
+          # about [configuring custom
+          # containers](/ai-platform/training/docs/distributed-training-containers).
+        "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+            # [Learn about restrictions on accelerator configurations for
+            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+            # [accelerators for online
+            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+          "type": "A String", # The type of accelerator to use.
+          "count": "A String", # The number of accelerators to attach to each machine running the job.
+        },
+        "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
+            # Registry. Learn more about [configuring custom
+            # containers](/ai-platform/training/docs/distributed-training-containers).
+        "containerArgs": [ # Arguments to the entrypoint command.
+            # The following rules apply for container_command and container_args:
+            # - If you do not supply command or args:
+            #   The defaults defined in the Docker image are used.
+            # - If you supply a command but no args:
+            #   The default EntryPoint and the default Cmd defined in the Docker image
+            #   are ignored. Your command is run without any arguments.
+            # - If you supply only args:
+            #   The default Entrypoint defined in the Docker image is run with the args
+            #   that you supplied.
+            # - If you supply a command and args:
+            #   The default Entrypoint and the default Cmd defined in the Docker image
+            #   are ignored. Your command is run with your args.
+            # It cannot be set if custom container image is
+            # not provided.
+            # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+            # both cannot be set at the same time.
+          "A String",
+        ],
+        "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+            # the one used in the custom container. This field is required if the replica
+            # is a TPU worker that uses a custom container. Otherwise, do not specify
+            # this field. This must be a [runtime version that currently supports
+            # training with
+            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+            #
+            # Note that the version of TensorFlow included in a runtime version may
+            # differ from the numbering of the runtime version itself, because it may
+            # have a different [patch
+            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+            # In this field, you must specify the runtime version (TensorFlow minor
+            # version). For example, if your custom container runs TensorFlow `1.x.y`,
+            # specify `1.x`.
+        "containerCommand": [ # The command with which the replica's custom container is run.
+            # If provided, it will override default ENTRYPOINT of the docker image.
+            # If not provided, the docker image's ENTRYPOINT is used.
+            # It cannot be set if custom container image is
+            # not provided.
+            # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+            # both cannot be set at the same time.
+          "A String",
+        ],
+      },
+      "evaluatorCount": "A String", # Optional. The number of evaluator replicas to use for the training job.
+          # Each replica in the cluster will be of the type specified in
+          # `evaluator_type`.
+          #
+          # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+          # set this value, you must also set `evaluator_type`.
+          #
+          # The default value is zero.
+      "args": [ # Optional. Command-line arguments passed to the training application when it
+          # starts. If your job uses a custom container, then the arguments are passed
+          # to the container's <a class="external" target="_blank"
+          # href="https://docs.docker.com/engine/reference/builder/#entrypoint">
+          # `ENTRYPOINT`</a> command.
+        "A String",
+      ],
+      "pythonModule": "A String", # Required. The Python module name to run after installing the packages.
+      "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for training. You must
+          # either specify this field or specify `masterConfig.imageUri`.
+          #
+          # For more information, see the [runtime version
+          # list](/ai-platform/training/docs/runtime-version-list) and learn [how to
+          # manage runtime versions](/ai-platform/training/docs/versioning).
+      "parameterServerCount": "A String", # Optional. The number of parameter server replicas to use for the training
+          # job. Each replica in the cluster will be of the type specified in
+          # `parameter_server_type`.
+          #
+          # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+          # set this value, you must also set `parameter_server_type`.
+          #
+          # The default value is zero.
+      "evaluatorConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
+          #
+          # You should only set `evaluatorConfig.acceleratorConfig` if
+          # `evaluatorType` is set to a Compute Engine machine type. [Learn
+          # about restrictions on accelerator configurations for
+          # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+          #
+          # Set `evaluatorConfig.imageUri` only if you build a custom image for
+          # your evaluator. If `evaluatorConfig.imageUri` has not been
+          # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
+          # containers](/ai-platform/training/docs/distributed-training-containers).
+        "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+            # [Learn about restrictions on accelerator configurations for
+            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+            # [accelerators for online
+            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+          "type": "A String", # The type of accelerator to use.
+          "count": "A String", # The number of accelerators to attach to each machine running the job.
+        },
+        "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
+            # Registry. Learn more about [configuring custom
+            # containers](/ai-platform/training/docs/distributed-training-containers).
+        "containerArgs": [ # Arguments to the entrypoint command.
+            # The following rules apply for container_command and container_args:
+            # - If you do not supply command or args:
+            #   The defaults defined in the Docker image are used.
+            # - If you supply a command but no args:
+            #   The default EntryPoint and the default Cmd defined in the Docker image
+            #   are ignored. Your command is run without any arguments.
+            # - If you supply only args:
+            #   The default Entrypoint defined in the Docker image is run with the args
+            #   that you supplied.
+            # - If you supply a command and args:
+            #   The default Entrypoint and the default Cmd defined in the Docker image
+            #   are ignored. Your command is run with your args.
+            # It cannot be set if custom container image is
+            # not provided.
+            # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+            # both cannot be set at the same time.
+          "A String",
+        ],
+        "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+            # the one used in the custom container. This field is required if the replica
+            # is a TPU worker that uses a custom container. Otherwise, do not specify
+            # this field. This must be a [runtime version that currently supports
+            # training with
+            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+            #
+            # Note that the version of TensorFlow included in a runtime version may
+            # differ from the numbering of the runtime version itself, because it may
+            # have a different [patch
+            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+            # In this field, you must specify the runtime version (TensorFlow minor
+            # version). For example, if your custom container runs TensorFlow `1.x.y`,
+            # specify `1.x`.
+        "containerCommand": [ # The command with which the replica's custom container is run.
+            # If provided, it will override default ENTRYPOINT of the docker image.
+            # If not provided, the docker image's ENTRYPOINT is used.
+            # It cannot be set if custom container image is
+            # not provided.
+            # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+            # both cannot be set at the same time.
+          "A String",
+        ],
+      },
+      "encryptionConfig": { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
+          # protect resources created by a training job, instead of using Google's
+          # default encryption. If this is set, then all resources created by the
+          # training job will be encrypted with the customer-managed encryption key
+          # that you specify.
+          #
+          # [Learn how and when to use CMEK with AI Platform
+          # Training](/ai-platform/training/docs/cmek).
+          # a resource.
+        "kmsKeyName": "A String", # The Cloud KMS resource identifier of the customer-managed encryption key
+            # used to protect a resource, such as a training job. It has the following
+            # format:
+            # `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
+      },
+      "workerCount": "A String", # Optional. The number of worker replicas to use for the training job. Each
+          # replica in the cluster will be of the type specified in `worker_type`.
+          #
+          # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+          # set this value, you must also set `worker_type`.
+          #
+          # The default value is zero.
       "scheduling": { # All parameters related to scheduling of training jobs. # Optional. Scheduling options for a training job.
         "maxWaitTime": "A String",
         "maxRunningTime": "A String", # Optional. The maximum job running time, expressed in seconds. The field can
@@ -663,167 +811,22 @@
             #   ...
             # ```
       },
-      "network": "A String", # Optional. The full name of the Google Compute Engine
-          # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
-          # is peered. For example, projects/12345/global/networks/myVPC. Format is of
-          # the form projects/{project}/global/networks/{network}. Where {project} is a
-          # project number, as in '12345', and {network} is network name.".
-          #
-          # Private services access must already be configured for the network. If left
-          # unspecified, the Job is not peered with any network. Learn more -
-          # Connecting Job to user network over private
-          # IP.
-      "evaluatorConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
-          #
-          # You should only set `evaluatorConfig.acceleratorConfig` if
-          # `evaluatorType` is set to a Compute Engine machine type. [Learn
-          # about restrictions on accelerator configurations for
-          # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-          #
-          # Set `evaluatorConfig.imageUri` only if you build a custom image for
-          # your evaluator. If `evaluatorConfig.imageUri` has not been
-          # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
-          # containers](/ai-platform/training/docs/distributed-training-containers).
-        "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
-            # Registry. Learn more about [configuring custom
-            # containers](/ai-platform/training/docs/distributed-training-containers).
-        "containerArgs": [ # Arguments to the entrypoint command.
-            # The following rules apply for container_command and container_args:
-            # - If you do not supply command or args:
-            #   The defaults defined in the Docker image are used.
-            # - If you supply a command but no args:
-            #   The default EntryPoint and the default Cmd defined in the Docker image
-            #   are ignored. Your command is run without any arguments.
-            # - If you supply only args:
-            #   The default Entrypoint defined in the Docker image is run with the args
-            #   that you supplied.
-            # - If you supply a command and args:
-            #   The default Entrypoint and the default Cmd defined in the Docker image
-            #   are ignored. Your command is run with your args.
-            # It cannot be set if custom container image is
-            # not provided.
-            # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-            # both cannot be set at the same time.
-          "A String",
-        ],
-        "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-            # [Learn about restrictions on accelerator configurations for
-            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
-            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
-            # [accelerators for online
-            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-          "count": "A String", # The number of accelerators to attach to each machine running the job.
-          "type": "A String", # The type of accelerator to use.
-        },
-        "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
-            # the one used in the custom container. This field is required if the replica
-            # is a TPU worker that uses a custom container. Otherwise, do not specify
-            # this field. This must be a [runtime version that currently supports
-            # training with
-            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-            #
-            # Note that the version of TensorFlow included in a runtime version may
-            # differ from the numbering of the runtime version itself, because it may
-            # have a different [patch
-            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-            # In this field, you must specify the runtime version (TensorFlow minor
-            # version). For example, if your custom container runs TensorFlow `1.x.y`,
-            # specify `1.x`.
-        "containerCommand": [ # The command with which the replica's custom container is run.
-            # If provided, it will override default ENTRYPOINT of the docker image.
-            # If not provided, the docker image's ENTRYPOINT is used.
-            # It cannot be set if custom container image is
-            # not provided.
-            # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-            # both cannot be set at the same time.
-          "A String",
-        ],
-      },
-      "useChiefInTfConfig": True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
-          # variable when training with a custom container. Defaults to `false`. [Learn
-          # more about this
-          # field.](/ai-platform/training/docs/distributed-training-details#chief-versus-master)
-          #
-          # This field has no effect for training jobs that don't use a custom
-          # container.
-    },
-    "state": "A String", # Output only. The detailed state of a job.
-    "jobId": "A String", # Required. The user-specified id of the job.
-    "endTime": "A String", # Output only. When the job processing was completed.
-    "startTime": "A String", # Output only. When the job processing was started.
-    "predictionOutput": { # Represents results of a prediction job. # The current prediction job result.
-      "errorCount": "A String", # The number of data instances which resulted in errors.
-      "outputPath": "A String", # The output Google Cloud Storage location provided at the job creation time.
-      "nodeHours": 3.14, # Node hours used by the batch prediction job.
-      "predictionCount": "A String", # The number of generated predictions.
-    },
-    "trainingOutput": { # Represents results of a training job. Output only. # The current training job result.
-      "isBuiltInAlgorithmJob": True or False, # Whether this job is a built-in Algorithm job.
-      "builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
-          # Only set for built-in algorithms jobs.
-        "modelPath": "A String", # The Cloud Storage path to the `model/` directory where the training job
-            # saves the trained model. Only set for successful jobs that don't use
-            # hyperparameter tuning.
-        "pythonVersion": "A String", # Python version on which the built-in algorithm was trained.
-        "runtimeVersion": "A String", # AI Platform runtime version on which the built-in algorithm was
-            # trained.
-        "framework": "A String", # Framework on which the built-in algorithm was trained.
-      },
-      "trials": [ # Results for individual Hyperparameter trials.
-          # Only set for hyperparameter tuning jobs.
-        { # Represents the result of a single hyperparameter tuning trial from a
-            # training job. The TrainingOutput object that is returned on successful
-            # completion of a training job with hyperparameter tuning includes a list
-            # of HyperparameterOutput objects, one for each successful trial.
-          "trialId": "A String", # The trial id for these results.
-          "endTime": "A String", # Output only. End time for the trial.
-          "startTime": "A String", # Output only. Start time for the trial.
-          "isTrialStoppedEarly": True or False, # True if the trial is stopped early.
-          "finalMetric": { # An observed value of a metric. # The final objective metric seen for this trial.
-            "trainingStep": "A String", # The global training step for this metric.
-            "objectiveValue": 3.14, # The objective value at this training step.
-          },
-          "builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
-              # Only set for trials of built-in algorithms jobs that have succeeded.
-            "modelPath": "A String", # The Cloud Storage path to the `model/` directory where the training job
-                # saves the trained model. Only set for successful jobs that don't use
-                # hyperparameter tuning.
-            "pythonVersion": "A String", # Python version on which the built-in algorithm was trained.
-            "runtimeVersion": "A String", # AI Platform runtime version on which the built-in algorithm was
-                # trained.
-            "framework": "A String", # Framework on which the built-in algorithm was trained.
-          },
-          "state": "A String", # Output only. The detailed state of the trial.
-          "allMetrics": [ # All recorded object metrics for this trial. This field is not currently
-              # populated.
-            { # An observed value of a metric.
-              "trainingStep": "A String", # The global training step for this metric.
-              "objectiveValue": 3.14, # The objective value at this training step.
-            },
-          ],
-          "hyperparameters": { # The hyperparameters given to this trial.
-            "a_key": "A String",
-          },
-        },
+      "scaleTier": "A String", # Required. Specifies the machine types, the number of replicas for workers
+          # and parameter servers.
+      "packageUris": [ # Required. The Google Cloud Storage location of the packages with
+          # the training program and any additional dependencies.
+          # The maximum number of package URIs is 100.
+        "A String",
       ],
-      "hyperparameterMetricTag": "A String", # The TensorFlow summary tag name used for optimizing hyperparameter tuning
-          # trials. See
-          # [`HyperparameterSpec.hyperparameterMetricTag`](#HyperparameterSpec.FIELDS.hyperparameter_metric_tag)
-          # for more information. Only set for hyperparameter tuning jobs.
-      "completedTrialCount": "A String", # The number of hyperparameter tuning trials that completed successfully.
-          # Only set for hyperparameter tuning jobs.
-      "isHyperparameterTuningJob": True or False, # Whether this job is a hyperparameter tuning job.
-      "consumedMLUnits": 3.14, # The amount of ML units consumed by the job.
     },
-    "createTime": "A String", # Output only. When the job was created.
-    "labels": { # Optional. One or more labels that you can add, to organize your jobs.
-        # Each label is a key-value pair, where both the key and the value are
-        # arbitrary strings that you supply.
-        # For more information, see the documentation on
-        # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
-      "a_key": "A String",
-    },
+    "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
+        # prevent simultaneous updates of a job from overwriting each other.
+        # It is strongly suggested that systems make use of the `etag` in the
+        # read-modify-write cycle to perform job updates in order to avoid race
+        # conditions: An `etag` is returned in the response to `GetJob`, and
+        # systems are expected to put that etag in the request to `UpdateJob` to
+        # ensure that their change will be applied to the same version of the job.
+    "jobId": "A String", # Required. The user-specified id of the job.
   }
 
   x__xgafv: string, V1 error format.
@@ -835,20 +838,34 @@
   An object of the form:
 
     { # Represents a training or prediction job.
+      "createTime": "A String", # Output only. When the job was created.
       "predictionInput": { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
+        "dataFormat": "A String", # Required. The format of the input data files.
+        "outputPath": "A String", # Required. The output Google Cloud Storage location.
+        "inputPaths": [ # Required. The Cloud Storage location of the input data files. May contain
+            # <a href="/storage/docs/gsutil/addlhelp/WildcardNames">wildcards</a>.
+          "A String",
+        ],
+        "region": "A String", # Required. The Google Compute Engine region to run the prediction job in.
+            # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
+            # for AI Platform services.
         "versionName": "A String", # Use this field if you want to specify a version of the model to use. The
             # string is formatted the same way as `model_version`, with the addition
             # of the version information:
             #
             # `"projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION"`
+        "uri": "A String", # Use this field if you want to specify a Google Cloud Storage path for
+            # the model to use.
+        "outputDataFormat": "A String", # Optional. Format of the output data files, defaults to JSON.
+        "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this batch
+            # prediction. If not set, AI Platform will pick the runtime version used
+            # during the CreateVersion request for this model version, or choose the
+            # latest stable version when model version information is not available
+            # such as when the model is specified by uri.
         "modelName": "A String", # Use this field if you want to use the default version for the specified
             # model. The string must use the following format:
             #
             # `"projects/YOUR_PROJECT/models/YOUR_MODEL"`
-        "uri": "A String", # Use this field if you want to specify a Google Cloud Storage path for
-            # the model to use.
-        "maxWorkerCount": "A String", # Optional. The maximum number of workers to be used for parallel processing.
-            # Defaults to 10 if not specified.
         "signatureName": "A String", # Optional. The name of the signature defined in the SavedModel to use for
             # this job. Please refer to
             # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
@@ -857,202 +874,96 @@
             # Defaults to
             # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
             # , which is "serving_default".
-        "outputPath": "A String", # Required. The output Google Cloud Storage location.
-        "outputDataFormat": "A String", # Optional. Format of the output data files, defaults to JSON.
-        "dataFormat": "A String", # Required. The format of the input data files.
         "batchSize": "A String", # Optional. Number of records per batch, defaults to 64.
             # The service will buffer batch_size number of records in memory before
             # invoking one Tensorflow prediction call internally. So take the record
             # size and memory available into consideration when setting this parameter.
-        "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this batch
-            # prediction. If not set, AI Platform will pick the runtime version used
-            # during the CreateVersion request for this model version, or choose the
-            # latest stable version when model version information is not available
-            # such as when the model is specified by uri.
-        "inputPaths": [ # Required. The Cloud Storage location of the input data files. May contain
-            # <a href="/storage/docs/gsutil/addlhelp/WildcardNames">wildcards</a>.
-          "A String",
+        "maxWorkerCount": "A String", # Optional. The maximum number of workers to be used for parallel processing.
+            # Defaults to 10 if not specified.
+      },
+      "labels": { # Optional. One or more labels that you can add, to organize your jobs.
+          # Each label is a key-value pair, where both the key and the value are
+          # arbitrary strings that you supply.
+          # For more information, see the documentation on
+          # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
+        "a_key": "A String",
+      },
+      "endTime": "A String", # Output only. When the job processing was completed.
+      "trainingOutput": { # Represents results of a training job. Output only. # The current training job result.
+        "trials": [ # Results for individual Hyperparameter trials.
+            # Only set for hyperparameter tuning jobs.
+          { # Represents the result of a single hyperparameter tuning trial from a
+              # training job. The TrainingOutput object that is returned on successful
+              # completion of a training job with hyperparameter tuning includes a list
+              # of HyperparameterOutput objects, one for each successful trial.
+            "endTime": "A String", # Output only. End time for the trial.
+            "finalMetric": { # An observed value of a metric. # The final objective metric seen for this trial.
+              "trainingStep": "A String", # The global training step for this metric.
+              "objectiveValue": 3.14, # The objective value at this training step.
+            },
+            "hyperparameters": { # The hyperparameters given to this trial.
+              "a_key": "A String",
+            },
+            "builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
+                # Only set for trials of built-in algorithms jobs that have succeeded.
+              "modelPath": "A String", # The Cloud Storage path to the `model/` directory where the training job
+                  # saves the trained model. Only set for successful jobs that don't use
+                  # hyperparameter tuning.
+              "framework": "A String", # Framework on which the built-in algorithm was trained.
+              "runtimeVersion": "A String", # AI Platform runtime version on which the built-in algorithm was
+                  # trained.
+              "pythonVersion": "A String", # Python version on which the built-in algorithm was trained.
+            },
+            "startTime": "A String", # Output only. Start time for the trial.
+            "allMetrics": [ # All recorded object metrics for this trial. This field is not currently
+                # populated.
+              { # An observed value of a metric.
+                "trainingStep": "A String", # The global training step for this metric.
+                "objectiveValue": 3.14, # The objective value at this training step.
+              },
+            ],
+            "trialId": "A String", # The trial id for these results.
+            "isTrialStoppedEarly": True or False, # True if the trial is stopped early.
+            "state": "A String", # Output only. The detailed state of the trial.
+          },
         ],
-        "region": "A String", # Required. The Google Compute Engine region to run the prediction job in.
-            # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
-            # for AI Platform services.
+        "completedTrialCount": "A String", # The number of hyperparameter tuning trials that completed successfully.
+            # Only set for hyperparameter tuning jobs.
+        "isHyperparameterTuningJob": True or False, # Whether this job is a hyperparameter tuning job.
+        "isBuiltInAlgorithmJob": True or False, # Whether this job is a built-in Algorithm job.
+        "builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
+            # Only set for built-in algorithms jobs.
+          "modelPath": "A String", # The Cloud Storage path to the `model/` directory where the training job
+              # saves the trained model. Only set for successful jobs that don't use
+              # hyperparameter tuning.
+          "framework": "A String", # Framework on which the built-in algorithm was trained.
+          "runtimeVersion": "A String", # AI Platform runtime version on which the built-in algorithm was
+              # trained.
+          "pythonVersion": "A String", # Python version on which the built-in algorithm was trained.
+        },
+        "consumedMLUnits": 3.14, # The amount of ML units consumed by the job.
+        "hyperparameterMetricTag": "A String", # The TensorFlow summary tag name used for optimizing hyperparameter tuning
+            # trials. See
+            # [`HyperparameterSpec.hyperparameterMetricTag`](#HyperparameterSpec.FIELDS.hyperparameter_metric_tag)
+            # for more information. Only set for hyperparameter tuning jobs.
       },
       "errorMessage": "A String", # Output only. The details of a failure or a cancellation.
-      "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
-          # prevent simultaneous updates of a job from overwriting each other.
-          # It is strongly suggested that systems make use of the `etag` in the
-          # read-modify-write cycle to perform job updates in order to avoid race
-          # conditions: An `etag` is returned in the response to `GetJob`, and
-          # systems are expected to put that etag in the request to `UpdateJob` to
-          # ensure that their change will be applied to the same version of the job.
+      "predictionOutput": { # Represents results of a prediction job. # The current prediction job result.
+        "errorCount": "A String", # The number of data instances which resulted in errors.
+        "nodeHours": 3.14, # Node hours used by the batch prediction job.
+        "outputPath": "A String", # The output Google Cloud Storage location provided at the job creation time.
+        "predictionCount": "A String", # The number of generated predictions.
+      },
+      "startTime": "A String", # Output only. When the job processing was started.
+      "state": "A String", # Output only. The detailed state of a job.
       "trainingInput": { # Represents input parameters for a training job. When using the gcloud command # Input parameters to create a training job.
           # to submit your training job, you can specify the input parameters as
           # command-line arguments and/or in a YAML configuration file referenced from
           # the --config command-line argument. For details, see the guide to [submitting
           # a training job](/ai-platform/training/docs/training-jobs).
-        "workerCount": "A String", # Optional. The number of worker replicas to use for the training job. Each
-            # replica in the cluster will be of the type specified in `worker_type`.
-            #
-            # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
-            # set this value, you must also set `worker_type`.
-            #
-            # The default value is zero.
-        "pythonModule": "A String", # Required. The Python module name to run after installing the packages.
-        "args": [ # Optional. Command-line arguments passed to the training application when it
-            # starts. If your job uses a custom container, then the arguments are passed
-            # to the container's <a class="external" target="_blank"
-            # href="https://docs.docker.com/engine/reference/builder/#entrypoint">
-            # `ENTRYPOINT`</a> command.
-          "A String",
-        ],
-        "parameterServerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
-            #
-            # You should only set `parameterServerConfig.acceleratorConfig` if
-            # `parameterServerType` is set to a Compute Engine machine type. [Learn
-            # about restrictions on accelerator configurations for
-            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-            #
-            # Set `parameterServerConfig.imageUri` only if you build a custom image for
-            # your parameter server. If `parameterServerConfig.imageUri` has not been
-            # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
-            # containers](/ai-platform/training/docs/distributed-training-containers).
-          "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
-              # Registry. Learn more about [configuring custom
-              # containers](/ai-platform/training/docs/distributed-training-containers).
-          "containerArgs": [ # Arguments to the entrypoint command.
-              # The following rules apply for container_command and container_args:
-              # - If you do not supply command or args:
-              #   The defaults defined in the Docker image are used.
-              # - If you supply a command but no args:
-              #   The default EntryPoint and the default Cmd defined in the Docker image
-              #   are ignored. Your command is run without any arguments.
-              # - If you supply only args:
-              #   The default Entrypoint defined in the Docker image is run with the args
-              #   that you supplied.
-              # - If you supply a command and args:
-              #   The default Entrypoint and the default Cmd defined in the Docker image
-              #   are ignored. Your command is run with your args.
-              # It cannot be set if custom container image is
-              # not provided.
-              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-              # both cannot be set at the same time.
-            "A String",
-          ],
-          "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-              # [Learn about restrictions on accelerator configurations for
-              # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-              # Note that the AcceleratorConfig can be used in both Jobs and Versions.
-              # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
-              # [accelerators for online
-              # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-            "count": "A String", # The number of accelerators to attach to each machine running the job.
-            "type": "A String", # The type of accelerator to use.
-          },
-          "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
-              # the one used in the custom container. This field is required if the replica
-              # is a TPU worker that uses a custom container. Otherwise, do not specify
-              # this field. This must be a [runtime version that currently supports
-              # training with
-              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-              #
-              # Note that the version of TensorFlow included in a runtime version may
-              # differ from the numbering of the runtime version itself, because it may
-              # have a different [patch
-              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-              # In this field, you must specify the runtime version (TensorFlow minor
-              # version). For example, if your custom container runs TensorFlow `1.x.y`,
-              # specify `1.x`.
-          "containerCommand": [ # The command with which the replica's custom container is run.
-              # If provided, it will override default ENTRYPOINT of the docker image.
-              # If not provided, the docker image's ENTRYPOINT is used.
-              # It cannot be set if custom container image is
-              # not provided.
-              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-              # both cannot be set at the same time.
-            "A String",
-          ],
-        },
-        "encryptionConfig": { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
-            # protect resources created by a training job, instead of using Google's
-            # default encryption. If this is set, then all resources created by the
-            # training job will be encrypted with the customer-managed encryption key
-            # that you specify.
-            #
-            # [Learn how and when to use CMEK with AI Platform
-            # Training](/ai-platform/training/docs/cmek).
-            # a resource.
-          "kmsKeyName": "A String", # The Cloud KMS resource identifier of the customer-managed encryption key
-              # used to protect a resource, such as a training job. It has the following
-              # format:
-              # `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
-        },
-        "hyperparameters": { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
-          "params": [ # Required. The set of parameters to tune.
-            { # Represents a single hyperparameter to optimize.
-              "categoricalValues": [ # Required if type is `CATEGORICAL`. The list of possible categories.
-                "A String",
-              ],
-              "parameterName": "A String", # Required. The parameter name must be unique amongst all ParameterConfigs in
-                  # a HyperparameterSpec message. E.g., "learning_rate".
-              "minValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
-                  # should be unset if type is `CATEGORICAL`. This value should be integers if
-                  # type is INTEGER.
-              "discreteValues": [ # Required if type is `DISCRETE`.
-                  # A list of feasible points.
-                  # The list should be in strictly increasing order. For instance, this
-                  # parameter might have possible settings of 1.5, 2.5, and 4.0. This list
-                  # should not contain more than 1,000 values.
-                3.14,
-              ],
-              "scaleType": "A String", # Optional. How the parameter should be scaled to the hypercube.
-                  # Leave unset for categorical parameters.
-                  # Some kind of scaling is strongly recommended for real or integral
-                  # parameters (e.g., `UNIT_LINEAR_SCALE`).
-              "maxValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
-                  # should be unset if type is `CATEGORICAL`. This value should be integers if
-                  # type is `INTEGER`.
-              "type": "A String", # Required. The type of the parameter.
-            },
-          ],
-          "enableTrialEarlyStopping": True or False, # Optional. Indicates if the hyperparameter tuning job enables auto trial
-              # early stopping.
-          "resumePreviousJobId": "A String", # Optional. The prior hyperparameter tuning job id that users hope to
-              # continue with. The job id will be used to find the corresponding vizier
-              # study guid and resume the study.
-          "maxParallelTrials": 42, # Optional. The number of training trials to run concurrently.
-              # You can reduce the time it takes to perform hyperparameter tuning by adding
-              # trials in parallel. However, each trail only benefits from the information
-              # gained in completed trials. That means that a trial does not get access to
-              # the results of trials running at the same time, which could reduce the
-              # quality of the overall optimization.
-              #
-              # Each trial will use the same scale tier and machine types.
-              #
-              # Defaults to one.
-          "maxFailedTrials": 42, # Optional. The number of failed trials that need to be seen before failing
-              # the hyperparameter tuning job. You can specify this field to override the
-              # default failing criteria for AI Platform hyperparameter tuning jobs.
-              #
-              # Defaults to zero, which means the service decides when a hyperparameter
-              # job should fail.
-          "goal": "A String", # Required. The type of goal to use for tuning. Available types are
-              # `MAXIMIZE` and `MINIMIZE`.
-              #
-              # Defaults to `MAXIMIZE`.
-          "maxTrials": 42, # Optional. How many training trials should be attempted to optimize
-              # the specified hyperparameters.
-              #
-              # Defaults to one.
-          "algorithm": "A String", # Optional. The search algorithm specified for the hyperparameter
-              # tuning job.
-              # Uses the default AI Platform hyperparameter tuning
-              # algorithm if unspecified.
-          "hyperparameterMetricTag": "A String", # Optional. The TensorFlow summary tag name to use for optimizing trials. For
-              # current versions of TensorFlow, this tag name should exactly match what is
-              # shown in TensorBoard, including all scopes.  For versions of TensorFlow
-              # prior to 0.12, this should be only the tag passed to tf.Summary.
-              # By default, "training/hptuning/metric" will be used.
-        },
+        "serviceAccount": "A String", # Optional. Specifies the service account for workload run-as account.
+            # Users submitting jobs must have act-as permission on this run-as account.
+            # If not specified, then CMLE P4SA will be used by default.
         "workerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
             #
             # You should only set `workerConfig.acceleratorConfig` if `workerType` is set
@@ -1064,6 +975,16 @@
             # worker. If `workerConfig.imageUri` has not been set, AI Platform uses
             # the value of `masterConfig.imageUri`. Learn more about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
+          "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+              # [Learn about restrictions on accelerator configurations for
+              # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+              # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+              # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+              # [accelerators for online
+              # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+            "type": "A String", # The type of accelerator to use.
+            "count": "A String", # The number of accelerators to attach to each machine running the job.
+          },
           "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
               # Registry. Learn more about [configuring custom
               # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -1086,16 +1007,6 @@
               # both cannot be set at the same time.
             "A String",
           ],
-          "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-              # [Learn about restrictions on accelerator configurations for
-              # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-              # Note that the AcceleratorConfig can be used in both Jobs and Versions.
-              # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
-              # [accelerators for online
-              # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-            "count": "A String", # The number of accelerators to attach to each machine running the job.
-            "type": "A String", # The type of accelerator to use.
-          },
           "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
               # the one used in the custom container. This field is required if the replica
               # is a TPU worker that uses a custom container. Otherwise, do not specify
@@ -1120,27 +1031,13 @@
             "A String",
           ],
         },
-        "parameterServerCount": "A String", # Optional. The number of parameter server replicas to use for the training
-            # job. Each replica in the cluster will be of the type specified in
-            # `parameter_server_type`.
+        "useChiefInTfConfig": True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
+            # variable when training with a custom container. Defaults to `false`. [Learn
+            # more about this
+            # field.](/ai-platform/training/docs/distributed-training-details#chief-versus-master)
             #
-            # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
-            # set this value, you must also set `parameter_server_type`.
-            #
-            # The default value is zero.
-        "packageUris": [ # Required. The Google Cloud Storage location of the packages with
-            # the training program and any additional dependencies.
-            # The maximum number of package URIs is 100.
-          "A String",
-        ],
-        "evaluatorCount": "A String", # Optional. The number of evaluator replicas to use for the training job.
-            # Each replica in the cluster will be of the type specified in
-            # `evaluator_type`.
-            #
-            # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
-            # set this value, you must also set `evaluator_type`.
-            #
-            # The default value is zero.
+            # This field has no effect for training jobs that don't use a custom
+            # container.
         "masterType": "A String", # Optional. Specifies the type of virtual machine to use for your training
             # job's master worker. You must specify this field when `scaleTier` is set to
             # `CUSTOM`.
@@ -1193,65 +1090,27 @@
             # field. Learn more about the [special configuration options for training
             # with
             # TPUs](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
-        "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for training. You must
-            # either specify this field or specify `masterConfig.imageUri`.
+        "parameterServerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
             #
-            # For more information, see the [runtime version
-            # list](/ai-platform/training/docs/runtime-version-list) and learn [how to
-            # manage runtime versions](/ai-platform/training/docs/versioning).
-        "evaluatorType": "A String", # Optional. Specifies the type of virtual machine to use for your training
-            # job's evaluator nodes.
-            #
-            # The supported values are the same as those described in the entry for
-            # `masterType`.
-            #
-            # This value must be consistent with the category of machine type that
-            # `masterType` uses. In other words, both must be Compute Engine machine
-            # types or both must be legacy machine types.
-            #
-            # This value must be present when `scaleTier` is set to `CUSTOM` and
-            # `evaluatorCount` is greater than zero.
-        "workerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
-            # job's worker nodes.
-            #
-            # The supported values are the same as those described in the entry for
-            # `masterType`.
-            #
-            # This value must be consistent with the category of machine type that
-            # `masterType` uses. In other words, both must be Compute Engine machine
-            # types or both must be legacy machine types.
-            #
-            # If you use `cloud_tpu` for this value, see special instructions for
-            # [configuring a custom TPU
-            # machine](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
-            #
-            # This value must be present when `scaleTier` is set to `CUSTOM` and
-            # `workerCount` is greater than zero.
-        "region": "A String", # Required. The region to run the training job in. See the [available
-            # regions](/ai-platform/training/docs/regions) for AI Platform Training.
-        "parameterServerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
-            # job's parameter server.
-            #
-            # The supported values are the same as those described in the entry for
-            # `master_type`.
-            #
-            # This value must be consistent with the category of machine type that
-            # `masterType` uses. In other words, both must be Compute Engine machine
-            # types or both must be legacy machine types.
-            #
-            # This value must be present when `scaleTier` is set to `CUSTOM` and
-            # `parameter_server_count` is greater than zero.
-        "masterConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for your master worker.
-            #
-            # You should only set `masterConfig.acceleratorConfig` if `masterType` is set
-            # to a Compute Engine machine type. Learn about [restrictions on accelerator
-            # configurations for
+            # You should only set `parameterServerConfig.acceleratorConfig` if
+            # `parameterServerType` is set to a Compute Engine machine type. [Learn
+            # about restrictions on accelerator configurations for
             # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
             #
-            # Set `masterConfig.imageUri` only if you build a custom image. Only one of
-            # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
-            # about [configuring custom
+            # Set `parameterServerConfig.imageUri` only if you build a custom image for
+            # your parameter server. If `parameterServerConfig.imageUri` has not been
+            # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
+          "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+              # [Learn about restrictions on accelerator configurations for
+              # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+              # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+              # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+              # [accelerators for online
+              # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+            "type": "A String", # The type of accelerator to use.
+            "count": "A String", # The number of accelerators to attach to each machine running the job.
+          },
           "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
               # Registry. Learn more about [configuring custom
               # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -1274,16 +1133,6 @@
               # both cannot be set at the same time.
             "A String",
           ],
-          "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-              # [Learn about restrictions on accelerator configurations for
-              # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-              # Note that the AcceleratorConfig can be used in both Jobs and Versions.
-              # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
-              # [accelerators for online
-              # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-            "count": "A String", # The number of accelerators to attach to each machine running the job.
-            "type": "A String", # The type of accelerator to use.
-          },
           "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
               # the one used in the custom container. This field is required if the replica
               # is a TPU worker that uses a custom container. Otherwise, do not specify
@@ -1308,8 +1157,8 @@
             "A String",
           ],
         },
-        "scaleTier": "A String", # Required. Specifies the machine types, the number of replicas for workers
-            # and parameter servers.
+        "region": "A String", # Required. The region to run the training job in. See the [available
+            # regions](/ai-platform/training/docs/regions) for AI Platform Training.
         "jobDir": "A String", # Optional. A Google Cloud Storage path in which to store training outputs
             # and other data needed for training. This path is passed to your TensorFlow
             # program as the '--job-dir' command-line argument. The benefit of specifying
@@ -1328,6 +1177,308 @@
             #
             # Read more about the Python versions available for [each runtime
             # version](/ml-engine/docs/runtime-version-list).
+        "hyperparameters": { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
+          "maxTrials": 42, # Optional. How many training trials should be attempted to optimize
+              # the specified hyperparameters.
+              #
+              # Defaults to one.
+          "enableTrialEarlyStopping": True or False, # Optional. Indicates if the hyperparameter tuning job enables auto trial
+              # early stopping.
+          "params": [ # Required. The set of parameters to tune.
+            { # Represents a single hyperparameter to optimize.
+              "minValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
+                  # should be unset if type is `CATEGORICAL`. This value should be integers if
+                  # type is INTEGER.
+              "categoricalValues": [ # Required if type is `CATEGORICAL`. The list of possible categories.
+                "A String",
+              ],
+              "scaleType": "A String", # Optional. How the parameter should be scaled to the hypercube.
+                  # Leave unset for categorical parameters.
+                  # Some kind of scaling is strongly recommended for real or integral
+                  # parameters (e.g., `UNIT_LINEAR_SCALE`).
+              "discreteValues": [ # Required if type is `DISCRETE`.
+                  # A list of feasible points.
+                  # The list should be in strictly increasing order. For instance, this
+                  # parameter might have possible settings of 1.5, 2.5, and 4.0. This list
+                  # should not contain more than 1,000 values.
+                3.14,
+              ],
+              "type": "A String", # Required. The type of the parameter.
+              "maxValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
+                  # should be unset if type is `CATEGORICAL`. This value should be integers if
+                  # type is `INTEGER`.
+              "parameterName": "A String", # Required. The parameter name must be unique amongst all ParameterConfigs in
+                  # a HyperparameterSpec message. E.g., "learning_rate".
+            },
+          ],
+          "maxFailedTrials": 42, # Optional. The number of failed trials that need to be seen before failing
+              # the hyperparameter tuning job. You can specify this field to override the
+              # default failing criteria for AI Platform hyperparameter tuning jobs.
+              #
+              # Defaults to zero, which means the service decides when a hyperparameter
+              # job should fail.
+          "hyperparameterMetricTag": "A String", # Optional. The TensorFlow summary tag name to use for optimizing trials. For
+              # current versions of TensorFlow, this tag name should exactly match what is
+              # shown in TensorBoard, including all scopes.  For versions of TensorFlow
+              # prior to 0.12, this should be only the tag passed to tf.Summary.
+              # By default, "training/hptuning/metric" will be used.
+          "resumePreviousJobId": "A String", # Optional. The prior hyperparameter tuning job id that users hope to
+              # continue with. The job id will be used to find the corresponding vizier
+              # study guid and resume the study.
+          "goal": "A String", # Required. The type of goal to use for tuning. Available types are
+              # `MAXIMIZE` and `MINIMIZE`.
+              #
+              # Defaults to `MAXIMIZE`.
+          "algorithm": "A String", # Optional. The search algorithm specified for the hyperparameter
+              # tuning job.
+              # Uses the default AI Platform hyperparameter tuning
+              # algorithm if unspecified.
+          "maxParallelTrials": 42, # Optional. The number of training trials to run concurrently.
+              # You can reduce the time it takes to perform hyperparameter tuning by adding
+              # trials in parallel. However, each trail only benefits from the information
+              # gained in completed trials. That means that a trial does not get access to
+              # the results of trials running at the same time, which could reduce the
+              # quality of the overall optimization.
+              #
+              # Each trial will use the same scale tier and machine types.
+              #
+              # Defaults to one.
+        },
+        "evaluatorType": "A String", # Optional. Specifies the type of virtual machine to use for your training
+            # job's evaluator nodes.
+            #
+            # The supported values are the same as those described in the entry for
+            # `masterType`.
+            #
+            # This value must be consistent with the category of machine type that
+            # `masterType` uses. In other words, both must be Compute Engine machine
+            # types or both must be legacy machine types.
+            #
+            # This value must be present when `scaleTier` is set to `CUSTOM` and
+            # `evaluatorCount` is greater than zero.
+        "network": "A String", # Optional. The full name of the Google Compute Engine
+            # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
+            # is peered. For example, projects/12345/global/networks/myVPC. Format is of
+            # the form projects/{project}/global/networks/{network}. Where {project} is a
+            # project number, as in '12345', and {network} is network name.".
+            #
+            # Private services access must already be configured for the network. If left
+            # unspecified, the Job is not peered with any network. Learn more -
+            # Connecting Job to user network over private
+            # IP.
+        "parameterServerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
+            # job's parameter server.
+            #
+            # The supported values are the same as those described in the entry for
+            # `master_type`.
+            #
+            # This value must be consistent with the category of machine type that
+            # `masterType` uses. In other words, both must be Compute Engine machine
+            # types or both must be legacy machine types.
+            #
+            # This value must be present when `scaleTier` is set to `CUSTOM` and
+            # `parameter_server_count` is greater than zero.
+        "workerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
+            # job's worker nodes.
+            #
+            # The supported values are the same as those described in the entry for
+            # `masterType`.
+            #
+            # This value must be consistent with the category of machine type that
+            # `masterType` uses. In other words, both must be Compute Engine machine
+            # types or both must be legacy machine types.
+            #
+            # If you use `cloud_tpu` for this value, see special instructions for
+            # [configuring a custom TPU
+            # machine](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
+            #
+            # This value must be present when `scaleTier` is set to `CUSTOM` and
+            # `workerCount` is greater than zero.
+        "masterConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for your master worker.
+            #
+            # You should only set `masterConfig.acceleratorConfig` if `masterType` is set
+            # to a Compute Engine machine type. Learn about [restrictions on accelerator
+            # configurations for
+            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+            #
+            # Set `masterConfig.imageUri` only if you build a custom image. Only one of
+            # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
+            # about [configuring custom
+            # containers](/ai-platform/training/docs/distributed-training-containers).
+          "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+              # [Learn about restrictions on accelerator configurations for
+              # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+              # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+              # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+              # [accelerators for online
+              # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+            "type": "A String", # The type of accelerator to use.
+            "count": "A String", # The number of accelerators to attach to each machine running the job.
+          },
+          "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
+              # Registry. Learn more about [configuring custom
+              # containers](/ai-platform/training/docs/distributed-training-containers).
+          "containerArgs": [ # Arguments to the entrypoint command.
+              # The following rules apply for container_command and container_args:
+              # - If you do not supply command or args:
+              #   The defaults defined in the Docker image are used.
+              # - If you supply a command but no args:
+              #   The default EntryPoint and the default Cmd defined in the Docker image
+              #   are ignored. Your command is run without any arguments.
+              # - If you supply only args:
+              #   The default Entrypoint defined in the Docker image is run with the args
+              #   that you supplied.
+              # - If you supply a command and args:
+              #   The default Entrypoint and the default Cmd defined in the Docker image
+              #   are ignored. Your command is run with your args.
+              # It cannot be set if custom container image is
+              # not provided.
+              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+              # both cannot be set at the same time.
+            "A String",
+          ],
+          "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+              # the one used in the custom container. This field is required if the replica
+              # is a TPU worker that uses a custom container. Otherwise, do not specify
+              # this field. This must be a [runtime version that currently supports
+              # training with
+              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+              #
+              # Note that the version of TensorFlow included in a runtime version may
+              # differ from the numbering of the runtime version itself, because it may
+              # have a different [patch
+              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+              # In this field, you must specify the runtime version (TensorFlow minor
+              # version). For example, if your custom container runs TensorFlow `1.x.y`,
+              # specify `1.x`.
+          "containerCommand": [ # The command with which the replica's custom container is run.
+              # If provided, it will override default ENTRYPOINT of the docker image.
+              # If not provided, the docker image's ENTRYPOINT is used.
+              # It cannot be set if custom container image is
+              # not provided.
+              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+              # both cannot be set at the same time.
+            "A String",
+          ],
+        },
+        "evaluatorCount": "A String", # Optional. The number of evaluator replicas to use for the training job.
+            # Each replica in the cluster will be of the type specified in
+            # `evaluator_type`.
+            #
+            # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+            # set this value, you must also set `evaluator_type`.
+            #
+            # The default value is zero.
+        "args": [ # Optional. Command-line arguments passed to the training application when it
+            # starts. If your job uses a custom container, then the arguments are passed
+            # to the container's <a class="external" target="_blank"
+            # href="https://docs.docker.com/engine/reference/builder/#entrypoint">
+            # `ENTRYPOINT`</a> command.
+          "A String",
+        ],
+        "pythonModule": "A String", # Required. The Python module name to run after installing the packages.
+        "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for training. You must
+            # either specify this field or specify `masterConfig.imageUri`.
+            #
+            # For more information, see the [runtime version
+            # list](/ai-platform/training/docs/runtime-version-list) and learn [how to
+            # manage runtime versions](/ai-platform/training/docs/versioning).
+        "parameterServerCount": "A String", # Optional. The number of parameter server replicas to use for the training
+            # job. Each replica in the cluster will be of the type specified in
+            # `parameter_server_type`.
+            #
+            # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+            # set this value, you must also set `parameter_server_type`.
+            #
+            # The default value is zero.
+        "evaluatorConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
+            #
+            # You should only set `evaluatorConfig.acceleratorConfig` if
+            # `evaluatorType` is set to a Compute Engine machine type. [Learn
+            # about restrictions on accelerator configurations for
+            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+            #
+            # Set `evaluatorConfig.imageUri` only if you build a custom image for
+            # your evaluator. If `evaluatorConfig.imageUri` has not been
+            # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
+            # containers](/ai-platform/training/docs/distributed-training-containers).
+          "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+              # [Learn about restrictions on accelerator configurations for
+              # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+              # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+              # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+              # [accelerators for online
+              # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+            "type": "A String", # The type of accelerator to use.
+            "count": "A String", # The number of accelerators to attach to each machine running the job.
+          },
+          "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
+              # Registry. Learn more about [configuring custom
+              # containers](/ai-platform/training/docs/distributed-training-containers).
+          "containerArgs": [ # Arguments to the entrypoint command.
+              # The following rules apply for container_command and container_args:
+              # - If you do not supply command or args:
+              #   The defaults defined in the Docker image are used.
+              # - If you supply a command but no args:
+              #   The default EntryPoint and the default Cmd defined in the Docker image
+              #   are ignored. Your command is run without any arguments.
+              # - If you supply only args:
+              #   The default Entrypoint defined in the Docker image is run with the args
+              #   that you supplied.
+              # - If you supply a command and args:
+              #   The default Entrypoint and the default Cmd defined in the Docker image
+              #   are ignored. Your command is run with your args.
+              # It cannot be set if custom container image is
+              # not provided.
+              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+              # both cannot be set at the same time.
+            "A String",
+          ],
+          "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+              # the one used in the custom container. This field is required if the replica
+              # is a TPU worker that uses a custom container. Otherwise, do not specify
+              # this field. This must be a [runtime version that currently supports
+              # training with
+              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+              #
+              # Note that the version of TensorFlow included in a runtime version may
+              # differ from the numbering of the runtime version itself, because it may
+              # have a different [patch
+              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+              # In this field, you must specify the runtime version (TensorFlow minor
+              # version). For example, if your custom container runs TensorFlow `1.x.y`,
+              # specify `1.x`.
+          "containerCommand": [ # The command with which the replica's custom container is run.
+              # If provided, it will override default ENTRYPOINT of the docker image.
+              # If not provided, the docker image's ENTRYPOINT is used.
+              # It cannot be set if custom container image is
+              # not provided.
+              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+              # both cannot be set at the same time.
+            "A String",
+          ],
+        },
+        "encryptionConfig": { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
+            # protect resources created by a training job, instead of using Google's
+            # default encryption. If this is set, then all resources created by the
+            # training job will be encrypted with the customer-managed encryption key
+            # that you specify.
+            #
+            # [Learn how and when to use CMEK with AI Platform
+            # Training](/ai-platform/training/docs/cmek).
+            # a resource.
+          "kmsKeyName": "A String", # The Cloud KMS resource identifier of the customer-managed encryption key
+              # used to protect a resource, such as a training job. It has the following
+              # format:
+              # `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
+        },
+        "workerCount": "A String", # Optional. The number of worker replicas to use for the training job. Each
+            # replica in the cluster will be of the type specified in `worker_type`.
+            #
+            # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+            # set this value, you must also set `worker_type`.
+            #
+            # The default value is zero.
         "scheduling": { # All parameters related to scheduling of training jobs. # Optional. Scheduling options for a training job.
           "maxWaitTime": "A String",
           "maxRunningTime": "A String", # Optional. The maximum job running time, expressed in seconds. The field can
@@ -1354,167 +1505,22 @@
               #   ...
               # ```
         },
-        "network": "A String", # Optional. The full name of the Google Compute Engine
-            # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
-            # is peered. For example, projects/12345/global/networks/myVPC. Format is of
-            # the form projects/{project}/global/networks/{network}. Where {project} is a
-            # project number, as in '12345', and {network} is network name.".
-            #
-            # Private services access must already be configured for the network. If left
-            # unspecified, the Job is not peered with any network. Learn more -
-            # Connecting Job to user network over private
-            # IP.
-        "evaluatorConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
-            #
-            # You should only set `evaluatorConfig.acceleratorConfig` if
-            # `evaluatorType` is set to a Compute Engine machine type. [Learn
-            # about restrictions on accelerator configurations for
-            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-            #
-            # Set `evaluatorConfig.imageUri` only if you build a custom image for
-            # your evaluator. If `evaluatorConfig.imageUri` has not been
-            # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
-            # containers](/ai-platform/training/docs/distributed-training-containers).
-          "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
-              # Registry. Learn more about [configuring custom
-              # containers](/ai-platform/training/docs/distributed-training-containers).
-          "containerArgs": [ # Arguments to the entrypoint command.
-              # The following rules apply for container_command and container_args:
-              # - If you do not supply command or args:
-              #   The defaults defined in the Docker image are used.
-              # - If you supply a command but no args:
-              #   The default EntryPoint and the default Cmd defined in the Docker image
-              #   are ignored. Your command is run without any arguments.
-              # - If you supply only args:
-              #   The default Entrypoint defined in the Docker image is run with the args
-              #   that you supplied.
-              # - If you supply a command and args:
-              #   The default Entrypoint and the default Cmd defined in the Docker image
-              #   are ignored. Your command is run with your args.
-              # It cannot be set if custom container image is
-              # not provided.
-              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-              # both cannot be set at the same time.
-            "A String",
-          ],
-          "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-              # [Learn about restrictions on accelerator configurations for
-              # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-              # Note that the AcceleratorConfig can be used in both Jobs and Versions.
-              # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
-              # [accelerators for online
-              # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-            "count": "A String", # The number of accelerators to attach to each machine running the job.
-            "type": "A String", # The type of accelerator to use.
-          },
-          "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
-              # the one used in the custom container. This field is required if the replica
-              # is a TPU worker that uses a custom container. Otherwise, do not specify
-              # this field. This must be a [runtime version that currently supports
-              # training with
-              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-              #
-              # Note that the version of TensorFlow included in a runtime version may
-              # differ from the numbering of the runtime version itself, because it may
-              # have a different [patch
-              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-              # In this field, you must specify the runtime version (TensorFlow minor
-              # version). For example, if your custom container runs TensorFlow `1.x.y`,
-              # specify `1.x`.
-          "containerCommand": [ # The command with which the replica's custom container is run.
-              # If provided, it will override default ENTRYPOINT of the docker image.
-              # If not provided, the docker image's ENTRYPOINT is used.
-              # It cannot be set if custom container image is
-              # not provided.
-              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-              # both cannot be set at the same time.
-            "A String",
-          ],
-        },
-        "useChiefInTfConfig": True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
-            # variable when training with a custom container. Defaults to `false`. [Learn
-            # more about this
-            # field.](/ai-platform/training/docs/distributed-training-details#chief-versus-master)
-            #
-            # This field has no effect for training jobs that don't use a custom
-            # container.
-      },
-      "state": "A String", # Output only. The detailed state of a job.
-      "jobId": "A String", # Required. The user-specified id of the job.
-      "endTime": "A String", # Output only. When the job processing was completed.
-      "startTime": "A String", # Output only. When the job processing was started.
-      "predictionOutput": { # Represents results of a prediction job. # The current prediction job result.
-        "errorCount": "A String", # The number of data instances which resulted in errors.
-        "outputPath": "A String", # The output Google Cloud Storage location provided at the job creation time.
-        "nodeHours": 3.14, # Node hours used by the batch prediction job.
-        "predictionCount": "A String", # The number of generated predictions.
-      },
-      "trainingOutput": { # Represents results of a training job. Output only. # The current training job result.
-        "isBuiltInAlgorithmJob": True or False, # Whether this job is a built-in Algorithm job.
-        "builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
-            # Only set for built-in algorithms jobs.
-          "modelPath": "A String", # The Cloud Storage path to the `model/` directory where the training job
-              # saves the trained model. Only set for successful jobs that don't use
-              # hyperparameter tuning.
-          "pythonVersion": "A String", # Python version on which the built-in algorithm was trained.
-          "runtimeVersion": "A String", # AI Platform runtime version on which the built-in algorithm was
-              # trained.
-          "framework": "A String", # Framework on which the built-in algorithm was trained.
-        },
-        "trials": [ # Results for individual Hyperparameter trials.
-            # Only set for hyperparameter tuning jobs.
-          { # Represents the result of a single hyperparameter tuning trial from a
-              # training job. The TrainingOutput object that is returned on successful
-              # completion of a training job with hyperparameter tuning includes a list
-              # of HyperparameterOutput objects, one for each successful trial.
-            "trialId": "A String", # The trial id for these results.
-            "endTime": "A String", # Output only. End time for the trial.
-            "startTime": "A String", # Output only. Start time for the trial.
-            "isTrialStoppedEarly": True or False, # True if the trial is stopped early.
-            "finalMetric": { # An observed value of a metric. # The final objective metric seen for this trial.
-              "trainingStep": "A String", # The global training step for this metric.
-              "objectiveValue": 3.14, # The objective value at this training step.
-            },
-            "builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
-                # Only set for trials of built-in algorithms jobs that have succeeded.
-              "modelPath": "A String", # The Cloud Storage path to the `model/` directory where the training job
-                  # saves the trained model. Only set for successful jobs that don't use
-                  # hyperparameter tuning.
-              "pythonVersion": "A String", # Python version on which the built-in algorithm was trained.
-              "runtimeVersion": "A String", # AI Platform runtime version on which the built-in algorithm was
-                  # trained.
-              "framework": "A String", # Framework on which the built-in algorithm was trained.
-            },
-            "state": "A String", # Output only. The detailed state of the trial.
-            "allMetrics": [ # All recorded object metrics for this trial. This field is not currently
-                # populated.
-              { # An observed value of a metric.
-                "trainingStep": "A String", # The global training step for this metric.
-                "objectiveValue": 3.14, # The objective value at this training step.
-              },
-            ],
-            "hyperparameters": { # The hyperparameters given to this trial.
-              "a_key": "A String",
-            },
-          },
+        "scaleTier": "A String", # Required. Specifies the machine types, the number of replicas for workers
+            # and parameter servers.
+        "packageUris": [ # Required. The Google Cloud Storage location of the packages with
+            # the training program and any additional dependencies.
+            # The maximum number of package URIs is 100.
+          "A String",
         ],
-        "hyperparameterMetricTag": "A String", # The TensorFlow summary tag name used for optimizing hyperparameter tuning
-            # trials. See
-            # [`HyperparameterSpec.hyperparameterMetricTag`](#HyperparameterSpec.FIELDS.hyperparameter_metric_tag)
-            # for more information. Only set for hyperparameter tuning jobs.
-        "completedTrialCount": "A String", # The number of hyperparameter tuning trials that completed successfully.
-            # Only set for hyperparameter tuning jobs.
-        "isHyperparameterTuningJob": True or False, # Whether this job is a hyperparameter tuning job.
-        "consumedMLUnits": 3.14, # The amount of ML units consumed by the job.
       },
-      "createTime": "A String", # Output only. When the job was created.
-      "labels": { # Optional. One or more labels that you can add, to organize your jobs.
-          # Each label is a key-value pair, where both the key and the value are
-          # arbitrary strings that you supply.
-          # For more information, see the documentation on
-          # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
-        "a_key": "A String",
-      },
+      "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
+          # prevent simultaneous updates of a job from overwriting each other.
+          # It is strongly suggested that systems make use of the `etag` in the
+          # read-modify-write cycle to perform job updates in order to avoid race
+          # conditions: An `etag` is returned in the response to `GetJob`, and
+          # systems are expected to put that etag in the request to `UpdateJob` to
+          # ensure that their change will be applied to the same version of the job.
+      "jobId": "A String", # Required. The user-specified id of the job.
     }</pre>
 </div>
 
@@ -1533,20 +1539,34 @@
   An object of the form:
 
     { # Represents a training or prediction job.
+      &quot;createTime&quot;: &quot;A String&quot;, # Output only. When the job was created.
       &quot;predictionInput&quot;: { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
+        &quot;dataFormat&quot;: &quot;A String&quot;, # Required. The format of the input data files.
+        &quot;outputPath&quot;: &quot;A String&quot;, # Required. The output Google Cloud Storage location.
+        &quot;inputPaths&quot;: [ # Required. The Cloud Storage location of the input data files. May contain
+            # &lt;a href=&quot;/storage/docs/gsutil/addlhelp/WildcardNames&quot;&gt;wildcards&lt;/a&gt;.
+          &quot;A String&quot;,
+        ],
+        &quot;region&quot;: &quot;A String&quot;, # Required. The Google Compute Engine region to run the prediction job in.
+            # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
+            # for AI Platform services.
         &quot;versionName&quot;: &quot;A String&quot;, # Use this field if you want to specify a version of the model to use. The
             # string is formatted the same way as `model_version`, with the addition
             # of the version information:
             #
             # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION&quot;`
+        &quot;uri&quot;: &quot;A String&quot;, # Use this field if you want to specify a Google Cloud Storage path for
+            # the model to use.
+        &quot;outputDataFormat&quot;: &quot;A String&quot;, # Optional. Format of the output data files, defaults to JSON.
+        &quot;runtimeVersion&quot;: &quot;A String&quot;, # Optional. The AI Platform runtime version to use for this batch
+            # prediction. If not set, AI Platform will pick the runtime version used
+            # during the CreateVersion request for this model version, or choose the
+            # latest stable version when model version information is not available
+            # such as when the model is specified by uri.
         &quot;modelName&quot;: &quot;A String&quot;, # Use this field if you want to use the default version for the specified
             # model. The string must use the following format:
             #
             # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL&quot;`
-        &quot;uri&quot;: &quot;A String&quot;, # Use this field if you want to specify a Google Cloud Storage path for
-            # the model to use.
-        &quot;maxWorkerCount&quot;: &quot;A String&quot;, # Optional. The maximum number of workers to be used for parallel processing.
-            # Defaults to 10 if not specified.
         &quot;signatureName&quot;: &quot;A String&quot;, # Optional. The name of the signature defined in the SavedModel to use for
             # this job. Please refer to
             # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
@@ -1555,202 +1575,96 @@
             # Defaults to
             # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
             # , which is &quot;serving_default&quot;.
-        &quot;outputPath&quot;: &quot;A String&quot;, # Required. The output Google Cloud Storage location.
-        &quot;outputDataFormat&quot;: &quot;A String&quot;, # Optional. Format of the output data files, defaults to JSON.
-        &quot;dataFormat&quot;: &quot;A String&quot;, # Required. The format of the input data files.
         &quot;batchSize&quot;: &quot;A String&quot;, # Optional. Number of records per batch, defaults to 64.
             # The service will buffer batch_size number of records in memory before
             # invoking one Tensorflow prediction call internally. So take the record
             # size and memory available into consideration when setting this parameter.
-        &quot;runtimeVersion&quot;: &quot;A String&quot;, # Optional. The AI Platform runtime version to use for this batch
-            # prediction. If not set, AI Platform will pick the runtime version used
-            # during the CreateVersion request for this model version, or choose the
-            # latest stable version when model version information is not available
-            # such as when the model is specified by uri.
-        &quot;inputPaths&quot;: [ # Required. The Cloud Storage location of the input data files. May contain
-            # &lt;a href=&quot;/storage/docs/gsutil/addlhelp/WildcardNames&quot;&gt;wildcards&lt;/a&gt;.
-          &quot;A String&quot;,
+        &quot;maxWorkerCount&quot;: &quot;A String&quot;, # Optional. The maximum number of workers to be used for parallel processing.
+            # Defaults to 10 if not specified.
+      },
+      &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your jobs.
+          # Each label is a key-value pair, where both the key and the value are
+          # arbitrary strings that you supply.
+          # For more information, see the documentation on
+          # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
+        &quot;a_key&quot;: &quot;A String&quot;,
+      },
+      &quot;endTime&quot;: &quot;A String&quot;, # Output only. When the job processing was completed.
+      &quot;trainingOutput&quot;: { # Represents results of a training job. Output only. # The current training job result.
+        &quot;trials&quot;: [ # Results for individual Hyperparameter trials.
+            # Only set for hyperparameter tuning jobs.
+          { # Represents the result of a single hyperparameter tuning trial from a
+              # training job. The TrainingOutput object that is returned on successful
+              # completion of a training job with hyperparameter tuning includes a list
+              # of HyperparameterOutput objects, one for each successful trial.
+            &quot;endTime&quot;: &quot;A String&quot;, # Output only. End time for the trial.
+            &quot;finalMetric&quot;: { # An observed value of a metric. # The final objective metric seen for this trial.
+              &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
+              &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
+            },
+            &quot;hyperparameters&quot;: { # The hyperparameters given to this trial.
+              &quot;a_key&quot;: &quot;A String&quot;,
+            },
+            &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
+                # Only set for trials of built-in algorithms jobs that have succeeded.
+              &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
+                  # saves the trained model. Only set for successful jobs that don&#x27;t use
+                  # hyperparameter tuning.
+              &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
+              &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
+                  # trained.
+              &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
+            },
+            &quot;startTime&quot;: &quot;A String&quot;, # Output only. Start time for the trial.
+            &quot;allMetrics&quot;: [ # All recorded object metrics for this trial. This field is not currently
+                # populated.
+              { # An observed value of a metric.
+                &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
+                &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
+              },
+            ],
+            &quot;trialId&quot;: &quot;A String&quot;, # The trial id for these results.
+            &quot;isTrialStoppedEarly&quot;: True or False, # True if the trial is stopped early.
+            &quot;state&quot;: &quot;A String&quot;, # Output only. The detailed state of the trial.
+          },
         ],
-        &quot;region&quot;: &quot;A String&quot;, # Required. The Google Compute Engine region to run the prediction job in.
-            # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
-            # for AI Platform services.
+        &quot;completedTrialCount&quot;: &quot;A String&quot;, # The number of hyperparameter tuning trials that completed successfully.
+            # Only set for hyperparameter tuning jobs.
+        &quot;isHyperparameterTuningJob&quot;: True or False, # Whether this job is a hyperparameter tuning job.
+        &quot;isBuiltInAlgorithmJob&quot;: True or False, # Whether this job is a built-in Algorithm job.
+        &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
+            # Only set for built-in algorithms jobs.
+          &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
+              # saves the trained model. Only set for successful jobs that don&#x27;t use
+              # hyperparameter tuning.
+          &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
+          &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
+              # trained.
+          &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
+        },
+        &quot;consumedMLUnits&quot;: 3.14, # The amount of ML units consumed by the job.
+        &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # The TensorFlow summary tag name used for optimizing hyperparameter tuning
+            # trials. See
+            # [`HyperparameterSpec.hyperparameterMetricTag`](#HyperparameterSpec.FIELDS.hyperparameter_metric_tag)
+            # for more information. Only set for hyperparameter tuning jobs.
       },
       &quot;errorMessage&quot;: &quot;A String&quot;, # Output only. The details of a failure or a cancellation.
-      &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
-          # prevent simultaneous updates of a job from overwriting each other.
-          # It is strongly suggested that systems make use of the `etag` in the
-          # read-modify-write cycle to perform job updates in order to avoid race
-          # conditions: An `etag` is returned in the response to `GetJob`, and
-          # systems are expected to put that etag in the request to `UpdateJob` to
-          # ensure that their change will be applied to the same version of the job.
+      &quot;predictionOutput&quot;: { # Represents results of a prediction job. # The current prediction job result.
+        &quot;errorCount&quot;: &quot;A String&quot;, # The number of data instances which resulted in errors.
+        &quot;nodeHours&quot;: 3.14, # Node hours used by the batch prediction job.
+        &quot;outputPath&quot;: &quot;A String&quot;, # The output Google Cloud Storage location provided at the job creation time.
+        &quot;predictionCount&quot;: &quot;A String&quot;, # The number of generated predictions.
+      },
+      &quot;startTime&quot;: &quot;A String&quot;, # Output only. When the job processing was started.
+      &quot;state&quot;: &quot;A String&quot;, # Output only. The detailed state of a job.
       &quot;trainingInput&quot;: { # Represents input parameters for a training job. When using the gcloud command # Input parameters to create a training job.
           # to submit your training job, you can specify the input parameters as
           # command-line arguments and/or in a YAML configuration file referenced from
           # the --config command-line argument. For details, see the guide to [submitting
           # a training job](/ai-platform/training/docs/training-jobs).
-        &quot;workerCount&quot;: &quot;A String&quot;, # Optional. The number of worker replicas to use for the training job. Each
-            # replica in the cluster will be of the type specified in `worker_type`.
-            #
-            # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
-            # set this value, you must also set `worker_type`.
-            #
-            # The default value is zero.
-        &quot;pythonModule&quot;: &quot;A String&quot;, # Required. The Python module name to run after installing the packages.
-        &quot;args&quot;: [ # Optional. Command-line arguments passed to the training application when it
-            # starts. If your job uses a custom container, then the arguments are passed
-            # to the container&#x27;s &lt;a class=&quot;external&quot; target=&quot;_blank&quot;
-            # href=&quot;https://docs.docker.com/engine/reference/builder/#entrypoint&quot;&gt;
-            # `ENTRYPOINT`&lt;/a&gt; command.
-          &quot;A String&quot;,
-        ],
-        &quot;parameterServerConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
-            #
-            # You should only set `parameterServerConfig.acceleratorConfig` if
-            # `parameterServerType` is set to a Compute Engine machine type. [Learn
-            # about restrictions on accelerator configurations for
-            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-            #
-            # Set `parameterServerConfig.imageUri` only if you build a custom image for
-            # your parameter server. If `parameterServerConfig.imageUri` has not been
-            # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
-            # containers](/ai-platform/training/docs/distributed-training-containers).
-          &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
-              # Registry. Learn more about [configuring custom
-              # containers](/ai-platform/training/docs/distributed-training-containers).
-          &quot;containerArgs&quot;: [ # Arguments to the entrypoint command.
-              # The following rules apply for container_command and container_args:
-              # - If you do not supply command or args:
-              #   The defaults defined in the Docker image are used.
-              # - If you supply a command but no args:
-              #   The default EntryPoint and the default Cmd defined in the Docker image
-              #   are ignored. Your command is run without any arguments.
-              # - If you supply only args:
-              #   The default Entrypoint defined in the Docker image is run with the args
-              #   that you supplied.
-              # - If you supply a command and args:
-              #   The default Entrypoint and the default Cmd defined in the Docker image
-              #   are ignored. Your command is run with your args.
-              # It cannot be set if custom container image is
-              # not provided.
-              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-              # both cannot be set at the same time.
-            &quot;A String&quot;,
-          ],
-          &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-              # [Learn about restrictions on accelerator configurations for
-              # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-              # Note that the AcceleratorConfig can be used in both Jobs and Versions.
-              # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
-              # [accelerators for online
-              # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-            &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
-            &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
-          },
-          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-              # the one used in the custom container. This field is required if the replica
-              # is a TPU worker that uses a custom container. Otherwise, do not specify
-              # this field. This must be a [runtime version that currently supports
-              # training with
-              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-              #
-              # Note that the version of TensorFlow included in a runtime version may
-              # differ from the numbering of the runtime version itself, because it may
-              # have a different [patch
-              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-              # In this field, you must specify the runtime version (TensorFlow minor
-              # version). For example, if your custom container runs TensorFlow `1.x.y`,
-              # specify `1.x`.
-          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-              # If provided, it will override default ENTRYPOINT of the docker image.
-              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-              # It cannot be set if custom container image is
-              # not provided.
-              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-              # both cannot be set at the same time.
-            &quot;A String&quot;,
-          ],
-        },
-        &quot;encryptionConfig&quot;: { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
-            # protect resources created by a training job, instead of using Google&#x27;s
-            # default encryption. If this is set, then all resources created by the
-            # training job will be encrypted with the customer-managed encryption key
-            # that you specify.
-            #
-            # [Learn how and when to use CMEK with AI Platform
-            # Training](/ai-platform/training/docs/cmek).
-            # a resource.
-          &quot;kmsKeyName&quot;: &quot;A String&quot;, # The Cloud KMS resource identifier of the customer-managed encryption key
-              # used to protect a resource, such as a training job. It has the following
-              # format:
-              # `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
-        },
-        &quot;hyperparameters&quot;: { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
-          &quot;params&quot;: [ # Required. The set of parameters to tune.
-            { # Represents a single hyperparameter to optimize.
-              &quot;categoricalValues&quot;: [ # Required if type is `CATEGORICAL`. The list of possible categories.
-                &quot;A String&quot;,
-              ],
-              &quot;parameterName&quot;: &quot;A String&quot;, # Required. The parameter name must be unique amongst all ParameterConfigs in
-                  # a HyperparameterSpec message. E.g., &quot;learning_rate&quot;.
-              &quot;minValue&quot;: 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
-                  # should be unset if type is `CATEGORICAL`. This value should be integers if
-                  # type is INTEGER.
-              &quot;discreteValues&quot;: [ # Required if type is `DISCRETE`.
-                  # A list of feasible points.
-                  # The list should be in strictly increasing order. For instance, this
-                  # parameter might have possible settings of 1.5, 2.5, and 4.0. This list
-                  # should not contain more than 1,000 values.
-                3.14,
-              ],
-              &quot;scaleType&quot;: &quot;A String&quot;, # Optional. How the parameter should be scaled to the hypercube.
-                  # Leave unset for categorical parameters.
-                  # Some kind of scaling is strongly recommended for real or integral
-                  # parameters (e.g., `UNIT_LINEAR_SCALE`).
-              &quot;maxValue&quot;: 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
-                  # should be unset if type is `CATEGORICAL`. This value should be integers if
-                  # type is `INTEGER`.
-              &quot;type&quot;: &quot;A String&quot;, # Required. The type of the parameter.
-            },
-          ],
-          &quot;enableTrialEarlyStopping&quot;: True or False, # Optional. Indicates if the hyperparameter tuning job enables auto trial
-              # early stopping.
-          &quot;resumePreviousJobId&quot;: &quot;A String&quot;, # Optional. The prior hyperparameter tuning job id that users hope to
-              # continue with. The job id will be used to find the corresponding vizier
-              # study guid and resume the study.
-          &quot;maxParallelTrials&quot;: 42, # Optional. The number of training trials to run concurrently.
-              # You can reduce the time it takes to perform hyperparameter tuning by adding
-              # trials in parallel. However, each trail only benefits from the information
-              # gained in completed trials. That means that a trial does not get access to
-              # the results of trials running at the same time, which could reduce the
-              # quality of the overall optimization.
-              #
-              # Each trial will use the same scale tier and machine types.
-              #
-              # Defaults to one.
-          &quot;maxFailedTrials&quot;: 42, # Optional. The number of failed trials that need to be seen before failing
-              # the hyperparameter tuning job. You can specify this field to override the
-              # default failing criteria for AI Platform hyperparameter tuning jobs.
-              #
-              # Defaults to zero, which means the service decides when a hyperparameter
-              # job should fail.
-          &quot;goal&quot;: &quot;A String&quot;, # Required. The type of goal to use for tuning. Available types are
-              # `MAXIMIZE` and `MINIMIZE`.
-              #
-              # Defaults to `MAXIMIZE`.
-          &quot;maxTrials&quot;: 42, # Optional. How many training trials should be attempted to optimize
-              # the specified hyperparameters.
-              #
-              # Defaults to one.
-          &quot;algorithm&quot;: &quot;A String&quot;, # Optional. The search algorithm specified for the hyperparameter
-              # tuning job.
-              # Uses the default AI Platform hyperparameter tuning
-              # algorithm if unspecified.
-          &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # Optional. The TensorFlow summary tag name to use for optimizing trials. For
-              # current versions of TensorFlow, this tag name should exactly match what is
-              # shown in TensorBoard, including all scopes.  For versions of TensorFlow
-              # prior to 0.12, this should be only the tag passed to tf.Summary.
-              # By default, &quot;training/hptuning/metric&quot; will be used.
-        },
+        &quot;serviceAccount&quot;: &quot;A String&quot;, # Optional. Specifies the service account for workload run-as account.
+            # Users submitting jobs must have act-as permission on this run-as account.
+            # If not specified, then CMLE P4SA will be used by default.
         &quot;workerConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
             #
             # You should only set `workerConfig.acceleratorConfig` if `workerType` is set
@@ -1762,6 +1676,16 @@
             # worker. If `workerConfig.imageUri` has not been set, AI Platform uses
             # the value of `masterConfig.imageUri`. Learn more about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
+          &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+              # [Learn about restrictions on accelerator configurations for
+              # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+              # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+              # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+              # [accelerators for online
+              # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+            &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
+            &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
+          },
           &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
               # Registry. Learn more about [configuring custom
               # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -1784,16 +1708,6 @@
               # both cannot be set at the same time.
             &quot;A String&quot;,
           ],
-          &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-              # [Learn about restrictions on accelerator configurations for
-              # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-              # Note that the AcceleratorConfig can be used in both Jobs and Versions.
-              # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
-              # [accelerators for online
-              # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-            &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
-            &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
-          },
           &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
               # the one used in the custom container. This field is required if the replica
               # is a TPU worker that uses a custom container. Otherwise, do not specify
@@ -1818,27 +1732,13 @@
             &quot;A String&quot;,
           ],
         },
-        &quot;parameterServerCount&quot;: &quot;A String&quot;, # Optional. The number of parameter server replicas to use for the training
-            # job. Each replica in the cluster will be of the type specified in
-            # `parameter_server_type`.
+        &quot;useChiefInTfConfig&quot;: True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
+            # variable when training with a custom container. Defaults to `false`. [Learn
+            # more about this
+            # field.](/ai-platform/training/docs/distributed-training-details#chief-versus-master)
             #
-            # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
-            # set this value, you must also set `parameter_server_type`.
-            #
-            # The default value is zero.
-        &quot;packageUris&quot;: [ # Required. The Google Cloud Storage location of the packages with
-            # the training program and any additional dependencies.
-            # The maximum number of package URIs is 100.
-          &quot;A String&quot;,
-        ],
-        &quot;evaluatorCount&quot;: &quot;A String&quot;, # Optional. The number of evaluator replicas to use for the training job.
-            # Each replica in the cluster will be of the type specified in
-            # `evaluator_type`.
-            #
-            # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
-            # set this value, you must also set `evaluator_type`.
-            #
-            # The default value is zero.
+            # This field has no effect for training jobs that don&#x27;t use a custom
+            # container.
         &quot;masterType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
             # job&#x27;s master worker. You must specify this field when `scaleTier` is set to
             # `CUSTOM`.
@@ -1891,65 +1791,27 @@
             # field. Learn more about the [special configuration options for training
             # with
             # TPUs](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
-        &quot;runtimeVersion&quot;: &quot;A String&quot;, # Optional. The AI Platform runtime version to use for training. You must
-            # either specify this field or specify `masterConfig.imageUri`.
+        &quot;parameterServerConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
             #
-            # For more information, see the [runtime version
-            # list](/ai-platform/training/docs/runtime-version-list) and learn [how to
-            # manage runtime versions](/ai-platform/training/docs/versioning).
-        &quot;evaluatorType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
-            # job&#x27;s evaluator nodes.
-            #
-            # The supported values are the same as those described in the entry for
-            # `masterType`.
-            #
-            # This value must be consistent with the category of machine type that
-            # `masterType` uses. In other words, both must be Compute Engine machine
-            # types or both must be legacy machine types.
-            #
-            # This value must be present when `scaleTier` is set to `CUSTOM` and
-            # `evaluatorCount` is greater than zero.
-        &quot;workerType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
-            # job&#x27;s worker nodes.
-            #
-            # The supported values are the same as those described in the entry for
-            # `masterType`.
-            #
-            # This value must be consistent with the category of machine type that
-            # `masterType` uses. In other words, both must be Compute Engine machine
-            # types or both must be legacy machine types.
-            #
-            # If you use `cloud_tpu` for this value, see special instructions for
-            # [configuring a custom TPU
-            # machine](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
-            #
-            # This value must be present when `scaleTier` is set to `CUSTOM` and
-            # `workerCount` is greater than zero.
-        &quot;region&quot;: &quot;A String&quot;, # Required. The region to run the training job in. See the [available
-            # regions](/ai-platform/training/docs/regions) for AI Platform Training.
-        &quot;parameterServerType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
-            # job&#x27;s parameter server.
-            #
-            # The supported values are the same as those described in the entry for
-            # `master_type`.
-            #
-            # This value must be consistent with the category of machine type that
-            # `masterType` uses. In other words, both must be Compute Engine machine
-            # types or both must be legacy machine types.
-            #
-            # This value must be present when `scaleTier` is set to `CUSTOM` and
-            # `parameter_server_count` is greater than zero.
-        &quot;masterConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for your master worker.
-            #
-            # You should only set `masterConfig.acceleratorConfig` if `masterType` is set
-            # to a Compute Engine machine type. Learn about [restrictions on accelerator
-            # configurations for
+            # You should only set `parameterServerConfig.acceleratorConfig` if
+            # `parameterServerType` is set to a Compute Engine machine type. [Learn
+            # about restrictions on accelerator configurations for
             # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
             #
-            # Set `masterConfig.imageUri` only if you build a custom image. Only one of
-            # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
-            # about [configuring custom
+            # Set `parameterServerConfig.imageUri` only if you build a custom image for
+            # your parameter server. If `parameterServerConfig.imageUri` has not been
+            # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
+          &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+              # [Learn about restrictions on accelerator configurations for
+              # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+              # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+              # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+              # [accelerators for online
+              # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+            &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
+            &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
+          },
           &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
               # Registry. Learn more about [configuring custom
               # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -1972,16 +1834,6 @@
               # both cannot be set at the same time.
             &quot;A String&quot;,
           ],
-          &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-              # [Learn about restrictions on accelerator configurations for
-              # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-              # Note that the AcceleratorConfig can be used in both Jobs and Versions.
-              # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
-              # [accelerators for online
-              # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-            &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
-            &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
-          },
           &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
               # the one used in the custom container. This field is required if the replica
               # is a TPU worker that uses a custom container. Otherwise, do not specify
@@ -2006,8 +1858,8 @@
             &quot;A String&quot;,
           ],
         },
-        &quot;scaleTier&quot;: &quot;A String&quot;, # Required. Specifies the machine types, the number of replicas for workers
-            # and parameter servers.
+        &quot;region&quot;: &quot;A String&quot;, # Required. The region to run the training job in. See the [available
+            # regions](/ai-platform/training/docs/regions) for AI Platform Training.
         &quot;jobDir&quot;: &quot;A String&quot;, # Optional. A Google Cloud Storage path in which to store training outputs
             # and other data needed for training. This path is passed to your TensorFlow
             # program as the &#x27;--job-dir&#x27; command-line argument. The benefit of specifying
@@ -2026,6 +1878,308 @@
             #
             # Read more about the Python versions available for [each runtime
             # version](/ml-engine/docs/runtime-version-list).
+        &quot;hyperparameters&quot;: { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
+          &quot;maxTrials&quot;: 42, # Optional. How many training trials should be attempted to optimize
+              # the specified hyperparameters.
+              #
+              # Defaults to one.
+          &quot;enableTrialEarlyStopping&quot;: True or False, # Optional. Indicates if the hyperparameter tuning job enables auto trial
+              # early stopping.
+          &quot;params&quot;: [ # Required. The set of parameters to tune.
+            { # Represents a single hyperparameter to optimize.
+              &quot;minValue&quot;: 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
+                  # should be unset if type is `CATEGORICAL`. This value should be integers if
+                  # type is INTEGER.
+              &quot;categoricalValues&quot;: [ # Required if type is `CATEGORICAL`. The list of possible categories.
+                &quot;A String&quot;,
+              ],
+              &quot;scaleType&quot;: &quot;A String&quot;, # Optional. How the parameter should be scaled to the hypercube.
+                  # Leave unset for categorical parameters.
+                  # Some kind of scaling is strongly recommended for real or integral
+                  # parameters (e.g., `UNIT_LINEAR_SCALE`).
+              &quot;discreteValues&quot;: [ # Required if type is `DISCRETE`.
+                  # A list of feasible points.
+                  # The list should be in strictly increasing order. For instance, this
+                  # parameter might have possible settings of 1.5, 2.5, and 4.0. This list
+                  # should not contain more than 1,000 values.
+                3.14,
+              ],
+              &quot;type&quot;: &quot;A String&quot;, # Required. The type of the parameter.
+              &quot;maxValue&quot;: 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
+                  # should be unset if type is `CATEGORICAL`. This value should be integers if
+                  # type is `INTEGER`.
+              &quot;parameterName&quot;: &quot;A String&quot;, # Required. The parameter name must be unique amongst all ParameterConfigs in
+                  # a HyperparameterSpec message. E.g., &quot;learning_rate&quot;.
+            },
+          ],
+          &quot;maxFailedTrials&quot;: 42, # Optional. The number of failed trials that need to be seen before failing
+              # the hyperparameter tuning job. You can specify this field to override the
+              # default failing criteria for AI Platform hyperparameter tuning jobs.
+              #
+              # Defaults to zero, which means the service decides when a hyperparameter
+              # job should fail.
+          &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # Optional. The TensorFlow summary tag name to use for optimizing trials. For
+              # current versions of TensorFlow, this tag name should exactly match what is
+              # shown in TensorBoard, including all scopes.  For versions of TensorFlow
+              # prior to 0.12, this should be only the tag passed to tf.Summary.
+              # By default, &quot;training/hptuning/metric&quot; will be used.
+          &quot;resumePreviousJobId&quot;: &quot;A String&quot;, # Optional. The prior hyperparameter tuning job id that users hope to
+              # continue with. The job id will be used to find the corresponding vizier
+              # study guid and resume the study.
+          &quot;goal&quot;: &quot;A String&quot;, # Required. The type of goal to use for tuning. Available types are
+              # `MAXIMIZE` and `MINIMIZE`.
+              #
+              # Defaults to `MAXIMIZE`.
+          &quot;algorithm&quot;: &quot;A String&quot;, # Optional. The search algorithm specified for the hyperparameter
+              # tuning job.
+              # Uses the default AI Platform hyperparameter tuning
+              # algorithm if unspecified.
+          &quot;maxParallelTrials&quot;: 42, # Optional. The number of training trials to run concurrently.
+              # You can reduce the time it takes to perform hyperparameter tuning by adding
+              # trials in parallel. However, each trail only benefits from the information
+              # gained in completed trials. That means that a trial does not get access to
+              # the results of trials running at the same time, which could reduce the
+              # quality of the overall optimization.
+              #
+              # Each trial will use the same scale tier and machine types.
+              #
+              # Defaults to one.
+        },
+        &quot;evaluatorType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
+            # job&#x27;s evaluator nodes.
+            #
+            # The supported values are the same as those described in the entry for
+            # `masterType`.
+            #
+            # This value must be consistent with the category of machine type that
+            # `masterType` uses. In other words, both must be Compute Engine machine
+            # types or both must be legacy machine types.
+            #
+            # This value must be present when `scaleTier` is set to `CUSTOM` and
+            # `evaluatorCount` is greater than zero.
+        &quot;network&quot;: &quot;A String&quot;, # Optional. The full name of the Google Compute Engine
+            # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
+            # is peered. For example, projects/12345/global/networks/myVPC. Format is of
+            # the form projects/{project}/global/networks/{network}. Where {project} is a
+            # project number, as in &#x27;12345&#x27;, and {network} is network name.&quot;.
+            #
+            # Private services access must already be configured for the network. If left
+            # unspecified, the Job is not peered with any network. Learn more -
+            # Connecting Job to user network over private
+            # IP.
+        &quot;parameterServerType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
+            # job&#x27;s parameter server.
+            #
+            # The supported values are the same as those described in the entry for
+            # `master_type`.
+            #
+            # This value must be consistent with the category of machine type that
+            # `masterType` uses. In other words, both must be Compute Engine machine
+            # types or both must be legacy machine types.
+            #
+            # This value must be present when `scaleTier` is set to `CUSTOM` and
+            # `parameter_server_count` is greater than zero.
+        &quot;workerType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
+            # job&#x27;s worker nodes.
+            #
+            # The supported values are the same as those described in the entry for
+            # `masterType`.
+            #
+            # This value must be consistent with the category of machine type that
+            # `masterType` uses. In other words, both must be Compute Engine machine
+            # types or both must be legacy machine types.
+            #
+            # If you use `cloud_tpu` for this value, see special instructions for
+            # [configuring a custom TPU
+            # machine](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
+            #
+            # This value must be present when `scaleTier` is set to `CUSTOM` and
+            # `workerCount` is greater than zero.
+        &quot;masterConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for your master worker.
+            #
+            # You should only set `masterConfig.acceleratorConfig` if `masterType` is set
+            # to a Compute Engine machine type. Learn about [restrictions on accelerator
+            # configurations for
+            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+            #
+            # Set `masterConfig.imageUri` only if you build a custom image. Only one of
+            # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
+            # about [configuring custom
+            # containers](/ai-platform/training/docs/distributed-training-containers).
+          &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+              # [Learn about restrictions on accelerator configurations for
+              # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+              # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+              # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+              # [accelerators for online
+              # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+            &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
+            &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
+          },
+          &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
+              # Registry. Learn more about [configuring custom
+              # containers](/ai-platform/training/docs/distributed-training-containers).
+          &quot;containerArgs&quot;: [ # Arguments to the entrypoint command.
+              # The following rules apply for container_command and container_args:
+              # - If you do not supply command or args:
+              #   The defaults defined in the Docker image are used.
+              # - If you supply a command but no args:
+              #   The default EntryPoint and the default Cmd defined in the Docker image
+              #   are ignored. Your command is run without any arguments.
+              # - If you supply only args:
+              #   The default Entrypoint defined in the Docker image is run with the args
+              #   that you supplied.
+              # - If you supply a command and args:
+              #   The default Entrypoint and the default Cmd defined in the Docker image
+              #   are ignored. Your command is run with your args.
+              # It cannot be set if custom container image is
+              # not provided.
+              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+              # both cannot be set at the same time.
+            &quot;A String&quot;,
+          ],
+          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+              # the one used in the custom container. This field is required if the replica
+              # is a TPU worker that uses a custom container. Otherwise, do not specify
+              # this field. This must be a [runtime version that currently supports
+              # training with
+              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+              #
+              # Note that the version of TensorFlow included in a runtime version may
+              # differ from the numbering of the runtime version itself, because it may
+              # have a different [patch
+              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+              # In this field, you must specify the runtime version (TensorFlow minor
+              # version). For example, if your custom container runs TensorFlow `1.x.y`,
+              # specify `1.x`.
+          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+              # If provided, it will override default ENTRYPOINT of the docker image.
+              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
+              # It cannot be set if custom container image is
+              # not provided.
+              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+              # both cannot be set at the same time.
+            &quot;A String&quot;,
+          ],
+        },
+        &quot;evaluatorCount&quot;: &quot;A String&quot;, # Optional. The number of evaluator replicas to use for the training job.
+            # Each replica in the cluster will be of the type specified in
+            # `evaluator_type`.
+            #
+            # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+            # set this value, you must also set `evaluator_type`.
+            #
+            # The default value is zero.
+        &quot;args&quot;: [ # Optional. Command-line arguments passed to the training application when it
+            # starts. If your job uses a custom container, then the arguments are passed
+            # to the container&#x27;s &lt;a class=&quot;external&quot; target=&quot;_blank&quot;
+            # href=&quot;https://docs.docker.com/engine/reference/builder/#entrypoint&quot;&gt;
+            # `ENTRYPOINT`&lt;/a&gt; command.
+          &quot;A String&quot;,
+        ],
+        &quot;pythonModule&quot;: &quot;A String&quot;, # Required. The Python module name to run after installing the packages.
+        &quot;runtimeVersion&quot;: &quot;A String&quot;, # Optional. The AI Platform runtime version to use for training. You must
+            # either specify this field or specify `masterConfig.imageUri`.
+            #
+            # For more information, see the [runtime version
+            # list](/ai-platform/training/docs/runtime-version-list) and learn [how to
+            # manage runtime versions](/ai-platform/training/docs/versioning).
+        &quot;parameterServerCount&quot;: &quot;A String&quot;, # Optional. The number of parameter server replicas to use for the training
+            # job. Each replica in the cluster will be of the type specified in
+            # `parameter_server_type`.
+            #
+            # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+            # set this value, you must also set `parameter_server_type`.
+            #
+            # The default value is zero.
+        &quot;evaluatorConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
+            #
+            # You should only set `evaluatorConfig.acceleratorConfig` if
+            # `evaluatorType` is set to a Compute Engine machine type. [Learn
+            # about restrictions on accelerator configurations for
+            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+            #
+            # Set `evaluatorConfig.imageUri` only if you build a custom image for
+            # your evaluator. If `evaluatorConfig.imageUri` has not been
+            # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
+            # containers](/ai-platform/training/docs/distributed-training-containers).
+          &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+              # [Learn about restrictions on accelerator configurations for
+              # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+              # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+              # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+              # [accelerators for online
+              # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+            &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
+            &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
+          },
+          &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
+              # Registry. Learn more about [configuring custom
+              # containers](/ai-platform/training/docs/distributed-training-containers).
+          &quot;containerArgs&quot;: [ # Arguments to the entrypoint command.
+              # The following rules apply for container_command and container_args:
+              # - If you do not supply command or args:
+              #   The defaults defined in the Docker image are used.
+              # - If you supply a command but no args:
+              #   The default EntryPoint and the default Cmd defined in the Docker image
+              #   are ignored. Your command is run without any arguments.
+              # - If you supply only args:
+              #   The default Entrypoint defined in the Docker image is run with the args
+              #   that you supplied.
+              # - If you supply a command and args:
+              #   The default Entrypoint and the default Cmd defined in the Docker image
+              #   are ignored. Your command is run with your args.
+              # It cannot be set if custom container image is
+              # not provided.
+              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+              # both cannot be set at the same time.
+            &quot;A String&quot;,
+          ],
+          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+              # the one used in the custom container. This field is required if the replica
+              # is a TPU worker that uses a custom container. Otherwise, do not specify
+              # this field. This must be a [runtime version that currently supports
+              # training with
+              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+              #
+              # Note that the version of TensorFlow included in a runtime version may
+              # differ from the numbering of the runtime version itself, because it may
+              # have a different [patch
+              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+              # In this field, you must specify the runtime version (TensorFlow minor
+              # version). For example, if your custom container runs TensorFlow `1.x.y`,
+              # specify `1.x`.
+          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+              # If provided, it will override default ENTRYPOINT of the docker image.
+              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
+              # It cannot be set if custom container image is
+              # not provided.
+              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+              # both cannot be set at the same time.
+            &quot;A String&quot;,
+          ],
+        },
+        &quot;encryptionConfig&quot;: { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
+            # protect resources created by a training job, instead of using Google&#x27;s
+            # default encryption. If this is set, then all resources created by the
+            # training job will be encrypted with the customer-managed encryption key
+            # that you specify.
+            #
+            # [Learn how and when to use CMEK with AI Platform
+            # Training](/ai-platform/training/docs/cmek).
+            # a resource.
+          &quot;kmsKeyName&quot;: &quot;A String&quot;, # The Cloud KMS resource identifier of the customer-managed encryption key
+              # used to protect a resource, such as a training job. It has the following
+              # format:
+              # `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
+        },
+        &quot;workerCount&quot;: &quot;A String&quot;, # Optional. The number of worker replicas to use for the training job. Each
+            # replica in the cluster will be of the type specified in `worker_type`.
+            #
+            # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+            # set this value, you must also set `worker_type`.
+            #
+            # The default value is zero.
         &quot;scheduling&quot;: { # All parameters related to scheduling of training jobs. # Optional. Scheduling options for a training job.
           &quot;maxWaitTime&quot;: &quot;A String&quot;,
           &quot;maxRunningTime&quot;: &quot;A String&quot;, # Optional. The maximum job running time, expressed in seconds. The field can
@@ -2052,167 +2206,22 @@
               #   ...
               # ```
         },
-        &quot;network&quot;: &quot;A String&quot;, # Optional. The full name of the Google Compute Engine
-            # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
-            # is peered. For example, projects/12345/global/networks/myVPC. Format is of
-            # the form projects/{project}/global/networks/{network}. Where {project} is a
-            # project number, as in &#x27;12345&#x27;, and {network} is network name.&quot;.
-            #
-            # Private services access must already be configured for the network. If left
-            # unspecified, the Job is not peered with any network. Learn more -
-            # Connecting Job to user network over private
-            # IP.
-        &quot;evaluatorConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
-            #
-            # You should only set `evaluatorConfig.acceleratorConfig` if
-            # `evaluatorType` is set to a Compute Engine machine type. [Learn
-            # about restrictions on accelerator configurations for
-            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-            #
-            # Set `evaluatorConfig.imageUri` only if you build a custom image for
-            # your evaluator. If `evaluatorConfig.imageUri` has not been
-            # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
-            # containers](/ai-platform/training/docs/distributed-training-containers).
-          &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
-              # Registry. Learn more about [configuring custom
-              # containers](/ai-platform/training/docs/distributed-training-containers).
-          &quot;containerArgs&quot;: [ # Arguments to the entrypoint command.
-              # The following rules apply for container_command and container_args:
-              # - If you do not supply command or args:
-              #   The defaults defined in the Docker image are used.
-              # - If you supply a command but no args:
-              #   The default EntryPoint and the default Cmd defined in the Docker image
-              #   are ignored. Your command is run without any arguments.
-              # - If you supply only args:
-              #   The default Entrypoint defined in the Docker image is run with the args
-              #   that you supplied.
-              # - If you supply a command and args:
-              #   The default Entrypoint and the default Cmd defined in the Docker image
-              #   are ignored. Your command is run with your args.
-              # It cannot be set if custom container image is
-              # not provided.
-              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-              # both cannot be set at the same time.
-            &quot;A String&quot;,
-          ],
-          &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-              # [Learn about restrictions on accelerator configurations for
-              # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-              # Note that the AcceleratorConfig can be used in both Jobs and Versions.
-              # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
-              # [accelerators for online
-              # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-            &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
-            &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
-          },
-          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-              # the one used in the custom container. This field is required if the replica
-              # is a TPU worker that uses a custom container. Otherwise, do not specify
-              # this field. This must be a [runtime version that currently supports
-              # training with
-              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-              #
-              # Note that the version of TensorFlow included in a runtime version may
-              # differ from the numbering of the runtime version itself, because it may
-              # have a different [patch
-              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-              # In this field, you must specify the runtime version (TensorFlow minor
-              # version). For example, if your custom container runs TensorFlow `1.x.y`,
-              # specify `1.x`.
-          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-              # If provided, it will override default ENTRYPOINT of the docker image.
-              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-              # It cannot be set if custom container image is
-              # not provided.
-              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-              # both cannot be set at the same time.
-            &quot;A String&quot;,
-          ],
-        },
-        &quot;useChiefInTfConfig&quot;: True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
-            # variable when training with a custom container. Defaults to `false`. [Learn
-            # more about this
-            # field.](/ai-platform/training/docs/distributed-training-details#chief-versus-master)
-            #
-            # This field has no effect for training jobs that don&#x27;t use a custom
-            # container.
-      },
-      &quot;state&quot;: &quot;A String&quot;, # Output only. The detailed state of a job.
-      &quot;jobId&quot;: &quot;A String&quot;, # Required. The user-specified id of the job.
-      &quot;endTime&quot;: &quot;A String&quot;, # Output only. When the job processing was completed.
-      &quot;startTime&quot;: &quot;A String&quot;, # Output only. When the job processing was started.
-      &quot;predictionOutput&quot;: { # Represents results of a prediction job. # The current prediction job result.
-        &quot;errorCount&quot;: &quot;A String&quot;, # The number of data instances which resulted in errors.
-        &quot;outputPath&quot;: &quot;A String&quot;, # The output Google Cloud Storage location provided at the job creation time.
-        &quot;nodeHours&quot;: 3.14, # Node hours used by the batch prediction job.
-        &quot;predictionCount&quot;: &quot;A String&quot;, # The number of generated predictions.
-      },
-      &quot;trainingOutput&quot;: { # Represents results of a training job. Output only. # The current training job result.
-        &quot;isBuiltInAlgorithmJob&quot;: True or False, # Whether this job is a built-in Algorithm job.
-        &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
-            # Only set for built-in algorithms jobs.
-          &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
-              # saves the trained model. Only set for successful jobs that don&#x27;t use
-              # hyperparameter tuning.
-          &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
-          &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
-              # trained.
-          &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
-        },
-        &quot;trials&quot;: [ # Results for individual Hyperparameter trials.
-            # Only set for hyperparameter tuning jobs.
-          { # Represents the result of a single hyperparameter tuning trial from a
-              # training job. The TrainingOutput object that is returned on successful
-              # completion of a training job with hyperparameter tuning includes a list
-              # of HyperparameterOutput objects, one for each successful trial.
-            &quot;trialId&quot;: &quot;A String&quot;, # The trial id for these results.
-            &quot;endTime&quot;: &quot;A String&quot;, # Output only. End time for the trial.
-            &quot;startTime&quot;: &quot;A String&quot;, # Output only. Start time for the trial.
-            &quot;isTrialStoppedEarly&quot;: True or False, # True if the trial is stopped early.
-            &quot;finalMetric&quot;: { # An observed value of a metric. # The final objective metric seen for this trial.
-              &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
-              &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
-            },
-            &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
-                # Only set for trials of built-in algorithms jobs that have succeeded.
-              &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
-                  # saves the trained model. Only set for successful jobs that don&#x27;t use
-                  # hyperparameter tuning.
-              &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
-              &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
-                  # trained.
-              &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
-            },
-            &quot;state&quot;: &quot;A String&quot;, # Output only. The detailed state of the trial.
-            &quot;allMetrics&quot;: [ # All recorded object metrics for this trial. This field is not currently
-                # populated.
-              { # An observed value of a metric.
-                &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
-                &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
-              },
-            ],
-            &quot;hyperparameters&quot;: { # The hyperparameters given to this trial.
-              &quot;a_key&quot;: &quot;A String&quot;,
-            },
-          },
+        &quot;scaleTier&quot;: &quot;A String&quot;, # Required. Specifies the machine types, the number of replicas for workers
+            # and parameter servers.
+        &quot;packageUris&quot;: [ # Required. The Google Cloud Storage location of the packages with
+            # the training program and any additional dependencies.
+            # The maximum number of package URIs is 100.
+          &quot;A String&quot;,
         ],
-        &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # The TensorFlow summary tag name used for optimizing hyperparameter tuning
-            # trials. See
-            # [`HyperparameterSpec.hyperparameterMetricTag`](#HyperparameterSpec.FIELDS.hyperparameter_metric_tag)
-            # for more information. Only set for hyperparameter tuning jobs.
-        &quot;completedTrialCount&quot;: &quot;A String&quot;, # The number of hyperparameter tuning trials that completed successfully.
-            # Only set for hyperparameter tuning jobs.
-        &quot;isHyperparameterTuningJob&quot;: True or False, # Whether this job is a hyperparameter tuning job.
-        &quot;consumedMLUnits&quot;: 3.14, # The amount of ML units consumed by the job.
       },
-      &quot;createTime&quot;: &quot;A String&quot;, # Output only. When the job was created.
-      &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your jobs.
-          # Each label is a key-value pair, where both the key and the value are
-          # arbitrary strings that you supply.
-          # For more information, see the documentation on
-          # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
-        &quot;a_key&quot;: &quot;A String&quot;,
-      },
+      &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
+          # prevent simultaneous updates of a job from overwriting each other.
+          # It is strongly suggested that systems make use of the `etag` in the
+          # read-modify-write cycle to perform job updates in order to avoid race
+          # conditions: An `etag` is returned in the response to `GetJob`, and
+          # systems are expected to put that etag in the request to `UpdateJob` to
+          # ensure that their change will be applied to the same version of the job.
+      &quot;jobId&quot;: &quot;A String&quot;, # Required. The user-specified id of the job.
     }</pre>
 </div>
 
@@ -2312,30 +2321,18 @@
       #
       # For a description of IAM and its features, see the
       # [IAM documentation](https://cloud.google.com/iam/docs/).
-    &quot;version&quot;: 42, # Specifies the format of the policy.
-        #
-        # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
-        # are rejected.
-        #
-        # Any operation that affects conditional role bindings must specify version
-        # `3`. This requirement applies to the following operations:
-        #
-        # * Getting a policy that includes a conditional role binding
-        # * Adding a conditional role binding to a policy
-        # * Changing a conditional role binding in a policy
-        # * Removing any role binding, with or without a condition, from a policy
-        #   that includes conditions
+    &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
+        # prevent simultaneous updates of a policy from overwriting each other.
+        # It is strongly suggested that systems make use of the `etag` in the
+        # read-modify-write cycle to perform policy updates in order to avoid race
+        # conditions: An `etag` is returned in the response to `getIamPolicy`, and
+        # systems are expected to put that etag in the request to `setIamPolicy` to
+        # ensure that their change will be applied to the same version of the policy.
         #
         # **Important:** If you use IAM Conditions, you must include the `etag` field
         # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
         # you to overwrite a version `3` policy with a version `1` policy, and all of
         # the conditions in the version `3` policy are lost.
-        #
-        # If a policy does not include any conditions, operations on that policy may
-        # specify any valid version or leave the field unset.
-        #
-        # To learn which resources support conditions in their IAM policies, see the
-        # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
     &quot;auditConfigs&quot;: [ # Specifies cloud audit logging configuration for this policy.
       { # Specifies the audit configuration for a service.
           # The configuration determines which permission types are logged, and what
@@ -2352,7 +2349,7 @@
           #     {
           #       &quot;audit_configs&quot;: [
           #         {
-          #           &quot;service&quot;: &quot;allServices&quot;
+          #           &quot;service&quot;: &quot;allServices&quot;,
           #           &quot;audit_log_configs&quot;: [
           #             {
           #               &quot;log_type&quot;: &quot;DATA_READ&quot;,
@@ -2361,18 +2358,18 @@
           #               ]
           #             },
           #             {
-          #               &quot;log_type&quot;: &quot;DATA_WRITE&quot;,
+          #               &quot;log_type&quot;: &quot;DATA_WRITE&quot;
           #             },
           #             {
-          #               &quot;log_type&quot;: &quot;ADMIN_READ&quot;,
+          #               &quot;log_type&quot;: &quot;ADMIN_READ&quot;
           #             }
           #           ]
           #         },
           #         {
-          #           &quot;service&quot;: &quot;sampleservice.googleapis.com&quot;
+          #           &quot;service&quot;: &quot;sampleservice.googleapis.com&quot;,
           #           &quot;audit_log_configs&quot;: [
           #             {
-          #               &quot;log_type&quot;: &quot;DATA_READ&quot;,
+          #               &quot;log_type&quot;: &quot;DATA_READ&quot;
           #             },
           #             {
           #               &quot;log_type&quot;: &quot;DATA_WRITE&quot;,
@@ -2404,27 +2401,53 @@
               #           ]
               #         },
               #         {
-              #           &quot;log_type&quot;: &quot;DATA_WRITE&quot;,
+              #           &quot;log_type&quot;: &quot;DATA_WRITE&quot;
               #         }
               #       ]
               #     }
               #
               # This enables &#x27;DATA_READ&#x27; and &#x27;DATA_WRITE&#x27; logging, while exempting
               # jose@example.com from DATA_READ logging.
+            &quot;logType&quot;: &quot;A String&quot;, # The log type that this config enables.
             &quot;exemptedMembers&quot;: [ # Specifies the identities that do not cause logging for this type of
                 # permission.
                 # Follows the same format of Binding.members.
               &quot;A String&quot;,
             ],
-            &quot;logType&quot;: &quot;A String&quot;, # The log type that this config enables.
           },
         ],
       },
     ],
+    &quot;version&quot;: 42, # Specifies the format of the policy.
+        #
+        # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
+        # are rejected.
+        #
+        # Any operation that affects conditional role bindings must specify version
+        # `3`. This requirement applies to the following operations:
+        #
+        # * Getting a policy that includes a conditional role binding
+        # * Adding a conditional role binding to a policy
+        # * Changing a conditional role binding in a policy
+        # * Removing any role binding, with or without a condition, from a policy
+        #   that includes conditions
+        #
+        # **Important:** If you use IAM Conditions, you must include the `etag` field
+        # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+        # you to overwrite a version `3` policy with a version `1` policy, and all of
+        # the conditions in the version `3` policy are lost.
+        #
+        # If a policy does not include any conditions, operations on that policy may
+        # specify any valid version or leave the field unset.
+        #
+        # To learn which resources support conditions in their IAM policies, see the
+        # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
     &quot;bindings&quot;: [ # Associates a list of `members` to a `role`. Optionally, may specify a
         # `condition` that determines how and when the `bindings` are applied. Each
         # of the `bindings` must contain at least one member.
       { # Associates `members` with a `role`.
+        &quot;role&quot;: &quot;A String&quot;, # Role that is assigned to `members`.
+            # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
         &quot;condition&quot;: { # Represents a textual expression in the Common Expression Language (CEL) # The condition that is associated with this binding.
             #
             # If the condition evaluates to `true`, then this binding applies to the
@@ -2467,8 +2490,6 @@
             # The exact variables and functions that may be referenced within an expression
             # are determined by the service that evaluates it. See the service
             # documentation for additional information.
-          &quot;description&quot;: &quot;A String&quot;, # Optional. Description of the expression. This is a longer text which
-              # describes the expression, e.g. when hovered over it in a UI.
           &quot;expression&quot;: &quot;A String&quot;, # Textual representation of an expression in Common Expression Language
               # syntax.
           &quot;title&quot;: &quot;A String&quot;, # Optional. Title for the expression, i.e. a short string describing
@@ -2476,6 +2497,8 @@
               # expression.
           &quot;location&quot;: &quot;A String&quot;, # Optional. String indicating the location of the expression for error
               # reporting, e.g. a file name and a position in the file.
+          &quot;description&quot;: &quot;A String&quot;, # Optional. Description of the expression. This is a longer text which
+              # describes the expression, e.g. when hovered over it in a UI.
         },
         &quot;members&quot;: [ # Specifies the identities requesting access for a Cloud Platform resource.
             # `members` can have the following values:
@@ -2522,22 +2545,8 @@
             #
           &quot;A String&quot;,
         ],
-        &quot;role&quot;: &quot;A String&quot;, # Role that is assigned to `members`.
-            # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
       },
     ],
-    &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
-        # prevent simultaneous updates of a policy from overwriting each other.
-        # It is strongly suggested that systems make use of the `etag` in the
-        # read-modify-write cycle to perform policy updates in order to avoid race
-        # conditions: An `etag` is returned in the response to `getIamPolicy`, and
-        # systems are expected to put that etag in the request to `setIamPolicy` to
-        # ensure that their change will be applied to the same version of the policy.
-        #
-        # **Important:** If you use IAM Conditions, you must include the `etag` field
-        # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
-        # you to overwrite a version `3` policy with a version `1` policy, and all of
-        # the conditions in the version `3` policy are lost.
   }</pre>
 </div>
 
@@ -2577,22 +2586,38 @@
   An object of the form:
 
     { # Response message for the ListJobs method.
+    &quot;nextPageToken&quot;: &quot;A String&quot;, # Optional. Pass this token as the `page_token` field of the request for a
+        # subsequent call.
     &quot;jobs&quot;: [ # The list of jobs.
       { # Represents a training or prediction job.
+          &quot;createTime&quot;: &quot;A String&quot;, # Output only. When the job was created.
           &quot;predictionInput&quot;: { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
+            &quot;dataFormat&quot;: &quot;A String&quot;, # Required. The format of the input data files.
+            &quot;outputPath&quot;: &quot;A String&quot;, # Required. The output Google Cloud Storage location.
+            &quot;inputPaths&quot;: [ # Required. The Cloud Storage location of the input data files. May contain
+                # &lt;a href=&quot;/storage/docs/gsutil/addlhelp/WildcardNames&quot;&gt;wildcards&lt;/a&gt;.
+              &quot;A String&quot;,
+            ],
+            &quot;region&quot;: &quot;A String&quot;, # Required. The Google Compute Engine region to run the prediction job in.
+                # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
+                # for AI Platform services.
             &quot;versionName&quot;: &quot;A String&quot;, # Use this field if you want to specify a version of the model to use. The
                 # string is formatted the same way as `model_version`, with the addition
                 # of the version information:
                 #
                 # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION&quot;`
+            &quot;uri&quot;: &quot;A String&quot;, # Use this field if you want to specify a Google Cloud Storage path for
+                # the model to use.
+            &quot;outputDataFormat&quot;: &quot;A String&quot;, # Optional. Format of the output data files, defaults to JSON.
+            &quot;runtimeVersion&quot;: &quot;A String&quot;, # Optional. The AI Platform runtime version to use for this batch
+                # prediction. If not set, AI Platform will pick the runtime version used
+                # during the CreateVersion request for this model version, or choose the
+                # latest stable version when model version information is not available
+                # such as when the model is specified by uri.
             &quot;modelName&quot;: &quot;A String&quot;, # Use this field if you want to use the default version for the specified
                 # model. The string must use the following format:
                 #
                 # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL&quot;`
-            &quot;uri&quot;: &quot;A String&quot;, # Use this field if you want to specify a Google Cloud Storage path for
-                # the model to use.
-            &quot;maxWorkerCount&quot;: &quot;A String&quot;, # Optional. The maximum number of workers to be used for parallel processing.
-                # Defaults to 10 if not specified.
             &quot;signatureName&quot;: &quot;A String&quot;, # Optional. The name of the signature defined in the SavedModel to use for
                 # this job. Please refer to
                 # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
@@ -2601,202 +2626,96 @@
                 # Defaults to
                 # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
                 # , which is &quot;serving_default&quot;.
-            &quot;outputPath&quot;: &quot;A String&quot;, # Required. The output Google Cloud Storage location.
-            &quot;outputDataFormat&quot;: &quot;A String&quot;, # Optional. Format of the output data files, defaults to JSON.
-            &quot;dataFormat&quot;: &quot;A String&quot;, # Required. The format of the input data files.
             &quot;batchSize&quot;: &quot;A String&quot;, # Optional. Number of records per batch, defaults to 64.
                 # The service will buffer batch_size number of records in memory before
                 # invoking one Tensorflow prediction call internally. So take the record
                 # size and memory available into consideration when setting this parameter.
-            &quot;runtimeVersion&quot;: &quot;A String&quot;, # Optional. The AI Platform runtime version to use for this batch
-                # prediction. If not set, AI Platform will pick the runtime version used
-                # during the CreateVersion request for this model version, or choose the
-                # latest stable version when model version information is not available
-                # such as when the model is specified by uri.
-            &quot;inputPaths&quot;: [ # Required. The Cloud Storage location of the input data files. May contain
-                # &lt;a href=&quot;/storage/docs/gsutil/addlhelp/WildcardNames&quot;&gt;wildcards&lt;/a&gt;.
-              &quot;A String&quot;,
+            &quot;maxWorkerCount&quot;: &quot;A String&quot;, # Optional. The maximum number of workers to be used for parallel processing.
+                # Defaults to 10 if not specified.
+          },
+          &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your jobs.
+              # Each label is a key-value pair, where both the key and the value are
+              # arbitrary strings that you supply.
+              # For more information, see the documentation on
+              # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
+            &quot;a_key&quot;: &quot;A String&quot;,
+          },
+          &quot;endTime&quot;: &quot;A String&quot;, # Output only. When the job processing was completed.
+          &quot;trainingOutput&quot;: { # Represents results of a training job. Output only. # The current training job result.
+            &quot;trials&quot;: [ # Results for individual Hyperparameter trials.
+                # Only set for hyperparameter tuning jobs.
+              { # Represents the result of a single hyperparameter tuning trial from a
+                  # training job. The TrainingOutput object that is returned on successful
+                  # completion of a training job with hyperparameter tuning includes a list
+                  # of HyperparameterOutput objects, one for each successful trial.
+                &quot;endTime&quot;: &quot;A String&quot;, # Output only. End time for the trial.
+                &quot;finalMetric&quot;: { # An observed value of a metric. # The final objective metric seen for this trial.
+                  &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
+                  &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
+                },
+                &quot;hyperparameters&quot;: { # The hyperparameters given to this trial.
+                  &quot;a_key&quot;: &quot;A String&quot;,
+                },
+                &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
+                    # Only set for trials of built-in algorithms jobs that have succeeded.
+                  &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
+                      # saves the trained model. Only set for successful jobs that don&#x27;t use
+                      # hyperparameter tuning.
+                  &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
+                  &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
+                      # trained.
+                  &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
+                },
+                &quot;startTime&quot;: &quot;A String&quot;, # Output only. Start time for the trial.
+                &quot;allMetrics&quot;: [ # All recorded object metrics for this trial. This field is not currently
+                    # populated.
+                  { # An observed value of a metric.
+                    &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
+                    &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
+                  },
+                ],
+                &quot;trialId&quot;: &quot;A String&quot;, # The trial id for these results.
+                &quot;isTrialStoppedEarly&quot;: True or False, # True if the trial is stopped early.
+                &quot;state&quot;: &quot;A String&quot;, # Output only. The detailed state of the trial.
+              },
             ],
-            &quot;region&quot;: &quot;A String&quot;, # Required. The Google Compute Engine region to run the prediction job in.
-                # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
-                # for AI Platform services.
+            &quot;completedTrialCount&quot;: &quot;A String&quot;, # The number of hyperparameter tuning trials that completed successfully.
+                # Only set for hyperparameter tuning jobs.
+            &quot;isHyperparameterTuningJob&quot;: True or False, # Whether this job is a hyperparameter tuning job.
+            &quot;isBuiltInAlgorithmJob&quot;: True or False, # Whether this job is a built-in Algorithm job.
+            &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
+                # Only set for built-in algorithms jobs.
+              &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
+                  # saves the trained model. Only set for successful jobs that don&#x27;t use
+                  # hyperparameter tuning.
+              &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
+              &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
+                  # trained.
+              &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
+            },
+            &quot;consumedMLUnits&quot;: 3.14, # The amount of ML units consumed by the job.
+            &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # The TensorFlow summary tag name used for optimizing hyperparameter tuning
+                # trials. See
+                # [`HyperparameterSpec.hyperparameterMetricTag`](#HyperparameterSpec.FIELDS.hyperparameter_metric_tag)
+                # for more information. Only set for hyperparameter tuning jobs.
           },
           &quot;errorMessage&quot;: &quot;A String&quot;, # Output only. The details of a failure or a cancellation.
-          &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
-              # prevent simultaneous updates of a job from overwriting each other.
-              # It is strongly suggested that systems make use of the `etag` in the
-              # read-modify-write cycle to perform job updates in order to avoid race
-              # conditions: An `etag` is returned in the response to `GetJob`, and
-              # systems are expected to put that etag in the request to `UpdateJob` to
-              # ensure that their change will be applied to the same version of the job.
+          &quot;predictionOutput&quot;: { # Represents results of a prediction job. # The current prediction job result.
+            &quot;errorCount&quot;: &quot;A String&quot;, # The number of data instances which resulted in errors.
+            &quot;nodeHours&quot;: 3.14, # Node hours used by the batch prediction job.
+            &quot;outputPath&quot;: &quot;A String&quot;, # The output Google Cloud Storage location provided at the job creation time.
+            &quot;predictionCount&quot;: &quot;A String&quot;, # The number of generated predictions.
+          },
+          &quot;startTime&quot;: &quot;A String&quot;, # Output only. When the job processing was started.
+          &quot;state&quot;: &quot;A String&quot;, # Output only. The detailed state of a job.
           &quot;trainingInput&quot;: { # Represents input parameters for a training job. When using the gcloud command # Input parameters to create a training job.
               # to submit your training job, you can specify the input parameters as
               # command-line arguments and/or in a YAML configuration file referenced from
               # the --config command-line argument. For details, see the guide to [submitting
               # a training job](/ai-platform/training/docs/training-jobs).
-            &quot;workerCount&quot;: &quot;A String&quot;, # Optional. The number of worker replicas to use for the training job. Each
-                # replica in the cluster will be of the type specified in `worker_type`.
-                #
-                # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
-                # set this value, you must also set `worker_type`.
-                #
-                # The default value is zero.
-            &quot;pythonModule&quot;: &quot;A String&quot;, # Required. The Python module name to run after installing the packages.
-            &quot;args&quot;: [ # Optional. Command-line arguments passed to the training application when it
-                # starts. If your job uses a custom container, then the arguments are passed
-                # to the container&#x27;s &lt;a class=&quot;external&quot; target=&quot;_blank&quot;
-                # href=&quot;https://docs.docker.com/engine/reference/builder/#entrypoint&quot;&gt;
-                # `ENTRYPOINT`&lt;/a&gt; command.
-              &quot;A String&quot;,
-            ],
-            &quot;parameterServerConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
-                #
-                # You should only set `parameterServerConfig.acceleratorConfig` if
-                # `parameterServerType` is set to a Compute Engine machine type. [Learn
-                # about restrictions on accelerator configurations for
-                # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-                #
-                # Set `parameterServerConfig.imageUri` only if you build a custom image for
-                # your parameter server. If `parameterServerConfig.imageUri` has not been
-                # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
-                # containers](/ai-platform/training/docs/distributed-training-containers).
-              &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
-                  # Registry. Learn more about [configuring custom
-                  # containers](/ai-platform/training/docs/distributed-training-containers).
-              &quot;containerArgs&quot;: [ # Arguments to the entrypoint command.
-                  # The following rules apply for container_command and container_args:
-                  # - If you do not supply command or args:
-                  #   The defaults defined in the Docker image are used.
-                  # - If you supply a command but no args:
-                  #   The default EntryPoint and the default Cmd defined in the Docker image
-                  #   are ignored. Your command is run without any arguments.
-                  # - If you supply only args:
-                  #   The default Entrypoint defined in the Docker image is run with the args
-                  #   that you supplied.
-                  # - If you supply a command and args:
-                  #   The default Entrypoint and the default Cmd defined in the Docker image
-                  #   are ignored. Your command is run with your args.
-                  # It cannot be set if custom container image is
-                  # not provided.
-                  # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-                  # both cannot be set at the same time.
-                &quot;A String&quot;,
-              ],
-              &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-                  # [Learn about restrictions on accelerator configurations for
-                  # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-                  # Note that the AcceleratorConfig can be used in both Jobs and Versions.
-                  # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
-                  # [accelerators for online
-                  # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-                &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
-                &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
-              },
-              &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-                  # the one used in the custom container. This field is required if the replica
-                  # is a TPU worker that uses a custom container. Otherwise, do not specify
-                  # this field. This must be a [runtime version that currently supports
-                  # training with
-                  # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-                  #
-                  # Note that the version of TensorFlow included in a runtime version may
-                  # differ from the numbering of the runtime version itself, because it may
-                  # have a different [patch
-                  # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-                  # In this field, you must specify the runtime version (TensorFlow minor
-                  # version). For example, if your custom container runs TensorFlow `1.x.y`,
-                  # specify `1.x`.
-              &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-                  # If provided, it will override default ENTRYPOINT of the docker image.
-                  # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-                  # It cannot be set if custom container image is
-                  # not provided.
-                  # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-                  # both cannot be set at the same time.
-                &quot;A String&quot;,
-              ],
-            },
-            &quot;encryptionConfig&quot;: { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
-                # protect resources created by a training job, instead of using Google&#x27;s
-                # default encryption. If this is set, then all resources created by the
-                # training job will be encrypted with the customer-managed encryption key
-                # that you specify.
-                #
-                # [Learn how and when to use CMEK with AI Platform
-                # Training](/ai-platform/training/docs/cmek).
-                # a resource.
-              &quot;kmsKeyName&quot;: &quot;A String&quot;, # The Cloud KMS resource identifier of the customer-managed encryption key
-                  # used to protect a resource, such as a training job. It has the following
-                  # format:
-                  # `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
-            },
-            &quot;hyperparameters&quot;: { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
-              &quot;params&quot;: [ # Required. The set of parameters to tune.
-                { # Represents a single hyperparameter to optimize.
-                  &quot;categoricalValues&quot;: [ # Required if type is `CATEGORICAL`. The list of possible categories.
-                    &quot;A String&quot;,
-                  ],
-                  &quot;parameterName&quot;: &quot;A String&quot;, # Required. The parameter name must be unique amongst all ParameterConfigs in
-                      # a HyperparameterSpec message. E.g., &quot;learning_rate&quot;.
-                  &quot;minValue&quot;: 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
-                      # should be unset if type is `CATEGORICAL`. This value should be integers if
-                      # type is INTEGER.
-                  &quot;discreteValues&quot;: [ # Required if type is `DISCRETE`.
-                      # A list of feasible points.
-                      # The list should be in strictly increasing order. For instance, this
-                      # parameter might have possible settings of 1.5, 2.5, and 4.0. This list
-                      # should not contain more than 1,000 values.
-                    3.14,
-                  ],
-                  &quot;scaleType&quot;: &quot;A String&quot;, # Optional. How the parameter should be scaled to the hypercube.
-                      # Leave unset for categorical parameters.
-                      # Some kind of scaling is strongly recommended for real or integral
-                      # parameters (e.g., `UNIT_LINEAR_SCALE`).
-                  &quot;maxValue&quot;: 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
-                      # should be unset if type is `CATEGORICAL`. This value should be integers if
-                      # type is `INTEGER`.
-                  &quot;type&quot;: &quot;A String&quot;, # Required. The type of the parameter.
-                },
-              ],
-              &quot;enableTrialEarlyStopping&quot;: True or False, # Optional. Indicates if the hyperparameter tuning job enables auto trial
-                  # early stopping.
-              &quot;resumePreviousJobId&quot;: &quot;A String&quot;, # Optional. The prior hyperparameter tuning job id that users hope to
-                  # continue with. The job id will be used to find the corresponding vizier
-                  # study guid and resume the study.
-              &quot;maxParallelTrials&quot;: 42, # Optional. The number of training trials to run concurrently.
-                  # You can reduce the time it takes to perform hyperparameter tuning by adding
-                  # trials in parallel. However, each trail only benefits from the information
-                  # gained in completed trials. That means that a trial does not get access to
-                  # the results of trials running at the same time, which could reduce the
-                  # quality of the overall optimization.
-                  #
-                  # Each trial will use the same scale tier and machine types.
-                  #
-                  # Defaults to one.
-              &quot;maxFailedTrials&quot;: 42, # Optional. The number of failed trials that need to be seen before failing
-                  # the hyperparameter tuning job. You can specify this field to override the
-                  # default failing criteria for AI Platform hyperparameter tuning jobs.
-                  #
-                  # Defaults to zero, which means the service decides when a hyperparameter
-                  # job should fail.
-              &quot;goal&quot;: &quot;A String&quot;, # Required. The type of goal to use for tuning. Available types are
-                  # `MAXIMIZE` and `MINIMIZE`.
-                  #
-                  # Defaults to `MAXIMIZE`.
-              &quot;maxTrials&quot;: 42, # Optional. How many training trials should be attempted to optimize
-                  # the specified hyperparameters.
-                  #
-                  # Defaults to one.
-              &quot;algorithm&quot;: &quot;A String&quot;, # Optional. The search algorithm specified for the hyperparameter
-                  # tuning job.
-                  # Uses the default AI Platform hyperparameter tuning
-                  # algorithm if unspecified.
-              &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # Optional. The TensorFlow summary tag name to use for optimizing trials. For
-                  # current versions of TensorFlow, this tag name should exactly match what is
-                  # shown in TensorBoard, including all scopes.  For versions of TensorFlow
-                  # prior to 0.12, this should be only the tag passed to tf.Summary.
-                  # By default, &quot;training/hptuning/metric&quot; will be used.
-            },
+            &quot;serviceAccount&quot;: &quot;A String&quot;, # Optional. Specifies the service account for workload run-as account.
+                # Users submitting jobs must have act-as permission on this run-as account.
+                # If not specified, then CMLE P4SA will be used by default.
             &quot;workerConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
                 #
                 # You should only set `workerConfig.acceleratorConfig` if `workerType` is set
@@ -2808,6 +2727,16 @@
                 # worker. If `workerConfig.imageUri` has not been set, AI Platform uses
                 # the value of `masterConfig.imageUri`. Learn more about [configuring custom
                 # containers](/ai-platform/training/docs/distributed-training-containers).
+              &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+                  # [Learn about restrictions on accelerator configurations for
+                  # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+                  # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+                  # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+                  # [accelerators for online
+                  # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+                &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
+                &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
+              },
               &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
                   # Registry. Learn more about [configuring custom
                   # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -2830,16 +2759,6 @@
                   # both cannot be set at the same time.
                 &quot;A String&quot;,
               ],
-              &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-                  # [Learn about restrictions on accelerator configurations for
-                  # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-                  # Note that the AcceleratorConfig can be used in both Jobs and Versions.
-                  # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
-                  # [accelerators for online
-                  # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-                &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
-                &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
-              },
               &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
                   # the one used in the custom container. This field is required if the replica
                   # is a TPU worker that uses a custom container. Otherwise, do not specify
@@ -2864,27 +2783,13 @@
                 &quot;A String&quot;,
               ],
             },
-            &quot;parameterServerCount&quot;: &quot;A String&quot;, # Optional. The number of parameter server replicas to use for the training
-                # job. Each replica in the cluster will be of the type specified in
-                # `parameter_server_type`.
+            &quot;useChiefInTfConfig&quot;: True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
+                # variable when training with a custom container. Defaults to `false`. [Learn
+                # more about this
+                # field.](/ai-platform/training/docs/distributed-training-details#chief-versus-master)
                 #
-                # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
-                # set this value, you must also set `parameter_server_type`.
-                #
-                # The default value is zero.
-            &quot;packageUris&quot;: [ # Required. The Google Cloud Storage location of the packages with
-                # the training program and any additional dependencies.
-                # The maximum number of package URIs is 100.
-              &quot;A String&quot;,
-            ],
-            &quot;evaluatorCount&quot;: &quot;A String&quot;, # Optional. The number of evaluator replicas to use for the training job.
-                # Each replica in the cluster will be of the type specified in
-                # `evaluator_type`.
-                #
-                # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
-                # set this value, you must also set `evaluator_type`.
-                #
-                # The default value is zero.
+                # This field has no effect for training jobs that don&#x27;t use a custom
+                # container.
             &quot;masterType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
                 # job&#x27;s master worker. You must specify this field when `scaleTier` is set to
                 # `CUSTOM`.
@@ -2937,65 +2842,27 @@
                 # field. Learn more about the [special configuration options for training
                 # with
                 # TPUs](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
-            &quot;runtimeVersion&quot;: &quot;A String&quot;, # Optional. The AI Platform runtime version to use for training. You must
-                # either specify this field or specify `masterConfig.imageUri`.
+            &quot;parameterServerConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
                 #
-                # For more information, see the [runtime version
-                # list](/ai-platform/training/docs/runtime-version-list) and learn [how to
-                # manage runtime versions](/ai-platform/training/docs/versioning).
-            &quot;evaluatorType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
-                # job&#x27;s evaluator nodes.
-                #
-                # The supported values are the same as those described in the entry for
-                # `masterType`.
-                #
-                # This value must be consistent with the category of machine type that
-                # `masterType` uses. In other words, both must be Compute Engine machine
-                # types or both must be legacy machine types.
-                #
-                # This value must be present when `scaleTier` is set to `CUSTOM` and
-                # `evaluatorCount` is greater than zero.
-            &quot;workerType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
-                # job&#x27;s worker nodes.
-                #
-                # The supported values are the same as those described in the entry for
-                # `masterType`.
-                #
-                # This value must be consistent with the category of machine type that
-                # `masterType` uses. In other words, both must be Compute Engine machine
-                # types or both must be legacy machine types.
-                #
-                # If you use `cloud_tpu` for this value, see special instructions for
-                # [configuring a custom TPU
-                # machine](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
-                #
-                # This value must be present when `scaleTier` is set to `CUSTOM` and
-                # `workerCount` is greater than zero.
-            &quot;region&quot;: &quot;A String&quot;, # Required. The region to run the training job in. See the [available
-                # regions](/ai-platform/training/docs/regions) for AI Platform Training.
-            &quot;parameterServerType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
-                # job&#x27;s parameter server.
-                #
-                # The supported values are the same as those described in the entry for
-                # `master_type`.
-                #
-                # This value must be consistent with the category of machine type that
-                # `masterType` uses. In other words, both must be Compute Engine machine
-                # types or both must be legacy machine types.
-                #
-                # This value must be present when `scaleTier` is set to `CUSTOM` and
-                # `parameter_server_count` is greater than zero.
-            &quot;masterConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for your master worker.
-                #
-                # You should only set `masterConfig.acceleratorConfig` if `masterType` is set
-                # to a Compute Engine machine type. Learn about [restrictions on accelerator
-                # configurations for
+                # You should only set `parameterServerConfig.acceleratorConfig` if
+                # `parameterServerType` is set to a Compute Engine machine type. [Learn
+                # about restrictions on accelerator configurations for
                 # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
                 #
-                # Set `masterConfig.imageUri` only if you build a custom image. Only one of
-                # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
-                # about [configuring custom
+                # Set `parameterServerConfig.imageUri` only if you build a custom image for
+                # your parameter server. If `parameterServerConfig.imageUri` has not been
+                # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
                 # containers](/ai-platform/training/docs/distributed-training-containers).
+              &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+                  # [Learn about restrictions on accelerator configurations for
+                  # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+                  # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+                  # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+                  # [accelerators for online
+                  # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+                &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
+                &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
+              },
               &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
                   # Registry. Learn more about [configuring custom
                   # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -3018,16 +2885,6 @@
                   # both cannot be set at the same time.
                 &quot;A String&quot;,
               ],
-              &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-                  # [Learn about restrictions on accelerator configurations for
-                  # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-                  # Note that the AcceleratorConfig can be used in both Jobs and Versions.
-                  # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
-                  # [accelerators for online
-                  # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-                &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
-                &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
-              },
               &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
                   # the one used in the custom container. This field is required if the replica
                   # is a TPU worker that uses a custom container. Otherwise, do not specify
@@ -3052,8 +2909,8 @@
                 &quot;A String&quot;,
               ],
             },
-            &quot;scaleTier&quot;: &quot;A String&quot;, # Required. Specifies the machine types, the number of replicas for workers
-                # and parameter servers.
+            &quot;region&quot;: &quot;A String&quot;, # Required. The region to run the training job in. See the [available
+                # regions](/ai-platform/training/docs/regions) for AI Platform Training.
             &quot;jobDir&quot;: &quot;A String&quot;, # Optional. A Google Cloud Storage path in which to store training outputs
                 # and other data needed for training. This path is passed to your TensorFlow
                 # program as the &#x27;--job-dir&#x27; command-line argument. The benefit of specifying
@@ -3072,6 +2929,308 @@
                 #
                 # Read more about the Python versions available for [each runtime
                 # version](/ml-engine/docs/runtime-version-list).
+            &quot;hyperparameters&quot;: { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
+              &quot;maxTrials&quot;: 42, # Optional. How many training trials should be attempted to optimize
+                  # the specified hyperparameters.
+                  #
+                  # Defaults to one.
+              &quot;enableTrialEarlyStopping&quot;: True or False, # Optional. Indicates if the hyperparameter tuning job enables auto trial
+                  # early stopping.
+              &quot;params&quot;: [ # Required. The set of parameters to tune.
+                { # Represents a single hyperparameter to optimize.
+                  &quot;minValue&quot;: 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
+                      # should be unset if type is `CATEGORICAL`. This value should be integers if
+                      # type is INTEGER.
+                  &quot;categoricalValues&quot;: [ # Required if type is `CATEGORICAL`. The list of possible categories.
+                    &quot;A String&quot;,
+                  ],
+                  &quot;scaleType&quot;: &quot;A String&quot;, # Optional. How the parameter should be scaled to the hypercube.
+                      # Leave unset for categorical parameters.
+                      # Some kind of scaling is strongly recommended for real or integral
+                      # parameters (e.g., `UNIT_LINEAR_SCALE`).
+                  &quot;discreteValues&quot;: [ # Required if type is `DISCRETE`.
+                      # A list of feasible points.
+                      # The list should be in strictly increasing order. For instance, this
+                      # parameter might have possible settings of 1.5, 2.5, and 4.0. This list
+                      # should not contain more than 1,000 values.
+                    3.14,
+                  ],
+                  &quot;type&quot;: &quot;A String&quot;, # Required. The type of the parameter.
+                  &quot;maxValue&quot;: 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
+                      # should be unset if type is `CATEGORICAL`. This value should be integers if
+                      # type is `INTEGER`.
+                  &quot;parameterName&quot;: &quot;A String&quot;, # Required. The parameter name must be unique amongst all ParameterConfigs in
+                      # a HyperparameterSpec message. E.g., &quot;learning_rate&quot;.
+                },
+              ],
+              &quot;maxFailedTrials&quot;: 42, # Optional. The number of failed trials that need to be seen before failing
+                  # the hyperparameter tuning job. You can specify this field to override the
+                  # default failing criteria for AI Platform hyperparameter tuning jobs.
+                  #
+                  # Defaults to zero, which means the service decides when a hyperparameter
+                  # job should fail.
+              &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # Optional. The TensorFlow summary tag name to use for optimizing trials. For
+                  # current versions of TensorFlow, this tag name should exactly match what is
+                  # shown in TensorBoard, including all scopes.  For versions of TensorFlow
+                  # prior to 0.12, this should be only the tag passed to tf.Summary.
+                  # By default, &quot;training/hptuning/metric&quot; will be used.
+              &quot;resumePreviousJobId&quot;: &quot;A String&quot;, # Optional. The prior hyperparameter tuning job id that users hope to
+                  # continue with. The job id will be used to find the corresponding vizier
+                  # study guid and resume the study.
+              &quot;goal&quot;: &quot;A String&quot;, # Required. The type of goal to use for tuning. Available types are
+                  # `MAXIMIZE` and `MINIMIZE`.
+                  #
+                  # Defaults to `MAXIMIZE`.
+              &quot;algorithm&quot;: &quot;A String&quot;, # Optional. The search algorithm specified for the hyperparameter
+                  # tuning job.
+                  # Uses the default AI Platform hyperparameter tuning
+                  # algorithm if unspecified.
+              &quot;maxParallelTrials&quot;: 42, # Optional. The number of training trials to run concurrently.
+                  # You can reduce the time it takes to perform hyperparameter tuning by adding
+                  # trials in parallel. However, each trail only benefits from the information
+                  # gained in completed trials. That means that a trial does not get access to
+                  # the results of trials running at the same time, which could reduce the
+                  # quality of the overall optimization.
+                  #
+                  # Each trial will use the same scale tier and machine types.
+                  #
+                  # Defaults to one.
+            },
+            &quot;evaluatorType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
+                # job&#x27;s evaluator nodes.
+                #
+                # The supported values are the same as those described in the entry for
+                # `masterType`.
+                #
+                # This value must be consistent with the category of machine type that
+                # `masterType` uses. In other words, both must be Compute Engine machine
+                # types or both must be legacy machine types.
+                #
+                # This value must be present when `scaleTier` is set to `CUSTOM` and
+                # `evaluatorCount` is greater than zero.
+            &quot;network&quot;: &quot;A String&quot;, # Optional. The full name of the Google Compute Engine
+                # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
+                # is peered. For example, projects/12345/global/networks/myVPC. Format is of
+                # the form projects/{project}/global/networks/{network}. Where {project} is a
+                # project number, as in &#x27;12345&#x27;, and {network} is network name.&quot;.
+                #
+                # Private services access must already be configured for the network. If left
+                # unspecified, the Job is not peered with any network. Learn more -
+                # Connecting Job to user network over private
+                # IP.
+            &quot;parameterServerType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
+                # job&#x27;s parameter server.
+                #
+                # The supported values are the same as those described in the entry for
+                # `master_type`.
+                #
+                # This value must be consistent with the category of machine type that
+                # `masterType` uses. In other words, both must be Compute Engine machine
+                # types or both must be legacy machine types.
+                #
+                # This value must be present when `scaleTier` is set to `CUSTOM` and
+                # `parameter_server_count` is greater than zero.
+            &quot;workerType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
+                # job&#x27;s worker nodes.
+                #
+                # The supported values are the same as those described in the entry for
+                # `masterType`.
+                #
+                # This value must be consistent with the category of machine type that
+                # `masterType` uses. In other words, both must be Compute Engine machine
+                # types or both must be legacy machine types.
+                #
+                # If you use `cloud_tpu` for this value, see special instructions for
+                # [configuring a custom TPU
+                # machine](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
+                #
+                # This value must be present when `scaleTier` is set to `CUSTOM` and
+                # `workerCount` is greater than zero.
+            &quot;masterConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for your master worker.
+                #
+                # You should only set `masterConfig.acceleratorConfig` if `masterType` is set
+                # to a Compute Engine machine type. Learn about [restrictions on accelerator
+                # configurations for
+                # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+                #
+                # Set `masterConfig.imageUri` only if you build a custom image. Only one of
+                # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
+                # about [configuring custom
+                # containers](/ai-platform/training/docs/distributed-training-containers).
+              &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+                  # [Learn about restrictions on accelerator configurations for
+                  # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+                  # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+                  # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+                  # [accelerators for online
+                  # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+                &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
+                &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
+              },
+              &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
+                  # Registry. Learn more about [configuring custom
+                  # containers](/ai-platform/training/docs/distributed-training-containers).
+              &quot;containerArgs&quot;: [ # Arguments to the entrypoint command.
+                  # The following rules apply for container_command and container_args:
+                  # - If you do not supply command or args:
+                  #   The defaults defined in the Docker image are used.
+                  # - If you supply a command but no args:
+                  #   The default EntryPoint and the default Cmd defined in the Docker image
+                  #   are ignored. Your command is run without any arguments.
+                  # - If you supply only args:
+                  #   The default Entrypoint defined in the Docker image is run with the args
+                  #   that you supplied.
+                  # - If you supply a command and args:
+                  #   The default Entrypoint and the default Cmd defined in the Docker image
+                  #   are ignored. Your command is run with your args.
+                  # It cannot be set if custom container image is
+                  # not provided.
+                  # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+                  # both cannot be set at the same time.
+                &quot;A String&quot;,
+              ],
+              &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+                  # the one used in the custom container. This field is required if the replica
+                  # is a TPU worker that uses a custom container. Otherwise, do not specify
+                  # this field. This must be a [runtime version that currently supports
+                  # training with
+                  # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+                  #
+                  # Note that the version of TensorFlow included in a runtime version may
+                  # differ from the numbering of the runtime version itself, because it may
+                  # have a different [patch
+                  # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+                  # In this field, you must specify the runtime version (TensorFlow minor
+                  # version). For example, if your custom container runs TensorFlow `1.x.y`,
+                  # specify `1.x`.
+              &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+                  # If provided, it will override default ENTRYPOINT of the docker image.
+                  # If not provided, the docker image&#x27;s ENTRYPOINT is used.
+                  # It cannot be set if custom container image is
+                  # not provided.
+                  # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+                  # both cannot be set at the same time.
+                &quot;A String&quot;,
+              ],
+            },
+            &quot;evaluatorCount&quot;: &quot;A String&quot;, # Optional. The number of evaluator replicas to use for the training job.
+                # Each replica in the cluster will be of the type specified in
+                # `evaluator_type`.
+                #
+                # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+                # set this value, you must also set `evaluator_type`.
+                #
+                # The default value is zero.
+            &quot;args&quot;: [ # Optional. Command-line arguments passed to the training application when it
+                # starts. If your job uses a custom container, then the arguments are passed
+                # to the container&#x27;s &lt;a class=&quot;external&quot; target=&quot;_blank&quot;
+                # href=&quot;https://docs.docker.com/engine/reference/builder/#entrypoint&quot;&gt;
+                # `ENTRYPOINT`&lt;/a&gt; command.
+              &quot;A String&quot;,
+            ],
+            &quot;pythonModule&quot;: &quot;A String&quot;, # Required. The Python module name to run after installing the packages.
+            &quot;runtimeVersion&quot;: &quot;A String&quot;, # Optional. The AI Platform runtime version to use for training. You must
+                # either specify this field or specify `masterConfig.imageUri`.
+                #
+                # For more information, see the [runtime version
+                # list](/ai-platform/training/docs/runtime-version-list) and learn [how to
+                # manage runtime versions](/ai-platform/training/docs/versioning).
+            &quot;parameterServerCount&quot;: &quot;A String&quot;, # Optional. The number of parameter server replicas to use for the training
+                # job. Each replica in the cluster will be of the type specified in
+                # `parameter_server_type`.
+                #
+                # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+                # set this value, you must also set `parameter_server_type`.
+                #
+                # The default value is zero.
+            &quot;evaluatorConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
+                #
+                # You should only set `evaluatorConfig.acceleratorConfig` if
+                # `evaluatorType` is set to a Compute Engine machine type. [Learn
+                # about restrictions on accelerator configurations for
+                # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+                #
+                # Set `evaluatorConfig.imageUri` only if you build a custom image for
+                # your evaluator. If `evaluatorConfig.imageUri` has not been
+                # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
+                # containers](/ai-platform/training/docs/distributed-training-containers).
+              &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+                  # [Learn about restrictions on accelerator configurations for
+                  # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+                  # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+                  # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+                  # [accelerators for online
+                  # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+                &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
+                &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
+              },
+              &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
+                  # Registry. Learn more about [configuring custom
+                  # containers](/ai-platform/training/docs/distributed-training-containers).
+              &quot;containerArgs&quot;: [ # Arguments to the entrypoint command.
+                  # The following rules apply for container_command and container_args:
+                  # - If you do not supply command or args:
+                  #   The defaults defined in the Docker image are used.
+                  # - If you supply a command but no args:
+                  #   The default EntryPoint and the default Cmd defined in the Docker image
+                  #   are ignored. Your command is run without any arguments.
+                  # - If you supply only args:
+                  #   The default Entrypoint defined in the Docker image is run with the args
+                  #   that you supplied.
+                  # - If you supply a command and args:
+                  #   The default Entrypoint and the default Cmd defined in the Docker image
+                  #   are ignored. Your command is run with your args.
+                  # It cannot be set if custom container image is
+                  # not provided.
+                  # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+                  # both cannot be set at the same time.
+                &quot;A String&quot;,
+              ],
+              &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+                  # the one used in the custom container. This field is required if the replica
+                  # is a TPU worker that uses a custom container. Otherwise, do not specify
+                  # this field. This must be a [runtime version that currently supports
+                  # training with
+                  # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+                  #
+                  # Note that the version of TensorFlow included in a runtime version may
+                  # differ from the numbering of the runtime version itself, because it may
+                  # have a different [patch
+                  # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+                  # In this field, you must specify the runtime version (TensorFlow minor
+                  # version). For example, if your custom container runs TensorFlow `1.x.y`,
+                  # specify `1.x`.
+              &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+                  # If provided, it will override default ENTRYPOINT of the docker image.
+                  # If not provided, the docker image&#x27;s ENTRYPOINT is used.
+                  # It cannot be set if custom container image is
+                  # not provided.
+                  # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+                  # both cannot be set at the same time.
+                &quot;A String&quot;,
+              ],
+            },
+            &quot;encryptionConfig&quot;: { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
+                # protect resources created by a training job, instead of using Google&#x27;s
+                # default encryption. If this is set, then all resources created by the
+                # training job will be encrypted with the customer-managed encryption key
+                # that you specify.
+                #
+                # [Learn how and when to use CMEK with AI Platform
+                # Training](/ai-platform/training/docs/cmek).
+                # a resource.
+              &quot;kmsKeyName&quot;: &quot;A String&quot;, # The Cloud KMS resource identifier of the customer-managed encryption key
+                  # used to protect a resource, such as a training job. It has the following
+                  # format:
+                  # `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
+            },
+            &quot;workerCount&quot;: &quot;A String&quot;, # Optional. The number of worker replicas to use for the training job. Each
+                # replica in the cluster will be of the type specified in `worker_type`.
+                #
+                # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+                # set this value, you must also set `worker_type`.
+                #
+                # The default value is zero.
             &quot;scheduling&quot;: { # All parameters related to scheduling of training jobs. # Optional. Scheduling options for a training job.
               &quot;maxWaitTime&quot;: &quot;A String&quot;,
               &quot;maxRunningTime&quot;: &quot;A String&quot;, # Optional. The maximum job running time, expressed in seconds. The field can
@@ -3098,171 +3257,24 @@
                   #   ...
                   # ```
             },
-            &quot;network&quot;: &quot;A String&quot;, # Optional. The full name of the Google Compute Engine
-                # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
-                # is peered. For example, projects/12345/global/networks/myVPC. Format is of
-                # the form projects/{project}/global/networks/{network}. Where {project} is a
-                # project number, as in &#x27;12345&#x27;, and {network} is network name.&quot;.
-                #
-                # Private services access must already be configured for the network. If left
-                # unspecified, the Job is not peered with any network. Learn more -
-                # Connecting Job to user network over private
-                # IP.
-            &quot;evaluatorConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
-                #
-                # You should only set `evaluatorConfig.acceleratorConfig` if
-                # `evaluatorType` is set to a Compute Engine machine type. [Learn
-                # about restrictions on accelerator configurations for
-                # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-                #
-                # Set `evaluatorConfig.imageUri` only if you build a custom image for
-                # your evaluator. If `evaluatorConfig.imageUri` has not been
-                # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
-                # containers](/ai-platform/training/docs/distributed-training-containers).
-              &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
-                  # Registry. Learn more about [configuring custom
-                  # containers](/ai-platform/training/docs/distributed-training-containers).
-              &quot;containerArgs&quot;: [ # Arguments to the entrypoint command.
-                  # The following rules apply for container_command and container_args:
-                  # - If you do not supply command or args:
-                  #   The defaults defined in the Docker image are used.
-                  # - If you supply a command but no args:
-                  #   The default EntryPoint and the default Cmd defined in the Docker image
-                  #   are ignored. Your command is run without any arguments.
-                  # - If you supply only args:
-                  #   The default Entrypoint defined in the Docker image is run with the args
-                  #   that you supplied.
-                  # - If you supply a command and args:
-                  #   The default Entrypoint and the default Cmd defined in the Docker image
-                  #   are ignored. Your command is run with your args.
-                  # It cannot be set if custom container image is
-                  # not provided.
-                  # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-                  # both cannot be set at the same time.
-                &quot;A String&quot;,
-              ],
-              &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-                  # [Learn about restrictions on accelerator configurations for
-                  # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-                  # Note that the AcceleratorConfig can be used in both Jobs and Versions.
-                  # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
-                  # [accelerators for online
-                  # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-                &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
-                &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
-              },
-              &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-                  # the one used in the custom container. This field is required if the replica
-                  # is a TPU worker that uses a custom container. Otherwise, do not specify
-                  # this field. This must be a [runtime version that currently supports
-                  # training with
-                  # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-                  #
-                  # Note that the version of TensorFlow included in a runtime version may
-                  # differ from the numbering of the runtime version itself, because it may
-                  # have a different [patch
-                  # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-                  # In this field, you must specify the runtime version (TensorFlow minor
-                  # version). For example, if your custom container runs TensorFlow `1.x.y`,
-                  # specify `1.x`.
-              &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-                  # If provided, it will override default ENTRYPOINT of the docker image.
-                  # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-                  # It cannot be set if custom container image is
-                  # not provided.
-                  # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-                  # both cannot be set at the same time.
-                &quot;A String&quot;,
-              ],
-            },
-            &quot;useChiefInTfConfig&quot;: True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
-                # variable when training with a custom container. Defaults to `false`. [Learn
-                # more about this
-                # field.](/ai-platform/training/docs/distributed-training-details#chief-versus-master)
-                #
-                # This field has no effect for training jobs that don&#x27;t use a custom
-                # container.
-          },
-          &quot;state&quot;: &quot;A String&quot;, # Output only. The detailed state of a job.
-          &quot;jobId&quot;: &quot;A String&quot;, # Required. The user-specified id of the job.
-          &quot;endTime&quot;: &quot;A String&quot;, # Output only. When the job processing was completed.
-          &quot;startTime&quot;: &quot;A String&quot;, # Output only. When the job processing was started.
-          &quot;predictionOutput&quot;: { # Represents results of a prediction job. # The current prediction job result.
-            &quot;errorCount&quot;: &quot;A String&quot;, # The number of data instances which resulted in errors.
-            &quot;outputPath&quot;: &quot;A String&quot;, # The output Google Cloud Storage location provided at the job creation time.
-            &quot;nodeHours&quot;: 3.14, # Node hours used by the batch prediction job.
-            &quot;predictionCount&quot;: &quot;A String&quot;, # The number of generated predictions.
-          },
-          &quot;trainingOutput&quot;: { # Represents results of a training job. Output only. # The current training job result.
-            &quot;isBuiltInAlgorithmJob&quot;: True or False, # Whether this job is a built-in Algorithm job.
-            &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
-                # Only set for built-in algorithms jobs.
-              &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
-                  # saves the trained model. Only set for successful jobs that don&#x27;t use
-                  # hyperparameter tuning.
-              &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
-              &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
-                  # trained.
-              &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
-            },
-            &quot;trials&quot;: [ # Results for individual Hyperparameter trials.
-                # Only set for hyperparameter tuning jobs.
-              { # Represents the result of a single hyperparameter tuning trial from a
-                  # training job. The TrainingOutput object that is returned on successful
-                  # completion of a training job with hyperparameter tuning includes a list
-                  # of HyperparameterOutput objects, one for each successful trial.
-                &quot;trialId&quot;: &quot;A String&quot;, # The trial id for these results.
-                &quot;endTime&quot;: &quot;A String&quot;, # Output only. End time for the trial.
-                &quot;startTime&quot;: &quot;A String&quot;, # Output only. Start time for the trial.
-                &quot;isTrialStoppedEarly&quot;: True or False, # True if the trial is stopped early.
-                &quot;finalMetric&quot;: { # An observed value of a metric. # The final objective metric seen for this trial.
-                  &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
-                  &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
-                },
-                &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
-                    # Only set for trials of built-in algorithms jobs that have succeeded.
-                  &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
-                      # saves the trained model. Only set for successful jobs that don&#x27;t use
-                      # hyperparameter tuning.
-                  &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
-                  &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
-                      # trained.
-                  &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
-                },
-                &quot;state&quot;: &quot;A String&quot;, # Output only. The detailed state of the trial.
-                &quot;allMetrics&quot;: [ # All recorded object metrics for this trial. This field is not currently
-                    # populated.
-                  { # An observed value of a metric.
-                    &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
-                    &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
-                  },
-                ],
-                &quot;hyperparameters&quot;: { # The hyperparameters given to this trial.
-                  &quot;a_key&quot;: &quot;A String&quot;,
-                },
-              },
+            &quot;scaleTier&quot;: &quot;A String&quot;, # Required. Specifies the machine types, the number of replicas for workers
+                # and parameter servers.
+            &quot;packageUris&quot;: [ # Required. The Google Cloud Storage location of the packages with
+                # the training program and any additional dependencies.
+                # The maximum number of package URIs is 100.
+              &quot;A String&quot;,
             ],
-            &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # The TensorFlow summary tag name used for optimizing hyperparameter tuning
-                # trials. See
-                # [`HyperparameterSpec.hyperparameterMetricTag`](#HyperparameterSpec.FIELDS.hyperparameter_metric_tag)
-                # for more information. Only set for hyperparameter tuning jobs.
-            &quot;completedTrialCount&quot;: &quot;A String&quot;, # The number of hyperparameter tuning trials that completed successfully.
-                # Only set for hyperparameter tuning jobs.
-            &quot;isHyperparameterTuningJob&quot;: True or False, # Whether this job is a hyperparameter tuning job.
-            &quot;consumedMLUnits&quot;: 3.14, # The amount of ML units consumed by the job.
           },
-          &quot;createTime&quot;: &quot;A String&quot;, # Output only. When the job was created.
-          &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your jobs.
-              # Each label is a key-value pair, where both the key and the value are
-              # arbitrary strings that you supply.
-              # For more information, see the documentation on
-              # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
-            &quot;a_key&quot;: &quot;A String&quot;,
-          },
+          &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
+              # prevent simultaneous updates of a job from overwriting each other.
+              # It is strongly suggested that systems make use of the `etag` in the
+              # read-modify-write cycle to perform job updates in order to avoid race
+              # conditions: An `etag` is returned in the response to `GetJob`, and
+              # systems are expected to put that etag in the request to `UpdateJob` to
+              # ensure that their change will be applied to the same version of the job.
+          &quot;jobId&quot;: &quot;A String&quot;, # Required. The user-specified id of the job.
         },
     ],
-    &quot;nextPageToken&quot;: &quot;A String&quot;, # Optional. Pass this token as the `page_token` field of the request for a
-        # subsequent call.
   }</pre>
 </div>
 
@@ -3292,20 +3304,34 @@
     The object takes the form of:
 
 { # Represents a training or prediction job.
+    &quot;createTime&quot;: &quot;A String&quot;, # Output only. When the job was created.
     &quot;predictionInput&quot;: { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
+      &quot;dataFormat&quot;: &quot;A String&quot;, # Required. The format of the input data files.
+      &quot;outputPath&quot;: &quot;A String&quot;, # Required. The output Google Cloud Storage location.
+      &quot;inputPaths&quot;: [ # Required. The Cloud Storage location of the input data files. May contain
+          # &lt;a href=&quot;/storage/docs/gsutil/addlhelp/WildcardNames&quot;&gt;wildcards&lt;/a&gt;.
+        &quot;A String&quot;,
+      ],
+      &quot;region&quot;: &quot;A String&quot;, # Required. The Google Compute Engine region to run the prediction job in.
+          # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
+          # for AI Platform services.
       &quot;versionName&quot;: &quot;A String&quot;, # Use this field if you want to specify a version of the model to use. The
           # string is formatted the same way as `model_version`, with the addition
           # of the version information:
           #
           # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION&quot;`
+      &quot;uri&quot;: &quot;A String&quot;, # Use this field if you want to specify a Google Cloud Storage path for
+          # the model to use.
+      &quot;outputDataFormat&quot;: &quot;A String&quot;, # Optional. Format of the output data files, defaults to JSON.
+      &quot;runtimeVersion&quot;: &quot;A String&quot;, # Optional. The AI Platform runtime version to use for this batch
+          # prediction. If not set, AI Platform will pick the runtime version used
+          # during the CreateVersion request for this model version, or choose the
+          # latest stable version when model version information is not available
+          # such as when the model is specified by uri.
       &quot;modelName&quot;: &quot;A String&quot;, # Use this field if you want to use the default version for the specified
           # model. The string must use the following format:
           #
           # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL&quot;`
-      &quot;uri&quot;: &quot;A String&quot;, # Use this field if you want to specify a Google Cloud Storage path for
-          # the model to use.
-      &quot;maxWorkerCount&quot;: &quot;A String&quot;, # Optional. The maximum number of workers to be used for parallel processing.
-          # Defaults to 10 if not specified.
       &quot;signatureName&quot;: &quot;A String&quot;, # Optional. The name of the signature defined in the SavedModel to use for
           # this job. Please refer to
           # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
@@ -3314,202 +3340,96 @@
           # Defaults to
           # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
           # , which is &quot;serving_default&quot;.
-      &quot;outputPath&quot;: &quot;A String&quot;, # Required. The output Google Cloud Storage location.
-      &quot;outputDataFormat&quot;: &quot;A String&quot;, # Optional. Format of the output data files, defaults to JSON.
-      &quot;dataFormat&quot;: &quot;A String&quot;, # Required. The format of the input data files.
       &quot;batchSize&quot;: &quot;A String&quot;, # Optional. Number of records per batch, defaults to 64.
           # The service will buffer batch_size number of records in memory before
           # invoking one Tensorflow prediction call internally. So take the record
           # size and memory available into consideration when setting this parameter.
-      &quot;runtimeVersion&quot;: &quot;A String&quot;, # Optional. The AI Platform runtime version to use for this batch
-          # prediction. If not set, AI Platform will pick the runtime version used
-          # during the CreateVersion request for this model version, or choose the
-          # latest stable version when model version information is not available
-          # such as when the model is specified by uri.
-      &quot;inputPaths&quot;: [ # Required. The Cloud Storage location of the input data files. May contain
-          # &lt;a href=&quot;/storage/docs/gsutil/addlhelp/WildcardNames&quot;&gt;wildcards&lt;/a&gt;.
-        &quot;A String&quot;,
+      &quot;maxWorkerCount&quot;: &quot;A String&quot;, # Optional. The maximum number of workers to be used for parallel processing.
+          # Defaults to 10 if not specified.
+    },
+    &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your jobs.
+        # Each label is a key-value pair, where both the key and the value are
+        # arbitrary strings that you supply.
+        # For more information, see the documentation on
+        # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
+      &quot;a_key&quot;: &quot;A String&quot;,
+    },
+    &quot;endTime&quot;: &quot;A String&quot;, # Output only. When the job processing was completed.
+    &quot;trainingOutput&quot;: { # Represents results of a training job. Output only. # The current training job result.
+      &quot;trials&quot;: [ # Results for individual Hyperparameter trials.
+          # Only set for hyperparameter tuning jobs.
+        { # Represents the result of a single hyperparameter tuning trial from a
+            # training job. The TrainingOutput object that is returned on successful
+            # completion of a training job with hyperparameter tuning includes a list
+            # of HyperparameterOutput objects, one for each successful trial.
+          &quot;endTime&quot;: &quot;A String&quot;, # Output only. End time for the trial.
+          &quot;finalMetric&quot;: { # An observed value of a metric. # The final objective metric seen for this trial.
+            &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
+            &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
+          },
+          &quot;hyperparameters&quot;: { # The hyperparameters given to this trial.
+            &quot;a_key&quot;: &quot;A String&quot;,
+          },
+          &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
+              # Only set for trials of built-in algorithms jobs that have succeeded.
+            &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
+                # saves the trained model. Only set for successful jobs that don&#x27;t use
+                # hyperparameter tuning.
+            &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
+            &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
+                # trained.
+            &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
+          },
+          &quot;startTime&quot;: &quot;A String&quot;, # Output only. Start time for the trial.
+          &quot;allMetrics&quot;: [ # All recorded object metrics for this trial. This field is not currently
+              # populated.
+            { # An observed value of a metric.
+              &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
+              &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
+            },
+          ],
+          &quot;trialId&quot;: &quot;A String&quot;, # The trial id for these results.
+          &quot;isTrialStoppedEarly&quot;: True or False, # True if the trial is stopped early.
+          &quot;state&quot;: &quot;A String&quot;, # Output only. The detailed state of the trial.
+        },
       ],
-      &quot;region&quot;: &quot;A String&quot;, # Required. The Google Compute Engine region to run the prediction job in.
-          # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
-          # for AI Platform services.
+      &quot;completedTrialCount&quot;: &quot;A String&quot;, # The number of hyperparameter tuning trials that completed successfully.
+          # Only set for hyperparameter tuning jobs.
+      &quot;isHyperparameterTuningJob&quot;: True or False, # Whether this job is a hyperparameter tuning job.
+      &quot;isBuiltInAlgorithmJob&quot;: True or False, # Whether this job is a built-in Algorithm job.
+      &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
+          # Only set for built-in algorithms jobs.
+        &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
+            # saves the trained model. Only set for successful jobs that don&#x27;t use
+            # hyperparameter tuning.
+        &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
+        &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
+            # trained.
+        &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
+      },
+      &quot;consumedMLUnits&quot;: 3.14, # The amount of ML units consumed by the job.
+      &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # The TensorFlow summary tag name used for optimizing hyperparameter tuning
+          # trials. See
+          # [`HyperparameterSpec.hyperparameterMetricTag`](#HyperparameterSpec.FIELDS.hyperparameter_metric_tag)
+          # for more information. Only set for hyperparameter tuning jobs.
     },
     &quot;errorMessage&quot;: &quot;A String&quot;, # Output only. The details of a failure or a cancellation.
-    &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
-        # prevent simultaneous updates of a job from overwriting each other.
-        # It is strongly suggested that systems make use of the `etag` in the
-        # read-modify-write cycle to perform job updates in order to avoid race
-        # conditions: An `etag` is returned in the response to `GetJob`, and
-        # systems are expected to put that etag in the request to `UpdateJob` to
-        # ensure that their change will be applied to the same version of the job.
+    &quot;predictionOutput&quot;: { # Represents results of a prediction job. # The current prediction job result.
+      &quot;errorCount&quot;: &quot;A String&quot;, # The number of data instances which resulted in errors.
+      &quot;nodeHours&quot;: 3.14, # Node hours used by the batch prediction job.
+      &quot;outputPath&quot;: &quot;A String&quot;, # The output Google Cloud Storage location provided at the job creation time.
+      &quot;predictionCount&quot;: &quot;A String&quot;, # The number of generated predictions.
+    },
+    &quot;startTime&quot;: &quot;A String&quot;, # Output only. When the job processing was started.
+    &quot;state&quot;: &quot;A String&quot;, # Output only. The detailed state of a job.
     &quot;trainingInput&quot;: { # Represents input parameters for a training job. When using the gcloud command # Input parameters to create a training job.
         # to submit your training job, you can specify the input parameters as
         # command-line arguments and/or in a YAML configuration file referenced from
         # the --config command-line argument. For details, see the guide to [submitting
         # a training job](/ai-platform/training/docs/training-jobs).
-      &quot;workerCount&quot;: &quot;A String&quot;, # Optional. The number of worker replicas to use for the training job. Each
-          # replica in the cluster will be of the type specified in `worker_type`.
-          #
-          # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
-          # set this value, you must also set `worker_type`.
-          #
-          # The default value is zero.
-      &quot;pythonModule&quot;: &quot;A String&quot;, # Required. The Python module name to run after installing the packages.
-      &quot;args&quot;: [ # Optional. Command-line arguments passed to the training application when it
-          # starts. If your job uses a custom container, then the arguments are passed
-          # to the container&#x27;s &lt;a class=&quot;external&quot; target=&quot;_blank&quot;
-          # href=&quot;https://docs.docker.com/engine/reference/builder/#entrypoint&quot;&gt;
-          # `ENTRYPOINT`&lt;/a&gt; command.
-        &quot;A String&quot;,
-      ],
-      &quot;parameterServerConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
-          #
-          # You should only set `parameterServerConfig.acceleratorConfig` if
-          # `parameterServerType` is set to a Compute Engine machine type. [Learn
-          # about restrictions on accelerator configurations for
-          # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-          #
-          # Set `parameterServerConfig.imageUri` only if you build a custom image for
-          # your parameter server. If `parameterServerConfig.imageUri` has not been
-          # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
-          # containers](/ai-platform/training/docs/distributed-training-containers).
-        &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
-            # Registry. Learn more about [configuring custom
-            # containers](/ai-platform/training/docs/distributed-training-containers).
-        &quot;containerArgs&quot;: [ # Arguments to the entrypoint command.
-            # The following rules apply for container_command and container_args:
-            # - If you do not supply command or args:
-            #   The defaults defined in the Docker image are used.
-            # - If you supply a command but no args:
-            #   The default EntryPoint and the default Cmd defined in the Docker image
-            #   are ignored. Your command is run without any arguments.
-            # - If you supply only args:
-            #   The default Entrypoint defined in the Docker image is run with the args
-            #   that you supplied.
-            # - If you supply a command and args:
-            #   The default Entrypoint and the default Cmd defined in the Docker image
-            #   are ignored. Your command is run with your args.
-            # It cannot be set if custom container image is
-            # not provided.
-            # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-            # both cannot be set at the same time.
-          &quot;A String&quot;,
-        ],
-        &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-            # [Learn about restrictions on accelerator configurations for
-            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
-            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
-            # [accelerators for online
-            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-          &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
-          &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
-        },
-        &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-            # the one used in the custom container. This field is required if the replica
-            # is a TPU worker that uses a custom container. Otherwise, do not specify
-            # this field. This must be a [runtime version that currently supports
-            # training with
-            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-            #
-            # Note that the version of TensorFlow included in a runtime version may
-            # differ from the numbering of the runtime version itself, because it may
-            # have a different [patch
-            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-            # In this field, you must specify the runtime version (TensorFlow minor
-            # version). For example, if your custom container runs TensorFlow `1.x.y`,
-            # specify `1.x`.
-        &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-            # If provided, it will override default ENTRYPOINT of the docker image.
-            # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-            # It cannot be set if custom container image is
-            # not provided.
-            # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-            # both cannot be set at the same time.
-          &quot;A String&quot;,
-        ],
-      },
-      &quot;encryptionConfig&quot;: { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
-          # protect resources created by a training job, instead of using Google&#x27;s
-          # default encryption. If this is set, then all resources created by the
-          # training job will be encrypted with the customer-managed encryption key
-          # that you specify.
-          #
-          # [Learn how and when to use CMEK with AI Platform
-          # Training](/ai-platform/training/docs/cmek).
-          # a resource.
-        &quot;kmsKeyName&quot;: &quot;A String&quot;, # The Cloud KMS resource identifier of the customer-managed encryption key
-            # used to protect a resource, such as a training job. It has the following
-            # format:
-            # `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
-      },
-      &quot;hyperparameters&quot;: { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
-        &quot;params&quot;: [ # Required. The set of parameters to tune.
-          { # Represents a single hyperparameter to optimize.
-            &quot;categoricalValues&quot;: [ # Required if type is `CATEGORICAL`. The list of possible categories.
-              &quot;A String&quot;,
-            ],
-            &quot;parameterName&quot;: &quot;A String&quot;, # Required. The parameter name must be unique amongst all ParameterConfigs in
-                # a HyperparameterSpec message. E.g., &quot;learning_rate&quot;.
-            &quot;minValue&quot;: 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
-                # should be unset if type is `CATEGORICAL`. This value should be integers if
-                # type is INTEGER.
-            &quot;discreteValues&quot;: [ # Required if type is `DISCRETE`.
-                # A list of feasible points.
-                # The list should be in strictly increasing order. For instance, this
-                # parameter might have possible settings of 1.5, 2.5, and 4.0. This list
-                # should not contain more than 1,000 values.
-              3.14,
-            ],
-            &quot;scaleType&quot;: &quot;A String&quot;, # Optional. How the parameter should be scaled to the hypercube.
-                # Leave unset for categorical parameters.
-                # Some kind of scaling is strongly recommended for real or integral
-                # parameters (e.g., `UNIT_LINEAR_SCALE`).
-            &quot;maxValue&quot;: 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
-                # should be unset if type is `CATEGORICAL`. This value should be integers if
-                # type is `INTEGER`.
-            &quot;type&quot;: &quot;A String&quot;, # Required. The type of the parameter.
-          },
-        ],
-        &quot;enableTrialEarlyStopping&quot;: True or False, # Optional. Indicates if the hyperparameter tuning job enables auto trial
-            # early stopping.
-        &quot;resumePreviousJobId&quot;: &quot;A String&quot;, # Optional. The prior hyperparameter tuning job id that users hope to
-            # continue with. The job id will be used to find the corresponding vizier
-            # study guid and resume the study.
-        &quot;maxParallelTrials&quot;: 42, # Optional. The number of training trials to run concurrently.
-            # You can reduce the time it takes to perform hyperparameter tuning by adding
-            # trials in parallel. However, each trail only benefits from the information
-            # gained in completed trials. That means that a trial does not get access to
-            # the results of trials running at the same time, which could reduce the
-            # quality of the overall optimization.
-            #
-            # Each trial will use the same scale tier and machine types.
-            #
-            # Defaults to one.
-        &quot;maxFailedTrials&quot;: 42, # Optional. The number of failed trials that need to be seen before failing
-            # the hyperparameter tuning job. You can specify this field to override the
-            # default failing criteria for AI Platform hyperparameter tuning jobs.
-            #
-            # Defaults to zero, which means the service decides when a hyperparameter
-            # job should fail.
-        &quot;goal&quot;: &quot;A String&quot;, # Required. The type of goal to use for tuning. Available types are
-            # `MAXIMIZE` and `MINIMIZE`.
-            #
-            # Defaults to `MAXIMIZE`.
-        &quot;maxTrials&quot;: 42, # Optional. How many training trials should be attempted to optimize
-            # the specified hyperparameters.
-            #
-            # Defaults to one.
-        &quot;algorithm&quot;: &quot;A String&quot;, # Optional. The search algorithm specified for the hyperparameter
-            # tuning job.
-            # Uses the default AI Platform hyperparameter tuning
-            # algorithm if unspecified.
-        &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # Optional. The TensorFlow summary tag name to use for optimizing trials. For
-            # current versions of TensorFlow, this tag name should exactly match what is
-            # shown in TensorBoard, including all scopes.  For versions of TensorFlow
-            # prior to 0.12, this should be only the tag passed to tf.Summary.
-            # By default, &quot;training/hptuning/metric&quot; will be used.
-      },
+      &quot;serviceAccount&quot;: &quot;A String&quot;, # Optional. Specifies the service account for workload run-as account.
+          # Users submitting jobs must have act-as permission on this run-as account.
+          # If not specified, then CMLE P4SA will be used by default.
       &quot;workerConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
           #
           # You should only set `workerConfig.acceleratorConfig` if `workerType` is set
@@ -3521,6 +3441,16 @@
           # worker. If `workerConfig.imageUri` has not been set, AI Platform uses
           # the value of `masterConfig.imageUri`. Learn more about [configuring custom
           # containers](/ai-platform/training/docs/distributed-training-containers).
+        &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+            # [Learn about restrictions on accelerator configurations for
+            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+            # [accelerators for online
+            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+          &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
+          &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
+        },
         &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
             # Registry. Learn more about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -3543,16 +3473,6 @@
             # both cannot be set at the same time.
           &quot;A String&quot;,
         ],
-        &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-            # [Learn about restrictions on accelerator configurations for
-            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
-            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
-            # [accelerators for online
-            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-          &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
-          &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
-        },
         &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
             # the one used in the custom container. This field is required if the replica
             # is a TPU worker that uses a custom container. Otherwise, do not specify
@@ -3577,27 +3497,13 @@
           &quot;A String&quot;,
         ],
       },
-      &quot;parameterServerCount&quot;: &quot;A String&quot;, # Optional. The number of parameter server replicas to use for the training
-          # job. Each replica in the cluster will be of the type specified in
-          # `parameter_server_type`.
+      &quot;useChiefInTfConfig&quot;: True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
+          # variable when training with a custom container. Defaults to `false`. [Learn
+          # more about this
+          # field.](/ai-platform/training/docs/distributed-training-details#chief-versus-master)
           #
-          # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
-          # set this value, you must also set `parameter_server_type`.
-          #
-          # The default value is zero.
-      &quot;packageUris&quot;: [ # Required. The Google Cloud Storage location of the packages with
-          # the training program and any additional dependencies.
-          # The maximum number of package URIs is 100.
-        &quot;A String&quot;,
-      ],
-      &quot;evaluatorCount&quot;: &quot;A String&quot;, # Optional. The number of evaluator replicas to use for the training job.
-          # Each replica in the cluster will be of the type specified in
-          # `evaluator_type`.
-          #
-          # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
-          # set this value, you must also set `evaluator_type`.
-          #
-          # The default value is zero.
+          # This field has no effect for training jobs that don&#x27;t use a custom
+          # container.
       &quot;masterType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
           # job&#x27;s master worker. You must specify this field when `scaleTier` is set to
           # `CUSTOM`.
@@ -3650,65 +3556,27 @@
           # field. Learn more about the [special configuration options for training
           # with
           # TPUs](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
-      &quot;runtimeVersion&quot;: &quot;A String&quot;, # Optional. The AI Platform runtime version to use for training. You must
-          # either specify this field or specify `masterConfig.imageUri`.
+      &quot;parameterServerConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
           #
-          # For more information, see the [runtime version
-          # list](/ai-platform/training/docs/runtime-version-list) and learn [how to
-          # manage runtime versions](/ai-platform/training/docs/versioning).
-      &quot;evaluatorType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
-          # job&#x27;s evaluator nodes.
-          #
-          # The supported values are the same as those described in the entry for
-          # `masterType`.
-          #
-          # This value must be consistent with the category of machine type that
-          # `masterType` uses. In other words, both must be Compute Engine machine
-          # types or both must be legacy machine types.
-          #
-          # This value must be present when `scaleTier` is set to `CUSTOM` and
-          # `evaluatorCount` is greater than zero.
-      &quot;workerType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
-          # job&#x27;s worker nodes.
-          #
-          # The supported values are the same as those described in the entry for
-          # `masterType`.
-          #
-          # This value must be consistent with the category of machine type that
-          # `masterType` uses. In other words, both must be Compute Engine machine
-          # types or both must be legacy machine types.
-          #
-          # If you use `cloud_tpu` for this value, see special instructions for
-          # [configuring a custom TPU
-          # machine](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
-          #
-          # This value must be present when `scaleTier` is set to `CUSTOM` and
-          # `workerCount` is greater than zero.
-      &quot;region&quot;: &quot;A String&quot;, # Required. The region to run the training job in. See the [available
-          # regions](/ai-platform/training/docs/regions) for AI Platform Training.
-      &quot;parameterServerType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
-          # job&#x27;s parameter server.
-          #
-          # The supported values are the same as those described in the entry for
-          # `master_type`.
-          #
-          # This value must be consistent with the category of machine type that
-          # `masterType` uses. In other words, both must be Compute Engine machine
-          # types or both must be legacy machine types.
-          #
-          # This value must be present when `scaleTier` is set to `CUSTOM` and
-          # `parameter_server_count` is greater than zero.
-      &quot;masterConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for your master worker.
-          #
-          # You should only set `masterConfig.acceleratorConfig` if `masterType` is set
-          # to a Compute Engine machine type. Learn about [restrictions on accelerator
-          # configurations for
+          # You should only set `parameterServerConfig.acceleratorConfig` if
+          # `parameterServerType` is set to a Compute Engine machine type. [Learn
+          # about restrictions on accelerator configurations for
           # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
           #
-          # Set `masterConfig.imageUri` only if you build a custom image. Only one of
-          # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
-          # about [configuring custom
+          # Set `parameterServerConfig.imageUri` only if you build a custom image for
+          # your parameter server. If `parameterServerConfig.imageUri` has not been
+          # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
           # containers](/ai-platform/training/docs/distributed-training-containers).
+        &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+            # [Learn about restrictions on accelerator configurations for
+            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+            # [accelerators for online
+            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+          &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
+          &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
+        },
         &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
             # Registry. Learn more about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -3731,16 +3599,6 @@
             # both cannot be set at the same time.
           &quot;A String&quot;,
         ],
-        &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-            # [Learn about restrictions on accelerator configurations for
-            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
-            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
-            # [accelerators for online
-            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-          &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
-          &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
-        },
         &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
             # the one used in the custom container. This field is required if the replica
             # is a TPU worker that uses a custom container. Otherwise, do not specify
@@ -3765,8 +3623,8 @@
           &quot;A String&quot;,
         ],
       },
-      &quot;scaleTier&quot;: &quot;A String&quot;, # Required. Specifies the machine types, the number of replicas for workers
-          # and parameter servers.
+      &quot;region&quot;: &quot;A String&quot;, # Required. The region to run the training job in. See the [available
+          # regions](/ai-platform/training/docs/regions) for AI Platform Training.
       &quot;jobDir&quot;: &quot;A String&quot;, # Optional. A Google Cloud Storage path in which to store training outputs
           # and other data needed for training. This path is passed to your TensorFlow
           # program as the &#x27;--job-dir&#x27; command-line argument. The benefit of specifying
@@ -3785,6 +3643,308 @@
           #
           # Read more about the Python versions available for [each runtime
           # version](/ml-engine/docs/runtime-version-list).
+      &quot;hyperparameters&quot;: { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
+        &quot;maxTrials&quot;: 42, # Optional. How many training trials should be attempted to optimize
+            # the specified hyperparameters.
+            #
+            # Defaults to one.
+        &quot;enableTrialEarlyStopping&quot;: True or False, # Optional. Indicates if the hyperparameter tuning job enables auto trial
+            # early stopping.
+        &quot;params&quot;: [ # Required. The set of parameters to tune.
+          { # Represents a single hyperparameter to optimize.
+            &quot;minValue&quot;: 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
+                # should be unset if type is `CATEGORICAL`. This value should be integers if
+                # type is INTEGER.
+            &quot;categoricalValues&quot;: [ # Required if type is `CATEGORICAL`. The list of possible categories.
+              &quot;A String&quot;,
+            ],
+            &quot;scaleType&quot;: &quot;A String&quot;, # Optional. How the parameter should be scaled to the hypercube.
+                # Leave unset for categorical parameters.
+                # Some kind of scaling is strongly recommended for real or integral
+                # parameters (e.g., `UNIT_LINEAR_SCALE`).
+            &quot;discreteValues&quot;: [ # Required if type is `DISCRETE`.
+                # A list of feasible points.
+                # The list should be in strictly increasing order. For instance, this
+                # parameter might have possible settings of 1.5, 2.5, and 4.0. This list
+                # should not contain more than 1,000 values.
+              3.14,
+            ],
+            &quot;type&quot;: &quot;A String&quot;, # Required. The type of the parameter.
+            &quot;maxValue&quot;: 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
+                # should be unset if type is `CATEGORICAL`. This value should be integers if
+                # type is `INTEGER`.
+            &quot;parameterName&quot;: &quot;A String&quot;, # Required. The parameter name must be unique amongst all ParameterConfigs in
+                # a HyperparameterSpec message. E.g., &quot;learning_rate&quot;.
+          },
+        ],
+        &quot;maxFailedTrials&quot;: 42, # Optional. The number of failed trials that need to be seen before failing
+            # the hyperparameter tuning job. You can specify this field to override the
+            # default failing criteria for AI Platform hyperparameter tuning jobs.
+            #
+            # Defaults to zero, which means the service decides when a hyperparameter
+            # job should fail.
+        &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # Optional. The TensorFlow summary tag name to use for optimizing trials. For
+            # current versions of TensorFlow, this tag name should exactly match what is
+            # shown in TensorBoard, including all scopes.  For versions of TensorFlow
+            # prior to 0.12, this should be only the tag passed to tf.Summary.
+            # By default, &quot;training/hptuning/metric&quot; will be used.
+        &quot;resumePreviousJobId&quot;: &quot;A String&quot;, # Optional. The prior hyperparameter tuning job id that users hope to
+            # continue with. The job id will be used to find the corresponding vizier
+            # study guid and resume the study.
+        &quot;goal&quot;: &quot;A String&quot;, # Required. The type of goal to use for tuning. Available types are
+            # `MAXIMIZE` and `MINIMIZE`.
+            #
+            # Defaults to `MAXIMIZE`.
+        &quot;algorithm&quot;: &quot;A String&quot;, # Optional. The search algorithm specified for the hyperparameter
+            # tuning job.
+            # Uses the default AI Platform hyperparameter tuning
+            # algorithm if unspecified.
+        &quot;maxParallelTrials&quot;: 42, # Optional. The number of training trials to run concurrently.
+            # You can reduce the time it takes to perform hyperparameter tuning by adding
+            # trials in parallel. However, each trail only benefits from the information
+            # gained in completed trials. That means that a trial does not get access to
+            # the results of trials running at the same time, which could reduce the
+            # quality of the overall optimization.
+            #
+            # Each trial will use the same scale tier and machine types.
+            #
+            # Defaults to one.
+      },
+      &quot;evaluatorType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
+          # job&#x27;s evaluator nodes.
+          #
+          # The supported values are the same as those described in the entry for
+          # `masterType`.
+          #
+          # This value must be consistent with the category of machine type that
+          # `masterType` uses. In other words, both must be Compute Engine machine
+          # types or both must be legacy machine types.
+          #
+          # This value must be present when `scaleTier` is set to `CUSTOM` and
+          # `evaluatorCount` is greater than zero.
+      &quot;network&quot;: &quot;A String&quot;, # Optional. The full name of the Google Compute Engine
+          # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
+          # is peered. For example, projects/12345/global/networks/myVPC. Format is of
+          # the form projects/{project}/global/networks/{network}. Where {project} is a
+          # project number, as in &#x27;12345&#x27;, and {network} is network name.&quot;.
+          #
+          # Private services access must already be configured for the network. If left
+          # unspecified, the Job is not peered with any network. Learn more -
+          # Connecting Job to user network over private
+          # IP.
+      &quot;parameterServerType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
+          # job&#x27;s parameter server.
+          #
+          # The supported values are the same as those described in the entry for
+          # `master_type`.
+          #
+          # This value must be consistent with the category of machine type that
+          # `masterType` uses. In other words, both must be Compute Engine machine
+          # types or both must be legacy machine types.
+          #
+          # This value must be present when `scaleTier` is set to `CUSTOM` and
+          # `parameter_server_count` is greater than zero.
+      &quot;workerType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
+          # job&#x27;s worker nodes.
+          #
+          # The supported values are the same as those described in the entry for
+          # `masterType`.
+          #
+          # This value must be consistent with the category of machine type that
+          # `masterType` uses. In other words, both must be Compute Engine machine
+          # types or both must be legacy machine types.
+          #
+          # If you use `cloud_tpu` for this value, see special instructions for
+          # [configuring a custom TPU
+          # machine](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
+          #
+          # This value must be present when `scaleTier` is set to `CUSTOM` and
+          # `workerCount` is greater than zero.
+      &quot;masterConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for your master worker.
+          #
+          # You should only set `masterConfig.acceleratorConfig` if `masterType` is set
+          # to a Compute Engine machine type. Learn about [restrictions on accelerator
+          # configurations for
+          # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+          #
+          # Set `masterConfig.imageUri` only if you build a custom image. Only one of
+          # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
+          # about [configuring custom
+          # containers](/ai-platform/training/docs/distributed-training-containers).
+        &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+            # [Learn about restrictions on accelerator configurations for
+            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+            # [accelerators for online
+            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+          &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
+          &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
+        },
+        &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
+            # Registry. Learn more about [configuring custom
+            # containers](/ai-platform/training/docs/distributed-training-containers).
+        &quot;containerArgs&quot;: [ # Arguments to the entrypoint command.
+            # The following rules apply for container_command and container_args:
+            # - If you do not supply command or args:
+            #   The defaults defined in the Docker image are used.
+            # - If you supply a command but no args:
+            #   The default EntryPoint and the default Cmd defined in the Docker image
+            #   are ignored. Your command is run without any arguments.
+            # - If you supply only args:
+            #   The default Entrypoint defined in the Docker image is run with the args
+            #   that you supplied.
+            # - If you supply a command and args:
+            #   The default Entrypoint and the default Cmd defined in the Docker image
+            #   are ignored. Your command is run with your args.
+            # It cannot be set if custom container image is
+            # not provided.
+            # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+            # both cannot be set at the same time.
+          &quot;A String&quot;,
+        ],
+        &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+            # the one used in the custom container. This field is required if the replica
+            # is a TPU worker that uses a custom container. Otherwise, do not specify
+            # this field. This must be a [runtime version that currently supports
+            # training with
+            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+            #
+            # Note that the version of TensorFlow included in a runtime version may
+            # differ from the numbering of the runtime version itself, because it may
+            # have a different [patch
+            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+            # In this field, you must specify the runtime version (TensorFlow minor
+            # version). For example, if your custom container runs TensorFlow `1.x.y`,
+            # specify `1.x`.
+        &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+            # If provided, it will override default ENTRYPOINT of the docker image.
+            # If not provided, the docker image&#x27;s ENTRYPOINT is used.
+            # It cannot be set if custom container image is
+            # not provided.
+            # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+            # both cannot be set at the same time.
+          &quot;A String&quot;,
+        ],
+      },
+      &quot;evaluatorCount&quot;: &quot;A String&quot;, # Optional. The number of evaluator replicas to use for the training job.
+          # Each replica in the cluster will be of the type specified in
+          # `evaluator_type`.
+          #
+          # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+          # set this value, you must also set `evaluator_type`.
+          #
+          # The default value is zero.
+      &quot;args&quot;: [ # Optional. Command-line arguments passed to the training application when it
+          # starts. If your job uses a custom container, then the arguments are passed
+          # to the container&#x27;s &lt;a class=&quot;external&quot; target=&quot;_blank&quot;
+          # href=&quot;https://docs.docker.com/engine/reference/builder/#entrypoint&quot;&gt;
+          # `ENTRYPOINT`&lt;/a&gt; command.
+        &quot;A String&quot;,
+      ],
+      &quot;pythonModule&quot;: &quot;A String&quot;, # Required. The Python module name to run after installing the packages.
+      &quot;runtimeVersion&quot;: &quot;A String&quot;, # Optional. The AI Platform runtime version to use for training. You must
+          # either specify this field or specify `masterConfig.imageUri`.
+          #
+          # For more information, see the [runtime version
+          # list](/ai-platform/training/docs/runtime-version-list) and learn [how to
+          # manage runtime versions](/ai-platform/training/docs/versioning).
+      &quot;parameterServerCount&quot;: &quot;A String&quot;, # Optional. The number of parameter server replicas to use for the training
+          # job. Each replica in the cluster will be of the type specified in
+          # `parameter_server_type`.
+          #
+          # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+          # set this value, you must also set `parameter_server_type`.
+          #
+          # The default value is zero.
+      &quot;evaluatorConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
+          #
+          # You should only set `evaluatorConfig.acceleratorConfig` if
+          # `evaluatorType` is set to a Compute Engine machine type. [Learn
+          # about restrictions on accelerator configurations for
+          # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+          #
+          # Set `evaluatorConfig.imageUri` only if you build a custom image for
+          # your evaluator. If `evaluatorConfig.imageUri` has not been
+          # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
+          # containers](/ai-platform/training/docs/distributed-training-containers).
+        &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+            # [Learn about restrictions on accelerator configurations for
+            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+            # [accelerators for online
+            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+          &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
+          &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
+        },
+        &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
+            # Registry. Learn more about [configuring custom
+            # containers](/ai-platform/training/docs/distributed-training-containers).
+        &quot;containerArgs&quot;: [ # Arguments to the entrypoint command.
+            # The following rules apply for container_command and container_args:
+            # - If you do not supply command or args:
+            #   The defaults defined in the Docker image are used.
+            # - If you supply a command but no args:
+            #   The default EntryPoint and the default Cmd defined in the Docker image
+            #   are ignored. Your command is run without any arguments.
+            # - If you supply only args:
+            #   The default Entrypoint defined in the Docker image is run with the args
+            #   that you supplied.
+            # - If you supply a command and args:
+            #   The default Entrypoint and the default Cmd defined in the Docker image
+            #   are ignored. Your command is run with your args.
+            # It cannot be set if custom container image is
+            # not provided.
+            # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+            # both cannot be set at the same time.
+          &quot;A String&quot;,
+        ],
+        &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+            # the one used in the custom container. This field is required if the replica
+            # is a TPU worker that uses a custom container. Otherwise, do not specify
+            # this field. This must be a [runtime version that currently supports
+            # training with
+            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+            #
+            # Note that the version of TensorFlow included in a runtime version may
+            # differ from the numbering of the runtime version itself, because it may
+            # have a different [patch
+            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+            # In this field, you must specify the runtime version (TensorFlow minor
+            # version). For example, if your custom container runs TensorFlow `1.x.y`,
+            # specify `1.x`.
+        &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+            # If provided, it will override default ENTRYPOINT of the docker image.
+            # If not provided, the docker image&#x27;s ENTRYPOINT is used.
+            # It cannot be set if custom container image is
+            # not provided.
+            # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+            # both cannot be set at the same time.
+          &quot;A String&quot;,
+        ],
+      },
+      &quot;encryptionConfig&quot;: { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
+          # protect resources created by a training job, instead of using Google&#x27;s
+          # default encryption. If this is set, then all resources created by the
+          # training job will be encrypted with the customer-managed encryption key
+          # that you specify.
+          #
+          # [Learn how and when to use CMEK with AI Platform
+          # Training](/ai-platform/training/docs/cmek).
+          # a resource.
+        &quot;kmsKeyName&quot;: &quot;A String&quot;, # The Cloud KMS resource identifier of the customer-managed encryption key
+            # used to protect a resource, such as a training job. It has the following
+            # format:
+            # `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
+      },
+      &quot;workerCount&quot;: &quot;A String&quot;, # Optional. The number of worker replicas to use for the training job. Each
+          # replica in the cluster will be of the type specified in `worker_type`.
+          #
+          # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+          # set this value, you must also set `worker_type`.
+          #
+          # The default value is zero.
       &quot;scheduling&quot;: { # All parameters related to scheduling of training jobs. # Optional. Scheduling options for a training job.
         &quot;maxWaitTime&quot;: &quot;A String&quot;,
         &quot;maxRunningTime&quot;: &quot;A String&quot;, # Optional. The maximum job running time, expressed in seconds. The field can
@@ -3811,167 +3971,22 @@
             #   ...
             # ```
       },
-      &quot;network&quot;: &quot;A String&quot;, # Optional. The full name of the Google Compute Engine
-          # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
-          # is peered. For example, projects/12345/global/networks/myVPC. Format is of
-          # the form projects/{project}/global/networks/{network}. Where {project} is a
-          # project number, as in &#x27;12345&#x27;, and {network} is network name.&quot;.
-          #
-          # Private services access must already be configured for the network. If left
-          # unspecified, the Job is not peered with any network. Learn more -
-          # Connecting Job to user network over private
-          # IP.
-      &quot;evaluatorConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
-          #
-          # You should only set `evaluatorConfig.acceleratorConfig` if
-          # `evaluatorType` is set to a Compute Engine machine type. [Learn
-          # about restrictions on accelerator configurations for
-          # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-          #
-          # Set `evaluatorConfig.imageUri` only if you build a custom image for
-          # your evaluator. If `evaluatorConfig.imageUri` has not been
-          # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
-          # containers](/ai-platform/training/docs/distributed-training-containers).
-        &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
-            # Registry. Learn more about [configuring custom
-            # containers](/ai-platform/training/docs/distributed-training-containers).
-        &quot;containerArgs&quot;: [ # Arguments to the entrypoint command.
-            # The following rules apply for container_command and container_args:
-            # - If you do not supply command or args:
-            #   The defaults defined in the Docker image are used.
-            # - If you supply a command but no args:
-            #   The default EntryPoint and the default Cmd defined in the Docker image
-            #   are ignored. Your command is run without any arguments.
-            # - If you supply only args:
-            #   The default Entrypoint defined in the Docker image is run with the args
-            #   that you supplied.
-            # - If you supply a command and args:
-            #   The default Entrypoint and the default Cmd defined in the Docker image
-            #   are ignored. Your command is run with your args.
-            # It cannot be set if custom container image is
-            # not provided.
-            # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-            # both cannot be set at the same time.
-          &quot;A String&quot;,
-        ],
-        &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-            # [Learn about restrictions on accelerator configurations for
-            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
-            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
-            # [accelerators for online
-            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-          &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
-          &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
-        },
-        &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-            # the one used in the custom container. This field is required if the replica
-            # is a TPU worker that uses a custom container. Otherwise, do not specify
-            # this field. This must be a [runtime version that currently supports
-            # training with
-            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-            #
-            # Note that the version of TensorFlow included in a runtime version may
-            # differ from the numbering of the runtime version itself, because it may
-            # have a different [patch
-            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-            # In this field, you must specify the runtime version (TensorFlow minor
-            # version). For example, if your custom container runs TensorFlow `1.x.y`,
-            # specify `1.x`.
-        &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-            # If provided, it will override default ENTRYPOINT of the docker image.
-            # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-            # It cannot be set if custom container image is
-            # not provided.
-            # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-            # both cannot be set at the same time.
-          &quot;A String&quot;,
-        ],
-      },
-      &quot;useChiefInTfConfig&quot;: True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
-          # variable when training with a custom container. Defaults to `false`. [Learn
-          # more about this
-          # field.](/ai-platform/training/docs/distributed-training-details#chief-versus-master)
-          #
-          # This field has no effect for training jobs that don&#x27;t use a custom
-          # container.
-    },
-    &quot;state&quot;: &quot;A String&quot;, # Output only. The detailed state of a job.
-    &quot;jobId&quot;: &quot;A String&quot;, # Required. The user-specified id of the job.
-    &quot;endTime&quot;: &quot;A String&quot;, # Output only. When the job processing was completed.
-    &quot;startTime&quot;: &quot;A String&quot;, # Output only. When the job processing was started.
-    &quot;predictionOutput&quot;: { # Represents results of a prediction job. # The current prediction job result.
-      &quot;errorCount&quot;: &quot;A String&quot;, # The number of data instances which resulted in errors.
-      &quot;outputPath&quot;: &quot;A String&quot;, # The output Google Cloud Storage location provided at the job creation time.
-      &quot;nodeHours&quot;: 3.14, # Node hours used by the batch prediction job.
-      &quot;predictionCount&quot;: &quot;A String&quot;, # The number of generated predictions.
-    },
-    &quot;trainingOutput&quot;: { # Represents results of a training job. Output only. # The current training job result.
-      &quot;isBuiltInAlgorithmJob&quot;: True or False, # Whether this job is a built-in Algorithm job.
-      &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
-          # Only set for built-in algorithms jobs.
-        &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
-            # saves the trained model. Only set for successful jobs that don&#x27;t use
-            # hyperparameter tuning.
-        &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
-        &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
-            # trained.
-        &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
-      },
-      &quot;trials&quot;: [ # Results for individual Hyperparameter trials.
-          # Only set for hyperparameter tuning jobs.
-        { # Represents the result of a single hyperparameter tuning trial from a
-            # training job. The TrainingOutput object that is returned on successful
-            # completion of a training job with hyperparameter tuning includes a list
-            # of HyperparameterOutput objects, one for each successful trial.
-          &quot;trialId&quot;: &quot;A String&quot;, # The trial id for these results.
-          &quot;endTime&quot;: &quot;A String&quot;, # Output only. End time for the trial.
-          &quot;startTime&quot;: &quot;A String&quot;, # Output only. Start time for the trial.
-          &quot;isTrialStoppedEarly&quot;: True or False, # True if the trial is stopped early.
-          &quot;finalMetric&quot;: { # An observed value of a metric. # The final objective metric seen for this trial.
-            &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
-            &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
-          },
-          &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
-              # Only set for trials of built-in algorithms jobs that have succeeded.
-            &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
-                # saves the trained model. Only set for successful jobs that don&#x27;t use
-                # hyperparameter tuning.
-            &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
-            &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
-                # trained.
-            &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
-          },
-          &quot;state&quot;: &quot;A String&quot;, # Output only. The detailed state of the trial.
-          &quot;allMetrics&quot;: [ # All recorded object metrics for this trial. This field is not currently
-              # populated.
-            { # An observed value of a metric.
-              &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
-              &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
-            },
-          ],
-          &quot;hyperparameters&quot;: { # The hyperparameters given to this trial.
-            &quot;a_key&quot;: &quot;A String&quot;,
-          },
-        },
+      &quot;scaleTier&quot;: &quot;A String&quot;, # Required. Specifies the machine types, the number of replicas for workers
+          # and parameter servers.
+      &quot;packageUris&quot;: [ # Required. The Google Cloud Storage location of the packages with
+          # the training program and any additional dependencies.
+          # The maximum number of package URIs is 100.
+        &quot;A String&quot;,
       ],
-      &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # The TensorFlow summary tag name used for optimizing hyperparameter tuning
-          # trials. See
-          # [`HyperparameterSpec.hyperparameterMetricTag`](#HyperparameterSpec.FIELDS.hyperparameter_metric_tag)
-          # for more information. Only set for hyperparameter tuning jobs.
-      &quot;completedTrialCount&quot;: &quot;A String&quot;, # The number of hyperparameter tuning trials that completed successfully.
-          # Only set for hyperparameter tuning jobs.
-      &quot;isHyperparameterTuningJob&quot;: True or False, # Whether this job is a hyperparameter tuning job.
-      &quot;consumedMLUnits&quot;: 3.14, # The amount of ML units consumed by the job.
     },
-    &quot;createTime&quot;: &quot;A String&quot;, # Output only. When the job was created.
-    &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your jobs.
-        # Each label is a key-value pair, where both the key and the value are
-        # arbitrary strings that you supply.
-        # For more information, see the documentation on
-        # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
-      &quot;a_key&quot;: &quot;A String&quot;,
-    },
+    &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
+        # prevent simultaneous updates of a job from overwriting each other.
+        # It is strongly suggested that systems make use of the `etag` in the
+        # read-modify-write cycle to perform job updates in order to avoid race
+        # conditions: An `etag` is returned in the response to `GetJob`, and
+        # systems are expected to put that etag in the request to `UpdateJob` to
+        # ensure that their change will be applied to the same version of the job.
+    &quot;jobId&quot;: &quot;A String&quot;, # Required. The user-specified id of the job.
   }
 
   updateMask: string, Required. Specifies the path, relative to `Job`, of the field to update.
@@ -4002,20 +4017,34 @@
   An object of the form:
 
     { # Represents a training or prediction job.
+      &quot;createTime&quot;: &quot;A String&quot;, # Output only. When the job was created.
       &quot;predictionInput&quot;: { # Represents input parameters for a prediction job. # Input parameters to create a prediction job.
+        &quot;dataFormat&quot;: &quot;A String&quot;, # Required. The format of the input data files.
+        &quot;outputPath&quot;: &quot;A String&quot;, # Required. The output Google Cloud Storage location.
+        &quot;inputPaths&quot;: [ # Required. The Cloud Storage location of the input data files. May contain
+            # &lt;a href=&quot;/storage/docs/gsutil/addlhelp/WildcardNames&quot;&gt;wildcards&lt;/a&gt;.
+          &quot;A String&quot;,
+        ],
+        &quot;region&quot;: &quot;A String&quot;, # Required. The Google Compute Engine region to run the prediction job in.
+            # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
+            # for AI Platform services.
         &quot;versionName&quot;: &quot;A String&quot;, # Use this field if you want to specify a version of the model to use. The
             # string is formatted the same way as `model_version`, with the addition
             # of the version information:
             #
             # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION&quot;`
+        &quot;uri&quot;: &quot;A String&quot;, # Use this field if you want to specify a Google Cloud Storage path for
+            # the model to use.
+        &quot;outputDataFormat&quot;: &quot;A String&quot;, # Optional. Format of the output data files, defaults to JSON.
+        &quot;runtimeVersion&quot;: &quot;A String&quot;, # Optional. The AI Platform runtime version to use for this batch
+            # prediction. If not set, AI Platform will pick the runtime version used
+            # during the CreateVersion request for this model version, or choose the
+            # latest stable version when model version information is not available
+            # such as when the model is specified by uri.
         &quot;modelName&quot;: &quot;A String&quot;, # Use this field if you want to use the default version for the specified
             # model. The string must use the following format:
             #
             # `&quot;projects/YOUR_PROJECT/models/YOUR_MODEL&quot;`
-        &quot;uri&quot;: &quot;A String&quot;, # Use this field if you want to specify a Google Cloud Storage path for
-            # the model to use.
-        &quot;maxWorkerCount&quot;: &quot;A String&quot;, # Optional. The maximum number of workers to be used for parallel processing.
-            # Defaults to 10 if not specified.
         &quot;signatureName&quot;: &quot;A String&quot;, # Optional. The name of the signature defined in the SavedModel to use for
             # this job. Please refer to
             # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
@@ -4024,202 +4053,96 @@
             # Defaults to
             # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
             # , which is &quot;serving_default&quot;.
-        &quot;outputPath&quot;: &quot;A String&quot;, # Required. The output Google Cloud Storage location.
-        &quot;outputDataFormat&quot;: &quot;A String&quot;, # Optional. Format of the output data files, defaults to JSON.
-        &quot;dataFormat&quot;: &quot;A String&quot;, # Required. The format of the input data files.
         &quot;batchSize&quot;: &quot;A String&quot;, # Optional. Number of records per batch, defaults to 64.
             # The service will buffer batch_size number of records in memory before
             # invoking one Tensorflow prediction call internally. So take the record
             # size and memory available into consideration when setting this parameter.
-        &quot;runtimeVersion&quot;: &quot;A String&quot;, # Optional. The AI Platform runtime version to use for this batch
-            # prediction. If not set, AI Platform will pick the runtime version used
-            # during the CreateVersion request for this model version, or choose the
-            # latest stable version when model version information is not available
-            # such as when the model is specified by uri.
-        &quot;inputPaths&quot;: [ # Required. The Cloud Storage location of the input data files. May contain
-            # &lt;a href=&quot;/storage/docs/gsutil/addlhelp/WildcardNames&quot;&gt;wildcards&lt;/a&gt;.
-          &quot;A String&quot;,
+        &quot;maxWorkerCount&quot;: &quot;A String&quot;, # Optional. The maximum number of workers to be used for parallel processing.
+            # Defaults to 10 if not specified.
+      },
+      &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your jobs.
+          # Each label is a key-value pair, where both the key and the value are
+          # arbitrary strings that you supply.
+          # For more information, see the documentation on
+          # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
+        &quot;a_key&quot;: &quot;A String&quot;,
+      },
+      &quot;endTime&quot;: &quot;A String&quot;, # Output only. When the job processing was completed.
+      &quot;trainingOutput&quot;: { # Represents results of a training job. Output only. # The current training job result.
+        &quot;trials&quot;: [ # Results for individual Hyperparameter trials.
+            # Only set for hyperparameter tuning jobs.
+          { # Represents the result of a single hyperparameter tuning trial from a
+              # training job. The TrainingOutput object that is returned on successful
+              # completion of a training job with hyperparameter tuning includes a list
+              # of HyperparameterOutput objects, one for each successful trial.
+            &quot;endTime&quot;: &quot;A String&quot;, # Output only. End time for the trial.
+            &quot;finalMetric&quot;: { # An observed value of a metric. # The final objective metric seen for this trial.
+              &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
+              &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
+            },
+            &quot;hyperparameters&quot;: { # The hyperparameters given to this trial.
+              &quot;a_key&quot;: &quot;A String&quot;,
+            },
+            &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
+                # Only set for trials of built-in algorithms jobs that have succeeded.
+              &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
+                  # saves the trained model. Only set for successful jobs that don&#x27;t use
+                  # hyperparameter tuning.
+              &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
+              &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
+                  # trained.
+              &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
+            },
+            &quot;startTime&quot;: &quot;A String&quot;, # Output only. Start time for the trial.
+            &quot;allMetrics&quot;: [ # All recorded object metrics for this trial. This field is not currently
+                # populated.
+              { # An observed value of a metric.
+                &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
+                &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
+              },
+            ],
+            &quot;trialId&quot;: &quot;A String&quot;, # The trial id for these results.
+            &quot;isTrialStoppedEarly&quot;: True or False, # True if the trial is stopped early.
+            &quot;state&quot;: &quot;A String&quot;, # Output only. The detailed state of the trial.
+          },
         ],
-        &quot;region&quot;: &quot;A String&quot;, # Required. The Google Compute Engine region to run the prediction job in.
-            # See the &lt;a href=&quot;/ml-engine/docs/tensorflow/regions&quot;&gt;available regions&lt;/a&gt;
-            # for AI Platform services.
+        &quot;completedTrialCount&quot;: &quot;A String&quot;, # The number of hyperparameter tuning trials that completed successfully.
+            # Only set for hyperparameter tuning jobs.
+        &quot;isHyperparameterTuningJob&quot;: True or False, # Whether this job is a hyperparameter tuning job.
+        &quot;isBuiltInAlgorithmJob&quot;: True or False, # Whether this job is a built-in Algorithm job.
+        &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
+            # Only set for built-in algorithms jobs.
+          &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
+              # saves the trained model. Only set for successful jobs that don&#x27;t use
+              # hyperparameter tuning.
+          &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
+          &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
+              # trained.
+          &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
+        },
+        &quot;consumedMLUnits&quot;: 3.14, # The amount of ML units consumed by the job.
+        &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # The TensorFlow summary tag name used for optimizing hyperparameter tuning
+            # trials. See
+            # [`HyperparameterSpec.hyperparameterMetricTag`](#HyperparameterSpec.FIELDS.hyperparameter_metric_tag)
+            # for more information. Only set for hyperparameter tuning jobs.
       },
       &quot;errorMessage&quot;: &quot;A String&quot;, # Output only. The details of a failure or a cancellation.
-      &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
-          # prevent simultaneous updates of a job from overwriting each other.
-          # It is strongly suggested that systems make use of the `etag` in the
-          # read-modify-write cycle to perform job updates in order to avoid race
-          # conditions: An `etag` is returned in the response to `GetJob`, and
-          # systems are expected to put that etag in the request to `UpdateJob` to
-          # ensure that their change will be applied to the same version of the job.
+      &quot;predictionOutput&quot;: { # Represents results of a prediction job. # The current prediction job result.
+        &quot;errorCount&quot;: &quot;A String&quot;, # The number of data instances which resulted in errors.
+        &quot;nodeHours&quot;: 3.14, # Node hours used by the batch prediction job.
+        &quot;outputPath&quot;: &quot;A String&quot;, # The output Google Cloud Storage location provided at the job creation time.
+        &quot;predictionCount&quot;: &quot;A String&quot;, # The number of generated predictions.
+      },
+      &quot;startTime&quot;: &quot;A String&quot;, # Output only. When the job processing was started.
+      &quot;state&quot;: &quot;A String&quot;, # Output only. The detailed state of a job.
       &quot;trainingInput&quot;: { # Represents input parameters for a training job. When using the gcloud command # Input parameters to create a training job.
           # to submit your training job, you can specify the input parameters as
           # command-line arguments and/or in a YAML configuration file referenced from
           # the --config command-line argument. For details, see the guide to [submitting
           # a training job](/ai-platform/training/docs/training-jobs).
-        &quot;workerCount&quot;: &quot;A String&quot;, # Optional. The number of worker replicas to use for the training job. Each
-            # replica in the cluster will be of the type specified in `worker_type`.
-            #
-            # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
-            # set this value, you must also set `worker_type`.
-            #
-            # The default value is zero.
-        &quot;pythonModule&quot;: &quot;A String&quot;, # Required. The Python module name to run after installing the packages.
-        &quot;args&quot;: [ # Optional. Command-line arguments passed to the training application when it
-            # starts. If your job uses a custom container, then the arguments are passed
-            # to the container&#x27;s &lt;a class=&quot;external&quot; target=&quot;_blank&quot;
-            # href=&quot;https://docs.docker.com/engine/reference/builder/#entrypoint&quot;&gt;
-            # `ENTRYPOINT`&lt;/a&gt; command.
-          &quot;A String&quot;,
-        ],
-        &quot;parameterServerConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
-            #
-            # You should only set `parameterServerConfig.acceleratorConfig` if
-            # `parameterServerType` is set to a Compute Engine machine type. [Learn
-            # about restrictions on accelerator configurations for
-            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-            #
-            # Set `parameterServerConfig.imageUri` only if you build a custom image for
-            # your parameter server. If `parameterServerConfig.imageUri` has not been
-            # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
-            # containers](/ai-platform/training/docs/distributed-training-containers).
-          &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
-              # Registry. Learn more about [configuring custom
-              # containers](/ai-platform/training/docs/distributed-training-containers).
-          &quot;containerArgs&quot;: [ # Arguments to the entrypoint command.
-              # The following rules apply for container_command and container_args:
-              # - If you do not supply command or args:
-              #   The defaults defined in the Docker image are used.
-              # - If you supply a command but no args:
-              #   The default EntryPoint and the default Cmd defined in the Docker image
-              #   are ignored. Your command is run without any arguments.
-              # - If you supply only args:
-              #   The default Entrypoint defined in the Docker image is run with the args
-              #   that you supplied.
-              # - If you supply a command and args:
-              #   The default Entrypoint and the default Cmd defined in the Docker image
-              #   are ignored. Your command is run with your args.
-              # It cannot be set if custom container image is
-              # not provided.
-              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-              # both cannot be set at the same time.
-            &quot;A String&quot;,
-          ],
-          &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-              # [Learn about restrictions on accelerator configurations for
-              # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-              # Note that the AcceleratorConfig can be used in both Jobs and Versions.
-              # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
-              # [accelerators for online
-              # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-            &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
-            &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
-          },
-          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-              # the one used in the custom container. This field is required if the replica
-              # is a TPU worker that uses a custom container. Otherwise, do not specify
-              # this field. This must be a [runtime version that currently supports
-              # training with
-              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-              #
-              # Note that the version of TensorFlow included in a runtime version may
-              # differ from the numbering of the runtime version itself, because it may
-              # have a different [patch
-              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-              # In this field, you must specify the runtime version (TensorFlow minor
-              # version). For example, if your custom container runs TensorFlow `1.x.y`,
-              # specify `1.x`.
-          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-              # If provided, it will override default ENTRYPOINT of the docker image.
-              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-              # It cannot be set if custom container image is
-              # not provided.
-              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-              # both cannot be set at the same time.
-            &quot;A String&quot;,
-          ],
-        },
-        &quot;encryptionConfig&quot;: { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
-            # protect resources created by a training job, instead of using Google&#x27;s
-            # default encryption. If this is set, then all resources created by the
-            # training job will be encrypted with the customer-managed encryption key
-            # that you specify.
-            #
-            # [Learn how and when to use CMEK with AI Platform
-            # Training](/ai-platform/training/docs/cmek).
-            # a resource.
-          &quot;kmsKeyName&quot;: &quot;A String&quot;, # The Cloud KMS resource identifier of the customer-managed encryption key
-              # used to protect a resource, such as a training job. It has the following
-              # format:
-              # `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
-        },
-        &quot;hyperparameters&quot;: { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
-          &quot;params&quot;: [ # Required. The set of parameters to tune.
-            { # Represents a single hyperparameter to optimize.
-              &quot;categoricalValues&quot;: [ # Required if type is `CATEGORICAL`. The list of possible categories.
-                &quot;A String&quot;,
-              ],
-              &quot;parameterName&quot;: &quot;A String&quot;, # Required. The parameter name must be unique amongst all ParameterConfigs in
-                  # a HyperparameterSpec message. E.g., &quot;learning_rate&quot;.
-              &quot;minValue&quot;: 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
-                  # should be unset if type is `CATEGORICAL`. This value should be integers if
-                  # type is INTEGER.
-              &quot;discreteValues&quot;: [ # Required if type is `DISCRETE`.
-                  # A list of feasible points.
-                  # The list should be in strictly increasing order. For instance, this
-                  # parameter might have possible settings of 1.5, 2.5, and 4.0. This list
-                  # should not contain more than 1,000 values.
-                3.14,
-              ],
-              &quot;scaleType&quot;: &quot;A String&quot;, # Optional. How the parameter should be scaled to the hypercube.
-                  # Leave unset for categorical parameters.
-                  # Some kind of scaling is strongly recommended for real or integral
-                  # parameters (e.g., `UNIT_LINEAR_SCALE`).
-              &quot;maxValue&quot;: 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
-                  # should be unset if type is `CATEGORICAL`. This value should be integers if
-                  # type is `INTEGER`.
-              &quot;type&quot;: &quot;A String&quot;, # Required. The type of the parameter.
-            },
-          ],
-          &quot;enableTrialEarlyStopping&quot;: True or False, # Optional. Indicates if the hyperparameter tuning job enables auto trial
-              # early stopping.
-          &quot;resumePreviousJobId&quot;: &quot;A String&quot;, # Optional. The prior hyperparameter tuning job id that users hope to
-              # continue with. The job id will be used to find the corresponding vizier
-              # study guid and resume the study.
-          &quot;maxParallelTrials&quot;: 42, # Optional. The number of training trials to run concurrently.
-              # You can reduce the time it takes to perform hyperparameter tuning by adding
-              # trials in parallel. However, each trail only benefits from the information
-              # gained in completed trials. That means that a trial does not get access to
-              # the results of trials running at the same time, which could reduce the
-              # quality of the overall optimization.
-              #
-              # Each trial will use the same scale tier and machine types.
-              #
-              # Defaults to one.
-          &quot;maxFailedTrials&quot;: 42, # Optional. The number of failed trials that need to be seen before failing
-              # the hyperparameter tuning job. You can specify this field to override the
-              # default failing criteria for AI Platform hyperparameter tuning jobs.
-              #
-              # Defaults to zero, which means the service decides when a hyperparameter
-              # job should fail.
-          &quot;goal&quot;: &quot;A String&quot;, # Required. The type of goal to use for tuning. Available types are
-              # `MAXIMIZE` and `MINIMIZE`.
-              #
-              # Defaults to `MAXIMIZE`.
-          &quot;maxTrials&quot;: 42, # Optional. How many training trials should be attempted to optimize
-              # the specified hyperparameters.
-              #
-              # Defaults to one.
-          &quot;algorithm&quot;: &quot;A String&quot;, # Optional. The search algorithm specified for the hyperparameter
-              # tuning job.
-              # Uses the default AI Platform hyperparameter tuning
-              # algorithm if unspecified.
-          &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # Optional. The TensorFlow summary tag name to use for optimizing trials. For
-              # current versions of TensorFlow, this tag name should exactly match what is
-              # shown in TensorBoard, including all scopes.  For versions of TensorFlow
-              # prior to 0.12, this should be only the tag passed to tf.Summary.
-              # By default, &quot;training/hptuning/metric&quot; will be used.
-        },
+        &quot;serviceAccount&quot;: &quot;A String&quot;, # Optional. Specifies the service account for workload run-as account.
+            # Users submitting jobs must have act-as permission on this run-as account.
+            # If not specified, then CMLE P4SA will be used by default.
         &quot;workerConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
             #
             # You should only set `workerConfig.acceleratorConfig` if `workerType` is set
@@ -4231,6 +4154,16 @@
             # worker. If `workerConfig.imageUri` has not been set, AI Platform uses
             # the value of `masterConfig.imageUri`. Learn more about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
+          &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+              # [Learn about restrictions on accelerator configurations for
+              # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+              # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+              # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+              # [accelerators for online
+              # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+            &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
+            &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
+          },
           &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
               # Registry. Learn more about [configuring custom
               # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -4253,16 +4186,6 @@
               # both cannot be set at the same time.
             &quot;A String&quot;,
           ],
-          &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-              # [Learn about restrictions on accelerator configurations for
-              # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-              # Note that the AcceleratorConfig can be used in both Jobs and Versions.
-              # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
-              # [accelerators for online
-              # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-            &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
-            &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
-          },
           &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
               # the one used in the custom container. This field is required if the replica
               # is a TPU worker that uses a custom container. Otherwise, do not specify
@@ -4287,27 +4210,13 @@
             &quot;A String&quot;,
           ],
         },
-        &quot;parameterServerCount&quot;: &quot;A String&quot;, # Optional. The number of parameter server replicas to use for the training
-            # job. Each replica in the cluster will be of the type specified in
-            # `parameter_server_type`.
+        &quot;useChiefInTfConfig&quot;: True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
+            # variable when training with a custom container. Defaults to `false`. [Learn
+            # more about this
+            # field.](/ai-platform/training/docs/distributed-training-details#chief-versus-master)
             #
-            # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
-            # set this value, you must also set `parameter_server_type`.
-            #
-            # The default value is zero.
-        &quot;packageUris&quot;: [ # Required. The Google Cloud Storage location of the packages with
-            # the training program and any additional dependencies.
-            # The maximum number of package URIs is 100.
-          &quot;A String&quot;,
-        ],
-        &quot;evaluatorCount&quot;: &quot;A String&quot;, # Optional. The number of evaluator replicas to use for the training job.
-            # Each replica in the cluster will be of the type specified in
-            # `evaluator_type`.
-            #
-            # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
-            # set this value, you must also set `evaluator_type`.
-            #
-            # The default value is zero.
+            # This field has no effect for training jobs that don&#x27;t use a custom
+            # container.
         &quot;masterType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
             # job&#x27;s master worker. You must specify this field when `scaleTier` is set to
             # `CUSTOM`.
@@ -4360,65 +4269,27 @@
             # field. Learn more about the [special configuration options for training
             # with
             # TPUs](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
-        &quot;runtimeVersion&quot;: &quot;A String&quot;, # Optional. The AI Platform runtime version to use for training. You must
-            # either specify this field or specify `masterConfig.imageUri`.
+        &quot;parameterServerConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
             #
-            # For more information, see the [runtime version
-            # list](/ai-platform/training/docs/runtime-version-list) and learn [how to
-            # manage runtime versions](/ai-platform/training/docs/versioning).
-        &quot;evaluatorType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
-            # job&#x27;s evaluator nodes.
-            #
-            # The supported values are the same as those described in the entry for
-            # `masterType`.
-            #
-            # This value must be consistent with the category of machine type that
-            # `masterType` uses. In other words, both must be Compute Engine machine
-            # types or both must be legacy machine types.
-            #
-            # This value must be present when `scaleTier` is set to `CUSTOM` and
-            # `evaluatorCount` is greater than zero.
-        &quot;workerType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
-            # job&#x27;s worker nodes.
-            #
-            # The supported values are the same as those described in the entry for
-            # `masterType`.
-            #
-            # This value must be consistent with the category of machine type that
-            # `masterType` uses. In other words, both must be Compute Engine machine
-            # types or both must be legacy machine types.
-            #
-            # If you use `cloud_tpu` for this value, see special instructions for
-            # [configuring a custom TPU
-            # machine](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
-            #
-            # This value must be present when `scaleTier` is set to `CUSTOM` and
-            # `workerCount` is greater than zero.
-        &quot;region&quot;: &quot;A String&quot;, # Required. The region to run the training job in. See the [available
-            # regions](/ai-platform/training/docs/regions) for AI Platform Training.
-        &quot;parameterServerType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
-            # job&#x27;s parameter server.
-            #
-            # The supported values are the same as those described in the entry for
-            # `master_type`.
-            #
-            # This value must be consistent with the category of machine type that
-            # `masterType` uses. In other words, both must be Compute Engine machine
-            # types or both must be legacy machine types.
-            #
-            # This value must be present when `scaleTier` is set to `CUSTOM` and
-            # `parameter_server_count` is greater than zero.
-        &quot;masterConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for your master worker.
-            #
-            # You should only set `masterConfig.acceleratorConfig` if `masterType` is set
-            # to a Compute Engine machine type. Learn about [restrictions on accelerator
-            # configurations for
+            # You should only set `parameterServerConfig.acceleratorConfig` if
+            # `parameterServerType` is set to a Compute Engine machine type. [Learn
+            # about restrictions on accelerator configurations for
             # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
             #
-            # Set `masterConfig.imageUri` only if you build a custom image. Only one of
-            # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
-            # about [configuring custom
+            # Set `parameterServerConfig.imageUri` only if you build a custom image for
+            # your parameter server. If `parameterServerConfig.imageUri` has not been
+            # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
             # containers](/ai-platform/training/docs/distributed-training-containers).
+          &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+              # [Learn about restrictions on accelerator configurations for
+              # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+              # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+              # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+              # [accelerators for online
+              # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+            &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
+            &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
+          },
           &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
               # Registry. Learn more about [configuring custom
               # containers](/ai-platform/training/docs/distributed-training-containers).
@@ -4441,16 +4312,6 @@
               # both cannot be set at the same time.
             &quot;A String&quot;,
           ],
-          &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-              # [Learn about restrictions on accelerator configurations for
-              # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-              # Note that the AcceleratorConfig can be used in both Jobs and Versions.
-              # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
-              # [accelerators for online
-              # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-            &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
-            &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
-          },
           &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
               # the one used in the custom container. This field is required if the replica
               # is a TPU worker that uses a custom container. Otherwise, do not specify
@@ -4475,8 +4336,8 @@
             &quot;A String&quot;,
           ],
         },
-        &quot;scaleTier&quot;: &quot;A String&quot;, # Required. Specifies the machine types, the number of replicas for workers
-            # and parameter servers.
+        &quot;region&quot;: &quot;A String&quot;, # Required. The region to run the training job in. See the [available
+            # regions](/ai-platform/training/docs/regions) for AI Platform Training.
         &quot;jobDir&quot;: &quot;A String&quot;, # Optional. A Google Cloud Storage path in which to store training outputs
             # and other data needed for training. This path is passed to your TensorFlow
             # program as the &#x27;--job-dir&#x27; command-line argument. The benefit of specifying
@@ -4495,6 +4356,308 @@
             #
             # Read more about the Python versions available for [each runtime
             # version](/ml-engine/docs/runtime-version-list).
+        &quot;hyperparameters&quot;: { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
+          &quot;maxTrials&quot;: 42, # Optional. How many training trials should be attempted to optimize
+              # the specified hyperparameters.
+              #
+              # Defaults to one.
+          &quot;enableTrialEarlyStopping&quot;: True or False, # Optional. Indicates if the hyperparameter tuning job enables auto trial
+              # early stopping.
+          &quot;params&quot;: [ # Required. The set of parameters to tune.
+            { # Represents a single hyperparameter to optimize.
+              &quot;minValue&quot;: 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
+                  # should be unset if type is `CATEGORICAL`. This value should be integers if
+                  # type is INTEGER.
+              &quot;categoricalValues&quot;: [ # Required if type is `CATEGORICAL`. The list of possible categories.
+                &quot;A String&quot;,
+              ],
+              &quot;scaleType&quot;: &quot;A String&quot;, # Optional. How the parameter should be scaled to the hypercube.
+                  # Leave unset for categorical parameters.
+                  # Some kind of scaling is strongly recommended for real or integral
+                  # parameters (e.g., `UNIT_LINEAR_SCALE`).
+              &quot;discreteValues&quot;: [ # Required if type is `DISCRETE`.
+                  # A list of feasible points.
+                  # The list should be in strictly increasing order. For instance, this
+                  # parameter might have possible settings of 1.5, 2.5, and 4.0. This list
+                  # should not contain more than 1,000 values.
+                3.14,
+              ],
+              &quot;type&quot;: &quot;A String&quot;, # Required. The type of the parameter.
+              &quot;maxValue&quot;: 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
+                  # should be unset if type is `CATEGORICAL`. This value should be integers if
+                  # type is `INTEGER`.
+              &quot;parameterName&quot;: &quot;A String&quot;, # Required. The parameter name must be unique amongst all ParameterConfigs in
+                  # a HyperparameterSpec message. E.g., &quot;learning_rate&quot;.
+            },
+          ],
+          &quot;maxFailedTrials&quot;: 42, # Optional. The number of failed trials that need to be seen before failing
+              # the hyperparameter tuning job. You can specify this field to override the
+              # default failing criteria for AI Platform hyperparameter tuning jobs.
+              #
+              # Defaults to zero, which means the service decides when a hyperparameter
+              # job should fail.
+          &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # Optional. The TensorFlow summary tag name to use for optimizing trials. For
+              # current versions of TensorFlow, this tag name should exactly match what is
+              # shown in TensorBoard, including all scopes.  For versions of TensorFlow
+              # prior to 0.12, this should be only the tag passed to tf.Summary.
+              # By default, &quot;training/hptuning/metric&quot; will be used.
+          &quot;resumePreviousJobId&quot;: &quot;A String&quot;, # Optional. The prior hyperparameter tuning job id that users hope to
+              # continue with. The job id will be used to find the corresponding vizier
+              # study guid and resume the study.
+          &quot;goal&quot;: &quot;A String&quot;, # Required. The type of goal to use for tuning. Available types are
+              # `MAXIMIZE` and `MINIMIZE`.
+              #
+              # Defaults to `MAXIMIZE`.
+          &quot;algorithm&quot;: &quot;A String&quot;, # Optional. The search algorithm specified for the hyperparameter
+              # tuning job.
+              # Uses the default AI Platform hyperparameter tuning
+              # algorithm if unspecified.
+          &quot;maxParallelTrials&quot;: 42, # Optional. The number of training trials to run concurrently.
+              # You can reduce the time it takes to perform hyperparameter tuning by adding
+              # trials in parallel. However, each trail only benefits from the information
+              # gained in completed trials. That means that a trial does not get access to
+              # the results of trials running at the same time, which could reduce the
+              # quality of the overall optimization.
+              #
+              # Each trial will use the same scale tier and machine types.
+              #
+              # Defaults to one.
+        },
+        &quot;evaluatorType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
+            # job&#x27;s evaluator nodes.
+            #
+            # The supported values are the same as those described in the entry for
+            # `masterType`.
+            #
+            # This value must be consistent with the category of machine type that
+            # `masterType` uses. In other words, both must be Compute Engine machine
+            # types or both must be legacy machine types.
+            #
+            # This value must be present when `scaleTier` is set to `CUSTOM` and
+            # `evaluatorCount` is greater than zero.
+        &quot;network&quot;: &quot;A String&quot;, # Optional. The full name of the Google Compute Engine
+            # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
+            # is peered. For example, projects/12345/global/networks/myVPC. Format is of
+            # the form projects/{project}/global/networks/{network}. Where {project} is a
+            # project number, as in &#x27;12345&#x27;, and {network} is network name.&quot;.
+            #
+            # Private services access must already be configured for the network. If left
+            # unspecified, the Job is not peered with any network. Learn more -
+            # Connecting Job to user network over private
+            # IP.
+        &quot;parameterServerType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
+            # job&#x27;s parameter server.
+            #
+            # The supported values are the same as those described in the entry for
+            # `master_type`.
+            #
+            # This value must be consistent with the category of machine type that
+            # `masterType` uses. In other words, both must be Compute Engine machine
+            # types or both must be legacy machine types.
+            #
+            # This value must be present when `scaleTier` is set to `CUSTOM` and
+            # `parameter_server_count` is greater than zero.
+        &quot;workerType&quot;: &quot;A String&quot;, # Optional. Specifies the type of virtual machine to use for your training
+            # job&#x27;s worker nodes.
+            #
+            # The supported values are the same as those described in the entry for
+            # `masterType`.
+            #
+            # This value must be consistent with the category of machine type that
+            # `masterType` uses. In other words, both must be Compute Engine machine
+            # types or both must be legacy machine types.
+            #
+            # If you use `cloud_tpu` for this value, see special instructions for
+            # [configuring a custom TPU
+            # machine](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
+            #
+            # This value must be present when `scaleTier` is set to `CUSTOM` and
+            # `workerCount` is greater than zero.
+        &quot;masterConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for your master worker.
+            #
+            # You should only set `masterConfig.acceleratorConfig` if `masterType` is set
+            # to a Compute Engine machine type. Learn about [restrictions on accelerator
+            # configurations for
+            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+            #
+            # Set `masterConfig.imageUri` only if you build a custom image. Only one of
+            # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
+            # about [configuring custom
+            # containers](/ai-platform/training/docs/distributed-training-containers).
+          &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+              # [Learn about restrictions on accelerator configurations for
+              # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+              # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+              # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+              # [accelerators for online
+              # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+            &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
+            &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
+          },
+          &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
+              # Registry. Learn more about [configuring custom
+              # containers](/ai-platform/training/docs/distributed-training-containers).
+          &quot;containerArgs&quot;: [ # Arguments to the entrypoint command.
+              # The following rules apply for container_command and container_args:
+              # - If you do not supply command or args:
+              #   The defaults defined in the Docker image are used.
+              # - If you supply a command but no args:
+              #   The default EntryPoint and the default Cmd defined in the Docker image
+              #   are ignored. Your command is run without any arguments.
+              # - If you supply only args:
+              #   The default Entrypoint defined in the Docker image is run with the args
+              #   that you supplied.
+              # - If you supply a command and args:
+              #   The default Entrypoint and the default Cmd defined in the Docker image
+              #   are ignored. Your command is run with your args.
+              # It cannot be set if custom container image is
+              # not provided.
+              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+              # both cannot be set at the same time.
+            &quot;A String&quot;,
+          ],
+          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+              # the one used in the custom container. This field is required if the replica
+              # is a TPU worker that uses a custom container. Otherwise, do not specify
+              # this field. This must be a [runtime version that currently supports
+              # training with
+              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+              #
+              # Note that the version of TensorFlow included in a runtime version may
+              # differ from the numbering of the runtime version itself, because it may
+              # have a different [patch
+              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+              # In this field, you must specify the runtime version (TensorFlow minor
+              # version). For example, if your custom container runs TensorFlow `1.x.y`,
+              # specify `1.x`.
+          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+              # If provided, it will override default ENTRYPOINT of the docker image.
+              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
+              # It cannot be set if custom container image is
+              # not provided.
+              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+              # both cannot be set at the same time.
+            &quot;A String&quot;,
+          ],
+        },
+        &quot;evaluatorCount&quot;: &quot;A String&quot;, # Optional. The number of evaluator replicas to use for the training job.
+            # Each replica in the cluster will be of the type specified in
+            # `evaluator_type`.
+            #
+            # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+            # set this value, you must also set `evaluator_type`.
+            #
+            # The default value is zero.
+        &quot;args&quot;: [ # Optional. Command-line arguments passed to the training application when it
+            # starts. If your job uses a custom container, then the arguments are passed
+            # to the container&#x27;s &lt;a class=&quot;external&quot; target=&quot;_blank&quot;
+            # href=&quot;https://docs.docker.com/engine/reference/builder/#entrypoint&quot;&gt;
+            # `ENTRYPOINT`&lt;/a&gt; command.
+          &quot;A String&quot;,
+        ],
+        &quot;pythonModule&quot;: &quot;A String&quot;, # Required. The Python module name to run after installing the packages.
+        &quot;runtimeVersion&quot;: &quot;A String&quot;, # Optional. The AI Platform runtime version to use for training. You must
+            # either specify this field or specify `masterConfig.imageUri`.
+            #
+            # For more information, see the [runtime version
+            # list](/ai-platform/training/docs/runtime-version-list) and learn [how to
+            # manage runtime versions](/ai-platform/training/docs/versioning).
+        &quot;parameterServerCount&quot;: &quot;A String&quot;, # Optional. The number of parameter server replicas to use for the training
+            # job. Each replica in the cluster will be of the type specified in
+            # `parameter_server_type`.
+            #
+            # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+            # set this value, you must also set `parameter_server_type`.
+            #
+            # The default value is zero.
+        &quot;evaluatorConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
+            #
+            # You should only set `evaluatorConfig.acceleratorConfig` if
+            # `evaluatorType` is set to a Compute Engine machine type. [Learn
+            # about restrictions on accelerator configurations for
+            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+            #
+            # Set `evaluatorConfig.imageUri` only if you build a custom image for
+            # your evaluator. If `evaluatorConfig.imageUri` has not been
+            # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
+            # containers](/ai-platform/training/docs/distributed-training-containers).
+          &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+              # [Learn about restrictions on accelerator configurations for
+              # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+              # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+              # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+              # [accelerators for online
+              # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+            &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
+            &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
+          },
+          &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
+              # Registry. Learn more about [configuring custom
+              # containers](/ai-platform/training/docs/distributed-training-containers).
+          &quot;containerArgs&quot;: [ # Arguments to the entrypoint command.
+              # The following rules apply for container_command and container_args:
+              # - If you do not supply command or args:
+              #   The defaults defined in the Docker image are used.
+              # - If you supply a command but no args:
+              #   The default EntryPoint and the default Cmd defined in the Docker image
+              #   are ignored. Your command is run without any arguments.
+              # - If you supply only args:
+              #   The default Entrypoint defined in the Docker image is run with the args
+              #   that you supplied.
+              # - If you supply a command and args:
+              #   The default Entrypoint and the default Cmd defined in the Docker image
+              #   are ignored. Your command is run with your args.
+              # It cannot be set if custom container image is
+              # not provided.
+              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+              # both cannot be set at the same time.
+            &quot;A String&quot;,
+          ],
+          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
+              # the one used in the custom container. This field is required if the replica
+              # is a TPU worker that uses a custom container. Otherwise, do not specify
+              # this field. This must be a [runtime version that currently supports
+              # training with
+              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+              #
+              # Note that the version of TensorFlow included in a runtime version may
+              # differ from the numbering of the runtime version itself, because it may
+              # have a different [patch
+              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+              # In this field, you must specify the runtime version (TensorFlow minor
+              # version). For example, if your custom container runs TensorFlow `1.x.y`,
+              # specify `1.x`.
+          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
+              # If provided, it will override default ENTRYPOINT of the docker image.
+              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
+              # It cannot be set if custom container image is
+              # not provided.
+              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
+              # both cannot be set at the same time.
+            &quot;A String&quot;,
+          ],
+        },
+        &quot;encryptionConfig&quot;: { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
+            # protect resources created by a training job, instead of using Google&#x27;s
+            # default encryption. If this is set, then all resources created by the
+            # training job will be encrypted with the customer-managed encryption key
+            # that you specify.
+            #
+            # [Learn how and when to use CMEK with AI Platform
+            # Training](/ai-platform/training/docs/cmek).
+            # a resource.
+          &quot;kmsKeyName&quot;: &quot;A String&quot;, # The Cloud KMS resource identifier of the customer-managed encryption key
+              # used to protect a resource, such as a training job. It has the following
+              # format:
+              # `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
+        },
+        &quot;workerCount&quot;: &quot;A String&quot;, # Optional. The number of worker replicas to use for the training job. Each
+            # replica in the cluster will be of the type specified in `worker_type`.
+            #
+            # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+            # set this value, you must also set `worker_type`.
+            #
+            # The default value is zero.
         &quot;scheduling&quot;: { # All parameters related to scheduling of training jobs. # Optional. Scheduling options for a training job.
           &quot;maxWaitTime&quot;: &quot;A String&quot;,
           &quot;maxRunningTime&quot;: &quot;A String&quot;, # Optional. The maximum job running time, expressed in seconds. The field can
@@ -4521,167 +4684,22 @@
               #   ...
               # ```
         },
-        &quot;network&quot;: &quot;A String&quot;, # Optional. The full name of the Google Compute Engine
-            # [network](/compute/docs/networks-and-firewalls#networks) to which the Job
-            # is peered. For example, projects/12345/global/networks/myVPC. Format is of
-            # the form projects/{project}/global/networks/{network}. Where {project} is a
-            # project number, as in &#x27;12345&#x27;, and {network} is network name.&quot;.
-            #
-            # Private services access must already be configured for the network. If left
-            # unspecified, the Job is not peered with any network. Learn more -
-            # Connecting Job to user network over private
-            # IP.
-        &quot;evaluatorConfig&quot;: { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
-            #
-            # You should only set `evaluatorConfig.acceleratorConfig` if
-            # `evaluatorType` is set to a Compute Engine machine type. [Learn
-            # about restrictions on accelerator configurations for
-            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-            #
-            # Set `evaluatorConfig.imageUri` only if you build a custom image for
-            # your evaluator. If `evaluatorConfig.imageUri` has not been
-            # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
-            # containers](/ai-platform/training/docs/distributed-training-containers).
-          &quot;imageUri&quot;: &quot;A String&quot;, # The Docker image to run on the replica. This image must be in Container
-              # Registry. Learn more about [configuring custom
-              # containers](/ai-platform/training/docs/distributed-training-containers).
-          &quot;containerArgs&quot;: [ # Arguments to the entrypoint command.
-              # The following rules apply for container_command and container_args:
-              # - If you do not supply command or args:
-              #   The defaults defined in the Docker image are used.
-              # - If you supply a command but no args:
-              #   The default EntryPoint and the default Cmd defined in the Docker image
-              #   are ignored. Your command is run without any arguments.
-              # - If you supply only args:
-              #   The default Entrypoint defined in the Docker image is run with the args
-              #   that you supplied.
-              # - If you supply a command and args:
-              #   The default Entrypoint and the default Cmd defined in the Docker image
-              #   are ignored. Your command is run with your args.
-              # It cannot be set if custom container image is
-              # not provided.
-              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-              # both cannot be set at the same time.
-            &quot;A String&quot;,
-          ],
-          &quot;acceleratorConfig&quot;: { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-              # [Learn about restrictions on accelerator configurations for
-              # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
-              # Note that the AcceleratorConfig can be used in both Jobs and Versions.
-              # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
-              # [accelerators for online
-              # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
-            &quot;count&quot;: &quot;A String&quot;, # The number of accelerators to attach to each machine running the job.
-            &quot;type&quot;: &quot;A String&quot;, # The type of accelerator to use.
-          },
-          &quot;tpuTfVersion&quot;: &quot;A String&quot;, # The AI Platform runtime version that includes a TensorFlow version matching
-              # the one used in the custom container. This field is required if the replica
-              # is a TPU worker that uses a custom container. Otherwise, do not specify
-              # this field. This must be a [runtime version that currently supports
-              # training with
-              # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
-              #
-              # Note that the version of TensorFlow included in a runtime version may
-              # differ from the numbering of the runtime version itself, because it may
-              # have a different [patch
-              # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
-              # In this field, you must specify the runtime version (TensorFlow minor
-              # version). For example, if your custom container runs TensorFlow `1.x.y`,
-              # specify `1.x`.
-          &quot;containerCommand&quot;: [ # The command with which the replica&#x27;s custom container is run.
-              # If provided, it will override default ENTRYPOINT of the docker image.
-              # If not provided, the docker image&#x27;s ENTRYPOINT is used.
-              # It cannot be set if custom container image is
-              # not provided.
-              # Note that this field and [TrainingInput.args] are mutually exclusive, i.e.,
-              # both cannot be set at the same time.
-            &quot;A String&quot;,
-          ],
-        },
-        &quot;useChiefInTfConfig&quot;: True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
-            # variable when training with a custom container. Defaults to `false`. [Learn
-            # more about this
-            # field.](/ai-platform/training/docs/distributed-training-details#chief-versus-master)
-            #
-            # This field has no effect for training jobs that don&#x27;t use a custom
-            # container.
-      },
-      &quot;state&quot;: &quot;A String&quot;, # Output only. The detailed state of a job.
-      &quot;jobId&quot;: &quot;A String&quot;, # Required. The user-specified id of the job.
-      &quot;endTime&quot;: &quot;A String&quot;, # Output only. When the job processing was completed.
-      &quot;startTime&quot;: &quot;A String&quot;, # Output only. When the job processing was started.
-      &quot;predictionOutput&quot;: { # Represents results of a prediction job. # The current prediction job result.
-        &quot;errorCount&quot;: &quot;A String&quot;, # The number of data instances which resulted in errors.
-        &quot;outputPath&quot;: &quot;A String&quot;, # The output Google Cloud Storage location provided at the job creation time.
-        &quot;nodeHours&quot;: 3.14, # Node hours used by the batch prediction job.
-        &quot;predictionCount&quot;: &quot;A String&quot;, # The number of generated predictions.
-      },
-      &quot;trainingOutput&quot;: { # Represents results of a training job. Output only. # The current training job result.
-        &quot;isBuiltInAlgorithmJob&quot;: True or False, # Whether this job is a built-in Algorithm job.
-        &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
-            # Only set for built-in algorithms jobs.
-          &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
-              # saves the trained model. Only set for successful jobs that don&#x27;t use
-              # hyperparameter tuning.
-          &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
-          &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
-              # trained.
-          &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
-        },
-        &quot;trials&quot;: [ # Results for individual Hyperparameter trials.
-            # Only set for hyperparameter tuning jobs.
-          { # Represents the result of a single hyperparameter tuning trial from a
-              # training job. The TrainingOutput object that is returned on successful
-              # completion of a training job with hyperparameter tuning includes a list
-              # of HyperparameterOutput objects, one for each successful trial.
-            &quot;trialId&quot;: &quot;A String&quot;, # The trial id for these results.
-            &quot;endTime&quot;: &quot;A String&quot;, # Output only. End time for the trial.
-            &quot;startTime&quot;: &quot;A String&quot;, # Output only. Start time for the trial.
-            &quot;isTrialStoppedEarly&quot;: True or False, # True if the trial is stopped early.
-            &quot;finalMetric&quot;: { # An observed value of a metric. # The final objective metric seen for this trial.
-              &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
-              &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
-            },
-            &quot;builtInAlgorithmOutput&quot;: { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
-                # Only set for trials of built-in algorithms jobs that have succeeded.
-              &quot;modelPath&quot;: &quot;A String&quot;, # The Cloud Storage path to the `model/` directory where the training job
-                  # saves the trained model. Only set for successful jobs that don&#x27;t use
-                  # hyperparameter tuning.
-              &quot;pythonVersion&quot;: &quot;A String&quot;, # Python version on which the built-in algorithm was trained.
-              &quot;runtimeVersion&quot;: &quot;A String&quot;, # AI Platform runtime version on which the built-in algorithm was
-                  # trained.
-              &quot;framework&quot;: &quot;A String&quot;, # Framework on which the built-in algorithm was trained.
-            },
-            &quot;state&quot;: &quot;A String&quot;, # Output only. The detailed state of the trial.
-            &quot;allMetrics&quot;: [ # All recorded object metrics for this trial. This field is not currently
-                # populated.
-              { # An observed value of a metric.
-                &quot;trainingStep&quot;: &quot;A String&quot;, # The global training step for this metric.
-                &quot;objectiveValue&quot;: 3.14, # The objective value at this training step.
-              },
-            ],
-            &quot;hyperparameters&quot;: { # The hyperparameters given to this trial.
-              &quot;a_key&quot;: &quot;A String&quot;,
-            },
-          },
+        &quot;scaleTier&quot;: &quot;A String&quot;, # Required. Specifies the machine types, the number of replicas for workers
+            # and parameter servers.
+        &quot;packageUris&quot;: [ # Required. The Google Cloud Storage location of the packages with
+            # the training program and any additional dependencies.
+            # The maximum number of package URIs is 100.
+          &quot;A String&quot;,
         ],
-        &quot;hyperparameterMetricTag&quot;: &quot;A String&quot;, # The TensorFlow summary tag name used for optimizing hyperparameter tuning
-            # trials. See
-            # [`HyperparameterSpec.hyperparameterMetricTag`](#HyperparameterSpec.FIELDS.hyperparameter_metric_tag)
-            # for more information. Only set for hyperparameter tuning jobs.
-        &quot;completedTrialCount&quot;: &quot;A String&quot;, # The number of hyperparameter tuning trials that completed successfully.
-            # Only set for hyperparameter tuning jobs.
-        &quot;isHyperparameterTuningJob&quot;: True or False, # Whether this job is a hyperparameter tuning job.
-        &quot;consumedMLUnits&quot;: 3.14, # The amount of ML units consumed by the job.
       },
-      &quot;createTime&quot;: &quot;A String&quot;, # Output only. When the job was created.
-      &quot;labels&quot;: { # Optional. One or more labels that you can add, to organize your jobs.
-          # Each label is a key-value pair, where both the key and the value are
-          # arbitrary strings that you supply.
-          # For more information, see the documentation on
-          # &lt;a href=&quot;/ml-engine/docs/tensorflow/resource-labels&quot;&gt;using labels&lt;/a&gt;.
-        &quot;a_key&quot;: &quot;A String&quot;,
-      },
+      &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
+          # prevent simultaneous updates of a job from overwriting each other.
+          # It is strongly suggested that systems make use of the `etag` in the
+          # read-modify-write cycle to perform job updates in order to avoid race
+          # conditions: An `etag` is returned in the response to `GetJob`, and
+          # systems are expected to put that etag in the request to `UpdateJob` to
+          # ensure that their change will be applied to the same version of the job.
+      &quot;jobId&quot;: &quot;A String&quot;, # Required. The user-specified id of the job.
     }</pre>
 </div>
 
@@ -4699,11 +4717,6 @@
     The object takes the form of:
 
 { # Request message for `SetIamPolicy` method.
-    &quot;updateMask&quot;: &quot;A String&quot;, # OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only
-        # the fields in the mask will be modified. If no mask is provided, the
-        # following default mask is used:
-        # 
-        # `paths: &quot;bindings, etag&quot;`
     &quot;policy&quot;: { # An Identity and Access Management (IAM) policy, which specifies access # REQUIRED: The complete policy to be applied to the `resource`. The size of
         # the policy is limited to a few 10s of KB. An empty policy is a
         # valid policy but certain Cloud Platform services (such as Projects)
@@ -4774,30 +4787,18 @@
         #
         # For a description of IAM and its features, see the
         # [IAM documentation](https://cloud.google.com/iam/docs/).
-      &quot;version&quot;: 42, # Specifies the format of the policy.
-          #
-          # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
-          # are rejected.
-          #
-          # Any operation that affects conditional role bindings must specify version
-          # `3`. This requirement applies to the following operations:
-          #
-          # * Getting a policy that includes a conditional role binding
-          # * Adding a conditional role binding to a policy
-          # * Changing a conditional role binding in a policy
-          # * Removing any role binding, with or without a condition, from a policy
-          #   that includes conditions
+      &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
+          # prevent simultaneous updates of a policy from overwriting each other.
+          # It is strongly suggested that systems make use of the `etag` in the
+          # read-modify-write cycle to perform policy updates in order to avoid race
+          # conditions: An `etag` is returned in the response to `getIamPolicy`, and
+          # systems are expected to put that etag in the request to `setIamPolicy` to
+          # ensure that their change will be applied to the same version of the policy.
           #
           # **Important:** If you use IAM Conditions, you must include the `etag` field
           # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
           # you to overwrite a version `3` policy with a version `1` policy, and all of
           # the conditions in the version `3` policy are lost.
-          #
-          # If a policy does not include any conditions, operations on that policy may
-          # specify any valid version or leave the field unset.
-          #
-          # To learn which resources support conditions in their IAM policies, see the
-          # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
       &quot;auditConfigs&quot;: [ # Specifies cloud audit logging configuration for this policy.
         { # Specifies the audit configuration for a service.
             # The configuration determines which permission types are logged, and what
@@ -4814,7 +4815,7 @@
             #     {
             #       &quot;audit_configs&quot;: [
             #         {
-            #           &quot;service&quot;: &quot;allServices&quot;
+            #           &quot;service&quot;: &quot;allServices&quot;,
             #           &quot;audit_log_configs&quot;: [
             #             {
             #               &quot;log_type&quot;: &quot;DATA_READ&quot;,
@@ -4823,18 +4824,18 @@
             #               ]
             #             },
             #             {
-            #               &quot;log_type&quot;: &quot;DATA_WRITE&quot;,
+            #               &quot;log_type&quot;: &quot;DATA_WRITE&quot;
             #             },
             #             {
-            #               &quot;log_type&quot;: &quot;ADMIN_READ&quot;,
+            #               &quot;log_type&quot;: &quot;ADMIN_READ&quot;
             #             }
             #           ]
             #         },
             #         {
-            #           &quot;service&quot;: &quot;sampleservice.googleapis.com&quot;
+            #           &quot;service&quot;: &quot;sampleservice.googleapis.com&quot;,
             #           &quot;audit_log_configs&quot;: [
             #             {
-            #               &quot;log_type&quot;: &quot;DATA_READ&quot;,
+            #               &quot;log_type&quot;: &quot;DATA_READ&quot;
             #             },
             #             {
             #               &quot;log_type&quot;: &quot;DATA_WRITE&quot;,
@@ -4866,27 +4867,53 @@
                 #           ]
                 #         },
                 #         {
-                #           &quot;log_type&quot;: &quot;DATA_WRITE&quot;,
+                #           &quot;log_type&quot;: &quot;DATA_WRITE&quot;
                 #         }
                 #       ]
                 #     }
                 #
                 # This enables &#x27;DATA_READ&#x27; and &#x27;DATA_WRITE&#x27; logging, while exempting
                 # jose@example.com from DATA_READ logging.
+              &quot;logType&quot;: &quot;A String&quot;, # The log type that this config enables.
               &quot;exemptedMembers&quot;: [ # Specifies the identities that do not cause logging for this type of
                   # permission.
                   # Follows the same format of Binding.members.
                 &quot;A String&quot;,
               ],
-              &quot;logType&quot;: &quot;A String&quot;, # The log type that this config enables.
             },
           ],
         },
       ],
+      &quot;version&quot;: 42, # Specifies the format of the policy.
+          #
+          # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
+          # are rejected.
+          #
+          # Any operation that affects conditional role bindings must specify version
+          # `3`. This requirement applies to the following operations:
+          #
+          # * Getting a policy that includes a conditional role binding
+          # * Adding a conditional role binding to a policy
+          # * Changing a conditional role binding in a policy
+          # * Removing any role binding, with or without a condition, from a policy
+          #   that includes conditions
+          #
+          # **Important:** If you use IAM Conditions, you must include the `etag` field
+          # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+          # you to overwrite a version `3` policy with a version `1` policy, and all of
+          # the conditions in the version `3` policy are lost.
+          #
+          # If a policy does not include any conditions, operations on that policy may
+          # specify any valid version or leave the field unset.
+          #
+          # To learn which resources support conditions in their IAM policies, see the
+          # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
       &quot;bindings&quot;: [ # Associates a list of `members` to a `role`. Optionally, may specify a
           # `condition` that determines how and when the `bindings` are applied. Each
           # of the `bindings` must contain at least one member.
         { # Associates `members` with a `role`.
+          &quot;role&quot;: &quot;A String&quot;, # Role that is assigned to `members`.
+              # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
           &quot;condition&quot;: { # Represents a textual expression in the Common Expression Language (CEL) # The condition that is associated with this binding.
               #
               # If the condition evaluates to `true`, then this binding applies to the
@@ -4929,8 +4956,6 @@
               # The exact variables and functions that may be referenced within an expression
               # are determined by the service that evaluates it. See the service
               # documentation for additional information.
-            &quot;description&quot;: &quot;A String&quot;, # Optional. Description of the expression. This is a longer text which
-                # describes the expression, e.g. when hovered over it in a UI.
             &quot;expression&quot;: &quot;A String&quot;, # Textual representation of an expression in Common Expression Language
                 # syntax.
             &quot;title&quot;: &quot;A String&quot;, # Optional. Title for the expression, i.e. a short string describing
@@ -4938,6 +4963,8 @@
                 # expression.
             &quot;location&quot;: &quot;A String&quot;, # Optional. String indicating the location of the expression for error
                 # reporting, e.g. a file name and a position in the file.
+            &quot;description&quot;: &quot;A String&quot;, # Optional. Description of the expression. This is a longer text which
+                # describes the expression, e.g. when hovered over it in a UI.
           },
           &quot;members&quot;: [ # Specifies the identities requesting access for a Cloud Platform resource.
               # `members` can have the following values:
@@ -4984,23 +5011,14 @@
               #
             &quot;A String&quot;,
           ],
-          &quot;role&quot;: &quot;A String&quot;, # Role that is assigned to `members`.
-              # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
         },
       ],
-      &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
-          # prevent simultaneous updates of a policy from overwriting each other.
-          # It is strongly suggested that systems make use of the `etag` in the
-          # read-modify-write cycle to perform policy updates in order to avoid race
-          # conditions: An `etag` is returned in the response to `getIamPolicy`, and
-          # systems are expected to put that etag in the request to `setIamPolicy` to
-          # ensure that their change will be applied to the same version of the policy.
-          #
-          # **Important:** If you use IAM Conditions, you must include the `etag` field
-          # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
-          # you to overwrite a version `3` policy with a version `1` policy, and all of
-          # the conditions in the version `3` policy are lost.
     },
+    &quot;updateMask&quot;: &quot;A String&quot;, # OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only
+        # the fields in the mask will be modified. If no mask is provided, the
+        # following default mask is used:
+        # 
+        # `paths: &quot;bindings, etag&quot;`
   }
 
   x__xgafv: string, V1 error format.
@@ -5078,30 +5096,18 @@
       #
       # For a description of IAM and its features, see the
       # [IAM documentation](https://cloud.google.com/iam/docs/).
-    &quot;version&quot;: 42, # Specifies the format of the policy.
-        #
-        # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
-        # are rejected.
-        #
-        # Any operation that affects conditional role bindings must specify version
-        # `3`. This requirement applies to the following operations:
-        #
-        # * Getting a policy that includes a conditional role binding
-        # * Adding a conditional role binding to a policy
-        # * Changing a conditional role binding in a policy
-        # * Removing any role binding, with or without a condition, from a policy
-        #   that includes conditions
+    &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
+        # prevent simultaneous updates of a policy from overwriting each other.
+        # It is strongly suggested that systems make use of the `etag` in the
+        # read-modify-write cycle to perform policy updates in order to avoid race
+        # conditions: An `etag` is returned in the response to `getIamPolicy`, and
+        # systems are expected to put that etag in the request to `setIamPolicy` to
+        # ensure that their change will be applied to the same version of the policy.
         #
         # **Important:** If you use IAM Conditions, you must include the `etag` field
         # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
         # you to overwrite a version `3` policy with a version `1` policy, and all of
         # the conditions in the version `3` policy are lost.
-        #
-        # If a policy does not include any conditions, operations on that policy may
-        # specify any valid version or leave the field unset.
-        #
-        # To learn which resources support conditions in their IAM policies, see the
-        # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
     &quot;auditConfigs&quot;: [ # Specifies cloud audit logging configuration for this policy.
       { # Specifies the audit configuration for a service.
           # The configuration determines which permission types are logged, and what
@@ -5118,7 +5124,7 @@
           #     {
           #       &quot;audit_configs&quot;: [
           #         {
-          #           &quot;service&quot;: &quot;allServices&quot;
+          #           &quot;service&quot;: &quot;allServices&quot;,
           #           &quot;audit_log_configs&quot;: [
           #             {
           #               &quot;log_type&quot;: &quot;DATA_READ&quot;,
@@ -5127,18 +5133,18 @@
           #               ]
           #             },
           #             {
-          #               &quot;log_type&quot;: &quot;DATA_WRITE&quot;,
+          #               &quot;log_type&quot;: &quot;DATA_WRITE&quot;
           #             },
           #             {
-          #               &quot;log_type&quot;: &quot;ADMIN_READ&quot;,
+          #               &quot;log_type&quot;: &quot;ADMIN_READ&quot;
           #             }
           #           ]
           #         },
           #         {
-          #           &quot;service&quot;: &quot;sampleservice.googleapis.com&quot;
+          #           &quot;service&quot;: &quot;sampleservice.googleapis.com&quot;,
           #           &quot;audit_log_configs&quot;: [
           #             {
-          #               &quot;log_type&quot;: &quot;DATA_READ&quot;,
+          #               &quot;log_type&quot;: &quot;DATA_READ&quot;
           #             },
           #             {
           #               &quot;log_type&quot;: &quot;DATA_WRITE&quot;,
@@ -5170,27 +5176,53 @@
               #           ]
               #         },
               #         {
-              #           &quot;log_type&quot;: &quot;DATA_WRITE&quot;,
+              #           &quot;log_type&quot;: &quot;DATA_WRITE&quot;
               #         }
               #       ]
               #     }
               #
               # This enables &#x27;DATA_READ&#x27; and &#x27;DATA_WRITE&#x27; logging, while exempting
               # jose@example.com from DATA_READ logging.
+            &quot;logType&quot;: &quot;A String&quot;, # The log type that this config enables.
             &quot;exemptedMembers&quot;: [ # Specifies the identities that do not cause logging for this type of
                 # permission.
                 # Follows the same format of Binding.members.
               &quot;A String&quot;,
             ],
-            &quot;logType&quot;: &quot;A String&quot;, # The log type that this config enables.
           },
         ],
       },
     ],
+    &quot;version&quot;: 42, # Specifies the format of the policy.
+        #
+        # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
+        # are rejected.
+        #
+        # Any operation that affects conditional role bindings must specify version
+        # `3`. This requirement applies to the following operations:
+        #
+        # * Getting a policy that includes a conditional role binding
+        # * Adding a conditional role binding to a policy
+        # * Changing a conditional role binding in a policy
+        # * Removing any role binding, with or without a condition, from a policy
+        #   that includes conditions
+        #
+        # **Important:** If you use IAM Conditions, you must include the `etag` field
+        # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+        # you to overwrite a version `3` policy with a version `1` policy, and all of
+        # the conditions in the version `3` policy are lost.
+        #
+        # If a policy does not include any conditions, operations on that policy may
+        # specify any valid version or leave the field unset.
+        #
+        # To learn which resources support conditions in their IAM policies, see the
+        # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
     &quot;bindings&quot;: [ # Associates a list of `members` to a `role`. Optionally, may specify a
         # `condition` that determines how and when the `bindings` are applied. Each
         # of the `bindings` must contain at least one member.
       { # Associates `members` with a `role`.
+        &quot;role&quot;: &quot;A String&quot;, # Role that is assigned to `members`.
+            # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
         &quot;condition&quot;: { # Represents a textual expression in the Common Expression Language (CEL) # The condition that is associated with this binding.
             #
             # If the condition evaluates to `true`, then this binding applies to the
@@ -5233,8 +5265,6 @@
             # The exact variables and functions that may be referenced within an expression
             # are determined by the service that evaluates it. See the service
             # documentation for additional information.
-          &quot;description&quot;: &quot;A String&quot;, # Optional. Description of the expression. This is a longer text which
-              # describes the expression, e.g. when hovered over it in a UI.
           &quot;expression&quot;: &quot;A String&quot;, # Textual representation of an expression in Common Expression Language
               # syntax.
           &quot;title&quot;: &quot;A String&quot;, # Optional. Title for the expression, i.e. a short string describing
@@ -5242,6 +5272,8 @@
               # expression.
           &quot;location&quot;: &quot;A String&quot;, # Optional. String indicating the location of the expression for error
               # reporting, e.g. a file name and a position in the file.
+          &quot;description&quot;: &quot;A String&quot;, # Optional. Description of the expression. This is a longer text which
+              # describes the expression, e.g. when hovered over it in a UI.
         },
         &quot;members&quot;: [ # Specifies the identities requesting access for a Cloud Platform resource.
             # `members` can have the following values:
@@ -5288,22 +5320,8 @@
             #
           &quot;A String&quot;,
         ],
-        &quot;role&quot;: &quot;A String&quot;, # Role that is assigned to `members`.
-            # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
       },
     ],
-    &quot;etag&quot;: &quot;A String&quot;, # `etag` is used for optimistic concurrency control as a way to help
-        # prevent simultaneous updates of a policy from overwriting each other.
-        # It is strongly suggested that systems make use of the `etag` in the
-        # read-modify-write cycle to perform policy updates in order to avoid race
-        # conditions: An `etag` is returned in the response to `getIamPolicy`, and
-        # systems are expected to put that etag in the request to `setIamPolicy` to
-        # ensure that their change will be applied to the same version of the policy.
-        #
-        # **Important:** If you use IAM Conditions, you must include the `etag` field
-        # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
-        # you to overwrite a version `3` policy with a version `1` policy, and all of
-        # the conditions in the version `3` policy are lost.
   }</pre>
 </div>