chore: regens API reference docs (#889)

diff --git a/docs/dyn/ml_v1.projects.jobs.html b/docs/dyn/ml_v1.projects.jobs.html
index d0a9ac2..a45bbe5 100644
--- a/docs/dyn/ml_v1.projects.jobs.html
+++ b/docs/dyn/ml_v1.projects.jobs.html
@@ -72,34 +72,34 @@
 
 </style>
 
-<h1><a href="ml_v1.html">Cloud Machine Learning Engine</a> . <a href="ml_v1.projects.html">projects</a> . <a href="ml_v1.projects.jobs.html">jobs</a></h1>
+<h1><a href="ml_v1.html">AI Platform Training & Prediction API</a> . <a href="ml_v1.projects.html">projects</a> . <a href="ml_v1.projects.jobs.html">jobs</a></h1>
 <h2>Instance Methods</h2>
 <p class="toc_element">
   <code><a href="#cancel">cancel(name, body=None, x__xgafv=None)</a></code></p>
 <p class="firstline">Cancels a running job.</p>
 <p class="toc_element">
-  <code><a href="#create">create(parent, body, x__xgafv=None)</a></code></p>
+  <code><a href="#create">create(parent, body=None, x__xgafv=None)</a></code></p>
 <p class="firstline">Creates a training or a batch prediction job.</p>
 <p class="toc_element">
   <code><a href="#get">get(name, x__xgafv=None)</a></code></p>
 <p class="firstline">Describes a job.</p>
 <p class="toc_element">
-  <code><a href="#getIamPolicy">getIamPolicy(resource, x__xgafv=None)</a></code></p>
+  <code><a href="#getIamPolicy">getIamPolicy(resource, options_requestedPolicyVersion=None, x__xgafv=None)</a></code></p>
 <p class="firstline">Gets the access control policy for a resource.</p>
 <p class="toc_element">
-  <code><a href="#list">list(parent, pageToken=None, x__xgafv=None, pageSize=None, filter=None)</a></code></p>
+  <code><a href="#list">list(parent, pageSize=None, pageToken=None, x__xgafv=None, filter=None)</a></code></p>
 <p class="firstline">Lists the jobs in the project.</p>
 <p class="toc_element">
   <code><a href="#list_next">list_next(previous_request, previous_response)</a></code></p>
 <p class="firstline">Retrieves the next page of results.</p>
 <p class="toc_element">
-  <code><a href="#patch">patch(name, body, updateMask=None, x__xgafv=None)</a></code></p>
+  <code><a href="#patch">patch(name, body=None, updateMask=None, x__xgafv=None)</a></code></p>
 <p class="firstline">Updates a specific job resource.</p>
 <p class="toc_element">
-  <code><a href="#setIamPolicy">setIamPolicy(resource, body, x__xgafv=None)</a></code></p>
+  <code><a href="#setIamPolicy">setIamPolicy(resource, body=None, x__xgafv=None)</a></code></p>
 <p class="firstline">Sets the access control policy on the specified resource. Replaces any</p>
 <p class="toc_element">
-  <code><a href="#testIamPermissions">testIamPermissions(resource, body, x__xgafv=None)</a></code></p>
+  <code><a href="#testIamPermissions">testIamPermissions(resource, body=None, x__xgafv=None)</a></code></p>
 <p class="firstline">Returns permissions that a caller has on the specified resource.</p>
 <h3>Method Details</h3>
 <div class="method">
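> Reviewer note: for orientation while checking the regenerated method list above, here is a minimal sketch of driving these methods from the Python client. It assumes application-default credentials and a placeholder project ID; nothing below comes from the generated reference beyond the documented method names and signatures.

```python
from googleapiclient import discovery

# Build the AI Platform Training & Prediction client (assumes
# application-default credentials are configured in the environment).
ml = discovery.build('ml', 'v1')

# List jobs in a project, following every page with list_next().
request = ml.projects().jobs().list(parent='projects/my-project')  # placeholder project
while request is not None:
    response = request.execute()
    for job in response.get('jobs', []):
        print(job['jobId'], job.get('state'))
    request = ml.projects().jobs().list_next(
        previous_request=request, previous_response=response)
```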
@@ -135,12 +135,12 @@
 </div>
 
 <div class="method">
-    <code class="details" id="create">create(parent, body, x__xgafv=None)</code>
+    <code class="details" id="create">create(parent, body=None, x__xgafv=None)</code>
   <pre>Creates a training or a batch prediction job.
 
 Args:
   parent: string, Required. The project name. (required)
-  body: object, The request body. (required)
+  body: object, The request body.
     The object takes the form of:
 
 { # Represents a training or prediction job.
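> Reviewer note: as a consolidated sketch of the request body documented across the hunks that follow, this is one way to submit a simple training job. Project, bucket, package, and module names are placeholders; the field names are taken from the schema below.

```python
from googleapiclient import discovery

ml = discovery.build('ml', 'v1')  # assumes application-default credentials

training_job = {
    'jobId': 'my_training_job_001',  # placeholder job id
    'trainingInput': {
        'scaleTier': 'BASIC',            # a predefined tier; CUSTOM requires masterType
        'region': 'us-central1',
        'runtimeVersion': '1.15',
        'pythonVersion': '3.7',          # 3.7 requires runtime 1.15+, per the version notes below
        'packageUris': ['gs://my-bucket/trainer-0.1.tar.gz'],  # placeholder package
        'pythonModule': 'trainer.task',  # placeholder module
        'jobDir': 'gs://my-bucket/output',  # placeholder bucket
    },
}
job = ml.projects().jobs().create(
    parent='projects/my-project', body=training_job).execute()
print(job['state'])
```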
@@ -154,6 +154,7 @@
           # training job. The TrainingOutput object that is returned on successful
           # completion of a training job with hyperparameter tuning includes a list
           # of HyperparameterOutput objects, one for each successful trial.
+        "startTime": "A String", # Output only. Start time for the trial.
         "hyperparameters": { # The hyperparameters given to this trial.
           "a_key": "A String",
         },
@@ -161,6 +162,7 @@
           "trainingStep": "A String", # The global training step for this metric.
           "objectiveValue": 3.14, # The objective value at this training step.
         },
+        "state": "A String", # Output only. The detailed state of the trial.
         "allMetrics": [ # All recorded object metrics for this trial. This field is not currently
             # populated.
           { # An observed value of a metric.
@@ -169,6 +171,7 @@
           },
         ],
         "isTrialStoppedEarly": True or False, # True if the trial is stopped early.
+        "endTime": "A String", # Output only. End time for the trial.
         "trialId": "A String", # The trial id for these results.
         "builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
             # Only set for trials of built-in algorithms jobs that have succeeded.
@@ -205,11 +208,6 @@
         # model. The string must use the following format:
         #
         # `"projects/YOUR_PROJECT/models/YOUR_MODEL"`
-    "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this batch
-        # prediction. If not set, AI Platform will pick the runtime version used
-        # during the CreateVersion request for this model version, or choose the
-        # latest stable version when model version information is not available
-        # such as when the model is specified by uri.
     "signatureName": "A String", # Optional. The name of the signature defined in the SavedModel to use for
         # this job. Please refer to
         # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
@@ -218,14 +216,15 @@
         # Defaults to
         # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
         # , which is "serving_default".
+    "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this batch
+        # prediction. If not set, AI Platform will pick the runtime version used
+        # during the CreateVersion request for this model version, or choose the
+        # latest stable version when model version information is not available
+        # such as when the model is specified by uri.
     "batchSize": "A String", # Optional. Number of records per batch, defaults to 64.
         # The service will buffer batch_size number of records in memory before
         # invoking one Tensorflow prediction call internally. So take the record
         # size and memory available into consideration when setting this parameter.
-    "inputPaths": [ # Required. The Cloud Storage location of the input data files. May contain
-        # <a href="/storage/docs/gsutil/addlhelp/WildcardNames">wildcards</a>.
-      "A String",
-    ],
     "maxWorkerCount": "A String", # Optional. The maximum number of workers to be used for parallel processing.
         # Defaults to 10 if not specified.
     "uri": "A String", # Use this field if you want to specify a Google Cloud Storage path for
@@ -238,154 +237,43 @@
         #
         # `"projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION"`
     "region": "A String", # Required. The Google Compute Engine region to run the prediction job in.
-        # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
+        # See the &lt;a href="/ml-engine/docs/tensorflow/regions"&gt;available regions&lt;/a&gt;
         # for AI Platform services.
+    "inputPaths": [ # Required. The Cloud Storage location of the input data files. May contain
+        # &lt;a href="/storage/docs/gsutil/addlhelp/WildcardNames"&gt;wildcards&lt;/a&gt;.
+      "A String",
+    ],
     "outputDataFormat": "A String", # Optional. Format of the output data files, defaults to JSON.
   },
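> Reviewer note: the predictionInput fields in this and the surrounding hunks combine roughly as follows. `dataFormat` and `outputPath` are assumed from parts of the full schema that fall outside this excerpt, and all names and paths are placeholders.

```python
from googleapiclient import discovery

ml = discovery.build('ml', 'v1')  # assumes application-default credentials

prediction_job = {
    'jobId': 'my_batch_prediction_001',  # placeholder job id
    'predictionInput': {
        'modelName': 'projects/my-project/models/my_model',  # placeholder model
        'dataFormat': 'JSON',         # assumed from the full schema (not in this excerpt)
        'outputDataFormat': 'JSON',
        'inputPaths': ['gs://my-bucket/inputs/*'],   # wildcards are allowed
        'outputPath': 'gs://my-bucket/predictions',  # assumed from the full schema
        'region': 'us-central1',
    },
}
job = ml.projects().jobs().create(
    parent='projects/my-project', body=prediction_job).execute()
```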
-  "trainingInput": { # Represents input parameters for a training job. When using the # Input parameters to create a training job.
-      # gcloud command to submit your training job, you can specify
-      # the input parameters as command-line arguments and/or in a YAML configuration
-      # file referenced from the --config command-line argument. For
-      # details, see the guide to
-      # <a href="/ml-engine/docs/tensorflow/training-jobs">submitting a training
-      # job</a>.
-    "workerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
-        # job's worker nodes.
-        #
-        # The supported values are the same as those described in the entry for
-        # `masterType`.
-        #
-        # This value must be consistent with the category of machine type that
-        # `masterType` uses. In other words, both must be AI Platform machine
-        # types or both must be Compute Engine machine types.
-        #
-        # If you use `cloud_tpu` for this value, see special instructions for
-        # [configuring a custom TPU
-        # machine](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
-        #
-        # This value must be present when `scaleTier` is set to `CUSTOM` and
-        # `workerCount` is greater than zero.
-    "parameterServerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
-        #
-        # You should only set `parameterServerConfig.acceleratorConfig` if
-        # `parameterServerConfigType` is set to a Compute Engine machine type. [Learn
-        # about restrictions on accelerator configurations for
-        # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-        #
-        # Set `parameterServerConfig.imageUri` only if you build a custom image for
-        # your parameter server. If `parameterServerConfig.imageUri` has not been
-        # set, AI Platform uses the value of `masterConfig.imageUri`.
-        # Learn more about [configuring custom
-        # containers](/ml-engine/docs/distributed-training-containers).
-      "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-          # [Learn about restrictions on accelerator configurations for
-          # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-        "count": "A String", # The number of accelerators to attach to each machine running the job.
-        "type": "A String", # The type of accelerator to use.
-      },
-      "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
-          # Registry. Learn more about [configuring custom
-          # containers](/ml-engine/docs/distributed-training-containers).
-    },
-    "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for training. If not
-        # set, AI Platform uses the default stable version, 1.0. For more
-        # information, see the
-        # <a href="/ml-engine/docs/runtime-version-list">runtime version list</a>
-        # and
-        # <a href="/ml-engine/docs/versioning">how to manage runtime versions</a>.
-    "scaleTier": "A String", # Required. Specifies the machine types, the number of replicas for workers
-        # and parameter servers.
+  "labels": { # Optional. One or more labels that you can add, to organize your jobs.
+      # Each label is a key-value pair, where both the key and the value are
+      # arbitrary strings that you supply.
+      # For more information, see the documentation on
+      # &lt;a href="/ml-engine/docs/tensorflow/resource-labels"&gt;using labels&lt;/a&gt;.
+    "a_key": "A String",
+  },
+  "jobId": "A String", # Required. The user-specified id of the job.
+  "state": "A String", # Output only. The detailed state of a job.
+  "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
+      # prevent simultaneous updates of a job from overwriting each other.
+      # It is strongly suggested that systems make use of the `etag` in the
+      # read-modify-write cycle to perform job updates in order to avoid race
+      # conditions: An `etag` is returned in the response to `GetJob`, and
+      # systems are expected to put that etag in the request to `UpdateJob` to
+      # ensure that their change will be applied to the same version of the job.
+  "startTime": "A String", # Output only. When the job processing was started.
+  "trainingInput": { # Represents input parameters for a training job. When using the gcloud command # Input parameters to create a training job.
+      # to submit your training job, you can specify the input parameters as
+      # command-line arguments and/or in a YAML configuration file referenced from
+      # the --config command-line argument. For details, see the guide to [submitting
+      # a training job](/ai-platform/training/docs/training-jobs).
     "masterType": "A String", # Optional. Specifies the type of virtual machine to use for your training
-        # job's master worker.
+        # job's master worker. You must specify this field when `scaleTier` is set to
+        # `CUSTOM`.
         #
+        # You can use certain Compute Engine machine types directly in this field.
         # The following types are supported:
         #
-        # <dl>
-        #   <dt>standard</dt>
-        #   <dd>
-        #   A basic machine configuration suitable for training simple models with
-        #   small to moderate datasets.
-        #   </dd>
-        #   <dt>large_model</dt>
-        #   <dd>
-        #   A machine with a lot of memory, specially suited for parameter servers
-        #   when your model is large (having many hidden layers or layers with very
-        #   large numbers of nodes).
-        #   </dd>
-        #   <dt>complex_model_s</dt>
-        #   <dd>
-        #   A machine suitable for the master and workers of the cluster when your
-        #   model requires more computation than the standard machine can handle
-        #   satisfactorily.
-        #   </dd>
-        #   <dt>complex_model_m</dt>
-        #   <dd>
-        #   A machine with roughly twice the number of cores and roughly double the
-        #   memory of <i>complex_model_s</i>.
-        #   </dd>
-        #   <dt>complex_model_l</dt>
-        #   <dd>
-        #   A machine with roughly twice the number of cores and roughly double the
-        #   memory of <i>complex_model_m</i>.
-        #   </dd>
-        #   <dt>standard_gpu</dt>
-        #   <dd>
-        #   A machine equivalent to <i>standard</i> that
-        #   also includes a single NVIDIA Tesla K80 GPU. See more about
-        #   <a href="/ml-engine/docs/tensorflow/using-gpus">using GPUs to
-        #   train your model</a>.
-        #   </dd>
-        #   <dt>complex_model_m_gpu</dt>
-        #   <dd>
-        #   A machine equivalent to <i>complex_model_m</i> that also includes
-        #   four NVIDIA Tesla K80 GPUs.
-        #   </dd>
-        #   <dt>complex_model_l_gpu</dt>
-        #   <dd>
-        #   A machine equivalent to <i>complex_model_l</i> that also includes
-        #   eight NVIDIA Tesla K80 GPUs.
-        #   </dd>
-        #   <dt>standard_p100</dt>
-        #   <dd>
-        #   A machine equivalent to <i>standard</i> that
-        #   also includes a single NVIDIA Tesla P100 GPU.
-        #   </dd>
-        #   <dt>complex_model_m_p100</dt>
-        #   <dd>
-        #   A machine equivalent to <i>complex_model_m</i> that also includes
-        #   four NVIDIA Tesla P100 GPUs.
-        #   </dd>
-        #   <dt>standard_v100</dt>
-        #   <dd>
-        #   A machine equivalent to <i>standard</i> that
-        #   also includes a single NVIDIA Tesla V100 GPU.
-        #   </dd>
-        #   <dt>large_model_v100</dt>
-        #   <dd>
-        #   A machine equivalent to <i>large_model</i> that
-        #   also includes a single NVIDIA Tesla V100 GPU.
-        #   </dd>
-        #   <dt>complex_model_m_v100</dt>
-        #   <dd>
-        #   A machine equivalent to <i>complex_model_m</i> that
-        #   also includes four NVIDIA Tesla V100 GPUs.
-        #   </dd>
-        #   <dt>complex_model_l_v100</dt>
-        #   <dd>
-        #   A machine equivalent to <i>complex_model_l</i> that
-        #   also includes eight NVIDIA Tesla V100 GPUs.
-        #   </dd>
-        #   <dt>cloud_tpu</dt>
-        #   <dd>
-        #   A TPU VM including one Cloud TPU. See more about
-        #   <a href="/ml-engine/docs/tensorflow/using-tpus">using TPUs to train
-        #   your model</a>.
-        #   </dd>
-        # </dl>
-        #
-        # You may also use certain Compute Engine machine types directly in this
-        # field. The following types are supported:
-        #
         # - `n1-standard-4`
         # - `n1-standard-8`
         # - `n1-standard-16`
@@ -404,10 +292,231 @@
         # - `n1-highcpu-64`
         # - `n1-highcpu-96`
         #
-        # See more about [using Compute Engine machine
-        # types](/ml-engine/docs/tensorflow/machine-types#compute-engine-machine-types).
+        # Learn more about [using Compute Engine machine
+        # types](/ml-engine/docs/machine-types#compute-engine-machine-types).
         #
-        # You must set this value when `scaleTier` is set to `CUSTOM`.
+        # Alternatively, you can use the following legacy machine types:
+        #
+        # - `standard`
+        # - `large_model`
+        # - `complex_model_s`
+        # - `complex_model_m`
+        # - `complex_model_l`
+        # - `standard_gpu`
+        # - `complex_model_m_gpu`
+        # - `complex_model_l_gpu`
+        # - `standard_p100`
+        # - `complex_model_m_p100`
+        # - `standard_v100`
+        # - `large_model_v100`
+        # - `complex_model_m_v100`
+        # - `complex_model_l_v100`
+        #
+        # Learn more about [using legacy machine
+        # types](/ml-engine/docs/machine-types#legacy-machine-types).
+        #
+        # Finally, if you want to use a TPU for training, specify `cloud_tpu` in this
+        # field. Learn more about the [special configuration options for training
+        # with
+        # TPUs](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
+    "jobDir": "A String", # Optional. A Google Cloud Storage path in which to store training outputs
+        # and other data needed for training. This path is passed to your TensorFlow
+        # program as the '--job-dir' command-line argument. The benefit of specifying
+        # this field is that Cloud ML validates the path for use in training.
+    "scheduling": { # All parameters related to scheduling of training jobs. # Optional. Scheduling options for a training job.
+      "maxRunningTime": "A String", # Optional. The maximum job running time, expressed in seconds. The field can
+          # contain up to nine fractional digits, terminated by `s`. By default there
+          # is no limit to the running time.
+          #
+          # If the training job is still running after this duration, AI Platform
+          # Training cancels it.
+          #
+          # For example, if you want to ensure your job runs for no more than 2 hours,
+          # set this field to `7200s` (2 hours * 60 minutes / hour * 60 seconds /
+          # minute).
+          #
+          # If you submit your training job using the `gcloud` tool, you can [provide
+          # this field in a `config.yaml`
+          # file](/ai-platform/training/docs/training-jobs#formatting_your_configuration_parameters).
+          # For example:
+          #
+          # ```yaml
+          # trainingInput:
+          #   ...
+          #   scheduling:
+          #     maxRunningTime: 7200s
+          #   ...
+          # ```
+    },
+    "parameterServerCount": "A String", # Optional. The number of parameter server replicas to use for the training
+        # job. Each replica in the cluster will be of the type specified in
+        # `parameter_server_type`.
+        #
+        # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+        # set this value, you must also set `parameter_server_type`.
+        #
+        # The default value is zero.
+    "evaluatorCount": "A String", # Optional. The number of evaluator replicas to use for the training job.
+        # Each replica in the cluster will be of the type specified in
+        # `evaluator_type`.
+        #
+        # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+        # set this value, you must also set `evaluator_type`.
+        #
+        # The default value is zero.
+    "workerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
+        # job's worker nodes.
+        #
+        # The supported values are the same as those described in the entry for
+        # `masterType`.
+        #
+        # This value must be consistent with the category of machine type that
+        # `masterType` uses. In other words, both must be Compute Engine machine
+        # types or both must be legacy machine types.
+        #
+        # If you use `cloud_tpu` for this value, see special instructions for
+        # [configuring a custom TPU
+        # machine](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
+        #
+        # This value must be present when `scaleTier` is set to `CUSTOM` and
+        # `workerCount` is greater than zero.
+    "scaleTier": "A String", # Required. Specifies the machine types, the number of replicas for workers
+        # and parameter servers.
+    "packageUris": [ # Required. The Google Cloud Storage location of the packages with
+        # the training program and any additional dependencies.
+        # The maximum number of package URIs is 100.
+      "A String",
+    ],
+    "workerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
+        #
+        # You should only set `workerConfig.acceleratorConfig` if `workerType` is set
+        # to a Compute Engine machine type. [Learn about restrictions on accelerator
+        # configurations for
+        # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+        #
+        # Set `workerConfig.imageUri` only if you build a custom image for your
+        # worker. If `workerConfig.imageUri` has not been set, AI Platform uses
+        # the value of `masterConfig.imageUri`. Learn more about [configuring custom
+        # containers](/ai-platform/training/docs/distributed-training-containers).
+      "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+          # the one used in the custom container. This field is required if the replica
+          # is a TPU worker that uses a custom container. Otherwise, do not specify
+          # this field. This must be a [runtime version that currently supports
+          # training with
+          # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+          #
+          # Note that the version of TensorFlow included in a runtime version may
+          # differ from the numbering of the runtime version itself, because it may
+          # have a different [patch
+          # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+          # In this field, you must specify the runtime version (TensorFlow minor
+          # version). For example, if your custom container runs TensorFlow `1.x.y`,
+          # specify `1.x`.
+      "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+          # [Learn about restrictions on accelerator configurations for
+          # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+          # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+          # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+          # [accelerators for online
+          # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+        "count": "A String", # The number of accelerators to attach to each machine running the job.
+        "type": "A String", # The type of accelerator to use.
+      },
+      "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
+          # Registry. Learn more about [configuring custom
+          # containers](/ai-platform/training/docs/distributed-training-containers).
+    },
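> Reviewer note: a hedged sketch of how the replica-config blocks above typically combine under a CUSTOM scale tier. The accelerator type is one example value from the accelerator enum, and the machine types and counts are arbitrary; note that counts are int64-as-string in this API.

```python
# Partial trainingInput for a CUSTOM-tier job with GPU workers.
custom_training_input = {
    'scaleTier': 'CUSTOM',
    'masterType': 'n1-standard-8',
    'workerType': 'n1-standard-8',   # must match masterType's machine-type category
    'workerCount': '2',              # int64 fields are encoded as strings
    'workerConfig': {
        'acceleratorConfig': {
            'count': '1',
            'type': 'NVIDIA_TESLA_K80',  # one example accelerator enum value
        },
    },
    # region, packageUris, pythonModule, etc. as in the earlier sketch.
}
```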
+    "evaluatorConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
+        #
+        # You should only set `evaluatorConfig.acceleratorConfig` if
+        # `evaluatorType` is set to a Compute Engine machine type. [Learn
+        # about restrictions on accelerator configurations for
+        # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+        #
+        # Set `evaluatorConfig.imageUri` only if you build a custom image for
+        # your evaluator. If `evaluatorConfig.imageUri` has not been
+        # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
+        # containers](/ai-platform/training/docs/distributed-training-containers).
+      "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+          # the one used in the custom container. This field is required if the replica
+          # is a TPU worker that uses a custom container. Otherwise, do not specify
+          # this field. This must be a [runtime version that currently supports
+          # training with
+          # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+          #
+          # Note that the version of TensorFlow included in a runtime version may
+          # differ from the numbering of the runtime version itself, because it may
+          # have a different [patch
+          # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+          # In this field, you must specify the runtime version (TensorFlow minor
+          # version). For example, if your custom container runs TensorFlow `1.x.y`,
+          # specify `1.x`.
+      "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+          # [Learn about restrictions on accelerator configurations for
+          # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+          # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+          # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+          # [accelerators for online
+          # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+        "count": "A String", # The number of accelerators to attach to each machine running the job.
+        "type": "A String", # The type of accelerator to use.
+      },
+      "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
+          # Registry. Learn more about [configuring custom
+          # containers](/ai-platform/training/docs/distributed-training-containers).
+    },
+    "useChiefInTfConfig": True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
+        # variable when training with a custom container. Defaults to `false`. [Learn
+        # more about this
+        # field.](/ai-platform/training/docs/distributed-training-details#chief-versus-master)
+        #
+        # This field has no effect for training jobs that don't use a custom
+        # container.
+    "masterConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for your master worker.
+        #
+        # You should only set `masterConfig.acceleratorConfig` if `masterType` is set
+        # to a Compute Engine machine type. Learn about [restrictions on accelerator
+        # configurations for
+        # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+        #
+        # Set `masterConfig.imageUri` only if you build a custom image. Only one of
+        # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
+        # about [configuring custom
+        # containers](/ai-platform/training/docs/distributed-training-containers).
+      "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+          # the one used in the custom container. This field is required if the replica
+          # is a TPU worker that uses a custom container. Otherwise, do not specify
+          # this field. This must be a [runtime version that currently supports
+          # training with
+          # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+          #
+          # Note that the version of TensorFlow included in a runtime version may
+          # differ from the numbering of the runtime version itself, because it may
+          # have a different [patch
+          # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+          # In this field, you must specify the runtime version (TensorFlow minor
+          # version). For example, if your custom container runs TensorFlow `1.x.y`,
+          # specify `1.x`.
+      "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+          # [Learn about restrictions on accelerator configurations for
+          # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+          # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+          # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+          # [accelerators for online
+          # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+        "count": "A String", # The number of accelerators to attach to each machine running the job.
+        "type": "A String", # The type of accelerator to use.
+      },
+      "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
+          # Registry. Learn more about [configuring custom
+          # containers](/ai-platform/training/docs/distributed-training-containers).
+    },
+    "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for training. You must
+        # either specify this field or specify `masterConfig.imageUri`.
+        #
+        # For more information, see the [runtime version
+        # list](/ai-platform/training/docs/runtime-version-list) and learn [how to
+        # manage runtime versions](/ai-platform/training/docs/versioning).
     "hyperparameters": { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
       "maxTrials": 42, # Optional. How many training trials should be attempted to optimize
           # the specified hyperparameters.
@@ -437,9 +546,9 @@
           "maxValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
               # should be unset if type is `CATEGORICAL`. This value should be integers if
               # type is `INTEGER`.
-          "categoricalValues": [ # Required if type is `CATEGORICAL`. The list of possible categories.
-            "A String",
-          ],
+          "minValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
+              # should be unset if type is `CATEGORICAL`. This value should be integers if
+              # type is INTEGER.
           "discreteValues": [ # Required if type is `DISCRETE`.
               # A list of feasible points.
               # The list should be in strictly increasing order. For instance, this
@@ -449,9 +558,9 @@
           ],
           "parameterName": "A String", # Required. The parameter name must be unique amongst all ParameterConfigs in
               # a HyperparameterSpec message. E.g., "learning_rate".
-          "minValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
-              # should be unset if type is `CATEGORICAL`. This value should be integers if
-              # type is INTEGER.
+          "categoricalValues": [ # Required if type is `CATEGORICAL`. The list of possible categories.
+            "A String",
+          ],
           "type": "A String", # Required. The type of the parameter.
           "scaleType": "A String", # Optional. How the parameter should be scaled to the hypercube.
               # Leave unset for categorical parameters.
@@ -475,26 +584,14 @@
           #
           # Defaults to one.
     },
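> Reviewer note: pulling the hyperparameter fields from the preceding hunks together, a tuning spec might look like the following. `goal` belongs to the full HyperparameterSpec but is not shown in this excerpt; parameter names and ranges are placeholders.

```python
hyperparameters = {
    'goal': 'MAXIMIZE',  # assumed from the full schema; not shown in this excerpt
    'maxTrials': 10,
    'params': [
        {   # a DOUBLE parameter searched on a log scale
            'parameterName': 'learning_rate',
            'type': 'DOUBLE',
            'minValue': 0.0001,
            'maxValue': 0.1,
            'scaleType': 'UNIT_LOG_SCALE',
        },
        {   # a CATEGORICAL parameter with an explicit value list
            'parameterName': 'optimizer',
            'type': 'CATEGORICAL',
            'categoricalValues': ['adam', 'sgd'],
        },
    ],
}
# Attached under trainingInput, e.g.:
# training_job['trainingInput']['hyperparameters'] = hyperparameters
```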
-    "region": "A String", # Required. The Google Compute Engine region to run the training job in.
-        # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
-        # for AI Platform services.
-    "args": [ # Optional. Command line arguments to pass to the program.
+    "args": [ # Optional. Command-line arguments passed to the training application when it
+        # starts. If your job uses a custom container, then the arguments are passed
+        # to the container's &lt;a class="external" target="_blank"
+        # href="https://docs.docker.com/engine/reference/builder/#entrypoint"&gt;
+        # `ENTRYPOINT`&lt;/a&gt; command.
       "A String",
     ],
     "pythonModule": "A String", # Required. The Python module name to run after installing the packages.
-    "pythonVersion": "A String", # Optional. The version of Python used in training. If not set, the default
-        # version is '2.7'. Python '3.5' is available when `runtime_version` is set
-        # to '1.4' and above. Python '2.7' works with all supported
-        # <a href="/ml-engine/docs/runtime-version-list">runtime versions</a>.
-    "jobDir": "A String", # Optional. A Google Cloud Storage path in which to store training outputs
-        # and other data needed for training. This path is passed to your TensorFlow
-        # program as the '--job-dir' command-line argument. The benefit of specifying
-        # this field is that Cloud ML validates the path for use in training.
-    "packageUris": [ # Required. The Google Cloud Storage location of the packages with
-        # the training program and any additional dependencies.
-        # The maximum number of package URIs is 100.
-      "A String",
-    ],
     "workerCount": "A String", # Optional. The number of worker replicas to use for the training job. Each
         # replica in the cluster will be of the type specified in `worker_type`.
         #
@@ -502,6 +599,87 @@
         # set this value, you must also set `worker_type`.
         #
         # The default value is zero.
+    "encryptionConfig": { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
+        # protect resources created by a training job, instead of using Google's
+        # default encryption. If this is set, then all resources created by the
+        # training job will be encrypted with the customer-managed encryption key
+        # that you specify.
+        #
+        # [Learn how and when to use CMEK with AI Platform
+        # Training](/ai-platform/training/docs/cmek).
+        # a resource.
+      "kmsKeyName": "A String", # The Cloud KMS resource identifier of the customer-managed encryption key
+          # used to protect a resource, such as a training job. It has the following
+          # format:
+          # `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
+    },
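> Reviewer note: the CMEK block above reduces to a single key field in practice; the path below just instantiates the documented format with placeholder segments.

```python
encryption_config = {
    'kmsKeyName': ('projects/my-project/locations/us-central1/'
                   'keyRings/my-key-ring/cryptoKeys/my-key'),  # all segments are placeholders
}
# Attached under trainingInput, e.g.:
# training_job['trainingInput']['encryptionConfig'] = encryption_config
```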
+    "parameterServerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
+        #
+        # You should only set `parameterServerConfig.acceleratorConfig` if
+        # `parameterServerType` is set to a Compute Engine machine type. [Learn
+        # about restrictions on accelerator configurations for
+        # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+        #
+        # Set `parameterServerConfig.imageUri` only if you build a custom image for
+        # your parameter server. If `parameterServerConfig.imageUri` has not been
+        # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
+        # containers](/ai-platform/training/docs/distributed-training-containers).
+      "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+          # the one used in the custom container. This field is required if the replica
+          # is a TPU worker that uses a custom container. Otherwise, do not specify
+          # this field. This must be a [runtime version that currently supports
+          # training with
+          # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+          #
+          # Note that the version of TensorFlow included in a runtime version may
+          # differ from the numbering of the runtime version itself, because it may
+          # have a different [patch
+          # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+          # In this field, you must specify the runtime version (TensorFlow minor
+          # version). For example, if your custom container runs TensorFlow `1.x.y`,
+          # specify `1.x`.
+      "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+          # [Learn about restrictions on accelerator configurations for
+          # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+          # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+          # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+          # [accelerators for online
+          # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+        "count": "A String", # The number of accelerators to attach to each machine running the job.
+        "type": "A String", # The type of accelerator to use.
+      },
+      "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
+          # Registry. Learn more about [configuring custom
+          # containers](/ai-platform/training/docs/distributed-training-containers).
+    },
+    "region": "A String", # Required. The region to run the training job in. See the [available
+        # regions](/ai-platform/training/docs/regions) for AI Platform Training.
+    "pythonVersion": "A String", # Optional. The version of Python used in training. You must either specify
+        # this field or specify `masterConfig.imageUri`.
+        #
+        # The following Python versions are available:
+        #
+        # * Python '3.7' is available when `runtime_version` is set to '1.15' or
+        #   later.
+        # * Python '3.5' is available when `runtime_version` is set to a version
+        #   from '1.4' to '1.14'.
+        # * Python '2.7' is available when `runtime_version` is set to '1.15' or
+        #   earlier.
+        #
+        # Read more about the Python versions available for [each runtime
+        # version](/ml-engine/docs/runtime-version-list).
+    "evaluatorType": "A String", # Optional. Specifies the type of virtual machine to use for your training
+        # job's evaluator nodes.
+        #
+        # The supported values are the same as those described in the entry for
+        # `masterType`.
+        #
+        # This value must be consistent with the category of machine type that
+        # `masterType` uses. In other words, both must be Compute Engine machine
+        # types or both must be legacy machine types.
+        #
+        # This value must be present when `scaleTier` is set to `CUSTOM` and
+        # `evaluatorCount` is greater than zero.
     "parameterServerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
         # job's parameter server.
         #
@@ -509,81 +687,12 @@
         # `master_type`.
         #
         # This value must be consistent with the category of machine type that
-        # `masterType` uses. In other words, both must be AI Platform machine
-        # types or both must be Compute Engine machine types.
+        # `masterType` uses. In other words, both must be Compute Engine machine
+        # types or both must be legacy machine types.
         #
         # This value must be present when `scaleTier` is set to `CUSTOM` and
         # `parameter_server_count` is greater than zero.
-    "workerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
-        #
-        # You should only set `workerConfig.acceleratorConfig` if `workerType` is set
-        # to a Compute Engine machine type. [Learn about restrictions on accelerator
-        # configurations for
-        # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-        #
-        # Set `workerConfig.imageUri` only if you build a custom image for your
-        # worker. If `workerConfig.imageUri` has not been set, AI Platform uses
-        # the value of `masterConfig.imageUri`. Learn more about
-        # [configuring custom
-        # containers](/ml-engine/docs/distributed-training-containers).
-      "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-          # [Learn about restrictions on accelerator configurations for
-          # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-        "count": "A String", # The number of accelerators to attach to each machine running the job.
-        "type": "A String", # The type of accelerator to use.
-      },
-      "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
-          # Registry. Learn more about [configuring custom
-          # containers](/ml-engine/docs/distributed-training-containers).
-    },
-    "maxRunningTime": "A String", # Optional. The maximum job running time. The default is 7 days.
-    "masterConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for your master worker.
-        #
-        # You should only set `masterConfig.acceleratorConfig` if `masterType` is set
-        # to a Compute Engine machine type. Learn about [restrictions on accelerator
-        # configurations for
-        # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-        #
-        # Set `masterConfig.imageUri` only if you build a custom image. Only one of
-        # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more about
-        # [configuring custom
-        # containers](/ml-engine/docs/distributed-training-containers).
-      "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-          # [Learn about restrictions on accelerator configurations for
-          # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-        "count": "A String", # The number of accelerators to attach to each machine running the job.
-        "type": "A String", # The type of accelerator to use.
-      },
-      "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
-          # Registry. Learn more about [configuring custom
-          # containers](/ml-engine/docs/distributed-training-containers).
-    },
-    "parameterServerCount": "A String", # Optional. The number of parameter server replicas to use for the training
-        # job. Each replica in the cluster will be of the type specified in
-        # `parameter_server_type`.
-        #
-        # This value can only be used when `scale_tier` is set to `CUSTOM`.If you
-        # set this value, you must also set `parameter_server_type`.
-        #
-        # The default value is zero.
   },
-  "jobId": "A String", # Required. The user-specified id of the job.
-  "labels": { # Optional. One or more labels that you can add, to organize your jobs.
-      # Each label is a key-value pair, where both the key and the value are
-      # arbitrary strings that you supply.
-      # For more information, see the documentation on
-      # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
-    "a_key": "A String",
-  },
-  "state": "A String", # Output only. The detailed state of a job.
-  "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
-      # prevent simultaneous updates of a job from overwriting each other.
-      # It is strongly suggested that systems make use of the `etag` in the
-      # read-modify-write cycle to perform job updates in order to avoid race
-      # conditions: An `etag` is returned in the response to `GetJob`, and
-      # systems are expected to put that etag in the request to `UpdateJob` to
-      # ensure that their change will be applied to the same version of the job.
-  "startTime": "A String", # Output only. When the job processing was started.
   "endTime": "A String", # Output only. When the job processing was completed.
   "predictionOutput": { # Represents results of a prediction job. # The current prediction job result.
     "outputPath": "A String", # The output Google Cloud Storage location provided at the job creation time.
@@ -613,6 +722,7 @@
             # training job. The TrainingOutput object that is returned on successful
             # completion of a training job with hyperparameter tuning includes a list
             # of HyperparameterOutput objects, one for each successful trial.
+          "startTime": "A String", # Output only. Start time for the trial.
           "hyperparameters": { # The hyperparameters given to this trial.
             "a_key": "A String",
           },
@@ -620,6 +730,7 @@
             "trainingStep": "A String", # The global training step for this metric.
             "objectiveValue": 3.14, # The objective value at this training step.
           },
+          "state": "A String", # Output only. The detailed state of the trial.
           "allMetrics": [ # All recorded object metrics for this trial. This field is not currently
               # populated.
             { # An observed value of a metric.
@@ -628,6 +739,7 @@
             },
           ],
           "isTrialStoppedEarly": True or False, # True if the trial is stopped early.
+          "endTime": "A String", # Output only. End time for the trial.
           "trialId": "A String", # The trial id for these results.
           "builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
               # Only set for trials of built-in algorithms jobs that have succeeded.
@@ -664,11 +776,6 @@
           # model. The string must use the following format:
           #
           # `"projects/YOUR_PROJECT/models/YOUR_MODEL"`
-      "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this batch
-          # prediction. If not set, AI Platform will pick the runtime version used
-          # during the CreateVersion request for this model version, or choose the
-          # latest stable version when model version information is not available
-          # such as when the model is specified by uri.
       "signatureName": "A String", # Optional. The name of the signature defined in the SavedModel to use for
           # this job. Please refer to
           # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
@@ -677,14 +784,15 @@
           # Defaults to
           # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
           # , which is "serving_default".
+      "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this batch
+          # prediction. If not set, AI Platform will pick the runtime version used
+          # during the CreateVersion request for this model version, or choose the
+          # latest stable version when model version information is not available
+          # such as when the model is specified by uri.
       "batchSize": "A String", # Optional. Number of records per batch, defaults to 64.
           # The service will buffer batch_size number of records in memory before
           # invoking one Tensorflow prediction call internally. So take the record
           # size and memory available into consideration when setting this parameter.
-      "inputPaths": [ # Required. The Cloud Storage location of the input data files. May contain
-          # <a href="/storage/docs/gsutil/addlhelp/WildcardNames">wildcards</a>.
-        "A String",
-      ],
       "maxWorkerCount": "A String", # Optional. The maximum number of workers to be used for parallel processing.
           # Defaults to 10 if not specified.
       "uri": "A String", # Use this field if you want to specify a Google Cloud Storage path for
@@ -697,154 +805,43 @@
           #
           # `"projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION"`
       "region": "A String", # Required. The Google Compute Engine region to run the prediction job in.
-          # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
+          # See the &lt;a href="/ml-engine/docs/tensorflow/regions"&gt;available regions&lt;/a&gt;
           # for AI Platform services.
+      "inputPaths": [ # Required. The Cloud Storage location of the input data files. May contain
+          # &lt;a href="/storage/docs/gsutil/addlhelp/WildcardNames"&gt;wildcards&lt;/a&gt;.
+        "A String",
+      ],
       "outputDataFormat": "A String", # Optional. Format of the output data files, defaults to JSON.
     },
-    "trainingInput": { # Represents input parameters for a training job. When using the # Input parameters to create a training job.
-        # gcloud command to submit your training job, you can specify
-        # the input parameters as command-line arguments and/or in a YAML configuration
-        # file referenced from the --config command-line argument. For
-        # details, see the guide to
-        # <a href="/ml-engine/docs/tensorflow/training-jobs">submitting a training
-        # job</a>.
-      "workerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
-          # job's worker nodes.
-          #
-          # The supported values are the same as those described in the entry for
-          # `masterType`.
-          #
-          # This value must be consistent with the category of machine type that
-          # `masterType` uses. In other words, both must be AI Platform machine
-          # types or both must be Compute Engine machine types.
-          #
-          # If you use `cloud_tpu` for this value, see special instructions for
-          # [configuring a custom TPU
-          # machine](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
-          #
-          # This value must be present when `scaleTier` is set to `CUSTOM` and
-          # `workerCount` is greater than zero.
-      "parameterServerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
-          #
-          # You should only set `parameterServerConfig.acceleratorConfig` if
-          # `parameterServerConfigType` is set to a Compute Engine machine type. [Learn
-          # about restrictions on accelerator configurations for
-          # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-          #
-          # Set `parameterServerConfig.imageUri` only if you build a custom image for
-          # your parameter server. If `parameterServerConfig.imageUri` has not been
-          # set, AI Platform uses the value of `masterConfig.imageUri`.
-          # Learn more about [configuring custom
-          # containers](/ml-engine/docs/distributed-training-containers).
-        "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-            # [Learn about restrictions on accelerator configurations for
-            # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-          "count": "A String", # The number of accelerators to attach to each machine running the job.
-          "type": "A String", # The type of accelerator to use.
-        },
-        "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
-            # Registry. Learn more about [configuring custom
-            # containers](/ml-engine/docs/distributed-training-containers).
-      },
-      "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for training. If not
-          # set, AI Platform uses the default stable version, 1.0. For more
-          # information, see the
-          # <a href="/ml-engine/docs/runtime-version-list">runtime version list</a>
-          # and
-          # <a href="/ml-engine/docs/versioning">how to manage runtime versions</a>.
-      "scaleTier": "A String", # Required. Specifies the machine types, the number of replicas for workers
-          # and parameter servers.
+    "labels": { # Optional. One or more labels that you can add, to organize your jobs.
+        # Each label is a key-value pair, where both the key and the value are
+        # arbitrary strings that you supply.
+        # For more information, see the documentation on
+        # &lt;a href="/ml-engine/docs/tensorflow/resource-labels"&gt;using labels&lt;/a&gt;.
+      "a_key": "A String",
+    },
+    "jobId": "A String", # Required. The user-specified id of the job.
+    "state": "A String", # Output only. The detailed state of a job.
+    "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
+        # prevent simultaneous updates of a job from overwriting each other.
+        # It is strongly suggested that systems make use of the `etag` in the
+        # read-modify-write cycle to perform job updates in order to avoid race
+        # conditions: An `etag` is returned in the response to `GetJob`, and
+        # systems are expected to put that etag in the request to `UpdateJob` to
+        # ensure that their change will be applied to the same version of the job.
+    "startTime": "A String", # Output only. When the job processing was started.
+    "trainingInput": { # Represents input parameters for a training job. When using the gcloud command # Input parameters to create a training job.
+        # to submit your training job, you can specify the input parameters as
+        # command-line arguments and/or in a YAML configuration file referenced from
+        # the --config command-line argument. For details, see the guide to [submitting
+        # a training job](/ai-platform/training/docs/training-jobs).
       "masterType": "A String", # Optional. Specifies the type of virtual machine to use for your training
-          # job's master worker.
+          # job's master worker. You must specify this field when `scaleTier` is set to
+          # `CUSTOM`.
           #
+          # You can use certain Compute Engine machine types directly in this field.
           # The following types are supported:
           #
-          # <dl>
-          #   <dt>standard</dt>
-          #   <dd>
-          #   A basic machine configuration suitable for training simple models with
-          #   small to moderate datasets.
-          #   </dd>
-          #   <dt>large_model</dt>
-          #   <dd>
-          #   A machine with a lot of memory, specially suited for parameter servers
-          #   when your model is large (having many hidden layers or layers with very
-          #   large numbers of nodes).
-          #   </dd>
-          #   <dt>complex_model_s</dt>
-          #   <dd>
-          #   A machine suitable for the master and workers of the cluster when your
-          #   model requires more computation than the standard machine can handle
-          #   satisfactorily.
-          #   </dd>
-          #   <dt>complex_model_m</dt>
-          #   <dd>
-          #   A machine with roughly twice the number of cores and roughly double the
-          #   memory of <i>complex_model_s</i>.
-          #   </dd>
-          #   <dt>complex_model_l</dt>
-          #   <dd>
-          #   A machine with roughly twice the number of cores and roughly double the
-          #   memory of <i>complex_model_m</i>.
-          #   </dd>
-          #   <dt>standard_gpu</dt>
-          #   <dd>
-          #   A machine equivalent to <i>standard</i> that
-          #   also includes a single NVIDIA Tesla K80 GPU. See more about
-          #   <a href="/ml-engine/docs/tensorflow/using-gpus">using GPUs to
-          #   train your model</a>.
-          #   </dd>
-          #   <dt>complex_model_m_gpu</dt>
-          #   <dd>
-          #   A machine equivalent to <i>complex_model_m</i> that also includes
-          #   four NVIDIA Tesla K80 GPUs.
-          #   </dd>
-          #   <dt>complex_model_l_gpu</dt>
-          #   <dd>
-          #   A machine equivalent to <i>complex_model_l</i> that also includes
-          #   eight NVIDIA Tesla K80 GPUs.
-          #   </dd>
-          #   <dt>standard_p100</dt>
-          #   <dd>
-          #   A machine equivalent to <i>standard</i> that
-          #   also includes a single NVIDIA Tesla P100 GPU.
-          #   </dd>
-          #   <dt>complex_model_m_p100</dt>
-          #   <dd>
-          #   A machine equivalent to <i>complex_model_m</i> that also includes
-          #   four NVIDIA Tesla P100 GPUs.
-          #   </dd>
-          #   <dt>standard_v100</dt>
-          #   <dd>
-          #   A machine equivalent to <i>standard</i> that
-          #   also includes a single NVIDIA Tesla V100 GPU.
-          #   </dd>
-          #   <dt>large_model_v100</dt>
-          #   <dd>
-          #   A machine equivalent to <i>large_model</i> that
-          #   also includes a single NVIDIA Tesla V100 GPU.
-          #   </dd>
-          #   <dt>complex_model_m_v100</dt>
-          #   <dd>
-          #   A machine equivalent to <i>complex_model_m</i> that
-          #   also includes four NVIDIA Tesla V100 GPUs.
-          #   </dd>
-          #   <dt>complex_model_l_v100</dt>
-          #   <dd>
-          #   A machine equivalent to <i>complex_model_l</i> that
-          #   also includes eight NVIDIA Tesla V100 GPUs.
-          #   </dd>
-          #   <dt>cloud_tpu</dt>
-          #   <dd>
-          #   A TPU VM including one Cloud TPU. See more about
-          #   <a href="/ml-engine/docs/tensorflow/using-tpus">using TPUs to train
-          #   your model</a>.
-          #   </dd>
-          # </dl>
-          #
-          # You may also use certain Compute Engine machine types directly in this
-          # field. The following types are supported:
-          #
           # - `n1-standard-4`
           # - `n1-standard-8`
           # - `n1-standard-16`
@@ -863,10 +860,231 @@
           # - `n1-highcpu-64`
           # - `n1-highcpu-96`
           #
-          # See more about [using Compute Engine machine
-          # types](/ml-engine/docs/tensorflow/machine-types#compute-engine-machine-types).
+          # Learn more about [using Compute Engine machine
+          # types](/ml-engine/docs/machine-types#compute-engine-machine-types).
           #
-          # You must set this value when `scaleTier` is set to `CUSTOM`.
+          # Alternatively, you can use the following legacy machine types:
+          #
+          # - `standard`
+          # - `large_model`
+          # - `complex_model_s`
+          # - `complex_model_m`
+          # - `complex_model_l`
+          # - `standard_gpu`
+          # - `complex_model_m_gpu`
+          # - `complex_model_l_gpu`
+          # - `standard_p100`
+          # - `complex_model_m_p100`
+          # - `standard_v100`
+          # - `large_model_v100`
+          # - `complex_model_m_v100`
+          # - `complex_model_l_v100`
+          #
+          # Learn more about [using legacy machine
+          # types](/ml-engine/docs/machine-types#legacy-machine-types).
+          #
+          # Finally, if you want to use a TPU for training, specify `cloud_tpu` in this
+          # field. Learn more about the [special configuration options for training
+          # with
+          # TPUs](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
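As a concrete illustration of the machine-type rules above, here is a sketch of a `trainingInput` that pairs `scaleTier: CUSTOM` with Compute Engine machine types; all names and paths are placeholders.

```python
# With CUSTOM, masterType is required, and workerType must come from
# the same category (here, Compute Engine machine types).
training_input = {
    'scaleTier': 'CUSTOM',
    'masterType': 'n1-standard-8',
    'workerType': 'n1-standard-8',
    'workerCount': '2',            # int64 fields are serialized as strings
    'region': 'us-central1',
    'pythonModule': 'trainer.task',
    'packageUris': ['gs://my-bucket/packages/trainer-0.1.tar.gz'],
    'runtimeVersion': '1.15',
}
```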
+      "jobDir": "A String", # Optional. A Google Cloud Storage path in which to store training outputs
+          # and other data needed for training. This path is passed to your TensorFlow
+          # program as the '--job-dir' command-line argument. The benefit of specifying
+          # this field is that Cloud ML validates the path for use in training.
+      "scheduling": { # All parameters related to scheduling of training jobs. # Optional. Scheduling options for a training job.
+        "maxRunningTime": "A String", # Optional. The maximum job running time, expressed in seconds. The field can
+            # contain up to nine fractional digits, terminated by `s`. By default there
+            # is no limit to the running time.
+            #
+            # If the training job is still running after this duration, AI Platform
+            # Training cancels it.
+            #
+            # For example, if you want to ensure your job runs for no more than 2 hours,
+            # set this field to `7200s` (2 hours * 60 minutes / hour * 60 seconds /
+            # minute).
+            #
+            # If you submit your training job using the `gcloud` tool, you can [provide
+            # this field in a `config.yaml`
+            # file](/ai-platform/training/docs/training-jobs#formatting_your_configuration_parameters).
+            # For example:
+            #
+            # ```yaml
+            # trainingInput:
+            #   ...
+            #   scheduling:
+            #     maxRunningTime: 7200s
+            #   ...
+            # ```
+      },
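The same limit can also be set directly in the request body rather than through a `config.yaml`; a minimal sketch:

```python
# Equivalent of the YAML above: cancel the job after 2 hours.
training_input = {
    'scheduling': {
        'maxRunningTime': '7200s',  # 2 * 60 * 60 seconds
    },
}
```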
+      "parameterServerCount": "A String", # Optional. The number of parameter server replicas to use for the training
+          # job. Each replica in the cluster will be of the type specified in
+          # `parameter_server_type`.
+          #
+          # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+          # set this value, you must also set `parameter_server_type`.
+          #
+          # The default value is zero.
+      "evaluatorCount": "A String", # Optional. The number of evaluator replicas to use for the training job.
+          # Each replica in the cluster will be of the type specified in
+          # `evaluator_type`.
+          #
+          # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+          # set this value, you must also set `evaluator_type`.
+          #
+          # The default value is zero.
+      "workerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
+          # job's worker nodes.
+          #
+          # The supported values are the same as those described in the entry for
+          # `masterType`.
+          #
+          # This value must be consistent with the category of machine type that
+          # `masterType` uses. In other words, both must be Compute Engine machine
+          # types or both must be legacy machine types.
+          #
+          # If you use `cloud_tpu` for this value, see special instructions for
+          # [configuring a custom TPU
+          # machine](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
+          #
+          # This value must be present when `scaleTier` is set to `CUSTOM` and
+          # `workerCount` is greater than zero.
+      "scaleTier": "A String", # Required. Specifies the machine types, the number of replicas for workers
+          # and parameter servers.
+      "packageUris": [ # Required. The Google Cloud Storage location of the packages with
+          # the training program and any additional dependencies.
+          # The maximum number of package URIs is 100.
+        "A String",
+      ],
+      "workerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
+          #
+          # You should only set `workerConfig.acceleratorConfig` if `workerType` is set
+          # to a Compute Engine machine type. [Learn about restrictions on accelerator
+          # configurations for
+          # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+          #
+          # Set `workerConfig.imageUri` only if you build a custom image for your
+          # worker. If `workerConfig.imageUri` has not been set, AI Platform uses
+          # the value of `masterConfig.imageUri`. Learn more about [configuring custom
+          # containers](/ai-platform/training/docs/distributed-training-containers).
+        "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+            # the one used in the custom container. This field is required if the replica
+            # is a TPU worker that uses a custom container. Otherwise, do not specify
+            # this field. This must be a [runtime version that currently supports
+            # training with
+            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+            #
+            # Note that the version of TensorFlow included in a runtime version may
+            # differ from the numbering of the runtime version itself, because it may
+            # have a different [patch
+            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+            # In this field, you must specify the runtime version (TensorFlow minor
+            # version). For example, if your custom container runs TensorFlow `1.x.y`,
+            # specify `1.x`.
+        "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+            # [Learn about restrictions on accelerator configurations for
+            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+            # [accelerators for online
+            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+          "count": "A String", # The number of accelerators to attach to each machine running the job.
+          "type": "A String", # The type of accelerator to use.
+        },
+        "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
+            # Registry. Learn more about [configuring custom
+            # containers](/ai-platform/training/docs/distributed-training-containers).
+      },
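A sketch of attaching GPUs to Compute Engine worker replicas via `workerConfig.acceleratorConfig`; the accelerator type shown is an assumed enum value drawn from the GPU docs, and the counts are placeholders.

```python
training_input = {
    'scaleTier': 'CUSTOM',
    'masterType': 'n1-standard-8',
    'workerType': 'n1-standard-8',
    'workerCount': '3',
    'workerConfig': {
        'acceleratorConfig': {
            'count': '2',                # accelerators per worker replica
            'type': 'NVIDIA_TESLA_K80',  # assumed enum value
        },
    },
}
```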
+      "evaluatorConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
+          #
+          # You should only set `evaluatorConfig.acceleratorConfig` if
+          # `evaluatorType` is set to a Compute Engine machine type. [Learn
+          # about restrictions on accelerator configurations for
+          # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+          #
+          # Set `evaluatorConfig.imageUri` only if you build a custom image for
+          # your evaluator. If `evaluatorConfig.imageUri` has not been
+          # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
+          # containers](/ai-platform/training/docs/distributed-training-containers).
+        "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+            # the one used in the custom container. This field is required if the replica
+            # is a TPU worker that uses a custom container. Otherwise, do not specify
+            # this field. This must be a [runtime version that currently supports
+            # training with
+            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+            #
+            # Note that the version of TensorFlow included in a runtime version may
+            # differ from the numbering of the runtime version itself, because it may
+            # have a different [patch
+            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+            # In this field, you must specify the runtime version (TensorFlow minor
+            # version). For example, if your custom container runs TensorFlow `1.x.y`,
+            # specify `1.x`.
+        "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+            # [Learn about restrictions on accelerator configurations for
+            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+            # [accelerators for online
+            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+          "count": "A String", # The number of accelerators to attach to each machine running the job.
+          "type": "A String", # The type of accelerator to use.
+        },
+        "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
+            # Registry. Learn more about [configuring custom
+            # containers](/ai-platform/training/docs/distributed-training-containers).
+      },
+      "useChiefInTfConfig": True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
+          # variable when training with a custom container. Defaults to `false`. [Learn
+          # more about this
+          # field.](/ai-platform/training/docs/distributed-training-details#chief-versus-master)
+          #
+          # This field has no effect for training jobs that don't use a custom
+          # container.
+      "masterConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for your master worker.
+          #
+          # You should only set `masterConfig.acceleratorConfig` if `masterType` is set
+          # to a Compute Engine machine type. Learn about [restrictions on accelerator
+          # configurations for
+          # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+          #
+          # Set `masterConfig.imageUri` only if you build a custom image. Only one of
+          # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
+          # about [configuring custom
+          # containers](/ai-platform/training/docs/distributed-training-containers).
+        "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+            # the one used in the custom container. This field is required if the replica
+            # is a TPU worker that uses a custom container. Otherwise, do not specify
+            # this field. This must be a [runtime version that currently supports
+            # training with
+            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+            #
+            # Note that the version of TensorFlow included in a runtime version may
+            # differ from the numbering of the runtime version itself, because it may
+            # have a different [patch
+            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+            # In this field, you must specify the runtime version (TensorFlow minor
+            # version). For example, if your custom container runs TensorFlow `1.x.y`,
+            # specify `1.x`.
+        "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+            # [Learn about restrictions on accelerator configurations for
+            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+            # [accelerators for online
+            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+          "count": "A String", # The number of accelerators to attach to each machine running the job.
+          "type": "A String", # The type of accelerator to use.
+        },
+        "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
+            # Registry. Learn more about [configuring custom
+            # containers](/ai-platform/training/docs/distributed-training-containers).
+      },
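Putting `masterConfig` and `useChiefInTfConfig` together, here is a sketch of a custom-container job, where `imageUri` takes the place of `runtimeVersion`; the image path is a placeholder.

```python
training_input = {
    'scaleTier': 'CUSTOM',
    'masterType': 'n1-standard-8',
    'masterConfig': {
        # Only one of imageUri and runtimeVersion should be set.
        'imageUri': 'gcr.io/my-project/my-trainer:latest',
    },
    'useChiefInTfConfig': True,  # use `chief` instead of `master` in TF_CONFIG
    'region': 'us-central1',
}
```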
+      "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for training. You must
+          # either specify this field or specify `masterConfig.imageUri`.
+          #
+          # For more information, see the [runtime version
+          # list](/ai-platform/training/docs/runtime-version-list) and learn [how to
+          # manage runtime versions](/ai-platform/training/docs/versioning).
       "hyperparameters": { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
         "maxTrials": 42, # Optional. How many training trials should be attempted to optimize
             # the specified hyperparameters.
@@ -896,9 +1114,9 @@
             "maxValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
                 # should be unset if type is `CATEGORICAL`. This value should be integers if
                 # type is `INTEGER`.
-            "categoricalValues": [ # Required if type is `CATEGORICAL`. The list of possible categories.
-              "A String",
-            ],
+            "minValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
+                # should be unset if type is `CATEGORICAL`. This value should be integers if
+                # type is INTEGER.
             "discreteValues": [ # Required if type is `DISCRETE`.
                 # A list of feasible points.
                 # The list should be in strictly increasing order. For instance, this
@@ -908,9 +1126,9 @@
             ],
             "parameterName": "A String", # Required. The parameter name must be unique amongst all ParameterConfigs in
                 # a HyperparameterSpec message. E.g., "learning_rate".
-            "minValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
-                # should be unset if type is `CATEGORICAL`. This value should be integers if
-                # type is INTEGER.
+            "categoricalValues": [ # Required if type is `CATEGORICAL`. The list of possible categories.
+              "A String",
+            ],
             "type": "A String", # Required. The type of the parameter.
             "scaleType": "A String", # Optional. How the parameter should be scaled to the hypercube.
                 # Leave unset for categorical parameters.
@@ -934,26 +1152,14 @@
             #
             # Defaults to one.
       },
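A sketch of a `hyperparameters` block tuning one DOUBLE and one CATEGORICAL parameter; the scale-type enum is an assumed value consistent with the scaling description above.

```python
hyperparameters = {
    'maxTrials': 20,
    'params': [
        {
            'parameterName': 'learning_rate',
            'type': 'DOUBLE',
            'minValue': 0.0001,
            'maxValue': 0.1,
            'scaleType': 'UNIT_LOG_SCALE',  # assumed enum value
        },
        {
            'parameterName': 'optimizer',
            'type': 'CATEGORICAL',
            'categoricalValues': ['adam', 'sgd'],  # scaleType left unset
        },
    ],
}
```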
-      "region": "A String", # Required. The Google Compute Engine region to run the training job in.
-          # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
-          # for AI Platform services.
-      "args": [ # Optional. Command line arguments to pass to the program.
+      "args": [ # Optional. Command-line arguments passed to the training application when it
+          # starts. If your job uses a custom container, then the arguments are passed
+          # to the container's &lt;a class="external" target="_blank"
+          # href="https://docs.docker.com/engine/reference/builder/#entrypoint"&gt;
+          # `ENTRYPOINT`&lt;/a&gt; command.
         "A String",
       ],
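The code-packaging fields work together; a sketch with placeholder paths, noting that for custom-container jobs the same `args` go to the image's `ENTRYPOINT` instead:

```python
training_input = {
    'packageUris': ['gs://my-bucket/packages/trainer-0.1.tar.gz'],
    'pythonModule': 'trainer.task',  # run after the packages are installed
    'args': ['--epochs=10', '--train-files=gs://my-bucket/data/train.csv'],
}
```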
       "pythonModule": "A String", # Required. The Python module name to run after installing the packages.
-      "pythonVersion": "A String", # Optional. The version of Python used in training. If not set, the default
-          # version is '2.7'. Python '3.5' is available when `runtime_version` is set
-          # to '1.4' and above. Python '2.7' works with all supported
-          # <a href="/ml-engine/docs/runtime-version-list">runtime versions</a>.
-      "jobDir": "A String", # Optional. A Google Cloud Storage path in which to store training outputs
-          # and other data needed for training. This path is passed to your TensorFlow
-          # program as the '--job-dir' command-line argument. The benefit of specifying
-          # this field is that Cloud ML validates the path for use in training.
-      "packageUris": [ # Required. The Google Cloud Storage location of the packages with
-          # the training program and any additional dependencies.
-          # The maximum number of package URIs is 100.
-        "A String",
-      ],
       "workerCount": "A String", # Optional. The number of worker replicas to use for the training job. Each
           # replica in the cluster will be of the type specified in `worker_type`.
           #
@@ -961,6 +1167,87 @@
           # set this value, you must also set `worker_type`.
           #
           # The default value is zero.
+      "encryptionConfig": { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
+          # protect resources created by a training job, instead of using Google's
+          # default encryption. If this is set, then all resources created by the
+          # training job will be encrypted with the customer-managed encryption key
+          # that you specify.
+          #
+          # [Learn how and when to use CMEK with AI Platform
+          # Training](/ai-platform/training/docs/cmek).
+          # a resource.
+        "kmsKeyName": "A String", # The Cloud KMS resource identifier of the customer-managed encryption key
+            # used to protect a resource, such as a training job. It has the following
+            # format:
+            # `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
+      },
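A sketch of enabling CMEK on a training job; every identifier in the key name is a placeholder that must match an existing Cloud KMS key.

```python
project = 'my-project'
training_input = {
    'encryptionConfig': {
        'kmsKeyName': (
            f'projects/{project}/locations/us-central1/'
            'keyRings/my-ring/cryptoKeys/my-key'
        ),
    },
}
```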
+      "parameterServerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
+          #
+          # You should only set `parameterServerConfig.acceleratorConfig` if
+          # `parameterServerType` is set to a Compute Engine machine type. [Learn
+          # about restrictions on accelerator configurations for
+          # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+          #
+          # Set `parameterServerConfig.imageUri` only if you build a custom image for
+          # your parameter server. If `parameterServerConfig.imageUri` has not been
+          # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
+          # containers](/ai-platform/training/docs/distributed-training-containers).
+        "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+            # the one used in the custom container. This field is required if the replica
+            # is a TPU worker that uses a custom container. Otherwise, do not specify
+            # this field. This must be a [runtime version that currently supports
+            # training with
+            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+            #
+            # Note that the version of TensorFlow included in a runtime version may
+            # differ from the numbering of the runtime version itself, because it may
+            # have a different [patch
+            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+            # In this field, you must specify the runtime version (TensorFlow minor
+            # version). For example, if your custom container runs TensorFlow `1.x.y`,
+            # specify `1.x`.
+        "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+            # [Learn about restrictions on accelerator configurations for
+            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+            # [accelerators for online
+            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+          "count": "A String", # The number of accelerators to attach to each machine running the job.
+          "type": "A String", # The type of accelerator to use.
+        },
+        "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
+            # Registry. Learn more about [configuring custom
+            # containers](/ai-platform/training/docs/distributed-training-containers).
+      },
+      "region": "A String", # Required. The region to run the training job in. See the [available
+          # regions](/ai-platform/training/docs/regions) for AI Platform Training.
+      "pythonVersion": "A String", # Optional. The version of Python used in training. You must either specify
+          # this field or specify `masterConfig.imageUri`.
+          #
+          # The following Python versions are available:
+          #
+          # * Python '3.7' is available when `runtime_version` is set to '1.15' or
+          #   later.
+          # * Python '3.5' is available when `runtime_version` is set to a version
+          #   from '1.4' to '1.14'.
+          # * Python '2.7' is available when `runtime_version` is set to '1.15' or
+          #   earlier.
+          #
+          # Read more about the Python versions available for [each runtime
+          # version](/ml-engine/docs/runtime-version-list).
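Pairing the two version fields according to the rules above, a minimal sketch:

```python
training_input = {
    'runtimeVersion': '1.15',
    'pythonVersion': '3.7',  # valid because runtimeVersion is 1.15 or later
}
```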
+      "evaluatorType": "A String", # Optional. Specifies the type of virtual machine to use for your training
+          # job's evaluator nodes.
+          #
+          # The supported values are the same as those described in the entry for
+          # `masterType`.
+          #
+          # This value must be consistent with the category of machine type that
+          # `masterType` uses. In other words, both must be Compute Engine machine
+          # types or both must be legacy machine types.
+          #
+          # This value must be present when `scaleTier` is set to `CUSTOM` and
+          # `evaluatorCount` is greater than zero.
       "parameterServerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
           # job's parameter server.
           #
@@ -968,81 +1255,12 @@
           # `master_type`.
           #
           # This value must be consistent with the category of machine type that
-          # `masterType` uses. In other words, both must be AI Platform machine
-          # types or both must be Compute Engine machine types.
+          # `masterType` uses. In other words, both must be Compute Engine machine
+          # types or both must be legacy machine types.
           #
           # This value must be present when `scaleTier` is set to `CUSTOM` and
           # `parameter_server_count` is greater than zero.
-      "workerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
-          #
-          # You should only set `workerConfig.acceleratorConfig` if `workerType` is set
-          # to a Compute Engine machine type. [Learn about restrictions on accelerator
-          # configurations for
-          # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-          #
-          # Set `workerConfig.imageUri` only if you build a custom image for your
-          # worker. If `workerConfig.imageUri` has not been set, AI Platform uses
-          # the value of `masterConfig.imageUri`. Learn more about
-          # [configuring custom
-          # containers](/ml-engine/docs/distributed-training-containers).
-        "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-            # [Learn about restrictions on accelerator configurations for
-            # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-          "count": "A String", # The number of accelerators to attach to each machine running the job.
-          "type": "A String", # The type of accelerator to use.
-        },
-        "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
-            # Registry. Learn more about [configuring custom
-            # containers](/ml-engine/docs/distributed-training-containers).
-      },
-      "maxRunningTime": "A String", # Optional. The maximum job running time. The default is 7 days.
-      "masterConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for your master worker.
-          #
-          # You should only set `masterConfig.acceleratorConfig` if `masterType` is set
-          # to a Compute Engine machine type. Learn about [restrictions on accelerator
-          # configurations for
-          # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-          #
-          # Set `masterConfig.imageUri` only if you build a custom image. Only one of
-          # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more about
-          # [configuring custom
-          # containers](/ml-engine/docs/distributed-training-containers).
-        "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-            # [Learn about restrictions on accelerator configurations for
-            # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-          "count": "A String", # The number of accelerators to attach to each machine running the job.
-          "type": "A String", # The type of accelerator to use.
-        },
-        "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
-            # Registry. Learn more about [configuring custom
-            # containers](/ml-engine/docs/distributed-training-containers).
-      },
-      "parameterServerCount": "A String", # Optional. The number of parameter server replicas to use for the training
-          # job. Each replica in the cluster will be of the type specified in
-          # `parameter_server_type`.
-          #
-          # This value can only be used when `scale_tier` is set to `CUSTOM`.If you
-          # set this value, you must also set `parameter_server_type`.
-          #
-          # The default value is zero.
     },
-    "jobId": "A String", # Required. The user-specified id of the job.
-    "labels": { # Optional. One or more labels that you can add, to organize your jobs.
-        # Each label is a key-value pair, where both the key and the value are
-        # arbitrary strings that you supply.
-        # For more information, see the documentation on
-        # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
-      "a_key": "A String",
-    },
-    "state": "A String", # Output only. The detailed state of a job.
-    "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
-        # prevent simultaneous updates of a job from overwriting each other.
-        # It is strongly suggested that systems make use of the `etag` in the
-        # read-modify-write cycle to perform job updates in order to avoid race
-        # conditions: An `etag` is returned in the response to `GetJob`, and
-        # systems are expected to put that etag in the request to `UpdateJob` to
-        # ensure that their change will be applied to the same version of the job.
-    "startTime": "A String", # Output only. When the job processing was started.
     "endTime": "A String", # Output only. When the job processing was completed.
     "predictionOutput": { # Represents results of a prediction job. # The current prediction job result.
       "outputPath": "A String", # The output Google Cloud Storage location provided at the job creation time.
@@ -1079,6 +1297,7 @@
             # training job. The TrainingOutput object that is returned on successful
             # completion of a training job with hyperparameter tuning includes a list
             # of HyperparameterOutput objects, one for each successful trial.
+          "startTime": "A String", # Output only. Start time for the trial.
           "hyperparameters": { # The hyperparameters given to this trial.
             "a_key": "A String",
           },
@@ -1086,6 +1305,7 @@
             "trainingStep": "A String", # The global training step for this metric.
             "objectiveValue": 3.14, # The objective value at this training step.
           },
+          "state": "A String", # Output only. The detailed state of the trial.
           "allMetrics": [ # All recorded object metrics for this trial. This field is not currently
               # populated.
             { # An observed value of a metric.
@@ -1094,6 +1314,7 @@
             },
           ],
           "isTrialStoppedEarly": True or False, # True if the trial is stopped early.
+          "endTime": "A String", # Output only. End time for the trial.
           "trialId": "A String", # The trial id for these results.
           "builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
               # Only set for trials of built-in algorithms jobs that have succeeded.
@@ -1130,11 +1351,6 @@
           # model. The string must use the following format:
           #
           # `"projects/YOUR_PROJECT/models/YOUR_MODEL"`
-      "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this batch
-          # prediction. If not set, AI Platform will pick the runtime version used
-          # during the CreateVersion request for this model version, or choose the
-          # latest stable version when model version information is not available
-          # such as when the model is specified by uri.
       "signatureName": "A String", # Optional. The name of the signature defined in the SavedModel to use for
           # this job. Please refer to
           # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
@@ -1143,14 +1359,15 @@
           # Defaults to
           # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
           # , which is "serving_default".
+      "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this batch
+          # prediction. If not set, AI Platform will pick the runtime version used
+          # during the CreateVersion request for this model version, or choose the
+          # latest stable version when model version information is not available
+          # such as when the model is specified by uri.
       "batchSize": "A String", # Optional. Number of records per batch, defaults to 64.
           # The service will buffer batch_size number of records in memory before
           # invoking one Tensorflow prediction call internally. So take the record
           # size and memory available into consideration when setting this parameter.
-      "inputPaths": [ # Required. The Cloud Storage location of the input data files. May contain
-          # <a href="/storage/docs/gsutil/addlhelp/WildcardNames">wildcards</a>.
-        "A String",
-      ],
       "maxWorkerCount": "A String", # Optional. The maximum number of workers to be used for parallel processing.
           # Defaults to 10 if not specified.
       "uri": "A String", # Use this field if you want to specify a Google Cloud Storage path for
@@ -1163,154 +1380,43 @@
           #
           # `"projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION"`
       "region": "A String", # Required. The Google Compute Engine region to run the prediction job in.
-          # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
+          # See the &lt;a href="/ml-engine/docs/tensorflow/regions"&gt;available regions&lt;/a&gt;
           # for AI Platform services.
+      "inputPaths": [ # Required. The Cloud Storage location of the input data files. May contain
+          # &lt;a href="/storage/docs/gsutil/addlhelp/WildcardNames"&gt;wildcards&lt;/a&gt;.
+        "A String",
+      ],
       "outputDataFormat": "A String", # Optional. Format of the output data files, defaults to JSON.
     },
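Tying the prediction fields together, here is a sketch of submitting a batch prediction job. Bucket, model, and project names are placeholders, and `outputPath`/`dataFormat` are assumed fields from the full PredictionInput schema (the output location corresponds to the `outputPath` reported in `predictionOutput`).

```python
from googleapiclient import discovery

ml = discovery.build('ml', 'v1')
body = {
    'jobId': 'my_batch_prediction_001',
    'predictionInput': {
        'modelName': 'projects/my-project/models/my_model',
        'inputPaths': ['gs://my-bucket/inputs/*'],  # wildcards allowed
        'outputPath': 'gs://my-bucket/outputs/',    # assumed field
        'dataFormat': 'JSON',                       # assumed field
        'region': 'us-central1',
    },
}
ml.projects().jobs().create(parent='projects/my-project', body=body).execute()
```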
-    "trainingInput": { # Represents input parameters for a training job. When using the # Input parameters to create a training job.
-        # gcloud command to submit your training job, you can specify
-        # the input parameters as command-line arguments and/or in a YAML configuration
-        # file referenced from the --config command-line argument. For
-        # details, see the guide to
-        # <a href="/ml-engine/docs/tensorflow/training-jobs">submitting a training
-        # job</a>.
-      "workerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
-          # job's worker nodes.
-          #
-          # The supported values are the same as those described in the entry for
-          # `masterType`.
-          #
-          # This value must be consistent with the category of machine type that
-          # `masterType` uses. In other words, both must be AI Platform machine
-          # types or both must be Compute Engine machine types.
-          #
-          # If you use `cloud_tpu` for this value, see special instructions for
-          # [configuring a custom TPU
-          # machine](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
-          #
-          # This value must be present when `scaleTier` is set to `CUSTOM` and
-          # `workerCount` is greater than zero.
-      "parameterServerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
-          #
-          # You should only set `parameterServerConfig.acceleratorConfig` if
-          # `parameterServerConfigType` is set to a Compute Engine machine type. [Learn
-          # about restrictions on accelerator configurations for
-          # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-          #
-          # Set `parameterServerConfig.imageUri` only if you build a custom image for
-          # your parameter server. If `parameterServerConfig.imageUri` has not been
-          # set, AI Platform uses the value of `masterConfig.imageUri`.
-          # Learn more about [configuring custom
-          # containers](/ml-engine/docs/distributed-training-containers).
-        "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-            # [Learn about restrictions on accelerator configurations for
-            # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-          "count": "A String", # The number of accelerators to attach to each machine running the job.
-          "type": "A String", # The type of accelerator to use.
-        },
-        "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
-            # Registry. Learn more about [configuring custom
-            # containers](/ml-engine/docs/distributed-training-containers).
-      },
-      "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for training. If not
-          # set, AI Platform uses the default stable version, 1.0. For more
-          # information, see the
-          # <a href="/ml-engine/docs/runtime-version-list">runtime version list</a>
-          # and
-          # <a href="/ml-engine/docs/versioning">how to manage runtime versions</a>.
-      "scaleTier": "A String", # Required. Specifies the machine types, the number of replicas for workers
-          # and parameter servers.
+    "labels": { # Optional. One or more labels that you can add, to organize your jobs.
+        # Each label is a key-value pair, where both the key and the value are
+        # arbitrary strings that you supply.
+        # For more information, see the documentation on
+        # &lt;a href="/ml-engine/docs/tensorflow/resource-labels"&gt;using labels&lt;/a&gt;.
+      "a_key": "A String",
+    },
+    "jobId": "A String", # Required. The user-specified id of the job.
+    "state": "A String", # Output only. The detailed state of a job.
+    "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
+        # prevent simultaneous updates of a job from overwriting each other.
+        # It is strongly suggested that systems make use of the `etag` in the
+        # read-modify-write cycle to perform job updates in order to avoid race
+        # conditions: An `etag` is returned in the response to `GetJob`, and
+        # systems are expected to put that etag in the request to `UpdateJob` to
+        # ensure that their change will be applied to the same version of the job.
+    "startTime": "A String", # Output only. When the job processing was started.
+    "trainingInput": { # Represents input parameters for a training job. When using the gcloud command # Input parameters to create a training job.
+        # to submit your training job, you can specify the input parameters as
+        # command-line arguments and/or in a YAML configuration file referenced from
+        # the --config command-line argument. For details, see the guide to [submitting
+        # a training job](/ai-platform/training/docs/training-jobs).
       "masterType": "A String", # Optional. Specifies the type of virtual machine to use for your training
-          # job's master worker.
+          # job's master worker. You must specify this field when `scaleTier` is set to
+          # `CUSTOM`.
           #
+          # You can use certain Compute Engine machine types directly in this field.
           # The following types are supported:
           #
-          # <dl>
-          #   <dt>standard</dt>
-          #   <dd>
-          #   A basic machine configuration suitable for training simple models with
-          #   small to moderate datasets.
-          #   </dd>
-          #   <dt>large_model</dt>
-          #   <dd>
-          #   A machine with a lot of memory, specially suited for parameter servers
-          #   when your model is large (having many hidden layers or layers with very
-          #   large numbers of nodes).
-          #   </dd>
-          #   <dt>complex_model_s</dt>
-          #   <dd>
-          #   A machine suitable for the master and workers of the cluster when your
-          #   model requires more computation than the standard machine can handle
-          #   satisfactorily.
-          #   </dd>
-          #   <dt>complex_model_m</dt>
-          #   <dd>
-          #   A machine with roughly twice the number of cores and roughly double the
-          #   memory of <i>complex_model_s</i>.
-          #   </dd>
-          #   <dt>complex_model_l</dt>
-          #   <dd>
-          #   A machine with roughly twice the number of cores and roughly double the
-          #   memory of <i>complex_model_m</i>.
-          #   </dd>
-          #   <dt>standard_gpu</dt>
-          #   <dd>
-          #   A machine equivalent to <i>standard</i> that
-          #   also includes a single NVIDIA Tesla K80 GPU. See more about
-          #   <a href="/ml-engine/docs/tensorflow/using-gpus">using GPUs to
-          #   train your model</a>.
-          #   </dd>
-          #   <dt>complex_model_m_gpu</dt>
-          #   <dd>
-          #   A machine equivalent to <i>complex_model_m</i> that also includes
-          #   four NVIDIA Tesla K80 GPUs.
-          #   </dd>
-          #   <dt>complex_model_l_gpu</dt>
-          #   <dd>
-          #   A machine equivalent to <i>complex_model_l</i> that also includes
-          #   eight NVIDIA Tesla K80 GPUs.
-          #   </dd>
-          #   <dt>standard_p100</dt>
-          #   <dd>
-          #   A machine equivalent to <i>standard</i> that
-          #   also includes a single NVIDIA Tesla P100 GPU.
-          #   </dd>
-          #   <dt>complex_model_m_p100</dt>
-          #   <dd>
-          #   A machine equivalent to <i>complex_model_m</i> that also includes
-          #   four NVIDIA Tesla P100 GPUs.
-          #   </dd>
-          #   <dt>standard_v100</dt>
-          #   <dd>
-          #   A machine equivalent to <i>standard</i> that
-          #   also includes a single NVIDIA Tesla V100 GPU.
-          #   </dd>
-          #   <dt>large_model_v100</dt>
-          #   <dd>
-          #   A machine equivalent to <i>large_model</i> that
-          #   also includes a single NVIDIA Tesla V100 GPU.
-          #   </dd>
-          #   <dt>complex_model_m_v100</dt>
-          #   <dd>
-          #   A machine equivalent to <i>complex_model_m</i> that
-          #   also includes four NVIDIA Tesla V100 GPUs.
-          #   </dd>
-          #   <dt>complex_model_l_v100</dt>
-          #   <dd>
-          #   A machine equivalent to <i>complex_model_l</i> that
-          #   also includes eight NVIDIA Tesla V100 GPUs.
-          #   </dd>
-          #   <dt>cloud_tpu</dt>
-          #   <dd>
-          #   A TPU VM including one Cloud TPU. See more about
-          #   <a href="/ml-engine/docs/tensorflow/using-tpus">using TPUs to train
-          #   your model</a>.
-          #   </dd>
-          # </dl>
-          #
-          # You may also use certain Compute Engine machine types directly in this
-          # field. The following types are supported:
-          #
           # - `n1-standard-4`
           # - `n1-standard-8`
           # - `n1-standard-16`
@@ -1329,10 +1435,231 @@
           # - `n1-highcpu-64`
           # - `n1-highcpu-96`
           #
-          # See more about [using Compute Engine machine
-          # types](/ml-engine/docs/tensorflow/machine-types#compute-engine-machine-types).
+          # Learn more about [using Compute Engine machine
+          # types](/ml-engine/docs/machine-types#compute-engine-machine-types).
           #
-          # You must set this value when `scaleTier` is set to `CUSTOM`.
+          # Alternatively, you can use the following legacy machine types:
+          #
+          # - `standard`
+          # - `large_model`
+          # - `complex_model_s`
+          # - `complex_model_m`
+          # - `complex_model_l`
+          # - `standard_gpu`
+          # - `complex_model_m_gpu`
+          # - `complex_model_l_gpu`
+          # - `standard_p100`
+          # - `complex_model_m_p100`
+          # - `standard_v100`
+          # - `large_model_v100`
+          # - `complex_model_m_v100`
+          # - `complex_model_l_v100`
+          #
+          # Learn more about [using legacy machine
+          # types](/ml-engine/docs/machine-types#legacy-machine-types).
+          #
+          # Finally, if you want to use a TPU for training, specify `cloud_tpu` in this
+          # field. Learn more about the [special configuration options for training
+          # with
+          # TPUs](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
+      "jobDir": "A String", # Optional. A Google Cloud Storage path in which to store training outputs
+          # and other data needed for training. This path is passed to your TensorFlow
+          # program as the '--job-dir' command-line argument. The benefit of specifying
+          # this field is that Cloud ML validates the path for use in training.
+      "scheduling": { # All parameters related to scheduling of training jobs. # Optional. Scheduling options for a training job.
+        "maxRunningTime": "A String", # Optional. The maximum job running time, expressed in seconds. The field can
+            # contain up to nine fractional digits, terminated by `s`. By default there
+            # is no limit to the running time.
+            #
+            # If the training job is still running after this duration, AI Platform
+            # Training cancels it.
+            #
+            # For example, if you want to ensure your job runs for no more than 2 hours,
+            # set this field to `7200s` (2 hours * 60 minutes / hour * 60 seconds /
+            # minute).
+            #
+            # If you submit your training job using the `gcloud` tool, you can [provide
+            # this field in a `config.yaml`
+            # file](/ai-platform/training/docs/training-jobs#formatting_your_configuration_parameters).
+            # For example:
+            #
+            # ```yaml
+            # trainingInput:
+            #   ...
+            #   scheduling:
+            #     maxRunningTime: 7200s
+            #   ...
+            # ```
+      },
+      "parameterServerCount": "A String", # Optional. The number of parameter server replicas to use for the training
+          # job. Each replica in the cluster will be of the type specified in
+          # `parameter_server_type`.
+          #
+          # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+          # set this value, you must also set `parameter_server_type`.
+          #
+          # The default value is zero.
+      "evaluatorCount": "A String", # Optional. The number of evaluator replicas to use for the training job.
+          # Each replica in the cluster will be of the type specified in
+          # `evaluator_type`.
+          #
+          # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+          # set this value, you must also set `evaluator_type`.
+          #
+          # The default value is zero.
+      "workerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
+          # job's worker nodes.
+          #
+          # The supported values are the same as those described in the entry for
+          # `masterType`.
+          #
+          # This value must be consistent with the category of machine type that
+          # `masterType` uses. In other words, both must be Compute Engine machine
+          # types or both must be legacy machine types.
+          #
+          # If you use `cloud_tpu` for this value, see special instructions for
+          # [configuring a custom TPU
+          # machine](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
+          #
+          # This value must be present when `scaleTier` is set to `CUSTOM` and
+          # `workerCount` is greater than zero.
+      "scaleTier": "A String", # Required. Specifies the machine types, the number of replicas for workers
+          # and parameter servers.
+      "packageUris": [ # Required. The Google Cloud Storage location of the packages with
+          # the training program and any additional dependencies.
+          # The maximum number of package URIs is 100.
+        "A String",
+      ],
+      "workerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
+          #
+          # You should only set `workerConfig.acceleratorConfig` if `workerType` is set
+          # to a Compute Engine machine type. [Learn about restrictions on accelerator
+          # configurations for
+          # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+          #
+          # Set `workerConfig.imageUri` only if you build a custom image for your
+          # worker. If `workerConfig.imageUri` has not been set, AI Platform uses
+          # the value of `masterConfig.imageUri`. Learn more about [configuring custom
+          # containers](/ai-platform/training/docs/distributed-training-containers).
+        "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+            # the one used in the custom container. This field is required if the replica
+            # is a TPU worker that uses a custom container. Otherwise, do not specify
+            # this field. This must be a [runtime version that currently supports
+            # training with
+            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+            #
+            # Note that the version of TensorFlow included in a runtime version may
+            # differ from the numbering of the runtime version itself, because it may
+            # have a different [patch
+            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+            # In this field, you must specify the runtime version (TensorFlow minor
+            # version). For example, if your custom container runs TensorFlow `1.x.y`,
+            # specify `1.x`.
+        "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+            # [Learn about restrictions on accelerator configurations for
+            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+            # [accelerators for online
+            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+          "count": "A String", # The number of accelerators to attach to each machine running the job.
+          "type": "A String", # The type of accelerator to use.
+        },
+        "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
+            # Registry. Learn more about [configuring custom
+            # containers](/ai-platform/training/docs/distributed-training-containers).
+      },
+      "evaluatorConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
+          #
+          # You should only set `evaluatorConfig.acceleratorConfig` if
+          # `evaluatorType` is set to a Compute Engine machine type. [Learn
+          # about restrictions on accelerator configurations for
+          # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+          #
+          # Set `evaluatorConfig.imageUri` only if you build a custom image for
+          # your evaluator. If `evaluatorConfig.imageUri` has not been
+          # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more about [configuring custom
+          # containers](/ai-platform/training/docs/distributed-training-containers).
+        "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+            # the one used in the custom container. This field is required if the replica
+            # is a TPU worker that uses a custom container. Otherwise, do not specify
+            # this field. This must be a [runtime version that currently supports
+            # training with
+            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+            #
+            # Note that the version of TensorFlow included in a runtime version may
+            # differ from the numbering of the runtime version itself, because it may
+            # have a different [patch
+            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+            # In this field, you must specify the runtime version (TensorFlow minor
+            # version). For example, if your custom container runs TensorFlow `1.x.y`,
+            # specify `1.x`.
+        "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+            # [Learn about restrictions on accelerator configurations for
+            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+            # [accelerators for online
+            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+          "count": "A String", # The number of accelerators to attach to each machine running the job.
+          "type": "A String", # The type of accelerator to use.
+        },
+        "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
+            # Registry. Learn more about [configuring custom
+            # containers](/ai-platform/training/docs/distributed-training-containers).
+      },
+      "useChiefInTfConfig": True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
+          # variable when training with a custom container. Defaults to `false`. [Learn
+          # more about this
+          # field.](/ai-platform/training/docs/distributed-training-details#chief-versus-master)
+          #
+          # This field has no effect for training jobs that don't use a custom
+          # container.
+      "masterConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for your master worker.
+          #
+          # You should only set `masterConfig.acceleratorConfig` if `masterType` is set
+          # to a Compute Engine machine type. Learn about [restrictions on accelerator
+          # configurations for
+          # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+          #
+          # Set `masterConfig.imageUri` only if you build a custom image. Only one of
+          # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
+          # about [configuring custom
+          # containers](/ai-platform/training/docs/distributed-training-containers).
+        "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+            # the one used in the custom container. This field is required if the replica
+            # is a TPU worker that uses a custom container. Otherwise, do not specify
+            # this field. This must be a [runtime version that currently supports
+            # training with
+            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+            #
+            # Note that the version of TensorFlow included in a runtime version may
+            # differ from the numbering of the runtime version itself, because it may
+            # have a different [patch
+            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+            # In this field, you must specify the runtime version (TensorFlow minor
+            # version). For example, if your custom container runs TensorFlow `1.x.y`,
+            # specify `1.x`.
+        "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+            # [Learn about restrictions on accelerator configurations for
+            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+            # [accelerators for online
+            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+          "count": "A String", # The number of accelerators to attach to each machine running the job.
+          "type": "A String", # The type of accelerator to use.
+        },
+        "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
+            # Registry. Learn more about [configuring custom
+            # containers](/ai-platform/training/docs/distributed-training-containers).
+      },
+      "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for training. You must
+          # either specify this field or specify `masterConfig.imageUri`.
+          #
+          # For more information, see the [runtime version
+          # list](/ai-platform/training/docs/runtime-version-list) and learn [how to
+          # manage runtime versions](/ai-platform/training/docs/versioning).
       "hyperparameters": { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
         "maxTrials": 42, # Optional. How many training trials should be attempted to optimize
             # the specified hyperparameters.
@@ -1362,9 +1689,9 @@
             "maxValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
                 # should be unset if type is `CATEGORICAL`. This value should be integers if
                 # type is `INTEGER`.
-            "categoricalValues": [ # Required if type is `CATEGORICAL`. The list of possible categories.
-              "A String",
-            ],
+            "minValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
+                # should be unset if type is `CATEGORICAL`. This value should be integers if
+                # type is INTEGER.
             "discreteValues": [ # Required if type is `DISCRETE`.
                 # A list of feasible points.
                 # The list should be in strictly increasing order. For instance, this
@@ -1374,9 +1701,9 @@
             ],
             "parameterName": "A String", # Required. The parameter name must be unique amongst all ParameterConfigs in
                 # a HyperparameterSpec message. E.g., "learning_rate".
-            "minValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
-                # should be unset if type is `CATEGORICAL`. This value should be integers if
-                # type is INTEGER.
+            "categoricalValues": [ # Required if type is `CATEGORICAL`. The list of possible categories.
+              "A String",
+            ],
             "type": "A String", # Required. The type of the parameter.
             "scaleType": "A String", # Optional. How the parameter should be scaled to the hypercube.
                 # Leave unset for categorical parameters.
@@ -1400,26 +1727,14 @@
             #
             # Defaults to one.
       },
-      "region": "A String", # Required. The Google Compute Engine region to run the training job in.
-          # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
-          # for AI Platform services.
-      "args": [ # Optional. Command line arguments to pass to the program.
+      "args": [ # Optional. Command-line arguments passed to the training application when it
+          # starts. If your job uses a custom container, then the arguments are passed
+          # to the container's &lt;a class="external" target="_blank"
+          # href="https://docs.docker.com/engine/reference/builder/#entrypoint"&gt;
+          # `ENTRYPOINT`&lt;/a&gt; command.
         "A String",
       ],
       "pythonModule": "A String", # Required. The Python module name to run after installing the packages.
-      "pythonVersion": "A String", # Optional. The version of Python used in training. If not set, the default
-          # version is '2.7'. Python '3.5' is available when `runtime_version` is set
-          # to '1.4' and above. Python '2.7' works with all supported
-          # <a href="/ml-engine/docs/runtime-version-list">runtime versions</a>.
-      "jobDir": "A String", # Optional. A Google Cloud Storage path in which to store training outputs
-          # and other data needed for training. This path is passed to your TensorFlow
-          # program as the '--job-dir' command-line argument. The benefit of specifying
-          # this field is that Cloud ML validates the path for use in training.
-      "packageUris": [ # Required. The Google Cloud Storage location of the packages with
-          # the training program and any additional dependencies.
-          # The maximum number of package URIs is 100.
-        "A String",
-      ],
       "workerCount": "A String", # Optional. The number of worker replicas to use for the training job. Each
           # replica in the cluster will be of the type specified in `worker_type`.
           #
@@ -1427,6 +1742,87 @@
           # set this value, you must also set `worker_type`.
           #
           # The default value is zero.
+      "encryptionConfig": { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
+          # protect resources created by a training job, instead of using Google's
+          # default encryption. If this is set, then all resources created by the
+          # training job will be encrypted with the customer-managed encryption key
+          # that you specify.
+          #
+          # [Learn how and when to use CMEK with AI Platform
+          # Training](/ai-platform/training/docs/cmek).
+          # a resource.
+        "kmsKeyName": "A String", # The Cloud KMS resource identifier of the customer-managed encryption key
+            # used to protect a resource, such as a training job. It has the following
+            # format:
+            # `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
+      },
+      "parameterServerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
+          #
+          # You should only set `parameterServerConfig.acceleratorConfig` if
+          # `parameterServerType` is set to a Compute Engine machine type. [Learn
+          # about restrictions on accelerator configurations for
+          # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+          #
+          # Set `parameterServerConfig.imageUri` only if you build a custom image for
+          # your parameter server. If `parameterServerConfig.imageUri` has not
+          # been set, AI Platform uses the value of `masterConfig.imageUri`.
+          # Learn more about [configuring custom
+          # containers](/ai-platform/training/docs/distributed-training-containers).
+        "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+            # the one used in the custom container. This field is required if the replica
+            # is a TPU worker that uses a custom container. Otherwise, do not specify
+            # this field. This must be a [runtime version that currently supports
+            # training with
+            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+            #
+            # Note that the version of TensorFlow included in a runtime version may
+            # differ from the numbering of the runtime version itself, because it may
+            # have a different [patch
+            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+            # In this field, you must specify the runtime version (TensorFlow minor
+            # version). For example, if your custom container runs TensorFlow `1.x.y`,
+            # specify `1.x`.
+        "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+            # [Learn about restrictions on accelerator configurations for
+            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+            # [accelerators for online
+            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+          "count": "A String", # The number of accelerators to attach to each machine running the job.
+          "type": "A String", # The type of accelerator to use.
+        },
+        "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
+            # Registry. Learn more about [configuring custom
+            # containers](/ai-platform/training/docs/distributed-training-containers).
+      },
+      "region": "A String", # Required. The region to run the training job in. See the [available
+          # regions](/ai-platform/training/docs/regions) for AI Platform Training.
+      "pythonVersion": "A String", # Optional. The version of Python used in training. You must either specify
+          # this field or specify `masterConfig.imageUri`.
+          #
+          # The following Python versions are available:
+          #
+          # * Python '3.7' is available when `runtime_version` is set to '1.15' or
+          #   later.
+          # * Python '3.5' is available when `runtime_version` is set to a version
+          #   from '1.4' to '1.14'.
+          # * Python '2.7' is available when `runtime_version` is set to '1.15' or
+          #   earlier.
+          #
+          # Read more about the Python versions available for [each runtime
+          # version](/ml-engine/docs/runtime-version-list).
+      "evaluatorType": "A String", # Optional. Specifies the type of virtual machine to use for your training
+          # job's evaluator nodes.
+          #
+          # The supported values are the same as those described in the entry for
+          # `masterType`.
+          #
+          # This value must be consistent with the category of machine type that
+          # `masterType` uses. In other words, both must be Compute Engine machine
+          # types or both must be legacy machine types.
+          #
+          # This value must be present when `scaleTier` is set to `CUSTOM` and
+          # `evaluatorCount` is greater than zero.
       "parameterServerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
           # job's parameter server.
           #
@@ -1434,81 +1830,12 @@
           # `master_type`.
           #
           # This value must be consistent with the category of machine type that
-          # `masterType` uses. In other words, both must be AI Platform machine
-          # types or both must be Compute Engine machine types.
+          # `masterType` uses. In other words, both must be Compute Engine machine
+          # types or both must be legacy machine types.
           #
           # This value must be present when `scaleTier` is set to `CUSTOM` and
           # `parameter_server_count` is greater than zero.
-      "workerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
-          #
-          # You should only set `workerConfig.acceleratorConfig` if `workerType` is set
-          # to a Compute Engine machine type. [Learn about restrictions on accelerator
-          # configurations for
-          # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-          #
-          # Set `workerConfig.imageUri` only if you build a custom image for your
-          # worker. If `workerConfig.imageUri` has not been set, AI Platform uses
-          # the value of `masterConfig.imageUri`. Learn more about
-          # [configuring custom
-          # containers](/ml-engine/docs/distributed-training-containers).
-        "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-            # [Learn about restrictions on accelerator configurations for
-            # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-          "count": "A String", # The number of accelerators to attach to each machine running the job.
-          "type": "A String", # The type of accelerator to use.
-        },
-        "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
-            # Registry. Learn more about [configuring custom
-            # containers](/ml-engine/docs/distributed-training-containers).
-      },
-      "maxRunningTime": "A String", # Optional. The maximum job running time. The default is 7 days.
-      "masterConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for your master worker.
-          #
-          # You should only set `masterConfig.acceleratorConfig` if `masterType` is set
-          # to a Compute Engine machine type. Learn about [restrictions on accelerator
-          # configurations for
-          # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-          #
-          # Set `masterConfig.imageUri` only if you build a custom image. Only one of
-          # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more about
-          # [configuring custom
-          # containers](/ml-engine/docs/distributed-training-containers).
-        "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-            # [Learn about restrictions on accelerator configurations for
-            # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-          "count": "A String", # The number of accelerators to attach to each machine running the job.
-          "type": "A String", # The type of accelerator to use.
-        },
-        "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
-            # Registry. Learn more about [configuring custom
-            # containers](/ml-engine/docs/distributed-training-containers).
-      },
-      "parameterServerCount": "A String", # Optional. The number of parameter server replicas to use for the training
-          # job. Each replica in the cluster will be of the type specified in
-          # `parameter_server_type`.
-          #
-          # This value can only be used when `scale_tier` is set to `CUSTOM`.If you
-          # set this value, you must also set `parameter_server_type`.
-          #
-          # The default value is zero.
     },
-    "jobId": "A String", # Required. The user-specified id of the job.
-    "labels": { # Optional. One or more labels that you can add, to organize your jobs.
-        # Each label is a key-value pair, where both the key and the value are
-        # arbitrary strings that you supply.
-        # For more information, see the documentation on
-        # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
-      "a_key": "A String",
-    },
-    "state": "A String", # Output only. The detailed state of a job.
-    "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
-        # prevent simultaneous updates of a job from overwriting each other.
-        # It is strongly suggested that systems make use of the `etag` in the
-        # read-modify-write cycle to perform job updates in order to avoid race
-        # conditions: An `etag` is returned in the response to `GetJob`, and
-        # systems are expected to put that etag in the request to `UpdateJob` to
-        # ensure that their change will be applied to the same version of the job.
-    "startTime": "A String", # Output only. When the job processing was started.
     "endTime": "A String", # Output only. When the job processing was completed.
     "predictionOutput": { # Represents results of a prediction job. # The current prediction job result.
       "outputPath": "A String", # The output Google Cloud Storage location provided at the job creation time.
@@ -1521,7 +1848,7 @@
 </div>
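For orientation, here is a minimal sketch of calling `create` with the google-api-python-client library, using only fields documented above. The project ID, bucket paths, module name, and KMS key are illustrative placeholders, and Application Default Credentials are assumed to be configured:

```python
from googleapiclient import discovery

# Build a client for the AI Platform Training & Prediction API
# (assumes Application Default Credentials are available).
ml = discovery.build('ml', 'v1')

# A minimal training job request body; every concrete value below
# (project, bucket, module, key ring) is a hypothetical placeholder.
job_body = {
    'jobId': 'census_training_001',
    'trainingInput': {
        'scaleTier': 'CUSTOM',
        'masterType': 'n1-standard-8',  # one of the supported Compute Engine types
        'region': 'us-central1',
        'packageUris': ['gs://my-bucket/trainer-0.1.tar.gz'],
        'pythonModule': 'trainer.task',
        'runtimeVersion': '1.15',
        'pythonVersion': '3.7',         # available with runtime version 1.15+
        'jobDir': 'gs://my-bucket/census/output',
        # Optional CMEK protection, per `encryptionConfig` above.
        'encryptionConfig': {
            'kmsKeyName': ('projects/my-project/locations/us-central1/'
                           'keyRings/my-ring/cryptoKeys/my-key'),
        },
    },
}

job = ml.projects().jobs().create(
    parent='projects/my-project', body=job_body).execute()
print(job.get('jobId'), job.get('state'))
```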
 
 <div class="method">
-    <code class="details" id="getIamPolicy">getIamPolicy(resource, x__xgafv=None)</code>
+    <code class="details" id="getIamPolicy">getIamPolicy(resource, options_requestedPolicyVersion=None, x__xgafv=None)</code>
   <pre>Gets the access control policy for a resource.
 Returns an empty policy if the resource exists and does not have a policy
 set.
@@ -1529,6 +1856,14 @@
 Args:
   resource: string, REQUIRED: The resource for which the policy is being requested.
 See the operation documentation for the appropriate value for this field. (required)
+  options_requestedPolicyVersion: integer, Optional. The policy format version to be returned.
+
+Valid values are 0, 1, and 3. Requests specifying an invalid value will be
+rejected.
+
+Requests for policies with any conditional bindings must specify version 3.
+Policies without any conditional bindings may specify any valid value or
+leave the field unset.
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -1537,53 +1872,72 @@
 Returns:
   An object of the form:
 
-    { # Defines an Identity and Access Management (IAM) policy. It is used to
-      # specify access control policies for Cloud Platform resources.
+    { # An Identity and Access Management (IAM) policy, which specifies access
+      # controls for Google Cloud resources.
       #
       #
-      # A `Policy` consists of a list of `bindings`. A `binding` binds a list of
-      # `members` to a `role`, where the members can be user accounts, Google groups,
-      # Google domains, and service accounts. A `role` is a named list of permissions
-      # defined by IAM.
+      # A `Policy` is a collection of `bindings`. A `binding` binds one or more
+      # `members` to a single `role`. Members can be user accounts, service accounts,
+      # Google groups, and domains (such as G Suite). A `role` is a named list of
+      # permissions; each `role` can be an IAM predefined role or a user-created
+      # custom role.
       #
-      # **JSON Example**
+      # Optionally, a `binding` can specify a `condition`, which is a logical
+      # expression that allows access to a resource only if the expression evaluates
+      # to `true`. A condition can add constraints based on attributes of the
+      # request, the resource, or both.
+      #
+      # **JSON example:**
       #
       #     {
       #       "bindings": [
       #         {
-      #           "role": "roles/owner",
+      #           "role": "roles/resourcemanager.organizationAdmin",
       #           "members": [
       #             "user:mike@example.com",
       #             "group:admins@example.com",
       #             "domain:google.com",
-      #             "serviceAccount:my-other-app@appspot.gserviceaccount.com"
+      #             "serviceAccount:my-project-id@appspot.gserviceaccount.com"
       #           ]
       #         },
       #         {
-      #           "role": "roles/viewer",
-      #           "members": ["user:sean@example.com"]
+      #           "role": "roles/resourcemanager.organizationViewer",
+      #           "members": ["user:eve@example.com"],
+      #           "condition": {
+      #             "title": "expirable access",
+      #             "description": "Does not grant access after Sep 2020",
+      #             "expression": "request.time &lt; timestamp('2020-10-01T00:00:00.000Z')",
+      #           }
       #         }
-      #       ]
+      #       ],
+      #       "etag": "BwWWja0YfJA=",
+      #       "version": 3
       #     }
       #
-      # **YAML Example**
+      # **YAML example:**
       #
       #     bindings:
       #     - members:
       #       - user:mike@example.com
       #       - group:admins@example.com
       #       - domain:google.com
-      #       - serviceAccount:my-other-app@appspot.gserviceaccount.com
-      #       role: roles/owner
+      #       - serviceAccount:my-project-id@appspot.gserviceaccount.com
+      #       role: roles/resourcemanager.organizationAdmin
       #     - members:
-      #       - user:sean@example.com
-      #       role: roles/viewer
-      #
+      #       - user:eve@example.com
+      #       role: roles/resourcemanager.organizationViewer
+      #       condition:
+      #         title: expirable access
+      #         description: Does not grant access after Sep 2020
+      #         expression: request.time &lt; timestamp('2020-10-01T00:00:00.000Z')
+      #     etag: BwWWja0YfJA=
+      #     version: 3
       #
       # For a description of IAM and its features, see the
-      # [IAM developer's guide](https://cloud.google.com/iam/docs).
-    "bindings": [ # Associates a list of `members` to a `role`.
-        # `bindings` with no members will result in an error.
+      # [IAM documentation](https://cloud.google.com/iam/docs/).
+    "bindings": [ # Associates a list of `members` to a `role`. Optionally, may specify a
+        # `condition` that determines how and when the `bindings` are applied. Each
+        # of the `bindings` must contain at least one member.
       { # Associates `members` with a `role`.
         "role": "A String", # Role that is assigned to `members`.
             # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
@@ -1597,7 +1951,7 @@
             #    who is authenticated with a Google account or a service account.
             #
             # * `user:{emailid}`: An email address that represents a specific Google
-            #    account. For example, `alice@gmail.com` .
+            #    account. For example, `alice@example.com`.
             #
             #
             # * `serviceAccount:{emailid}`: An email address that represents a service
@@ -1606,46 +1960,78 @@
             # * `group:{emailid}`: An email address that represents a Google group.
             #    For example, `admins@example.com`.
             #
+            # * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
+            #    identifier) representing a user that has been recently deleted. For
+            #    example, `alice@example.com?uid=123456789012345678901`. If the user is
+            #    recovered, this value reverts to `user:{emailid}` and the recovered user
+            #    retains the role in the binding.
+            #
+            # * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus
+            #    unique identifier) representing a service account that has been recently
+            #    deleted. For example,
+            #    `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.
+            #    If the service account is undeleted, this value reverts to
+            #    `serviceAccount:{emailid}` and the undeleted service account retains the
+            #    role in the binding.
+            #
+            # * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique
+            #    identifier) representing a Google group that has been recently
+            #    deleted. For example, `admins@example.com?uid=123456789012345678901`. If
+            #    the group is recovered, this value reverts to `group:{emailid}` and the
+            #    recovered group retains the role in the binding.
+            #
             #
             # * `domain:{domain}`: The G Suite domain (primary) that represents all the
             #    users of that domain. For example, `google.com` or `example.com`.
             #
           "A String",
         ],
-        "condition": { # Represents an expression text. Example: # The condition that is associated with this binding.
+        "condition": { # Represents a textual expression in the Common Expression Language (CEL) # The condition that is associated with this binding.
             # NOTE: An unsatisfied condition will not allow user access via current
             # binding. Different bindings, including their conditions, are examined
             # independently.
+            # syntax. CEL is a C-like expression language. The syntax and semantics of CEL
+            # are documented at https://github.com/google/cel-spec.
             #
-            #     title: "User account presence"
-            #     description: "Determines whether the request has a user account"
-            #     expression: "size(request.user) > 0"
-          "description": "A String", # An optional description of the expression. This is a longer text which
+            # Example (Comparison):
+            #
+            #     title: "Summary size limit"
+            #     description: "Determines if a summary is less than 100 chars"
+            #     expression: "document.summary.size() &lt; 100"
+            #
+            # Example (Equality):
+            #
+            #     title: "Requestor is owner"
+            #     description: "Determines if requestor is the document owner"
+            #     expression: "document.owner == request.auth.claims.email"
+            #
+            # Example (Logic):
+            #
+            #     title: "Public documents"
+            #     description: "Determine whether the document should be publicly visible"
+            #     expression: "document.type != 'private' &amp;&amp; document.type != 'internal'"
+            #
+            # Example (Data Manipulation):
+            #
+            #     title: "Notification string"
+            #     description: "Create a notification string with a timestamp."
+            #     expression: "'New message received at ' + string(document.create_time)"
+            #
+            # The exact variables and functions that may be referenced within an expression
+            # are determined by the service that evaluates it. See the service
+            # documentation for additional information.
+          "description": "A String", # Optional. Description of the expression. This is a longer text which
               # describes the expression, e.g. when hovered over it in a UI.
-          "expression": "A String", # Textual representation of an expression in
-              # Common Expression Language syntax.
-              #
-              # The application context of the containing message determines which
-              # well-known feature set of CEL is supported.
-          "location": "A String", # An optional string indicating the location of the expression for error
+          "expression": "A String", # Textual representation of an expression in Common Expression Language
+              # syntax.
+          "location": "A String", # Optional. String indicating the location of the expression for error
               # reporting, e.g. a file name and a position in the file.
-          "title": "A String", # An optional title for the expression, i.e. a short string describing
+          "title": "A String", # Optional. Title for the expression, i.e. a short string describing
               # its purpose. This can be used e.g. in UIs which allow to enter the
               # expression.
         },
       },
     ],
-    "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
-        # prevent simultaneous updates of a policy from overwriting each other.
-        # It is strongly suggested that systems make use of the `etag` in the
-        # read-modify-write cycle to perform policy updates in order to avoid race
-        # conditions: An `etag` is returned in the response to `getIamPolicy`, and
-        # systems are expected to put that etag in the request to `setIamPolicy` to
-        # ensure that their change will be applied to the same version of the policy.
-        #
-        # If no `etag` is provided in the call to `setIamPolicy`, then the existing
-        # policy is overwritten blindly.
-    "version": 42, # Deprecated.
     "auditConfigs": [ # Specifies cloud audit logging configuration for this policy.
       { # Specifies the audit configuration for a service.
           # The configuration determines which permission types are logged, and what
@@ -1667,7 +2053,7 @@
           #             {
           #               "log_type": "DATA_READ",
           #               "exempted_members": [
-          #                 "user:foo@gmail.com"
+          #                 "user:jose@example.com"
           #               ]
           #             },
           #             {
@@ -1679,7 +2065,7 @@
           #           ]
           #         },
           #         {
-          #           "service": "fooservice.googleapis.com"
+          #           "service": "sampleservice.googleapis.com"
           #           "audit_log_configs": [
           #             {
           #               "log_type": "DATA_READ",
@@ -1687,7 +2073,7 @@
           #             {
           #               "log_type": "DATA_WRITE",
           #               "exempted_members": [
-          #                 "user:bar@gmail.com"
+          #                 "user:aliya@example.com"
           #               ]
           #             }
           #           ]
@@ -1695,9 +2081,9 @@
           #       ]
           #     }
           #
-          # For fooservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ
-          # logging. It also exempts foo@gmail.com from DATA_READ logging, and
-          # bar@gmail.com from DATA_WRITE logging.
+          # For sampleservice, this policy enables DATA_READ, DATA_WRITE, and
+          # ADMIN_READ logging. It also exempts jose@example.com from DATA_READ
+          # logging and aliya@example.com from DATA_WRITE logging.
         "auditLogConfigs": [ # The configuration for logging of each type of permission.
           { # Provides the configuration for logging a type of permissions.
               # Example:
@@ -1707,7 +2093,7 @@
               #         {
               #           "log_type": "DATA_READ",
               #           "exempted_members": [
-              #             "user:foo@gmail.com"
+              #             "user:jose@example.com"
               #           ]
               #         },
               #         {
@@ -1717,7 +2103,7 @@
               #     }
               #
               # This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting
-              # foo@gmail.com from DATA_READ logging.
+              # jose@example.com from DATA_READ logging.
             "exemptedMembers": [ # Specifies the identities that do not cause logging for this type of
                 # permission.
                # Follows the same format as Binding.members.
@@ -1731,11 +2117,44 @@
             # `allServices` is a special value that covers all services.
       },
     ],
+    "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
+        # prevent simultaneous updates of a policy from overwriting each other.
+        # It is strongly suggested that systems make use of the `etag` in the
+        # read-modify-write cycle to perform policy updates in order to avoid race
+        # conditions: An `etag` is returned in the response to `getIamPolicy`, and
+        # systems are expected to put that etag in the request to `setIamPolicy` to
+        # ensure that their change will be applied to the same version of the policy.
+        #
+        # **Important:** If you use IAM Conditions, you must include the `etag` field
+        # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+        # you to overwrite a version `3` policy with a version `1` policy, and all of
+        # the conditions in the version `3` policy are lost.
+    "version": 42, # Specifies the format of the policy.
+        #
+        # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
+        # are rejected.
+        #
+        # Any operation that affects conditional role bindings must specify version
+        # `3`. This requirement applies to the following operations:
+        #
+        # * Getting a policy that includes a conditional role binding
+        # * Adding a conditional role binding to a policy
+        # * Changing a conditional role binding in a policy
+        # * Removing any role binding, with or without a condition, from a policy
+        #   that includes conditions
+        #
+        # **Important:** If you use IAM Conditions, you must include the `etag` field
+        # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+        # you to overwrite a version `3` policy with a version `1` policy, and all of
+        # the conditions in the version `3` policy are lost.
+        #
+        # If a policy does not include any conditions, operations on that policy may
+        # specify any valid version or leave the field unset.
   }</pre>
 </div>
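As a sketch of the read-modify-write cycle described in the `etag` notes above, the following fetches a version 3 policy (so conditional bindings are returned), edits it, and writes it back with `setIamPolicy`. The resource name, member, and role are placeholders, and the `{'policy': ...}` request shape is the standard IAM request convention assumed here:

```python
from googleapiclient import discovery

ml = discovery.build('ml', 'v1')
resource = 'projects/my-project/jobs/census_training_001'  # placeholder

# Read: request version 3 so conditional role bindings are included.
policy = ml.projects().jobs().getIamPolicy(
    resource=resource, options_requestedPolicyVersion=3).execute()

# Modify: add a binding locally. The returned `etag` stays in the policy
# so the write below fails cleanly if someone else updated it meanwhile.
policy.setdefault('bindings', []).append({
    'role': 'roles/ml.viewer',
    'members': ['user:eve@example.com'],
})
policy['version'] = 3

# Write: send the whole policy, including its etag, back to the service.
policy = ml.projects().jobs().setIamPolicy(
    resource=resource, body={'policy': policy}).execute()
```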
 
 <div class="method">
-    <code class="details" id="list">list(parent, pageToken=None, x__xgafv=None, pageSize=None, filter=None)</code>
+    <code class="details" id="list">list(parent, pageSize=None, pageToken=None, x__xgafv=None, filter=None)</code>
   <pre>Lists the jobs in the project.
 
 If there are no jobs that match the request parameters, the list
@@ -1743,6 +2162,11 @@
 
 Args:
   parent: string, Required. The name of the project for which to list jobs. (required)
+  pageSize: integer, Optional. The number of jobs to retrieve per "page" of results. If there
+are more remaining results than this number, the response message will
+contain a valid value in the `next_page_token` field.
+
+The default value is 20, and the maximum page size is 100.
   pageToken: string, Optional. A page token to request the next page of results.
 
 You get the token from the `next_page_token` field of the response from
@@ -1751,20 +2175,15 @@
     Allowed values
       1 - v1 error format
       2 - v2 error format
-  pageSize: integer, Optional. The number of jobs to retrieve per "page" of results. If there
-are more remaining results than this number, the response message will
-contain a valid value in the `next_page_token` field.
-
-The default value is 20, and the maximum page size is 100.
   filter: string, Optional. Specifies the subset of jobs to retrieve.
 You can filter on the value of one or more attributes of the job object.
 For example, retrieve jobs with a job identifier that starts with 'census':
-<p><code>gcloud ai-platform jobs list --filter='jobId:census*'</code>
-<p>List all failed jobs with names that start with 'rnn':
-<p><code>gcloud ai-platform jobs list --filter='jobId:rnn*
-AND state:FAILED'</code>
-<p>For more examples, see the guide to
-<a href="/ml-engine/docs/tensorflow/monitor-training">monitoring jobs</a>.
+&lt;p&gt;&lt;code&gt;gcloud ai-platform jobs list --filter='jobId:census*'&lt;/code&gt;
+&lt;p&gt;List all failed jobs with names that start with 'rnn':
+&lt;p&gt;&lt;code&gt;gcloud ai-platform jobs list --filter='jobId:rnn*
+AND state:FAILED'&lt;/code&gt;
+&lt;p&gt;For more examples, see the guide to
+&lt;a href="/ml-engine/docs/tensorflow/monitor-training"&gt;monitoring jobs&lt;/a&gt;.
 
 Returns:
   An object of the form:
@@ -1784,6 +2203,7 @@
                 # training job. The TrainingOutput object that is returned on successful
                 # completion of a training job with hyperparameter tuning includes a list
                 # of HyperparameterOutput objects, one for each successful trial.
+              "startTime": "A String", # Output only. Start time for the trial.
               "hyperparameters": { # The hyperparameters given to this trial.
                 "a_key": "A String",
               },
@@ -1791,6 +2211,7 @@
                 "trainingStep": "A String", # The global training step for this metric.
                 "objectiveValue": 3.14, # The objective value at this training step.
               },
+              "state": "A String", # Output only. The detailed state of the trial.
               "allMetrics": [ # All recorded object metrics for this trial. This field is not currently
                   # populated.
                 { # An observed value of a metric.
@@ -1799,6 +2220,7 @@
                 },
               ],
               "isTrialStoppedEarly": True or False, # True if the trial is stopped early.
+              "endTime": "A String", # Output only. End time for the trial.
               "trialId": "A String", # The trial id for these results.
               "builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
                   # Only set for trials of built-in algorithms jobs that have succeeded.
@@ -1835,11 +2257,6 @@
               # model. The string must use the following format:
               #
               # `"projects/YOUR_PROJECT/models/YOUR_MODEL"`
-          "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this batch
-              # prediction. If not set, AI Platform will pick the runtime version used
-              # during the CreateVersion request for this model version, or choose the
-              # latest stable version when model version information is not available
-              # such as when the model is specified by uri.
           "signatureName": "A String", # Optional. The name of the signature defined in the SavedModel to use for
               # this job. Please refer to
               # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
@@ -1848,14 +2265,15 @@
               # Defaults to
               # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
               # , which is "serving_default".
+          "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this batch
+              # prediction. If not set, AI Platform will pick the runtime version used
+              # during the CreateVersion request for this model version, or choose the
+              # latest stable version when model version information is not available
+              # such as when the model is specified by uri.
           "batchSize": "A String", # Optional. Number of records per batch, defaults to 64.
              # The service will buffer batch_size records in memory before invoking
              # one TensorFlow prediction call internally, so take the record size and
              # available memory into consideration when setting this parameter.
-          "inputPaths": [ # Required. The Cloud Storage location of the input data files. May contain
-              # <a href="/storage/docs/gsutil/addlhelp/WildcardNames">wildcards</a>.
-            "A String",
-          ],
           "maxWorkerCount": "A String", # Optional. The maximum number of workers to be used for parallel processing.
               # Defaults to 10 if not specified.
           "uri": "A String", # Use this field if you want to specify a Google Cloud Storage path for
@@ -1868,154 +2286,43 @@
               #
               # `"projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION"`
           "region": "A String", # Required. The Google Compute Engine region to run the prediction job in.
-              # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
+              # See the &lt;a href="/ml-engine/docs/tensorflow/regions"&gt;available regions&lt;/a&gt;
               # for AI Platform services.
+          "inputPaths": [ # Required. The Cloud Storage location of the input data files. May contain
+              # &lt;a href="/storage/docs/gsutil/addlhelp/WildcardNames"&gt;wildcards&lt;/a&gt;.
+            "A String",
+          ],
           "outputDataFormat": "A String", # Optional. Format of the output data files, defaults to JSON.
         },
-        "trainingInput": { # Represents input parameters for a training job. When using the # Input parameters to create a training job.
-            # gcloud command to submit your training job, you can specify
-            # the input parameters as command-line arguments and/or in a YAML configuration
-            # file referenced from the --config command-line argument. For
-            # details, see the guide to
-            # <a href="/ml-engine/docs/tensorflow/training-jobs">submitting a training
-            # job</a>.
-          "workerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
-              # job's worker nodes.
-              #
-              # The supported values are the same as those described in the entry for
-              # `masterType`.
-              #
-              # This value must be consistent with the category of machine type that
-              # `masterType` uses. In other words, both must be AI Platform machine
-              # types or both must be Compute Engine machine types.
-              #
-              # If you use `cloud_tpu` for this value, see special instructions for
-              # [configuring a custom TPU
-              # machine](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
-              #
-              # This value must be present when `scaleTier` is set to `CUSTOM` and
-              # `workerCount` is greater than zero.
-          "parameterServerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
-              #
-              # You should only set `parameterServerConfig.acceleratorConfig` if
-              # `parameterServerConfigType` is set to a Compute Engine machine type. [Learn
-              # about restrictions on accelerator configurations for
-              # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-              #
-              # Set `parameterServerConfig.imageUri` only if you build a custom image for
-              # your parameter server. If `parameterServerConfig.imageUri` has not been
-              # set, AI Platform uses the value of `masterConfig.imageUri`.
-              # Learn more about [configuring custom
-              # containers](/ml-engine/docs/distributed-training-containers).
-            "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-                # [Learn about restrictions on accelerator configurations for
-                # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-              "count": "A String", # The number of accelerators to attach to each machine running the job.
-              "type": "A String", # The type of accelerator to use.
-            },
-            "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
-                # Registry. Learn more about [configuring custom
-                # containers](/ml-engine/docs/distributed-training-containers).
-          },
-          "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for training. If not
-              # set, AI Platform uses the default stable version, 1.0. For more
-              # information, see the
-              # <a href="/ml-engine/docs/runtime-version-list">runtime version list</a>
-              # and
-              # <a href="/ml-engine/docs/versioning">how to manage runtime versions</a>.
-          "scaleTier": "A String", # Required. Specifies the machine types, the number of replicas for workers
-              # and parameter servers.
+        "labels": { # Optional. One or more labels that you can add, to organize your jobs.
+            # Each label is a key-value pair, where both the key and the value are
+            # arbitrary strings that you supply.
+            # For more information, see the documentation on
+            # &lt;a href="/ml-engine/docs/tensorflow/resource-labels"&gt;using labels&lt;/a&gt;.
+          "a_key": "A String",
+        },
+        "jobId": "A String", # Required. The user-specified id of the job.
+        "state": "A String", # Output only. The detailed state of a job.
+        "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
+            # prevent simultaneous updates of a job from overwriting each other.
+            # It is strongly suggested that systems make use of the `etag` in the
+            # read-modify-write cycle to perform job updates in order to avoid race
+            # conditions: An `etag` is returned in the response to `GetJob`, and
+            # systems are expected to put that etag in the request to `UpdateJob` to
+            # ensure that their change will be applied to the same version of the job.
+        "startTime": "A String", # Output only. When the job processing was started.
+        "trainingInput": { # Represents input parameters for a training job. When using the gcloud command # Input parameters to create a training job.
+            # to submit your training job, you can specify the input parameters as
+            # command-line arguments and/or in a YAML configuration file referenced from
+            # the --config command-line argument. For details, see the guide to [submitting
+            # a training job](/ai-platform/training/docs/training-jobs).
           "masterType": "A String", # Optional. Specifies the type of virtual machine to use for your training
-              # job's master worker.
+              # job's master worker. You must specify this field when `scaleTier` is set to
+              # `CUSTOM`.
               #
+              # You can use certain Compute Engine machine types directly in this field.
               # The following types are supported:
               #
-              # <dl>
-              #   <dt>standard</dt>
-              #   <dd>
-              #   A basic machine configuration suitable for training simple models with
-              #   small to moderate datasets.
-              #   </dd>
-              #   <dt>large_model</dt>
-              #   <dd>
-              #   A machine with a lot of memory, specially suited for parameter servers
-              #   when your model is large (having many hidden layers or layers with very
-              #   large numbers of nodes).
-              #   </dd>
-              #   <dt>complex_model_s</dt>
-              #   <dd>
-              #   A machine suitable for the master and workers of the cluster when your
-              #   model requires more computation than the standard machine can handle
-              #   satisfactorily.
-              #   </dd>
-              #   <dt>complex_model_m</dt>
-              #   <dd>
-              #   A machine with roughly twice the number of cores and roughly double the
-              #   memory of <i>complex_model_s</i>.
-              #   </dd>
-              #   <dt>complex_model_l</dt>
-              #   <dd>
-              #   A machine with roughly twice the number of cores and roughly double the
-              #   memory of <i>complex_model_m</i>.
-              #   </dd>
-              #   <dt>standard_gpu</dt>
-              #   <dd>
-              #   A machine equivalent to <i>standard</i> that
-              #   also includes a single NVIDIA Tesla K80 GPU. See more about
-              #   <a href="/ml-engine/docs/tensorflow/using-gpus">using GPUs to
-              #   train your model</a>.
-              #   </dd>
-              #   <dt>complex_model_m_gpu</dt>
-              #   <dd>
-              #   A machine equivalent to <i>complex_model_m</i> that also includes
-              #   four NVIDIA Tesla K80 GPUs.
-              #   </dd>
-              #   <dt>complex_model_l_gpu</dt>
-              #   <dd>
-              #   A machine equivalent to <i>complex_model_l</i> that also includes
-              #   eight NVIDIA Tesla K80 GPUs.
-              #   </dd>
-              #   <dt>standard_p100</dt>
-              #   <dd>
-              #   A machine equivalent to <i>standard</i> that
-              #   also includes a single NVIDIA Tesla P100 GPU.
-              #   </dd>
-              #   <dt>complex_model_m_p100</dt>
-              #   <dd>
-              #   A machine equivalent to <i>complex_model_m</i> that also includes
-              #   four NVIDIA Tesla P100 GPUs.
-              #   </dd>
-              #   <dt>standard_v100</dt>
-              #   <dd>
-              #   A machine equivalent to <i>standard</i> that
-              #   also includes a single NVIDIA Tesla V100 GPU.
-              #   </dd>
-              #   <dt>large_model_v100</dt>
-              #   <dd>
-              #   A machine equivalent to <i>large_model</i> that
-              #   also includes a single NVIDIA Tesla V100 GPU.
-              #   </dd>
-              #   <dt>complex_model_m_v100</dt>
-              #   <dd>
-              #   A machine equivalent to <i>complex_model_m</i> that
-              #   also includes four NVIDIA Tesla V100 GPUs.
-              #   </dd>
-              #   <dt>complex_model_l_v100</dt>
-              #   <dd>
-              #   A machine equivalent to <i>complex_model_l</i> that
-              #   also includes eight NVIDIA Tesla V100 GPUs.
-              #   </dd>
-              #   <dt>cloud_tpu</dt>
-              #   <dd>
-              #   A TPU VM including one Cloud TPU. See more about
-              #   <a href="/ml-engine/docs/tensorflow/using-tpus">using TPUs to train
-              #   your model</a>.
-              #   </dd>
-              # </dl>
-              #
-              # You may also use certain Compute Engine machine types directly in this
-              # field. The following types are supported:
-              #
               # - `n1-standard-4`
               # - `n1-standard-8`
               # - `n1-standard-16`
@@ -2034,10 +2341,231 @@
               # - `n1-highcpu-64`
               # - `n1-highcpu-96`
               #
-              # See more about [using Compute Engine machine
-              # types](/ml-engine/docs/tensorflow/machine-types#compute-engine-machine-types).
+              # Learn more about [using Compute Engine machine
+              # types](/ml-engine/docs/machine-types#compute-engine-machine-types).
               #
-              # You must set this value when `scaleTier` is set to `CUSTOM`.
+              # Alternatively, you can use the following legacy machine types:
+              #
+              # - `standard`
+              # - `large_model`
+              # - `complex_model_s`
+              # - `complex_model_m`
+              # - `complex_model_l`
+              # - `standard_gpu`
+              # - `complex_model_m_gpu`
+              # - `complex_model_l_gpu`
+              # - `standard_p100`
+              # - `complex_model_m_p100`
+              # - `standard_v100`
+              # - `large_model_v100`
+              # - `complex_model_m_v100`
+              # - `complex_model_l_v100`
+              #
+              # Learn more about [using legacy machine
+              # types](/ml-engine/docs/machine-types#legacy-machine-types).
+              #
+              # Finally, if you want to use a TPU for training, specify `cloud_tpu` in this
+              # field. Learn more about the [special configuration options for training
+              # with
+              # TPUs](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
+          "jobDir": "A String", # Optional. A Google Cloud Storage path in which to store training outputs
+              # and other data needed for training. This path is passed to your TensorFlow
+              # program as the '--job-dir' command-line argument. The benefit of specifying
+              # this field is that Cloud ML validates the path for use in training.
+          "scheduling": { # All parameters related to scheduling of training jobs. # Optional. Scheduling options for a training job.
+            "maxRunningTime": "A String", # Optional. The maximum job running time, expressed in seconds. The field can
+                # contain up to nine fractional digits, terminated by `s`. By default there
+                # is no limit to the running time.
+                #
+                # If the training job is still running after this duration, AI Platform
+                # Training cancels it.
+                #
+                # For example, if you want to ensure your job runs for no more than 2 hours,
+                # set this field to `7200s` (2 hours * 60 minutes / hour * 60 seconds /
+                # minute).
+                #
+                # If you submit your training job using the `gcloud` tool, you can [provide
+                # this field in a `config.yaml`
+                # file](/ai-platform/training/docs/training-jobs#formatting_your_configuration_parameters).
+                # For example:
+                #
+                # ```yaml
+                # trainingInput:
+                #   ...
+                #   scheduling:
+                #     maxRunningTime: 7200s
+                #   ...
+                # ```
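+                #
+                # Or, equivalently, as part of the request body passed to
+                # `create` from the Python client (a sketch; the job ID is a
+                # placeholder):
+                #
+                # ```python
+                # job = {
+                #     'jobId': 'my_job',
+                #     'trainingInput': {
+                #         # ...required fields such as scaleTier and region...
+                #         'scheduling': {'maxRunningTime': '7200s'},
+                #     },
+                # }
+                # ```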
+          },
+          "parameterServerCount": "A String", # Optional. The number of parameter server replicas to use for the training
+              # job. Each replica in the cluster will be of the type specified in
+              # `parameter_server_type`.
+              #
+              # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+              # set this value, you must also set `parameter_server_type`.
+              #
+              # The default value is zero.
+          "evaluatorCount": "A String", # Optional. The number of evaluator replicas to use for the training job.
+              # Each replica in the cluster will be of the type specified in
+              # `evaluator_type`.
+              #
+              # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+              # set this value, you must also set `evaluator_type`.
+              #
+              # The default value is zero.
+          "workerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
+              # job's worker nodes.
+              #
+              # The supported values are the same as those described in the entry for
+              # `masterType`.
+              #
+              # This value must be consistent with the category of machine type that
+              # `masterType` uses. In other words, both must be Compute Engine machine
+              # types or both must be legacy machine types.
+              #
+              # If you use `cloud_tpu` for this value, see special instructions for
+              # [configuring a custom TPU
+              # machine](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
+              #
+              # This value must be present when `scaleTier` is set to `CUSTOM` and
+              # `workerCount` is greater than zero.
+          "scaleTier": "A String", # Required. Specifies the machine types, the number of replicas for workers
+              # and parameter servers.
+          "packageUris": [ # Required. The Google Cloud Storage location of the packages with
+              # the training program and any additional dependencies.
+              # The maximum number of package URIs is 100.
+            "A String",
+          ],
+          "workerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
+              #
+              # You should only set `workerConfig.acceleratorConfig` if `workerType` is set
+              # to a Compute Engine machine type. [Learn about restrictions on accelerator
+              # configurations for
+              # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+              #
+              # Set `workerConfig.imageUri` only if you build a custom image for your
+              # worker. If `workerConfig.imageUri` has not been set, AI Platform uses
+              # the value of `masterConfig.imageUri`. Learn more about [configuring custom
+              # containers](/ai-platform/training/docs/distributed-training-containers).
+            "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+                # the one used in the custom container. This field is required if the replica
+                # is a TPU worker that uses a custom container. Otherwise, do not specify
+                # this field. This must be a [runtime version that currently supports
+                # training with
+                # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+                #
+                # Note that the version of TensorFlow included in a runtime version may
+                # differ from the numbering of the runtime version itself, because it may
+                # have a different [patch
+                # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+                # In this field, you must specify the runtime version (TensorFlow minor
+                # version). For example, if your custom container runs TensorFlow `1.x.y`,
+                # specify `1.x`.
+            "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+                # [Learn about restrictions on accelerator configurations for
+                # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+                # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+                # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+                # [accelerators for online
+                # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+              "count": "A String", # The number of accelerators to attach to each machine running the job.
+              "type": "A String", # The type of accelerator to use.
+            },
+            "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
+                # Registry. Learn more about [configuring custom
+                # containers](/ai-platform/training/docs/distributed-training-containers).
+          },
+          "evaluatorConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
+              #
+              # You should only set `evaluatorConfig.acceleratorConfig` if
+              # `evaluatorType` is set to a Compute Engine machine type. [Learn
+              # about restrictions on accelerator configurations for
+              # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+              #
+              # Set `evaluatorConfig.imageUri` only if you build a custom image for
+              # your evaluator. If `evaluatorConfig.imageUri` has not been
+              # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more
+              # about [configuring custom
+              # containers](/ai-platform/training/docs/distributed-training-containers).
+            "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+                # the one used in the custom container. This field is required if the replica
+                # is a TPU worker that uses a custom container. Otherwise, do not specify
+                # this field. This must be a [runtime version that currently supports
+                # training with
+                # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+                #
+                # Note that the version of TensorFlow included in a runtime version may
+                # differ from the numbering of the runtime version itself, because it may
+                # have a different [patch
+                # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+                # In this field, you must specify the runtime version (TensorFlow minor
+                # version). For example, if your custom container runs TensorFlow `1.x.y`,
+                # specify `1.x`.
+            "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+                # [Learn about restrictions on accelerator configurations for
+                # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+                # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+                # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+                # [accelerators for online
+                # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+              "count": "A String", # The number of accelerators to attach to each machine running the job.
+              "type": "A String", # The type of accelerator to use.
+            },
+            "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
+                # Registry. Learn more about [configuring custom
+                # containers](/ai-platform/training/docs/distributed-training-containers).
+          },
+          "useChiefInTfConfig": True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
+              # variable when training with a custom container. Defaults to `false`. [Learn
+              # more about this
+              # field.](/ai-platform/training/docs/distributed-training-details#chief-versus-master)
+              #
+              # This field has no effect for training jobs that don't use a custom
+              # container.
+          "masterConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for your master worker.
+              #
+              # You should only set `masterConfig.acceleratorConfig` if `masterType` is set
+              # to a Compute Engine machine type. Learn about [restrictions on accelerator
+              # configurations for
+              # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+              #
+              # Set `masterConfig.imageUri` only if you build a custom image. Only one of
+              # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
+              # about [configuring custom
+              # containers](/ai-platform/training/docs/distributed-training-containers).
+            "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+                # the one used in the custom container. This field is required if the replica
+                # is a TPU worker that uses a custom container. Otherwise, do not specify
+                # this field. This must be a [runtime version that currently supports
+                # training with
+                # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+                #
+                # Note that the version of TensorFlow included in a runtime version may
+                # differ from the numbering of the runtime version itself, because it may
+                # have a different [patch
+                # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+                # In this field, you must specify the runtime version (TensorFlow minor
+                # version). For example, if your custom container runs TensorFlow `1.x.y`,
+                # specify `1.x`.
+            "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+                # [Learn about restrictions on accelerator configurations for
+                # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+                # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+                # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+                # [accelerators for online
+                # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+              "count": "A String", # The number of accelerators to attach to each machine running the job.
+              "type": "A String", # The type of accelerator to use.
+            },
+            "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
+                # Registry. Learn more about [configuring custom
+                # containers](/ai-platform/training/docs/distributed-training-containers).
+          },
+          "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for training. You must
+              # either specify this field or specify `masterConfig.imageUri`.
+              #
+              # For more information, see the [runtime version
+              # list](/ai-platform/training/docs/runtime-version-list) and learn [how to
+              # manage runtime versions](/ai-platform/training/docs/versioning).
           "hyperparameters": { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
             "maxTrials": 42, # Optional. How many training trials should be attempted to optimize
                 # the specified hyperparameters.
@@ -2067,9 +2595,9 @@
                 "maxValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
                     # should be unset if type is `CATEGORICAL`. This value should be integers if
                     # type is `INTEGER`.
-                "categoricalValues": [ # Required if type is `CATEGORICAL`. The list of possible categories.
-                  "A String",
-                ],
+                "minValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
+                    # should be unset if type is `CATEGORICAL`. This value should be integers if
+                    # type is `INTEGER`.
                 "discreteValues": [ # Required if type is `DISCRETE`.
                     # A list of feasible points.
                     # The list should be in strictly increasing order. For instance, this
@@ -2079,9 +2607,9 @@
                 ],
                 "parameterName": "A String", # Required. The parameter name must be unique amongst all ParameterConfigs in
                     # a HyperparameterSpec message. E.g., "learning_rate".
-                "minValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
-                    # should be unset if type is `CATEGORICAL`. This value should be integers if
-                    # type is INTEGER.
+                "categoricalValues": [ # Required if type is `CATEGORICAL`. The list of possible categories.
+                  "A String",
+                ],
                 "type": "A String", # Required. The type of the parameter.
                 "scaleType": "A String", # Optional. How the parameter should be scaled to the hypercube.
                     # Leave unset for categorical parameters.
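+                    #
+                    # For example, a sketch of one tunable parameter (the
+                    # values are illustrative):
+                    #
+                    # ```python
+                    # param = {
+                    #     'parameterName': 'learning_rate',
+                    #     'type': 'DOUBLE',
+                    #     'minValue': 0.0001,
+                    #     'maxValue': 0.1,
+                    #     'scaleType': 'UNIT_LOG_SCALE',
+                    # }
+                    # ```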
@@ -2105,26 +2633,14 @@
                 #
                 # Defaults to one.
           },
-          "region": "A String", # Required. The Google Compute Engine region to run the training job in.
-              # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
-              # for AI Platform services.
-          "args": [ # Optional. Command line arguments to pass to the program.
+          "args": [ # Optional. Command-line arguments passed to the training application when it
+              # starts. If your job uses a custom container, then the arguments are passed
+              # to the container's
+              # [`ENTRYPOINT`](https://docs.docker.com/engine/reference/builder/#entrypoint)
+              # command.
             "A String",
           ],
           "pythonModule": "A String", # Required. The Python module name to run after installing the packages.
-          "pythonVersion": "A String", # Optional. The version of Python used in training. If not set, the default
-              # version is '2.7'. Python '3.5' is available when `runtime_version` is set
-              # to '1.4' and above. Python '2.7' works with all supported
-              # <a href="/ml-engine/docs/runtime-version-list">runtime versions</a>.
-          "jobDir": "A String", # Optional. A Google Cloud Storage path in which to store training outputs
-              # and other data needed for training. This path is passed to your TensorFlow
-              # program as the '--job-dir' command-line argument. The benefit of specifying
-              # this field is that Cloud ML validates the path for use in training.
-          "packageUris": [ # Required. The Google Cloud Storage location of the packages with
-              # the training program and any additional dependencies.
-              # The maximum number of package URIs is 100.
-            "A String",
-          ],
           "workerCount": "A String", # Optional. The number of worker replicas to use for the training job. Each
               # replica in the cluster will be of the type specified in `worker_type`.
               #
@@ -2132,6 +2648,87 @@
               # set this value, you must also set `worker_type`.
               #
               # The default value is zero.
+          "encryptionConfig": { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
+              # protect resources created by a training job, instead of using Google's
+              # default encryption. If this is set, then all resources created by the
+              # training job will be encrypted with the customer-managed encryption key
+              # that you specify.
+              #
+              # [Learn how and when to use CMEK with AI Platform
+              # Training](/ai-platform/training/docs/cmek).
+              # a resource.
+            "kmsKeyName": "A String", # The Cloud KMS resource identifier of the customer-managed encryption key
+                # used to protect a resource, such as a training job. It has the following
+                # format:
+                # `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
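+                #
+                # For example (every identifier below is a placeholder):
+                #
+                # ```python
+                # encryption_config = {
+                #     'kmsKeyName': ('projects/my-project/locations/us-central1/'
+                #                    'keyRings/my-ring/cryptoKeys/my-key'),
+                # }
+                # ```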
+          },
+          "parameterServerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
+              #
+              # You should only set `parameterServerConfig.acceleratorConfig` if
+              # `parameterServerType` is set to a Compute Engine machine type. [Learn
+              # about restrictions on accelerator configurations for
+              # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+              #
+              # Set `parameterServerConfig.imageUri` only if you build a custom image for
+              # your parameter server. If `parameterServerConfig.imageUri` has not been
+              # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more
+              # about [configuring custom
+              # containers](/ai-platform/training/docs/distributed-training-containers).
+            "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+                # the one used in the custom container. This field is required if the replica
+                # is a TPU worker that uses a custom container. Otherwise, do not specify
+                # this field. This must be a [runtime version that currently supports
+                # training with
+                # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+                #
+                # Note that the version of TensorFlow included in a runtime version may
+                # differ from the numbering of the runtime version itself, because it may
+                # have a different [patch
+                # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+                # In this field, you must specify the runtime version (TensorFlow minor
+                # version). For example, if your custom container runs TensorFlow `1.x.y`,
+                # specify `1.x`.
+            "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+                # [Learn about restrictions on accelerator configurations for
+                # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+                # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+                # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+                # [accelerators for online
+                # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+              "count": "A String", # The number of accelerators to attach to each machine running the job.
+              "type": "A String", # The type of accelerator to use.
+            },
+            "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
+                # Registry. Learn more about [configuring custom
+                # containers](/ai-platform/training/docs/distributed-training-containers).
+          },
+          "region": "A String", # Required. The region to run the training job in. See the [available
+              # regions](/ai-platform/training/docs/regions) for AI Platform Training.
+          "pythonVersion": "A String", # Optional. The version of Python used in training. You must either specify
+              # this field or specify `masterConfig.imageUri`.
+              #
+              # The following Python versions are available:
+              #
+              # * Python '3.7' is available when `runtime_version` is set to '1.15' or
+              #   later.
+              # * Python '3.5' is available when `runtime_version` is set to a version
+              #   from '1.4' to '1.14'.
+              # * Python '2.7' is available when `runtime_version` is set to '1.15' or
+              #   earlier.
+              #
+              # Read more about the Python versions available for [each runtime
+              # version](/ml-engine/docs/runtime-version-list).
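+              #
+              # For example, a compatible pairing sketch (the versions shown
+              # follow the rules above and are otherwise illustrative):
+              #
+              # ```python
+              # training_input = {
+              #     # ...
+              #     'runtimeVersion': '1.15',
+              #     'pythonVersion': '3.7',
+              # }
+              # ```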
+          "evaluatorType": "A String", # Optional. Specifies the type of virtual machine to use for your training
+              # job's evaluator nodes.
+              #
+              # The supported values are the same as those described in the entry for
+              # `masterType`.
+              #
+              # This value must be consistent with the category of machine type that
+              # `masterType` uses. In other words, both must be Compute Engine machine
+              # types or both must be legacy machine types.
+              #
+              # This value must be present when `scaleTier` is set to `CUSTOM` and
+              # `evaluatorCount` is greater than zero.
           "parameterServerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
               # job's parameter server.
               #
@@ -2139,81 +2736,12 @@
               # `master_type`.
               #
               # This value must be consistent with the category of machine type that
-              # `masterType` uses. In other words, both must be AI Platform machine
-              # types or both must be Compute Engine machine types.
+              # `masterType` uses. In other words, both must be Compute Engine machine
+              # types or both must be legacy machine types.
               #
               # This value must be present when `scaleTier` is set to `CUSTOM` and
               # `parameter_server_count` is greater than zero.
-          "workerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
-              #
-              # You should only set `workerConfig.acceleratorConfig` if `workerType` is set
-              # to a Compute Engine machine type. [Learn about restrictions on accelerator
-              # configurations for
-              # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-              #
-              # Set `workerConfig.imageUri` only if you build a custom image for your
-              # worker. If `workerConfig.imageUri` has not been set, AI Platform uses
-              # the value of `masterConfig.imageUri`. Learn more about
-              # [configuring custom
-              # containers](/ml-engine/docs/distributed-training-containers).
-            "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-                # [Learn about restrictions on accelerator configurations for
-                # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-              "count": "A String", # The number of accelerators to attach to each machine running the job.
-              "type": "A String", # The type of accelerator to use.
-            },
-            "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
-                # Registry. Learn more about [configuring custom
-                # containers](/ml-engine/docs/distributed-training-containers).
-          },
-          "maxRunningTime": "A String", # Optional. The maximum job running time. The default is 7 days.
-          "masterConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for your master worker.
-              #
-              # You should only set `masterConfig.acceleratorConfig` if `masterType` is set
-              # to a Compute Engine machine type. Learn about [restrictions on accelerator
-              # configurations for
-              # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-              #
-              # Set `masterConfig.imageUri` only if you build a custom image. Only one of
-              # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more about
-              # [configuring custom
-              # containers](/ml-engine/docs/distributed-training-containers).
-            "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-                # [Learn about restrictions on accelerator configurations for
-                # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-              "count": "A String", # The number of accelerators to attach to each machine running the job.
-              "type": "A String", # The type of accelerator to use.
-            },
-            "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
-                # Registry. Learn more about [configuring custom
-                # containers](/ml-engine/docs/distributed-training-containers).
-          },
-          "parameterServerCount": "A String", # Optional. The number of parameter server replicas to use for the training
-              # job. Each replica in the cluster will be of the type specified in
-              # `parameter_server_type`.
-              #
-              # This value can only be used when `scale_tier` is set to `CUSTOM`.If you
-              # set this value, you must also set `parameter_server_type`.
-              #
-              # The default value is zero.
         },
-        "jobId": "A String", # Required. The user-specified id of the job.
-        "labels": { # Optional. One or more labels that you can add, to organize your jobs.
-            # Each label is a key-value pair, where both the key and the value are
-            # arbitrary strings that you supply.
-            # For more information, see the documentation on
-            # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
-          "a_key": "A String",
-        },
-        "state": "A String", # Output only. The detailed state of a job.
-        "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
-            # prevent simultaneous updates of a job from overwriting each other.
-            # It is strongly suggested that systems make use of the `etag` in the
-            # read-modify-write cycle to perform job updates in order to avoid race
-            # conditions: An `etag` is returned in the response to `GetJob`, and
-            # systems are expected to put that etag in the request to `UpdateJob` to
-            # ensure that their change will be applied to the same version of the job.
-        "startTime": "A String", # Output only. When the job processing was started.
         "endTime": "A String", # Output only. When the job processing was completed.
         "predictionOutput": { # Represents results of a prediction job. # The current prediction job result.
           "outputPath": "A String", # The output Google Cloud Storage location provided at the job creation time.
@@ -2242,14 +2770,14 @@
 </div>
 
 <div class="method">
-    <code class="details" id="patch">patch(name, body, updateMask=None, x__xgafv=None)</code>
+    <code class="details" id="patch">patch(name, body=None, updateMask=None, x__xgafv=None)</code>
   <pre>Updates a specific job resource.
 
 Currently the only supported fields to update are `labels`.
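+
+For example, a usage sketch with the Python client library (the project, job
+name, and label below are placeholders):
+
+```python
+from googleapiclient import discovery
+
+ml = discovery.build('ml', 'v1')
+ml.projects().jobs().patch(
+    name='projects/my-project/jobs/my_job',
+    body={'labels': {'team': 'research'}},
+    updateMask='labels',
+).execute()
+```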
 
 Args:
   name: string, Required. The job name. (required)
-  body: object, The request body. (required)
+  body: object, The request body.
     The object takes the form of:
 
 { # Represents a training or prediction job.
@@ -2263,6 +2791,7 @@
           # training job. The TrainingOutput object that is returned on successful
           # completion of a training job with hyperparameter tuning includes a list
           # of HyperparameterOutput objects, one for each successful trial.
+        "startTime": "A String", # Output only. Start time for the trial.
         "hyperparameters": { # The hyperparameters given to this trial.
           "a_key": "A String",
         },
@@ -2270,6 +2799,7 @@
           "trainingStep": "A String", # The global training step for this metric.
           "objectiveValue": 3.14, # The objective value at this training step.
         },
+        "state": "A String", # Output only. The detailed state of the trial.
         "allMetrics": [ # All recorded object metrics for this trial. This field is not currently
             # populated.
           { # An observed value of a metric.
@@ -2278,6 +2808,7 @@
           },
         ],
         "isTrialStoppedEarly": True or False, # True if the trial is stopped early.
+        "endTime": "A String", # Output only. End time for the trial.
         "trialId": "A String", # The trial id for these results.
         "builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
             # Only set for trials of built-in algorithms jobs that have succeeded.
@@ -2314,11 +2845,6 @@
         # model. The string must use the following format:
         #
         # `"projects/YOUR_PROJECT/models/YOUR_MODEL"`
-    "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this batch
-        # prediction. If not set, AI Platform will pick the runtime version used
-        # during the CreateVersion request for this model version, or choose the
-        # latest stable version when model version information is not available
-        # such as when the model is specified by uri.
     "signatureName": "A String", # Optional. The name of the signature defined in the SavedModel to use for
         # this job. Please refer to
         # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
@@ -2327,14 +2853,15 @@
         # Defaults to
         # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
         # , which is "serving_default".
+    "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this batch
+        # prediction. If not set, AI Platform will pick the runtime version used
+        # during the CreateVersion request for this model version, or choose the
+        # latest stable version when model version information is not available,
+        # such as when the model is specified by `uri`.
     "batchSize": "A String", # Optional. Number of records per batch, defaults to 64.
         # The service will buffer batch_size number of records in memory before
         # invoking one Tensorflow prediction call internally. So take the record
         # size and memory available into consideration when setting this parameter.
-    "inputPaths": [ # Required. The Cloud Storage location of the input data files. May contain
-        # <a href="/storage/docs/gsutil/addlhelp/WildcardNames">wildcards</a>.
-      "A String",
-    ],
     "maxWorkerCount": "A String", # Optional. The maximum number of workers to be used for parallel processing.
         # Defaults to 10 if not specified.
     "uri": "A String", # Use this field if you want to specify a Google Cloud Storage path for
@@ -2347,154 +2874,43 @@
         #
         # `"projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION"`
     "region": "A String", # Required. The Google Compute Engine region to run the prediction job in.
-        # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
+        # See the [available regions](/ml-engine/docs/tensorflow/regions)
         # for AI Platform services.
+    "inputPaths": [ # Required. The Cloud Storage location of the input data files. May contain
+        # &lt;a href="/storage/docs/gsutil/addlhelp/WildcardNames"&gt;wildcards&lt;/a&gt;.
+      "A String",
+    ],
     "outputDataFormat": "A String", # Optional. Format of the output data files, defaults to JSON.
   },
-  "trainingInput": { # Represents input parameters for a training job. When using the # Input parameters to create a training job.
-      # gcloud command to submit your training job, you can specify
-      # the input parameters as command-line arguments and/or in a YAML configuration
-      # file referenced from the --config command-line argument. For
-      # details, see the guide to
-      # <a href="/ml-engine/docs/tensorflow/training-jobs">submitting a training
-      # job</a>.
-    "workerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
-        # job's worker nodes.
-        #
-        # The supported values are the same as those described in the entry for
-        # `masterType`.
-        #
-        # This value must be consistent with the category of machine type that
-        # `masterType` uses. In other words, both must be AI Platform machine
-        # types or both must be Compute Engine machine types.
-        #
-        # If you use `cloud_tpu` for this value, see special instructions for
-        # [configuring a custom TPU
-        # machine](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
-        #
-        # This value must be present when `scaleTier` is set to `CUSTOM` and
-        # `workerCount` is greater than zero.
-    "parameterServerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
-        #
-        # You should only set `parameterServerConfig.acceleratorConfig` if
-        # `parameterServerConfigType` is set to a Compute Engine machine type. [Learn
-        # about restrictions on accelerator configurations for
-        # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-        #
-        # Set `parameterServerConfig.imageUri` only if you build a custom image for
-        # your parameter server. If `parameterServerConfig.imageUri` has not been
-        # set, AI Platform uses the value of `masterConfig.imageUri`.
-        # Learn more about [configuring custom
-        # containers](/ml-engine/docs/distributed-training-containers).
-      "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-          # [Learn about restrictions on accelerator configurations for
-          # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-        "count": "A String", # The number of accelerators to attach to each machine running the job.
-        "type": "A String", # The type of accelerator to use.
-      },
-      "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
-          # Registry. Learn more about [configuring custom
-          # containers](/ml-engine/docs/distributed-training-containers).
-    },
-    "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for training. If not
-        # set, AI Platform uses the default stable version, 1.0. For more
-        # information, see the
-        # <a href="/ml-engine/docs/runtime-version-list">runtime version list</a>
-        # and
-        # <a href="/ml-engine/docs/versioning">how to manage runtime versions</a>.
-    "scaleTier": "A String", # Required. Specifies the machine types, the number of replicas for workers
-        # and parameter servers.
+  "labels": { # Optional. One or more labels that you can add, to organize your jobs.
+      # Each label is a key-value pair, where both the key and the value are
+      # arbitrary strings that you supply.
+      # For more information, see the documentation on
+      # &lt;a href="/ml-engine/docs/tensorflow/resource-labels"&gt;using labels&lt;/a&gt;.
+    "a_key": "A String",
+  },
+  "jobId": "A String", # Required. The user-specified id of the job.
+  "state": "A String", # Output only. The detailed state of a job.
+  "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
+      # prevent simultaneous updates of a job from overwriting each other.
+      # It is strongly suggested that systems make use of the `etag` in the
+      # read-modify-write cycle to perform job updates in order to avoid race
+      # conditions: An `etag` is returned in the response to `GetJob`, and
+      # systems are expected to put that etag in the request to `UpdateJob` to
+      # ensure that their change will be applied to the same version of the job.
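+      #
+      # For example, a read-modify-write sketch with the Python client
+      # (assumes `job_name` holds the full job resource name):
+      #
+      # ```python
+      # from googleapiclient import discovery
+      #
+      # ml = discovery.build('ml', 'v1')
+      # job = ml.projects().jobs().get(name=job_name).execute()
+      # job.setdefault('labels', {})['team'] = 'research'
+      # ml.projects().jobs().patch(
+      #     name=job_name,
+      #     body={'labels': job['labels'], 'etag': job['etag']},
+      #     updateMask='labels',
+      # ).execute()
+      # ```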
+  "startTime": "A String", # Output only. When the job processing was started.
+  "trainingInput": { # Represents input parameters for a training job. When using the gcloud command # Input parameters to create a training job.
+      # to submit your training job, you can specify the input parameters as
+      # command-line arguments and/or in a YAML configuration file referenced from
+      # the --config command-line argument. For details, see the guide to [submitting
+      # a training job](/ai-platform/training/docs/training-jobs).
     "masterType": "A String", # Optional. Specifies the type of virtual machine to use for your training
-        # job's master worker.
+        # job's master worker. You must specify this field when `scaleTier` is set to
+        # `CUSTOM`.
         #
+        # You can use certain Compute Engine machine types directly in this field.
         # The following types are supported:
         #
-        # <dl>
-        #   <dt>standard</dt>
-        #   <dd>
-        #   A basic machine configuration suitable for training simple models with
-        #   small to moderate datasets.
-        #   </dd>
-        #   <dt>large_model</dt>
-        #   <dd>
-        #   A machine with a lot of memory, specially suited for parameter servers
-        #   when your model is large (having many hidden layers or layers with very
-        #   large numbers of nodes).
-        #   </dd>
-        #   <dt>complex_model_s</dt>
-        #   <dd>
-        #   A machine suitable for the master and workers of the cluster when your
-        #   model requires more computation than the standard machine can handle
-        #   satisfactorily.
-        #   </dd>
-        #   <dt>complex_model_m</dt>
-        #   <dd>
-        #   A machine with roughly twice the number of cores and roughly double the
-        #   memory of <i>complex_model_s</i>.
-        #   </dd>
-        #   <dt>complex_model_l</dt>
-        #   <dd>
-        #   A machine with roughly twice the number of cores and roughly double the
-        #   memory of <i>complex_model_m</i>.
-        #   </dd>
-        #   <dt>standard_gpu</dt>
-        #   <dd>
-        #   A machine equivalent to <i>standard</i> that
-        #   also includes a single NVIDIA Tesla K80 GPU. See more about
-        #   <a href="/ml-engine/docs/tensorflow/using-gpus">using GPUs to
-        #   train your model</a>.
-        #   </dd>
-        #   <dt>complex_model_m_gpu</dt>
-        #   <dd>
-        #   A machine equivalent to <i>complex_model_m</i> that also includes
-        #   four NVIDIA Tesla K80 GPUs.
-        #   </dd>
-        #   <dt>complex_model_l_gpu</dt>
-        #   <dd>
-        #   A machine equivalent to <i>complex_model_l</i> that also includes
-        #   eight NVIDIA Tesla K80 GPUs.
-        #   </dd>
-        #   <dt>standard_p100</dt>
-        #   <dd>
-        #   A machine equivalent to <i>standard</i> that
-        #   also includes a single NVIDIA Tesla P100 GPU.
-        #   </dd>
-        #   <dt>complex_model_m_p100</dt>
-        #   <dd>
-        #   A machine equivalent to <i>complex_model_m</i> that also includes
-        #   four NVIDIA Tesla P100 GPUs.
-        #   </dd>
-        #   <dt>standard_v100</dt>
-        #   <dd>
-        #   A machine equivalent to <i>standard</i> that
-        #   also includes a single NVIDIA Tesla V100 GPU.
-        #   </dd>
-        #   <dt>large_model_v100</dt>
-        #   <dd>
-        #   A machine equivalent to <i>large_model</i> that
-        #   also includes a single NVIDIA Tesla V100 GPU.
-        #   </dd>
-        #   <dt>complex_model_m_v100</dt>
-        #   <dd>
-        #   A machine equivalent to <i>complex_model_m</i> that
-        #   also includes four NVIDIA Tesla V100 GPUs.
-        #   </dd>
-        #   <dt>complex_model_l_v100</dt>
-        #   <dd>
-        #   A machine equivalent to <i>complex_model_l</i> that
-        #   also includes eight NVIDIA Tesla V100 GPUs.
-        #   </dd>
-        #   <dt>cloud_tpu</dt>
-        #   <dd>
-        #   A TPU VM including one Cloud TPU. See more about
-        #   <a href="/ml-engine/docs/tensorflow/using-tpus">using TPUs to train
-        #   your model</a>.
-        #   </dd>
-        # </dl>
-        #
-        # You may also use certain Compute Engine machine types directly in this
-        # field. The following types are supported:
-        #
         # - `n1-standard-4`
         # - `n1-standard-8`
         # - `n1-standard-16`
@@ -2513,10 +2929,231 @@
         # - `n1-highcpu-64`
         # - `n1-highcpu-96`
         #
-        # See more about [using Compute Engine machine
-        # types](/ml-engine/docs/tensorflow/machine-types#compute-engine-machine-types).
+        # Learn more about [using Compute Engine machine
+        # types](/ml-engine/docs/machine-types#compute-engine-machine-types).
         #
-        # You must set this value when `scaleTier` is set to `CUSTOM`.
+        # Alternatively, you can use the following legacy machine types:
+        #
+        # - `standard`
+        # - `large_model`
+        # - `complex_model_s`
+        # - `complex_model_m`
+        # - `complex_model_l`
+        # - `standard_gpu`
+        # - `complex_model_m_gpu`
+        # - `complex_model_l_gpu`
+        # - `standard_p100`
+        # - `complex_model_m_p100`
+        # - `standard_v100`
+        # - `large_model_v100`
+        # - `complex_model_m_v100`
+        # - `complex_model_l_v100`
+        #
+        # Learn more about [using legacy machine
+        # types](/ml-engine/docs/machine-types#legacy-machine-types).
+        #
+        # Finally, if you want to use a TPU for training, specify `cloud_tpu` in this
+        # field. Learn more about the [special configuration options for training
+        # with
+        # TPUs](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
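+        #
+        # For example, a sketch that requests a TPU worker under a `CUSTOM`
+        # scale tier (the values are illustrative):
+        #
+        # ```python
+        # training_input = {
+        #     'scaleTier': 'CUSTOM',
+        #     'masterType': 'n1-standard-8',
+        #     'workerType': 'cloud_tpu',
+        #     'workerCount': '1',
+        #     'region': 'us-central1',
+        # }
+        # ```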
+    "jobDir": "A String", # Optional. A Google Cloud Storage path in which to store training outputs
+        # and other data needed for training. This path is passed to your TensorFlow
+        # program as the '--job-dir' command-line argument. The benefit of specifying
+        # this field is that Cloud ML validates the path for use in training.
+    "scheduling": { # All parameters related to scheduling of training jobs. # Optional. Scheduling options for a training job.
+      "maxRunningTime": "A String", # Optional. The maximum job running time, expressed in seconds. The field can
+          # contain up to nine fractional digits, terminated by `s`. By default there
+          # is no limit to the running time.
+          #
+          # If the training job is still running after this duration, AI Platform
+          # Training cancels it.
+          #
+          # For example, if you want to ensure your job runs for no more than 2 hours,
+          # set this field to `7200s` (2 hours * 60 minutes / hour * 60 seconds /
+          # minute).
+          #
+          # If you submit your training job using the `gcloud` tool, you can [provide
+          # this field in a `config.yaml`
+          # file](/ai-platform/training/docs/training-jobs#formatting_your_configuration_parameters).
+          # For example:
+          #
+          # ```yaml
+          # trainingInput:
+          #   ...
+          #   scheduling:
+          #     maxRunningTime: 7200s
+          #   ...
+          # ```
+    },
+    "parameterServerCount": "A String", # Optional. The number of parameter server replicas to use for the training
+        # job. Each replica in the cluster will be of the type specified in
+        # `parameter_server_type`.
+        #
+        # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+        # set this value, you must also set `parameter_server_type`.
+        #
+        # The default value is zero.
+    "evaluatorCount": "A String", # Optional. The number of evaluator replicas to use for the training job.
+        # Each replica in the cluster will be of the type specified in
+        # `evaluator_type`.
+        #
+        # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+        # set this value, you must also set `evaluator_type`.
+        #
+        # The default value is zero.
+    "workerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
+        # job's worker nodes.
+        #
+        # The supported values are the same as those described in the entry for
+        # `masterType`.
+        #
+        # This value must be consistent with the category of machine type that
+        # `masterType` uses. In other words, both must be Compute Engine machine
+        # types or both must be legacy machine types.
+        #
+        # If you use `cloud_tpu` for this value, see special instructions for
+        # [configuring a custom TPU
+        # machine](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
+        #
+        # This value must be present when `scaleTier` is set to `CUSTOM` and
+        # `workerCount` is greater than zero.
+    "scaleTier": "A String", # Required. Specifies the machine types, the number of replicas for workers
+        # and parameter servers.
+    "packageUris": [ # Required. The Google Cloud Storage location of the packages with
+        # the training program and any additional dependencies.
+        # The maximum number of package URIs is 100.
+      "A String",
+    ],
+    "workerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
+        #
+        # You should only set `workerConfig.acceleratorConfig` if `workerType` is set
+        # to a Compute Engine machine type. [Learn about restrictions on accelerator
+        # configurations for
+        # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+        #
+        # Set `workerConfig.imageUri` only if you build a custom image for your
+        # worker. If `workerConfig.imageUri` has not been set, AI Platform uses
+        # the value of `masterConfig.imageUri`. Learn more about [configuring custom
+        # containers](/ai-platform/training/docs/distributed-training-containers).
+      "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+          # the one used in the custom container. This field is required if the replica
+          # is a TPU worker that uses a custom container. Otherwise, do not specify
+          # this field. This must be a [runtime version that currently supports
+          # training with
+          # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+          #
+          # Note that the version of TensorFlow included in a runtime version may
+          # differ from the numbering of the runtime version itself, because it may
+          # have a different [patch
+          # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+          # In this field, you must specify the runtime version (TensorFlow minor
+          # version). For example, if your custom container runs TensorFlow `1.x.y`,
+          # specify `1.x`.
+      "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+          # [Learn about restrictions on accelerator configurations for
+          # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+          # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+          # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+          # [accelerators for online
+          # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+        "count": "A String", # The number of accelerators to attach to each machine running the job.
+        "type": "A String", # The type of accelerator to use.
+      },
+      "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
+          # Registry. Learn more about [configuring custom
+          # containers](/ai-platform/training/docs/distributed-training-containers).
+    },
+    "evaluatorConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
+        #
+        # You should only set `evaluatorConfig.acceleratorConfig` if
+        # `evaluatorType` is set to a Compute Engine machine type. [Learn
+        # about restrictions on accelerator configurations for
+        # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+        #
+        # Set `evaluatorConfig.imageUri` only if you build a custom image for
+        # your evaluator. If `evaluatorConfig.imageUri` has not been
+        # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more
+        # about [configuring custom
+        # containers](/ai-platform/training/docs/distributed-training-containers).
+      "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+          # the one used in the custom container. This field is required if the replica
+          # is a TPU worker that uses a custom container. Otherwise, do not specify
+          # this field. This must be a [runtime version that currently supports
+          # training with
+          # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+          #
+          # Note that the version of TensorFlow included in a runtime version may
+          # differ from the numbering of the runtime version itself, because it may
+          # have a different [patch
+          # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+          # In this field, you must specify the runtime version (TensorFlow minor
+          # version). For example, if your custom container runs TensorFlow `1.x.y`,
+          # specify `1.x`.
+      "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+          # [Learn about restrictions on accelerator configurations for
+          # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+          # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+          # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+          # [accelerators for online
+          # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+        "count": "A String", # The number of accelerators to attach to each machine running the job.
+        "type": "A String", # The type of accelerator to use.
+      },
+      "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
+          # Registry. Learn more about [configuring custom
+          # containers](/ai-platform/training/docs/distributed-training-containers).
+    },
+    "useChiefInTfConfig": True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
+        # variable when training with a custom container. Defaults to `false`. [Learn
+        # more about this
+        # field.](/ai-platform/training/docs/distributed-training-details#chief-versus-master)
+        #
+        # This field has no effect for training jobs that don't use a custom
+        # container.
+    "masterConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for your master worker.
+        #
+        # You should only set `masterConfig.acceleratorConfig` if `masterType` is set
+        # to a Compute Engine machine type. Learn about [restrictions on accelerator
+        # configurations for
+        # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+        #
+        # Set `masterConfig.imageUri` only if you build a custom image. Only one of
+        # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
+        # about [configuring custom
+        # containers](/ai-platform/training/docs/distributed-training-containers).
+      "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+          # the one used in the custom container. This field is required if the replica
+          # is a TPU worker that uses a custom container. Otherwise, do not specify
+          # this field. This must be a [runtime version that currently supports
+          # training with
+          # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+          #
+          # Note that the version of TensorFlow included in a runtime version may
+          # differ from the numbering of the runtime version itself, because it may
+          # have a different [patch
+          # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+          # In this field, you must specify the runtime version (TensorFlow minor
+          # version). For example, if your custom container runs TensorFlow `1.x.y`,
+          # specify `1.x`.
+      "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+          # [Learn about restrictions on accelerator configurations for
+          # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+          # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+          # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+          # [accelerators for online
+          # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+        "count": "A String", # The number of accelerators to attach to each machine running the job.
+        "type": "A String", # The type of accelerator to use.
+      },
+      "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
+          # Registry. Learn more about [configuring custom
+          # containers](/ai-platform/training/docs/distributed-training-containers).
+    },
+    "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for training. You must
+        # either specify this field or specify `masterConfig.imageUri`.
+        #
+        # For more information, see the [runtime version
+        # list](/ai-platform/training/docs/runtime-version-list) and learn [how to
+        # manage runtime versions](/ai-platform/training/docs/versioning).
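For example, a minimal sketch of `jobs.create()` that opts for a runtime version rather than a custom container; the project, bucket, and module names are placeholders, not values from this reference.

```python
from googleapiclient import discovery

ml = discovery.build('ml', 'v1')

job = {
    'jobId': 'my_training_job_001',
    'trainingInput': {
        'scaleTier': 'BASIC',
        'packageUris': ['gs://my-bucket/trainer-0.1.tar.gz'],
        'pythonModule': 'trainer.task',
        'region': 'us-central1',
        'runtimeVersion': '1.15',  # set this OR masterConfig.imageUri, not both
        'pythonVersion': '3.7',
    },
}

response = ml.projects().jobs().create(
    parent='projects/my-project', body=job).execute()
```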
     "hyperparameters": { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
       "maxTrials": 42, # Optional. How many training trials should be attempted to optimize
           # the specified hyperparameters.
@@ -2546,9 +3183,9 @@
           "maxValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
               # should be unset if type is `CATEGORICAL`. This value should be integers if
               # type is `INTEGER`.
-          "categoricalValues": [ # Required if type is `CATEGORICAL`. The list of possible categories.
-            "A String",
-          ],
+          "minValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
+              # should be unset if type is `CATEGORICAL`. This value should be integers if
+              # type is INTEGER.
           "discreteValues": [ # Required if type is `DISCRETE`.
               # A list of feasible points.
               # The list should be in strictly increasing order. For instance, this
@@ -2558,9 +3195,9 @@
           ],
           "parameterName": "A String", # Required. The parameter name must be unique amongst all ParameterConfigs in
               # a HyperparameterSpec message. E.g., "learning_rate".
-          "minValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
-              # should be unset if type is `CATEGORICAL`. This value should be integers if
-              # type is INTEGER.
+          "categoricalValues": [ # Required if type is `CATEGORICAL`. The list of possible categories.
+            "A String",
+          ],
           "type": "A String", # Required. The type of the parameter.
           "scaleType": "A String", # Optional. How the parameter should be scaled to the hypercube.
               # Leave unset for categorical parameters.
@@ -2584,26 +3221,14 @@
           #
           # Defaults to one.
     },
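A hedged sketch of a `hyperparameters` value with one `DOUBLE` and one `CATEGORICAL` parameter, following the field rules above; the parameter names and bounds are illustrative only.

```python
hyperparameters = {
    'goal': 'MAXIMIZE',
    'maxTrials': 10,
    'maxParallelTrials': 2,
    'params': [
        {
            'parameterName': 'learning_rate',
            'type': 'DOUBLE',
            'minValue': 0.0001,  # min/max required for DOUBLE and INTEGER
            'maxValue': 0.1,
            'scaleType': 'UNIT_LOG_SCALE',
        },
        {
            'parameterName': 'optimizer',
            'type': 'CATEGORICAL',
            # min/max left unset for CATEGORICAL, per the field docs
            'categoricalValues': ['adam', 'sgd'],
        },
    ],
}
```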
-    "region": "A String", # Required. The Google Compute Engine region to run the training job in.
-        # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
-        # for AI Platform services.
-    "args": [ # Optional. Command line arguments to pass to the program.
+    "args": [ # Optional. Command-line arguments passed to the training application when it
+        # starts. If your job uses a custom container, then the arguments are passed
+        # to the container's &lt;a class="external" target="_blank"
+        # href="https://docs.docker.com/engine/reference/builder/#entrypoint"&gt;
+        # `ENTRYPOINT`&lt;/a&gt; command.
       "A String",
     ],
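As a concrete illustration of the behavior described above, the sketch below passes hypothetical flags; the training application (or the container's `ENTRYPOINT`) is responsible for parsing them.

```python
# Hypothetical flags parsed by the training application itself; with a
# custom container they are appended to the image's ENTRYPOINT command.
training_input = {
    # ...scaleTier, region, and the other required fields...
    'args': ['--epochs=10', '--batch-size=128', '--learning-rate=0.01'],
}
```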
     "pythonModule": "A String", # Required. The Python module name to run after installing the packages.
-    "pythonVersion": "A String", # Optional. The version of Python used in training. If not set, the default
-        # version is '2.7'. Python '3.5' is available when `runtime_version` is set
-        # to '1.4' and above. Python '2.7' works with all supported
-        # <a href="/ml-engine/docs/runtime-version-list">runtime versions</a>.
-    "jobDir": "A String", # Optional. A Google Cloud Storage path in which to store training outputs
-        # and other data needed for training. This path is passed to your TensorFlow
-        # program as the '--job-dir' command-line argument. The benefit of specifying
-        # this field is that Cloud ML validates the path for use in training.
-    "packageUris": [ # Required. The Google Cloud Storage location of the packages with
-        # the training program and any additional dependencies.
-        # The maximum number of package URIs is 100.
-      "A String",
-    ],
     "workerCount": "A String", # Optional. The number of worker replicas to use for the training job. Each
         # replica in the cluster will be of the type specified in `worker_type`.
         #
@@ -2611,6 +3236,87 @@
         # set this value, you must also set `worker_type`.
         #
         # The default value is zero.
+    "encryptionConfig": { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
+        # protect resources created by a training job, instead of using Google's
+        # default encryption. If this is set, then all resources created by the
+        # training job will be encrypted with the customer-managed encryption key
+        # that you specify.
+        #
+        # [Learn how and when to use CMEK with AI Platform
+        # Training](/ai-platform/training/docs/cmek).
+        # a resource.
+      "kmsKeyName": "A String", # The Cloud KMS resource identifier of the customer-managed encryption key
+          # used to protect a resource, such as a training job. It has the following
+          # format:
+          # `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
+    },
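A sketch of protecting training resources with a customer-managed key, following the `kmsKeyName` format above; the project, location, key ring, and key names are placeholders.

```python
training_input = {
    # ...other training fields...
    'encryptionConfig': {
        # Cloud KMS resource identifier; every path segment is a placeholder.
        'kmsKeyName': ('projects/my-project/locations/us-central1/'
                       'keyRings/my-ring/cryptoKeys/my-key'),
    },
}
```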
+    "parameterServerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
+        #
+        # You should only set `parameterServerConfig.acceleratorConfig` if
+        # `parameterServerType` is set to a Compute Engine machine type. [Learn
+        # about restrictions on accelerator configurations for
+        # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+        #
+        # Set `parameterServerConfig.imageUri` only if you build a custom image for
+        # your parameter server. If `parameterServerConfig.imageUri` has not been
+        # set, AI Platform uses the value of `masterConfig.imageUri`. Learn more
+        # about [configuring custom
+        # containers](/ai-platform/training/docs/distributed-training-containers).
+      "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+          # the one used in the custom container. This field is required if the replica
+          # is a TPU worker that uses a custom container. Otherwise, do not specify
+          # this field. This must be a [runtime version that currently supports
+          # training with
+          # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+          #
+          # Note that the version of TensorFlow included in a runtime version may
+          # differ from the numbering of the runtime version itself, because it may
+          # have a different [patch
+          # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+          # In this field, you must specify the runtime version (TensorFlow minor
+          # version). For example, if your custom container runs TensorFlow `1.x.y`,
+          # specify `1.x`.
+      "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+          # [Learn about restrictions on accelerator configurations for
+          # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+          # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+          # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+          # [accelerators for online
+          # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+        "count": "A String", # The number of accelerators to attach to each machine running the job.
+        "type": "A String", # The type of accelerator to use.
+      },
+      "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
+          # Registry. Learn more about [configuring custom
+          # containers](/ai-platform/training/docs/distributed-training-containers).
+    },
+    "region": "A String", # Required. The region to run the training job in. See the [available
+        # regions](/ai-platform/training/docs/regions) for AI Platform Training.
+    "pythonVersion": "A String", # Optional. The version of Python used in training. You must either specify
+        # this field or specify `masterConfig.imageUri`.
+        #
+        # The following Python versions are available:
+        #
+        # * Python '3.7' is available when `runtime_version` is set to '1.15' or
+        #   later.
+        # * Python '3.5' is available when `runtime_version` is set to a version
+        #   from '1.4' to '1.14'.
+        # * Python '2.7' is available when `runtime_version` is set to '1.15' or
+        #   earlier.
+        #
+        # Read more about the Python versions available for [each runtime
+        # version](/ml-engine/docs/runtime-version-list).
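The pairings implied by the rules above can be summarized as follows; this is a hedged reading of the list, so verify against the current runtime version list before depending on any entry.

```python
# Python versions available per runtime version, as implied above.
python_for_runtime = {
    '1.15': ['3.7', '2.7'],  # last runtime to support Python 2.7
    '1.14': ['3.5', '2.7'],
    '1.4':  ['3.5', '2.7'],  # first runtime to offer Python 3.5
}
assert '3.7' in python_for_runtime['1.15']
```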
+    "evaluatorType": "A String", # Optional. Specifies the type of virtual machine to use for your training
+        # job's evaluator nodes.
+        #
+        # The supported values are the same as those described in the entry for
+        # `masterType`.
+        #
+        # This value must be consistent with the category of machine type that
+        # `masterType` uses. In other words, both must be Compute Engine machine
+        # types or both must be legacy machine types.
+        #
+        # This value must be present when `scaleTier` is set to `CUSTOM` and
+        # `evaluatorCount` is greater than zero.
     "parameterServerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
         # job's parameter server.
         #
@@ -2618,81 +3324,12 @@
         # `master_type`.
         #
         # This value must be consistent with the category of machine type that
-        # `masterType` uses. In other words, both must be AI Platform machine
-        # types or both must be Compute Engine machine types.
+        # `masterType` uses. In other words, both must be Compute Engine machine
+        # types or both must be legacy machine types.
         #
         # This value must be present when `scaleTier` is set to `CUSTOM` and
         # `parameter_server_count` is greater than zero.
-    "workerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
-        #
-        # You should only set `workerConfig.acceleratorConfig` if `workerType` is set
-        # to a Compute Engine machine type. [Learn about restrictions on accelerator
-        # configurations for
-        # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-        #
-        # Set `workerConfig.imageUri` only if you build a custom image for your
-        # worker. If `workerConfig.imageUri` has not been set, AI Platform uses
-        # the value of `masterConfig.imageUri`. Learn more about
-        # [configuring custom
-        # containers](/ml-engine/docs/distributed-training-containers).
-      "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-          # [Learn about restrictions on accelerator configurations for
-          # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-        "count": "A String", # The number of accelerators to attach to each machine running the job.
-        "type": "A String", # The type of accelerator to use.
-      },
-      "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
-          # Registry. Learn more about [configuring custom
-          # containers](/ml-engine/docs/distributed-training-containers).
-    },
-    "maxRunningTime": "A String", # Optional. The maximum job running time. The default is 7 days.
-    "masterConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for your master worker.
-        #
-        # You should only set `masterConfig.acceleratorConfig` if `masterType` is set
-        # to a Compute Engine machine type. Learn about [restrictions on accelerator
-        # configurations for
-        # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-        #
-        # Set `masterConfig.imageUri` only if you build a custom image. Only one of
-        # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more about
-        # [configuring custom
-        # containers](/ml-engine/docs/distributed-training-containers).
-      "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-          # [Learn about restrictions on accelerator configurations for
-          # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-        "count": "A String", # The number of accelerators to attach to each machine running the job.
-        "type": "A String", # The type of accelerator to use.
-      },
-      "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
-          # Registry. Learn more about [configuring custom
-          # containers](/ml-engine/docs/distributed-training-containers).
-    },
-    "parameterServerCount": "A String", # Optional. The number of parameter server replicas to use for the training
-        # job. Each replica in the cluster will be of the type specified in
-        # `parameter_server_type`.
-        #
-        # This value can only be used when `scale_tier` is set to `CUSTOM`.If you
-        # set this value, you must also set `parameter_server_type`.
-        #
-        # The default value is zero.
   },
-  "jobId": "A String", # Required. The user-specified id of the job.
-  "labels": { # Optional. One or more labels that you can add, to organize your jobs.
-      # Each label is a key-value pair, where both the key and the value are
-      # arbitrary strings that you supply.
-      # For more information, see the documentation on
-      # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
-    "a_key": "A String",
-  },
-  "state": "A String", # Output only. The detailed state of a job.
-  "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
-      # prevent simultaneous updates of a job from overwriting each other.
-      # It is strongly suggested that systems make use of the `etag` in the
-      # read-modify-write cycle to perform job updates in order to avoid race
-      # conditions: An `etag` is returned in the response to `GetJob`, and
-      # systems are expected to put that etag in the request to `UpdateJob` to
-      # ensure that their change will be applied to the same version of the job.
-  "startTime": "A String", # Output only. When the job processing was started.
   "endTime": "A String", # Output only. When the job processing was completed.
   "predictionOutput": { # Represents results of a prediction job. # The current prediction job result.
     "outputPath": "A String", # The output Google Cloud Storage location provided at the job creation time.
@@ -2741,6 +3378,7 @@
             # training job. The TrainingOutput object that is returned on successful
             # completion of a training job with hyperparameter tuning includes a list
             # of HyperparameterOutput objects, one for each successful trial.
+          "startTime": "A String", # Output only. Start time for the trial.
           "hyperparameters": { # The hyperparameters given to this trial.
             "a_key": "A String",
           },
@@ -2748,6 +3386,7 @@
             "trainingStep": "A String", # The global training step for this metric.
             "objectiveValue": 3.14, # The objective value at this training step.
           },
+          "state": "A String", # Output only. The detailed state of the trial.
           "allMetrics": [ # All recorded object metrics for this trial. This field is not currently
               # populated.
             { # An observed value of a metric.
@@ -2756,6 +3395,7 @@
             },
           ],
           "isTrialStoppedEarly": True or False, # True if the trial is stopped early.
+          "endTime": "A String", # Output only. End time for the trial.
           "trialId": "A String", # The trial id for these results.
           "builtInAlgorithmOutput": { # Represents output related to a built-in algorithm Job. # Details related to built-in algorithms jobs.
               # Only set for trials of built-in algorithms jobs that have succeeded.
@@ -2792,11 +3432,6 @@
           # model. The string must use the following format:
           #
           # `"projects/YOUR_PROJECT/models/YOUR_MODEL"`
-      "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this batch
-          # prediction. If not set, AI Platform will pick the runtime version used
-          # during the CreateVersion request for this model version, or choose the
-          # latest stable version when model version information is not available
-          # such as when the model is specified by uri.
       "signatureName": "A String", # Optional. The name of the signature defined in the SavedModel to use for
           # this job. Please refer to
           # [SavedModel](https://tensorflow.github.io/serving/serving_basic.html)
@@ -2805,14 +3440,15 @@
           # Defaults to
           # [DEFAULT_SERVING_SIGNATURE_DEF_KEY](https://www.tensorflow.org/api_docs/python/tf/saved_model/signature_constants)
           # , which is "serving_default".
+      "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for this batch
+          # prediction. If not set, AI Platform will pick the runtime version used
+          # during the CreateVersion request for this model version, or choose the
+          # latest stable version when model version information is not available
+          # such as when the model is specified by uri.
       "batchSize": "A String", # Optional. Number of records per batch, defaults to 64.
           # The service will buffer batch_size number of records in memory before
           # invoking one Tensorflow prediction call internally. So take the record
           # size and memory available into consideration when setting this parameter.
-      "inputPaths": [ # Required. The Cloud Storage location of the input data files. May contain
-          # <a href="/storage/docs/gsutil/addlhelp/WildcardNames">wildcards</a>.
-        "A String",
-      ],
       "maxWorkerCount": "A String", # Optional. The maximum number of workers to be used for parallel processing.
           # Defaults to 10 if not specified.
       "uri": "A String", # Use this field if you want to specify a Google Cloud Storage path for
@@ -2825,154 +3461,43 @@
           #
           # `"projects/YOUR_PROJECT/models/YOUR_MODEL/versions/YOUR_VERSION"`
       "region": "A String", # Required. The Google Compute Engine region to run the prediction job in.
-          # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
+          # See the &lt;a href="/ml-engine/docs/tensorflow/regions"&gt;available regions&lt;/a&gt;
           # for AI Platform services.
+      "inputPaths": [ # Required. The Cloud Storage location of the input data files. May contain
+          # &lt;a href="/storage/docs/gsutil/addlhelp/WildcardNames"&gt;wildcards&lt;/a&gt;.
+        "A String",
+      ],
       "outputDataFormat": "A String", # Optional. Format of the output data files, defaults to JSON.
     },
-    "trainingInput": { # Represents input parameters for a training job. When using the # Input parameters to create a training job.
-        # gcloud command to submit your training job, you can specify
-        # the input parameters as command-line arguments and/or in a YAML configuration
-        # file referenced from the --config command-line argument. For
-        # details, see the guide to
-        # <a href="/ml-engine/docs/tensorflow/training-jobs">submitting a training
-        # job</a>.
-      "workerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
-          # job's worker nodes.
-          #
-          # The supported values are the same as those described in the entry for
-          # `masterType`.
-          #
-          # This value must be consistent with the category of machine type that
-          # `masterType` uses. In other words, both must be AI Platform machine
-          # types or both must be Compute Engine machine types.
-          #
-          # If you use `cloud_tpu` for this value, see special instructions for
-          # [configuring a custom TPU
-          # machine](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
-          #
-          # This value must be present when `scaleTier` is set to `CUSTOM` and
-          # `workerCount` is greater than zero.
-      "parameterServerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
-          #
-          # You should only set `parameterServerConfig.acceleratorConfig` if
-          # `parameterServerConfigType` is set to a Compute Engine machine type. [Learn
-          # about restrictions on accelerator configurations for
-          # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-          #
-          # Set `parameterServerConfig.imageUri` only if you build a custom image for
-          # your parameter server. If `parameterServerConfig.imageUri` has not been
-          # set, AI Platform uses the value of `masterConfig.imageUri`.
-          # Learn more about [configuring custom
-          # containers](/ml-engine/docs/distributed-training-containers).
-        "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-            # [Learn about restrictions on accelerator configurations for
-            # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-          "count": "A String", # The number of accelerators to attach to each machine running the job.
-          "type": "A String", # The type of accelerator to use.
-        },
-        "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
-            # Registry. Learn more about [configuring custom
-            # containers](/ml-engine/docs/distributed-training-containers).
-      },
-      "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for training. If not
-          # set, AI Platform uses the default stable version, 1.0. For more
-          # information, see the
-          # <a href="/ml-engine/docs/runtime-version-list">runtime version list</a>
-          # and
-          # <a href="/ml-engine/docs/versioning">how to manage runtime versions</a>.
-      "scaleTier": "A String", # Required. Specifies the machine types, the number of replicas for workers
-          # and parameter servers.
+    "labels": { # Optional. One or more labels that you can add, to organize your jobs.
+        # Each label is a key-value pair, where both the key and the value are
+        # arbitrary strings that you supply.
+        # For more information, see the documentation on
+        # &lt;a href="/ml-engine/docs/tensorflow/resource-labels"&gt;using labels&lt;/a&gt;.
+      "a_key": "A String",
+    },
+    "jobId": "A String", # Required. The user-specified id of the job.
+    "state": "A String", # Output only. The detailed state of a job.
+    "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
+        # prevent simultaneous updates of a job from overwriting each other.
+        # It is strongly suggested that systems make use of the `etag` in the
+        # read-modify-write cycle to perform job updates in order to avoid race
+        # conditions: An `etag` is returned in the response to `GetJob`, and
+        # systems are expected to put that etag in the request to `UpdateJob` to
+        # ensure that their change will be applied to the same version of the job.
+    "startTime": "A String", # Output only. When the job processing was started.
+    "trainingInput": { # Represents input parameters for a training job. When using the gcloud command # Input parameters to create a training job.
+        # to submit your training job, you can specify the input parameters as
+        # command-line arguments and/or in a YAML configuration file referenced from
+        # the --config command-line argument. For details, see the guide to [submitting
+        # a training job](/ai-platform/training/docs/training-jobs).
       "masterType": "A String", # Optional. Specifies the type of virtual machine to use for your training
-          # job's master worker.
+          # job's master worker. You must specify this field when `scaleTier` is set to
+          # `CUSTOM`.
           #
+          # You can use certain Compute Engine machine types directly in this field.
           # The following types are supported:
           #
-          # <dl>
-          #   <dt>standard</dt>
-          #   <dd>
-          #   A basic machine configuration suitable for training simple models with
-          #   small to moderate datasets.
-          #   </dd>
-          #   <dt>large_model</dt>
-          #   <dd>
-          #   A machine with a lot of memory, specially suited for parameter servers
-          #   when your model is large (having many hidden layers or layers with very
-          #   large numbers of nodes).
-          #   </dd>
-          #   <dt>complex_model_s</dt>
-          #   <dd>
-          #   A machine suitable for the master and workers of the cluster when your
-          #   model requires more computation than the standard machine can handle
-          #   satisfactorily.
-          #   </dd>
-          #   <dt>complex_model_m</dt>
-          #   <dd>
-          #   A machine with roughly twice the number of cores and roughly double the
-          #   memory of <i>complex_model_s</i>.
-          #   </dd>
-          #   <dt>complex_model_l</dt>
-          #   <dd>
-          #   A machine with roughly twice the number of cores and roughly double the
-          #   memory of <i>complex_model_m</i>.
-          #   </dd>
-          #   <dt>standard_gpu</dt>
-          #   <dd>
-          #   A machine equivalent to <i>standard</i> that
-          #   also includes a single NVIDIA Tesla K80 GPU. See more about
-          #   <a href="/ml-engine/docs/tensorflow/using-gpus">using GPUs to
-          #   train your model</a>.
-          #   </dd>
-          #   <dt>complex_model_m_gpu</dt>
-          #   <dd>
-          #   A machine equivalent to <i>complex_model_m</i> that also includes
-          #   four NVIDIA Tesla K80 GPUs.
-          #   </dd>
-          #   <dt>complex_model_l_gpu</dt>
-          #   <dd>
-          #   A machine equivalent to <i>complex_model_l</i> that also includes
-          #   eight NVIDIA Tesla K80 GPUs.
-          #   </dd>
-          #   <dt>standard_p100</dt>
-          #   <dd>
-          #   A machine equivalent to <i>standard</i> that
-          #   also includes a single NVIDIA Tesla P100 GPU.
-          #   </dd>
-          #   <dt>complex_model_m_p100</dt>
-          #   <dd>
-          #   A machine equivalent to <i>complex_model_m</i> that also includes
-          #   four NVIDIA Tesla P100 GPUs.
-          #   </dd>
-          #   <dt>standard_v100</dt>
-          #   <dd>
-          #   A machine equivalent to <i>standard</i> that
-          #   also includes a single NVIDIA Tesla V100 GPU.
-          #   </dd>
-          #   <dt>large_model_v100</dt>
-          #   <dd>
-          #   A machine equivalent to <i>large_model</i> that
-          #   also includes a single NVIDIA Tesla V100 GPU.
-          #   </dd>
-          #   <dt>complex_model_m_v100</dt>
-          #   <dd>
-          #   A machine equivalent to <i>complex_model_m</i> that
-          #   also includes four NVIDIA Tesla V100 GPUs.
-          #   </dd>
-          #   <dt>complex_model_l_v100</dt>
-          #   <dd>
-          #   A machine equivalent to <i>complex_model_l</i> that
-          #   also includes eight NVIDIA Tesla V100 GPUs.
-          #   </dd>
-          #   <dt>cloud_tpu</dt>
-          #   <dd>
-          #   A TPU VM including one Cloud TPU. See more about
-          #   <a href="/ml-engine/docs/tensorflow/using-tpus">using TPUs to train
-          #   your model</a>.
-          #   </dd>
-          # </dl>
-          #
-          # You may also use certain Compute Engine machine types directly in this
-          # field. The following types are supported:
-          #
           # - `n1-standard-4`
           # - `n1-standard-8`
           # - `n1-standard-16`
@@ -2991,10 +3516,231 @@
           # - `n1-highcpu-64`
           # - `n1-highcpu-96`
           #
-          # See more about [using Compute Engine machine
-          # types](/ml-engine/docs/tensorflow/machine-types#compute-engine-machine-types).
+          # Learn more about [using Compute Engine machine
+          # types](/ml-engine/docs/machine-types#compute-engine-machine-types).
           #
-          # You must set this value when `scaleTier` is set to `CUSTOM`.
+          # Alternatively, you can use the following legacy machine types:
+          #
+          # - `standard`
+          # - `large_model`
+          # - `complex_model_s`
+          # - `complex_model_m`
+          # - `complex_model_l`
+          # - `standard_gpu`
+          # - `complex_model_m_gpu`
+          # - `complex_model_l_gpu`
+          # - `standard_p100`
+          # - `complex_model_m_p100`
+          # - `standard_v100`
+          # - `large_model_v100`
+          # - `complex_model_m_v100`
+          # - `complex_model_l_v100`
+          #
+          # Learn more about [using legacy machine
+          # types](/ml-engine/docs/machine-types#legacy-machine-types).
+          #
+          # Finally, if you want to use a TPU for training, specify `cloud_tpu` in this
+          # field. Learn more about the [special configuration options for training
+          # with
+          # TPUs](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
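Putting the `masterType` options together, here is a hedged sketch of a `CUSTOM` scale tier using a Compute Engine machine type; the accelerator choice is illustrative, not a recommendation.

```python
training_input = {
    'scaleTier': 'CUSTOM',
    'masterType': 'n1-standard-8',  # a supported Compute Engine machine type
    'masterConfig': {
        'acceleratorConfig': {
            'count': '1',            # counts are strings in this API
            'type': 'NVIDIA_TESLA_K80',
        },
    },
    # ...packageUris, pythonModule, region, runtimeVersion...
}
```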
+      "jobDir": "A String", # Optional. A Google Cloud Storage path in which to store training outputs
+          # and other data needed for training. This path is passed to your TensorFlow
+          # program as the '--job-dir' command-line argument. The benefit of specifying
+          # this field is that Cloud ML validates the path for use in training.
+      "scheduling": { # All parameters related to scheduling of training jobs. # Optional. Scheduling options for a training job.
+        "maxRunningTime": "A String", # Optional. The maximum job running time, expressed in seconds. The field can
+            # contain up to nine fractional digits, terminated by `s`. By default there
+            # is no limit to the running time.
+            #
+            # If the training job is still running after this duration, AI Platform
+            # Training cancels it.
+            #
+            # For example, if you want to ensure your job runs for no more than 2 hours,
+            # set this field to `7200s` (2 hours * 60 minutes / hour * 60 seconds /
+            # minute).
+            #
+            # If you submit your training job using the `gcloud` tool, you can [provide
+            # this field in a `config.yaml`
+            # file](/ai-platform/training/docs/training-jobs#formatting_your_configuration_parameters).
+            # For example:
+            #
+            # ```yaml
+            # trainingInput:
+            #   ...
+            #   scheduling:
+            #     maxRunningTime: 7200s
+            #   ...
+            # ```
+      },
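The same limit as the `config.yaml` example above, expressed directly in a Python request body.

```python
training_input = {
    # ...other training fields...
    'scheduling': {'maxRunningTime': '7200s'},  # cancel the job after 2 hours
}
```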
+      "parameterServerCount": "A String", # Optional. The number of parameter server replicas to use for the training
+          # job. Each replica in the cluster will be of the type specified in
+          # `parameter_server_type`.
+          #
+          # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+          # set this value, you must also set `parameter_server_type`.
+          #
+          # The default value is zero.
+      "evaluatorCount": "A String", # Optional. The number of evaluator replicas to use for the training job.
+          # Each replica in the cluster will be of the type specified in
+          # `evaluator_type`.
+          #
+          # This value can only be used when `scale_tier` is set to `CUSTOM`. If you
+          # set this value, you must also set `evaluator_type`.
+          #
+          # The default value is zero.
+      "workerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
+          # job's worker nodes.
+          #
+          # The supported values are the same as those described in the entry for
+          # `masterType`.
+          #
+          # This value must be consistent with the category of machine type that
+          # `masterType` uses. In other words, both must be Compute Engine machine
+          # types or both must be legacy machine types.
+          #
+          # If you use `cloud_tpu` for this value, see special instructions for
+          # [configuring a custom TPU
+          # machine](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).
+          #
+          # This value must be present when `scaleTier` is set to `CUSTOM` and
+          # `workerCount` is greater than zero.
+      "scaleTier": "A String", # Required. Specifies the machine types, the number of replicas for workers
+          # and parameter servers.
+      "packageUris": [ # Required. The Google Cloud Storage location of the packages with
+          # the training program and any additional dependencies.
+          # The maximum number of package URIs is 100.
+        "A String",
+      ],
+      "workerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
+          #
+          # You should only set `workerConfig.acceleratorConfig` if `workerType` is set
+          # to a Compute Engine machine type. [Learn about restrictions on accelerator
+          # configurations for
+          # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+          #
+          # Set `workerConfig.imageUri` only if you build a custom image for your
+          # worker. If `workerConfig.imageUri` has not been set, AI Platform uses
+          # the value of `masterConfig.imageUri`. Learn more about [configuring custom
+          # containers](/ai-platform/training/docs/distributed-training-containers).
+        "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+            # the one used in the custom container. This field is required if the replica
+            # is a TPU worker that uses a custom container. Otherwise, do not specify
+            # this field. This must be a [runtime version that currently supports
+            # training with
+            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+            #
+            # Note that the version of TensorFlow included in a runtime version may
+            # differ from the numbering of the runtime version itself, because it may
+            # have a different [patch
+            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+            # In this field, you must specify the runtime version (TensorFlow minor
+            # version). For example, if your custom container runs TensorFlow `1.x.y`,
+            # specify `1.x`.
+        "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+            # [Learn about restrictions on accelerator configurations for
+            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+            # [accelerators for online
+            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+          "count": "A String", # The number of accelerators to attach to each machine running the job.
+          "type": "A String", # The type of accelerator to use.
+        },
+        "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
+            # Registry. Learn more about [configuring custom
+            # containers](/ai-platform/training/docs/distributed-training-containers).
+      },
+      "evaluatorConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for evaluators.
+          #
+          # You should only set `evaluatorConfig.acceleratorConfig` if
+          # `evaluatorType` is set to a Compute Engine machine type. [Learn
+          # about restrictions on accelerator configurations for
+          # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+          #
+          # Set `evaluatorConfig.imageUri` only if you build a custom image for
+          # your evaluator. If `evaluatorConfig.imageUri` has not been
+          # set, AI Platform uses the value of `masterConfig.imageUri`. Learn
+          # more about [configuring custom
+          # containers](/ai-platform/training/docs/distributed-training-containers).
+        "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+            # the one used in the custom container. This field is required if the replica
+            # is a TPU worker that uses a custom container. Otherwise, do not specify
+            # this field. This must be a [runtime version that currently supports
+            # training with
+            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+            #
+            # Note that the version of TensorFlow included in a runtime version may
+            # differ from the numbering of the runtime version itself, because it may
+            # have a different [patch
+            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+            # In this field, you must specify the runtime version (TensorFlow minor
+            # version). For example, if your custom container runs TensorFlow `1.x.y`,
+            # specify `1.x`.
+        "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+            # [Learn about restrictions on accelerator configurations for
+            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+            # [accelerators for online
+            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+          "count": "A String", # The number of accelerators to attach to each machine running the job.
+          "type": "A String", # The type of accelerator to use.
+        },
+        "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
+            # Registry. Learn more about [configuring custom
+            # containers](/ai-platform/training/docs/distributed-training-containers).
+      },
+      "useChiefInTfConfig": True or False, # Optional. Use `chief` instead of `master` in the `TF_CONFIG` environment
+          # variable when training with a custom container. Defaults to `false`. [Learn
+          # more about this
+          # field.](/ai-platform/training/docs/distributed-training-details#chief-versus-master)
+          #
+          # This field has no effect for training jobs that don't use a custom
+          # container.
+      "masterConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for your master worker.
+          #
+          # You should only set `masterConfig.acceleratorConfig` if `masterType` is set
+          # to a Compute Engine machine type. Learn about [restrictions on accelerator
+          # configurations for
+          # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+          #
+          # Set `masterConfig.imageUri` only if you build a custom image. Only one of
+          # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more
+          # about [configuring custom
+          # containers](/ai-platform/training/docs/distributed-training-containers).
+        "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+            # the one used in the custom container. This field is required if the replica
+            # is a TPU worker that uses a custom container. Otherwise, do not specify
+            # this field. This must be a [runtime version that currently supports
+            # training with
+            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+            #
+            # Note that the version of TensorFlow included in a runtime version may
+            # differ from the numbering of the runtime version itself, because it may
+            # have a different [patch
+            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+            # In this field, you must specify the runtime version (TensorFlow minor
+            # version). For example, if your custom container runs TensorFlow `1.x.y`,
+            # specify `1.x`.
+        "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+            # [Learn about restrictions on accelerator configurations for
+            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+            # [accelerators for online
+            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+          "count": "A String", # The number of accelerators to attach to each machine running the job.
+          "type": "A String", # The type of accelerator to use.
+        },
+        "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
+            # Registry. Learn more about [configuring custom
+            # containers](/ai-platform/training/docs/distributed-training-containers).
+      },
+      "runtimeVersion": "A String", # Optional. The AI Platform runtime version to use for training. You must
+          # either specify this field or specify `masterConfig.imageUri`.
+          #
+          # For more information, see the [runtime version
+          # list](/ai-platform/training/docs/runtime-version-list) and learn [how to
+          # manage runtime versions](/ai-platform/training/docs/versioning).
       "hyperparameters": { # Represents a set of hyperparameters to optimize. # Optional. The set of Hyperparameters to tune.
         "maxTrials": 42, # Optional. How many training trials should be attempted to optimize
             # the specified hyperparameters.
@@ -3024,9 +3770,9 @@
             "maxValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
                 # should be unset if type is `CATEGORICAL`. This value should be integers if
                 # type is `INTEGER`.
-            "categoricalValues": [ # Required if type is `CATEGORICAL`. The list of possible categories.
-              "A String",
-            ],
+            "minValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
+                # should be unset if type is `CATEGORICAL`. This value should be integers if
+                # type is INTEGER.
             "discreteValues": [ # Required if type is `DISCRETE`.
                 # A list of feasible points.
                 # The list should be in strictly increasing order. For instance, this
@@ -3036,9 +3782,9 @@
             ],
             "parameterName": "A String", # Required. The parameter name must be unique amongst all ParameterConfigs in
                 # a HyperparameterSpec message. E.g., "learning_rate".
-            "minValue": 3.14, # Required if type is `DOUBLE` or `INTEGER`. This field
-                # should be unset if type is `CATEGORICAL`. This value should be integers if
-                # type is INTEGER.
+            "categoricalValues": [ # Required if type is `CATEGORICAL`. The list of possible categories.
+              "A String",
+            ],
             "type": "A String", # Required. The type of the parameter.
             "scaleType": "A String", # Optional. How the parameter should be scaled to the hypercube.
                 # Leave unset for categorical parameters.
@@ -3062,26 +3808,14 @@
             #
             # Defaults to one.
       },
-      "region": "A String", # Required. The Google Compute Engine region to run the training job in.
-          # See the <a href="/ml-engine/docs/tensorflow/regions">available regions</a>
-          # for AI Platform services.
-      "args": [ # Optional. Command line arguments to pass to the program.
+      "args": [ # Optional. Command-line arguments passed to the training application when it
+          # starts. If your job uses a custom container, then the arguments are passed
+          # to the container's &lt;a class="external" target="_blank"
+          # href="https://docs.docker.com/engine/reference/builder/#entrypoint"&gt;
+          # `ENTRYPOINT`&lt;/a&gt; command.
         "A String",
       ],
       "pythonModule": "A String", # Required. The Python module name to run after installing the packages.
-      "pythonVersion": "A String", # Optional. The version of Python used in training. If not set, the default
-          # version is '2.7'. Python '3.5' is available when `runtime_version` is set
-          # to '1.4' and above. Python '2.7' works with all supported
-          # <a href="/ml-engine/docs/runtime-version-list">runtime versions</a>.
-      "jobDir": "A String", # Optional. A Google Cloud Storage path in which to store training outputs
-          # and other data needed for training. This path is passed to your TensorFlow
-          # program as the '--job-dir' command-line argument. The benefit of specifying
-          # this field is that Cloud ML validates the path for use in training.
-      "packageUris": [ # Required. The Google Cloud Storage location of the packages with
-          # the training program and any additional dependencies.
-          # The maximum number of package URIs is 100.
-        "A String",
-      ],
       "workerCount": "A String", # Optional. The number of worker replicas to use for the training job. Each
           # replica in the cluster will be of the type specified in `worker_type`.
           #
@@ -3089,6 +3823,87 @@
           # set this value, you must also set `worker_type`.
           #
           # The default value is zero.
+      "encryptionConfig": { # Represents a custom encryption key configuration that can be applied to # Optional. Options for using customer-managed encryption keys (CMEK) to
+          # protect resources created by a training job, instead of using Google's
+          # default encryption. If this is set, then all resources created by the
+          # training job will be encrypted with the customer-managed encryption key
+          # that you specify.
+          #
+          # [Learn how and when to use CMEK with AI Platform
+          # Training](/ai-platform/training/docs/cmek).
+          # a resource.
+        "kmsKeyName": "A String", # The Cloud KMS resource identifier of the customer-managed encryption key
+            # used to protect a resource, such as a training job. It has the following
+            # format:
+            # `projects/{PROJECT_ID}/locations/{REGION}/keyRings/{KEY_RING_NAME}/cryptoKeys/{KEY_NAME}`
+      },
+      "parameterServerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for parameter servers.
+          #
+          # You should only set `parameterServerConfig.acceleratorConfig` if
+          # `parameterServerType` is set to a Compute Engine machine type. [Learn
+          # about restrictions on accelerator configurations for
+          # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+          #
+          # Set `parameterServerConfig.imageUri` only if you build a custom image for
+          # your parameter server. If `parameterServerConfig.imageUri` has not been
+          # set, AI Platform uses the value of `masterConfig.imageUri`. Learn
+          # more about [configuring custom
+          # containers](/ai-platform/training/docs/distributed-training-containers).
+        "tpuTfVersion": "A String", # The AI Platform runtime version that includes a TensorFlow version matching
+            # the one used in the custom container. This field is required if the replica
+            # is a TPU worker that uses a custom container. Otherwise, do not specify
+            # this field. This must be a [runtime version that currently supports
+            # training with
+            # TPUs](/ml-engine/docs/tensorflow/runtime-version-list#tpu-support).
+            #
+            # Note that the version of TensorFlow included in a runtime version may
+            # differ from the numbering of the runtime version itself, because it may
+            # have a different [patch
+            # version](https://www.tensorflow.org/guide/version_compat#semantic_versioning_20).
+            # In this field, you must specify the runtime version (TensorFlow minor
+            # version). For example, if your custom container runs TensorFlow `1.x.y`,
+            # specify `1.x`.
+        "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
+            # [Learn about restrictions on accelerator configurations for
+            # training.](/ai-platform/training/docs/using-gpus#compute-engine-machine-types-with-gpu)
+            # Note that the AcceleratorConfig can be used in both Jobs and Versions.
+            # Learn more about [accelerators for training](/ml-engine/docs/using-gpus) and
+            # [accelerators for online
+            # prediction](/ml-engine/docs/machine-types-online-prediction#gpus).
+          "count": "A String", # The number of accelerators to attach to each machine running the job.
+          "type": "A String", # The type of accelerator to use.
+        },
+        "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
+            # Registry. Learn more about [configuring custom
+            # containers](/ai-platform/training/docs/distributed-training-containers).
+      },
+      "region": "A String", # Required. The region to run the training job in. See the [available
+          # regions](/ai-platform/training/docs/regions) for AI Platform Training.
+      "pythonVersion": "A String", # Optional. The version of Python used in training. You must either specify
+          # this field or specify `masterConfig.imageUri`.
+          #
+          # The following Python versions are available:
+          #
+          # * Python '3.7' is available when `runtime_version` is set to '1.15' or
+          #   later.
+          # * Python '3.5' is available when `runtime_version` is set to a version
+          #   from '1.4' to '1.14'.
+          # * Python '2.7' is available when `runtime_version` is set to '1.15' or
+          #   earlier.
+          #
+          # Read more about the Python versions available for [each runtime
+          # version](/ml-engine/docs/runtime-version-list).
+      "evaluatorType": "A String", # Optional. Specifies the type of virtual machine to use for your training
+          # job's evaluator nodes.
+          #
+          # The supported values are the same as those described in the entry for
+          # `masterType`.
+          #
+          # This value must be consistent with the category of machine type that
+          # `masterType` uses. In other words, both must be Compute Engine machine
+          # types or both must be legacy machine types.
+          #
+          # This value must be present when `scaleTier` is set to `CUSTOM` and
+          # `evaluatorCount` is greater than zero.
       "parameterServerType": "A String", # Optional. Specifies the type of virtual machine to use for your training
           # job's parameter server.
           #
@@ -3096,81 +3911,12 @@
           # `master_type`.
           #
           # This value must be consistent with the category of machine type that
-          # `masterType` uses. In other words, both must be AI Platform machine
-          # types or both must be Compute Engine machine types.
+          # `masterType` uses. In other words, both must be Compute Engine machine
+          # types or both must be legacy machine types.
           #
           # This value must be present when `scaleTier` is set to `CUSTOM` and
           # `parameter_server_count` is greater than zero.
-      "workerConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for workers.
-          #
-          # You should only set `workerConfig.acceleratorConfig` if `workerType` is set
-          # to a Compute Engine machine type. [Learn about restrictions on accelerator
-          # configurations for
-          # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-          #
-          # Set `workerConfig.imageUri` only if you build a custom image for your
-          # worker. If `workerConfig.imageUri` has not been set, AI Platform uses
-          # the value of `masterConfig.imageUri`. Learn more about
-          # [configuring custom
-          # containers](/ml-engine/docs/distributed-training-containers).
-        "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-            # [Learn about restrictions on accelerator configurations for
-            # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-          "count": "A String", # The number of accelerators to attach to each machine running the job.
-          "type": "A String", # The type of accelerator to use.
-        },
-        "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
-            # Registry. Learn more about [configuring custom
-            # containers](/ml-engine/docs/distributed-training-containers).
-      },
-      "maxRunningTime": "A String", # Optional. The maximum job running time. The default is 7 days.
-      "masterConfig": { # Represents the configuration for a replica in a cluster. # Optional. The configuration for your master worker.
-          #
-          # You should only set `masterConfig.acceleratorConfig` if `masterType` is set
-          # to a Compute Engine machine type. Learn about [restrictions on accelerator
-          # configurations for
-          # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-          #
-          # Set `masterConfig.imageUri` only if you build a custom image. Only one of
-          # `masterConfig.imageUri` and `runtimeVersion` should be set. Learn more about
-          # [configuring custom
-          # containers](/ml-engine/docs/distributed-training-containers).
-        "acceleratorConfig": { # Represents a hardware accelerator request config. # Represents the type and number of accelerators used by the replica.
-            # [Learn about restrictions on accelerator configurations for
-            # training.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)
-          "count": "A String", # The number of accelerators to attach to each machine running the job.
-          "type": "A String", # The type of accelerator to use.
-        },
-        "imageUri": "A String", # The Docker image to run on the replica. This image must be in Container
-            # Registry. Learn more about [configuring custom
-            # containers](/ml-engine/docs/distributed-training-containers).
-      },
-      "parameterServerCount": "A String", # Optional. The number of parameter server replicas to use for the training
-          # job. Each replica in the cluster will be of the type specified in
-          # `parameter_server_type`.
-          #
-          # This value can only be used when `scale_tier` is set to `CUSTOM`.If you
-          # set this value, you must also set `parameter_server_type`.
-          #
-          # The default value is zero.
     },
-    "jobId": "A String", # Required. The user-specified id of the job.
-    "labels": { # Optional. One or more labels that you can add, to organize your jobs.
-        # Each label is a key-value pair, where both the key and the value are
-        # arbitrary strings that you supply.
-        # For more information, see the documentation on
-        # <a href="/ml-engine/docs/tensorflow/resource-labels">using labels</a>.
-      "a_key": "A String",
-    },
-    "state": "A String", # Output only. The detailed state of a job.
-    "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
-        # prevent simultaneous updates of a job from overwriting each other.
-        # It is strongly suggested that systems make use of the `etag` in the
-        # read-modify-write cycle to perform job updates in order to avoid race
-        # conditions: An `etag` is returned in the response to `GetJob`, and
-        # systems are expected to put that etag in the request to `UpdateJob` to
-        # ensure that their change will be applied to the same version of the job.
-    "startTime": "A String", # Output only. When the job processing was started.
     "endTime": "A String", # Output only. When the job processing was completed.
     "predictionOutput": { # Represents results of a prediction job. # The current prediction job result.
       "outputPath": "A String", # The output Google Cloud Storage location provided at the job creation time.
@@ -3183,67 +3929,88 @@
 </div>
 
 <div class="method">
-    <code class="details" id="setIamPolicy">setIamPolicy(resource, body, x__xgafv=None)</code>
+    <code class="details" id="setIamPolicy">setIamPolicy(resource, body=None, x__xgafv=None)</code>
   <pre>Sets the access control policy on the specified resource. Replaces any
 existing policy.
 
+Can return public errors: NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED.
+
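+A typical call is the last step of a read-modify-write cycle that carries the
+policy's `etag` forward (a minimal sketch, assuming a discovery-built `ml`
+service object and a hypothetical job resource name):
+
+    from googleapiclient import discovery
+
+    ml = discovery.build('ml', 'v1')
+    name = 'projects/my-project/jobs/my_job'  # hypothetical resource name
+    policy = ml.projects().jobs().getIamPolicy(resource=name).execute()
+    policy.setdefault('bindings', []).append(
+        {'role': 'roles/viewer', 'members': ['user:eve@example.com']})
+    # The fetched policy still contains its `etag`, so a concurrent update
+    # fails cleanly instead of being silently overwritten.
+    ml.projects().jobs().setIamPolicy(
+        resource=name, body={'policy': policy}).execute()
+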
 Args:
   resource: string, REQUIRED: The resource for which the policy is being specified.
 See the operation documentation for the appropriate value for this field. (required)
-  body: object, The request body. (required)
+  body: object, The request body.
     The object takes the form of:
 
 { # Request message for `SetIamPolicy` method.
-    "policy": { # Defines an Identity and Access Management (IAM) policy. It is used to # REQUIRED: The complete policy to be applied to the `resource`. The size of
+    "policy": { # An Identity and Access Management (IAM) policy, which specifies access # REQUIRED: The complete policy to be applied to the `resource`. The size of
        # the policy is limited to a few tens of KB. An empty policy is a
        # valid policy, but certain Cloud Platform services (such as Projects)
        # might reject it.
-        # specify access control policies for Cloud Platform resources.
+        # controls for Google Cloud resources.
         #
         #
-        # A `Policy` consists of a list of `bindings`. A `binding` binds a list of
-        # `members` to a `role`, where the members can be user accounts, Google groups,
-        # Google domains, and service accounts. A `role` is a named list of permissions
-        # defined by IAM.
+        # A `Policy` is a collection of `bindings`. A `binding` binds one or more
+        # `members` to a single `role`. Members can be user accounts, service accounts,
+        # Google groups, and domains (such as G Suite). A `role` is a named list of
+        # permissions; each `role` can be an IAM predefined role or a user-created
+        # custom role.
         #
-        # **JSON Example**
+        # Optionally, a `binding` can specify a `condition`, which is a logical
+        # expression that allows access to a resource only if the expression evaluates
+        # to `true`. A condition can add constraints based on attributes of the
+        # request, the resource, or both.
+        #
+        # **JSON example:**
         #
         #     {
         #       "bindings": [
         #         {
-        #           "role": "roles/owner",
+        #           "role": "roles/resourcemanager.organizationAdmin",
         #           "members": [
         #             "user:mike@example.com",
         #             "group:admins@example.com",
         #             "domain:google.com",
-        #             "serviceAccount:my-other-app@appspot.gserviceaccount.com"
+        #             "serviceAccount:my-project-id@appspot.gserviceaccount.com"
         #           ]
         #         },
         #         {
-        #           "role": "roles/viewer",
-        #           "members": ["user:sean@example.com"]
+        #           "role": "roles/resourcemanager.organizationViewer",
+        #           "members": ["user:eve@example.com"],
+        #           "condition": {
+        #             "title": "expirable access",
+        #             "description": "Does not grant access after Sep 2020",
+        #             "expression": "request.time &lt; timestamp('2020-10-01T00:00:00.000Z')",
+        #           }
         #         }
-        #       ]
+        #       ],
+        #       "etag": "BwWWja0YfJA=",
+        #       "version": 3
         #     }
         #
-        # **YAML Example**
+        # **YAML example:**
         #
         #     bindings:
         #     - members:
         #       - user:mike@example.com
         #       - group:admins@example.com
         #       - domain:google.com
-        #       - serviceAccount:my-other-app@appspot.gserviceaccount.com
-        #       role: roles/owner
+        #       - serviceAccount:my-project-id@appspot.gserviceaccount.com
+        #       role: roles/resourcemanager.organizationAdmin
         #     - members:
-        #       - user:sean@example.com
-        #       role: roles/viewer
-        #
+        #       - user:eve@example.com
+        #       role: roles/resourcemanager.organizationViewer
+        #       condition:
+        #         title: expirable access
+        #         description: Does not grant access after Sep 2020
+        #         expression: request.time &lt; timestamp('2020-10-01T00:00:00.000Z')
+        #     etag: BwWWja0YfJA=
+        #     version: 3
         #
         # For a description of IAM and its features, see the
-        # [IAM developer's guide](https://cloud.google.com/iam/docs).
-      "bindings": [ # Associates a list of `members` to a `role`.
-          # `bindings` with no members will result in an error.
+        # [IAM documentation](https://cloud.google.com/iam/docs/).
+      "bindings": [ # Associates a list of `members` to a `role`. Optionally, may specify a
+          # `condition` that determines how and when the `bindings` are applied. Each
+          # of the `bindings` must contain at least one member.
         { # Associates `members` with a `role`.
           "role": "A String", # Role that is assigned to `members`.
               # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
@@ -3257,7 +4024,7 @@
               #    who is authenticated with a Google account or a service account.
               #
               # * `user:{emailid}`: An email address that represents a specific Google
-              #    account. For example, `alice@gmail.com` .
+              #    account. For example, `alice@example.com`.
               #
               #
               # * `serviceAccount:{emailid}`: An email address that represents a service
@@ -3266,46 +4033,78 @@
               # * `group:{emailid}`: An email address that represents a Google group.
               #    For example, `admins@example.com`.
               #
+              # * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
+              #    identifier) representing a user that has been recently deleted. For
+              #    example, `alice@example.com?uid=123456789012345678901`. If the user is
+              #    recovered, this value reverts to `user:{emailid}` and the recovered user
+              #    retains the role in the binding.
+              #
+              # * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus
+              #    unique identifier) representing a service account that has been recently
+              #    deleted. For example,
+              #    `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.
+              #    If the service account is undeleted, this value reverts to
+              #    `serviceAccount:{emailid}` and the undeleted service account retains the
+              #    role in the binding.
+              #
+              # * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique
+              #    identifier) representing a Google group that has been recently
+              #    deleted. For example, `admins@example.com?uid=123456789012345678901`. If
+              #    the group is recovered, this value reverts to `group:{emailid}` and the
+              #    recovered group retains the role in the binding.
+              #
               #
               # * `domain:{domain}`: The G Suite domain (primary) that represents all the
               #    users of that domain. For example, `google.com` or `example.com`.
               #
             "A String",
           ],
-          "condition": { # Represents an expression text. Example: # The condition that is associated with this binding.
+          "condition": { # Represents a textual expression in the Common Expression Language (CEL) # The condition that is associated with this binding.
              # NOTE: An unsatisfied condition will not allow user access via the current
               # binding. Different bindings, including their conditions, are examined
               # independently.
+              # syntax. CEL is a C-like expression language. The syntax and semantics of CEL
+              # are documented at https://github.com/google/cel-spec.
               #
-              #     title: "User account presence"
-              #     description: "Determines whether the request has a user account"
-              #     expression: "size(request.user) > 0"
-            "description": "A String", # An optional description of the expression. This is a longer text which
+              # Example (Comparison):
+              #
+              #     title: "Summary size limit"
+              #     description: "Determines if a summary is less than 100 chars"
+              #     expression: "document.summary.size() &lt; 100"
+              #
+              # Example (Equality):
+              #
+              #     title: "Requestor is owner"
+              #     description: "Determines if requestor is the document owner"
+              #     expression: "document.owner == request.auth.claims.email"
+              #
+              # Example (Logic):
+              #
+              #     title: "Public documents"
+              #     description: "Determine whether the document should be publicly visible"
+              #     expression: "document.type != 'private' &amp;&amp; document.type != 'internal'"
+              #
+              # Example (Data Manipulation):
+              #
+              #     title: "Notification string"
+              #     description: "Create a notification string with a timestamp."
+              #     expression: "'New message received at ' + string(document.create_time)"
+              #
+              # The exact variables and functions that may be referenced within an expression
+              # are determined by the service that evaluates it. See the service
+              # documentation for additional information.
+            "description": "A String", # Optional. Description of the expression. This is a longer text which
                 # describes the expression, e.g. when hovered over it in a UI.
-            "expression": "A String", # Textual representation of an expression in
-                # Common Expression Language syntax.
-                #
-                # The application context of the containing message determines which
-                # well-known feature set of CEL is supported.
-            "location": "A String", # An optional string indicating the location of the expression for error
+            "expression": "A String", # Textual representation of an expression in Common Expression Language
+                # syntax.
+            "location": "A String", # Optional. String indicating the location of the expression for error
                 # reporting, e.g. a file name and a position in the file.
-            "title": "A String", # An optional title for the expression, i.e. a short string describing
+            "title": "A String", # Optional. Title for the expression, i.e. a short string describing
                # its purpose. This can be used, e.g., in UIs that allow entering the
                # expression.
           },
         },
       ],
-      "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
-          # prevent simultaneous updates of a policy from overwriting each other.
-          # It is strongly suggested that systems make use of the `etag` in the
-          # read-modify-write cycle to perform policy updates in order to avoid race
-          # conditions: An `etag` is returned in the response to `getIamPolicy`, and
-          # systems are expected to put that etag in the request to `setIamPolicy` to
-          # ensure that their change will be applied to the same version of the policy.
-          #
-          # If no `etag` is provided in the call to `setIamPolicy`, then the existing
-          # policy is overwritten blindly.
-      "version": 42, # Deprecated.
       "auditConfigs": [ # Specifies cloud audit logging configuration for this policy.
         { # Specifies the audit configuration for a service.
             # The configuration determines which permission types are logged, and what
@@ -3327,7 +4126,7 @@
             #             {
             #               "log_type": "DATA_READ",
             #               "exempted_members": [
-            #                 "user:foo@gmail.com"
+            #                 "user:jose@example.com"
             #               ]
             #             },
             #             {
@@ -3339,7 +4138,7 @@
             #           ]
             #         },
             #         {
-            #           "service": "fooservice.googleapis.com"
+            #           "service": "sampleservice.googleapis.com"
             #           "audit_log_configs": [
             #             {
             #               "log_type": "DATA_READ",
@@ -3347,7 +4146,7 @@
             #             {
             #               "log_type": "DATA_WRITE",
             #               "exempted_members": [
-            #                 "user:bar@gmail.com"
+            #                 "user:aliya@example.com"
             #               ]
             #             }
             #           ]
@@ -3355,9 +4154,9 @@
             #       ]
             #     }
             #
-            # For fooservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ
-            # logging. It also exempts foo@gmail.com from DATA_READ logging, and
-            # bar@gmail.com from DATA_WRITE logging.
+            # For sampleservice, this policy enables DATA_READ, DATA_WRITE, and ADMIN_READ
+            # logging. It also exempts jose@example.com from DATA_READ logging, and
+            # aliya@example.com from DATA_WRITE logging.
           "auditLogConfigs": [ # The configuration for logging of each type of permission.
             { # Provides the configuration for logging a type of permissions.
                 # Example:
@@ -3367,7 +4166,7 @@
                 #         {
                 #           "log_type": "DATA_READ",
                 #           "exempted_members": [
-                #             "user:foo@gmail.com"
+                #             "user:jose@example.com"
                 #           ]
                 #         },
                 #         {
@@ -3377,7 +4176,7 @@
                 #     }
                 #
                 # This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting
-                # foo@gmail.com from DATA_READ logging.
+                # jose@example.com from DATA_READ logging.
               "exemptedMembers": [ # Specifies the identities that do not cause logging for this type of
                   # permission.
                  # Follows the same format as Binding.members.
@@ -3391,6 +4190,39 @@
               # `allServices` is a special value that covers all services.
         },
       ],
+      "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
+          # prevent simultaneous updates of a policy from overwriting each other.
+          # It is strongly suggested that systems make use of the `etag` in the
+          # read-modify-write cycle to perform policy updates in order to avoid race
+          # conditions: An `etag` is returned in the response to `getIamPolicy`, and
+          # systems are expected to put that etag in the request to `setIamPolicy` to
+          # ensure that their change will be applied to the same version of the policy.
+          #
+          # **Important:** If you use IAM Conditions, you must include the `etag` field
+          # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+          # you to overwrite a version `3` policy with a version `1` policy, and all of
+          # the conditions in the version `3` policy are lost.
+      "version": 42, # Specifies the format of the policy.
+          #
+          # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
+          # are rejected.
+          #
+          # Any operation that affects conditional role bindings must specify version
+          # `3`. This requirement applies to the following operations:
+          #
+          # * Getting a policy that includes a conditional role binding
+          # * Adding a conditional role binding to a policy
+          # * Changing a conditional role binding in a policy
+          # * Removing any role binding, with or without a condition, from a policy
+          #   that includes conditions
+          #
+          # **Important:** If you use IAM Conditions, you must include the `etag` field
+          # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+          # you to overwrite a version `3` policy with a version `1` policy, and all of
+          # the conditions in the version `3` policy are lost.
+          #
+          # If a policy does not include any conditions, operations on that policy may
+          # specify any valid version or leave the field unset.
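+          #
+          # For example, a policy that carries the conditional binding from the
+          # JSON example above must set this field to `3`:
+          #
+          #     "version": 3,
+          #     "bindings": [{
+          #       "role": "roles/resourcemanager.organizationViewer",
+          #       "members": ["user:eve@example.com"],
+          #       "condition": {
+          #         "title": "expirable access",
+          #         "expression": "request.time &lt; timestamp('2020-10-01T00:00:00.000Z')"
+          #       }
+          #     }]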
     },
     "updateMask": "A String", # OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only
         # the fields in the mask will be modified. If no mask is provided, the
@@ -3407,53 +4239,72 @@
 Returns:
   An object of the form:
 
-    { # Defines an Identity and Access Management (IAM) policy. It is used to
-      # specify access control policies for Cloud Platform resources.
+    { # An Identity and Access Management (IAM) policy, which specifies access
+      # controls for Google Cloud resources.
       #
       #
-      # A `Policy` consists of a list of `bindings`. A `binding` binds a list of
-      # `members` to a `role`, where the members can be user accounts, Google groups,
-      # Google domains, and service accounts. A `role` is a named list of permissions
-      # defined by IAM.
+      # A `Policy` is a collection of `bindings`. A `binding` binds one or more
+      # `members` to a single `role`. Members can be user accounts, service accounts,
+      # Google groups, and domains (such as G Suite). A `role` is a named list of
+      # permissions; each `role` can be an IAM predefined role or a user-created
+      # custom role.
       #
-      # **JSON Example**
+      # Optionally, a `binding` can specify a `condition`, which is a logical
+      # expression that allows access to a resource only if the expression evaluates
+      # to `true`. A condition can add constraints based on attributes of the
+      # request, the resource, or both.
+      #
+      # **JSON example:**
       #
       #     {
       #       "bindings": [
       #         {
-      #           "role": "roles/owner",
+      #           "role": "roles/resourcemanager.organizationAdmin",
       #           "members": [
       #             "user:mike@example.com",
       #             "group:admins@example.com",
       #             "domain:google.com",
-      #             "serviceAccount:my-other-app@appspot.gserviceaccount.com"
+      #             "serviceAccount:my-project-id@appspot.gserviceaccount.com"
       #           ]
       #         },
       #         {
-      #           "role": "roles/viewer",
-      #           "members": ["user:sean@example.com"]
+      #           "role": "roles/resourcemanager.organizationViewer",
+      #           "members": ["user:eve@example.com"],
+      #           "condition": {
+      #             "title": "expirable access",
+      #             "description": "Does not grant access after Sep 2020",
+      #             "expression": "request.time &lt; timestamp('2020-10-01T00:00:00.000Z')",
+      #           }
       #         }
-      #       ]
+      #       ],
+      #       "etag": "BwWWja0YfJA=",
+      #       "version": 3
       #     }
       #
-      # **YAML Example**
+      # **YAML example:**
       #
       #     bindings:
       #     - members:
       #       - user:mike@example.com
       #       - group:admins@example.com
       #       - domain:google.com
-      #       - serviceAccount:my-other-app@appspot.gserviceaccount.com
-      #       role: roles/owner
+      #       - serviceAccount:my-project-id@appspot.gserviceaccount.com
+      #       role: roles/resourcemanager.organizationAdmin
       #     - members:
-      #       - user:sean@example.com
-      #       role: roles/viewer
-      #
+      #       - user:eve@example.com
+      #       role: roles/resourcemanager.organizationViewer
+      #       condition:
+      #         title: expirable access
+      #         description: Does not grant access after Sep 2020
+      #         expression: request.time &lt; timestamp('2020-10-01T00:00:00.000Z')
+      #     etag: BwWWja0YfJA=
+      #     version: 3
       #
       # For a description of IAM and its features, see the
-      # [IAM developer's guide](https://cloud.google.com/iam/docs).
-    "bindings": [ # Associates a list of `members` to a `role`.
-        # `bindings` with no members will result in an error.
+      # [IAM documentation](https://cloud.google.com/iam/docs/).
+    "bindings": [ # Associates a list of `members` to a `role`. Optionally, may specify a
+        # `condition` that determines how and when the `bindings` are applied. Each
+        # of the `bindings` must contain at least one member.
       { # Associates `members` with a `role`.
         "role": "A String", # Role that is assigned to `members`.
             # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
@@ -3467,7 +4318,7 @@
             #    who is authenticated with a Google account or a service account.
             #
             # * `user:{emailid}`: An email address that represents a specific Google
-            #    account. For example, `alice@gmail.com` .
+            #    account. For example, `alice@example.com`.
             #
             #
             # * `serviceAccount:{emailid}`: An email address that represents a service
@@ -3476,46 +4327,78 @@
             # * `group:{emailid}`: An email address that represents a Google group.
             #    For example, `admins@example.com`.
             #
+            # * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
+            #    identifier) representing a user that has been recently deleted. For
+            #    example, `alice@example.com?uid=123456789012345678901`. If the user is
+            #    recovered, this value reverts to `user:{emailid}` and the recovered user
+            #    retains the role in the binding.
+            #
+            # * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus
+            #    unique identifier) representing a service account that has been recently
+            #    deleted. For example,
+            #    `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.
+            #    If the service account is undeleted, this value reverts to
+            #    `serviceAccount:{emailid}` and the undeleted service account retains the
+            #    role in the binding.
+            #
+            # * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique
+            #    identifier) representing a Google group that has been recently
+            #    deleted. For example, `admins@example.com?uid=123456789012345678901`. If
+            #    the group is recovered, this value reverts to `group:{emailid}` and the
+            #    recovered group retains the role in the binding.
+            #
             #
             # * `domain:{domain}`: The G Suite domain (primary) that represents all the
             #    users of that domain. For example, `google.com` or `example.com`.
             #
           "A String",
         ],
-        "condition": { # Represents an expression text. Example: # The condition that is associated with this binding.
+        "condition": { # Represents a textual expression in the Common Expression Language (CEL) # The condition that is associated with this binding.
            # NOTE: An unsatisfied condition will not allow user access via the current
             # binding. Different bindings, including their conditions, are examined
             # independently.
+            # syntax. CEL is a C-like expression language. The syntax and semantics of CEL
+            # are documented at https://github.com/google/cel-spec.
             #
-            #     title: "User account presence"
-            #     description: "Determines whether the request has a user account"
-            #     expression: "size(request.user) > 0"
-          "description": "A String", # An optional description of the expression. This is a longer text which
+            # Example (Comparison):
+            #
+            #     title: "Summary size limit"
+            #     description: "Determines if a summary is less than 100 chars"
+            #     expression: "document.summary.size() &lt; 100"
+            #
+            # Example (Equality):
+            #
+            #     title: "Requestor is owner"
+            #     description: "Determines if requestor is the document owner"
+            #     expression: "document.owner == request.auth.claims.email"
+            #
+            # Example (Logic):
+            #
+            #     title: "Public documents"
+            #     description: "Determine whether the document should be publicly visible"
+            #     expression: "document.type != 'private' &amp;&amp; document.type != 'internal'"
+            #
+            # Example (Data Manipulation):
+            #
+            #     title: "Notification string"
+            #     description: "Create a notification string with a timestamp."
+            #     expression: "'New message received at ' + string(document.create_time)"
+            #
+            # The exact variables and functions that may be referenced within an expression
+            # are determined by the service that evaluates it. See the service
+            # documentation for additional information.
+          "description": "A String", # Optional. Description of the expression. This is a longer text which
               # describes the expression, e.g. when hovered over it in a UI.
-          "expression": "A String", # Textual representation of an expression in
-              # Common Expression Language syntax.
-              #
-              # The application context of the containing message determines which
-              # well-known feature set of CEL is supported.
-          "location": "A String", # An optional string indicating the location of the expression for error
+          "expression": "A String", # Textual representation of an expression in Common Expression Language
+              # syntax.
+          "location": "A String", # Optional. String indicating the location of the expression for error
               # reporting, e.g. a file name and a position in the file.
-          "title": "A String", # An optional title for the expression, i.e. a short string describing
+          "title": "A String", # Optional. Title for the expression, i.e. a short string describing
              # its purpose. This can be used, e.g., in UIs that allow entering the
              # expression.
         },
       },
     ],
-    "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
-        # prevent simultaneous updates of a policy from overwriting each other.
-        # It is strongly suggested that systems make use of the `etag` in the
-        # read-modify-write cycle to perform policy updates in order to avoid race
-        # conditions: An `etag` is returned in the response to `getIamPolicy`, and
-        # systems are expected to put that etag in the request to `setIamPolicy` to
-        # ensure that their change will be applied to the same version of the policy.
-        #
-        # If no `etag` is provided in the call to `setIamPolicy`, then the existing
-        # policy is overwritten blindly.
-    "version": 42, # Deprecated.
     "auditConfigs": [ # Specifies cloud audit logging configuration for this policy.
       { # Specifies the audit configuration for a service.
           # The configuration determines which permission types are logged, and what
@@ -3537,7 +4420,7 @@
           #             {
           #               "log_type": "DATA_READ",
           #               "exempted_members": [
-          #                 "user:foo@gmail.com"
+          #                 "user:jose@example.com"
           #               ]
           #             },
           #             {
@@ -3549,7 +4432,7 @@
           #           ]
           #         },
           #         {
-          #           "service": "fooservice.googleapis.com"
+          #           "service": "sampleservice.googleapis.com"
           #           "audit_log_configs": [
           #             {
           #               "log_type": "DATA_READ",
@@ -3557,7 +4440,7 @@
           #             {
           #               "log_type": "DATA_WRITE",
           #               "exempted_members": [
-          #                 "user:bar@gmail.com"
+          #                 "user:aliya@example.com"
           #               ]
           #             }
           #           ]
@@ -3565,9 +4448,9 @@
           #       ]
           #     }
           #
-          # For fooservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ
-          # logging. It also exempts foo@gmail.com from DATA_READ logging, and
-          # bar@gmail.com from DATA_WRITE logging.
+          # For sampleservice, this policy enables DATA_READ, DATA_WRITE, and ADMIN_READ
+          # logging. It also exempts jose@example.com from DATA_READ logging, and
+          # aliya@example.com from DATA_WRITE logging.
         "auditLogConfigs": [ # The configuration for logging of each type of permission.
           { # Provides the configuration for logging a type of permissions.
               # Example:
@@ -3577,7 +4460,7 @@
               #         {
               #           "log_type": "DATA_READ",
               #           "exempted_members": [
-              #             "user:foo@gmail.com"
+              #             "user:jose@example.com"
               #           ]
               #         },
               #         {
@@ -3587,7 +4470,7 @@
               #     }
               #
               # This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting
-              # foo@gmail.com from DATA_READ logging.
+              # jose@example.com from DATA_READ logging.
             "exemptedMembers": [ # Specifies the identities that do not cause logging for this type of
                 # permission.
                # Follows the same format as Binding.members.
@@ -3601,11 +4484,44 @@
             # `allServices` is a special value that covers all services.
       },
     ],
+    "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
+        # prevent simultaneous updates of a policy from overwriting each other.
+        # It is strongly suggested that systems make use of the `etag` in the
+        # read-modify-write cycle to perform policy updates in order to avoid race
+        # conditions: An `etag` is returned in the response to `getIamPolicy`, and
+        # systems are expected to put that etag in the request to `setIamPolicy` to
+        # ensure that their change will be applied to the same version of the policy.
+        #
+        # **Important:** If you use IAM Conditions, you must include the `etag` field
+        # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+        # you to overwrite a version `3` policy with a version `1` policy, and all of
+        # the conditions in the version `3` policy are lost.
+    "version": 42, # Specifies the format of the policy.
+        #
+        # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
+        # are rejected.
+        #
+        # Any operation that affects conditional role bindings must specify version
+        # `3`. This requirement applies to the following operations:
+        #
+        # * Getting a policy that includes a conditional role binding
+        # * Adding a conditional role binding to a policy
+        # * Changing a conditional role binding in a policy
+        # * Removing any role binding, with or without a condition, from a policy
+        #   that includes conditions
+        #
+        # **Important:** If you use IAM Conditions, you must include the `etag` field
+        # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+        # you to overwrite a version `3` policy with a version `1` policy, and all of
+        # the conditions in the version `3` policy are lost.
+        #
+        # If a policy does not include any conditions, operations on that policy may
+        # specify any valid version or leave the field unset.
   }</pre>
 </div>
 
 <div class="method">
-    <code class="details" id="testIamPermissions">testIamPermissions(resource, body, x__xgafv=None)</code>
+    <code class="details" id="testIamPermissions">testIamPermissions(resource, body=None, x__xgafv=None)</code>
   <pre>Returns permissions that a caller has on the specified resource.
 If the resource does not exist, this will return an empty set of
 permissions, not a NOT_FOUND error.
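+
+A quick capability probe looks like the following sketch (the job resource
+name is hypothetical, and `ml.jobs.get` / `ml.jobs.cancel` are AI Platform
+permission names):
+
+    from googleapiclient import discovery
+
+    ml = discovery.build('ml', 'v1')
+    response = ml.projects().jobs().testIamPermissions(
+        resource='projects/my-project/jobs/my_job',
+        body={'permissions': ['ml.jobs.get', 'ml.jobs.cancel']}).execute()
+    granted = response.get('permissions', [])  # the subset the caller holds
+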
@@ -3617,7 +4533,7 @@
 Args:
   resource: string, REQUIRED: The resource for which the policy detail is being requested.
 See the operation documentation for the appropriate value for this field. (required)
-  body: object, The request body. (required)
+  body: object, The request body.
     The object takes the form of:
 
 { # Request message for `TestIamPermissions` method.