docs: update generated docs (#981)

diff --git a/docs/dyn/dataflow_v1b3.projects.templates.html b/docs/dyn/dataflow_v1b3.projects.templates.html
index da13aae..042bff5 100644
--- a/docs/dyn/dataflow_v1b3.projects.templates.html
+++ b/docs/dyn/dataflow_v1b3.projects.templates.html
@@ -81,7 +81,7 @@
   <code><a href="#get">get(projectId, view=None, gcsPath=None, location=None, x__xgafv=None)</a></code></p>
 <p class="firstline">Get the template associated with a template.</p>
 <p class="toc_element">
-  <code><a href="#launch">launch(projectId, body=None, validateOnly=None, gcsPath=None, location=None, dynamicTemplate_gcsPath=None, dynamicTemplate_stagingLocation=None, x__xgafv=None)</a></code></p>
+  <code><a href="#launch">launch(projectId, body=None, dynamicTemplate_gcsPath=None, dynamicTemplate_stagingLocation=None, location=None, validateOnly=None, gcsPath=None, x__xgafv=None)</a></code></p>
 <p class="firstline">Launch a template.</p>
 <h3>Method Details</h3>
 <div class="method">
@@ -94,35 +94,34 @@
     The object takes the form of:
 
 { # A request to create a Cloud Dataflow job from a template.
-    &quot;jobName&quot;: &quot;A String&quot;, # Required. The job name to use for the created job.
-    &quot;gcsPath&quot;: &quot;A String&quot;, # Required. A Cloud Storage path to the template from which to
-        # create the job.
-        # Must be a valid Cloud Storage URL, beginning with `gs://`.
+    &quot;location&quot;: &quot;A String&quot;, # The [regional endpoint]
+        # (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to
+        # which to direct the request.
     &quot;environment&quot;: { # The environment values to set at runtime. # The runtime environment for the job.
-      &quot;machineType&quot;: &quot;A String&quot;, # The machine type to use for the job. Defaults to the value from the
-          # template if not specified.
-      &quot;subnetwork&quot;: &quot;A String&quot;, # Subnetwork to which VMs will be assigned, if desired.  Expected to be of
-          # the form &quot;regions/REGION/subnetworks/SUBNETWORK&quot;.
-      &quot;ipConfiguration&quot;: &quot;A String&quot;, # Configuration for VM IPs.
-      &quot;kmsKeyName&quot;: &quot;A String&quot;, # Optional. Name for the Cloud KMS key for the job.
-          # Key format is:
-          # projects/&lt;project&gt;/locations/&lt;location&gt;/keyRings/&lt;keyring&gt;/cryptoKeys/&lt;key&gt;
-      &quot;tempLocation&quot;: &quot;A String&quot;, # The Cloud Storage path to use for temporary files.
-          # Must be a valid Cloud Storage URL, beginning with `gs://`.
       &quot;bypassTempDirValidation&quot;: True or False, # Whether to bypass the safety checks for the job&#x27;s temporary directory.
           # Use with caution.
+      &quot;tempLocation&quot;: &quot;A String&quot;, # The Cloud Storage path to use for temporary files.
+          # Must be a valid Cloud Storage URL, beginning with `gs://`.
       &quot;network&quot;: &quot;A String&quot;, # Network to which VMs will be assigned.  If empty or unspecified,
           # the service will use the network &quot;default&quot;.
+      &quot;subnetwork&quot;: &quot;A String&quot;, # Subnetwork to which VMs will be assigned, if desired.  Expected to be of
+          # the form &quot;regions/REGION/subnetworks/SUBNETWORK&quot;.
       &quot;workerRegion&quot;: &quot;A String&quot;, # The Compute Engine region
           # (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in
           # which worker processing should occur, e.g. &quot;us-west1&quot;. Mutually exclusive
           # with worker_zone. If neither worker_region nor worker_zone is specified,
           # default to the control plane&#x27;s region.
+      &quot;numWorkers&quot;: 42, # The initial number of Google Compute Engine instances for the job.
+      &quot;additionalExperiments&quot;: [ # Additional experiment flags for the job.
+        &quot;A String&quot;,
+      ],
       &quot;zone&quot;: &quot;A String&quot;, # The Compute Engine [availability
           # zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones)
           # for launching worker instances to run your pipeline.
           # In the future, worker_zone will take precedence.
-      &quot;numWorkers&quot;: 42, # The initial number of Google Compute Engine instances for the job.
+      &quot;serviceAccountEmail&quot;: &quot;A String&quot;, # The email address of the service account to run the job as.
+      &quot;maxWorkers&quot;: 42, # The maximum number of Google Compute Engine instances to be made
+          # available to your pipeline during execution, from 1 to 1000.
       &quot;workerZone&quot;: &quot;A String&quot;, # The Compute Engine zone
           # (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in
           # which worker processing should occur, e.g. &quot;us-west1-a&quot;. Mutually exclusive
@@ -135,16 +134,17 @@
           # page.
         &quot;a_key&quot;: &quot;A String&quot;,
       },
-      &quot;additionalExperiments&quot;: [ # Additional experiment flags for the job.
-        &quot;A String&quot;,
-      ],
-      &quot;maxWorkers&quot;: 42, # The maximum number of Google Compute Engine instances to be made
-          # available to your pipeline during execution, from 1 to 1000.
-      &quot;serviceAccountEmail&quot;: &quot;A String&quot;, # The email address of the service account to run the job as.
+      &quot;machineType&quot;: &quot;A String&quot;, # The machine type to use for the job. Defaults to the value from the
+          # template if not specified.
+      &quot;ipConfiguration&quot;: &quot;A String&quot;, # Configuration for VM IPs.
+      &quot;kmsKeyName&quot;: &quot;A String&quot;, # Optional. Name for the Cloud KMS key for the job.
+          # Key format is:
+          # projects/&lt;project&gt;/locations/&lt;location&gt;/keyRings/&lt;keyring&gt;/cryptoKeys/&lt;key&gt;
     },
-    &quot;location&quot;: &quot;A String&quot;, # The [regional endpoint]
-        # (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to
-        # which to direct the request.
+    &quot;gcsPath&quot;: &quot;A String&quot;, # Required. A Cloud Storage path to the template from which to
+        # create the job.
+        # Must be a valid Cloud Storage URL, beginning with `gs://`.
+    &quot;jobName&quot;: &quot;A String&quot;, # Required. The job name to use for the created job.
     &quot;parameters&quot;: { # The runtime parameters to pass to the job.
       &quot;a_key&quot;: &quot;A String&quot;,
     },
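
For orientation, a minimal sketch of sending this request body from the generated Python client. It assumes the `create(projectId, body=None, x__xgafv=None)` method listed above this excerpt, Application Default Credentials, and hypothetical project, bucket, and template paths:

from googleapiclient.discovery import build

# Build the Dataflow v1b3 client; credentials are assumed to come from
# Application Default Credentials.
service = build("dataflow", "v1b3")

body = {
    "jobName": "example-wordcount",                   # Required.
    "gcsPath": "gs://my-bucket/templates/wordcount",  # Required; hypothetical template path.
    "location": "us-central1",                        # Regional endpoint to direct the request to.
    "parameters": {"inputFile": "gs://my-bucket/input.txt"},  # Runtime parameters defined by the template.
    "environment": {
        "tempLocation": "gs://my-bucket/temp",        # Must be a valid gs:// URL.
        "maxWorkers": 3,                              # From 1 to 1000.
    },
}

# Returns the Job object described under "Returns" below.
job = service.projects().templates().create(projectId="my-project", body=body).execute()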
@@ -159,827 +159,162 @@
   An object of the form:
 
     { # Defines a job to be run by the Cloud Dataflow service.
-    &quot;clientRequestId&quot;: &quot;A String&quot;, # The client&#x27;s unique identifier of the job, re-used across retried attempts.
-        # If this field is set, the service will ensure its uniqueness.
-        # The request to create a job will fail if the service has knowledge of a
-        # previously submitted job with the same client&#x27;s ID and job name.
-        # The caller may use this field to ensure idempotence of job
-        # creation across retried attempts to create a job.
-        # By default, the field is empty and, in that case, the service ignores it.
-    &quot;id&quot;: &quot;A String&quot;, # The unique ID of this job.
-        #
-        # This field is set by the Cloud Dataflow service when the Job is
-        # created, and is immutable for the life of the job.
-    &quot;currentStateTime&quot;: &quot;A String&quot;, # The timestamp associated with the current state.
-    &quot;transformNameMapping&quot;: { # The map of transform name prefixes of the job to be replaced to the
-        # corresponding name prefixes of the new job.
-      &quot;a_key&quot;: &quot;A String&quot;,
-    },
-    &quot;environment&quot;: { # Describes the environment in which a Dataflow Job runs. # The environment for the job.
-      &quot;internalExperiments&quot;: { # Experimental settings.
-        &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
-      },
-      &quot;workerRegion&quot;: &quot;A String&quot;, # The Compute Engine region
-          # (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in
-          # which worker processing should occur, e.g. &quot;us-west1&quot;. Mutually exclusive
-          # with worker_zone. If neither worker_region nor worker_zone is specified,
-          # default to the control plane&#x27;s region.
-      &quot;serviceKmsKeyName&quot;: &quot;A String&quot;, # If set, contains the Cloud KMS key identifier used to encrypt data
-          # at rest, AKA a Customer Managed Encryption Key (CMEK).
-          #
-          # Format:
-          #   projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY
-      &quot;userAgent&quot;: { # A description of the process that generated the request.
-        &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
-      },
-      &quot;workerZone&quot;: &quot;A String&quot;, # The Compute Engine zone
-          # (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in
-          # which worker processing should occur, e.g. &quot;us-west1-a&quot;. Mutually exclusive
-          # with worker_region. If neither worker_region nor worker_zone is specified,
-          # a zone in the control plane&#x27;s region is chosen based on available capacity.
-      &quot;clusterManagerApiService&quot;: &quot;A String&quot;, # The type of cluster manager API to use.  If unknown or
-          # unspecified, the service will attempt to choose a reasonable
-          # default.  This should be in the form of the API service name,
-          # e.g. &quot;compute.googleapis.com&quot;.
-      &quot;tempStoragePrefix&quot;: &quot;A String&quot;, # The prefix of the resources the system should use for temporary
-          # storage.  The system will append the suffix &quot;/temp-{JOBNAME}&quot; to
-          # this resource prefix, where {JOBNAME} is the value of the
-          # job_name field.  The resulting bucket and object prefix is used
-          # as the prefix of the resources used to store temporary data
-          # needed during the job execution.  NOTE: This will override the
-          # value in taskrunner_settings.
-          # The supported resource type is:
-          #
-          # Google Cloud Storage:
-          #
-          #   storage.googleapis.com/{bucket}/{object}
-          #   bucket.storage.googleapis.com/{object}
-      &quot;experiments&quot;: [ # The list of experiments to enable.
-        &quot;A String&quot;,
-      ],
-      &quot;version&quot;: { # A structure describing which components and their versions of the service
-          # are required in order to run the job.
-        &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
-      },
-      &quot;serviceAccountEmail&quot;: &quot;A String&quot;, # Identity to run virtual machines as. Defaults to the default account.
-      &quot;sdkPipelineOptions&quot;: { # The Cloud Dataflow SDK pipeline options specified by the user. These
-          # options are passed through the service and are used to recreate the
-          # SDK pipeline options on the worker in a language agnostic and platform
-          # independent way.
-        &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
-      },
-      &quot;flexResourceSchedulingGoal&quot;: &quot;A String&quot;, # Which Flexible Resource Scheduling mode to run in.
-      &quot;workerPools&quot;: [ # The worker pools. At least one &quot;harness&quot; worker pool must be
-          # specified in order for the job to have workers.
-        { # Describes one particular pool of Cloud Dataflow workers to be
-            # instantiated by the Cloud Dataflow service in order to perform the
-            # computations required by a job.  Note that a workflow job may use
-            # multiple pools, in order to match the various computational
-            # requirements of the various stages of the job.
-          &quot;numThreadsPerWorker&quot;: 42, # The number of threads per worker harness. If empty or unspecified, the
-              # service will choose a number of threads (according to the number of cores
-              # on the selected machine type for batch, or 1 by convention for streaming).
-          &quot;numWorkers&quot;: 42, # Number of Google Compute Engine workers in this pool needed to
-              # execute the job.  If zero or unspecified, the service will
-              # attempt to choose a reasonable default.
-          &quot;zone&quot;: &quot;A String&quot;, # Zone to run the worker pools in.  If empty or unspecified, the service
-              # will attempt to choose a reasonable default.
-          &quot;diskSourceImage&quot;: &quot;A String&quot;, # Fully qualified source image for disks.
-          &quot;packages&quot;: [ # Packages to be installed on workers.
-            { # The packages that must be installed in order for a worker to run the
-                # steps of the Cloud Dataflow job that will be assigned to its worker
-                # pool.
-                #
-                # This is the mechanism by which the Cloud Dataflow SDK causes code to
-                # be loaded onto the workers. For example, the Cloud Dataflow Java SDK
-                # might use this to install jars containing the user&#x27;s code and all of the
-                # various dependencies (libraries, data files, etc.) required in order
-                # for that code to run.
-              &quot;name&quot;: &quot;A String&quot;, # The name of the package.
-              &quot;location&quot;: &quot;A String&quot;, # The resource to read the package from. The supported resource type is:
-                  #
-                  # Google Cloud Storage:
-                  #
-                  #   storage.googleapis.com/{bucket}
-                  #   bucket.storage.googleapis.com/
-            },
-          ],
-          &quot;teardownPolicy&quot;: &quot;A String&quot;, # Sets the policy for determining when to turn down the worker pool.
-              # Allowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and
-              # `TEARDOWN_NEVER`.
-              # `TEARDOWN_ALWAYS` means workers are always torn down regardless of whether
-              # the job succeeds. `TEARDOWN_ON_SUCCESS` means workers are torn down
-              # if the job succeeds. `TEARDOWN_NEVER` means the workers are never torn
-              # down.
-              #
-              # If the workers are not torn down by the service, they will
-              # continue to run and use Google Compute Engine VM resources in the
-              # user&#x27;s project until they are explicitly terminated by the user.
-              # Because of this, Google recommends using the `TEARDOWN_ALWAYS`
-              # policy except for small, manually supervised test jobs.
-              #
-              # If unknown or unspecified, the service will attempt to choose a reasonable
-              # default.
-          &quot;onHostMaintenance&quot;: &quot;A String&quot;, # The action to take on host maintenance, as defined by the Google
-              # Compute Engine API.
-          &quot;poolArgs&quot;: { # Extra arguments for this worker pool.
-            &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
+      &quot;pipelineDescription&quot;: { # Preliminary field: The format of this data may change at any time.
+          # A description of the user pipeline and stages through which it is executed.
+          # Created by Cloud Dataflow service.  Only retrieved with
+          # JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL.
+          # A descriptive representation of the submitted pipeline as well as the executed
+          # form.  This data is provided by the Dataflow service for ease of visualizing
+          # the pipeline and interpreting Dataflow provided metrics.
+        &quot;displayData&quot;: [ # Pipeline level display data.
+          { # Data provided with a pipeline or transform to provide descriptive info.
+            &quot;url&quot;: &quot;A String&quot;, # An optional full URL.
+            &quot;javaClassValue&quot;: &quot;A String&quot;, # Contains value if the data is of java class type.
+            &quot;timestampValue&quot;: &quot;A String&quot;, # Contains value if the data is of timestamp type.
+            &quot;durationValue&quot;: &quot;A String&quot;, # Contains value if the data is of duration type.
+            &quot;label&quot;: &quot;A String&quot;, # An optional label to display in a dax UI for the element.
+            &quot;key&quot;: &quot;A String&quot;, # The key identifying the display data.
+                # This is intended to be used as a label for the display data
+                # when viewed in a dax monitoring system.
+            &quot;namespace&quot;: &quot;A String&quot;, # The namespace for the key. This is usually a class name or programming
+                # language namespace (i.e. python module) which defines the display data.
+                # This allows a dax monitoring system to specially handle the data
+                # and perform custom rendering.
+            &quot;floatValue&quot;: 3.14, # Contains value if the data is of float type.
+            &quot;strValue&quot;: &quot;A String&quot;, # Contains value if the data is of string type.
+            &quot;int64Value&quot;: &quot;A String&quot;, # Contains value if the data is of int64 type.
+            &quot;boolValue&quot;: True or False, # Contains value if the data is of a boolean type.
+            &quot;shortStrValue&quot;: &quot;A String&quot;, # A possible additional shorter value to display.
+                # For example a java_class_name_value of com.mypackage.MyDoFn
+                # will be stored with MyDoFn as the short_str_value and
+                # com.mypackage.MyDoFn as the java_class_name value.
+                # short_str_value can be displayed and java_class_name_value
+                # will be displayed as a tooltip.
           },
-          &quot;diskSizeGb&quot;: 42, # Size of root disk for VMs, in GB.  If zero or unspecified, the service will
-              # attempt to choose a reasonable default.
-          &quot;workerHarnessContainerImage&quot;: &quot;A String&quot;, # Required. Docker container image that executes the Cloud Dataflow worker
-              # harness, residing in Google Container Registry.
-              #
-              # Deprecated for the Fn API path. Use sdk_harness_container_images instead.
-          &quot;diskType&quot;: &quot;A String&quot;, # Type of root disk for VMs.  If empty or unspecified, the service will
-              # attempt to choose a reasonable default.
-          &quot;machineType&quot;: &quot;A String&quot;, # Machine type (e.g. &quot;n1-standard-1&quot;).  If empty or unspecified, the
-              # service will attempt to choose a reasonable default.
-          &quot;kind&quot;: &quot;A String&quot;, # The kind of the worker pool; currently only `harness` and `shuffle`
-              # are supported.
-          &quot;sdkHarnessContainerImages&quot;: [ # Set of SDK harness containers needed to execute this pipeline. This will
-              # only be set in the Fn API path. For non-cross-language pipelines this
-              # should have only one entry. Cross-language pipelines will have two or more
-              # entries.
-            { # Defines a SDK harness container for executing Dataflow pipelines.
-              &quot;containerImage&quot;: &quot;A String&quot;, # A docker container image that resides in Google Container Registry.
-              &quot;useSingleCorePerContainer&quot;: True or False, # If true, recommends that the Dataflow service use only one core per SDK
-                  # container instance with this image. If false (or unset), recommends using
-                  # more than one core per SDK container instance with this image for
-                  # efficiency. Note that the Dataflow service may choose to override this
-                  # property if needed.
-            },
-          ],
-          &quot;dataDisks&quot;: [ # Data disks that are used by a VM in this workflow.
-            { # Describes the data disk used by a workflow job.
-              &quot;diskType&quot;: &quot;A String&quot;, # Disk storage type, as defined by Google Compute Engine.  This
-                  # must be a disk type appropriate to the project and zone in which
-                  # the workers will run.  If unknown or unspecified, the service
-                  # will attempt to choose a reasonable default.
-                  #
-                  # For example, the standard persistent disk type is a resource name
-                  # typically ending in &quot;pd-standard&quot;.  If SSD persistent disks are
-                  # available, the resource name typically ends with &quot;pd-ssd&quot;.  The
-                  # actual valid values are defined by the Google Compute Engine API,
-                  # not by the Cloud Dataflow API; consult the Google Compute Engine
-                  # documentation for more information about determining the set of
-                  # available disk types for a particular project and zone.
-                  #
-                  # Google Compute Engine Disk types are local to a particular
-                  # project in a particular zone, and so the resource name will
-                  # typically look something like this:
-                  #
-                  # compute.googleapis.com/projects/project-id/zones/zone/diskTypes/pd-standard
-              &quot;sizeGb&quot;: 42, # Size of disk in GB.  If zero or unspecified, the service will
-                  # attempt to choose a reasonable default.
-              &quot;mountPoint&quot;: &quot;A String&quot;, # Directory in a VM where disk is mounted.
-            },
-          ],
-          &quot;subnetwork&quot;: &quot;A String&quot;, # Subnetwork to which VMs will be assigned, if desired.  Expected to be of
-              # the form &quot;regions/REGION/subnetworks/SUBNETWORK&quot;.
-          &quot;ipConfiguration&quot;: &quot;A String&quot;, # Configuration for VM IPs.
-          &quot;taskrunnerSettings&quot;: { # Taskrunner configuration settings. # Settings passed through to Google Compute Engine workers when
-              # using the standard Dataflow task runner.  Users should ignore
-              # this field.
-            &quot;alsologtostderr&quot;: True or False, # Whether to also send taskrunner log info to stderr.
-            &quot;taskGroup&quot;: &quot;A String&quot;, # The UNIX group ID on the worker VM to use for tasks launched by
-                # taskrunner; e.g. &quot;wheel&quot;.
-            &quot;harnessCommand&quot;: &quot;A String&quot;, # The command to launch the worker harness.
-            &quot;logDir&quot;: &quot;A String&quot;, # The directory on the VM to store logs.
-            &quot;oauthScopes&quot;: [ # The OAuth2 scopes to be requested by the taskrunner in order to
-                # access the Cloud Dataflow API.
+        ],
+        &quot;originalPipelineTransform&quot;: [ # Description of each transform in the pipeline and collections between them.
+          { # Description of the type, names/ids, and input/outputs for a transform.
+            &quot;outputCollectionName&quot;: [ # User names for all collection outputs to this transform.
               &quot;A String&quot;,
             ],
-            &quot;dataflowApiVersion&quot;: &quot;A String&quot;, # The API version of endpoint, e.g. &quot;v1b3&quot;
-            &quot;logUploadLocation&quot;: &quot;A String&quot;, # Indicates where to put logs.  If this is not specified, the logs
-                # will not be uploaded.
-                #
-                # The supported resource type is:
-                #
-                # Google Cloud Storage:
-                #   storage.googleapis.com/{bucket}/{object}
-                #   bucket.storage.googleapis.com/{object}
-            &quot;streamingWorkerMainClass&quot;: &quot;A String&quot;, # The streaming worker main class name.
-            &quot;workflowFileName&quot;: &quot;A String&quot;, # The file to store the workflow in.
-            &quot;languageHint&quot;: &quot;A String&quot;, # The suggested backend language.
-            &quot;commandlinesFileName&quot;: &quot;A String&quot;, # The file to store preprocessing commands in.
-            &quot;baseTaskDir&quot;: &quot;A String&quot;, # The location on the worker for task-specific subdirectories.
-            &quot;tempStoragePrefix&quot;: &quot;A String&quot;, # The prefix of the resources the taskrunner should use for
-                # temporary storage.
-                #
-                # The supported resource type is:
-                #
-                # Google Cloud Storage:
-                #   storage.googleapis.com/{bucket}/{object}
-                #   bucket.storage.googleapis.com/{object}
-            &quot;baseUrl&quot;: &quot;A String&quot;, # The base URL for the taskrunner to use when accessing Google Cloud APIs.
-                #
-                # When workers access Google Cloud APIs, they logically do so via
-                # relative URLs.  If this field is specified, it supplies the base
-                # URL to use for resolving these relative URLs.  The normative
-                # algorithm used is defined by RFC 1808, &quot;Relative Uniform Resource
-                # Locators&quot;.
-                #
-                # If not specified, the default value is &quot;http://www.googleapis.com/&quot;
-            &quot;logToSerialconsole&quot;: True or False, # Whether to send taskrunner log info to Google Compute Engine VM serial
-                # console.
-            &quot;continueOnException&quot;: True or False, # Whether to continue taskrunner if an exception is hit.
-            &quot;parallelWorkerSettings&quot;: { # Provides data to pass through to the worker harness. # The settings to pass to the parallel worker harness.
-              &quot;tempStoragePrefix&quot;: &quot;A String&quot;, # The prefix of the resources the system should use for temporary
-                  # storage.
-                  #
-                  # The supported resource type is:
-                  #
-                  # Google Cloud Storage:
-                  #
-                  #   storage.googleapis.com/{bucket}/{object}
-                  #   bucket.storage.googleapis.com/{object}
-              &quot;reportingEnabled&quot;: True or False, # Whether to send work progress updates to the service.
-              &quot;baseUrl&quot;: &quot;A String&quot;, # The base URL for accessing Google Cloud APIs.
-                  #
-                  # When workers access Google Cloud APIs, they logically do so via
-                  # relative URLs.  If this field is specified, it supplies the base
-                  # URL to use for resolving these relative URLs.  The normative
-                  # algorithm used is defined by RFC 1808, &quot;Relative Uniform Resource
-                  # Locators&quot;.
-                  #
-                  # If not specified, the default value is &quot;http://www.googleapis.com/&quot;
-              &quot;servicePath&quot;: &quot;A String&quot;, # The Cloud Dataflow service path relative to the root URL, for example,
-                  # &quot;dataflow/v1b3/projects&quot;.
-              &quot;shuffleServicePath&quot;: &quot;A String&quot;, # The Shuffle service path relative to the root URL, for example,
-                  # &quot;shuffle/v1beta1&quot;.
-              &quot;workerId&quot;: &quot;A String&quot;, # The ID of the worker running this pipeline.
-            },
-            &quot;taskUser&quot;: &quot;A String&quot;, # The UNIX user ID on the worker VM to use for tasks launched by
-                # taskrunner; e.g. &quot;root&quot;.
-            &quot;vmId&quot;: &quot;A String&quot;, # The ID string of the VM.
+            &quot;displayData&quot;: [ # Transform-specific display data.
+              { # Data provided with a pipeline or transform to provide descriptive info.
+                &quot;url&quot;: &quot;A String&quot;, # An optional full URL.
+                &quot;javaClassValue&quot;: &quot;A String&quot;, # Contains value if the data is of java class type.
+                &quot;timestampValue&quot;: &quot;A String&quot;, # Contains value if the data is of timestamp type.
+                &quot;durationValue&quot;: &quot;A String&quot;, # Contains value if the data is of duration type.
+                &quot;label&quot;: &quot;A String&quot;, # An optional label to display in a dax UI for the element.
+                &quot;key&quot;: &quot;A String&quot;, # The key identifying the display data.
+                    # This is intended to be used as a label for the display data
+                    # when viewed in a dax monitoring system.
+                &quot;namespace&quot;: &quot;A String&quot;, # The namespace for the key. This is usually a class name or programming
+                    # language namespace (i.e. python module) which defines the display data.
+                    # This allows a dax monitoring system to specially handle the data
+                    # and perform custom rendering.
+                &quot;floatValue&quot;: 3.14, # Contains value if the data is of float type.
+                &quot;strValue&quot;: &quot;A String&quot;, # Contains value if the data is of string type.
+                &quot;int64Value&quot;: &quot;A String&quot;, # Contains value if the data is of int64 type.
+                &quot;boolValue&quot;: True or False, # Contains value if the data is of a boolean type.
+                &quot;shortStrValue&quot;: &quot;A String&quot;, # A possible additional shorter value to display.
+                    # For example a java_class_name_value of com.mypackage.MyDoFn
+                    # will be stored with MyDoFn as the short_str_value and
+                    # com.mypackage.MyDoFn as the java_class_name value.
+                    # short_str_value can be displayed and java_class_name_value
+                    # will be displayed as a tooltip.
+              },
+            ],
+            &quot;id&quot;: &quot;A String&quot;, # SDK generated id of this transform instance.
+            &quot;inputCollectionName&quot;: [ # User names for all collection inputs to this transform.
+              &quot;A String&quot;,
+            ],
+            &quot;name&quot;: &quot;A String&quot;, # User provided name for this transform instance.
+            &quot;kind&quot;: &quot;A String&quot;, # Type of transform.
           },
-          &quot;autoscalingSettings&quot;: { # Settings for WorkerPool autoscaling. # Settings for autoscaling of this WorkerPool.
-            &quot;algorithm&quot;: &quot;A String&quot;, # The algorithm to use for autoscaling.
-            &quot;maxNumWorkers&quot;: 42, # The maximum number of workers to cap scaling at.
+        ],
+        &quot;executionPipelineStage&quot;: [ # Description of each stage of execution of the pipeline.
+          { # Description of the composing transforms, names/ids, and input/outputs of a
+              # stage of execution.  Some composing transforms and sources may have been
+              # generated by the Dataflow service during execution planning.
+            &quot;componentSource&quot;: [ # Collections produced and consumed by component transforms of this stage.
+              { # Description of an interstitial value between transforms in an execution
+                  # stage.
+                &quot;userName&quot;: &quot;A String&quot;, # Human-readable name for this transform; may be user or system generated.
+                &quot;name&quot;: &quot;A String&quot;, # Dataflow service generated name for this source.
+                &quot;originalTransformOrCollection&quot;: &quot;A String&quot;, # User name for the original user transform or collection with which this
+                    # source is most closely associated.
+              },
+            ],
+            &quot;inputSource&quot;: [ # Input sources for this stage.
+              { # Description of an input or output of an execution stage.
+                &quot;userName&quot;: &quot;A String&quot;, # Human-readable name for this source; may be user or system generated.
+                &quot;originalTransformOrCollection&quot;: &quot;A String&quot;, # User name for the original user transform or collection with which this
+                    # source is most closely associated.
+                &quot;sizeBytes&quot;: &quot;A String&quot;, # Size of the source, if measurable.
+                &quot;name&quot;: &quot;A String&quot;, # Dataflow service generated name for this source.
+              },
+            ],
+            &quot;name&quot;: &quot;A String&quot;, # Dataflow service generated name for this stage.
+            &quot;componentTransform&quot;: [ # Transforms that comprise this execution stage.
+              { # Description of a transform executed as part of an execution stage.
+                &quot;name&quot;: &quot;A String&quot;, # Dataflow service generated name for this source.
+                &quot;userName&quot;: &quot;A String&quot;, # Human-readable name for this transform; may be user or system generated.
+                &quot;originalTransform&quot;: &quot;A String&quot;, # User name for the original user transform with which this transform is
+                    # most closely associated.
+              },
+            ],
+            &quot;id&quot;: &quot;A String&quot;, # Dataflow service generated id for this stage.
+            &quot;outputSource&quot;: [ # Output sources for this stage.
+              { # Description of an input or output of an execution stage.
+                &quot;userName&quot;: &quot;A String&quot;, # Human-readable name for this source; may be user or system generated.
+                &quot;originalTransformOrCollection&quot;: &quot;A String&quot;, # User name for the original user transform or collection with which this
+                    # source is most closely associated.
+                &quot;sizeBytes&quot;: &quot;A String&quot;, # Size of the source, if measurable.
+                &quot;name&quot;: &quot;A String&quot;, # Dataflow service generated name for this source.
+              },
+            ],
+            &quot;kind&quot;: &quot;A String&quot;, # Type of transform this stage is executing.
           },
-          &quot;metadata&quot;: { # Metadata to set on the Google Compute Engine VMs.
-            &quot;a_key&quot;: &quot;A String&quot;,
-          },
-          &quot;defaultPackageSet&quot;: &quot;A String&quot;, # The default package set to install.  This allows the service to
-              # select a default set of packages which are useful to worker
-              # harnesses written in a particular language.
-          &quot;network&quot;: &quot;A String&quot;, # Network to which VMs will be assigned.  If empty or unspecified,
-              # the service will use the network &quot;default&quot;.
-        },
-      ],
-      &quot;dataset&quot;: &quot;A String&quot;, # The dataset for the current project where various workflow
-          # related tables are stored.
-          #
-          # The supported resource type is:
-          #
-          # Google BigQuery:
-          #   bigquery.googleapis.com/{dataset}
-    },
-    &quot;stageStates&quot;: [ # This field may be mutated by the Cloud Dataflow service;
-        # callers cannot mutate it.
-      { # A message describing the state of a particular execution stage.
-        &quot;currentStateTime&quot;: &quot;A String&quot;, # The time at which the stage transitioned to this state.
-        &quot;executionStageState&quot;: &quot;A String&quot;, # Executions stage states allow the same set of values as JobState.
-        &quot;executionStageName&quot;: &quot;A String&quot;, # The name of the execution stage.
+        ],
       },
-    ],
-    &quot;jobMetadata&quot;: { # This field is populated by the Dataflow service to support filtering jobs
-        # by the metadata values provided here. Populated for ListJobs and all GetJob
-        # views SUMMARY and higher.
-        # Metadata available primarily for filtering jobs. Will be included in the
-        # ListJob response and Job SUMMARY view.
-      &quot;datastoreDetails&quot;: [ # Identification of a Datastore source used in the Dataflow job.
-        { # Metadata for a Datastore connector used by the job.
-          &quot;namespace&quot;: &quot;A String&quot;, # Namespace used in the connection.
-          &quot;projectId&quot;: &quot;A String&quot;, # ProjectId accessed in the connection.
-        },
-      ],
-      &quot;sdkVersion&quot;: { # The version of the SDK used to run the job. # The SDK version used to run the job.
-        &quot;version&quot;: &quot;A String&quot;, # The version of the SDK used to run the job.
-        &quot;sdkSupportStatus&quot;: &quot;A String&quot;, # The support status for this SDK version.
-        &quot;versionDisplayName&quot;: &quot;A String&quot;, # A readable string describing the version of the SDK.
-      },
-      &quot;bigqueryDetails&quot;: [ # Identification of a BigQuery source used in the Dataflow job.
-        { # Metadata for a BigQuery connector used by the job.
-          &quot;table&quot;: &quot;A String&quot;, # Table accessed in the connection.
-          &quot;dataset&quot;: &quot;A String&quot;, # Dataset accessed in the connection.
-          &quot;query&quot;: &quot;A String&quot;, # Query used to access data in the connection.
-          &quot;projectId&quot;: &quot;A String&quot;, # Project accessed in the connection.
-        },
-      ],
-      &quot;fileDetails&quot;: [ # Identification of a File source used in the Dataflow job.
-        { # Metadata for a File connector used by the job.
-          &quot;filePattern&quot;: &quot;A String&quot;, # File Pattern used to access files by the connector.
-        },
-      ],
-      &quot;pubsubDetails&quot;: [ # Identification of a PubSub source used in the Dataflow job.
-        { # Metadata for a PubSub connector used by the job.
-          &quot;topic&quot;: &quot;A String&quot;, # Topic accessed in the connection.
-          &quot;subscription&quot;: &quot;A String&quot;, # Subscription used in the connection.
-        },
-      ],
-      &quot;bigTableDetails&quot;: [ # Identification of a BigTable source used in the Dataflow job.
-        { # Metadata for a BigTable connector used by the job.
-          &quot;projectId&quot;: &quot;A String&quot;, # ProjectId accessed in the connection.
-          &quot;instanceId&quot;: &quot;A String&quot;, # InstanceId accessed in the connection.
-          &quot;tableId&quot;: &quot;A String&quot;, # TableId accessed in the connection.
-        },
-      ],
-      &quot;spannerDetails&quot;: [ # Identification of a Spanner source used in the Dataflow job.
-        { # Metadata for a Spanner connector used by the job.
-          &quot;instanceId&quot;: &quot;A String&quot;, # InstanceId accessed in the connection.
-          &quot;projectId&quot;: &quot;A String&quot;, # ProjectId accessed in the connection.
-          &quot;databaseId&quot;: &quot;A String&quot;, # DatabaseId accessed in the connection.
-        },
-      ],
-    },
-    &quot;type&quot;: &quot;A String&quot;, # The type of Cloud Dataflow job.
-    &quot;projectId&quot;: &quot;A String&quot;, # The ID of the Cloud Platform project that the job belongs to.
-    &quot;createdFromSnapshotId&quot;: &quot;A String&quot;, # If this is specified, the job&#x27;s initial state is populated from the given
-        # snapshot.
-    &quot;pipelineDescription&quot;: { # Preliminary field: The format of this data may change at any time.
-        # A description of the user pipeline and stages through which it is executed.
-        # Created by Cloud Dataflow service.  Only retrieved with
-        # JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL.
-        # A descriptive representation of the submitted pipeline as well as the executed
-        # form.  This data is provided by the Dataflow service for ease of visualizing
-        # the pipeline and interpreting Dataflow provided metrics.
-      &quot;executionPipelineStage&quot;: [ # Description of each stage of execution of the pipeline.
-        { # Description of the composing transforms, names/ids, and input/outputs of a
-            # stage of execution.  Some composing transforms and sources may have been
-            # generated by the Dataflow service during execution planning.
-          &quot;outputSource&quot;: [ # Output sources for this stage.
-            { # Description of an input or output of an execution stage.
-              &quot;sizeBytes&quot;: &quot;A String&quot;, # Size of the source, if measurable.
-              &quot;name&quot;: &quot;A String&quot;, # Dataflow service generated name for this source.
-              &quot;userName&quot;: &quot;A String&quot;, # Human-readable name for this source; may be user or system generated.
-              &quot;originalTransformOrCollection&quot;: &quot;A String&quot;, # User name for the original user transform or collection with which this
-                  # source is most closely associated.
-            },
-          ],
-          &quot;name&quot;: &quot;A String&quot;, # Dataflow service generated name for this stage.
-          &quot;inputSource&quot;: [ # Input sources for this stage.
-            { # Description of an input or output of an execution stage.
-              &quot;sizeBytes&quot;: &quot;A String&quot;, # Size of the source, if measurable.
-              &quot;name&quot;: &quot;A String&quot;, # Dataflow service generated name for this source.
-              &quot;userName&quot;: &quot;A String&quot;, # Human-readable name for this source; may be user or system generated.
-              &quot;originalTransformOrCollection&quot;: &quot;A String&quot;, # User name for the original user transform or collection with which this
-                  # source is most closely associated.
-            },
-          ],
-          &quot;id&quot;: &quot;A String&quot;, # Dataflow service generated id for this stage.
-          &quot;componentTransform&quot;: [ # Transforms that comprise this execution stage.
-            { # Description of a transform executed as part of an execution stage.
-              &quot;originalTransform&quot;: &quot;A String&quot;, # User name for the original user transform with which this transform is
-                  # most closely associated.
-              &quot;name&quot;: &quot;A String&quot;, # Dataflow service generated name for this source.
-              &quot;userName&quot;: &quot;A String&quot;, # Human-readable name for this transform; may be user or system generated.
-            },
-          ],
-          &quot;componentSource&quot;: [ # Collections produced and consumed by component transforms of this stage.
-            { # Description of an interstitial value between transforms in an execution
-                # stage.
-              &quot;name&quot;: &quot;A String&quot;, # Dataflow service generated name for this source.
-              &quot;userName&quot;: &quot;A String&quot;, # Human-readable name for this transform; may be user or system generated.
-              &quot;originalTransformOrCollection&quot;: &quot;A String&quot;, # User name for the original user transform or collection with which this
-                  # source is most closely associated.
-            },
-          ],
-          &quot;kind&quot;: &quot;A String&quot;, # Type of transform this stage is executing.
-        },
-      ],
-      &quot;originalPipelineTransform&quot;: [ # Description of each transform in the pipeline and collections between them.
-        { # Description of the type, names/ids, and input/outputs for a transform.
-          &quot;kind&quot;: &quot;A String&quot;, # Type of transform.
-          &quot;inputCollectionName&quot;: [ # User names for all collection inputs to this transform.
-            &quot;A String&quot;,
-          ],
-          &quot;name&quot;: &quot;A String&quot;, # User provided name for this transform instance.
-          &quot;id&quot;: &quot;A String&quot;, # SDK generated id of this transform instance.
-          &quot;displayData&quot;: [ # Transform-specific display data.
-            { # Data provided with a pipeline or transform to provide descriptive info.
-              &quot;durationValue&quot;: &quot;A String&quot;, # Contains value if the data is of duration type.
-              &quot;int64Value&quot;: &quot;A String&quot;, # Contains value if the data is of int64 type.
-              &quot;namespace&quot;: &quot;A String&quot;, # The namespace for the key. This is usually a class name or programming
-                  # language namespace (i.e. python module) which defines the display data.
-                  # This allows a dax monitoring system to specially handle the data
-                  # and perform custom rendering.
-              &quot;floatValue&quot;: 3.14, # Contains value if the data is of float type.
-              &quot;key&quot;: &quot;A String&quot;, # The key identifying the display data.
-                  # This is intended to be used as a label for the display data
-                  # when viewed in a dax monitoring system.
-              &quot;shortStrValue&quot;: &quot;A String&quot;, # A possible additional shorter value to display.
-                  # For example a java_class_name_value of com.mypackage.MyDoFn
-                  # will be stored with MyDoFn as the short_str_value and
-                  # com.mypackage.MyDoFn as the java_class_name value.
-                  # short_str_value can be displayed and java_class_name_value
-                  # will be displayed as a tooltip.
-              &quot;url&quot;: &quot;A String&quot;, # An optional full URL.
-              &quot;label&quot;: &quot;A String&quot;, # An optional label to display in a dax UI for the element.
-              &quot;timestampValue&quot;: &quot;A String&quot;, # Contains value if the data is of timestamp type.
-              &quot;boolValue&quot;: True or False, # Contains value if the data is of a boolean type.
-              &quot;javaClassValue&quot;: &quot;A String&quot;, # Contains value if the data is of java class type.
-              &quot;strValue&quot;: &quot;A String&quot;, # Contains value if the data is of string type.
-            },
-          ],
-          &quot;outputCollectionName&quot;: [ # User names for all collection outputs to this transform.
-            &quot;A String&quot;,
-          ],
-        },
-      ],
-      &quot;displayData&quot;: [ # Pipeline level display data.
-        { # Data provided with a pipeline or transform to provide descriptive info.
-          &quot;durationValue&quot;: &quot;A String&quot;, # Contains value if the data is of duration type.
-          &quot;int64Value&quot;: &quot;A String&quot;, # Contains value if the data is of int64 type.
-          &quot;namespace&quot;: &quot;A String&quot;, # The namespace for the key. This is usually a class name or programming
-              # language namespace (i.e. python module) which defines the display data.
-              # This allows a dax monitoring system to specially handle the data
-              # and perform custom rendering.
-          &quot;floatValue&quot;: 3.14, # Contains value if the data is of float type.
-          &quot;key&quot;: &quot;A String&quot;, # The key identifying the display data.
-              # This is intended to be used as a label for the display data
-              # when viewed in a dax monitoring system.
-          &quot;shortStrValue&quot;: &quot;A String&quot;, # A possible additional shorter value to display.
-              # For example a java_class_name_value of com.mypackage.MyDoFn
-              # will be stored with MyDoFn as the short_str_value and
-              # com.mypackage.MyDoFn as the java_class_name value.
-              # short_str_value can be displayed and java_class_name_value
-              # will be displayed as a tooltip.
-          &quot;url&quot;: &quot;A String&quot;, # An optional full URL.
-          &quot;label&quot;: &quot;A String&quot;, # An optional label to display in a dax UI for the element.
-          &quot;timestampValue&quot;: &quot;A String&quot;, # Contains value if the data is of timestamp type.
-          &quot;boolValue&quot;: True or False, # Contains value if the data is of a boolean type.
-          &quot;javaClassValue&quot;: &quot;A String&quot;, # Contains value if the data is of java class type.
-          &quot;strValue&quot;: &quot;A String&quot;, # Contains value if the data is of string type.
-        },
-      ],
-    },
-    &quot;replaceJobId&quot;: &quot;A String&quot;, # If this job is an update of an existing job, this field is the job ID
-        # of the job it replaced.
-        #
-        # When sending a `CreateJobRequest`, you can update a job by specifying it
-        # here. The job named here is stopped, and its intermediate state is
-        # transferred to this job.
-    &quot;tempFiles&quot;: [ # A set of files the system should be aware of that are used
-        # for temporary storage. These temporary files will be
-        # removed on job completion.
-        # No duplicates are allowed.
-        # No file patterns are supported.
-        #
-        # The supported files are:
-        #
-        # Google Cloud Storage:
-        #
-        #    storage.googleapis.com/{bucket}/{object}
-        #    bucket.storage.googleapis.com/{object}
-      &quot;A String&quot;,
-    ],
-    &quot;name&quot;: &quot;A String&quot;, # The user-specified Cloud Dataflow job name.
-        #
-        # Only one Job with a given name may exist in a project at any
-        # given time. If a caller attempts to create a Job with the same
-        # name as an already-existing Job, the attempt returns the
-        # existing Job.
-        #
-        # The name must match the regular expression
-        # `[a-z]([-a-z0-9]{0,38}[a-z0-9])?`
-    &quot;steps&quot;: [ # Exactly one of step or steps_location should be specified.
-        #
-        # The top-level steps that constitute the entire job.
-      { # Defines a particular step within a Cloud Dataflow job.
+      &quot;labels&quot;: { # User-defined labels for this job.
           #
-          # A job consists of multiple steps, each of which performs some
-          # specific operation as part of the overall job.  Data is typically
-          # passed from one step to another as part of the job.
+          # The labels map can contain no more than 64 entries.  Entries of the labels
+          # map are UTF8 strings that comply with the following restrictions:
           #
-          # Here&#x27;s an example of a sequence of steps which together implement a
-          # Map-Reduce job:
-          #
-          #   * Read a collection of data from some source, parsing the
-          #     collection&#x27;s elements.
-          #
-          #   * Validate the elements.
-          #
-          #   * Apply a user-defined function to map each element to some value
-          #     and extract an element-specific key value.
-          #
-          #   * Group elements with the same key into a single element with
-          #     that key, transforming a multiply-keyed collection into a
-          #     uniquely-keyed collection.
-          #
-          #   * Write the elements out to some data sink.
-          #
-          # Note that the Cloud Dataflow service may be used to run many different
-          # types of jobs, not just Map-Reduce.
-        &quot;name&quot;: &quot;A String&quot;, # The name that identifies the step. This must be unique for each
-            # step with respect to all other steps in the Cloud Dataflow job.
-        &quot;kind&quot;: &quot;A String&quot;, # The kind of step in the Cloud Dataflow job.
-        &quot;properties&quot;: { # Named properties associated with the step. Each kind of
-            # predefined step has its own required set of properties.
-            # Must be provided on Create.  Only retrieved with JOB_VIEW_ALL.
-          &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
-        },
-      },
-    ],
-    &quot;replacedByJobId&quot;: &quot;A String&quot;, # If another job is an update of this job (and thus, this job is in
-        # `JOB_STATE_UPDATED`), this field contains the ID of that job.
-    &quot;executionInfo&quot;: { # Deprecated.
-        # Additional information about how a Cloud Dataflow job will be executed that
-        # isn&#x27;t contained in the submitted job.
-      &quot;stages&quot;: { # A mapping from each stage to the information about that stage.
-        &quot;a_key&quot;: { # Contains information about how a particular
-            # google.dataflow.v1beta3.Step will be executed.
-          &quot;stepName&quot;: [ # The steps associated with the execution stage.
-              # Note that stages may have several steps, and that a given step
-              # might be run by more than one stage.
-            &quot;A String&quot;,
-          ],
-        },
-      },
-    },
-    &quot;currentState&quot;: &quot;A String&quot;, # The current state of the job.
-        #
-        # Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise
-        # specified.
-        #
-        # A job in the `JOB_STATE_RUNNING` state may asynchronously enter a
-        # terminal state. After a job has reached a terminal state, no
-        # further state updates may be made.
-        #
-        # This field may be mutated by the Cloud Dataflow service;
-        # callers cannot mutate it.
-    &quot;location&quot;: &quot;A String&quot;, # The [regional endpoint]
-        # (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that
-        # contains this job.
-    &quot;startTime&quot;: &quot;A String&quot;, # The timestamp when the job was started (transitioned to JOB_STATE_PENDING).
-        # Flexible resource scheduling jobs are started with some delay after job
-        # creation, so start_time is unset before start and is updated when the
-        # job is started by the Cloud Dataflow service. For other jobs, start_time
-        # always equals to create_time and is immutable and set by the Cloud Dataflow
-        # service.
-    &quot;stepsLocation&quot;: &quot;A String&quot;, # The GCS location where the steps are stored.
-    &quot;labels&quot;: { # User-defined labels for this job.
-        #
-        # The labels map can contain no more than 64 entries.  Entries of the labels
-        # map are UTF8 strings that comply with the following restrictions:
-        #
-        # * Keys must conform to regexp:  \p{Ll}\p{Lo}{0,62}
-        # * Values must conform to regexp:  [\p{Ll}\p{Lo}\p{N}_-]{0,63}
-        # * Both keys and values are additionally constrained to be &lt;= 128 bytes in
-        # size.
-      &quot;a_key&quot;: &quot;A String&quot;,
-    },
-    &quot;createTime&quot;: &quot;A String&quot;, # The timestamp when the job was initially created. Immutable and set by the
-        # Cloud Dataflow service.
-    &quot;requestedState&quot;: &quot;A String&quot;, # The job&#x27;s requested state.
-        #
-        # `UpdateJob` may be used to switch between the `JOB_STATE_STOPPED` and
-        # `JOB_STATE_RUNNING` states, by setting requested_state.  `UpdateJob` may
-        # also be used to directly set a job&#x27;s requested state to
-        # `JOB_STATE_CANCELLED` or `JOB_STATE_DONE`, irrevocably terminating the
-        # job if it has not already reached a terminal state.
-  }</pre>
-</div>
-
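Continuing the hypothetical sketch above, the fields of the returned Job are plain dict entries, e.g.:

# "job" is the dict returned by the create() sketch earlier; every field read
# here is documented in the Job object above.
print("id:        ", job["id"])                   # Set by the service; immutable.
print("state:     ", job.get("currentState"))     # e.g. JOB_STATE_PENDING.
print("state time:", job.get("currentStateTime"))
print("region:    ", job.get("location"))         # Regional endpoint that contains the job.
for key, value in job.get("labels", {}).items():  # User-defined labels, if any.
    print(f"{key}={value}")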
-<div class="method">
-    <code class="details" id="get">get(projectId, view=None, gcsPath=None, location=None, x__xgafv=None)</code>
-  <pre>Get the template associated with a template.
-
-Args:
-  projectId: string, Required. The ID of the Cloud Platform project that the job belongs to. (required)
-  view: string, The view to retrieve. Defaults to METADATA_ONLY.
-  gcsPath: string, Required. A Cloud Storage path to the template from which to
-create the job.
-Must be a valid Cloud Storage URL, beginning with &#x27;gs://&#x27;.
-  location: string, The [regional endpoint]
-(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to
-which to direct the request.
-  x__xgafv: string, V1 error format.
-    Allowed values
-      1 - v1 error format
-      2 - v2 error format
-
-Returns:
-  An object of the form:
-
-    { # The response to a GetTemplate request.
-    &quot;metadata&quot;: { # Metadata describing a template. # The template metadata describing the template name, available
-        # parameters, etc.
-      &quot;parameters&quot;: [ # The parameters for the template.
-        { # Metadata for a specific parameter.
-          &quot;label&quot;: &quot;A String&quot;, # Required. The label to display for the parameter.
-          &quot;paramType&quot;: &quot;A String&quot;, # Optional. The type of the parameter.
-              # Used for selecting the input picker.
-          &quot;helpText&quot;: &quot;A String&quot;, # Required. The help text to display for the parameter.
-          &quot;name&quot;: &quot;A String&quot;, # Required. The name of the parameter.
-          &quot;regexes&quot;: [ # Optional. Regexes that the parameter must match.
-            &quot;A String&quot;,
-          ],
-          &quot;isOptional&quot;: True or False, # Optional. Whether the parameter is optional. Defaults to false.
-        },
-      ],
-      &quot;name&quot;: &quot;A String&quot;, # Required. The name of the template.
-      &quot;description&quot;: &quot;A String&quot;, # Optional. A description of the template.
-    },
-    &quot;runtimeMetadata&quot;: { # RuntimeMetadata describing a runtime environment. # Describes the runtime metadata with SDKInfo and available parameters.
-      &quot;sdkInfo&quot;: { # SDK Information. # SDK Info for the template.
-        &quot;language&quot;: &quot;A String&quot;, # Required. The SDK Language.
-        &quot;version&quot;: &quot;A String&quot;, # Optional. The SDK version.
-      },
-      &quot;parameters&quot;: [ # The parameters for the template.
-        { # Metadata for a specific parameter.
-          &quot;label&quot;: &quot;A String&quot;, # Required. The label to display for the parameter.
-          &quot;paramType&quot;: &quot;A String&quot;, # Optional. The type of the parameter.
-              # Used for selecting the input picker.
-          &quot;helpText&quot;: &quot;A String&quot;, # Required. The help text to display for the parameter.
-          &quot;name&quot;: &quot;A String&quot;, # Required. The name of the parameter.
-          &quot;regexes&quot;: [ # Optional. Regexes that the parameter must match.
-            &quot;A String&quot;,
-          ],
-          &quot;isOptional&quot;: True or False, # Optional. Whether the parameter is optional. Defaults to false.
-        },
-      ],
-    },
-    &quot;templateType&quot;: &quot;A String&quot;, # Template Type.
-    &quot;status&quot;: { # The `Status` type defines a logical error model that is suitable for # The status of the get template request. Any problems with the
-        # request will be indicated in the error_details.
-        # different programming environments, including REST APIs and RPC APIs. It is
-        # used by [gRPC](https://github.com/grpc). Each `Status` message contains
-        # three pieces of data: error code, error message, and error details.
-        #
-        # You can find out more about this error model and how to work with it in the
-        # [API Design Guide](https://cloud.google.com/apis/design/errors).
-      &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
-      &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any
-          # user-facing error message should be localized and sent in the
-          # google.rpc.Status.details field, or localized by the client.
-      &quot;details&quot;: [ # A list of messages that carry the error details.  There is a common set of
-          # message types for APIs to use.
-        {
-          &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
-        },
-      ],
-    },
-  }</pre>
-</div>
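A minimal sketch of calling this method with the discovery-based Python client to inspect a template's declared parameters (the project ID and template path are placeholders):

    from googleapiclient.discovery import build

    service = build('dataflow', 'v1b3')
    template = service.projects().templates().get(
        projectId='my-project',
        gcsPath='gs://dataflow-templates/latest/Word_Count',
        view='METADATA_ONLY').execute()

    # List each declared template parameter with its help text.
    for param in template.get('metadata', {}).get('parameters', []):
        print(param['name'], '-', param.get('helpText', ''))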
-
-<div class="method">
-    <code class="details" id="launch">launch(projectId, body=None, validateOnly=None, gcsPath=None, location=None, dynamicTemplate_gcsPath=None, dynamicTemplate_stagingLocation=None, x__xgafv=None)</code>
-  <pre>Launch a template.
-
-Args:
-  projectId: string, Required. The ID of the Cloud Platform project that the job belongs to. (required)
-  body: object, The request body.
-    The object takes the form of:
-
-{ # Parameters to provide to the template being launched.
-    &quot;parameters&quot;: { # The runtime parameters to pass to the job.
-      &quot;a_key&quot;: &quot;A String&quot;,
-    },
-    &quot;jobName&quot;: &quot;A String&quot;, # Required. The job name to use for the created job.
-    &quot;transformNameMapping&quot;: { # Only applicable when updating a pipeline. Map of transform name prefixes of
-        # the job to be replaced to the corresponding name prefixes of the new job.
-      &quot;a_key&quot;: &quot;A String&quot;,
-    },
-    &quot;environment&quot;: { # The environment values to set at runtime. # The runtime environment for the job.
-      &quot;machineType&quot;: &quot;A String&quot;, # The machine type to use for the job. Defaults to the value from the
-          # template if not specified.
-      &quot;subnetwork&quot;: &quot;A String&quot;, # Subnetwork to which VMs will be assigned, if desired.  Expected to be of
-          # the form &quot;regions/REGION/subnetworks/SUBNETWORK&quot;.
-      &quot;ipConfiguration&quot;: &quot;A String&quot;, # Configuration for VM IPs.
-      &quot;kmsKeyName&quot;: &quot;A String&quot;, # Optional. Name for the Cloud KMS key for the job.
-          # Key format is:
-          # projects/&lt;project&gt;/locations/&lt;location&gt;/keyRings/&lt;keyring&gt;/cryptoKeys/&lt;key&gt;
-      &quot;tempLocation&quot;: &quot;A String&quot;, # The Cloud Storage path to use for temporary files.
-          # Must be a valid Cloud Storage URL, beginning with `gs://`.
-      &quot;bypassTempDirValidation&quot;: True or False, # Whether to bypass the safety checks for the job&#x27;s temporary directory.
-          # Use with caution.
-      &quot;network&quot;: &quot;A String&quot;, # Network to which VMs will be assigned.  If empty or unspecified,
-          # the service will use the network &quot;default&quot;.
-      &quot;workerRegion&quot;: &quot;A String&quot;, # The Compute Engine region
-          # (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in
-          # which worker processing should occur, e.g. &quot;us-west1&quot;. Mutually exclusive
-          # with worker_zone. If neither worker_region nor worker_zone is specified,
-          # default to the control plane&#x27;s region.
-      &quot;zone&quot;: &quot;A String&quot;, # The Compute Engine [availability
-          # zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones)
-          # for launching worker instances to run your pipeline.
-          # In the future, worker_zone will take precedence.
-      &quot;numWorkers&quot;: 42, # The initial number of Google Compute Engine instances for the job.
-      &quot;workerZone&quot;: &quot;A String&quot;, # The Compute Engine zone
-          # (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in
-          # which worker processing should occur, e.g. &quot;us-west1-a&quot;. Mutually exclusive
-          # with worker_region. If neither worker_region nor worker_zone is specified,
-          # a zone in the control plane&#x27;s region is chosen based on available capacity.
-          # If both `worker_zone` and `zone` are set, `worker_zone` takes precedence.
-      &quot;additionalUserLabels&quot;: { # Additional user labels to be specified for the job.
-          # Keys and values should follow the restrictions specified in the [labeling
-          # restrictions](https://cloud.google.com/compute/docs/labeling-resources#restrictions)
-          # page.
+          # * Keys must conform to regexp:  \p{Ll}\p{Lo}{0,62}
+          # * Values must conform to regexp:  [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+          # * Both keys and values are additionally constrained to be &lt;= 128 bytes in
+          # size.
         &quot;a_key&quot;: &quot;A String&quot;,
       },
-      &quot;additionalExperiments&quot;: [ # Additional experiment flags for the job.
-        &quot;A String&quot;,
-      ],
-      &quot;maxWorkers&quot;: 42, # The maximum number of Google Compute Engine instances to be made
-          # available to your pipeline during execution, from 1 to 1000.
-      &quot;serviceAccountEmail&quot;: &quot;A String&quot;, # The email address of the service account to run the job as.
-    },
-    &quot;update&quot;: True or False, # If set, replace the existing pipeline with the name specified by jobName
-        # with this pipeline, preserving state.
-  }
-
-  validateOnly: boolean, If true, the request is validated but not actually executed.
-Defaults to false.
-  gcsPath: string, A Cloud Storage path to the template from which to create
-the job.
-Must be a valid Cloud Storage URL, beginning with &#x27;gs://&#x27;.
-  location: string, The [regional endpoint]
-(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to
-which to direct the request.
-  dynamicTemplate_gcsPath: string, Path to dynamic template spec file on GCS.
-The file must be a JSON-serialized DynamicTemplateFieSpec object.
-  dynamicTemplate_stagingLocation: string, Cloud Storage path for staging dependencies.
-Must be a valid Cloud Storage URL, beginning with `gs://`.
-  x__xgafv: string, V1 error format.
-    Allowed values
-      1 - v1 error format
-      2 - v2 error format
-
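A minimal sketch of launching a template with a request body of the form shown above, using the discovery-based Python client (bucket names, parameter names, and the template path are placeholders; passing `validateOnly=True` would dry-run the request instead):

    from googleapiclient.discovery import build

    service = build('dataflow', 'v1b3')
    body = {
        'jobName': 'wordcount-from-template',
        'parameters': {
            'inputFile': 'gs://my-bucket/input.txt',
            'output': 'gs://my-bucket/output',
        },
        'environment': {
            'tempLocation': 'gs://my-bucket/temp',
            'maxWorkers': 3,
        },
    }
    response = service.projects().templates().launch(
        projectId='my-project',
        gcsPath='gs://dataflow-templates/latest/Word_Count',
        body=body).execute()

    # The launched job (absent for a dry run) carries the new job's ID.
    print(response.get('job', {}).get('id'))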
-Returns:
-  An object of the form:
-
-    { # Response to the request to launch a template.
-    &quot;job&quot;: { # Defines a job to be run by the Cloud Dataflow service. # The job that was launched, if the request was not a dry run and
-        # the job was successfully launched.
-      &quot;clientRequestId&quot;: &quot;A String&quot;, # The client&#x27;s unique identifier of the job, re-used across retried attempts.
-          # If this field is set, the service will ensure its uniqueness.
-          # The request to create a job will fail if the service has knowledge of a
-          # previously submitted job with the same client&#x27;s ID and job name.
-          # The caller may use this field to ensure idempotence of job
-          # creation across retried attempts to create a job.
-          # By default, the field is empty and, in that case, the service ignores it.
-      &quot;id&quot;: &quot;A String&quot;, # The unique ID of this job.
-          #
-          # This field is set by the Cloud Dataflow service when the Job is
-          # created, and is immutable for the life of the job.
-      &quot;currentStateTime&quot;: &quot;A String&quot;, # The timestamp associated with the current state.
-      &quot;transformNameMapping&quot;: { # The map of transform name prefixes of the job to be replaced to the
-          # corresponding name prefixes of the new job.
-        &quot;a_key&quot;: &quot;A String&quot;,
-      },
+      &quot;projectId&quot;: &quot;A String&quot;, # The ID of the Cloud Platform project that the job belongs to.
       &quot;environment&quot;: { # Describes the environment in which a Dataflow Job runs. # The environment for the job.
-        &quot;internalExperiments&quot;: { # Experimental settings.
-          &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
-        },
+        &quot;flexResourceSchedulingGoal&quot;: &quot;A String&quot;, # Which Flexible Resource Scheduling mode to run in.
         &quot;workerRegion&quot;: &quot;A String&quot;, # The Compute Engine region
             # (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in
             # which worker processing should occur, e.g. &quot;us-west1&quot;. Mutually exclusive
             # with worker_zone. If neither worker_region nor worker_zone is specified,
             # default to the control plane&#x27;s region.
+        &quot;userAgent&quot;: { # A description of the process that generated the request.
+          &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
+        },
+        &quot;serviceAccountEmail&quot;: &quot;A String&quot;, # Identity to run virtual machines as. Defaults to the default account.
+        &quot;version&quot;: { # A structure describing which components and their versions of the service
+            # are required in order to run the job.
+          &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
+        },
         &quot;serviceKmsKeyName&quot;: &quot;A String&quot;, # If set, contains the Cloud KMS key identifier used to encrypt data
             # at rest, AKA a Customer Managed Encryption Key (CMEK).
             #
             # Format:
             #   projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY
-        &quot;userAgent&quot;: { # A description of the process that generated the request.
-          &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
-        },
+        &quot;experiments&quot;: [ # The list of experiments to enable.
+          &quot;A String&quot;,
+        ],
         &quot;workerZone&quot;: &quot;A String&quot;, # The Compute Engine zone
             # (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in
             # which worker processing should occur, e.g. &quot;us-west1-a&quot;. Mutually exclusive
             # with worker_region. If neither worker_region nor worker_zone is specified,
             # a zone in the control plane&#x27;s region is chosen based on available capacity.
-        &quot;clusterManagerApiService&quot;: &quot;A String&quot;, # The type of cluster manager API to use.  If unknown or
-            # unspecified, the service will attempt to choose a reasonable
-            # default.  This should be in the form of the API service name,
-            # e.g. &quot;compute.googleapis.com&quot;.
-        &quot;tempStoragePrefix&quot;: &quot;A String&quot;, # The prefix of the resources the system should use for temporary
-            # storage.  The system will append the suffix &quot;/temp-{JOBNAME}&quot; to
-            # this resource prefix, where {JOBNAME} is the value of the
-            # job_name field.  The resulting bucket and object prefix is used
-            # as the prefix of the resources used to store temporary data
-            # needed during the job execution.  NOTE: This will override the
-            # value in taskrunner_settings.
-            # The supported resource type is:
-            #
-            # Google Cloud Storage:
-            #
-            #   storage.googleapis.com/{bucket}/{object}
-            #   bucket.storage.googleapis.com/{object}
-        &quot;experiments&quot;: [ # The list of experiments to enable.
-          &quot;A String&quot;,
-        ],
-        &quot;version&quot;: { # A structure describing which components and their versions of the service
-            # are required in order to run the job.
-          &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
-        },
-        &quot;serviceAccountEmail&quot;: &quot;A String&quot;, # Identity to run virtual machines as. Defaults to the default account.
-        &quot;sdkPipelineOptions&quot;: { # The Cloud Dataflow SDK pipeline options specified by the user. These
-            # options are passed through the service and are used to recreate the
-            # SDK pipeline options on the worker in a language agnostic and platform
-            # independent way.
-          &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
-        },
-        &quot;flexResourceSchedulingGoal&quot;: &quot;A String&quot;, # Which Flexible Resource Scheduling mode to run in.
         &quot;workerPools&quot;: [ # The worker pools. At least one &quot;harness&quot; worker pool must be
             # specified in order for the job to have workers.
           { # Describes one particular pool of Cloud Dataflow workers to be
@@ -987,15 +322,54 @@
               # computations required by a job.  Note that a workflow job may use
               # multiple pools, in order to match the various computational
               # requirements of the various stages of the job.
-            &quot;numThreadsPerWorker&quot;: 42, # The number of threads per worker harness. If empty or unspecified, the
-                # service will choose a number of threads (according to the number of cores
-                # on the selected machine type for batch, or 1 by convention for streaming).
-            &quot;numWorkers&quot;: 42, # Number of Google Compute Engine workers in this pool needed to
-                # execute the job.  If zero or unspecified, the service will
-                # attempt to choose a reasonable default.
+            &quot;onHostMaintenance&quot;: &quot;A String&quot;, # The action to take on host maintenance, as defined by the Google
+                # Compute Engine API.
+            &quot;sdkHarnessContainerImages&quot;: [ # Set of SDK harness containers needed to execute this pipeline. This will
+                # only be set in the Fn API path. For non-cross-language pipelines this
+                # should have only one entry. Cross-language pipelines will have two or more
+                # entries.
+              { # Defines an SDK harness container for executing Dataflow pipelines.
+                &quot;containerImage&quot;: &quot;A String&quot;, # A docker container image that resides in Google Container Registry.
+                &quot;useSingleCorePerContainer&quot;: True or False, # If true, recommends that the Dataflow service use only one core per SDK
+                    # container instance with this image. If false (or unset), recommends using
+                    # more than one core per SDK container instance with this image for
+                    # efficiency. Note that the Dataflow service may choose to override this property
+                    # if needed.
+              },
+            ],
             &quot;zone&quot;: &quot;A String&quot;, # Zone to run the worker pools in.  If empty or unspecified, the service
                 # will attempt to choose a reasonable default.
+            &quot;kind&quot;: &quot;A String&quot;, # The kind of the worker pool; currently only `harness` and `shuffle`
+                # are supported.
+            &quot;metadata&quot;: { # Metadata to set on the Google Compute Engine VMs.
+              &quot;a_key&quot;: &quot;A String&quot;,
+            },
             &quot;diskSourceImage&quot;: &quot;A String&quot;, # Fully qualified source image for disks.
+            &quot;dataDisks&quot;: [ # Data disks that are used by a VM in this workflow.
+              { # Describes the data disk used by a workflow job.
+                &quot;sizeGb&quot;: 42, # Size of disk in GB.  If zero or unspecified, the service will
+                    # attempt to choose a reasonable default.
+                &quot;diskType&quot;: &quot;A String&quot;, # Disk storage type, as defined by Google Compute Engine.  This
+                    # must be a disk type appropriate to the project and zone in which
+                    # the workers will run.  If unknown or unspecified, the service
+                    # will attempt to choose a reasonable default.
+                    #
+                    # For example, the standard persistent disk type is a resource name
+                    # typically ending in &quot;pd-standard&quot;.  If SSD persistent disks are
+                    # available, the resource name typically ends with &quot;pd-ssd&quot;.  The
+                    # actual valid values are defined by the Google Compute Engine API,
+                    # not by the Cloud Dataflow API; consult the Google Compute Engine
+                    # documentation for more information about determining the set of
+                    # available disk types for a particular project and zone.
+                    #
+                    # Google Compute Engine Disk types are local to a particular
+                    # project in a particular zone, and so the resource name will
+                    # typically look something like this:
+                    #
+                    # compute.googleapis.com/projects/project-id/zones/zone/diskTypes/pd-standard
+                &quot;mountPoint&quot;: &quot;A String&quot;, # Directory in a VM where disk is mounted.
+              },
+            ],
             &quot;packages&quot;: [ # Packages to be installed on workers.
               { # The packages that must be installed in order for a worker to run the
                   # steps of the Cloud Dataflow job that will be assigned to its worker
@@ -1031,98 +405,38 @@
                 #
                 # If unknown or unspecified, the service will attempt to choose a reasonable
                 # default.
-            &quot;onHostMaintenance&quot;: &quot;A String&quot;, # The action to take on host maintenance, as defined by the Google
-                # Compute Engine API.
+            &quot;network&quot;: &quot;A String&quot;, # Network to which VMs will be assigned.  If empty or unspecified,
+                # the service will use the network &quot;default&quot;.
+            &quot;ipConfiguration&quot;: &quot;A String&quot;, # Configuration for VM IPs.
+            &quot;diskSizeGb&quot;: 42, # Size of root disk for VMs, in GB.  If zero or unspecified, the service will
+                # attempt to choose a reasonable default.
+            &quot;autoscalingSettings&quot;: { # Settings for WorkerPool autoscaling. # Settings for autoscaling of this WorkerPool.
+              &quot;maxNumWorkers&quot;: 42, # The maximum number of workers to cap scaling at.
+              &quot;algorithm&quot;: &quot;A String&quot;, # The algorithm to use for autoscaling.
+            },
             &quot;poolArgs&quot;: { # Extra arguments for this worker pool.
               &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
             },
-            &quot;diskSizeGb&quot;: 42, # Size of root disk for VMs, in GB.  If zero or unspecified, the service will
+            &quot;subnetwork&quot;: &quot;A String&quot;, # Subnetwork to which VMs will be assigned, if desired.  Expected to be of
+                # the form &quot;regions/REGION/subnetworks/SUBNETWORK&quot;.
+            &quot;numWorkers&quot;: 42, # Number of Google Compute Engine workers in this pool needed to
+                # execute the job.  If zero or unspecified, the service will
                 # attempt to choose a reasonable default.
+            &quot;numThreadsPerWorker&quot;: 42, # The number of threads per worker harness. If empty or unspecified, the
+                # service will choose a number of threads (according to the number of cores
+                # on the selected machine type for batch, or 1 by convention for streaming).
             &quot;workerHarnessContainerImage&quot;: &quot;A String&quot;, # Required. Docker container image that executes the Cloud Dataflow worker
                 # harness, residing in Google Container Registry.
                 #
                 # Deprecated for the Fn API path. Use sdk_harness_container_images instead.
-            &quot;diskType&quot;: &quot;A String&quot;, # Type of root disk for VMs.  If empty or unspecified, the service will
-                # attempt to choose a reasonable default.
-            &quot;machineType&quot;: &quot;A String&quot;, # Machine type (e.g. &quot;n1-standard-1&quot;).  If empty or unspecified, the
-                # service will attempt to choose a reasonable default.
-            &quot;kind&quot;: &quot;A String&quot;, # The kind of the worker pool; currently only `harness` and `shuffle`
-                # are supported.
-            &quot;sdkHarnessContainerImages&quot;: [ # Set of SDK harness containers needed to execute this pipeline. This will
-                # only be set in the Fn API path. For non-cross-language pipelines this
-                # should have only one entry. Cross-language pipelines will have two or more
-                # entries.
-              { # Defines an SDK harness container for executing Dataflow pipelines.
-                &quot;containerImage&quot;: &quot;A String&quot;, # A docker container image that resides in Google Container Registry.
-                &quot;useSingleCorePerContainer&quot;: True or False, # If true, recommends that the Dataflow service use only one core per SDK
-                    # container instance with this image. If false (or unset), recommends using
-                    # more than one core per SDK container instance with this image for
-                    # efficiency. Note that the Dataflow service may choose to override this property
-                    # if needed.
-              },
-            ],
-            &quot;dataDisks&quot;: [ # Data disks that are used by a VM in this workflow.
-              { # Describes the data disk used by a workflow job.
-                &quot;diskType&quot;: &quot;A String&quot;, # Disk storage type, as defined by Google Compute Engine.  This
-                    # must be a disk type appropriate to the project and zone in which
-                    # the workers will run.  If unknown or unspecified, the service
-                    # will attempt to choose a reasonable default.
-                    #
-                    # For example, the standard persistent disk type is a resource name
-                    # typically ending in &quot;pd-standard&quot;.  If SSD persistent disks are
-                    # available, the resource name typically ends with &quot;pd-ssd&quot;.  The
-                    # actual valid values are defined by the Google Compute Engine API,
-                    # not by the Cloud Dataflow API; consult the Google Compute Engine
-                    # documentation for more information about determining the set of
-                    # available disk types for a particular project and zone.
-                    #
-                    # Google Compute Engine Disk types are local to a particular
-                    # project in a particular zone, and so the resource name will
-                    # typically look something like this:
-                    #
-                    # compute.googleapis.com/projects/project-id/zones/zone/diskTypes/pd-standard
-                &quot;sizeGb&quot;: 42, # Size of disk in GB.  If zero or unspecified, the service will
-                    # attempt to choose a reasonable default.
-                &quot;mountPoint&quot;: &quot;A String&quot;, # Directory in a VM where disk is mounted.
-              },
-            ],
-            &quot;subnetwork&quot;: &quot;A String&quot;, # Subnetwork to which VMs will be assigned, if desired.  Expected to be of
-                # the form &quot;regions/REGION/subnetworks/SUBNETWORK&quot;.
-            &quot;ipConfiguration&quot;: &quot;A String&quot;, # Configuration for VM IPs.
             &quot;taskrunnerSettings&quot;: { # Taskrunner configuration settings. # Settings passed through to Google Compute Engine workers when
                 # using the standard Dataflow task runner.  Users should ignore
                 # this field.
-              &quot;alsologtostderr&quot;: True or False, # Whether to also send taskrunner log info to stderr.
-              &quot;taskGroup&quot;: &quot;A String&quot;, # The UNIX group ID on the worker VM to use for tasks launched by
-                  # taskrunner; e.g. &quot;wheel&quot;.
-              &quot;harnessCommand&quot;: &quot;A String&quot;, # The command to launch the worker harness.
-              &quot;logDir&quot;: &quot;A String&quot;, # The directory on the VM to store logs.
+              &quot;dataflowApiVersion&quot;: &quot;A String&quot;, # The API version of endpoint, e.g. &quot;v1b3&quot;
               &quot;oauthScopes&quot;: [ # The OAuth2 scopes to be requested by the taskrunner in order to
                   # access the Cloud Dataflow API.
                 &quot;A String&quot;,
               ],
-              &quot;dataflowApiVersion&quot;: &quot;A String&quot;, # The API version of endpoint, e.g. &quot;v1b3&quot;
-              &quot;logUploadLocation&quot;: &quot;A String&quot;, # Indicates where to put logs.  If this is not specified, the logs
-                  # will not be uploaded.
-                  #
-                  # The supported resource type is:
-                  #
-                  # Google Cloud Storage:
-                  #   storage.googleapis.com/{bucket}/{object}
-                  #   bucket.storage.googleapis.com/{object}
-              &quot;streamingWorkerMainClass&quot;: &quot;A String&quot;, # The streaming worker main class name.
-              &quot;workflowFileName&quot;: &quot;A String&quot;, # The file to store the workflow in.
-              &quot;languageHint&quot;: &quot;A String&quot;, # The suggested backend language.
-              &quot;commandlinesFileName&quot;: &quot;A String&quot;, # The file to store preprocessing commands in.
-              &quot;baseTaskDir&quot;: &quot;A String&quot;, # The location on the worker for task-specific subdirectories.
-              &quot;tempStoragePrefix&quot;: &quot;A String&quot;, # The prefix of the resources the taskrunner should use for
-                  # temporary storage.
-                  #
-                  # The supported resource type is:
-                  #
-                  # Google Cloud Storage:
-                  #   storage.googleapis.com/{bucket}/{object}
-                  #   bucket.storage.googleapis.com/{object}
               &quot;baseUrl&quot;: &quot;A String&quot;, # The base URL for the taskrunner to use when accessing Google Cloud APIs.
                   #
                   # When workers access Google Cloud APIs, they logically do so via
@@ -1132,10 +446,17 @@
                   # Locators&quot;.
                   #
                   # If not specified, the default value is &quot;http://www.googleapis.com/&quot;
+              &quot;workflowFileName&quot;: &quot;A String&quot;, # The file to store the workflow in.
               &quot;logToSerialconsole&quot;: True or False, # Whether to send taskrunner log info to Google Compute Engine VM serial
                   # console.
-              &quot;continueOnException&quot;: True or False, # Whether to continue taskrunner if an exception is hit.
+              &quot;baseTaskDir&quot;: &quot;A String&quot;, # The location on the worker for task-specific subdirectories.
+              &quot;taskUser&quot;: &quot;A String&quot;, # The UNIX user ID on the worker VM to use for tasks launched by
+                  # taskrunner; e.g. &quot;root&quot;.
+              &quot;vmId&quot;: &quot;A String&quot;, # The ID string of the VM.
+              &quot;alsologtostderr&quot;: True or False, # Whether to also send taskrunner log info to stderr.
               &quot;parallelWorkerSettings&quot;: { # Provides data to pass through to the worker harness. # The settings to pass to the parallel worker harness.
+                &quot;shuffleServicePath&quot;: &quot;A String&quot;, # The Shuffle service path relative to the root URL, for example,
+                    # &quot;shuffle/v1beta1&quot;.
                 &quot;tempStoragePrefix&quot;: &quot;A String&quot;, # The prefix of the resources the system should use for temporary
                     # storage.
                     #
@@ -1146,6 +467,8 @@
                     #   storage.googleapis.com/{bucket}/{object}
                     #   bucket.storage.googleapis.com/{object}
                 &quot;reportingEnabled&quot;: True or False, # Whether to send work progress updates to the service.
+                &quot;servicePath&quot;: &quot;A String&quot;, # The Cloud Dataflow service path relative to the root URL, for example,
+                    # &quot;dataflow/v1b3/projects&quot;.
                 &quot;baseUrl&quot;: &quot;A String&quot;, # The base URL for accessing Google Cloud APIs.
                     #
                     # When workers access Google Cloud APIs, they logically do so via
@@ -1155,30 +478,64 @@
                     # Locators&quot;.
                     #
                     # If not specified, the default value is &quot;http://www.googleapis.com/&quot;
-                &quot;servicePath&quot;: &quot;A String&quot;, # The Cloud Dataflow service path relative to the root URL, for example,
-                    # &quot;dataflow/v1b3/projects&quot;.
-                &quot;shuffleServicePath&quot;: &quot;A String&quot;, # The Shuffle service path relative to the root URL, for example,
-                    # &quot;shuffle/v1beta1&quot;.
                 &quot;workerId&quot;: &quot;A String&quot;, # The ID of the worker running this pipeline.
               },
-              &quot;taskUser&quot;: &quot;A String&quot;, # The UNIX user ID on the worker VM to use for tasks launched by
-                  # taskrunner; e.g. &quot;root&quot;.
-              &quot;vmId&quot;: &quot;A String&quot;, # The ID string of the VM.
+              &quot;harnessCommand&quot;: &quot;A String&quot;, # The command to launch the worker harness.
+              &quot;logDir&quot;: &quot;A String&quot;, # The directory on the VM to store logs.
+              &quot;streamingWorkerMainClass&quot;: &quot;A String&quot;, # The streaming worker main class name.
+              &quot;languageHint&quot;: &quot;A String&quot;, # The suggested backend language.
+              &quot;taskGroup&quot;: &quot;A String&quot;, # The UNIX group ID on the worker VM to use for tasks launched by
+                  # taskrunner; e.g. &quot;wheel&quot;.
+              &quot;logUploadLocation&quot;: &quot;A String&quot;, # Indicates where to put logs.  If this is not specified, the logs
+                  # will not be uploaded.
+                  #
+                  # The supported resource type is:
+                  #
+                  # Google Cloud Storage:
+                  #   storage.googleapis.com/{bucket}/{object}
+                  #   bucket.storage.googleapis.com/{object}
+              &quot;commandlinesFileName&quot;: &quot;A String&quot;, # The file to store preprocessing commands in.
+              &quot;continueOnException&quot;: True or False, # Whether to continue taskrunner if an exception is hit.
+              &quot;tempStoragePrefix&quot;: &quot;A String&quot;, # The prefix of the resources the taskrunner should use for
+                  # temporary storage.
+                  #
+                  # The supported resource type is:
+                  #
+                  # Google Cloud Storage:
+                  #   storage.googleapis.com/{bucket}/{object}
+                  #   bucket.storage.googleapis.com/{object}
             },
-            &quot;autoscalingSettings&quot;: { # Settings for WorkerPool autoscaling. # Settings for autoscaling of this WorkerPool.
-              &quot;algorithm&quot;: &quot;A String&quot;, # The algorithm to use for autoscaling.
-              &quot;maxNumWorkers&quot;: 42, # The maximum number of workers to cap scaling at.
-            },
-            &quot;metadata&quot;: { # Metadata to set on the Google Compute Engine VMs.
-              &quot;a_key&quot;: &quot;A String&quot;,
-            },
+            &quot;diskType&quot;: &quot;A String&quot;, # Type of root disk for VMs.  If empty or unspecified, the service will
+                # attempt to choose a reasonable default.
             &quot;defaultPackageSet&quot;: &quot;A String&quot;, # The default package set to install.  This allows the service to
                 # select a default set of packages which are useful to worker
                 # harnesses written in a particular language.
-            &quot;network&quot;: &quot;A String&quot;, # Network to which VMs will be assigned.  If empty or unspecified,
-                # the service will use the network &quot;default&quot;.
+            &quot;machineType&quot;: &quot;A String&quot;, # Machine type (e.g. &quot;n1-standard-1&quot;).  If empty or unspecified, the
+                # service will attempt to choose a reasonable default.
           },
         ],
+        &quot;tempStoragePrefix&quot;: &quot;A String&quot;, # The prefix of the resources the system should use for temporary
+            # storage.  The system will append the suffix &quot;/temp-{JOBNAME}&quot; to
+            # this resource prefix, where {JOBNAME} is the value of the
+            # job_name field.  The resulting bucket and object prefix is used
+            # as the prefix of the resources used to store temporary data
+            # needed during the job execution.  NOTE: This will override the
+            # value in taskrunner_settings.
+            # The supported resource type is:
+            #
+            # Google Cloud Storage:
+            #
+            #   storage.googleapis.com/{bucket}/{object}
+            #   bucket.storage.googleapis.com/{object}
+        &quot;internalExperiments&quot;: { # Experimental settings.
+          &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
+        },
+        &quot;sdkPipelineOptions&quot;: { # The Cloud Dataflow SDK pipeline options specified by the user. These
+            # options are passed through the service and are used to recreate the
+            # SDK pipeline options on the worker in a language agnostic and platform
+            # independent way.
+          &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
+        },
         &quot;dataset&quot;: &quot;A String&quot;, # The dataset for the current project where various workflow
             # related tables are stored.
             #
@@ -1186,213 +543,12 @@
             #
             # Google BigQuery:
             #   bigquery.googleapis.com/{dataset}
+        &quot;clusterManagerApiService&quot;: &quot;A String&quot;, # The type of cluster manager API to use.  If unknown or
+            # unspecified, the service will attempt to choose a reasonable
+            # default.  This should be in the form of the API service name,
+            # e.g. &quot;compute.googleapis.com&quot;.
       },
-      &quot;stageStates&quot;: [ # This field may be mutated by the Cloud Dataflow service;
-          # callers cannot mutate it.
-        { # A message describing the state of a particular execution stage.
-          &quot;currentStateTime&quot;: &quot;A String&quot;, # The time at which the stage transitioned to this state.
-          &quot;executionStageState&quot;: &quot;A String&quot;, # Execution stage states allow the same set of values as JobState.
-          &quot;executionStageName&quot;: &quot;A String&quot;, # The name of the execution stage.
-        },
-      ],
-      &quot;jobMetadata&quot;: { # Metadata available primarily for filtering jobs. Will be included in the # This field is populated by the Dataflow service to support filtering jobs
-          # by the metadata values provided here. Populated for ListJobs and all GetJob
-          # views SUMMARY and higher.
-          # ListJob response and Job SUMMARY view.
-        &quot;datastoreDetails&quot;: [ # Identification of a Datastore source used in the Dataflow job.
-          { # Metadata for a Datastore connector used by the job.
-            &quot;namespace&quot;: &quot;A String&quot;, # Namespace used in the connection.
-            &quot;projectId&quot;: &quot;A String&quot;, # ProjectId accessed in the connection.
-          },
-        ],
-        &quot;sdkVersion&quot;: { # The version of the SDK used to run the job. # The SDK version used to run the job.
-          &quot;version&quot;: &quot;A String&quot;, # The version of the SDK used to run the job.
-          &quot;sdkSupportStatus&quot;: &quot;A String&quot;, # The support status for this SDK version.
-          &quot;versionDisplayName&quot;: &quot;A String&quot;, # A readable string describing the version of the SDK.
-        },
-        &quot;bigqueryDetails&quot;: [ # Identification of a BigQuery source used in the Dataflow job.
-          { # Metadata for a BigQuery connector used by the job.
-            &quot;table&quot;: &quot;A String&quot;, # Table accessed in the connection.
-            &quot;dataset&quot;: &quot;A String&quot;, # Dataset accessed in the connection.
-            &quot;query&quot;: &quot;A String&quot;, # Query used to access data in the connection.
-            &quot;projectId&quot;: &quot;A String&quot;, # Project accessed in the connection.
-          },
-        ],
-        &quot;fileDetails&quot;: [ # Identification of a File source used in the Dataflow job.
-          { # Metadata for a File connector used by the job.
-            &quot;filePattern&quot;: &quot;A String&quot;, # File Pattern used to access files by the connector.
-          },
-        ],
-        &quot;pubsubDetails&quot;: [ # Identification of a PubSub source used in the Dataflow job.
-          { # Metadata for a PubSub connector used by the job.
-            &quot;topic&quot;: &quot;A String&quot;, # Topic accessed in the connection.
-            &quot;subscription&quot;: &quot;A String&quot;, # Subscription used in the connection.
-          },
-        ],
-        &quot;bigTableDetails&quot;: [ # Identification of a BigTable source used in the Dataflow job.
-          { # Metadata for a BigTable connector used by the job.
-            &quot;projectId&quot;: &quot;A String&quot;, # ProjectId accessed in the connection.
-            &quot;instanceId&quot;: &quot;A String&quot;, # InstanceId accessed in the connection.
-            &quot;tableId&quot;: &quot;A String&quot;, # TableId accessed in the connection.
-          },
-        ],
-        &quot;spannerDetails&quot;: [ # Identification of a Spanner source used in the Dataflow job.
-          { # Metadata for a Spanner connector used by the job.
-            &quot;instanceId&quot;: &quot;A String&quot;, # InstanceId accessed in the connection.
-            &quot;projectId&quot;: &quot;A String&quot;, # ProjectId accessed in the connection.
-            &quot;databaseId&quot;: &quot;A String&quot;, # DatabaseId accessed in the connection.
-          },
-        ],
-      },
-      &quot;type&quot;: &quot;A String&quot;, # The type of Cloud Dataflow job.
-      &quot;projectId&quot;: &quot;A String&quot;, # The ID of the Cloud Platform project that the job belongs to.
-      &quot;createdFromSnapshotId&quot;: &quot;A String&quot;, # If this is specified, the job&#x27;s initial state is populated from the given
-          # snapshot.
-      &quot;pipelineDescription&quot;: { # A descriptive representation of submitted pipeline as well as the executed # Preliminary field: The format of this data may change at any time.
-          # A description of the user pipeline and stages through which it is executed.
-          # Created by Cloud Dataflow service.  Only retrieved with
-          # JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL.
-          # form.  This data is provided by the Dataflow service for ease of visualizing
-          # the pipeline and interpreting Dataflow provided metrics.
-        &quot;executionPipelineStage&quot;: [ # Description of each stage of execution of the pipeline.
-          { # Description of the composing transforms, names/ids, and input/outputs of a
-              # stage of execution.  Some composing transforms and sources may have been
-              # generated by the Dataflow service during execution planning.
-            &quot;outputSource&quot;: [ # Output sources for this stage.
-              { # Description of an input or output of an execution stage.
-                &quot;sizeBytes&quot;: &quot;A String&quot;, # Size of the source, if measurable.
-                &quot;name&quot;: &quot;A String&quot;, # Dataflow service generated name for this source.
-                &quot;userName&quot;: &quot;A String&quot;, # Human-readable name for this source; may be user or system generated.
-                &quot;originalTransformOrCollection&quot;: &quot;A String&quot;, # User name for the original user transform or collection with which this
-                    # source is most closely associated.
-              },
-            ],
-            &quot;name&quot;: &quot;A String&quot;, # Dataflow service generated name for this stage.
-            &quot;inputSource&quot;: [ # Input sources for this stage.
-              { # Description of an input or output of an execution stage.
-                &quot;sizeBytes&quot;: &quot;A String&quot;, # Size of the source, if measurable.
-                &quot;name&quot;: &quot;A String&quot;, # Dataflow service generated name for this source.
-                &quot;userName&quot;: &quot;A String&quot;, # Human-readable name for this source; may be user or system generated.
-                &quot;originalTransformOrCollection&quot;: &quot;A String&quot;, # User name for the original user transform or collection with which this
-                    # source is most closely associated.
-              },
-            ],
-            &quot;id&quot;: &quot;A String&quot;, # Dataflow service generated id for this stage.
-            &quot;componentTransform&quot;: [ # Transforms that comprise this execution stage.
-              { # Description of a transform executed as part of an execution stage.
-                &quot;originalTransform&quot;: &quot;A String&quot;, # User name for the original user transform with which this transform is
-                    # most closely associated.
-                &quot;name&quot;: &quot;A String&quot;, # Dataflow service generated name for this source.
-                &quot;userName&quot;: &quot;A String&quot;, # Human-readable name for this transform; may be user or system generated.
-              },
-            ],
-            &quot;componentSource&quot;: [ # Collections produced and consumed by component transforms of this stage.
-              { # Description of an interstitial value between transforms in an execution
-                  # stage.
-                &quot;name&quot;: &quot;A String&quot;, # Dataflow service generated name for this source.
-                &quot;userName&quot;: &quot;A String&quot;, # Human-readable name for this transform; may be user or system generated.
-                &quot;originalTransformOrCollection&quot;: &quot;A String&quot;, # User name for the original user transform or collection with which this
-                    # source is most closely associated.
-              },
-            ],
-            &quot;kind&quot;: &quot;A String&quot;, # Type of transform this stage is executing.
-          },
-        ],
-        &quot;originalPipelineTransform&quot;: [ # Description of each transform in the pipeline and collections between them.
-          { # Description of the type, names/ids, and input/outputs for a transform.
-            &quot;kind&quot;: &quot;A String&quot;, # Type of transform.
-            &quot;inputCollectionName&quot;: [ # User names for all collection inputs to this transform.
-              &quot;A String&quot;,
-            ],
-            &quot;name&quot;: &quot;A String&quot;, # User provided name for this transform instance.
-            &quot;id&quot;: &quot;A String&quot;, # SDK generated id of this transform instance.
-            &quot;displayData&quot;: [ # Transform-specific display data.
-              { # Data provided with a pipeline or transform to provide descriptive info.
-                &quot;durationValue&quot;: &quot;A String&quot;, # Contains value if the data is of duration type.
-                &quot;int64Value&quot;: &quot;A String&quot;, # Contains value if the data is of int64 type.
-                &quot;namespace&quot;: &quot;A String&quot;, # The namespace for the key. This is usually a class name or programming
-                    # language namespace (e.g. a Python module) which defines the display data.
-                    # This allows a dax monitoring system to specially handle the data
-                    # and perform custom rendering.
-                &quot;floatValue&quot;: 3.14, # Contains value if the data is of float type.
-                &quot;key&quot;: &quot;A String&quot;, # The key identifying the display data.
-                    # This is intended to be used as a label for the display data
-                    # when viewed in a dax monitoring system.
-                &quot;shortStrValue&quot;: &quot;A String&quot;, # A possible additional shorter value to display.
-                    # For example a java_class_name_value of com.mypackage.MyDoFn
-                    # will be stored with MyDoFn as the short_str_value and
-                    # com.mypackage.MyDoFn as the java_class_name value.
-                    # short_str_value can be displayed and java_class_name_value
-                    # will be displayed as a tooltip.
-                &quot;url&quot;: &quot;A String&quot;, # An optional full URL.
-                &quot;label&quot;: &quot;A String&quot;, # An optional label to display in a dax UI for the element.
-                &quot;timestampValue&quot;: &quot;A String&quot;, # Contains value if the data is of timestamp type.
-                &quot;boolValue&quot;: True or False, # Contains value if the data is of a boolean type.
-                &quot;javaClassValue&quot;: &quot;A String&quot;, # Contains value if the data is of java class type.
-                &quot;strValue&quot;: &quot;A String&quot;, # Contains value if the data is of string type.
-              },
-            ],
-            &quot;outputCollectionName&quot;: [ # User names for all collection outputs to this transform.
-              &quot;A String&quot;,
-            ],
-          },
-        ],
-        &quot;displayData&quot;: [ # Pipeline level display data.
-          { # Data provided with a pipeline or transform to provide descriptive info.
-            &quot;durationValue&quot;: &quot;A String&quot;, # Contains value if the data is of duration type.
-            &quot;int64Value&quot;: &quot;A String&quot;, # Contains value if the data is of int64 type.
-            &quot;namespace&quot;: &quot;A String&quot;, # The namespace for the key. This is usually a class name or programming
-                # language namespace (e.g. a Python module) which defines the display data.
-                # This allows a dax monitoring system to specially handle the data
-                # and perform custom rendering.
-            &quot;floatValue&quot;: 3.14, # Contains value if the data is of float type.
-            &quot;key&quot;: &quot;A String&quot;, # The key identifying the display data.
-                # This is intended to be used as a label for the display data
-                # when viewed in a dax monitoring system.
-            &quot;shortStrValue&quot;: &quot;A String&quot;, # A possible additional shorter value to display.
-                # For example a java_class_name_value of com.mypackage.MyDoFn
-                # will be stored with MyDoFn as the short_str_value and
-                # com.mypackage.MyDoFn as the java_class_name value.
-                # short_str_value can be displayed and java_class_name_value
-                # will be displayed as a tooltip.
-            &quot;url&quot;: &quot;A String&quot;, # An optional full URL.
-            &quot;label&quot;: &quot;A String&quot;, # An optional label to display in a dax UI for the element.
-            &quot;timestampValue&quot;: &quot;A String&quot;, # Contains value if the data is of timestamp type.
-            &quot;boolValue&quot;: True or False, # Contains value if the data is of a boolean type.
-            &quot;javaClassValue&quot;: &quot;A String&quot;, # Contains value if the data is of java class type.
-            &quot;strValue&quot;: &quot;A String&quot;, # Contains value if the data is of string type.
-          },
-        ],
-      },
-      &quot;replaceJobId&quot;: &quot;A String&quot;, # If this job is an update of an existing job, this field is the job ID
-          # of the job it replaced.
-          #
-          # When sending a `CreateJobRequest`, you can update a job by specifying it
-          # here. The job named here is stopped, and its intermediate state is
-          # transferred to this job.
-      &quot;tempFiles&quot;: [ # A set of files the system should be aware of that are used
-          # for temporary storage. These temporary files will be
-          # removed on job completion.
-          # No duplicates are allowed.
-          # No file patterns are supported.
-          #
-          # The supported files are:
-          #
-          # Google Cloud Storage:
-          #
-          #    storage.googleapis.com/{bucket}/{object}
-          #    bucket.storage.googleapis.com/{object}
-        &quot;A String&quot;,
-      ],
-      &quot;name&quot;: &quot;A String&quot;, # The user-specified Cloud Dataflow job name.
-          #
-          # Only one Job with a given name may exist in a project at any
-          # given time. If a caller attempts to create a Job with the same
-          # name as an already-existing Job, the attempt returns the
-          # existing Job.
-          #
-          # The name must match the regular expression
-          # `[a-z]([-a-z0-9]{0,38}[a-z0-9])?`
+      &quot;stepsLocation&quot;: &quot;A String&quot;, # The GCS location where the steps are stored.
       &quot;steps&quot;: [ # Exactly one of step or steps_location should be specified.
           #
           # The top-level steps that constitute the entire job.
@@ -1421,18 +577,95 @@
             #
             # Note that the Cloud Dataflow service may be used to run many different
             # types of jobs, not just Map-Reduce.
-          &quot;name&quot;: &quot;A String&quot;, # The name that identifies the step. This must be unique for each
-              # step with respect to all other steps in the Cloud Dataflow job.
           &quot;kind&quot;: &quot;A String&quot;, # The kind of step in the Cloud Dataflow job.
           &quot;properties&quot;: { # Named properties associated with the step. Each kind of
               # predefined step has its own required set of properties.
               # Must be provided on Create.  Only retrieved with JOB_VIEW_ALL.
             &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
           },
+          &quot;name&quot;: &quot;A String&quot;, # The name that identifies the step. This must be unique for each
+              # step with respect to all other steps in the Cloud Dataflow job.
+        },
+      ],
+      &quot;stageStates&quot;: [ # This field may be mutated by the Cloud Dataflow service;
+          # callers cannot mutate it.
+        { # A message describing the state of a particular execution stage.
+          &quot;executionStageState&quot;: &quot;A String&quot;, # Execution stage states allow the same set of values as JobState.
+          &quot;executionStageName&quot;: &quot;A String&quot;, # The name of the execution stage.
+          &quot;currentStateTime&quot;: &quot;A String&quot;, # The time at which the stage transitioned to this state.
         },
       ],
       &quot;replacedByJobId&quot;: &quot;A String&quot;, # If another job is an update of this job (and thus, this job is in
           # `JOB_STATE_UPDATED`), this field contains the ID of that job.
+      &quot;jobMetadata&quot;: { # Metadata available primarily for filtering jobs. Will be included in the # This field is populated by the Dataflow service to support filtering jobs
+          # by the metadata values provided here. Populated for ListJobs and all GetJob
+          # views SUMMARY and higher.
+          # ListJob response and Job SUMMARY view.
+        &quot;sdkVersion&quot;: { # The version of the SDK used to run the job. # The SDK version used to run the job.
+          &quot;sdkSupportStatus&quot;: &quot;A String&quot;, # The support status for this SDK version.
+          &quot;versionDisplayName&quot;: &quot;A String&quot;, # A readable string describing the version of the SDK.
+          &quot;version&quot;: &quot;A String&quot;, # The version of the SDK used to run the job.
+        },
+        &quot;bigTableDetails&quot;: [ # Identification of a BigTable source used in the Dataflow job.
+          { # Metadata for a BigTable connector used by the job.
+            &quot;instanceId&quot;: &quot;A String&quot;, # InstanceId accessed in the connection.
+            &quot;tableId&quot;: &quot;A String&quot;, # TableId accessed in the connection.
+            &quot;projectId&quot;: &quot;A String&quot;, # ProjectId accessed in the connection.
+          },
+        ],
+        &quot;pubsubDetails&quot;: [ # Identification of a PubSub source used in the Dataflow job.
+          { # Metadata for a PubSub connector used by the job.
+            &quot;subscription&quot;: &quot;A String&quot;, # Subscription used in the connection.
+            &quot;topic&quot;: &quot;A String&quot;, # Topic accessed in the connection.
+          },
+        ],
+        &quot;bigqueryDetails&quot;: [ # Identification of a BigQuery source used in the Dataflow job.
+          { # Metadata for a BigQuery connector used by the job.
+            &quot;dataset&quot;: &quot;A String&quot;, # Dataset accessed in the connection.
+            &quot;projectId&quot;: &quot;A String&quot;, # Project accessed in the connection.
+            &quot;query&quot;: &quot;A String&quot;, # Query used to access data in the connection.
+            &quot;table&quot;: &quot;A String&quot;, # Table accessed in the connection.
+          },
+        ],
+        &quot;fileDetails&quot;: [ # Identification of a File source used in the Dataflow job.
+          { # Metadata for a File connector used by the job.
+            &quot;filePattern&quot;: &quot;A String&quot;, # File Pattern used to access files by the connector.
+          },
+        ],
+        &quot;datastoreDetails&quot;: [ # Identification of a Datastore source used in the Dataflow job.
+          { # Metadata for a Datastore connector used by the job.
+            &quot;namespace&quot;: &quot;A String&quot;, # Namespace used in the connection.
+            &quot;projectId&quot;: &quot;A String&quot;, # ProjectId accessed in the connection.
+          },
+        ],
+        &quot;spannerDetails&quot;: [ # Identification of a Spanner source used in the Dataflow job.
+          { # Metadata for a Spanner connector used by the job.
+            &quot;instanceId&quot;: &quot;A String&quot;, # InstanceId accessed in the connection.
+            &quot;databaseId&quot;: &quot;A String&quot;, # DatabaseId accessed in the connection.
+            &quot;projectId&quot;: &quot;A String&quot;, # ProjectId accessed in the connection.
+          },
+        ],
+      },
+      &quot;location&quot;: &quot;A String&quot;, # The [regional endpoint]
+          # (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that
+          # contains this job.
+      &quot;transformNameMapping&quot;: { # The map of transform name prefixes of the job to be replaced to the
+          # corresponding name prefixes of the new job.
+        &quot;a_key&quot;: &quot;A String&quot;,
+      },
+      &quot;startTime&quot;: &quot;A String&quot;, # The timestamp when the job was started (transitioned to JOB_STATE_PENDING).
+          # Flexible resource scheduling jobs are started with some delay after job
+          # creation, so start_time is unset before start and is updated when the
+          # job is started by the Cloud Dataflow service. For other jobs, start_time
+          # always equals create_time and is immutable and set by the Cloud Dataflow
+          # service.
+      &quot;clientRequestId&quot;: &quot;A String&quot;, # The client&#x27;s unique identifier of the job, re-used across retried attempts.
+          # If this field is set, the service will ensure its uniqueness.
+          # The request to create a job will fail if the service has knowledge of a
+          # previously submitted job with the same client&#x27;s ID and job name.
+          # The caller may use this field to ensure idempotence of job
+          # creation across retried attempts to create a job.
+          # By default, the field is empty and, in that case, the service ignores it.
       &quot;executionInfo&quot;: { # Additional information about how a Cloud Dataflow job will be executed that # Deprecated.
           # isn&#x27;t contained in the submitted job.
         &quot;stages&quot;: { # A mapping from each stage to the information about that stage.
@@ -1446,6 +679,42 @@
           },
         },
       },
+      &quot;type&quot;: &quot;A String&quot;, # The type of Cloud Dataflow job.
+      &quot;createTime&quot;: &quot;A String&quot;, # The timestamp when the job was initially created. Immutable and set by the
+          # Cloud Dataflow service.
+      &quot;tempFiles&quot;: [ # A set of files the system should be aware of that are used
+          # for temporary storage. These temporary files will be
+          # removed on job completion.
+          # No duplicates are allowed.
+          # No file patterns are supported.
+          #
+          # The supported files are:
+          #
+          # Google Cloud Storage:
+          #
+          #    storage.googleapis.com/{bucket}/{object}
+          #    bucket.storage.googleapis.com/{object}
+        &quot;A String&quot;,
+      ],
+      &quot;id&quot;: &quot;A String&quot;, # The unique ID of this job.
+          #
+          # This field is set by the Cloud Dataflow service when the Job is
+          # created, and is immutable for the life of the job.
+      &quot;requestedState&quot;: &quot;A String&quot;, # The job&#x27;s requested state.
+          #
+          # `UpdateJob` may be used to switch between the `JOB_STATE_STOPPED` and
+          # `JOB_STATE_RUNNING` states, by setting requested_state.  `UpdateJob` may
+          # also be used to directly set a job&#x27;s requested state to
+          # `JOB_STATE_CANCELLED` or `JOB_STATE_DONE`, irrevocably terminating the
+          # job if it has not already reached a terminal state.
+      &quot;replaceJobId&quot;: &quot;A String&quot;, # If this job is an update of an existing job, this field is the job ID
+          # of the job it replaced.
+          #
+          # When sending a `CreateJobRequest`, you can update a job by specifying it
+          # here. The job named here is stopped, and its intermediate state is
+          # transferred to this job.
+      &quot;createdFromSnapshotId&quot;: &quot;A String&quot;, # If this is specified, the job&#x27;s initial state is populated from the given
+          # snapshot.
       &quot;currentState&quot;: &quot;A String&quot;, # The current state of the job.
           #
           # Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise
@@ -1457,37 +726,768 @@
           #
           # This field may be mutated by the Cloud Dataflow service;
           # callers cannot mutate it.
-      &quot;location&quot;: &quot;A String&quot;, # The [regional endpoint]
-          # (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that
-          # contains this job.
-      &quot;startTime&quot;: &quot;A String&quot;, # The timestamp when the job was started (transitioned to JOB_STATE_PENDING).
-          # Flexible resource scheduling jobs are started with some delay after job
-          # creation, so start_time is unset before start and is updated when the
-          # job is started by the Cloud Dataflow service. For other jobs, start_time
-          # always equals to create_time and is immutable and set by the Cloud Dataflow
-          # service.
-      &quot;stepsLocation&quot;: &quot;A String&quot;, # The GCS location where the steps are stored.
-      &quot;labels&quot;: { # User-defined labels for this job.
+      &quot;name&quot;: &quot;A String&quot;, # The user-specified Cloud Dataflow job name.
           #
-          # The labels map can contain no more than 64 entries.  Entries of the labels
-          # map are UTF8 strings that comply with the following restrictions:
+          # Only one Job with a given name may exist in a project at any
+          # given time. If a caller attempts to create a Job with the same
+          # name as an already-existing Job, the attempt returns the
+          # existing Job.
           #
-          # * Keys must conform to regexp:  \p{Ll}\p{Lo}{0,62}
-          # * Values must conform to regexp:  [\p{Ll}\p{Lo}\p{N}_-]{0,63}
-          # * Both keys and values are additionally constrained to be &lt;= 128 bytes in
-          # size.
+          # The name must match the regular expression
+          # `[a-z]([-a-z0-9]{0,38}[a-z0-9])?`
+      &quot;currentStateTime&quot;: &quot;A String&quot;, # The timestamp associated with the current state.
+    }</pre>
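+<p>For example, a minimal sketch of creating a Cloud Dataflow job from a template with the Python API client. Application Default Credentials are assumed, and the project ID, job name, and Cloud Storage paths below are placeholders, not values defined by this API:</p>
+<pre>
+from googleapiclient.discovery import build
+
+# Build the Dataflow client; credentials are resolved from the environment.
+dataflow = build(&#x27;dataflow&#x27;, &#x27;v1b3&#x27;)
+
+body = {
+    &#x27;jobName&#x27;: &#x27;example-wordcount&#x27;,  # placeholder job name
+    &#x27;gcsPath&#x27;: &#x27;gs://my-bucket/templates/my-template&#x27;,  # placeholder path
+    &#x27;parameters&#x27;: {&#x27;inputFile&#x27;: &#x27;gs://my-bucket/input.txt&#x27;},
+}
+job = dataflow.projects().templates().create(
+    projectId=&#x27;my-project&#x27;, body=body).execute()
+
+# The response is the Job object described above.
+print(job.get(&#x27;id&#x27;), job.get(&#x27;currentState&#x27;))
+</pre>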
+</div>
+
+<div class="method">
+    <code class="details" id="get">get(projectId, view=None, gcsPath=None, location=None, x__xgafv=None)</code>
+  <pre>Get the template associated with a template.
+
+Args:
+  projectId: string, Required. The ID of the Cloud Platform project that the job belongs to. (required)
+  view: string, The view to retrieve. Defaults to METADATA_ONLY.
+  gcsPath: string, Required. A Cloud Storage path to the template from which to
+create the job.
+Must be a valid Cloud Storage URL, beginning with &#x27;gs://&#x27;.
+  location: string, The [regional endpoint]
+(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to
+which to direct the request.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response to a GetTemplate request.
+    &quot;runtimeMetadata&quot;: { # RuntimeMetadata describing a runtime environment. # Describes the runtime metadata with SDKInfo and available parameters.
+      &quot;parameters&quot;: [ # The parameters for the template.
+        { # Metadata for a specific parameter.
+          &quot;label&quot;: &quot;A String&quot;, # Required. The label to display for the parameter.
+          &quot;helpText&quot;: &quot;A String&quot;, # Required. The help text to display for the parameter.
+          &quot;regexes&quot;: [ # Optional. Regexes that the parameter must match.
+            &quot;A String&quot;,
+          ],
+          &quot;paramType&quot;: &quot;A String&quot;, # Optional. The type of the parameter.
+              # Used for selecting input picker.
+          &quot;isOptional&quot;: True or False, # Optional. Whether the parameter is optional. Defaults to false.
+          &quot;name&quot;: &quot;A String&quot;, # Required. The name of the parameter.
+        },
+      ],
+      &quot;sdkInfo&quot;: { # SDK Information. # SDK Info for the template.
+        &quot;language&quot;: &quot;A String&quot;, # Required. The SDK Language.
+        &quot;version&quot;: &quot;A String&quot;, # Optional. The SDK version.
+      },
+    },
+    &quot;status&quot;: { # The `Status` type defines a logical error model that is suitable for # The status of the get template request. Any problems with the
+        # request will be indicated in the error_details.
+        # different programming environments, including REST APIs and RPC APIs. It is
+        # used by [gRPC](https://github.com/grpc). Each `Status` message contains
+        # three pieces of data: error code, error message, and error details.
+        #
+        # You can find out more about this error model and how to work with it in the
+        # [API Design Guide](https://cloud.google.com/apis/design/errors).
+      &quot;details&quot;: [ # A list of messages that carry the error details.  There is a common set of
+          # message types for APIs to use.
+        {
+          &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
+        },
+      ],
+      &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
+      &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any
+          # user-facing error message should be localized and sent in the
+          # google.rpc.Status.details field, or localized by the client.
+    },
+    &quot;metadata&quot;: { # Metadata describing a template. # The template metadata describing the template name, available
+        # parameters, etc.
+      &quot;description&quot;: &quot;A String&quot;, # Optional. A description of the template.
+      &quot;parameters&quot;: [ # The parameters for the template.
+        { # Metadata for a specific parameter.
+          &quot;label&quot;: &quot;A String&quot;, # Required. The label to display for the parameter.
+          &quot;helpText&quot;: &quot;A String&quot;, # Required. The help text to display for the parameter.
+          &quot;regexes&quot;: [ # Optional. Regexes that the parameter must match.
+            &quot;A String&quot;,
+          ],
+          &quot;paramType&quot;: &quot;A String&quot;, # Optional. The type of the parameter.
+              # Used for selecting input picker.
+          &quot;isOptional&quot;: True or False, # Optional. Whether the parameter is optional. Defaults to false.
+          &quot;name&quot;: &quot;A String&quot;, # Required. The name of the parameter.
+        },
+      ],
+      &quot;name&quot;: &quot;A String&quot;, # Required. The name of the template.
+    },
+    &quot;templateType&quot;: &quot;A String&quot;, # Template Type.
+  }</pre>
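+<p>A minimal sketch of retrieving template metadata with the Python API client; the project ID and template path are placeholders, and Application Default Credentials are assumed:</p>
+<pre>
+from googleapiclient.discovery import build
+
+dataflow = build(&#x27;dataflow&#x27;, &#x27;v1b3&#x27;)
+
+# view defaults to METADATA_ONLY, so only the template metadata is returned.
+tmpl = dataflow.projects().templates().get(
+    projectId=&#x27;my-project&#x27;,
+    gcsPath=&#x27;gs://my-bucket/templates/my-template&#x27;).execute()
+
+# List the template&#x27;s declared parameters and whether each is required.
+for p in tmpl.get(&#x27;metadata&#x27;, {}).get(&#x27;parameters&#x27;, []):
+    print(p[&#x27;name&#x27;], &#x27;optional&#x27; if p.get(&#x27;isOptional&#x27;) else &#x27;required&#x27;)
+</pre>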
+</div>
+
+<div class="method">
+    <code class="details" id="launch">launch(projectId, body=None, dynamicTemplate_gcsPath=None, dynamicTemplate_stagingLocation=None, location=None, validateOnly=None, gcsPath=None, x__xgafv=None)</code>
+  <pre>Launch a template.
+
+Args:
+  projectId: string, Required. The ID of the Cloud Platform project that the job belongs to. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Parameters to provide to the template being launched.
+    &quot;environment&quot;: { # The environment values to set at runtime. # The runtime environment for the job.
+      &quot;bypassTempDirValidation&quot;: True or False, # Whether to bypass the safety checks for the job&#x27;s temporary directory.
+          # Use with caution.
+      &quot;tempLocation&quot;: &quot;A String&quot;, # The Cloud Storage path to use for temporary files.
+          # Must be a valid Cloud Storage URL, beginning with `gs://`.
+      &quot;network&quot;: &quot;A String&quot;, # Network to which VMs will be assigned.  If empty or unspecified,
+          # the service will use the network &quot;default&quot;.
+      &quot;subnetwork&quot;: &quot;A String&quot;, # Subnetwork to which VMs will be assigned, if desired.  Expected to be of
+          # the form &quot;regions/REGION/subnetworks/SUBNETWORK&quot;.
+      &quot;workerRegion&quot;: &quot;A String&quot;, # The Compute Engine region
+          # (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in
+          # which worker processing should occur, e.g. &quot;us-west1&quot;. Mutually exclusive
+          # with worker_zone. If neither worker_region nor worker_zone is specified,
+          # default to the control plane&#x27;s region.
+      &quot;numWorkers&quot;: 42, # The initial number of Google Compute Engine instances for the job.
+      &quot;additionalExperiments&quot;: [ # Additional experiment flags for the job.
+        &quot;A String&quot;,
+      ],
+      &quot;zone&quot;: &quot;A String&quot;, # The Compute Engine [availability
+          # zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones)
+          # for launching worker instances to run your pipeline.
+          # In the future, worker_zone will take precedence.
+      &quot;serviceAccountEmail&quot;: &quot;A String&quot;, # The email address of the service account to run the job as.
+      &quot;maxWorkers&quot;: 42, # The maximum number of Google Compute Engine instances to be made
+          # available to your pipeline during execution, from 1 to 1000.
+      &quot;workerZone&quot;: &quot;A String&quot;, # The Compute Engine zone
+          # (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in
+          # which worker processing should occur, e.g. &quot;us-west1-a&quot;. Mutually exclusive
+          # with worker_region. If neither worker_region nor worker_zone is specified,
+          # a zone in the control plane&#x27;s region is chosen based on available capacity.
+          # If both `worker_zone` and `zone` are set, `worker_zone` takes precedence.
+      &quot;additionalUserLabels&quot;: { # Additional user labels to be specified for the job.
+          # Keys and values should follow the restrictions specified in the [labeling
+          # restrictions](https://cloud.google.com/compute/docs/labeling-resources#restrictions)
+          # page.
         &quot;a_key&quot;: &quot;A String&quot;,
       },
-      &quot;createTime&quot;: &quot;A String&quot;, # The timestamp when the job was initially created. Immutable and set by the
-          # Cloud Dataflow service.
-      &quot;requestedState&quot;: &quot;A String&quot;, # The job&#x27;s requested state.
-          #
-          # `UpdateJob` may be used to switch between the `JOB_STATE_STOPPED` and
-          # `JOB_STATE_RUNNING` states, by setting requested_state.  `UpdateJob` may
-          # also be used to directly set a job&#x27;s requested state to
-          # `JOB_STATE_CANCELLED` or `JOB_STATE_DONE`, irrevocably terminating the
-          # job if it has not already reached a terminal state.
+      &quot;machineType&quot;: &quot;A String&quot;, # The machine type to use for the job. Defaults to the value from the
+          # template if not specified.
+      &quot;ipConfiguration&quot;: &quot;A String&quot;, # Configuration for VM IPs.
+      &quot;kmsKeyName&quot;: &quot;A String&quot;, # Optional. Name for the Cloud KMS key for the job.
+          # Key format is:
+          # projects/&lt;project&gt;/locations/&lt;location&gt;/keyRings/&lt;keyring&gt;/cryptoKeys/&lt;key&gt;
     },
+    &quot;transformNameMapping&quot;: { # Only applicable when updating a pipeline. Map of transform name prefixes of
+        # the job to be replaced to the corresponding name prefixes of the new job.
+      &quot;a_key&quot;: &quot;A String&quot;,
+    },
+    &quot;update&quot;: True or False, # If set, replace the existing pipeline with the name specified by jobName
+        # with this pipeline, preserving state.
+    &quot;jobName&quot;: &quot;A String&quot;, # Required. The job name to use for the created job.
+    &quot;parameters&quot;: { # The runtime parameters to pass to the job.
+      &quot;a_key&quot;: &quot;A String&quot;,
+    },
+  }
+
+  dynamicTemplate_gcsPath: string, Path to dynamic template spec file on GCS.
+The file must be a JSON-serialized DynamicTemplateFileSpec object.
+  dynamicTemplate_stagingLocation: string, Cloud Storage path for staging dependencies.
+Must be a valid Cloud Storage URL, beginning with `gs://`.
+  location: string, The [regional endpoint]
+(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to
+which to direct the request.
+  validateOnly: boolean, If true, the request is validated but not actually executed.
+Defaults to false.
+  gcsPath: string, A Cloud Storage path to the template from which to create
+the job.
+Must be a valid Cloud Storage URL, beginning with &#x27;gs://&#x27;.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
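+Example: a minimal sketch using the Python API client. The project ID,
+template path, job name, and parameters below are placeholders, not values
+defined by this API:
+
+    from googleapiclient.discovery import build
+
+    dataflow = build(&#x27;dataflow&#x27;, &#x27;v1b3&#x27;)
+    body = {
+        &#x27;jobName&#x27;: &#x27;example-job&#x27;,
+        &#x27;parameters&#x27;: {&#x27;inputFile&#x27;: &#x27;gs://my-bucket/input.txt&#x27;},
+        &#x27;environment&#x27;: {&#x27;tempLocation&#x27;: &#x27;gs://my-bucket/tmp&#x27;},
+    }
+    response = dataflow.projects().templates().launch(
+        projectId=&#x27;my-project&#x27;,
+        gcsPath=&#x27;gs://my-bucket/templates/my-template&#x27;,
+        body=body).execute()
+    # &#x27;job&#x27; is absent when the request is a dry run (validateOnly=True).
+    job = response.get(&#x27;job&#x27;, {})
+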
+Returns:
+  An object of the form:
+
+    { # Response to the request to launch a template.
+    &quot;job&quot;: { # Defines a job to be run by the Cloud Dataflow service. # The job that was launched, if the request was not a dry run and
+        # the job was successfully launched.
+        &quot;pipelineDescription&quot;: { # A descriptive representation of submitted pipeline as well as the executed # Preliminary field: The format of this data may change at any time.
+            # A description of the user pipeline and stages through which it is executed.
+            # Created by Cloud Dataflow service.  Only retrieved with
+            # JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL.
+            # form.  This data is provided by the Dataflow service for ease of visualizing
+            # the pipeline and interpreting Dataflow provided metrics.
+          &quot;displayData&quot;: [ # Pipeline level display data.
+            { # Data provided with a pipeline or transform to provide descriptive info.
+              &quot;url&quot;: &quot;A String&quot;, # An optional full URL.
+              &quot;javaClassValue&quot;: &quot;A String&quot;, # Contains value if the data is of java class type.
+              &quot;timestampValue&quot;: &quot;A String&quot;, # Contains value if the data is of timestamp type.
+              &quot;durationValue&quot;: &quot;A String&quot;, # Contains value if the data is of duration type.
+              &quot;label&quot;: &quot;A String&quot;, # An optional label to display in a dax UI for the element.
+              &quot;key&quot;: &quot;A String&quot;, # The key identifying the display data.
+                  # This is intended to be used as a label for the display data
+                  # when viewed in a dax monitoring system.
+              &quot;namespace&quot;: &quot;A String&quot;, # The namespace for the key. This is usually a class name or programming
+                  # language namespace (i.e. python module) which defines the display data.
+                  # This allows a dax monitoring system to specially handle the data
+                  # and perform custom rendering.
+              &quot;floatValue&quot;: 3.14, # Contains value if the data is of float type.
+              &quot;strValue&quot;: &quot;A String&quot;, # Contains value if the data is of string type.
+              &quot;int64Value&quot;: &quot;A String&quot;, # Contains value if the data is of int64 type.
+              &quot;boolValue&quot;: True or False, # Contains value if the data is of a boolean type.
+              &quot;shortStrValue&quot;: &quot;A String&quot;, # A possible additional shorter value to display.
+                  # For example a java_class_name_value of com.mypackage.MyDoFn
+                  # will be stored with MyDoFn as the short_str_value and
+                  # com.mypackage.MyDoFn as the java_class_name value.
+                  # short_str_value can be displayed and java_class_name_value
+                  # will be displayed as a tooltip.
+            },
+          ],
+          &quot;originalPipelineTransform&quot;: [ # Description of each transform in the pipeline and collections between them.
+            { # Description of the type, names/ids, and input/outputs for a transform.
+              &quot;outputCollectionName&quot;: [ # User names for all collection outputs to this transform.
+                &quot;A String&quot;,
+              ],
+              &quot;displayData&quot;: [ # Transform-specific display data.
+                { # Data provided with a pipeline or transform to provide descriptive info.
+                  &quot;url&quot;: &quot;A String&quot;, # An optional full URL.
+                  &quot;javaClassValue&quot;: &quot;A String&quot;, # Contains value if the data is of java class type.
+                  &quot;timestampValue&quot;: &quot;A String&quot;, # Contains value if the data is of timestamp type.
+                  &quot;durationValue&quot;: &quot;A String&quot;, # Contains value if the data is of duration type.
+                  &quot;label&quot;: &quot;A String&quot;, # An optional label to display in a dax UI for the element.
+                  &quot;key&quot;: &quot;A String&quot;, # The key identifying the display data.
+                      # This is intended to be used as a label for the display data
+                      # when viewed in a dax monitoring system.
+                  &quot;namespace&quot;: &quot;A String&quot;, # The namespace for the key. This is usually a class name or programming
+                      # language namespace (i.e. python module) which defines the display data.
+                      # This allows a dax monitoring system to specially handle the data
+                      # and perform custom rendering.
+                  &quot;floatValue&quot;: 3.14, # Contains value if the data is of float type.
+                  &quot;strValue&quot;: &quot;A String&quot;, # Contains value if the data is of string type.
+                  &quot;int64Value&quot;: &quot;A String&quot;, # Contains value if the data is of int64 type.
+                  &quot;boolValue&quot;: True or False, # Contains value if the data is of a boolean type.
+                  &quot;shortStrValue&quot;: &quot;A String&quot;, # A possible additional shorter value to display.
+                      # For example a java_class_name_value of com.mypackage.MyDoFn
+                      # will be stored with MyDoFn as the short_str_value and
+                      # com.mypackage.MyDoFn as the java_class_name value.
+                      # short_str_value can be displayed and java_class_name_value
+                      # will be displayed as a tooltip.
+                },
+              ],
+              &quot;id&quot;: &quot;A String&quot;, # SDK generated id of this transform instance.
+              &quot;inputCollectionName&quot;: [ # User names for all collection inputs to this transform.
+                &quot;A String&quot;,
+              ],
+              &quot;name&quot;: &quot;A String&quot;, # User provided name for this transform instance.
+              &quot;kind&quot;: &quot;A String&quot;, # Type of transform.
+            },
+          ],
+          &quot;executionPipelineStage&quot;: [ # Description of each stage of execution of the pipeline.
+            { # Description of the composing transforms, names/ids, and input/outputs of a
+                # stage of execution.  Some composing transforms and sources may have been
+                # generated by the Dataflow service during execution planning.
+              &quot;componentSource&quot;: [ # Collections produced and consumed by component transforms of this stage.
+                { # Description of an interstitial value between transforms in an execution
+                    # stage.
+                  &quot;userName&quot;: &quot;A String&quot;, # Human-readable name for this transform; may be user or system generated.
+                  &quot;name&quot;: &quot;A String&quot;, # Dataflow service generated name for this source.
+                  &quot;originalTransformOrCollection&quot;: &quot;A String&quot;, # User name for the original user transform or collection with which this
+                      # source is most closely associated.
+                },
+              ],
+              &quot;inputSource&quot;: [ # Input sources for this stage.
+                { # Description of an input or output of an execution stage.
+                  &quot;userName&quot;: &quot;A String&quot;, # Human-readable name for this source; may be user or system generated.
+                  &quot;originalTransformOrCollection&quot;: &quot;A String&quot;, # User name for the original user transform or collection with which this
+                      # source is most closely associated.
+                  &quot;sizeBytes&quot;: &quot;A String&quot;, # Size of the source, if measurable.
+                  &quot;name&quot;: &quot;A String&quot;, # Dataflow service generated name for this source.
+                },
+              ],
+              &quot;name&quot;: &quot;A String&quot;, # Dataflow service generated name for this stage.
+              &quot;componentTransform&quot;: [ # Transforms that comprise this execution stage.
+                { # Description of a transform executed as part of an execution stage.
+                  &quot;name&quot;: &quot;A String&quot;, # Dataflow service generated name for this source.
+                  &quot;userName&quot;: &quot;A String&quot;, # Human-readable name for this transform; may be user or system generated.
+                  &quot;originalTransform&quot;: &quot;A String&quot;, # User name for the original user transform with which this transform is
+                      # most closely associated.
+                },
+              ],
+              &quot;id&quot;: &quot;A String&quot;, # Dataflow service generated id for this stage.
+              &quot;outputSource&quot;: [ # Output sources for this stage.
+                { # Description of an input or output of an execution stage.
+                  &quot;userName&quot;: &quot;A String&quot;, # Human-readable name for this source; may be user or system generated.
+                  &quot;originalTransformOrCollection&quot;: &quot;A String&quot;, # User name for the original user transform or collection with which this
+                      # source is most closely associated.
+                  &quot;sizeBytes&quot;: &quot;A String&quot;, # Size of the source, if measurable.
+                  &quot;name&quot;: &quot;A String&quot;, # Dataflow service generated name for this source.
+                },
+              ],
+              &quot;kind&quot;: &quot;A String&quot;, # Type of transform this stage is executing.
+            },
+          ],
+        },
+        &quot;labels&quot;: { # User-defined labels for this job.
+            #
+            # The labels map can contain no more than 64 entries.  Entries of the labels
+            # map are UTF8 strings that comply with the following restrictions:
+            #
+            # * Keys must conform to regexp:  \p{Ll}\p{Lo}{0,62}
+            # * Values must conform to regexp:  [\p{Ll}\p{Lo}\p{N}_-]{0,63}
+            # * Both keys and values are additionally constrained to be &lt;= 128 bytes in
+            # size.
+          &quot;a_key&quot;: &quot;A String&quot;,
+        },
+        &quot;projectId&quot;: &quot;A String&quot;, # The ID of the Cloud Platform project that the job belongs to.
+        &quot;environment&quot;: { # Describes the environment in which a Dataflow Job runs. # The environment for the job.
+          &quot;flexResourceSchedulingGoal&quot;: &quot;A String&quot;, # Which Flexible Resource Scheduling mode to run in.
+          &quot;workerRegion&quot;: &quot;A String&quot;, # The Compute Engine region
+              # (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in
+              # which worker processing should occur, e.g. &quot;us-west1&quot;. Mutually exclusive
+              # with worker_zone. If neither worker_region nor worker_zone is specified,
+              # default to the control plane&#x27;s region.
+          &quot;userAgent&quot;: { # A description of the process that generated the request.
+            &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
+          },
+          &quot;serviceAccountEmail&quot;: &quot;A String&quot;, # Identity to run virtual machines as. Defaults to the default account.
+          &quot;version&quot;: { # A structure describing which components and their versions of the service
+              # are required in order to run the job.
+            &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
+          },
+          &quot;serviceKmsKeyName&quot;: &quot;A String&quot;, # If set, contains the Cloud KMS key identifier used to encrypt data
+              # at rest, AKA a Customer Managed Encryption Key (CMEK).
+              #
+              # Format:
+              #   projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY
+          &quot;experiments&quot;: [ # The list of experiments to enable.
+            &quot;A String&quot;,
+          ],
+          &quot;workerZone&quot;: &quot;A String&quot;, # The Compute Engine zone
+              # (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in
+              # which worker processing should occur, e.g. &quot;us-west1-a&quot;. Mutually exclusive
+              # with worker_region. If neither worker_region nor worker_zone is specified,
+              # a zone in the control plane&#x27;s region is chosen based on available capacity.
+          &quot;workerPools&quot;: [ # The worker pools. At least one &quot;harness&quot; worker pool must be
+              # specified in order for the job to have workers.
+            { # Describes one particular pool of Cloud Dataflow workers to be
+                # instantiated by the Cloud Dataflow service in order to perform the
+                # computations required by a job.  Note that a workflow job may use
+                # multiple pools, in order to match the various computational
+                # requirements of the various stages of the job.
+              &quot;onHostMaintenance&quot;: &quot;A String&quot;, # The action to take on host maintenance, as defined by the Google
+                  # Compute Engine API.
+              &quot;sdkHarnessContainerImages&quot;: [ # Set of SDK harness containers needed to execute this pipeline. This will
+                  # only be set in the Fn API path. For non-cross-language pipelines this
+                  # should have only one entry. Cross-language pipelines will have two or more
+                  # entries.
+                { # Defines an SDK harness container for executing Dataflow pipelines.
+                  &quot;containerImage&quot;: &quot;A String&quot;, # A docker container image that resides in Google Container Registry.
+                  &quot;useSingleCorePerContainer&quot;: True or False, # If true, recommends that the Dataflow service use only one core per SDK
+                      # container instance with this image. If false (or unset), recommends using
+                      # more than one core per SDK container instance with this image for
+                      # efficiency. Note that the Dataflow service may choose to override this
+                      # property if needed.
+                },
+              ],
+              &quot;zone&quot;: &quot;A String&quot;, # Zone to run the worker pools in.  If empty or unspecified, the service
+                  # will attempt to choose a reasonable default.
+              &quot;kind&quot;: &quot;A String&quot;, # The kind of the worker pool; currently only `harness` and `shuffle`
+                  # are supported.
+              &quot;metadata&quot;: { # Metadata to set on the Google Compute Engine VMs.
+                &quot;a_key&quot;: &quot;A String&quot;,
+              },
+              &quot;diskSourceImage&quot;: &quot;A String&quot;, # Fully qualified source image for disks.
+              &quot;dataDisks&quot;: [ # Data disks that are used by a VM in this workflow.
+                { # Describes the data disk used by a workflow job.
+                  &quot;sizeGb&quot;: 42, # Size of disk in GB.  If zero or unspecified, the service will
+                      # attempt to choose a reasonable default.
+                  &quot;diskType&quot;: &quot;A String&quot;, # Disk storage type, as defined by Google Compute Engine.  This
+                      # must be a disk type appropriate to the project and zone in which
+                      # the workers will run.  If unknown or unspecified, the service
+                      # will attempt to choose a reasonable default.
+                      #
+                      # For example, the standard persistent disk type is a resource name
+                      # typically ending in &quot;pd-standard&quot;.  If SSD persistent disks are
+                      # available, the resource name typically ends with &quot;pd-ssd&quot;.  The
+                      # actual valid values are defined the Google Compute Engine API,
+                      # not by the Cloud Dataflow API; consult the Google Compute Engine
+                      # documentation for more information about determining the set of
+                      # available disk types for a particular project and zone.
+                      #
+                      # Google Compute Engine Disk types are local to a particular
+                      # project in a particular zone, and so the resource name will
+                      # typically look something like this:
+                      #
+                      # compute.googleapis.com/projects/project-id/zones/zone/diskTypes/pd-standard
+                  &quot;mountPoint&quot;: &quot;A String&quot;, # Directory in a VM where disk is mounted.
+                },
+              ],
+              &quot;packages&quot;: [ # Packages to be installed on workers.
+                { # The packages that must be installed in order for a worker to run the
+                    # steps of the Cloud Dataflow job that will be assigned to its worker
+                    # pool.
+                    #
+                    # This is the mechanism by which the Cloud Dataflow SDK causes code to
+                    # be loaded onto the workers. For example, the Cloud Dataflow Java SDK
+                    # might use this to install jars containing the user&#x27;s code and all of the
+                    # various dependencies (libraries, data files, etc.) required in order
+                    # for that code to run.
+                  &quot;name&quot;: &quot;A String&quot;, # The name of the package.
+                  &quot;location&quot;: &quot;A String&quot;, # The resource to read the package from. The supported resource type is:
+                      #
+                      # Google Cloud Storage:
+                      #
+                      #   storage.googleapis.com/{bucket}
+                      #   bucket.storage.googleapis.com/
+                },
+              ],
+              &quot;teardownPolicy&quot;: &quot;A String&quot;, # Sets the policy for determining when to turn down the worker pool.
+                  # Allowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and
+                  # `TEARDOWN_NEVER`.
+                  # `TEARDOWN_ALWAYS` means workers are always torn down regardless of whether
+                  # the job succeeds. `TEARDOWN_ON_SUCCESS` means workers are torn down
+                  # if the job succeeds. `TEARDOWN_NEVER` means the workers are never torn
+                  # down.
+                  #
+                  # If the workers are not torn down by the service, they will
+                  # continue to run and use Google Compute Engine VM resources in the
+                  # user&#x27;s project until they are explicitly terminated by the user.
+                  # Because of this, Google recommends using the `TEARDOWN_ALWAYS`
+                  # policy except for small, manually supervised test jobs.
+                  #
+                  # If unknown or unspecified, the service will attempt to choose a reasonable
+                  # default.
+              &quot;network&quot;: &quot;A String&quot;, # Network to which VMs will be assigned.  If empty or unspecified,
+                  # the service will use the network &quot;default&quot;.
+              &quot;ipConfiguration&quot;: &quot;A String&quot;, # Configuration for VM IPs.
+              &quot;diskSizeGb&quot;: 42, # Size of root disk for VMs, in GB.  If zero or unspecified, the service will
+                  # attempt to choose a reasonable default.
+              &quot;autoscalingSettings&quot;: { # Settings for WorkerPool autoscaling. # Settings for autoscaling of this WorkerPool.
+                &quot;maxNumWorkers&quot;: 42, # The maximum number of workers to cap scaling at.
+                &quot;algorithm&quot;: &quot;A String&quot;, # The algorithm to use for autoscaling.
+              },
+              &quot;poolArgs&quot;: { # Extra arguments for this worker pool.
+                &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
+              },
+              &quot;subnetwork&quot;: &quot;A String&quot;, # Subnetwork to which VMs will be assigned, if desired.  Expected to be of
+                  # the form &quot;regions/REGION/subnetworks/SUBNETWORK&quot;.
+              &quot;numWorkers&quot;: 42, # Number of Google Compute Engine workers in this pool needed to
+                  # execute the job.  If zero or unspecified, the service will
+                  # attempt to choose a reasonable default.
+              &quot;numThreadsPerWorker&quot;: 42, # The number of threads per worker harness. If empty or unspecified, the
+                  # service will choose a number of threads (according to the number of cores
+                  # on the selected machine type for batch, or 1 by convention for streaming).
+              &quot;workerHarnessContainerImage&quot;: &quot;A String&quot;, # Required. Docker container image that executes the Cloud Dataflow worker
+                  # harness, residing in Google Container Registry.
+                  #
+                  # Deprecated for the Fn API path. Use sdk_harness_container_images instead.
+              &quot;taskrunnerSettings&quot;: { # Taskrunner configuration settings. # Settings passed through to Google Compute Engine workers when
+                  # using the standard Dataflow task runner.  Users should ignore
+                  # this field.
+                &quot;dataflowApiVersion&quot;: &quot;A String&quot;, # The API version of the endpoint, e.g. &quot;v1b3&quot;.
+                &quot;oauthScopes&quot;: [ # The OAuth2 scopes to be requested by the taskrunner in order to
+                    # access the Cloud Dataflow API.
+                  &quot;A String&quot;,
+                ],
+                &quot;baseUrl&quot;: &quot;A String&quot;, # The base URL for the taskrunner to use when accessing Google Cloud APIs.
+                    #
+                    # When workers access Google Cloud APIs, they logically do so via
+                    # relative URLs.  If this field is specified, it supplies the base
+                    # URL to use for resolving these relative URLs.  The normative
+                    # algorithm used is defined by RFC 1808, &quot;Relative Uniform Resource
+                    # Locators&quot;.
+                    #
+                    # If not specified, the default value is &quot;http://www.googleapis.com/&quot;
+                &quot;workflowFileName&quot;: &quot;A String&quot;, # The file to store the workflow in.
+                &quot;logToSerialconsole&quot;: True or False, # Whether to send taskrunner log info to Google Compute Engine VM serial
+                    # console.
+                &quot;baseTaskDir&quot;: &quot;A String&quot;, # The location on the worker for task-specific subdirectories.
+                &quot;taskUser&quot;: &quot;A String&quot;, # The UNIX user ID on the worker VM to use for tasks launched by
+                    # taskrunner; e.g. &quot;root&quot;.
+                &quot;vmId&quot;: &quot;A String&quot;, # The ID string of the VM.
+                &quot;alsologtostderr&quot;: True or False, # Whether to also send taskrunner log info to stderr.
+                &quot;parallelWorkerSettings&quot;: { # Provides data to pass through to the worker harness. # The settings to pass to the parallel worker harness.
+                  &quot;shuffleServicePath&quot;: &quot;A String&quot;, # The Shuffle service path relative to the root URL, for example,
+                      # &quot;shuffle/v1beta1&quot;.
+                  &quot;tempStoragePrefix&quot;: &quot;A String&quot;, # The prefix of the resources the system should use for temporary
+                      # storage.
+                      #
+                      # The supported resource type is:
+                      #
+                      # Google Cloud Storage:
+                      #
+                      #   storage.googleapis.com/{bucket}/{object}
+                      #   bucket.storage.googleapis.com/{object}
+                  &quot;reportingEnabled&quot;: True or False, # Whether to send work progress updates to the service.
+                  &quot;servicePath&quot;: &quot;A String&quot;, # The Cloud Dataflow service path relative to the root URL, for example,
+                      # &quot;dataflow/v1b3/projects&quot;.
+                  &quot;baseUrl&quot;: &quot;A String&quot;, # The base URL for accessing Google Cloud APIs.
+                      #
+                      # When workers access Google Cloud APIs, they logically do so via
+                      # relative URLs.  If this field is specified, it supplies the base
+                      # URL to use for resolving these relative URLs.  The normative
+                      # algorithm used is defined by RFC 1808, &quot;Relative Uniform Resource
+                      # Locators&quot;.
+                      #
+                      # If not specified, the default value is &quot;http://www.googleapis.com/&quot;
+                  &quot;workerId&quot;: &quot;A String&quot;, # The ID of the worker running this pipeline.
+                },
+                &quot;harnessCommand&quot;: &quot;A String&quot;, # The command to launch the worker harness.
+                &quot;logDir&quot;: &quot;A String&quot;, # The directory on the VM to store logs.
+                &quot;streamingWorkerMainClass&quot;: &quot;A String&quot;, # The streaming worker main class name.
+                &quot;languageHint&quot;: &quot;A String&quot;, # The suggested backend language.
+                &quot;taskGroup&quot;: &quot;A String&quot;, # The UNIX group ID on the worker VM to use for tasks launched by
+                    # taskrunner; e.g. &quot;wheel&quot;.
+                &quot;logUploadLocation&quot;: &quot;A String&quot;, # Indicates where to put logs.  If this is not specified, the logs
+                    # will not be uploaded.
+                    #
+                    # The supported resource type is:
+                    #
+                    # Google Cloud Storage:
+                    #   storage.googleapis.com/{bucket}/{object}
+                    #   bucket.storage.googleapis.com/{object}
+                &quot;commandlinesFileName&quot;: &quot;A String&quot;, # The file to store preprocessing commands in.
+                &quot;continueOnException&quot;: True or False, # Whether to continue taskrunner if an exception is hit.
+                &quot;tempStoragePrefix&quot;: &quot;A String&quot;, # The prefix of the resources the taskrunner should use for
+                    # temporary storage.
+                    #
+                    # The supported resource type is:
+                    #
+                    # Google Cloud Storage:
+                    #   storage.googleapis.com/{bucket}/{object}
+                    #   bucket.storage.googleapis.com/{object}
+              },
+              &quot;diskType&quot;: &quot;A String&quot;, # Type of root disk for VMs.  If empty or unspecified, the service will
+                  # attempt to choose a reasonable default.
+              &quot;defaultPackageSet&quot;: &quot;A String&quot;, # The default package set to install.  This allows the service to
+                  # select a default set of packages which are useful to worker
+                  # harnesses written in a particular language.
+              &quot;machineType&quot;: &quot;A String&quot;, # Machine type (e.g. &quot;n1-standard-1&quot;).  If empty or unspecified, the
+                  # service will attempt to choose a reasonable default.
+            },
+          ],
+          &quot;tempStoragePrefix&quot;: &quot;A String&quot;, # The prefix of the resources the system should use for temporary
+              # storage.  The system will append the suffix &quot;/temp-{JOBNAME}&quot; to
+              # this resource prefix, where {JOBNAME} is the value of the
+              # job_name field.  The resulting bucket and object prefix is used
+              # as the prefix of the resources used to store temporary data
+              # needed during the job execution.  NOTE: This will override the
+              # value in taskrunner_settings.
+              # The supported resource type is:
+              #
+              # Google Cloud Storage:
+              #
+              #   storage.googleapis.com/{bucket}/{object}
+              #   bucket.storage.googleapis.com/{object}
+          &quot;internalExperiments&quot;: { # Experimental settings.
+            &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
+          },
+          &quot;sdkPipelineOptions&quot;: { # The Cloud Dataflow SDK pipeline options specified by the user. These
+              # options are passed through the service and are used to recreate the
+              # SDK pipeline options on the worker in a language agnostic and platform
+              # independent way.
+            &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
+          },
+          &quot;dataset&quot;: &quot;A String&quot;, # The dataset for the current project where various workflow
+              # related tables are stored.
+              #
+              # The supported resource type is:
+              #
+              # Google BigQuery:
+              #   bigquery.googleapis.com/{dataset}
+          &quot;clusterManagerApiService&quot;: &quot;A String&quot;, # The type of cluster manager API to use.  If unknown or
+              # unspecified, the service will attempt to choose a reasonable
+              # default.  This should be in the form of the API service name,
+              # e.g. &quot;compute.googleapis.com&quot;.
+        },
+        &quot;stepsLocation&quot;: &quot;A String&quot;, # The GCS location where the steps are stored.
+        &quot;steps&quot;: [ # Exactly one of step or steps_location should be specified.
+            #
+            # The top-level steps that constitute the entire job.
+          { # Defines a particular step within a Cloud Dataflow job.
+              #
+              # A job consists of multiple steps, each of which performs some
+              # specific operation as part of the overall job.  Data is typically
+              # passed from one step to another as part of the job.
+              #
+              # Here&#x27;s an example of a sequence of steps which together implement a
+              # Map-Reduce job:
+              #
+              #   * Read a collection of data from some source, parsing the
+              #     collection&#x27;s elements.
+              #
+              #   * Validate the elements.
+              #
+              #   * Apply a user-defined function to map each element to some value
+              #     and extract an element-specific key value.
+              #
+              #   * Group elements with the same key into a single element with
+              #     that key, transforming a multiply-keyed collection into a
+              #     uniquely-keyed collection.
+              #
+              #   * Write the elements out to some data sink.
+              #
+              # Note that the Cloud Dataflow service may be used to run many different
+              # types of jobs, not just Map-Reduce.
+            &quot;kind&quot;: &quot;A String&quot;, # The kind of step in the Cloud Dataflow job.
+            &quot;properties&quot;: { # Named properties associated with the step. Each kind of
+                # predefined step has its own required set of properties.
+                # Must be provided on Create.  Only retrieved with JOB_VIEW_ALL.
+              &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
+            },
+            &quot;name&quot;: &quot;A String&quot;, # The name that identifies the step. This must be unique for each
+                # step with respect to all other steps in the Cloud Dataflow job.
+          },
+        ],
+        &quot;stageStates&quot;: [ # This field may be mutated by the Cloud Dataflow service;
+            # callers cannot mutate it.
+          { # A message describing the state of a particular execution stage.
+            &quot;executionStageState&quot;: &quot;A String&quot;, # Execution stage states allow the same set of values as JobState.
+            &quot;executionStageName&quot;: &quot;A String&quot;, # The name of the execution stage.
+            &quot;currentStateTime&quot;: &quot;A String&quot;, # The time at which the stage transitioned to this state.
+          },
+        ],
+        &quot;replacedByJobId&quot;: &quot;A String&quot;, # If another job is an update of this job (and thus, this job is in
+            # `JOB_STATE_UPDATED`), this field contains the ID of that job.
+        &quot;jobMetadata&quot;: { # Metadata available primarily for filtering jobs. Will be included in the # This field is populated by the Dataflow service to support filtering jobs
+            # by the metadata values provided here. Populated for ListJobs and all GetJob
+            # views SUMMARY and higher.
+            # ListJob response and Job SUMMARY view.
+          &quot;sdkVersion&quot;: { # The version of the SDK used to run the job. # The SDK version used to run the job.
+            &quot;sdkSupportStatus&quot;: &quot;A String&quot;, # The support status for this SDK version.
+            &quot;versionDisplayName&quot;: &quot;A String&quot;, # A readable string describing the version of the SDK.
+            &quot;version&quot;: &quot;A String&quot;, # The version of the SDK used to run the job.
+          },
+          &quot;bigTableDetails&quot;: [ # Identification of a BigTable source used in the Dataflow job.
+            { # Metadata for a BigTable connector used by the job.
+              &quot;instanceId&quot;: &quot;A String&quot;, # InstanceId accessed in the connection.
+              &quot;tableId&quot;: &quot;A String&quot;, # TableId accessed in the connection.
+              &quot;projectId&quot;: &quot;A String&quot;, # ProjectId accessed in the connection.
+            },
+          ],
+          &quot;pubsubDetails&quot;: [ # Identification of a PubSub source used in the Dataflow job.
+            { # Metadata for a PubSub connector used by the job.
+              &quot;subscription&quot;: &quot;A String&quot;, # Subscription used in the connection.
+              &quot;topic&quot;: &quot;A String&quot;, # Topic accessed in the connection.
+            },
+          ],
+          &quot;bigqueryDetails&quot;: [ # Identification of a BigQuery source used in the Dataflow job.
+            { # Metadata for a BigQuery connector used by the job.
+              &quot;dataset&quot;: &quot;A String&quot;, # Dataset accessed in the connection.
+              &quot;projectId&quot;: &quot;A String&quot;, # Project accessed in the connection.
+              &quot;query&quot;: &quot;A String&quot;, # Query used to access data in the connection.
+              &quot;table&quot;: &quot;A String&quot;, # Table accessed in the connection.
+            },
+          ],
+          &quot;fileDetails&quot;: [ # Identification of a File source used in the Dataflow job.
+            { # Metadata for a File connector used by the job.
+              &quot;filePattern&quot;: &quot;A String&quot;, # File Pattern used to access files by the connector.
+            },
+          ],
+          &quot;datastoreDetails&quot;: [ # Identification of a Datastore source used in the Dataflow job.
+            { # Metadata for a Datastore connector used by the job.
+              &quot;namespace&quot;: &quot;A String&quot;, # Namespace used in the connection.
+              &quot;projectId&quot;: &quot;A String&quot;, # ProjectId accessed in the connection.
+            },
+          ],
+          &quot;spannerDetails&quot;: [ # Identification of a Spanner source used in the Dataflow job.
+            { # Metadata for a Spanner connector used by the job.
+              &quot;instanceId&quot;: &quot;A String&quot;, # InstanceId accessed in the connection.
+              &quot;databaseId&quot;: &quot;A String&quot;, # DatabaseId accessed in the connection.
+              &quot;projectId&quot;: &quot;A String&quot;, # ProjectId accessed in the connection.
+            },
+          ],
+        },
+        &quot;location&quot;: &quot;A String&quot;, # The [regional endpoint]
+            # (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that
+            # contains this job.
+        &quot;transformNameMapping&quot;: { # The map of transform name prefixes of the job to be replaced to the
+            # corresponding name prefixes of the new job.
+          &quot;a_key&quot;: &quot;A String&quot;,
+        },
+        &quot;startTime&quot;: &quot;A String&quot;, # The timestamp when the job was started (transitioned to JOB_STATE_PENDING).
+            # Flexible resource scheduling jobs are started with some delay after job
+            # creation, so start_time is unset before start and is updated when the
+            # job is started by the Cloud Dataflow service. For other jobs, start_time
+            # always equals create_time and is immutable and set by the Cloud Dataflow
+            # service.
+        &quot;clientRequestId&quot;: &quot;A String&quot;, # The client&#x27;s unique identifier of the job, re-used across retried attempts.
+            # If this field is set, the service will ensure its uniqueness.
+            # The request to create a job will fail if the service has knowledge of a
+            # previously submitted job with the same client&#x27;s ID and job name.
+            # The caller may use this field to ensure idempotence of job
+            # creation across retried attempts to create a job.
+            # By default, the field is empty and, in that case, the service ignores it.
+        &quot;executionInfo&quot;: { # Additional information about how a Cloud Dataflow job will be executed that # Deprecated.
+            # isn&#x27;t contained in the submitted job.
+          &quot;stages&quot;: { # A mapping from each stage to the information about that stage.
+            &quot;a_key&quot;: { # Contains information about how a particular
+                # google.dataflow.v1beta3.Step will be executed.
+              &quot;stepName&quot;: [ # The steps associated with the execution stage.
+                  # Note that stages may have several steps, and that a given step
+                  # might be run by more than one stage.
+                &quot;A String&quot;,
+              ],
+            },
+          },
+        },
+        &quot;type&quot;: &quot;A String&quot;, # The type of Cloud Dataflow job.
+        &quot;createTime&quot;: &quot;A String&quot;, # The timestamp when the job was initially created. Immutable and set by the
+            # Cloud Dataflow service.
+        &quot;tempFiles&quot;: [ # A set of files the system should be aware of that are used
+            # for temporary storage. These temporary files will be
+            # removed on job completion.
+            # No duplicates are allowed.
+            # No file patterns are supported.
+            #
+            # The supported files are:
+            #
+            # Google Cloud Storage:
+            #
+            #    storage.googleapis.com/{bucket}/{object}
+            #    bucket.storage.googleapis.com/{object}
+          &quot;A String&quot;,
+        ],
+        &quot;id&quot;: &quot;A String&quot;, # The unique ID of this job.
+            #
+            # This field is set by the Cloud Dataflow service when the Job is
+            # created, and is immutable for the life of the job.
+        &quot;requestedState&quot;: &quot;A String&quot;, # The job&#x27;s requested state.
+            #
+            # `UpdateJob` may be used to switch between the `JOB_STATE_STOPPED` and
+            # `JOB_STATE_RUNNING` states, by setting requested_state.  `UpdateJob` may
+            # also be used to directly set a job&#x27;s requested state to
+            # `JOB_STATE_CANCELLED` or `JOB_STATE_DONE`, irrevocably terminating the
+            # job if it has not already reached a terminal state.
+        &quot;replaceJobId&quot;: &quot;A String&quot;, # If this job is an update of an existing job, this field is the job ID
+            # of the job it replaced.
+            #
+            # When sending a `CreateJobRequest`, you can update a job by specifying it
+            # here. The job named here is stopped, and its intermediate state is
+            # transferred to this job.
+        &quot;createdFromSnapshotId&quot;: &quot;A String&quot;, # If this is specified, the job&#x27;s initial state is populated from the given
+            # snapshot.
+        &quot;currentState&quot;: &quot;A String&quot;, # The current state of the job.
+            #
+            # Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise
+            # specified.
+            #
+            # A job in the `JOB_STATE_RUNNING` state may asynchronously enter a
+            # terminal state. After a job has reached a terminal state, no
+            # further state updates may be made.
+            #
+            # This field may be mutated by the Cloud Dataflow service;
+            # callers cannot mutate it.
+        &quot;name&quot;: &quot;A String&quot;, # The user-specified Cloud Dataflow job name.
+            #
+            # Only one Job with a given name may exist in a project at any
+            # given time. If a caller attempts to create a Job with the same
+            # name as an already-existing Job, the attempt returns the
+            # existing Job.
+            #
+            # The name must match the regular expression
+            # `[a-z]([-a-z0-9]{0,38}[a-z0-9])?`
+        &quot;currentStateTime&quot;: &quot;A String&quot;, # The timestamp associated with the current state.
+      },
   }</pre>
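+<p>Illustration only, not generated from the API surface: a minimal sketch of launching a template and reading the Job fields documented above from the response. It assumes google-api-python-client is installed with application-default credentials available; the project ID, bucket path, and job name below are hypothetical placeholders, not values from this reference.</p>
+<pre>from googleapiclient.discovery import build
+
+PROJECT_ID = &#x27;my-project&#x27;                             # hypothetical placeholder
+TEMPLATE_PATH = &#x27;gs://my-bucket/templates/wordcount&#x27;  # hypothetical placeholder
+
+dataflow = build(&#x27;dataflow&#x27;, &#x27;v1b3&#x27;)
+
+response = dataflow.projects().templates().launch(
+    projectId=PROJECT_ID,
+    gcsPath=TEMPLATE_PATH,
+    body={&#x27;jobName&#x27;: &#x27;example-job&#x27;},
+).execute()
+
+# The response nests the Job resource documented above; fields such as the
+# immutable job id, currentState, and currentStateTime are set by the service.
+job = response.get(&#x27;job&#x27;, {})
+print(job.get(&#x27;id&#x27;), job.get(&#x27;currentState&#x27;), job.get(&#x27;currentStateTime&#x27;))</pre>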
 </div>
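+<p>Similarly illustrative: the job name constraint quoted in the schema above can be checked locally before launching. The pattern is copied verbatim from the <code>name</code> field description; everything else in the snippet is a hypothetical example.</p>
+<pre>import re
+
+# Regular expression quoted in the `name` field description above.
+JOB_NAME_RE = re.compile(r&#x27;[a-z]([-a-z0-9]{0,38}[a-z0-9])?&#x27;)
+
+print(bool(JOB_NAME_RE.fullmatch(&#x27;example-job&#x27;)))  # True: lowercase, hyphenated
+print(bool(JOB_NAME_RE.fullmatch(&#x27;Example_Job&#x27;)))  # False: uppercase and underscore</pre>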