build: run docs regen in synth.py (#1059)
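
A minimal sketch of the kind of step this change wires into synth.py, assuming the docs/dyn/*.html reference pages are produced by the repository's describe.py generator and that synth.py simply shells out to it (the actual wiring may differ):

    # synth.py (sketch): regenerate the dynamic HTML reference docs
    # on every library re-synthesis so they track the discovery docs.
    import subprocess

    # describe.py is assumed to walk the discovery documents and
    # rewrite the generated pages under docs/dyn/.
    subprocess.run(["python", "describe.py"], check=True)
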
diff --git a/docs/dyn/dataflow_v1b3.projects.templates.html b/docs/dyn/dataflow_v1b3.projects.templates.html
index c21ccf4..571bfe6 100644
--- a/docs/dyn/dataflow_v1b3.projects.templates.html
+++ b/docs/dyn/dataflow_v1b3.projects.templates.html
@@ -81,10 +81,10 @@
<code><a href="#create">create(projectId, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Creates a Cloud Dataflow job from a template.</p>
<p class="toc_element">
- <code><a href="#get">get(projectId, view=None, gcsPath=None, location=None, x__xgafv=None)</a></code></p>
+ <code><a href="#get">get(projectId, view=None, location=None, gcsPath=None, x__xgafv=None)</a></code></p>
<p class="firstline">Get the template associated with a template.</p>
<p class="toc_element">
- <code><a href="#launch">launch(projectId, body=None, location=None, dynamicTemplate_stagingLocation=None, gcsPath=None, dynamicTemplate_gcsPath=None, validateOnly=None, x__xgafv=None)</a></code></p>
+ <code><a href="#launch">launch(projectId, body=None, dynamicTemplate_stagingLocation=None, dynamicTemplate_gcsPath=None, validateOnly=None, location=None, gcsPath=None, x__xgafv=None)</a></code></p>
<p class="firstline">Launch a template.</p>
<h3>Method Details</h3>
<div class="method">
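
A hedged usage sketch for the methods listed above; "my-project", the region, and the gs:// path are placeholders, and Application Default Credentials are assumed to be configured:

    from googleapiclient.discovery import build

    # Build a Dataflow v1b3 client; credentials resolve via ADC.
    dataflow = build("dataflow", "v1b3")

    # get(): fetch metadata for a template staged in Cloud Storage.
    # Arguments are passed by keyword, so the location/gcsPath
    # reordering in the regenerated signature does not affect callers.
    template = dataflow.projects().templates().get(
        projectId="my-project",
        location="us-central1",
        gcsPath="gs://my-bucket/templates/my-template",
        view="METADATA_ONLY",
    ).execute()
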
@@ -102,33 +102,33 @@
The object takes the form of:
{ # A request to create a Cloud Dataflow job from a template.
+ "gcsPath": "A String", # Required. A Cloud Storage path to the template from which to create the job. Must be a valid Cloud Storage URL, beginning with `gs://`.
"environment": { # The environment values to set at runtime. # The runtime environment for the job.
- "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
- "bypassTempDirValidation": True or False, # Whether to bypass the safety checks for the job's temporary directory. Use with caution.
- "machineType": "A String", # The machine type to use for the job. Defaults to the value from the template if not specified.
- "network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
+ "workerZone": "A String", # The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with worker_region. If neither worker_region nor worker_zone is specified, a zone in the control plane's region is chosen based on available capacity. If both `worker_zone` and `zone` are set, `worker_zone` takes precedence.
+      "kmsKeyName": "A String", # Name for the Cloud KMS key for the job. Key format is: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY
"workerRegion": "A String", # The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with worker_zone. If neither worker_region nor worker_zone is specified, default to the control plane's region.
"additionalUserLabels": { # Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the [labeling restrictions](https://cloud.google.com/compute/docs/labeling-resources#restrictions) page. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1kg", "count": "3" }.
"a_key": "A String",
},
- "zone": "A String", # The Compute Engine [availability zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones) for launching worker instances to run your pipeline. In the future, worker_zone will take precedence.
- "maxWorkers": 42, # The maximum number of Google Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
-      "numWorkers": 42, # The initial number of Google Compute Engine instances for the job.
- "ipConfiguration": "A String", # Configuration for VM IPs.
- "tempLocation": "A String", # The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with `gs://`.
"serviceAccountEmail": "A String", # The email address of the service account to run the job as.
- "enableStreamingEngine": True or False, # Whether to enable Streaming Engine for the job.
-      "kmsKeyName": "A String", # Name for the Cloud KMS key for the job. Key format is: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY
+ "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
"additionalExperiments": [ # Additional experiment flags for the job.
"A String",
],
- "workerZone": "A String", # The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with worker_region. If neither worker_region nor worker_zone is specified, a zone in the control plane's region is chosen based on available capacity. If both `worker_zone` and `zone` are set, `worker_zone` takes precedence.
+ "zone": "A String", # The Compute Engine [availability zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones) for launching worker instances to run your pipeline. In the future, worker_zone will take precedence.
+      "numWorkers": 42, # The initial number of Google Compute Engine instances for the job.
+ "bypassTempDirValidation": True or False, # Whether to bypass the safety checks for the job's temporary directory. Use with caution.
+ "maxWorkers": 42, # The maximum number of Google Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
+ "enableStreamingEngine": True or False, # Whether to enable Streaming Engine for the job.
+ "network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
+ "tempLocation": "A String", # The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with `gs://`.
+ "ipConfiguration": "A String", # Configuration for VM IPs.
+ "machineType": "A String", # The machine type to use for the job. Defaults to the value from the template if not specified.
},
+ "jobName": "A String", # Required. The job name to use for the created job.
"parameters": { # The runtime parameters to pass to the job.
"a_key": "A String",
},
- "jobName": "A String", # Required. The job name to use for the created job.
- "gcsPath": "A String", # Required. A Cloud Storage path to the template from which to create the job. Must be a valid Cloud Storage URL, beginning with `gs://`.
"location": "A String", # The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to which to direct the request.
}
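
Continuing the sketch above, a request body matching this schema; every value is a placeholder (Word_Count is Google's public sample template, swap in your own):

    body = {
        "jobName": "wordcount-run",
        "gcsPath": "gs://dataflow-templates/latest/Word_Count",
        "location": "us-central1",
        "parameters": {
            "inputFile": "gs://my-bucket/input.txt",
            "output": "gs://my-bucket/counts",
        },
        "environment": {
            "tempLocation": "gs://my-bucket/temp",
            "maxWorkers": 3,
        },
    }

    # create() returns the Job resource described in the response
    # schema that follows.
    job = dataflow.projects().templates().create(
        projectId="my-project", body=body
    ).execute()
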
@@ -141,97 +141,6 @@
An object of the form:
{ # Defines a job to be run by the Cloud Dataflow service.
- "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it.
- { # A message describing the state of a particular execution stage.
- "currentStateTime": "A String", # The time at which the stage transitioned to this state.
-        "executionStageState": "A String", # Execution stage states allow the same set of values as JobState.
- "executionStageName": "A String", # The name of the execution stage.
- },
- ],
-    "pipelineDescription": { # A descriptive representation of the submitted pipeline as well as the executed form. This data is provided by the Dataflow service for ease of visualizing the pipeline and interpreting Dataflow-provided metrics. # Preliminary field: The format of this data may change at any time. A description of the user pipeline and stages through which it is executed. Created by the Cloud Dataflow service. Only retrieved with JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL.
- "originalPipelineTransform": [ # Description of each transform in the pipeline and collections between them.
- { # Description of the type, names/ids, and input/outputs for a transform.
- "name": "A String", # User provided name for this transform instance.
- "displayData": [ # Transform-specific display data.
- { # Data provided with a pipeline or transform to provide descriptive info.
- "durationValue": "A String", # Contains value if the data is of duration type.
- "javaClassValue": "A String", # Contains value if the data is of java class type.
- "key": "A String", # The key identifying the display data. This is intended to be used as a label for the display data when viewed in a dax monitoring system.
- "boolValue": True or False, # Contains value if the data is of a boolean type.
- "int64Value": "A String", # Contains value if the data is of int64 type.
- "shortStrValue": "A String", # A possible additional shorter value to display. For example a java_class_name_value of com.mypackage.MyDoFn will be stored with MyDoFn as the short_str_value and com.mypackage.MyDoFn as the java_class_name value. short_str_value can be displayed and java_class_name_value will be displayed as a tooltip.
- "timestampValue": "A String", # Contains value if the data is of timestamp type.
- "label": "A String", # An optional label to display in a dax UI for the element.
- "floatValue": 3.14, # Contains value if the data is of float type.
- "url": "A String", # An optional full URL.
- "namespace": "A String", # The namespace for the key. This is usually a class name or programming language namespace (i.e. python module) which defines the display data. This allows a dax monitoring system to specially handle the data and perform custom rendering.
- "strValue": "A String", # Contains value if the data is of string type.
- },
- ],
- "inputCollectionName": [ # User names for all collection inputs to this transform.
- "A String",
- ],
- "id": "A String", # SDK generated id of this transform instance.
- "kind": "A String", # Type of transform.
- "outputCollectionName": [ # User names for all collection outputs to this transform.
- "A String",
- ],
- },
- ],
- "executionPipelineStage": [ # Description of each stage of execution of the pipeline.
- { # Description of the composing transforms, names/ids, and input/outputs of a stage of execution. Some composing transforms and sources may have been generated by the Dataflow service during execution planning.
-        "kind": "A String", # Type of transform this stage is executing.
- "id": "A String", # Dataflow service generated id for this stage.
- "name": "A String", # Dataflow service generated name for this stage.
- "componentTransform": [ # Transforms that comprise this execution stage.
- { # Description of a transform executed as part of an execution stage.
- "userName": "A String", # Human-readable name for this transform; may be user or system generated.
- "originalTransform": "A String", # User name for the original user transform with which this transform is most closely associated.
- "name": "A String", # Dataflow service generated name for this source.
- },
- ],
- "componentSource": [ # Collections produced and consumed by component transforms of this stage.
- { # Description of an interstitial value between transforms in an execution stage.
- "userName": "A String", # Human-readable name for this transform; may be user or system generated.
- "originalTransformOrCollection": "A String", # User name for the original user transform or collection with which this source is most closely associated.
- "name": "A String", # Dataflow service generated name for this source.
- },
- ],
- "outputSource": [ # Output sources for this stage.
- { # Description of an input or output of an execution stage.
- "sizeBytes": "A String", # Size of the source, if measurable.
- "name": "A String", # Dataflow service generated name for this source.
- "userName": "A String", # Human-readable name for this source; may be user or system generated.
- "originalTransformOrCollection": "A String", # User name for the original user transform or collection with which this source is most closely associated.
- },
- ],
- "inputSource": [ # Input sources for this stage.
- { # Description of an input or output of an execution stage.
- "sizeBytes": "A String", # Size of the source, if measurable.
- "name": "A String", # Dataflow service generated name for this source.
- "userName": "A String", # Human-readable name for this source; may be user or system generated.
- "originalTransformOrCollection": "A String", # User name for the original user transform or collection with which this source is most closely associated.
- },
- ],
- },
- ],
- "displayData": [ # Pipeline level display data.
- { # Data provided with a pipeline or transform to provide descriptive info.
- "durationValue": "A String", # Contains value if the data is of duration type.
- "javaClassValue": "A String", # Contains value if the data is of java class type.
- "key": "A String", # The key identifying the display data. This is intended to be used as a label for the display data when viewed in a dax monitoring system.
- "boolValue": True or False, # Contains value if the data is of a boolean type.
- "int64Value": "A String", # Contains value if the data is of int64 type.
- "shortStrValue": "A String", # A possible additional shorter value to display. For example a java_class_name_value of com.mypackage.MyDoFn will be stored with MyDoFn as the short_str_value and com.mypackage.MyDoFn as the java_class_name value. short_str_value can be displayed and java_class_name_value will be displayed as a tooltip.
- "timestampValue": "A String", # Contains value if the data is of timestamp type.
- "label": "A String", # An optional label to display in a dax UI for the element.
- "floatValue": 3.14, # Contains value if the data is of float type.
- "url": "A String", # An optional full URL.
- "namespace": "A String", # The namespace for the key. This is usually a class name or programming language namespace (i.e. python module) which defines the display data. This allows a dax monitoring system to specially handle the data and perform custom rendering.
- "strValue": "A String", # Contains value if the data is of string type.
- },
- ],
- },
"labels": { # User-defined labels for this job. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally constrained to be <= 128 bytes in size.
"a_key": "A String",
},
@@ -244,138 +153,7 @@
"kind": "A String", # The kind of step in the Cloud Dataflow job.
},
],
- "name": "A String", # The user-specified Cloud Dataflow job name. Only one Job with a given name may exist in a project at any given time. If a caller attempts to create a Job with the same name as an already-existing Job, the attempt returns the existing Job. The name must match the regular expression `[a-z]([-a-z0-9]{0,38}[a-z0-9])?`
- "replacedByJobId": "A String", # If another job is an update of this job (and thus, this job is in `JOB_STATE_UPDATED`), this field contains the ID of that job.
- "environment": { # Describes the environment in which a Dataflow Job runs. # The environment for the job.
- "workerRegion": "A String", # The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with worker_zone. If neither worker_region nor worker_zone is specified, default to the control plane's region.
- "serviceKmsKeyName": "A String", # If set, contains the Cloud KMS key identifier used to encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). Format: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY
- "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset}
-      "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The system will append the suffix "/temp-{JOBNAME}" to this resource prefix, where {JOBNAME} is the value of the job_name field. The resulting bucket and object prefix is used as the prefix of the resources used to store temporary data needed during the job execution. NOTE: This will override the value in taskrunner_settings. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
- "version": { # A structure describing which components and their versions of the service are required in order to run the job.
- "a_key": "", # Properties of the object.
- },
- "workerPools": [ # The worker pools. At least one "harness" worker pool must be specified in order for the job to have workers.
- { # Describes one particular pool of Cloud Dataflow workers to be instantiated by the Cloud Dataflow service in order to perform the computations required by a job. Note that a workflow job may use multiple pools, in order to match the various computational requirements of the various stages of the job.
- "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google Compute Engine API.
-        "teardownPolicy": "A String", # Sets the policy for determining when to turn down the worker pool. Allowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and `TEARDOWN_NEVER`. `TEARDOWN_ALWAYS` means workers are always torn down regardless of whether the job succeeds. `TEARDOWN_ON_SUCCESS` means workers are torn down if the job succeeds. `TEARDOWN_NEVER` means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the `TEARDOWN_ALWAYS` policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
- "metadata": { # Metadata to set on the Google Compute Engine VMs.
- "a_key": "A String",
- },
- "zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
- "poolArgs": { # Extra arguments for this worker pool.
- "a_key": "", # Properties of the object. Contains field @type with type URL.
- },
- "ipConfiguration": "A String", # Configuration for VM IPs.
- "defaultPackageSet": "A String", # The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language.
- "packages": [ # Packages to be installed on workers.
- { # The packages that must be installed in order for a worker to run the steps of the Cloud Dataflow job that will be assigned to its worker pool. This is the mechanism by which the Cloud Dataflow SDK causes code to be loaded onto the workers. For example, the Cloud Dataflow Java SDK might use this to install jars containing the user's code and all of the various dependencies (libraries, data files, etc.) required in order for that code to run.
- "name": "A String", # The name of the package.
- "location": "A String", # The resource to read the package from. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket} bucket.storage.googleapis.com/
- },
- ],
- "taskrunnerSettings": { # Taskrunner configuration settings. # Settings passed through to Google Compute Engine workers when using the standard Dataflow task runner. Users should ignore this field.
- "commandlinesFileName": "A String", # The file to store preprocessing commands in.
- "languageHint": "A String", # The suggested backend language.
- "workflowFileName": "A String", # The file to store the workflow in.
- "streamingWorkerMainClass": "A String", # The streaming worker main class name.
- "vmId": "A String", # The ID string of the VM.
- "baseTaskDir": "A String", # The location on the worker for task-specific subdirectories.
- "taskGroup": "A String", # The UNIX group ID on the worker VM to use for tasks launched by taskrunner; e.g. "wheel".
- "continueOnException": True or False, # Whether to continue taskrunner if an exception is hit.
- "logUploadLocation": "A String", # Indicates where to put logs. If this is not specified, the logs will not be uploaded. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
- "harnessCommand": "A String", # The command to launch the worker harness.
- "oauthScopes": [ # The OAuth2 scopes to be requested by the taskrunner in order to access the Cloud Dataflow API.
- "A String",
- ],
-          "dataflowApiVersion": "A String", # The API version of the endpoint, e.g. "v1b3"
- "taskUser": "A String", # The UNIX user ID on the worker VM to use for tasks launched by taskrunner; e.g. "root".
- "alsologtostderr": True or False, # Whether to also send taskrunner log info to stderr.
- "tempStoragePrefix": "A String", # The prefix of the resources the taskrunner should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
- "baseUrl": "A String", # The base URL for the taskrunner to use when accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
- "logDir": "A String", # The directory on the VM to store logs.
- "logToSerialconsole": True or False, # Whether to send taskrunner log info to Google Compute Engine VM serial console.
- "parallelWorkerSettings": { # Provides data to pass through to the worker harness. # The settings to pass to the parallel worker harness.
- "servicePath": "A String", # The Cloud Dataflow service path relative to the root URL, for example, "dataflow/v1b3/projects".
- "workerId": "A String", # The ID of the worker running this pipeline.
- "shuffleServicePath": "A String", # The Shuffle service path relative to the root URL, for example, "shuffle/v1beta1".
- "baseUrl": "A String", # The base URL for accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
- "reportingEnabled": True or False, # Whether to send work progress updates to the service.
- "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
- },
- },
- "diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
- "sdkHarnessContainerImages": [ # Set of SDK harness containers needed to execute this pipeline. This will only be set in the Fn API path. For non-cross-language pipelines this should have only one entry. Cross-language pipelines will have two or more entries.
-        { # Defines an SDK harness container for executing Dataflow pipelines.
-          "containerImage": "A String", # A docker container image that resides in Google Container Registry.
-          "useSingleCorePerContainer": True or False, # If true, recommends that the Dataflow service use only one core per SDK container instance with this image. If false (or unset), recommends using more than one core per SDK container instance with this image for efficiency. Note that the Dataflow service may choose to override this property if needed.
- },
- ],
- "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. Expected to be of the form "regions/REGION/subnetworks/SUBNETWORK".
- "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
- "machineType": "A String", # Machine type (e.g. "n1-standard-1"). If empty or unspecified, the service will attempt to choose a reasonable default.
- "autoscalingSettings": { # Settings for WorkerPool autoscaling. # Settings for autoscaling of this WorkerPool.
- "maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
- "algorithm": "A String", # The algorithm to use for autoscaling.
- },
- "diskSizeGb": 42, # Size of root disk for VMs, in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
- "numThreadsPerWorker": 42, # The number of threads per worker harness. If empty or unspecified, the service will choose a number of threads (according to the number of cores on the selected machine type for batch, or 1 by convention for streaming).
- "workerHarnessContainerImage": "A String", # Required. Docker container image that executes the Cloud Dataflow worker harness, residing in Google Container Registry. Deprecated for the Fn API path. Use sdk_harness_container_images instead.
- "dataDisks": [ # Data disks that are used by a VM in this workflow.
- { # Describes the data disk used by a workflow job.
- "mountPoint": "A String", # Directory in a VM where disk is mounted.
- "sizeGb": 42, # Size of disk in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
-          "diskType": "A String", # Disk storage type, as defined by Google Compute Engine. This must be a disk type appropriate to the project and zone in which the workers will run. If unknown or unspecified, the service will attempt to choose a reasonable default. For example, the standard persistent disk type is a resource name typically ending in "pd-standard". If SSD persistent disks are available, the resource name typically ends with "pd-ssd". The actual valid values are defined by the Google Compute Engine API, not by the Cloud Dataflow API; consult the Google Compute Engine documentation for more information about determining the set of available disk types for a particular project and zone. Google Compute Engine Disk types are local to a particular project in a particular zone, and so the resource name will typically look something like this: compute.googleapis.com/projects/project-id/zones/zone/diskTypes/pd-standard
- },
- ],
- "kind": "A String", # The kind of the worker pool; currently only `harness` and `shuffle` are supported.
- "diskSourceImage": "A String", # Fully qualified source image for disks.
- "network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
- },
- ],
- "workerZone": "A String", # The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with worker_region. If neither worker_region nor worker_zone is specified, a zone in the control plane's region is chosen based on available capacity.
- "serviceAccountEmail": "A String", # Identity to run virtual machines as. Defaults to the default account.
- "flexResourceSchedulingGoal": "A String", # Which Flexible Resource Scheduling mode to run in.
- "userAgent": { # A description of the process that generated the request.
- "a_key": "", # Properties of the object.
- },
- "experiments": [ # The list of experiments to enable.
- "A String",
- ],
- "sdkPipelineOptions": { # The Cloud Dataflow SDK pipeline options specified by the user. These options are passed through the service and are used to recreate the SDK pipeline options on the worker in a language agnostic and platform independent way.
- "a_key": "", # Properties of the object.
- },
- "internalExperiments": { # Experimental settings.
- "a_key": "", # Properties of the object. Contains field @type with type URL.
- },
- "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com".
- },
- "tempFiles": [ # A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. The supported files are: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
- "A String",
- ],
- "projectId": "A String", # The ID of the Cloud Platform project that the job belongs to.
- "currentState": "A String", # The current state of the job. Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise specified. A job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After a job has reached a terminal state, no further state updates may be made. This field may be mutated by the Cloud Dataflow service; callers cannot mutate it.
- "currentStateTime": "A String", # The timestamp associated with the current state.
- "id": "A String", # The unique ID of this job. This field is set by the Cloud Dataflow service when the Job is created, and is immutable for the life of the job.
- "location": "A String", # The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job.
-    "startTime": "A String", # The timestamp when the job was started (transitioned to JOB_STATE_PENDING). Flexible resource scheduling jobs are started with some delay after job creation, so start_time is unset before start and is updated when the job is started by the Cloud Dataflow service. For other jobs, start_time always equals create_time and is immutable and set by the Cloud Dataflow service.
- "createTime": "A String", # The timestamp when the job was initially created. Immutable and set by the Cloud Dataflow service.
"requestedState": "A String", # The job's requested state. `UpdateJob` may be used to switch between the `JOB_STATE_STOPPED` and `JOB_STATE_RUNNING` states, by setting requested_state. `UpdateJob` may also be used to directly set a job's requested state to `JOB_STATE_CANCELLED` or `JOB_STATE_DONE`, irrevocably terminating the job if it has not already reached a terminal state.
- "executionInfo": { # Additional information about how a Cloud Dataflow job will be executed that isn't contained in the submitted job. # Deprecated.
- "stages": { # A mapping from each stage to the information about that stage.
- "a_key": { # Contains information about how a particular google.dataflow.v1beta3.Step will be executed.
- "stepName": [ # The steps associated with the execution stage. Note that stages may have several steps, and that a given step might be run by more than one stage.
- "A String",
- ],
- },
- },
- },
- "type": "A String", # The type of Cloud Dataflow job.
- "clientRequestId": "A String", # The client's unique identifier of the job, re-used across retried attempts. If this field is set, the service will ensure its uniqueness. The request to create a job will fail if the service has knowledge of a previously submitted job with the same client's ID and job name. The caller may use this field to ensure idempotence of job creation across retried attempts to create a job. By default, the field is empty and, in that case, the service ignores it.
- "createdFromSnapshotId": "A String", # If this is specified, the job's initial state is populated from the given snapshot.
- "transformNameMapping": { # The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job.
- "a_key": "A String",
- },
- "replaceJobId": "A String", # If this job is an update of an existing job, this field is the job ID of the job it replaced. When sending a `CreateJobRequest`, you can update a job by specifying it here. The job named here is stopped, and its intermediate state is transferred to this job.
"jobMetadata": { # Metadata available primarily for filtering jobs. Will be included in the ListJob response and Job SUMMARY view. # This field is populated by the Dataflow service to support filtering jobs by the metadata values provided here. Populated for ListJobs and all GetJob views SUMMARY and higher.
"fileDetails": [ # Identification of a File source used in the Dataflow job.
{ # Metadata for a File connector used by the job.
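
The decoded response is a plain dict, so the Job fields documented in this schema can be read directly; a brief sketch continuing from the create() call earlier:

    # The service assigns the immutable job id; currentState may
    # still be JOB_STATE_PENDING right after creation.
    print(job["id"], job.get("currentState"))
    for stage in job.get("stageStates", []):
        print(stage["executionStageName"], stage["executionStageState"])
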
@@ -385,49 +163,274 @@
"spannerDetails": [ # Identification of a Spanner source used in the Dataflow job.
{ # Metadata for a Spanner connector used by the job.
"instanceId": "A String", # InstanceId accessed in the connection.
- "projectId": "A String", # ProjectId accessed in the connection.
"databaseId": "A String", # DatabaseId accessed in the connection.
+ "projectId": "A String", # ProjectId accessed in the connection.
+ },
+ ],
+ "bigTableDetails": [ # Identification of a BigTable source used in the Dataflow job.
+ { # Metadata for a BigTable connector used by the job.
+ "projectId": "A String", # ProjectId accessed in the connection.
+ "instanceId": "A String", # InstanceId accessed in the connection.
+ "tableId": "A String", # TableId accessed in the connection.
},
],
"datastoreDetails": [ # Identification of a Datastore source used in the Dataflow job.
{ # Metadata for a Datastore connector used by the job.
- "namespace": "A String", # Namespace used in the connection.
"projectId": "A String", # ProjectId accessed in the connection.
+ "namespace": "A String", # Namespace used in the connection.
},
],
- "sdkVersion": { # The version of the SDK used to run the job. # The SDK version used to run the job.
- "version": "A String", # The version of the SDK used to run the job.
- "sdkSupportStatus": "A String", # The support status for this SDK version.
- "versionDisplayName": "A String", # A readable string describing the version of the SDK.
- },
"pubsubDetails": [ # Identification of a PubSub source used in the Dataflow job.
{ # Metadata for a PubSub connector used by the job.
"subscription": "A String", # Subscription used in the connection.
"topic": "A String", # Topic accessed in the connection.
},
],
+ "sdkVersion": { # The version of the SDK used to run the job. # The SDK version used to run the job.
+ "versionDisplayName": "A String", # A readable string describing the version of the SDK.
+ "version": "A String", # The version of the SDK used to run the job.
+ "sdkSupportStatus": "A String", # The support status for this SDK version.
+ },
"bigqueryDetails": [ # Identification of a BigQuery source used in the Dataflow job.
{ # Metadata for a BigQuery connector used by the job.
- "dataset": "A String", # Dataset accessed in the connection.
- "projectId": "A String", # Project accessed in the connection.
"query": "A String", # Query used to access data in the connection.
"table": "A String", # Table accessed in the connection.
- },
- ],
- "bigTableDetails": [ # Identification of a BigTable source used in the Dataflow job.
- { # Metadata for a BigTable connector used by the job.
- "instanceId": "A String", # InstanceId accessed in the connection.
- "tableId": "A String", # TableId accessed in the connection.
- "projectId": "A String", # ProjectId accessed in the connection.
+ "dataset": "A String", # Dataset accessed in the connection.
+ "projectId": "A String", # Project accessed in the connection.
},
],
},
"stepsLocation": "A String", # The GCS location where the steps are stored.
+ "transformNameMapping": { # The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job.
+ "a_key": "A String",
+ },
+ "type": "A String", # The type of Cloud Dataflow job.
+ "clientRequestId": "A String", # The client's unique identifier of the job, re-used across retried attempts. If this field is set, the service will ensure its uniqueness. The request to create a job will fail if the service has knowledge of a previously submitted job with the same client's ID and job name. The caller may use this field to ensure idempotence of job creation across retried attempts to create a job. By default, the field is empty and, in that case, the service ignores it.
+ "tempFiles": [ # A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. The supported files are: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
+ "A String",
+ ],
+ "environment": { # Describes the environment in which a Dataflow Job runs. # The environment for the job.
+ "sdkPipelineOptions": { # The Cloud Dataflow SDK pipeline options specified by the user. These options are passed through the service and are used to recreate the SDK pipeline options on the worker in a language agnostic and platform independent way.
+ "a_key": "", # Properties of the object.
+ },
+ "workerPools": [ # The worker pools. At least one "harness" worker pool must be specified in order for the job to have workers.
+ { # Describes one particular pool of Cloud Dataflow workers to be instantiated by the Cloud Dataflow service in order to perform the computations required by a job. Note that a workflow job may use multiple pools, in order to match the various computational requirements of the various stages of the job.
+ "diskSourceImage": "A String", # Fully qualified source image for disks.
+        "teardownPolicy": "A String", # Sets the policy for determining when to turn down the worker pool. Allowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and `TEARDOWN_NEVER`. `TEARDOWN_ALWAYS` means workers are always torn down regardless of whether the job succeeds. `TEARDOWN_ON_SUCCESS` means workers are torn down if the job succeeds. `TEARDOWN_NEVER` means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the `TEARDOWN_ALWAYS` policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
+ "machineType": "A String", # Machine type (e.g. "n1-standard-1"). If empty or unspecified, the service will attempt to choose a reasonable default.
+ "diskSizeGb": 42, # Size of root disk for VMs, in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
+ "diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
+ "numThreadsPerWorker": 42, # The number of threads per worker harness. If empty or unspecified, the service will choose a number of threads (according to the number of cores on the selected machine type for batch, or 1 by convention for streaming).
+ "autoscalingSettings": { # Settings for WorkerPool autoscaling. # Settings for autoscaling of this WorkerPool.
+ "algorithm": "A String", # The algorithm to use for autoscaling.
+ "maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
+ },
+ "metadata": { # Metadata to set on the Google Compute Engine VMs.
+ "a_key": "A String",
+ },
+ "kind": "A String", # The kind of the worker pool; currently only `harness` and `shuffle` are supported.
+ "taskrunnerSettings": { # Taskrunner configuration settings. # Settings passed through to Google Compute Engine workers when using the standard Dataflow task runner. Users should ignore this field.
+ "tempStoragePrefix": "A String", # The prefix of the resources the taskrunner should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
+ "commandlinesFileName": "A String", # The file to store preprocessing commands in.
+          "dataflowApiVersion": "A String", # The API version of the endpoint, e.g. "v1b3"
+ "taskUser": "A String", # The UNIX user ID on the worker VM to use for tasks launched by taskrunner; e.g. "root".
+ "logDir": "A String", # The directory on the VM to store logs.
+ "parallelWorkerSettings": { # Provides data to pass through to the worker harness. # The settings to pass to the parallel worker harness.
+ "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
+ "reportingEnabled": True or False, # Whether to send work progress updates to the service.
+ "shuffleServicePath": "A String", # The Shuffle service path relative to the root URL, for example, "shuffle/v1beta1".
+ "workerId": "A String", # The ID of the worker running this pipeline.
+ "servicePath": "A String", # The Cloud Dataflow service path relative to the root URL, for example, "dataflow/v1b3/projects".
+ "baseUrl": "A String", # The base URL for accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
+ },
+ "vmId": "A String", # The ID string of the VM.
+ "continueOnException": True or False, # Whether to continue taskrunner if an exception is hit.
+ "baseUrl": "A String", # The base URL for the taskrunner to use when accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
+ "streamingWorkerMainClass": "A String", # The streaming worker main class name.
+ "taskGroup": "A String", # The UNIX group ID on the worker VM to use for tasks launched by taskrunner; e.g. "wheel".
+ "harnessCommand": "A String", # The command to launch the worker harness.
+ "oauthScopes": [ # The OAuth2 scopes to be requested by the taskrunner in order to access the Cloud Dataflow API.
+ "A String",
+ ],
+ "languageHint": "A String", # The suggested backend language.
+ "baseTaskDir": "A String", # The location on the worker for task-specific subdirectories.
+ "alsologtostderr": True or False, # Whether to also send taskrunner log info to stderr.
+ "logToSerialconsole": True or False, # Whether to send taskrunner log info to Google Compute Engine VM serial console.
+ "workflowFileName": "A String", # The file to store the workflow in.
+ "logUploadLocation": "A String", # Indicates where to put logs. If this is not specified, the logs will not be uploaded. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
+ },
+ "defaultPackageSet": "A String", # The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language.
+ "network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
+ "workerHarnessContainerImage": "A String", # Required. Docker container image that executes the Cloud Dataflow worker harness, residing in Google Container Registry. Deprecated for the Fn API path. Use sdk_harness_container_images instead.
+ "packages": [ # Packages to be installed on workers.
+ { # The packages that must be installed in order for a worker to run the steps of the Cloud Dataflow job that will be assigned to its worker pool. This is the mechanism by which the Cloud Dataflow SDK causes code to be loaded onto the workers. For example, the Cloud Dataflow Java SDK might use this to install jars containing the user's code and all of the various dependencies (libraries, data files, etc.) required in order for that code to run.
+ "location": "A String", # The resource to read the package from. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket} bucket.storage.googleapis.com/
+ "name": "A String", # The name of the package.
+ },
+ ],
+ "poolArgs": { # Extra arguments for this worker pool.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
+ },
+ "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google Compute Engine API.
+ "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
+ "sdkHarnessContainerImages": [ # Set of SDK harness containers needed to execute this pipeline. This will only be set in the Fn API path. For non-cross-language pipelines this should have only one entry. Cross-language pipelines will have two or more entries.
+        { # Defines an SDK harness container for executing Dataflow pipelines.
+          "useSingleCorePerContainer": True or False, # If true, recommends that the Dataflow service use only one core per SDK container instance with this image. If false (or unset), recommends using more than one core per SDK container instance with this image for efficiency. Note that the Dataflow service may choose to override this property if needed.
+ "containerImage": "A String", # A docker container image that resides in Google Container Registry.
+ },
+ ],
+ "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. Expected to be of the form "regions/REGION/subnetworks/SUBNETWORK".
+ "ipConfiguration": "A String", # Configuration for VM IPs.
+ "dataDisks": [ # Data disks that are used by a VM in this workflow.
+ { # Describes the data disk used by a workflow job.
+          "diskType": "A String", # Disk storage type, as defined by Google Compute Engine. This must be a disk type appropriate to the project and zone in which the workers will run. If unknown or unspecified, the service will attempt to choose a reasonable default. For example, the standard persistent disk type is a resource name typically ending in "pd-standard". If SSD persistent disks are available, the resource name typically ends with "pd-ssd". The actual valid values are defined by the Google Compute Engine API, not by the Cloud Dataflow API; consult the Google Compute Engine documentation for more information about determining the set of available disk types for a particular project and zone. Google Compute Engine Disk types are local to a particular project in a particular zone, and so the resource name will typically look something like this: compute.googleapis.com/projects/project-id/zones/zone/diskTypes/pd-standard
+ "sizeGb": 42, # Size of disk in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
+ "mountPoint": "A String", # Directory in a VM where disk is mounted.
+ },
+ ],
+ "zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
+ },
+ ],
+ "serviceAccountEmail": "A String", # Identity to run virtual machines as. Defaults to the default account.
+ "serviceKmsKeyName": "A String", # If set, contains the Cloud KMS key identifier used to encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). Format: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY
+ "flexResourceSchedulingGoal": "A String", # Which Flexible Resource Scheduling mode to run in.
+ "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com".
+ "workerZone": "A String", # The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with worker_region. If neither worker_region nor worker_zone is specified, a zone in the control plane's region is chosen based on available capacity.
+      "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The system will append the suffix "/temp-{JOBNAME}" to this resource prefix, where {JOBNAME} is the value of the job_name field. The resulting bucket and object prefix is used as the prefix of the resources used to store temporary data needed during the job execution. NOTE: This will override the value in taskrunner_settings. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
+ "workerRegion": "A String", # The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with worker_zone. If neither worker_region nor worker_zone is specified, default to the control plane's region.
+ "internalExperiments": { # Experimental settings.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
+ },
+ "experiments": [ # The list of experiments to enable.
+ "A String",
+ ],
+ "userAgent": { # A description of the process that generated the request.
+ "a_key": "", # Properties of the object.
+ },
+ "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset}
+ "version": { # A structure describing which components and their versions of the service are required in order to run the job.
+ "a_key": "", # Properties of the object.
+ },
+ },
+ "executionInfo": { # Additional information about how a Cloud Dataflow job will be executed that isn't contained in the submitted job. # Deprecated.
+ "stages": { # A mapping from each stage to the information about that stage.
+ "a_key": { # Contains information about how a particular google.dataflow.v1beta3.Step will be executed.
+ "stepName": [ # The steps associated with the execution stage. Note that stages may have several steps, and that a given step might be run by more than one stage.
+ "A String",
+ ],
+ },
+ },
+ },
+ "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it.
+ { # A message describing the state of a particular execution stage.
+ "currentStateTime": "A String", # The time at which the stage transitioned to this state.
+        "executionStageState": "A String", # Execution stage states allow the same set of values as JobState.
+ "executionStageName": "A String", # The name of the execution stage.
+ },
+ ],
+ "createTime": "A String", # The timestamp when the job was initially created. Immutable and set by the Cloud Dataflow service.
+    "startTime": "A String", # The timestamp when the job was started (transitioned to JOB_STATE_PENDING). Flexible resource scheduling jobs are started with some delay after job creation, so start_time is unset before start and is updated when the job is started by the Cloud Dataflow service. For other jobs, start_time always equals create_time and is immutable and set by the Cloud Dataflow service.
+ "currentState": "A String", # The current state of the job. Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise specified. A job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After a job has reached a terminal state, no further state updates may be made. This field may be mutated by the Cloud Dataflow service; callers cannot mutate it.
+ "currentStateTime": "A String", # The timestamp associated with the current state.
+ "replaceJobId": "A String", # If this job is an update of an existing job, this field is the job ID of the job it replaced. When sending a `CreateJobRequest`, you can update a job by specifying it here. The job named here is stopped, and its intermediate state is transferred to this job.
+ "replacedByJobId": "A String", # If another job is an update of this job (and thus, this job is in `JOB_STATE_UPDATED`), this field contains the ID of that job.
+ "createdFromSnapshotId": "A String", # If this is specified, the job's initial state is populated from the given snapshot.
+    "pipelineDescription": { # A descriptive representation of the submitted pipeline as well as the executed form. This data is provided by the Dataflow service for ease of visualizing the pipeline and interpreting Dataflow-provided metrics. # Preliminary field: The format of this data may change at any time. A description of the user pipeline and stages through which it is executed. Created by the Cloud Dataflow service. Only retrieved with JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL.
+ "executionPipelineStage": [ # Description of each stage of execution of the pipeline.
+ { # Description of the composing transforms, names/ids, and input/outputs of a stage of execution. Some composing transforms and sources may have been generated by the Dataflow service during execution planning.
+ "name": "A String", # Dataflow service generated name for this stage.
+ "id": "A String", # Dataflow service generated id for this stage.
+ "componentSource": [ # Collections produced and consumed by component transforms of this stage.
+ { # Description of an interstitial value between transforms in an execution stage.
+ "userName": "A String", # Human-readable name for this transform; may be user or system generated.
+ "originalTransformOrCollection": "A String", # User name for the original user transform or collection with which this source is most closely associated.
+ "name": "A String", # Dataflow service generated name for this source.
+ },
+ ],
+        "kind": "A String", # Type of transform this stage is executing.
+ "inputSource": [ # Input sources for this stage.
+ { # Description of an input or output of an execution stage.
+ "sizeBytes": "A String", # Size of the source, if measurable.
+ "userName": "A String", # Human-readable name for this source; may be user or system generated.
+ "name": "A String", # Dataflow service generated name for this source.
+ "originalTransformOrCollection": "A String", # User name for the original user transform or collection with which this source is most closely associated.
+ },
+ ],
+ "outputSource": [ # Output sources for this stage.
+ { # Description of an input or output of an execution stage.
+ "sizeBytes": "A String", # Size of the source, if measurable.
+ "userName": "A String", # Human-readable name for this source; may be user or system generated.
+ "name": "A String", # Dataflow service generated name for this source.
+ "originalTransformOrCollection": "A String", # User name for the original user transform or collection with which this source is most closely associated.
+ },
+ ],
+ "prerequisiteStage": [ # Other stages that must complete before this stage can run.
+ "A String",
+ ],
+ "componentTransform": [ # Transforms that comprise this execution stage.
+ { # Description of a transform executed as part of an execution stage.
+ "name": "A String", # Dataflow service generated name for this source.
+ "originalTransform": "A String", # User name for the original user transform with which this transform is most closely associated.
+ "userName": "A String", # Human-readable name for this transform; may be user or system generated.
+ },
+ ],
+ },
+ ],
+ "originalPipelineTransform": [ # Description of each transform in the pipeline and collections between them.
+ { # Description of the type, names/ids, and input/outputs for a transform.
+ "inputCollectionName": [ # User names for all collection inputs to this transform.
+ "A String",
+ ],
+ "outputCollectionName": [ # User names for all collection outputs to this transform.
+ "A String",
+ ],
+ "name": "A String", # User provided name for this transform instance.
+ "id": "A String", # SDK generated id of this transform instance.
+ "displayData": [ # Transform-specific display data.
+ { # Data provided with a pipeline or transform to provide descriptive info.
+ "boolValue": True or False, # Contains value if the data is of a boolean type.
+ "url": "A String", # An optional full URL.
+ "key": "A String", # The key identifying the display data. This is intended to be used as a label for the display data when viewed in a dax monitoring system.
+ "floatValue": 3.14, # Contains value if the data is of float type.
+ "int64Value": "A String", # Contains value if the data is of int64 type.
+ "durationValue": "A String", # Contains value if the data is of duration type.
+ "namespace": "A String", # The namespace for the key. This is usually a class name or programming language namespace (i.e. python module) which defines the display data. This allows a dax monitoring system to specially handle the data and perform custom rendering.
+ "label": "A String", # An optional label to display in a dax UI for the element.
+ "timestampValue": "A String", # Contains value if the data is of timestamp type.
+ "javaClassValue": "A String", # Contains value if the data is of java class type.
+ "strValue": "A String", # Contains value if the data is of string type.
+ "shortStrValue": "A String", # A possible additional shorter value to display. For example a java_class_name_value of com.mypackage.MyDoFn will be stored with MyDoFn as the short_str_value and com.mypackage.MyDoFn as the java_class_name value. short_str_value can be displayed and java_class_name_value will be displayed as a tooltip.
+ },
+ ],
+ "kind": "A String", # Type of transform.
+ },
+ ],
+ "displayData": [ # Pipeline level display data.
+ { # Data provided with a pipeline or transform to provide descriptive info.
+ "boolValue": True or False, # Contains value if the data is of a boolean type.
+ "url": "A String", # An optional full URL.
+ "key": "A String", # The key identifying the display data. This is intended to be used as a label for the display data when viewed in a dax monitoring system.
+ "floatValue": 3.14, # Contains value if the data is of float type.
+ "int64Value": "A String", # Contains value if the data is of int64 type.
+ "durationValue": "A String", # Contains value if the data is of duration type.
+ "namespace": "A String", # The namespace for the key. This is usually a class name or programming language namespace (i.e. python module) which defines the display data. This allows a dax monitoring system to specially handle the data and perform custom rendering.
+ "label": "A String", # An optional label to display in a dax UI for the element.
+ "timestampValue": "A String", # Contains value if the data is of timestamp type.
+ "javaClassValue": "A String", # Contains value if the data is of java class type.
+ "strValue": "A String", # Contains value if the data is of string type.
+ "shortStrValue": "A String", # A possible additional shorter value to display. For example a java_class_name_value of com.mypackage.MyDoFn will be stored with MyDoFn as the short_str_value and com.mypackage.MyDoFn as the java_class_name value. short_str_value can be displayed and java_class_name_value will be displayed as a tooltip.
+ },
+ ],
+ },
+ "location": "A String", # The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job.
+ "id": "A String", # The unique ID of this job. This field is set by the Cloud Dataflow service when the Job is created, and is immutable for the life of the job.
+ "name": "A String", # The user-specified Cloud Dataflow job name. Only one Job with a given name may exist in a project at any given time. If a caller attempts to create a Job with the same name as an already-existing Job, the attempt returns the existing Job. The name must match the regular expression `[a-z]([-a-z0-9]{0,38}[a-z0-9])?`
+ "projectId": "A String", # The ID of the Cloud Platform project that the job belongs to.
}</pre>
</div>
<div class="method">
- <code class="details" id="get">get(projectId, view=None, gcsPath=None, location=None, x__xgafv=None)</code>
+ <code class="details" id="get">get(projectId, view=None, location=None, gcsPath=None, x__xgafv=None)</code>
<pre>Get the template associated with a template.
Args:
@@ -435,8 +438,8 @@
view: string, The view to retrieve. Defaults to METADATA_ONLY.
Allowed values
METADATA_ONLY - Template view that retrieves only the metadata associated with the template.
- gcsPath: string, Required. A Cloud Storage path to the template from which to create the job. Must be valid Cloud Storage URL, beginning with 'gs://'.
location: string, The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to which to direct the request.
+ gcsPath: string, Required. A Cloud Storage path to the template from which to create the job. Must be a valid Cloud Storage URL, beginning with 'gs://'.
x__xgafv: string, V1 error format.
Allowed values
1 - v1 error format
@@ -446,55 +449,55 @@
An object of the form:
{ # The response to a GetTemplate request.
- "templateType": "A String", # Template Type.
- "runtimeMetadata": { # RuntimeMetadata describing a runtime environment. # Describes the runtime metadata with SDKInfo and available parameters.
- "sdkInfo": { # SDK Information. # SDK Info for the template.
- "language": "A String", # Required. The SDK Language.
- "version": "A String", # Optional. The SDK version.
- },
+ "metadata": { # Metadata describing a template. # The template metadata describing the template name, available parameters, etc.
+ "description": "A String", # Optional. A description of the template.
+ "name": "A String", # Required. The name of the template.
"parameters": [ # The parameters for the template.
{ # Metadata for a specific parameter.
"isOptional": True or False, # Optional. Whether the parameter is optional. Defaults to false.
+ "label": "A String", # Required. The label to display for the parameter.
"paramType": "A String", # Optional. The type of the parameter. Used for selecting input picker.
- "name": "A String", # Required. The name of the parameter.
+ "helpText": "A String", # Required. The help text to display for the parameter.
"regexes": [ # Optional. Regexes that the parameter must match.
"A String",
],
- "label": "A String", # Required. The label to display for the parameter.
- "helpText": "A String", # Required. The help text to display for the parameter.
+ "name": "A String", # Required. The name of the parameter.
},
],
},
+ "templateType": "A String", # Template Type.
"status": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The status of the get template request. Any problems with the request will be indicated in the error_details.
"message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
- "code": 42, # The status code, which should be an enum value of google.rpc.Code.
"details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
{
"a_key": "", # Properties of the object. Contains field @type with type URL.
},
],
+ "code": 42, # The status code, which should be an enum value of google.rpc.Code.
},
- "metadata": { # Metadata describing a template. # The template metadata describing the template name, available parameters, etc.
- "description": "A String", # Optional. A description of the template.
+ "runtimeMetadata": { # RuntimeMetadata describing a runtime environment. # Describes the runtime metadata with SDKInfo and available parameters.
+ "sdkInfo": { # SDK Information. # SDK Info for the template.
+ "version": "A String", # Optional. The SDK version.
+ "language": "A String", # Required. The SDK Language.
+ },
"parameters": [ # The parameters for the template.
{ # Metadata for a specific parameter.
"isOptional": True or False, # Optional. Whether the parameter is optional. Defaults to false.
+ "label": "A String", # Required. The label to display for the parameter.
"paramType": "A String", # Optional. The type of the parameter. Used for selecting input picker.
- "name": "A String", # Required. The name of the parameter.
+ "helpText": "A String", # Required. The help text to display for the parameter.
"regexes": [ # Optional. Regexes that the parameter must match.
"A String",
],
- "label": "A String", # Required. The label to display for the parameter.
- "helpText": "A String", # Required. The help text to display for the parameter.
+ "name": "A String", # Required. The name of the parameter.
},
],
- "name": "A String", # Required. The name of the template.
},
}</pre>
</div>
<div class="method">
- <code class="details" id="launch">launch(projectId, body=None, location=None, dynamicTemplate_stagingLocation=None, gcsPath=None, dynamicTemplate_gcsPath=None, validateOnly=None, x__xgafv=None)</code>
+ <code class="details" id="launch">launch(projectId, body=None, dynamicTemplate_stagingLocation=None, dynamicTemplate_gcsPath=None, validateOnly=None, location=None, gcsPath=None, x__xgafv=None)</code>
<pre>Launch a template.
Args:
@@ -503,43 +506,43 @@
The object takes the form of:
{ # Parameters to provide to the template being launched.
- "environment": { # The environment values to set at runtime. # The runtime environment for the job.
- "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
- "bypassTempDirValidation": True or False, # Whether to bypass the safety checks for the job's temporary directory. Use with caution.
- "machineType": "A String", # The machine type to use for the job. Defaults to the value from the template if not specified.
- "network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
- "workerRegion": "A String", # The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with worker_zone. If neither worker_region nor worker_zone is specified, default to the control plane's region.
- "additionalUserLabels": { # Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the [labeling restrictions](https://cloud.google.com/compute/docs/labeling-resources#restrictions) page. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1kg", "count": "3" }.
- "a_key": "A String",
- },
- "zone": "A String", # The Compute Engine [availability zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones) for launching worker instances to run your pipeline. In the future, worker_zone will take precedence.
- "maxWorkers": 42, # The maximum number of Google Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
- "numWorkers": 42, # The initial number of Google Compute Engine instnaces for the job.
- "ipConfiguration": "A String", # Configuration for VM IPs.
- "tempLocation": "A String", # The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with `gs://`.
- "serviceAccountEmail": "A String", # The email address of the service account to run the job as.
- "enableStreamingEngine": True or False, # Whether to enable Streaming Engine for the job.
- "kmsKeyName": "A String", # Name for the Cloud KMS key for the job. Key format is: projects//locations//keyRings//cryptoKeys/
- "additionalExperiments": [ # Additional experiment flags for the job.
- "A String",
- ],
- "workerZone": "A String", # The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with worker_region. If neither worker_region nor worker_zone is specified, a zone in the control plane's region is chosen based on available capacity. If both `worker_zone` and `zone` are set, `worker_zone` takes precedence.
- },
"parameters": { # The runtime parameters to pass to the job.
"a_key": "A String",
},
"update": True or False, # If set, replace the existing pipeline with the name specified by jobName with this pipeline, preserving state.
- "jobName": "A String", # Required. The job name to use for the created job.
"transformNameMapping": { # Only applicable when updating a pipeline. Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job.
"a_key": "A String",
},
+ "jobName": "A String", # Required. The job name to use for the created job.
+ "environment": { # The environment values to set at runtime. # The runtime environment for the job.
+ "workerZone": "A String", # The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with worker_region. If neither worker_region nor worker_zone is specified, a zone in the control plane's region is chosen based on available capacity. If both `worker_zone` and `zone` are set, `worker_zone` takes precedence.
+ "kmsKeyName": "A String", # Name for the Cloud KMS key for the job. Key format is: projects//locations//keyRings//cryptoKeys/
+ "workerRegion": "A String", # The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with worker_zone. If neither worker_region nor worker_zone is specified, default to the control plane's region.
+ "additionalUserLabels": { # Additional user labels to be specified for the job. Keys and values should follow the restrictions specified in the [labeling restrictions](https://cloud.google.com/compute/docs/labeling-resources#restrictions) page. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1kg", "count": "3" }.
+ "a_key": "A String",
+ },
+ "serviceAccountEmail": "A String", # The email address of the service account to run the job as.
+ "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. You can specify a subnetwork using either a complete URL or an abbreviated path. Expected to be of the form "https://www.googleapis.com/compute/v1/projects/HOST_PROJECT_ID/regions/REGION/subnetworks/SUBNETWORK" or "regions/REGION/subnetworks/SUBNETWORK". If the subnetwork is located in a Shared VPC network, you must use the complete URL.
+ "additionalExperiments": [ # Additional experiment flags for the job.
+ "A String",
+ ],
+ "zone": "A String", # The Compute Engine [availability zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones) for launching worker instances to run your pipeline. In the future, worker_zone will take precedence.
+ "numWorkers": 42, # The initial number of Google Compute Engine instnaces for the job.
+ "bypassTempDirValidation": True or False, # Whether to bypass the safety checks for the job's temporary directory. Use with caution.
+ "maxWorkers": 42, # The maximum number of Google Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
+ "enableStreamingEngine": True or False, # Whether to enable Streaming Engine for the job.
+ "network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
+ "tempLocation": "A String", # The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with `gs://`.
+ "ipConfiguration": "A String", # Configuration for VM IPs.
+ "machineType": "A String", # The machine type to use for the job. Defaults to the value from the template if not specified.
+ },
}
- location: string, The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to which to direct the request.
dynamicTemplate_stagingLocation: string, Cloud Storage path for staging dependencies. Must be a valid Cloud Storage URL, beginning with `gs://`.
- gcsPath: string, A Cloud Storage path to the template from which to create the job. Must be valid Cloud Storage URL, beginning with 'gs://'.
dynamicTemplate_gcsPath: string, Path to the dynamic template spec file on GCS. The file must be a JSON-serialized DynamicTemplateFieSpec object.
validateOnly: boolean, If true, the request is validated but not actually executed. Defaults to false.
+ location: string, The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) to which to direct the request.
+ gcsPath: string, A Cloud Storage path to the template from which to create the job. Must be a valid Cloud Storage URL, beginning with 'gs://'.
x__xgafv: string, V1 error format.
Allowed values
1 - v1 error format
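As a worked example (a hedged sketch under the same assumptions as the create() and get() sketches above, not part of the generated reference), launching this template with the Python client; the project, paths, and parameter names are placeholders:
# Hypothetical sketch only: assumes google-api-python-client with
# application-default credentials; all names below are placeholders.
from googleapiclient.discovery import build

dataflow = build("dataflow", "v1b3")

response = dataflow.projects().templates().launch(
    projectId="PROJECT",
    gcsPath="gs://BUCKET/templates/TEMPLATE",  # or dynamicTemplate_gcsPath for a dynamic template
    location="us-central1",
    validateOnly=False,        # True validates the request without running it
    body={
        "jobName": "example-launch",
        "parameters": {"inputFile": "gs://BUCKET/input.txt"},
        "environment": {       # RuntimeEnvironment fields documented above
            "tempLocation": "gs://BUCKET/tmp",
            "maxWorkers": 5,
        },
    },
).execute()

# On a dry run (validateOnly=True) the "job" field is absent.
job = response.get("job", {})
print(job.get("id"), job.get("name"))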
@@ -550,97 +553,6 @@
{ # Response to the request to launch a template.
"job": { # Defines a job to be run by the Cloud Dataflow service. # The job that was launched, if the request was not a dry run and the job was successfully launched.
- "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it.
- { # A message describing the state of a particular execution stage.
- "currentStateTime": "A String", # The time at which the stage transitioned to this state.
- "executionStageState": "A String", # Executions stage states allow the same set of values as JobState.
- "executionStageName": "A String", # The name of the execution stage.
- },
- ],
- "pipelineDescription": { # A descriptive representation of submitted pipeline as well as the executed form. This data is provided by the Dataflow service for ease of visualizing the pipeline and interpreting Dataflow provided metrics. # Preliminary field: The format of this data may change at any time. A description of the user pipeline and stages through which it is executed. Created by Cloud Dataflow service. Only retrieved with JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL.
- "originalPipelineTransform": [ # Description of each transform in the pipeline and collections between them.
- { # Description of the type, names/ids, and input/outputs for a transform.
- "name": "A String", # User provided name for this transform instance.
- "displayData": [ # Transform-specific display data.
- { # Data provided with a pipeline or transform to provide descriptive info.
- "durationValue": "A String", # Contains value if the data is of duration type.
- "javaClassValue": "A String", # Contains value if the data is of java class type.
- "key": "A String", # The key identifying the display data. This is intended to be used as a label for the display data when viewed in a dax monitoring system.
- "boolValue": True or False, # Contains value if the data is of a boolean type.
- "int64Value": "A String", # Contains value if the data is of int64 type.
- "shortStrValue": "A String", # A possible additional shorter value to display. For example a java_class_name_value of com.mypackage.MyDoFn will be stored with MyDoFn as the short_str_value and com.mypackage.MyDoFn as the java_class_name value. short_str_value can be displayed and java_class_name_value will be displayed as a tooltip.
- "timestampValue": "A String", # Contains value if the data is of timestamp type.
- "label": "A String", # An optional label to display in a dax UI for the element.
- "floatValue": 3.14, # Contains value if the data is of float type.
- "url": "A String", # An optional full URL.
- "namespace": "A String", # The namespace for the key. This is usually a class name or programming language namespace (i.e. python module) which defines the display data. This allows a dax monitoring system to specially handle the data and perform custom rendering.
- "strValue": "A String", # Contains value if the data is of string type.
- },
- ],
- "inputCollectionName": [ # User names for all collection inputs to this transform.
- "A String",
- ],
- "id": "A String", # SDK generated id of this transform instance.
- "kind": "A String", # Type of transform.
- "outputCollectionName": [ # User names for all collection outputs to this transform.
- "A String",
- ],
- },
- ],
- "executionPipelineStage": [ # Description of each stage of execution of the pipeline.
- { # Description of the composing transforms, names/ids, and input/outputs of a stage of execution. Some composing transforms and sources may have been generated by the Dataflow service during execution planning.
- "kind": "A String", # Type of tranform this stage is executing.
- "id": "A String", # Dataflow service generated id for this stage.
- "name": "A String", # Dataflow service generated name for this stage.
- "componentTransform": [ # Transforms that comprise this execution stage.
- { # Description of a transform executed as part of an execution stage.
- "userName": "A String", # Human-readable name for this transform; may be user or system generated.
- "originalTransform": "A String", # User name for the original user transform with which this transform is most closely associated.
- "name": "A String", # Dataflow service generated name for this source.
- },
- ],
- "componentSource": [ # Collections produced and consumed by component transforms of this stage.
- { # Description of an interstitial value between transforms in an execution stage.
- "userName": "A String", # Human-readable name for this transform; may be user or system generated.
- "originalTransformOrCollection": "A String", # User name for the original user transform or collection with which this source is most closely associated.
- "name": "A String", # Dataflow service generated name for this source.
- },
- ],
- "outputSource": [ # Output sources for this stage.
- { # Description of an input or output of an execution stage.
- "sizeBytes": "A String", # Size of the source, if measurable.
- "name": "A String", # Dataflow service generated name for this source.
- "userName": "A String", # Human-readable name for this source; may be user or system generated.
- "originalTransformOrCollection": "A String", # User name for the original user transform or collection with which this source is most closely associated.
- },
- ],
- "inputSource": [ # Input sources for this stage.
- { # Description of an input or output of an execution stage.
- "sizeBytes": "A String", # Size of the source, if measurable.
- "name": "A String", # Dataflow service generated name for this source.
- "userName": "A String", # Human-readable name for this source; may be user or system generated.
- "originalTransformOrCollection": "A String", # User name for the original user transform or collection with which this source is most closely associated.
- },
- ],
- },
- ],
- "displayData": [ # Pipeline level display data.
- { # Data provided with a pipeline or transform to provide descriptive info.
- "durationValue": "A String", # Contains value if the data is of duration type.
- "javaClassValue": "A String", # Contains value if the data is of java class type.
- "key": "A String", # The key identifying the display data. This is intended to be used as a label for the display data when viewed in a dax monitoring system.
- "boolValue": True or False, # Contains value if the data is of a boolean type.
- "int64Value": "A String", # Contains value if the data is of int64 type.
- "shortStrValue": "A String", # A possible additional shorter value to display. For example a java_class_name_value of com.mypackage.MyDoFn will be stored with MyDoFn as the short_str_value and com.mypackage.MyDoFn as the java_class_name value. short_str_value can be displayed and java_class_name_value will be displayed as a tooltip.
- "timestampValue": "A String", # Contains value if the data is of timestamp type.
- "label": "A String", # An optional label to display in a dax UI for the element.
- "floatValue": 3.14, # Contains value if the data is of float type.
- "url": "A String", # An optional full URL.
- "namespace": "A String", # The namespace for the key. This is usually a class name or programming language namespace (i.e. python module) which defines the display data. This allows a dax monitoring system to specially handle the data and perform custom rendering.
- "strValue": "A String", # Contains value if the data is of string type.
- },
- ],
- },
"labels": { # User-defined labels for this job. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally constrained to be <= 128 bytes in size.
"a_key": "A String",
},
@@ -653,138 +565,7 @@
"kind": "A String", # The kind of step in the Cloud Dataflow job.
},
],
- "name": "A String", # The user-specified Cloud Dataflow job name. Only one Job with a given name may exist in a project at any given time. If a caller attempts to create a Job with the same name as an already-existing Job, the attempt returns the existing Job. The name must match the regular expression `[a-z]([-a-z0-9]{0,38}[a-z0-9])?`
- "replacedByJobId": "A String", # If another job is an update of this job (and thus, this job is in `JOB_STATE_UPDATED`), this field contains the ID of that job.
- "environment": { # Describes the environment in which a Dataflow Job runs. # The environment for the job.
- "workerRegion": "A String", # The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with worker_zone. If neither worker_region nor worker_zone is specified, default to the control plane's region.
- "serviceKmsKeyName": "A String", # If set, contains the Cloud KMS key identifier used to encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). Format: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY
- "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset}
- "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The system will append the suffix "/temp-{JOBNAME} to this resource prefix, where {JOBNAME} is the value of the job_name field. The resulting bucket and object prefix is used as the prefix of the resources used to store temporary data needed during the job execution. NOTE: This will override the value in taskrunner_settings. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
- "version": { # A structure describing which components and their versions of the service are required in order to run the job.
- "a_key": "", # Properties of the object.
- },
- "workerPools": [ # The worker pools. At least one "harness" worker pool must be specified in order for the job to have workers.
- { # Describes one particular pool of Cloud Dataflow workers to be instantiated by the Cloud Dataflow service in order to perform the computations required by a job. Note that a workflow job may use multiple pools, in order to match the various computational requirements of the various stages of the job.
- "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google Compute Engine API.
- "teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool. Allowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and `TEARDOWN_NEVER`. `TEARDOWN_ALWAYS` means workers are always torn down regardless of whether the job succeeds. `TEARDOWN_ON_SUCCESS` means workers are torn down if the job succeeds. `TEARDOWN_NEVER` means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the `TEARDOWN_ALWAYS` policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
- "metadata": { # Metadata to set on the Google Compute Engine VMs.
- "a_key": "A String",
- },
- "zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
- "poolArgs": { # Extra arguments for this worker pool.
- "a_key": "", # Properties of the object. Contains field @type with type URL.
- },
- "ipConfiguration": "A String", # Configuration for VM IPs.
- "defaultPackageSet": "A String", # The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language.
- "packages": [ # Packages to be installed on workers.
- { # The packages that must be installed in order for a worker to run the steps of the Cloud Dataflow job that will be assigned to its worker pool. This is the mechanism by which the Cloud Dataflow SDK causes code to be loaded onto the workers. For example, the Cloud Dataflow Java SDK might use this to install jars containing the user's code and all of the various dependencies (libraries, data files, etc.) required in order for that code to run.
- "name": "A String", # The name of the package.
- "location": "A String", # The resource to read the package from. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket} bucket.storage.googleapis.com/
- },
- ],
- "taskrunnerSettings": { # Taskrunner configuration settings. # Settings passed through to Google Compute Engine workers when using the standard Dataflow task runner. Users should ignore this field.
- "commandlinesFileName": "A String", # The file to store preprocessing commands in.
- "languageHint": "A String", # The suggested backend language.
- "workflowFileName": "A String", # The file to store the workflow in.
- "streamingWorkerMainClass": "A String", # The streaming worker main class name.
- "vmId": "A String", # The ID string of the VM.
- "baseTaskDir": "A String", # The location on the worker for task-specific subdirectories.
- "taskGroup": "A String", # The UNIX group ID on the worker VM to use for tasks launched by taskrunner; e.g. "wheel".
- "continueOnException": True or False, # Whether to continue taskrunner if an exception is hit.
- "logUploadLocation": "A String", # Indicates where to put logs. If this is not specified, the logs will not be uploaded. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
- "harnessCommand": "A String", # The command to launch the worker harness.
- "oauthScopes": [ # The OAuth2 scopes to be requested by the taskrunner in order to access the Cloud Dataflow API.
- "A String",
- ],
- "dataflowApiVersion": "A String", # The API version of endpoint, e.g. "v1b3"
- "taskUser": "A String", # The UNIX user ID on the worker VM to use for tasks launched by taskrunner; e.g. "root".
- "alsologtostderr": True or False, # Whether to also send taskrunner log info to stderr.
- "tempStoragePrefix": "A String", # The prefix of the resources the taskrunner should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
- "baseUrl": "A String", # The base URL for the taskrunner to use when accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
- "logDir": "A String", # The directory on the VM to store logs.
- "logToSerialconsole": True or False, # Whether to send taskrunner log info to Google Compute Engine VM serial console.
- "parallelWorkerSettings": { # Provides data to pass through to the worker harness. # The settings to pass to the parallel worker harness.
- "servicePath": "A String", # The Cloud Dataflow service path relative to the root URL, for example, "dataflow/v1b3/projects".
- "workerId": "A String", # The ID of the worker running this pipeline.
- "shuffleServicePath": "A String", # The Shuffle service path relative to the root URL, for example, "shuffle/v1beta1".
- "baseUrl": "A String", # The base URL for accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
- "reportingEnabled": True or False, # Whether to send work progress updates to the service.
- "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
- },
- },
- "diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
- "sdkHarnessContainerImages": [ # Set of SDK harness containers needed to execute this pipeline. This will only be set in the Fn API path. For non-cross-language pipelines this should have only one entry. Cross-language pipelines will have two or more entries.
- { # Defines a SDK harness container for executing Dataflow pipelines.
- "containerImage": "A String", # A docker container image that resides in Google Container Registry.
- "useSingleCorePerContainer": True or False, # If true, recommends the Dataflow service to use only one core per SDK container instance with this image. If false (or unset) recommends using more than one core per SDK container instance with this image for efficiency. Note that Dataflow service may choose to override this property if needed.
- },
- ],
- "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. Expected to be of the form "regions/REGION/subnetworks/SUBNETWORK".
- "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
- "machineType": "A String", # Machine type (e.g. "n1-standard-1"). If empty or unspecified, the service will attempt to choose a reasonable default.
- "autoscalingSettings": { # Settings for WorkerPool autoscaling. # Settings for autoscaling of this WorkerPool.
- "maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
- "algorithm": "A String", # The algorithm to use for autoscaling.
- },
- "diskSizeGb": 42, # Size of root disk for VMs, in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
- "numThreadsPerWorker": 42, # The number of threads per worker harness. If empty or unspecified, the service will choose a number of threads (according to the number of cores on the selected machine type for batch, or 1 by convention for streaming).
- "workerHarnessContainerImage": "A String", # Required. Docker container image that executes the Cloud Dataflow worker harness, residing in Google Container Registry. Deprecated for the Fn API path. Use sdk_harness_container_images instead.
- "dataDisks": [ # Data disks that are used by a VM in this workflow.
- { # Describes the data disk used by a workflow job.
- "mountPoint": "A String", # Directory in a VM where disk is mounted.
- "sizeGb": 42, # Size of disk in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
- "diskType": "A String", # Disk storage type, as defined by Google Compute Engine. This must be a disk type appropriate to the project and zone in which the workers will run. If unknown or unspecified, the service will attempt to choose a reasonable default. For example, the standard persistent disk type is a resource name typically ending in "pd-standard". If SSD persistent disks are available, the resource name typically ends with "pd-ssd". The actual valid values are defined the Google Compute Engine API, not by the Cloud Dataflow API; consult the Google Compute Engine documentation for more information about determining the set of available disk types for a particular project and zone. Google Compute Engine Disk types are local to a particular project in a particular zone, and so the resource name will typically look something like this: compute.googleapis.com/projects/project-id/zones/zone/diskTypes/pd-standard
- },
- ],
- "kind": "A String", # The kind of the worker pool; currently only `harness` and `shuffle` are supported.
- "diskSourceImage": "A String", # Fully qualified source image for disks.
- "network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
- },
- ],
- "workerZone": "A String", # The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with worker_region. If neither worker_region nor worker_zone is specified, a zone in the control plane's region is chosen based on available capacity.
- "serviceAccountEmail": "A String", # Identity to run virtual machines as. Defaults to the default account.
- "flexResourceSchedulingGoal": "A String", # Which Flexible Resource Scheduling mode to run in.
- "userAgent": { # A description of the process that generated the request.
- "a_key": "", # Properties of the object.
- },
- "experiments": [ # The list of experiments to enable.
- "A String",
- ],
- "sdkPipelineOptions": { # The Cloud Dataflow SDK pipeline options specified by the user. These options are passed through the service and are used to recreate the SDK pipeline options on the worker in a language agnostic and platform independent way.
- "a_key": "", # Properties of the object.
- },
- "internalExperiments": { # Experimental settings.
- "a_key": "", # Properties of the object. Contains field @type with type URL.
- },
- "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com".
- },
- "tempFiles": [ # A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. The supported files are: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
- "A String",
- ],
- "projectId": "A String", # The ID of the Cloud Platform project that the job belongs to.
- "currentState": "A String", # The current state of the job. Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise specified. A job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After a job has reached a terminal state, no further state updates may be made. This field may be mutated by the Cloud Dataflow service; callers cannot mutate it.
- "currentStateTime": "A String", # The timestamp associated with the current state.
- "id": "A String", # The unique ID of this job. This field is set by the Cloud Dataflow service when the Job is created, and is immutable for the life of the job.
- "location": "A String", # The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job.
- "startTime": "A String", # The timestamp when the job was started (transitioned to JOB_STATE_PENDING). Flexible resource scheduling jobs are started with some delay after job creation, so start_time is unset before start and is updated when the job is started by the Cloud Dataflow service. For other jobs, start_time always equals to create_time and is immutable and set by the Cloud Dataflow service.
- "createTime": "A String", # The timestamp when the job was initially created. Immutable and set by the Cloud Dataflow service.
"requestedState": "A String", # The job's requested state. `UpdateJob` may be used to switch between the `JOB_STATE_STOPPED` and `JOB_STATE_RUNNING` states, by setting requested_state. `UpdateJob` may also be used to directly set a job's requested state to `JOB_STATE_CANCELLED` or `JOB_STATE_DONE`, irrevocably terminating the job if it has not already reached a terminal state.
- "executionInfo": { # Additional information about how a Cloud Dataflow job will be executed that isn't contained in the submitted job. # Deprecated.
- "stages": { # A mapping from each stage to the information about that stage.
- "a_key": { # Contains information about how a particular google.dataflow.v1beta3.Step will be executed.
- "stepName": [ # The steps associated with the execution stage. Note that stages may have several steps, and that a given step might be run by more than one stage.
- "A String",
- ],
- },
- },
- },
- "type": "A String", # The type of Cloud Dataflow job.
- "clientRequestId": "A String", # The client's unique identifier of the job, re-used across retried attempts. If this field is set, the service will ensure its uniqueness. The request to create a job will fail if the service has knowledge of a previously submitted job with the same client's ID and job name. The caller may use this field to ensure idempotence of job creation across retried attempts to create a job. By default, the field is empty and, in that case, the service ignores it.
- "createdFromSnapshotId": "A String", # If this is specified, the job's initial state is populated from the given snapshot.
- "transformNameMapping": { # The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job.
- "a_key": "A String",
- },
- "replaceJobId": "A String", # If this job is an update of an existing job, this field is the job ID of the job it replaced. When sending a `CreateJobRequest`, you can update a job by specifying it here. The job named here is stopped, and its intermediate state is transferred to this job.
"jobMetadata": { # Metadata available primarily for filtering jobs. Will be included in the ListJob response and Job SUMMARY view. # This field is populated by the Dataflow service to support filtering jobs by the metadata values provided here. Populated for ListJobs and all GetJob views SUMMARY and higher.
"fileDetails": [ # Identification of a File source used in the Dataflow job.
{ # Metadata for a File connector used by the job.
@@ -794,44 +575,269 @@
"spannerDetails": [ # Identification of a Spanner source used in the Dataflow job.
{ # Metadata for a Spanner connector used by the job.
"instanceId": "A String", # InstanceId accessed in the connection.
- "projectId": "A String", # ProjectId accessed in the connection.
"databaseId": "A String", # DatabaseId accessed in the connection.
+ "projectId": "A String", # ProjectId accessed in the connection.
+ },
+ ],
+ "bigTableDetails": [ # Identification of a BigTable source used in the Dataflow job.
+ { # Metadata for a BigTable connector used by the job.
+ "projectId": "A String", # ProjectId accessed in the connection.
+ "instanceId": "A String", # InstanceId accessed in the connection.
+ "tableId": "A String", # TableId accessed in the connection.
},
],
"datastoreDetails": [ # Identification of a Datastore source used in the Dataflow job.
{ # Metadata for a Datastore connector used by the job.
- "namespace": "A String", # Namespace used in the connection.
"projectId": "A String", # ProjectId accessed in the connection.
+ "namespace": "A String", # Namespace used in the connection.
},
],
- "sdkVersion": { # The version of the SDK used to run the job. # The SDK version used to run the job.
- "version": "A String", # The version of the SDK used to run the job.
- "sdkSupportStatus": "A String", # The support status for this SDK version.
- "versionDisplayName": "A String", # A readable string describing the version of the SDK.
- },
"pubsubDetails": [ # Identification of a PubSub source used in the Dataflow job.
{ # Metadata for a PubSub connector used by the job.
"subscription": "A String", # Subscription used in the connection.
"topic": "A String", # Topic accessed in the connection.
},
],
+ "sdkVersion": { # The version of the SDK used to run the job. # The SDK version used to run the job.
+ "versionDisplayName": "A String", # A readable string describing the version of the SDK.
+ "version": "A String", # The version of the SDK used to run the job.
+ "sdkSupportStatus": "A String", # The support status for this SDK version.
+ },
"bigqueryDetails": [ # Identification of a BigQuery source used in the Dataflow job.
{ # Metadata for a BigQuery connector used by the job.
- "dataset": "A String", # Dataset accessed in the connection.
- "projectId": "A String", # Project accessed in the connection.
"query": "A String", # Query used to access data in the connection.
"table": "A String", # Table accessed in the connection.
- },
- ],
- "bigTableDetails": [ # Identification of a BigTable source used in the Dataflow job.
- { # Metadata for a BigTable connector used by the job.
- "instanceId": "A String", # InstanceId accessed in the connection.
- "tableId": "A String", # TableId accessed in the connection.
- "projectId": "A String", # ProjectId accessed in the connection.
+ "dataset": "A String", # Dataset accessed in the connection.
+ "projectId": "A String", # Project accessed in the connection.
},
],
},
"stepsLocation": "A String", # The GCS location where the steps are stored.
+ "transformNameMapping": { # The map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job.
+ "a_key": "A String",
+ },
+ "type": "A String", # The type of Cloud Dataflow job.
+ "clientRequestId": "A String", # The client's unique identifier of the job, re-used across retried attempts. If this field is set, the service will ensure its uniqueness. The request to create a job will fail if the service has knowledge of a previously submitted job with the same client's ID and job name. The caller may use this field to ensure idempotence of job creation across retried attempts to create a job. By default, the field is empty and, in that case, the service ignores it.
+ "tempFiles": [ # A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. The supported files are: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
+ "A String",
+ ],
+ "environment": { # Describes the environment in which a Dataflow Job runs. # The environment for the job.
+ "sdkPipelineOptions": { # The Cloud Dataflow SDK pipeline options specified by the user. These options are passed through the service and are used to recreate the SDK pipeline options on the worker in a language agnostic and platform independent way.
+ "a_key": "", # Properties of the object.
+ },
+ "workerPools": [ # The worker pools. At least one "harness" worker pool must be specified in order for the job to have workers.
+ { # Describes one particular pool of Cloud Dataflow workers to be instantiated by the Cloud Dataflow service in order to perform the computations required by a job. Note that a workflow job may use multiple pools, in order to match the various computational requirements of the various stages of the job.
+ "diskSourceImage": "A String", # Fully qualified source image for disks.
+ "teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool. Allowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and `TEARDOWN_NEVER`. `TEARDOWN_ALWAYS` means workers are always torn down regardless of whether the job succeeds. `TEARDOWN_ON_SUCCESS` means workers are torn down if the job succeeds. `TEARDOWN_NEVER` means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the `TEARDOWN_ALWAYS` policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
+ "machineType": "A String", # Machine type (e.g. "n1-standard-1"). If empty or unspecified, the service will attempt to choose a reasonable default.
+ "diskSizeGb": 42, # Size of root disk for VMs, in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
+ "diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
+ "numThreadsPerWorker": 42, # The number of threads per worker harness. If empty or unspecified, the service will choose a number of threads (according to the number of cores on the selected machine type for batch, or 1 by convention for streaming).
+ "autoscalingSettings": { # Settings for WorkerPool autoscaling. # Settings for autoscaling of this WorkerPool.
+ "algorithm": "A String", # The algorithm to use for autoscaling.
+ "maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
+ },
+ "metadata": { # Metadata to set on the Google Compute Engine VMs.
+ "a_key": "A String",
+ },
+ "kind": "A String", # The kind of the worker pool; currently only `harness` and `shuffle` are supported.
+ "taskrunnerSettings": { # Taskrunner configuration settings. # Settings passed through to Google Compute Engine workers when using the standard Dataflow task runner. Users should ignore this field.
+ "tempStoragePrefix": "A String", # The prefix of the resources the taskrunner should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
+ "commandlinesFileName": "A String", # The file to store preprocessing commands in.
+ "dataflowApiVersion": "A String", # The API version of endpoint, e.g. "v1b3"
+ "taskUser": "A String", # The UNIX user ID on the worker VM to use for tasks launched by taskrunner; e.g. "root".
+ "logDir": "A String", # The directory on the VM to store logs.
+ "parallelWorkerSettings": { # Provides data to pass through to the worker harness. # The settings to pass to the parallel worker harness.
+ "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
+ "reportingEnabled": True or False, # Whether to send work progress updates to the service.
+ "shuffleServicePath": "A String", # The Shuffle service path relative to the root URL, for example, "shuffle/v1beta1".
+ "workerId": "A String", # The ID of the worker running this pipeline.
+ "servicePath": "A String", # The Cloud Dataflow service path relative to the root URL, for example, "dataflow/v1b3/projects".
+ "baseUrl": "A String", # The base URL for accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
+ },
+ "vmId": "A String", # The ID string of the VM.
+ "continueOnException": True or False, # Whether to continue taskrunner if an exception is hit.
+ "baseUrl": "A String", # The base URL for the taskrunner to use when accessing Google Cloud APIs. When workers access Google Cloud APIs, they logically do so via relative URLs. If this field is specified, it supplies the base URL to use for resolving these relative URLs. The normative algorithm used is defined by RFC 1808, "Relative Uniform Resource Locators". If not specified, the default value is "http://www.googleapis.com/"
+ "streamingWorkerMainClass": "A String", # The streaming worker main class name.
+ "taskGroup": "A String", # The UNIX group ID on the worker VM to use for tasks launched by taskrunner; e.g. "wheel".
+ "harnessCommand": "A String", # The command to launch the worker harness.
+ "oauthScopes": [ # The OAuth2 scopes to be requested by the taskrunner in order to access the Cloud Dataflow API.
+ "A String",
+ ],
+ "languageHint": "A String", # The suggested backend language.
+ "baseTaskDir": "A String", # The location on the worker for task-specific subdirectories.
+ "alsologtostderr": True or False, # Whether to also send taskrunner log info to stderr.
+ "logToSerialconsole": True or False, # Whether to send taskrunner log info to Google Compute Engine VM serial console.
+ "workflowFileName": "A String", # The file to store the workflow in.
+ "logUploadLocation": "A String", # Indicates where to put logs. If this is not specified, the logs will not be uploaded. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
+ },
+ "defaultPackageSet": "A String", # The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language.
+ "network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
+ "workerHarnessContainerImage": "A String", # Required. Docker container image that executes the Cloud Dataflow worker harness, residing in Google Container Registry. Deprecated for the Fn API path. Use sdk_harness_container_images instead.
+ "packages": [ # Packages to be installed on workers.
+ { # The packages that must be installed in order for a worker to run the steps of the Cloud Dataflow job that will be assigned to its worker pool. This is the mechanism by which the Cloud Dataflow SDK causes code to be loaded onto the workers. For example, the Cloud Dataflow Java SDK might use this to install jars containing the user's code and all of the various dependencies (libraries, data files, etc.) required in order for that code to run.
+ "location": "A String", # The resource to read the package from. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket} bucket.storage.googleapis.com/
+ "name": "A String", # The name of the package.
+ },
+ ],
+ "poolArgs": { # Extra arguments for this worker pool.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
+ },
+ "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google Compute Engine API.
+ "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
+ "sdkHarnessContainerImages": [ # Set of SDK harness containers needed to execute this pipeline. This will only be set in the Fn API path. For non-cross-language pipelines this should have only one entry. Cross-language pipelines will have two or more entries.
+ { # Defines a SDK harness container for executing Dataflow pipelines.
+ "useSingleCorePerContainer": True or False, # If true, recommends the Dataflow service to use only one core per SDK container instance with this image. If false (or unset) recommends using more than one core per SDK container instance with this image for efficiency. Note that Dataflow service may choose to override this property if needed.
+ "containerImage": "A String", # A docker container image that resides in Google Container Registry.
+ },
+ ],
+ "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. Expected to be of the form "regions/REGION/subnetworks/SUBNETWORK".
+ "ipConfiguration": "A String", # Configuration for VM IPs.
+ "dataDisks": [ # Data disks that are used by a VM in this workflow.
+ { # Describes the data disk used by a workflow job.
+ "diskType": "A String", # Disk storage type, as defined by Google Compute Engine. This must be a disk type appropriate to the project and zone in which the workers will run. If unknown or unspecified, the service will attempt to choose a reasonable default. For example, the standard persistent disk type is a resource name typically ending in "pd-standard". If SSD persistent disks are available, the resource name typically ends with "pd-ssd". The actual valid values are defined the Google Compute Engine API, not by the Cloud Dataflow API; consult the Google Compute Engine documentation for more information about determining the set of available disk types for a particular project and zone. Google Compute Engine Disk types are local to a particular project in a particular zone, and so the resource name will typically look something like this: compute.googleapis.com/projects/project-id/zones/zone/diskTypes/pd-standard
+ "sizeGb": 42, # Size of disk in GB. If zero or unspecified, the service will attempt to choose a reasonable default.
+ "mountPoint": "A String", # Directory in a VM where disk is mounted.
+ },
+ ],
+ "zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
+ },
+ ],
+ "serviceAccountEmail": "A String", # Identity to run virtual machines as. Defaults to the default account.
+ "serviceKmsKeyName": "A String", # If set, contains the Cloud KMS key identifier used to encrypt data at rest, AKA a Customer Managed Encryption Key (CMEK). Format: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY
+ "flexResourceSchedulingGoal": "A String", # Which Flexible Resource Scheduling mode to run in.
+ "clusterManagerApiService": "A String", # The type of cluster manager API to use. If unknown or unspecified, the service will attempt to choose a reasonable default. This should be in the form of the API service name, e.g. "compute.googleapis.com".
+ "workerZone": "A String", # The Compute Engine zone (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1-a". Mutually exclusive with worker_region. If neither worker_region nor worker_zone is specified, a zone in the control plane's region is chosen based on available capacity.
+ "tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The system will append the suffix "/temp-{JOBNAME} to this resource prefix, where {JOBNAME} is the value of the job_name field. The resulting bucket and object prefix is used as the prefix of the resources used to store temporary data needed during the job execution. NOTE: This will override the value in taskrunner_settings. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
+ "workerRegion": "A String", # The Compute Engine region (https://cloud.google.com/compute/docs/regions-zones/regions-zones) in which worker processing should occur, e.g. "us-west1". Mutually exclusive with worker_zone. If neither worker_region nor worker_zone is specified, default to the control plane's region.
+ "internalExperiments": { # Experimental settings.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
+ },
+ "experiments": [ # The list of experiments to enable.
+ "A String",
+ ],
+ "userAgent": { # A description of the process that generated the request.
+ "a_key": "", # Properties of the object.
+ },
+ "dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset}
+ "version": { # A structure describing which components and their versions of the service are required in order to run the job.
+ "a_key": "", # Properties of the object.
+ },
+ },
+ "executionInfo": { # Additional information about how a Cloud Dataflow job will be executed that isn't contained in the submitted job. # Deprecated.
+ "stages": { # A mapping from each stage to the information about that stage.
+ "a_key": { # Contains information about how a particular google.dataflow.v1beta3.Step will be executed.
+ "stepName": [ # The steps associated with the execution stage. Note that stages may have several steps, and that a given step might be run by more than one stage.
+ "A String",
+ ],
+ },
+ },
+ },
+ "stageStates": [ # This field may be mutated by the Cloud Dataflow service; callers cannot mutate it.
+ { # A message describing the state of a particular execution stage.
+ "currentStateTime": "A String", # The time at which the stage transitioned to this state.
+ "executionStageState": "A String", # Executions stage states allow the same set of values as JobState.
+ "executionStageName": "A String", # The name of the execution stage.
+ },
+ ],
+ "createTime": "A String", # The timestamp when the job was initially created. Immutable and set by the Cloud Dataflow service.
+ "startTime": "A String", # The timestamp when the job was started (transitioned to JOB_STATE_PENDING). Flexible resource scheduling jobs are started with some delay after job creation, so start_time is unset before start and is updated when the job is started by the Cloud Dataflow service. For other jobs, start_time always equals to create_time and is immutable and set by the Cloud Dataflow service.
+ "currentState": "A String", # The current state of the job. Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise specified. A job in the `JOB_STATE_RUNNING` state may asynchronously enter a terminal state. After a job has reached a terminal state, no further state updates may be made. This field may be mutated by the Cloud Dataflow service; callers cannot mutate it.
+ "currentStateTime": "A String", # The timestamp associated with the current state.
+ "replaceJobId": "A String", # If this job is an update of an existing job, this field is the job ID of the job it replaced. When sending a `CreateJobRequest`, you can update a job by specifying it here. The job named here is stopped, and its intermediate state is transferred to this job.
+ "replacedByJobId": "A String", # If another job is an update of this job (and thus, this job is in `JOB_STATE_UPDATED`), this field contains the ID of that job.
+ "createdFromSnapshotId": "A String", # If this is specified, the job's initial state is populated from the given snapshot.
+ "pipelineDescription": { # A descriptive representation of submitted pipeline as well as the executed form. This data is provided by the Dataflow service for ease of visualizing the pipeline and interpreting Dataflow provided metrics. # Preliminary field: The format of this data may change at any time. A description of the user pipeline and stages through which it is executed. Created by Cloud Dataflow service. Only retrieved with JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL.
+ "executionPipelineStage": [ # Description of each stage of execution of the pipeline.
+ { # Description of the composing transforms, names/ids, and input/outputs of a stage of execution. Some composing transforms and sources may have been generated by the Dataflow service during execution planning.
+ "name": "A String", # Dataflow service generated name for this stage.
+ "id": "A String", # Dataflow service generated id for this stage.
+ "componentSource": [ # Collections produced and consumed by component transforms of this stage.
+ { # Description of an interstitial value between transforms in an execution stage.
+ "userName": "A String", # Human-readable name for this transform; may be user or system generated.
+ "originalTransformOrCollection": "A String", # User name for the original user transform or collection with which this source is most closely associated.
+ "name": "A String", # Dataflow service generated name for this source.
+ },
+ ],
+ "kind": "A String", # Type of tranform this stage is executing.
+ "inputSource": [ # Input sources for this stage.
+ { # Description of an input or output of an execution stage.
+ "sizeBytes": "A String", # Size of the source, if measurable.
+ "userName": "A String", # Human-readable name for this source; may be user or system generated.
+ "name": "A String", # Dataflow service generated name for this source.
+ "originalTransformOrCollection": "A String", # User name for the original user transform or collection with which this source is most closely associated.
+ },
+ ],
+ "outputSource": [ # Output sources for this stage.
+ { # Description of an input or output of an execution stage.
+ "sizeBytes": "A String", # Size of the source, if measurable.
+ "userName": "A String", # Human-readable name for this source; may be user or system generated.
+ "name": "A String", # Dataflow service generated name for this source.
+ "originalTransformOrCollection": "A String", # User name for the original user transform or collection with which this source is most closely associated.
+ },
+ ],
+ "prerequisiteStage": [ # Other stages that must complete before this stage can run.
+ "A String",
+ ],
+ "componentTransform": [ # Transforms that comprise this execution stage.
+ { # Description of a transform executed as part of an execution stage.
+ "name": "A String", # Dataflow service generated name for this source.
+ "originalTransform": "A String", # User name for the original user transform with which this transform is most closely associated.
+ "userName": "A String", # Human-readable name for this transform; may be user or system generated.
+ },
+ ],
+ },
+ ],
+ "originalPipelineTransform": [ # Description of each transform in the pipeline and collections between them.
+ { # Description of the type, names/ids, and input/outputs for a transform.
+ "inputCollectionName": [ # User names for all collection inputs to this transform.
+ "A String",
+ ],
+ "outputCollectionName": [ # User names for all collection outputs to this transform.
+ "A String",
+ ],
+ "name": "A String", # User provided name for this transform instance.
+ "id": "A String", # SDK generated id of this transform instance.
+ "displayData": [ # Transform-specific display data.
+ { # Data provided with a pipeline or transform to provide descriptive info.
+ "boolValue": True or False, # Contains value if the data is of a boolean type.
+ "url": "A String", # An optional full URL.
+ "key": "A String", # The key identifying the display data. This is intended to be used as a label for the display data when viewed in a dax monitoring system.
+ "floatValue": 3.14, # Contains value if the data is of float type.
+ "int64Value": "A String", # Contains value if the data is of int64 type.
+ "durationValue": "A String", # Contains value if the data is of duration type.
+ "namespace": "A String", # The namespace for the key. This is usually a class name or programming language namespace (i.e. python module) which defines the display data. This allows a dax monitoring system to specially handle the data and perform custom rendering.
+ "label": "A String", # An optional label to display in a dax UI for the element.
+ "timestampValue": "A String", # Contains value if the data is of timestamp type.
+ "javaClassValue": "A String", # Contains value if the data is of java class type.
+ "strValue": "A String", # Contains value if the data is of string type.
+ "shortStrValue": "A String", # A possible additional shorter value to display. For example a java_class_name_value of com.mypackage.MyDoFn will be stored with MyDoFn as the short_str_value and com.mypackage.MyDoFn as the java_class_name value. short_str_value can be displayed and java_class_name_value will be displayed as a tooltip.
+ },
+ ],
+ "kind": "A String", # Type of transform.
+ },
+ ],
+ "displayData": [ # Pipeline level display data.
+ { # Data provided with a pipeline or transform to provide descriptive info.
+ "boolValue": True or False, # Contains value if the data is of a boolean type.
+ "url": "A String", # An optional full URL.
+ "key": "A String", # The key identifying the display data. This is intended to be used as a label for the display data when viewed in a dax monitoring system.
+ "floatValue": 3.14, # Contains value if the data is of float type.
+ "int64Value": "A String", # Contains value if the data is of int64 type.
+ "durationValue": "A String", # Contains value if the data is of duration type.
+ "namespace": "A String", # The namespace for the key. This is usually a class name or programming language namespace (i.e. python module) which defines the display data. This allows a dax monitoring system to specially handle the data and perform custom rendering.
+ "label": "A String", # An optional label to display in a dax UI for the element.
+ "timestampValue": "A String", # Contains value if the data is of timestamp type.
+ "javaClassValue": "A String", # Contains value if the data is of java class type.
+ "strValue": "A String", # Contains value if the data is of string type.
+ "shortStrValue": "A String", # A possible additional shorter value to display. For example a java_class_name_value of com.mypackage.MyDoFn will be stored with MyDoFn as the short_str_value and com.mypackage.MyDoFn as the java_class_name value. short_str_value can be displayed and java_class_name_value will be displayed as a tooltip.
+ },
+ ],
+ },
+ "location": "A String", # The [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints) that contains this job.
+ "id": "A String", # The unique ID of this job. This field is set by the Cloud Dataflow service when the Job is created, and is immutable for the life of the job.
+ "name": "A String", # The user-specified Cloud Dataflow job name. Only one Job with a given name may exist in a project at any given time. If a caller attempts to create a Job with the same name as an already-existing Job, the attempt returns the existing Job. The name must match the regular expression `[a-z]([-a-z0-9]{0,38}[a-z0-9])?`
+ "projectId": "A String", # The ID of the Cloud Platform project that the job belongs to.
},
}</pre>
</div>