Update documentation: regenerate Dataflow v1b3 jobs reference (add debug resource, job labels, and worker ipConfiguration; fix @type typo and region-scoped subnetwork form)
diff --git a/docs/dyn/dataflow_v1b3.projects.jobs.html b/docs/dyn/dataflow_v1b3.projects.jobs.html
index c1835f8..28cb4e3 100644
--- a/docs/dyn/dataflow_v1b3.projects.jobs.html
+++ b/docs/dyn/dataflow_v1b3.projects.jobs.html
@@ -75,6 +75,11 @@
<h1><a href="dataflow_v1b3.html">Google Dataflow API</a> . <a href="dataflow_v1b3.projects.html">projects</a> . <a href="dataflow_v1b3.projects.jobs.html">jobs</a></h1>
<h2>Instance Methods</h2>
<p class="toc_element">
+ <code><a href="dataflow_v1b3.projects.jobs.debug.html">debug()</a></code>
+</p>
+<p class="firstline">Returns the debug Resource.</p>
+
+<p class="toc_element">
<code><a href="dataflow_v1b3.projects.jobs.messages.html">messages()</a></code>
</p>
<p class="firstline">Returns the messages Resource.</p>
@@ -118,6 +123,9 @@
"name": "A String", # The user-specified Dataflow job name. Only one Job with a given name may exist in a project at any given time. If a caller attempts to create a Job with the same name as an already-existing Job, the attempt will return the existing Job. The name must match the regular expression [a-z]([-a-z0-9]{0,38}[a-z0-9])?
"replacedByJobId": "A String", # If another job is an update of this job (and thus, this job is in JOB_STATE_UPDATED), this field will contain the ID of that job.
"projectId": "A String", # The project which owns the job.
+ "labels": { # User-defined labels for this job. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally constrained to be <= 128 bytes in size.
+ "a_key": "A String",
+ },
"transformNameMapping": { # Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job.
"a_key": "A String",
},
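The labels map documented above is a flat string-to-string dict in the Job body. A hedged sketch of a fragment that satisfies the stated key regexp \p{Ll}\p{Lo}{0,62} and value regexp [\p{Ll}\p{Lo}\p{N}_-]{0,63} (the job and label names are made up for illustration):

    # Hypothetical Job-body fragment; keys are lowercase letters only,
    # values may add digits, '_' and '-', and each must stay <= 128 bytes.
    job_body = {
        'name': 'wordcount-staging',
        'labels': {
            'team': 'data-platform',
            'env': 'staging-01',
        },
    }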
@@ -128,7 +136,7 @@
},
"tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The system will append the suffix "/temp-{JOBNAME} to this resource prefix, where {JOBNAME} is the value of the job_name field. The resulting bucket and object prefix is used as the prefix of the resources used to store temporary data needed during the job execution. NOTE: This will override the value in taskrunner_settings. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
"internalExperiments": { # Experimental settings.
- "a_key": "", # Properties of the object. Contains field @ype with type URL.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
},
"dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset}
"experiments": [ # The list of experiments to enable.
@@ -179,6 +187,7 @@
"network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
"zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
"numThreadsPerWorker": 42, # The number of threads per worker harness. If empty or unspecified, the service will choose a number of threads (according to the number of cores on the selected machine type for batch, or 1 by convention for streaming).
+ "ipConfiguration": "A String", # Configuration for VM IPs.
"onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google Compute Engine API.
"diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
"teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool. Allowed values are: TEARDOWN_ALWAYS, TEARDOWN_ON_SUCCESS, and TEARDOWN_NEVER. TEARDOWN_ALWAYS means workers are always torn down regardless of whether the job succeeds. TEARDOWN_ON_SUCCESS means workers are torn down if the job succeeds. TEARDOWN_NEVER means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the TEARDOWN_ALWAYS policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
@@ -187,7 +196,7 @@
"a_key": "A String",
},
"poolArgs": { # Extra arguments for this worker pool.
- "a_key": "", # Properties of the object. Contains field @ype with type URL.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
},
"numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
"workerHarnessContainerImage": "A String", # Docker container image that executes Dataflow worker harness, residing in Google Container Registry. Required.
@@ -202,7 +211,7 @@
"maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
"algorithm": "A String", # The algorithm to use for autoscaling.
},
- "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. Expected to be of the form "zones/ZONE/subnetworks/SUBNETWORK".
+ "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. Expected to be of the form "regions/REGION/subnetworks/SUBNETWORK".
"dataDisks": [ # Data disks that are used by a VM in this workflow.
{ # Describes the data disk used by a workflow job.
"mountPoint": "A String", # Directory in a VM where disk is mounted.
@@ -255,6 +264,9 @@
"name": "A String", # The user-specified Dataflow job name. Only one Job with a given name may exist in a project at any given time. If a caller attempts to create a Job with the same name as an already-existing Job, the attempt will return the existing Job. The name must match the regular expression [a-z]([-a-z0-9]{0,38}[a-z0-9])?
"replacedByJobId": "A String", # If another job is an update of this job (and thus, this job is in JOB_STATE_UPDATED), this field will contain the ID of that job.
"projectId": "A String", # The project which owns the job.
+ "labels": { # User-defined labels for this job. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally constrained to be <= 128 bytes in size.
+ "a_key": "A String",
+ },
"transformNameMapping": { # Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job.
"a_key": "A String",
},
@@ -265,7 +277,7 @@
},
"tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The system will append the suffix "/temp-{JOBNAME} to this resource prefix, where {JOBNAME} is the value of the job_name field. The resulting bucket and object prefix is used as the prefix of the resources used to store temporary data needed during the job execution. NOTE: This will override the value in taskrunner_settings. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
"internalExperiments": { # Experimental settings.
- "a_key": "", # Properties of the object. Contains field @ype with type URL.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
},
"dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset}
"experiments": [ # The list of experiments to enable.
@@ -316,6 +328,7 @@
"network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
"zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
"numThreadsPerWorker": 42, # The number of threads per worker harness. If empty or unspecified, the service will choose a number of threads (according to the number of cores on the selected machine type for batch, or 1 by convention for streaming).
+ "ipConfiguration": "A String", # Configuration for VM IPs.
"onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google Compute Engine API.
"diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
"teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool. Allowed values are: TEARDOWN_ALWAYS, TEARDOWN_ON_SUCCESS, and TEARDOWN_NEVER. TEARDOWN_ALWAYS means workers are always torn down regardless of whether the job succeeds. TEARDOWN_ON_SUCCESS means workers are torn down if the job succeeds. TEARDOWN_NEVER means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the TEARDOWN_ALWAYS policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
@@ -324,7 +337,7 @@
"a_key": "A String",
},
"poolArgs": { # Extra arguments for this worker pool.
- "a_key": "", # Properties of the object. Contains field @ype with type URL.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
},
"numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
"workerHarnessContainerImage": "A String", # Docker container image that executes Dataflow worker harness, residing in Google Container Registry. Required.
@@ -339,7 +352,7 @@
"maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
"algorithm": "A String", # The algorithm to use for autoscaling.
},
- "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. Expected to be of the form "zones/ZONE/subnetworks/SUBNETWORK".
+ "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. Expected to be of the form "regions/REGION/subnetworks/SUBNETWORK".
"dataDisks": [ # Data disks that are used by a VM in this workflow.
{ # Describes the data disk used by a workflow job.
"mountPoint": "A String", # Directory in a VM where disk is mounted.
@@ -399,6 +412,9 @@
"name": "A String", # The user-specified Dataflow job name. Only one Job with a given name may exist in a project at any given time. If a caller attempts to create a Job with the same name as an already-existing Job, the attempt will return the existing Job. The name must match the regular expression [a-z]([-a-z0-9]{0,38}[a-z0-9])?
"replacedByJobId": "A String", # If another job is an update of this job (and thus, this job is in JOB_STATE_UPDATED), this field will contain the ID of that job.
"projectId": "A String", # The project which owns the job.
+ "labels": { # User-defined labels for this job. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally constrained to be <= 128 bytes in size.
+ "a_key": "A String",
+ },
"transformNameMapping": { # Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job.
"a_key": "A String",
},
@@ -409,7 +425,7 @@
},
"tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The system will append the suffix "/temp-{JOBNAME} to this resource prefix, where {JOBNAME} is the value of the job_name field. The resulting bucket and object prefix is used as the prefix of the resources used to store temporary data needed during the job execution. NOTE: This will override the value in taskrunner_settings. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
"internalExperiments": { # Experimental settings.
- "a_key": "", # Properties of the object. Contains field @ype with type URL.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
},
"dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset}
"experiments": [ # The list of experiments to enable.
@@ -460,6 +476,7 @@
"network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
"zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
"numThreadsPerWorker": 42, # The number of threads per worker harness. If empty or unspecified, the service will choose a number of threads (according to the number of cores on the selected machine type for batch, or 1 by convention for streaming).
+ "ipConfiguration": "A String", # Configuration for VM IPs.
"onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google Compute Engine API.
"diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
"teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool. Allowed values are: TEARDOWN_ALWAYS, TEARDOWN_ON_SUCCESS, and TEARDOWN_NEVER. TEARDOWN_ALWAYS means workers are always torn down regardless of whether the job succeeds. TEARDOWN_ON_SUCCESS means workers are torn down if the job succeeds. TEARDOWN_NEVER means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the TEARDOWN_ALWAYS policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
@@ -468,7 +485,7 @@
"a_key": "A String",
},
"poolArgs": { # Extra arguments for this worker pool.
- "a_key": "", # Properties of the object. Contains field @ype with type URL.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
},
"numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
"workerHarnessContainerImage": "A String", # Docker container image that executes Dataflow worker harness, residing in Google Container Registry. Required.
@@ -483,7 +500,7 @@
"maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
"algorithm": "A String", # The algorithm to use for autoscaling.
},
- "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. Expected to be of the form "zones/ZONE/subnetworks/SUBNETWORK".
+ "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. Expected to be of the form "regions/REGION/subnetworks/SUBNETWORK".
"dataDisks": [ # Data disks that are used by a VM in this workflow.
{ # Describes the data disk used by a workflow job.
"mountPoint": "A String", # Directory in a VM where disk is mounted.
@@ -586,6 +603,9 @@
"name": "A String", # The user-specified Dataflow job name. Only one Job with a given name may exist in a project at any given time. If a caller attempts to create a Job with the same name as an already-existing Job, the attempt will return the existing Job. The name must match the regular expression [a-z]([-a-z0-9]{0,38}[a-z0-9])?
"replacedByJobId": "A String", # If another job is an update of this job (and thus, this job is in JOB_STATE_UPDATED), this field will contain the ID of that job.
"projectId": "A String", # The project which owns the job.
+ "labels": { # User-defined labels for this job. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally constrained to be <= 128 bytes in size.
+ "a_key": "A String",
+ },
"transformNameMapping": { # Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job.
"a_key": "A String",
},
@@ -596,7 +616,7 @@
},
"tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The system will append the suffix "/temp-{JOBNAME} to this resource prefix, where {JOBNAME} is the value of the job_name field. The resulting bucket and object prefix is used as the prefix of the resources used to store temporary data needed during the job execution. NOTE: This will override the value in taskrunner_settings. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
"internalExperiments": { # Experimental settings.
- "a_key": "", # Properties of the object. Contains field @ype with type URL.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
},
"dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset}
"experiments": [ # The list of experiments to enable.
@@ -647,6 +667,7 @@
"network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
"zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
"numThreadsPerWorker": 42, # The number of threads per worker harness. If empty or unspecified, the service will choose a number of threads (according to the number of cores on the selected machine type for batch, or 1 by convention for streaming).
+ "ipConfiguration": "A String", # Configuration for VM IPs.
"onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google Compute Engine API.
"diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
"teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool. Allowed values are: TEARDOWN_ALWAYS, TEARDOWN_ON_SUCCESS, and TEARDOWN_NEVER. TEARDOWN_ALWAYS means workers are always torn down regardless of whether the job succeeds. TEARDOWN_ON_SUCCESS means workers are torn down if the job succeeds. TEARDOWN_NEVER means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the TEARDOWN_ALWAYS policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
@@ -655,7 +676,7 @@
"a_key": "A String",
},
"poolArgs": { # Extra arguments for this worker pool.
- "a_key": "", # Properties of the object. Contains field @ype with type URL.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
},
"numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
"workerHarnessContainerImage": "A String", # Docker container image that executes Dataflow worker harness, residing in Google Container Registry. Required.
@@ -670,7 +691,7 @@
"maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
"algorithm": "A String", # The algorithm to use for autoscaling.
},
- "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. Expected to be of the form "zones/ZONE/subnetworks/SUBNETWORK".
+ "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. Expected to be of the form "regions/REGION/subnetworks/SUBNETWORK".
"dataDisks": [ # Data disks that are used by a VM in this workflow.
{ # Describes the data disk used by a workflow job.
"mountPoint": "A String", # Directory in a VM where disk is mounted.
@@ -743,6 +764,9 @@
"name": "A String", # The user-specified Dataflow job name. Only one Job with a given name may exist in a project at any given time. If a caller attempts to create a Job with the same name as an already-existing Job, the attempt will return the existing Job. The name must match the regular expression [a-z]([-a-z0-9]{0,38}[a-z0-9])?
"replacedByJobId": "A String", # If another job is an update of this job (and thus, this job is in JOB_STATE_UPDATED), this field will contain the ID of that job.
"projectId": "A String", # The project which owns the job.
+ "labels": { # User-defined labels for this job. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally constrained to be <= 128 bytes in size.
+ "a_key": "A String",
+ },
"transformNameMapping": { # Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job.
"a_key": "A String",
},
@@ -753,7 +777,7 @@
},
"tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The system will append the suffix "/temp-{JOBNAME} to this resource prefix, where {JOBNAME} is the value of the job_name field. The resulting bucket and object prefix is used as the prefix of the resources used to store temporary data needed during the job execution. NOTE: This will override the value in taskrunner_settings. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
"internalExperiments": { # Experimental settings.
- "a_key": "", # Properties of the object. Contains field @ype with type URL.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
},
"dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset}
"experiments": [ # The list of experiments to enable.
@@ -804,6 +828,7 @@
"network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
"zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
"numThreadsPerWorker": 42, # The number of threads per worker harness. If empty or unspecified, the service will choose a number of threads (according to the number of cores on the selected machine type for batch, or 1 by convention for streaming).
+ "ipConfiguration": "A String", # Configuration for VM IPs.
"onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google Compute Engine API.
"diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
"teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool. Allowed values are: TEARDOWN_ALWAYS, TEARDOWN_ON_SUCCESS, and TEARDOWN_NEVER. TEARDOWN_ALWAYS means workers are always torn down regardless of whether the job succeeds. TEARDOWN_ON_SUCCESS means workers are torn down if the job succeeds. TEARDOWN_NEVER means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the TEARDOWN_ALWAYS policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
@@ -812,7 +837,7 @@
"a_key": "A String",
},
"poolArgs": { # Extra arguments for this worker pool.
- "a_key": "", # Properties of the object. Contains field @ype with type URL.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
},
"numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
"workerHarnessContainerImage": "A String", # Docker container image that executes Dataflow worker harness, residing in Google Container Registry. Required.
@@ -827,7 +852,7 @@
"maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
"algorithm": "A String", # The algorithm to use for autoscaling.
},
- "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. Expected to be of the form "zones/ZONE/subnetworks/SUBNETWORK".
+ "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. Expected to be of the form "regions/REGION/subnetworks/SUBNETWORK".
"dataDisks": [ # Data disks that are used by a VM in this workflow.
{ # Describes the data disk used by a workflow job.
"mountPoint": "A String", # Directory in a VM where disk is mounted.
@@ -878,6 +903,9 @@
"name": "A String", # The user-specified Dataflow job name. Only one Job with a given name may exist in a project at any given time. If a caller attempts to create a Job with the same name as an already-existing Job, the attempt will return the existing Job. The name must match the regular expression [a-z]([-a-z0-9]{0,38}[a-z0-9])?
"replacedByJobId": "A String", # If another job is an update of this job (and thus, this job is in JOB_STATE_UPDATED), this field will contain the ID of that job.
"projectId": "A String", # The project which owns the job.
+ "labels": { # User-defined labels for this job. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and values are additionally constrained to be <= 128 bytes in size.
+ "a_key": "A String",
+ },
"transformNameMapping": { # Map of transform name prefixes of the job to be replaced to the corresponding name prefixes of the new job.
"a_key": "A String",
},
@@ -888,7 +916,7 @@
},
"tempStoragePrefix": "A String", # The prefix of the resources the system should use for temporary storage. The system will append the suffix "/temp-{JOBNAME} to this resource prefix, where {JOBNAME} is the value of the job_name field. The resulting bucket and object prefix is used as the prefix of the resources used to store temporary data needed during the job execution. NOTE: This will override the value in taskrunner_settings. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
"internalExperiments": { # Experimental settings.
- "a_key": "", # Properties of the object. Contains field @ype with type URL.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
},
"dataset": "A String", # The dataset for the current project where various workflow related tables are stored. The supported resource type is: Google BigQuery: bigquery.googleapis.com/{dataset}
"experiments": [ # The list of experiments to enable.
@@ -939,6 +967,7 @@
"network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
"zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
"numThreadsPerWorker": 42, # The number of threads per worker harness. If empty or unspecified, the service will choose a number of threads (according to the number of cores on the selected machine type for batch, or 1 by convention for streaming).
+ "ipConfiguration": "A String", # Configuration for VM IPs.
"onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google Compute Engine API.
"diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
"teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool. Allowed values are: TEARDOWN_ALWAYS, TEARDOWN_ON_SUCCESS, and TEARDOWN_NEVER. TEARDOWN_ALWAYS means workers are always torn down regardless of whether the job succeeds. TEARDOWN_ON_SUCCESS means workers are torn down if the job succeeds. TEARDOWN_NEVER means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the TEARDOWN_ALWAYS policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
@@ -947,7 +976,7 @@
"a_key": "A String",
},
"poolArgs": { # Extra arguments for this worker pool.
- "a_key": "", # Properties of the object. Contains field @ype with type URL.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
},
"numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
"workerHarnessContainerImage": "A String", # Docker container image that executes Dataflow worker harness, residing in Google Container Registry. Required.
@@ -962,7 +991,7 @@
"maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
"algorithm": "A String", # The algorithm to use for autoscaling.
},
- "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. Expected to be of the form "zones/ZONE/subnetworks/SUBNETWORK".
+ "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. Expected to be of the form "regions/REGION/subnetworks/SUBNETWORK".
"dataDisks": [ # Data disks that are used by a VM in this workflow.
{ # Describes the data disk used by a workflow job.
"mountPoint": "A String", # Directory in a VM where disk is mounted.