Regen docs (#373)
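
The regenerated page documents the `projects.templates` methods of the Dataflow `v1b3` client surface. For context, a minimal sketch of launching a template through `google-api-python-client` against the structures shown below; the project, bucket, template path, and parameter names are hypothetical placeholders, and the launch body is the `LaunchTemplateParameters` message (job name, template parameters, runtime environment), not the full `Job` resource:

    # Minimal sketch: launching a Dataflow job from a template with
    # google-api-python-client. Assumes Application Default Credentials;
    # project, bucket, and parameter names are hypothetical placeholders.
    from googleapiclient.discovery import build

    service = build("dataflow", "v1b3")

    body = {
        "jobName": "example-wordcount",  # placeholder job name
        "parameters": {  # template-specific parameters (placeholders)
            "inputFile": "gs://example-bucket/input.txt",
            "output": "gs://example-bucket/output",
        },
        "environment": {  # runtime environment settings for the launch
            "tempLocation": "gs://example-bucket/temp",
            "zone": "us-central1-f",
        },
    }

    request = service.projects().templates().launch(
        projectId="example-project",
        gcsPath="gs://example-bucket/templates/wordcount",  # template location
        body=body,
    )
    response = request.execute()

    # The response mirrors the LaunchTemplateResponse documented below:
    # on success it carries the launched Job under the "job" key.
    print(response.get("job", {}).get("id"))
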
diff --git a/docs/dyn/dataflow_v1b3.projects.templates.html b/docs/dyn/dataflow_v1b3.projects.templates.html
index c325c0f..197f0c9 100644
--- a/docs/dyn/dataflow_v1b3.projects.templates.html
+++ b/docs/dyn/dataflow_v1b3.projects.templates.html
@@ -161,13 +161,12 @@
# size.
"a_key": "A String",
},
- "location": "A String", # The location that contains this job.
- "createTime": "A String", # The timestamp when the job was initially created. Immutable and set by the
- # Cloud Dataflow service.
"transformNameMapping": { # The map of transform name prefixes of the job to be replaced to the
# corresponding name prefixes of the new job.
"a_key": "A String",
},
+ "createTime": "A String", # The timestamp when the job was initially created. Immutable and set by the
+ # Cloud Dataflow service.
"environment": { # Describes the environment in which a Dataflow Job runs. # The environment for the job.
"version": { # A structure describing which components and their versions of the service
# are required in order to run the job.
@@ -221,49 +220,6 @@
# multiple pools, in order to match the various computational
# requirements of the various stages of the job.
"diskSourceImage": "A String", # Fully qualified source image for disks.
- "ipConfiguration": "A String", # Configuration for VM IPs.
- "kind": "A String", # The kind of the worker pool; currently only `harness` and `shuffle`
- # are supported.
- "machineType": "A String", # Machine type (e.g. "n1-standard-1"). If empty or unspecified, the
- # service will attempt to choose a reasonable default.
- "network": "A String", # Network to which VMs will be assigned. If empty or unspecified,
- # the service will use the network "default".
- "zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service
- # will attempt to choose a reasonable default.
- "diskSizeGb": 42, # Size of root disk for VMs, in GB. If zero or unspecified, the service will
- # attempt to choose a reasonable default.
- "metadata": { # Metadata to set on the Google Compute Engine VMs.
- "a_key": "A String",
- },
- "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google
- # Compute Engine API.
- "teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool.
- # Allowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and
- # `TEARDOWN_NEVER`.
- # `TEARDOWN_ALWAYS` means workers are always torn down regardless of whether
- # the job succeeds. `TEARDOWN_ON_SUCCESS` means workers are torn down
- # if the job succeeds. `TEARDOWN_NEVER` means the workers are never torn
- # down.
- #
- # If the workers are not torn down by the service, they will
- # continue to run and use Google Compute Engine VM resources in the
- # user's project until they are explicitly terminated by the user.
- # Because of this, Google recommends using the `TEARDOWN_ALWAYS`
- # policy except for small, manually supervised test jobs.
- #
- # If unknown or unspecified, the service will attempt to choose a reasonable
- # default.
- "numThreadsPerWorker": 42, # The number of threads per worker harness. If empty or unspecified, the
- # service will choose a number of threads (according to the number of cores
- # on the selected machine type for batch, or 1 by convention for streaming).
- "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. Expected to be of
- # the form "regions/REGION/subnetworks/SUBNETWORK".
- "poolArgs": { # Extra arguments for this worker pool.
- "a_key": "", # Properties of the object. Contains field @type with type URL.
- },
- "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to
- # execute the job. If zero or unspecified, the service will
- # attempt to choose a reasonable default.
"taskrunnerSettings": { # Taskrunner configuration settings. # Settings passed through to Google Compute Engine workers when
# using the standard Dataflow task runner. Users should ignore
# this field.
@@ -276,24 +232,17 @@
# Google Cloud Storage:
# storage.googleapis.com/{bucket}/{object}
# bucket.storage.googleapis.com/{object}
- "commandlinesFileName": "A String", # The file to store preprocessing commands in.
- "alsologtostderr": True or False, # Whether to also send taskrunner log info to stderr.
- "continueOnException": True or False, # Whether to continue taskrunner if an exception is hit.
- "baseTaskDir": "A String", # The location on the worker for task-specific subdirectories.
- "vmId": "A String", # The ID string of the VM.
- "taskGroup": "A String", # The UNIX group ID on the worker VM to use for tasks launched by
- # taskrunner; e.g. "wheel".
"taskUser": "A String", # The UNIX user ID on the worker VM to use for tasks launched by
# taskrunner; e.g. "root".
+ "commandlinesFileName": "A String", # The file to store preprocessing commands in.
+ "alsologtostderr": True or False, # Whether to also send taskrunner log info to stderr.
+ "vmId": "A String", # The ID string of the VM.
+ "baseTaskDir": "A String", # The location on the worker for task-specific subdirectories.
+      "continueOnException": True or False, # Whether to continue the taskrunner if an exception is hit.
"oauthScopes": [ # The OAuth2 scopes to be requested by the taskrunner in order to
# access the Cloud Dataflow API.
"A String",
],
- "languageHint": "A String", # The suggested backend language.
- "logToSerialconsole": True or False, # Whether to send taskrunner log info to Google Compute Engine VM serial
- # console.
- "streamingWorkerMainClass": "A String", # The streaming worker main class name.
- "logDir": "A String", # The directory on the VM to store logs.
"parallelWorkerSettings": { # Provides data to pass through to the worker harness. # The settings to pass to the parallel worker harness.
"reportingEnabled": True or False, # Whether to send work progress updates to the service.
"shuffleServicePath": "A String", # The Shuffle service path relative to the root URL, for example,
@@ -320,16 +269,14 @@
# storage.googleapis.com/{bucket}/{object}
# bucket.storage.googleapis.com/{object}
},
+ "taskGroup": "A String", # The UNIX group ID on the worker VM to use for tasks launched by
+ # taskrunner; e.g. "wheel".
+ "languageHint": "A String", # The suggested backend language.
+ "logToSerialconsole": True or False, # Whether to send taskrunner log info to Google Compute Engine VM serial
+ # console.
+ "streamingWorkerMainClass": "A String", # The streaming worker main class name.
+ "logDir": "A String", # The directory on the VM to store logs.
"dataflowApiVersion": "A String", # The API version of endpoint, e.g. "v1b3"
- "harnessCommand": "A String", # The command to launch the worker harness.
- "tempStoragePrefix": "A String", # The prefix of the resources the taskrunner should use for
- # temporary storage.
- #
- # The supported resource type is:
- #
- # Google Cloud Storage:
- # storage.googleapis.com/{bucket}/{object}
- # bucket.storage.googleapis.com/{object}
"baseUrl": "A String", # The base URL for the taskrunner to use when accessing Google Cloud APIs.
#
# When workers access Google Cloud APIs, they logically do so via
@@ -339,33 +286,26 @@
# Locators".
#
# If not specified, the default value is "http://www.googleapis.com/"
- },
- "defaultPackageSet": "A String", # The default package set to install. This allows the service to
- # select a default set of packages which are useful to worker
- # harnesses written in a particular language.
- "packages": [ # Packages to be installed on workers.
- { # The packages that must be installed in order for a worker to run the
- # steps of the Cloud Dataflow job that will be assigned to its worker
- # pool.
+ "harnessCommand": "A String", # The command to launch the worker harness.
+ "tempStoragePrefix": "A String", # The prefix of the resources the taskrunner should use for
+ # temporary storage.
#
- # This is the mechanism by which the Cloud Dataflow SDK causes code to
- # be loaded onto the workers. For example, the Cloud Dataflow Java SDK
- # might use this to install jars containing the user's code and all of the
- # various dependencies (libraries, data files, etc.) required in order
- # for that code to run.
- "location": "A String", # The resource to read the package from. The supported resource type is:
- #
- # Google Cloud Storage:
- #
- # storage.googleapis.com/{bucket}
- # bucket.storage.googleapis.com/
- "name": "A String", # The name of the package.
- },
- ],
- "autoscalingSettings": { # Settings for WorkerPool autoscaling. # Settings for autoscaling of this WorkerPool.
- "algorithm": "A String", # The algorithm to use for autoscaling.
- "maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
+ # The supported resource type is:
+ #
+ # Google Cloud Storage:
+ # storage.googleapis.com/{bucket}/{object}
+ # bucket.storage.googleapis.com/{object}
},
+ "kind": "A String", # The kind of the worker pool; currently only `harness` and `shuffle`
+ # are supported.
+ "machineType": "A String", # Machine type (e.g. "n1-standard-1"). If empty or unspecified, the
+ # service will attempt to choose a reasonable default.
+ "network": "A String", # Network to which VMs will be assigned. If empty or unspecified,
+ # the service will use the network "default".
+ "zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service
+ # will attempt to choose a reasonable default.
+ "diskSizeGb": 42, # Size of root disk for VMs, in GB. If zero or unspecified, the service will
+ # attempt to choose a reasonable default.
"dataDisks": [ # Data disks that are used by a VM in this workflow.
{ # Describes the data disk used by a workflow job.
"mountPoint": "A String", # Directory in a VM where disk is mounted.
@@ -391,13 +331,78 @@
# compute.googleapis.com/projects/project-id/zones/zone/diskTypes/pd-standard
},
],
- "diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will
+      "teardownPolicy": "A String", # Sets the policy for determining when to turn down the worker pool.
+ # Allowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and
+ # `TEARDOWN_NEVER`.
+ # `TEARDOWN_ALWAYS` means workers are always torn down regardless of whether
+ # the job succeeds. `TEARDOWN_ON_SUCCESS` means workers are torn down
+ # if the job succeeds. `TEARDOWN_NEVER` means the workers are never torn
+ # down.
+ #
+ # If the workers are not torn down by the service, they will
+ # continue to run and use Google Compute Engine VM resources in the
+ # user's project until they are explicitly terminated by the user.
+ # Because of this, Google recommends using the `TEARDOWN_ALWAYS`
+ # policy except for small, manually supervised test jobs.
+ #
+ # If unknown or unspecified, the service will attempt to choose a reasonable
+ # default.
+ "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google
+ # Compute Engine API.
+ "ipConfiguration": "A String", # Configuration for VM IPs.
+ "numThreadsPerWorker": 42, # The number of threads per worker harness. If empty or unspecified, the
+ # service will choose a number of threads (according to the number of cores
+ # on the selected machine type for batch, or 1 by convention for streaming).
+ "poolArgs": { # Extra arguments for this worker pool.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
+ },
+ "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to
+ # execute the job. If zero or unspecified, the service will
# attempt to choose a reasonable default.
"workerHarnessContainerImage": "A String", # Required. Docker container image that executes the Cloud Dataflow worker
# harness, residing in Google Container Registry.
+ "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. Expected to be of
+ # the form "regions/REGION/subnetworks/SUBNETWORK".
+ "packages": [ # Packages to be installed on workers.
+ { # The packages that must be installed in order for a worker to run the
+ # steps of the Cloud Dataflow job that will be assigned to its worker
+ # pool.
+ #
+ # This is the mechanism by which the Cloud Dataflow SDK causes code to
+ # be loaded onto the workers. For example, the Cloud Dataflow Java SDK
+ # might use this to install jars containing the user's code and all of the
+ # various dependencies (libraries, data files, etc.) required in order
+ # for that code to run.
+ "location": "A String", # The resource to read the package from. The supported resource type is:
+ #
+ # Google Cloud Storage:
+ #
+ # storage.googleapis.com/{bucket}
+ # bucket.storage.googleapis.com/
+ "name": "A String", # The name of the package.
+ },
+ ],
+ "autoscalingSettings": { # Settings for WorkerPool autoscaling. # Settings for autoscaling of this WorkerPool.
+ "algorithm": "A String", # The algorithm to use for autoscaling.
+ "maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
+ },
+ "defaultPackageSet": "A String", # The default package set to install. This allows the service to
+ # select a default set of packages which are useful to worker
+ # harnesses written in a particular language.
+ "diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will
+ # attempt to choose a reasonable default.
+ "metadata": { # Metadata to set on the Google Compute Engine VMs.
+ "a_key": "A String",
+ },
},
],
},
+ "replaceJobId": "A String", # If this job is an update of an existing job, this field is the job ID
+ # of the job it replaced.
+ #
+ # When sending a `CreateJobRequest`, you can update a job by specifying it
+ # here. The job named here is stopped, and its intermediate state is
+ # transferred to this job.
"pipelineDescription": { # A descriptive representation of submitted pipeline as well as the executed # Preliminary field: The format of this data may change at any time.
# A description of the user pipeline and stages through which it is executed.
# Created by Cloud Dataflow service. Only retrieved with
@@ -413,9 +418,6 @@
],
"displayData": [ # Transform-specific display data.
{ # Data provided with a pipeline or transform to provide descriptive info.
- "key": "A String", # The key identifying the display data.
- # This is intended to be used as a label for the display data
- # when viewed in a dax monitoring system.
"shortStrValue": "A String", # A possible additional shorter value to display.
# For example a java_class_name_value of com.mypackage.MyDoFn
# will be stored with MyDoFn as the short_str_value and
@@ -423,6 +425,7 @@
# short_str_value can be displayed and java_class_name_value
# will be displayed as a tooltip.
"timestampValue": "A String", # Contains value if the data is of timestamp type.
+ "durationValue": "A String", # Contains value if the data is of duration type.
"url": "A String", # An optional full URL.
"floatValue": 3.14, # Contains value if the data is of float type.
"namespace": "A String", # The namespace for the key. This is usually a class name or programming
@@ -433,7 +436,9 @@
"label": "A String", # An optional label to display in a dax UI for the element.
"boolValue": True or False, # Contains value if the data is of a boolean type.
"strValue": "A String", # Contains value if the data is of string type.
- "durationValue": "A String", # Contains value if the data is of duration type.
+ "key": "A String", # The key identifying the display data.
+ # This is intended to be used as a label for the display data
+ # when viewed in a dax monitoring system.
"int64Value": "A String", # Contains value if the data is of int64 type.
},
],
@@ -445,9 +450,6 @@
],
"displayData": [ # Pipeline level display data.
{ # Data provided with a pipeline or transform to provide descriptive info.
- "key": "A String", # The key identifying the display data.
- # This is intended to be used as a label for the display data
- # when viewed in a dax monitoring system.
"shortStrValue": "A String", # A possible additional shorter value to display.
# For example a java_class_name_value of com.mypackage.MyDoFn
# will be stored with MyDoFn as the short_str_value and
@@ -455,6 +457,7 @@
# short_str_value can be displayed and java_class_name_value
# will be displayed as a tooltip.
"timestampValue": "A String", # Contains value if the data is of timestamp type.
+ "durationValue": "A String", # Contains value if the data is of duration type.
"url": "A String", # An optional full URL.
"floatValue": 3.14, # Contains value if the data is of float type.
"namespace": "A String", # The namespace for the key. This is usually a class name or programming
@@ -465,7 +468,9 @@
"label": "A String", # An optional label to display in a dax UI for the element.
"boolValue": True or False, # Contains value if the data is of a boolean type.
"strValue": "A String", # Contains value if the data is of string type.
- "durationValue": "A String", # Contains value if the data is of duration type.
+ "key": "A String", # The key identifying the display data.
+ # This is intended to be used as a label for the display data
+ # when viewed in a dax monitoring system.
"int64Value": "A String", # Contains value if the data is of int64 type.
},
],
@@ -487,19 +492,19 @@
"outputSource": [ # Output sources for this stage.
{ # Description of an input or output of an execution stage.
"userName": "A String", # Human-readable name for this source; may be user or system generated.
+ "sizeBytes": "A String", # Size of the source, if measurable.
+ "name": "A String", # Dataflow service generated name for this source.
"originalTransformOrCollection": "A String", # User name for the original user transform or collection with which this
# source is most closely associated.
- "name": "A String", # Dataflow service generated name for this source.
- "sizeBytes": "A String", # Size of the source, if measurable.
},
],
"inputSource": [ # Input sources for this stage.
{ # Description of an input or output of an execution stage.
"userName": "A String", # Human-readable name for this source; may be user or system generated.
+ "sizeBytes": "A String", # Size of the source, if measurable.
+ "name": "A String", # Dataflow service generated name for this source.
"originalTransformOrCollection": "A String", # User name for the original user transform or collection with which this
# source is most closely associated.
- "name": "A String", # Dataflow service generated name for this source.
- "sizeBytes": "A String", # Size of the source, if measurable.
},
],
"componentTransform": [ # Transforms that comprise this execution stage.
@@ -541,26 +546,16 @@
# Note that the Cloud Dataflow service may be used to run many different
# types of jobs, not just Map-Reduce.
"kind": "A String", # The kind of step in the Cloud Dataflow job.
+ "name": "A String", # The name that identifies the step. This must be unique for each
+ # step with respect to all other steps in the Cloud Dataflow job.
"properties": { # Named properties associated with the step. Each kind of
# predefined step has its own required set of properties.
# Must be provided on Create. Only retrieved with JOB_VIEW_ALL.
"a_key": "", # Properties of the object.
},
- "name": "A String", # The name that identifies the step. This must be unique for each
- # step with respect to all other steps in the Cloud Dataflow job.
},
],
- "currentState": "A String", # The current state of the job.
- #
- # Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise
- # specified.
- #
- # A job in the `JOB_STATE_RUNNING` state may asynchronously enter a
- # terminal state. After a job has reached a terminal state, no
- # further state updates may be made.
- #
- # This field may be mutated by the Cloud Dataflow service;
- # callers cannot mutate it.
+ "location": "A String", # The location that contains this job.
"tempFiles": [ # A set of files the system should be aware of that are used
# for temporary storage. These temporary files will be
# removed on job completion.
@@ -580,12 +575,17 @@
#
# This field is set by the Cloud Dataflow service when the Job is
# created, and is immutable for the life of the job.
- "replaceJobId": "A String", # If this job is an update of an existing job, this field is the job ID
- # of the job it replaced.
+ "currentState": "A String", # The current state of the job.
#
- # When sending a `CreateJobRequest`, you can update a job by specifying it
- # here. The job named here is stopped, and its intermediate state is
- # transferred to this job.
+ # Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise
+ # specified.
+ #
+ # A job in the `JOB_STATE_RUNNING` state may asynchronously enter a
+ # terminal state. After a job has reached a terminal state, no
+ # further state updates may be made.
+ #
+ # This field may be mutated by the Cloud Dataflow service;
+ # callers cannot mutate it.
"executionInfo": { # Additional information about how a Cloud Dataflow job will be executed that # Deprecated.
# isn't contained in the submitted job.
"stages": { # A mapping from each stage to the information about that stage.
@@ -750,70 +750,6 @@
An object of the form:
{ # Response to the request to launch a template.
- "status": { # The `Status` type defines a logical error model that is suitable for different # The status of the launch template request. Any problems with the request
- # will be indicated in the error_details.
- # programming environments, including REST APIs and RPC APIs. It is used by
- # [gRPC](https://github.com/grpc). The error model is designed to be:
- #
- # - Simple to use and understand for most users
- # - Flexible enough to meet unexpected needs
- #
- # # Overview
- #
- # The `Status` message contains three pieces of data: error code, error message,
- # and error details. The error code should be an enum value of
- # google.rpc.Code, but it may accept additional error codes if needed. The
- # error message should be a developer-facing English message that helps
- # developers *understand* and *resolve* the error. If a localized user-facing
- # error message is needed, put the localized message in the error details or
- # localize it in the client. The optional error details may contain arbitrary
- # information about the error. There is a predefined set of error detail types
- # in the package `google.rpc` which can be used for common error conditions.
- #
- # # Language mapping
- #
- # The `Status` message is the logical representation of the error model, but it
- # is not necessarily the actual wire format. When the `Status` message is
- # exposed in different client libraries and different wire protocols, it can be
- # mapped differently. For example, it will likely be mapped to some exceptions
- # in Java, but more likely mapped to some error codes in C.
- #
- # # Other uses
- #
- # The error model and the `Status` message can be used in a variety of
- # environments, either with or without APIs, to provide a
- # consistent developer experience across different environments.
- #
- # Example uses of this error model include:
- #
- # - Partial errors. If a service needs to return partial errors to the client,
- # it may embed the `Status` in the normal response to indicate the partial
- # errors.
- #
- # - Workflow errors. A typical workflow has multiple steps. Each step may
- # have a `Status` message for error reporting purpose.
- #
- # - Batch operations. If a client uses batch request and batch response, the
- # `Status` message should be used directly inside batch response, one for
- # each error sub-response.
- #
- # - Asynchronous operations. If an API call embeds asynchronous operation
- # results in its response, the status of those operations should be
- # represented directly using the `Status` message.
- #
- # - Logging. If some API errors are stored in logs, the message `Status` could
- # be used directly after any stripping needed for security/privacy reasons.
- "message": "A String", # A developer-facing error message, which should be in English. Any
- # user-facing error message should be localized and sent in the
- # google.rpc.Status.details field, or localized by the client.
- "code": 42, # The status code, which should be an enum value of google.rpc.Code.
- "details": [ # A list of messages that carry the error details. There will be a
- # common set of message types for APIs to use.
- {
- "a_key": "", # Properties of the object. Contains field @type with type URL.
- },
- ],
- },
"job": { # Defines a job to be run by the Cloud Dataflow service. # The job that was launched, if the request was not a dry run and
# the job was successfully launched.
"clientRequestId": "A String", # The client's unique identifier of the job, re-used across retried attempts.
@@ -854,13 +790,12 @@
# size.
"a_key": "A String",
},
- "location": "A String", # The location that contains this job.
- "createTime": "A String", # The timestamp when the job was initially created. Immutable and set by the
- # Cloud Dataflow service.
"transformNameMapping": { # The map of transform name prefixes of the job to be replaced to the
# corresponding name prefixes of the new job.
"a_key": "A String",
},
+ "createTime": "A String", # The timestamp when the job was initially created. Immutable and set by the
+ # Cloud Dataflow service.
"environment": { # Describes the environment in which a Dataflow Job runs. # The environment for the job.
"version": { # A structure describing which components and their versions of the service
# are required in order to run the job.
@@ -914,49 +849,6 @@
# multiple pools, in order to match the various computational
# requirements of the various stages of the job.
"diskSourceImage": "A String", # Fully qualified source image for disks.
- "ipConfiguration": "A String", # Configuration for VM IPs.
- "kind": "A String", # The kind of the worker pool; currently only `harness` and `shuffle`
- # are supported.
- "machineType": "A String", # Machine type (e.g. "n1-standard-1"). If empty or unspecified, the
- # service will attempt to choose a reasonable default.
- "network": "A String", # Network to which VMs will be assigned. If empty or unspecified,
- # the service will use the network "default".
- "zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service
- # will attempt to choose a reasonable default.
- "diskSizeGb": 42, # Size of root disk for VMs, in GB. If zero or unspecified, the service will
- # attempt to choose a reasonable default.
- "metadata": { # Metadata to set on the Google Compute Engine VMs.
- "a_key": "A String",
- },
- "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google
- # Compute Engine API.
- "teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool.
- # Allowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and
- # `TEARDOWN_NEVER`.
- # `TEARDOWN_ALWAYS` means workers are always torn down regardless of whether
- # the job succeeds. `TEARDOWN_ON_SUCCESS` means workers are torn down
- # if the job succeeds. `TEARDOWN_NEVER` means the workers are never torn
- # down.
- #
- # If the workers are not torn down by the service, they will
- # continue to run and use Google Compute Engine VM resources in the
- # user's project until they are explicitly terminated by the user.
- # Because of this, Google recommends using the `TEARDOWN_ALWAYS`
- # policy except for small, manually supervised test jobs.
- #
- # If unknown or unspecified, the service will attempt to choose a reasonable
- # default.
- "numThreadsPerWorker": 42, # The number of threads per worker harness. If empty or unspecified, the
- # service will choose a number of threads (according to the number of cores
- # on the selected machine type for batch, or 1 by convention for streaming).
- "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. Expected to be of
- # the form "regions/REGION/subnetworks/SUBNETWORK".
- "poolArgs": { # Extra arguments for this worker pool.
- "a_key": "", # Properties of the object. Contains field @type with type URL.
- },
- "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to
- # execute the job. If zero or unspecified, the service will
- # attempt to choose a reasonable default.
"taskrunnerSettings": { # Taskrunner configuration settings. # Settings passed through to Google Compute Engine workers when
# using the standard Dataflow task runner. Users should ignore
# this field.
@@ -969,24 +861,17 @@
# Google Cloud Storage:
# storage.googleapis.com/{bucket}/{object}
# bucket.storage.googleapis.com/{object}
- "commandlinesFileName": "A String", # The file to store preprocessing commands in.
- "alsologtostderr": True or False, # Whether to also send taskrunner log info to stderr.
- "continueOnException": True or False, # Whether to continue taskrunner if an exception is hit.
- "baseTaskDir": "A String", # The location on the worker for task-specific subdirectories.
- "vmId": "A String", # The ID string of the VM.
- "taskGroup": "A String", # The UNIX group ID on the worker VM to use for tasks launched by
- # taskrunner; e.g. "wheel".
"taskUser": "A String", # The UNIX user ID on the worker VM to use for tasks launched by
# taskrunner; e.g. "root".
+ "commandlinesFileName": "A String", # The file to store preprocessing commands in.
+ "alsologtostderr": True or False, # Whether to also send taskrunner log info to stderr.
+ "vmId": "A String", # The ID string of the VM.
+ "baseTaskDir": "A String", # The location on the worker for task-specific subdirectories.
+      "continueOnException": True or False, # Whether to continue the taskrunner if an exception is hit.
"oauthScopes": [ # The OAuth2 scopes to be requested by the taskrunner in order to
# access the Cloud Dataflow API.
"A String",
],
- "languageHint": "A String", # The suggested backend language.
- "logToSerialconsole": True or False, # Whether to send taskrunner log info to Google Compute Engine VM serial
- # console.
- "streamingWorkerMainClass": "A String", # The streaming worker main class name.
- "logDir": "A String", # The directory on the VM to store logs.
"parallelWorkerSettings": { # Provides data to pass through to the worker harness. # The settings to pass to the parallel worker harness.
"reportingEnabled": True or False, # Whether to send work progress updates to the service.
"shuffleServicePath": "A String", # The Shuffle service path relative to the root URL, for example,
@@ -1013,16 +898,14 @@
# storage.googleapis.com/{bucket}/{object}
# bucket.storage.googleapis.com/{object}
},
+ "taskGroup": "A String", # The UNIX group ID on the worker VM to use for tasks launched by
+ # taskrunner; e.g. "wheel".
+ "languageHint": "A String", # The suggested backend language.
+ "logToSerialconsole": True or False, # Whether to send taskrunner log info to Google Compute Engine VM serial
+ # console.
+ "streamingWorkerMainClass": "A String", # The streaming worker main class name.
+ "logDir": "A String", # The directory on the VM to store logs.
"dataflowApiVersion": "A String", # The API version of endpoint, e.g. "v1b3"
- "harnessCommand": "A String", # The command to launch the worker harness.
- "tempStoragePrefix": "A String", # The prefix of the resources the taskrunner should use for
- # temporary storage.
- #
- # The supported resource type is:
- #
- # Google Cloud Storage:
- # storage.googleapis.com/{bucket}/{object}
- # bucket.storage.googleapis.com/{object}
"baseUrl": "A String", # The base URL for the taskrunner to use when accessing Google Cloud APIs.
#
# When workers access Google Cloud APIs, they logically do so via
@@ -1032,33 +915,26 @@
# Locators".
#
# If not specified, the default value is "http://www.googleapis.com/"
- },
- "defaultPackageSet": "A String", # The default package set to install. This allows the service to
- # select a default set of packages which are useful to worker
- # harnesses written in a particular language.
- "packages": [ # Packages to be installed on workers.
- { # The packages that must be installed in order for a worker to run the
- # steps of the Cloud Dataflow job that will be assigned to its worker
- # pool.
+ "harnessCommand": "A String", # The command to launch the worker harness.
+ "tempStoragePrefix": "A String", # The prefix of the resources the taskrunner should use for
+ # temporary storage.
#
- # This is the mechanism by which the Cloud Dataflow SDK causes code to
- # be loaded onto the workers. For example, the Cloud Dataflow Java SDK
- # might use this to install jars containing the user's code and all of the
- # various dependencies (libraries, data files, etc.) required in order
- # for that code to run.
- "location": "A String", # The resource to read the package from. The supported resource type is:
- #
- # Google Cloud Storage:
- #
- # storage.googleapis.com/{bucket}
- # bucket.storage.googleapis.com/
- "name": "A String", # The name of the package.
- },
- ],
- "autoscalingSettings": { # Settings for WorkerPool autoscaling. # Settings for autoscaling of this WorkerPool.
- "algorithm": "A String", # The algorithm to use for autoscaling.
- "maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
+ # The supported resource type is:
+ #
+ # Google Cloud Storage:
+ # storage.googleapis.com/{bucket}/{object}
+ # bucket.storage.googleapis.com/{object}
},
+ "kind": "A String", # The kind of the worker pool; currently only `harness` and `shuffle`
+ # are supported.
+ "machineType": "A String", # Machine type (e.g. "n1-standard-1"). If empty or unspecified, the
+ # service will attempt to choose a reasonable default.
+ "network": "A String", # Network to which VMs will be assigned. If empty or unspecified,
+ # the service will use the network "default".
+ "zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service
+ # will attempt to choose a reasonable default.
+ "diskSizeGb": 42, # Size of root disk for VMs, in GB. If zero or unspecified, the service will
+ # attempt to choose a reasonable default.
"dataDisks": [ # Data disks that are used by a VM in this workflow.
{ # Describes the data disk used by a workflow job.
"mountPoint": "A String", # Directory in a VM where disk is mounted.
@@ -1084,13 +960,78 @@
# compute.googleapis.com/projects/project-id/zones/zone/diskTypes/pd-standard
},
],
- "diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will
+      "teardownPolicy": "A String", # Sets the policy for determining when to turn down the worker pool.
+ # Allowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and
+ # `TEARDOWN_NEVER`.
+ # `TEARDOWN_ALWAYS` means workers are always torn down regardless of whether
+ # the job succeeds. `TEARDOWN_ON_SUCCESS` means workers are torn down
+ # if the job succeeds. `TEARDOWN_NEVER` means the workers are never torn
+ # down.
+ #
+ # If the workers are not torn down by the service, they will
+ # continue to run and use Google Compute Engine VM resources in the
+ # user's project until they are explicitly terminated by the user.
+ # Because of this, Google recommends using the `TEARDOWN_ALWAYS`
+ # policy except for small, manually supervised test jobs.
+ #
+ # If unknown or unspecified, the service will attempt to choose a reasonable
+ # default.
+ "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google
+ # Compute Engine API.
+ "ipConfiguration": "A String", # Configuration for VM IPs.
+ "numThreadsPerWorker": 42, # The number of threads per worker harness. If empty or unspecified, the
+ # service will choose a number of threads (according to the number of cores
+ # on the selected machine type for batch, or 1 by convention for streaming).
+ "poolArgs": { # Extra arguments for this worker pool.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
+ },
+ "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to
+ # execute the job. If zero or unspecified, the service will
# attempt to choose a reasonable default.
"workerHarnessContainerImage": "A String", # Required. Docker container image that executes the Cloud Dataflow worker
# harness, residing in Google Container Registry.
+ "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired. Expected to be of
+ # the form "regions/REGION/subnetworks/SUBNETWORK".
+ "packages": [ # Packages to be installed on workers.
+ { # The packages that must be installed in order for a worker to run the
+ # steps of the Cloud Dataflow job that will be assigned to its worker
+ # pool.
+ #
+ # This is the mechanism by which the Cloud Dataflow SDK causes code to
+ # be loaded onto the workers. For example, the Cloud Dataflow Java SDK
+ # might use this to install jars containing the user's code and all of the
+ # various dependencies (libraries, data files, etc.) required in order
+ # for that code to run.
+ "location": "A String", # The resource to read the package from. The supported resource type is:
+ #
+ # Google Cloud Storage:
+ #
+ # storage.googleapis.com/{bucket}
+ # bucket.storage.googleapis.com/
+ "name": "A String", # The name of the package.
+ },
+ ],
+ "autoscalingSettings": { # Settings for WorkerPool autoscaling. # Settings for autoscaling of this WorkerPool.
+ "algorithm": "A String", # The algorithm to use for autoscaling.
+ "maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
+ },
+ "defaultPackageSet": "A String", # The default package set to install. This allows the service to
+ # select a default set of packages which are useful to worker
+ # harnesses written in a particular language.
+ "diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will
+ # attempt to choose a reasonable default.
+ "metadata": { # Metadata to set on the Google Compute Engine VMs.
+ "a_key": "A String",
+ },
},
],
},
+ "replaceJobId": "A String", # If this job is an update of an existing job, this field is the job ID
+ # of the job it replaced.
+ #
+ # When sending a `CreateJobRequest`, you can update a job by specifying it
+ # here. The job named here is stopped, and its intermediate state is
+ # transferred to this job.
"pipelineDescription": { # A descriptive representation of submitted pipeline as well as the executed # Preliminary field: The format of this data may change at any time.
# A description of the user pipeline and stages through which it is executed.
# Created by Cloud Dataflow service. Only retrieved with
@@ -1106,9 +1047,6 @@
],
"displayData": [ # Transform-specific display data.
{ # Data provided with a pipeline or transform to provide descriptive info.
- "key": "A String", # The key identifying the display data.
- # This is intended to be used as a label for the display data
- # when viewed in a dax monitoring system.
"shortStrValue": "A String", # A possible additional shorter value to display.
# For example a java_class_name_value of com.mypackage.MyDoFn
# will be stored with MyDoFn as the short_str_value and
@@ -1116,6 +1054,7 @@
# short_str_value can be displayed and java_class_name_value
# will be displayed as a tooltip.
"timestampValue": "A String", # Contains value if the data is of timestamp type.
+ "durationValue": "A String", # Contains value if the data is of duration type.
"url": "A String", # An optional full URL.
"floatValue": 3.14, # Contains value if the data is of float type.
"namespace": "A String", # The namespace for the key. This is usually a class name or programming
@@ -1126,7 +1065,9 @@
"label": "A String", # An optional label to display in a dax UI for the element.
"boolValue": True or False, # Contains value if the data is of a boolean type.
"strValue": "A String", # Contains value if the data is of string type.
- "durationValue": "A String", # Contains value if the data is of duration type.
+ "key": "A String", # The key identifying the display data.
+ # This is intended to be used as a label for the display data
+ # when viewed in a dax monitoring system.
"int64Value": "A String", # Contains value if the data is of int64 type.
},
],
@@ -1138,9 +1079,6 @@
],
"displayData": [ # Pipeline level display data.
{ # Data provided with a pipeline or transform to provide descriptive info.
- "key": "A String", # The key identifying the display data.
- # This is intended to be used as a label for the display data
- # when viewed in a dax monitoring system.
"shortStrValue": "A String", # A possible additional shorter value to display.
# For example a java_class_name_value of com.mypackage.MyDoFn
# will be stored with MyDoFn as the short_str_value and
@@ -1148,6 +1086,7 @@
# short_str_value can be displayed and java_class_name_value
# will be displayed as a tooltip.
"timestampValue": "A String", # Contains value if the data is of timestamp type.
+ "durationValue": "A String", # Contains value if the data is of duration type.
"url": "A String", # An optional full URL.
"floatValue": 3.14, # Contains value if the data is of float type.
"namespace": "A String", # The namespace for the key. This is usually a class name or programming
@@ -1158,7 +1097,9 @@
"label": "A String", # An optional label to display in a dax UI for the element.
"boolValue": True or False, # Contains value if the data is of a boolean type.
"strValue": "A String", # Contains value if the data is of string type.
- "durationValue": "A String", # Contains value if the data is of duration type.
+ "key": "A String", # The key identifying the display data.
+ # This is intended to be used as a label for the display data
+ # when viewed in a dax monitoring system.
"int64Value": "A String", # Contains value if the data is of int64 type.
},
],
@@ -1180,19 +1121,19 @@
"outputSource": [ # Output sources for this stage.
{ # Description of an input or output of an execution stage.
"userName": "A String", # Human-readable name for this source; may be user or system generated.
+ "sizeBytes": "A String", # Size of the source, if measurable.
+ "name": "A String", # Dataflow service generated name for this source.
"originalTransformOrCollection": "A String", # User name for the original user transform or collection with which this
# source is most closely associated.
- "name": "A String", # Dataflow service generated name for this source.
- "sizeBytes": "A String", # Size of the source, if measurable.
},
],
"inputSource": [ # Input sources for this stage.
{ # Description of an input or output of an execution stage.
"userName": "A String", # Human-readable name for this source; may be user or system generated.
+ "sizeBytes": "A String", # Size of the source, if measurable.
+ "name": "A String", # Dataflow service generated name for this source.
"originalTransformOrCollection": "A String", # User name for the original user transform or collection with which this
# source is most closely associated.
- "name": "A String", # Dataflow service generated name for this source.
- "sizeBytes": "A String", # Size of the source, if measurable.
},
],
"componentTransform": [ # Transforms that comprise this execution stage.
@@ -1234,26 +1175,16 @@
# Note that the Cloud Dataflow service may be used to run many different
# types of jobs, not just Map-Reduce.
"kind": "A String", # The kind of step in the Cloud Dataflow job.
+ "name": "A String", # The name that identifies the step. This must be unique for each
+ # step with respect to all other steps in the Cloud Dataflow job.
"properties": { # Named properties associated with the step. Each kind of
# predefined step has its own required set of properties.
# Must be provided on Create. Only retrieved with JOB_VIEW_ALL.
"a_key": "", # Properties of the object.
},
- "name": "A String", # The name that identifies the step. This must be unique for each
- # step with respect to all other steps in the Cloud Dataflow job.
},
],
- "currentState": "A String", # The current state of the job.
- #
- # Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise
- # specified.
- #
- # A job in the `JOB_STATE_RUNNING` state may asynchronously enter a
- # terminal state. After a job has reached a terminal state, no
- # further state updates may be made.
- #
- # This field may be mutated by the Cloud Dataflow service;
- # callers cannot mutate it.
+ "location": "A String", # The location that contains this job.
"tempFiles": [ # A set of files the system should be aware of that are used
# for temporary storage. These temporary files will be
# removed on job completion.
@@ -1273,12 +1204,17 @@
#
# This field is set by the Cloud Dataflow service when the Job is
# created, and is immutable for the life of the job.
- "replaceJobId": "A String", # If this job is an update of an existing job, this field is the job ID
- # of the job it replaced.
+ "currentState": "A String", # The current state of the job.
#
- # When sending a `CreateJobRequest`, you can update a job by specifying it
- # here. The job named here is stopped, and its intermediate state is
- # transferred to this job.
+ # Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise
+ # specified.
+ #
+ # A job in the `JOB_STATE_RUNNING` state may asynchronously enter a
+ # terminal state. After a job has reached a terminal state, no
+ # further state updates may be made.
+ #
+ # This field may be mutated by the Cloud Dataflow service;
+ # callers cannot mutate it.
"executionInfo": { # Additional information about how a Cloud Dataflow job will be executed that # Deprecated.
# isn't contained in the submitted job.
"stages": { # A mapping from each stage to the information about that stage.