Fix method doc signatures for multiline required parameters (#374)

* Fix method doc signatures for multiline required parameters.

The existing doc generator failed to recognize parameters as required when their descriptions
extended over more than one line. Besides presenting incorrect information, the resulting
inconsistencies broke checks for automated sample generation.

* Regen docs
diff --git a/docs/dyn/dataflow_v1b3.projects.templates.html b/docs/dyn/dataflow_v1b3.projects.templates.html
index 197f0c9..c5f3998 100644
--- a/docs/dyn/dataflow_v1b3.projects.templates.html
+++ b/docs/dyn/dataflow_v1b3.projects.templates.html
@@ -95,15 +95,15 @@
 
 { # A request to create a Cloud Dataflow job from a template.
     "environment": { # The environment values to set at runtime. # The runtime environment for the job.
-      "bypassTempDirValidation": True or False, # Whether to bypass the safety checks for the job's temporary directory.
-          # Use with caution.
+      "maxWorkers": 42, # The maximum number of Google Compute Engine instances to be made
+          # available to your pipeline during execution, from 1 to 1000.
       "tempLocation": "A String", # The Cloud Storage path to use for temporary files.
           # Must be a valid Cloud Storage URL, beginning with `gs://`.
       "serviceAccountEmail": "A String", # The email address of the service account to run the job as.
       "zone": "A String", # The Compute Engine [availability zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones)
           # for launching worker instances to run your pipeline.
-      "maxWorkers": 42, # The maximum number of Google Compute Engine instances to be made
-          # available to your pipeline during execution, from 1 to 1000.
+      "bypassTempDirValidation": True or False, # Whether to bypass the safety checks for the job's temporary directory.
+          # Use with caution.
     },
     "gcsPath": "A String", # Required. A Cloud Storage path to the template from which to
         # create the job.
@@ -161,12 +161,13 @@
           # size.
         "a_key": "A String",
       },
+      "location": "A String", # The location that contains this job.
+      "createTime": "A String", # The timestamp when the job was initially created. Immutable and set by the
+          # Cloud Dataflow service.
       "transformNameMapping": { # The map of transform name prefixes of the job to be replaced to the
           # corresponding name prefixes of the new job.
         "a_key": "A String",
       },
-      "createTime": "A String", # The timestamp when the job was initially created. Immutable and set by the
-          # Cloud Dataflow service.
       "environment": { # Describes the environment in which a Dataflow Job runs. # The environment for the job.
         "version": { # A structure describing which components and their versions of the service
             # are required in order to run the job.
@@ -220,6 +221,49 @@
               # multiple pools, in order to match the various computational
               # requirements of the various stages of the job.
             "diskSourceImage": "A String", # Fully qualified source image for disks.
+            "ipConfiguration": "A String", # Configuration for VM IPs.
+            "kind": "A String", # The kind of the worker pool; currently only `harness` and `shuffle`
+                # are supported.
+            "machineType": "A String", # Machine type (e.g. "n1-standard-1").  If empty or unspecified, the
+                # service will attempt to choose a reasonable default.
+            "network": "A String", # Network to which VMs will be assigned.  If empty or unspecified,
+                # the service will use the network "default".
+            "zone": "A String", # Zone to run the worker pools in.  If empty or unspecified, the service
+                # will attempt to choose a reasonable default.
+            "diskSizeGb": 42, # Size of root disk for VMs, in GB.  If zero or unspecified, the service will
+                # attempt to choose a reasonable default.
+            "metadata": { # Metadata to set on the Google Compute Engine VMs.
+              "a_key": "A String",
+            },
+            "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google
+                # Compute Engine API.
+            "teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool.
+                # Allowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and
+                # `TEARDOWN_NEVER`.
+                # `TEARDOWN_ALWAYS` means workers are always torn down regardless of whether
+                # the job succeeds. `TEARDOWN_ON_SUCCESS` means workers are torn down
+                # if the job succeeds. `TEARDOWN_NEVER` means the workers are never torn
+                # down.
+                #
+                # If the workers are not torn down by the service, they will
+                # continue to run and use Google Compute Engine VM resources in the
+                # user's project until they are explicitly terminated by the user.
+                # Because of this, Google recommends using the `TEARDOWN_ALWAYS`
+                # policy except for small, manually supervised test jobs.
+                #
+                # If unknown or unspecified, the service will attempt to choose a reasonable
+                # default.
+            "numThreadsPerWorker": 42, # The number of threads per worker harness. If empty or unspecified, the
+                # service will choose a number of threads (according to the number of cores
+                # on the selected machine type for batch, or 1 by convention for streaming).
+            "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired.  Expected to be of
+                # the form "regions/REGION/subnetworks/SUBNETWORK".
+            "poolArgs": { # Extra arguments for this worker pool.
+              "a_key": "", # Properties of the object. Contains field @type with type URL.
+            },
+            "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to
+                # execute the job.  If zero or unspecified, the service will
+                # attempt to choose a reasonable default.
             "taskrunnerSettings": { # Taskrunner configuration settings. # Settings passed through to Google Compute Engine workers when
                 # using the standard Dataflow task runner.  Users should ignore
                 # this field.
@@ -296,16 +340,32 @@
                   #   storage.googleapis.com/{bucket}/{object}
                   #   bucket.storage.googleapis.com/{object}
             },
-            "kind": "A String", # The kind of the worker pool; currently only `harness` and `shuffle`
-                # are supported.
-            "machineType": "A String", # Machine type (e.g. "n1-standard-1").  If empty or unspecified, the
-                # service will attempt to choose a reasonable default.
-            "network": "A String", # Network to which VMs will be assigned.  If empty or unspecified,
-                # the service will use the network "default".
-            "zone": "A String", # Zone to run the worker pools in.  If empty or unspecified, the service
-                # will attempt to choose a reasonable default.
-            "diskSizeGb": 42, # Size of root disk for VMs, in GB.  If zero or unspecified, the service will
-                # attempt to choose a reasonable default.
+            "defaultPackageSet": "A String", # The default package set to install.  This allows the service to
+                # select a default set of packages which are useful to worker
+                # harnesses written in a particular language.
+            "packages": [ # Packages to be installed on workers.
+              { # The packages that must be installed in order for a worker to run the
+                  # steps of the Cloud Dataflow job that will be assigned to its worker
+                  # pool.
+                  #
+                  # This is the mechanism by which the Cloud Dataflow SDK causes code to
+                  # be loaded onto the workers. For example, the Cloud Dataflow Java SDK
+                  # might use this to install jars containing the user's code and all of the
+                  # various dependencies (libraries, data files, etc.) required in order
+                  # for that code to run.
+                "name": "A String", # The name of the package.
+                "location": "A String", # The resource to read the package from. The supported resource type is:
+                    #
+                    # Google Cloud Storage:
+                    #
+                    #   storage.googleapis.com/{bucket}
+                    #   bucket.storage.googleapis.com/
+              },
+            ],
+            "autoscalingSettings": { # Settings for WorkerPool autoscaling. # Settings for autoscaling of this WorkerPool.
+              "maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
+              "algorithm": "A String", # The algorithm to use for autoscaling.
+            },
             "dataDisks": [ # Data disks that are used by a VM in this workflow.
               { # Describes the data disk used by a workflow job.
                 "mountPoint": "A String", # Directory in a VM where disk is mounted.
@@ -331,78 +391,13 @@
                     # compute.googleapis.com/projects/project-id/zones/zone/diskTypes/pd-standard
               },
             ],
-            "teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool.
-                # Allowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and
-                # `TEARDOWN_NEVER`.
-                # `TEARDOWN_ALWAYS` means workers are always torn down regardless of whether
-                # the job succeeds. `TEARDOWN_ON_SUCCESS` means workers are torn down
-                # if the job succeeds. `TEARDOWN_NEVER` means the workers are never torn
-                # down.
-                #
-                # If the workers are not torn down by the service, they will
-                # continue to run and use Google Compute Engine VM resources in the
-                # user's project until they are explicitly terminated by the user.
-                # Because of this, Google recommends using the `TEARDOWN_ALWAYS`
-                # policy except for small, manually supervised test jobs.
-                #
-                # If unknown or unspecified, the service will attempt to choose a reasonable
-                # default.
-            "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google
-                # Compute Engine API.
-            "ipConfiguration": "A String", # Configuration for VM IPs.
-            "numThreadsPerWorker": 42, # The number of threads per worker harness. If empty or unspecified, the
-                # service will choose a number of threads (according to the number of cores
-                # on the selected machine type for batch, or 1 by convention for streaming).
-            "poolArgs": { # Extra arguments for this worker pool.
-              "a_key": "", # Properties of the object. Contains field @type with type URL.
-            },
-            "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to
-                # execute the job.  If zero or unspecified, the service will
+            "diskType": "A String", # Type of root disk for VMs.  If empty or unspecified, the service will
                 # attempt to choose a reasonable default.
             "workerHarnessContainerImage": "A String", # Required. Docker container image that executes the Cloud Dataflow worker
                 # harness, residing in Google Container Registry.
-            "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired.  Expected to be of
-                # the form "regions/REGION/subnetworks/SUBNETWORK".
-            "packages": [ # Packages to be installed on workers.
-              { # The packages that must be installed in order for a worker to run the
-                  # steps of the Cloud Dataflow job that will be assigned to its worker
-                  # pool.
-                  #
-                  # This is the mechanism by which the Cloud Dataflow SDK causes code to
-                  # be loaded onto the workers. For example, the Cloud Dataflow Java SDK
-                  # might use this to install jars containing the user's code and all of the
-                  # various dependencies (libraries, data files, etc.) required in order
-                  # for that code to run.
-                "location": "A String", # The resource to read the package from. The supported resource type is:
-                    #
-                    # Google Cloud Storage:
-                    #
-                    #   storage.googleapis.com/{bucket}
-                    #   bucket.storage.googleapis.com/
-                "name": "A String", # The name of the package.
-              },
-            ],
-            "autoscalingSettings": { # Settings for WorkerPool autoscaling. # Settings for autoscaling of this WorkerPool.
-              "algorithm": "A String", # The algorithm to use for autoscaling.
-              "maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
-            },
-            "defaultPackageSet": "A String", # The default package set to install.  This allows the service to
-                # select a default set of packages which are useful to worker
-                # harnesses written in a particular language.
-            "diskType": "A String", # Type of root disk for VMs.  If empty or unspecified, the service will
-                # attempt to choose a reasonable default.
-            "metadata": { # Metadata to set on the Google Compute Engine VMs.
-              "a_key": "A String",
-            },
           },
         ],
       },
-      "replaceJobId": "A String", # If this job is an update of an existing job, this field is the job ID
-          # of the job it replaced.
-          #
-          # When sending a `CreateJobRequest`, you can update a job by specifying it
-          # here. The job named here is stopped, and its intermediate state is
-          # transferred to this job.
       "pipelineDescription": { # A descriptive representation of submitted pipeline as well as the executed # Preliminary field: The format of this data may change at any time.
           # A description of the user pipeline and stages through which it is executed.
           # Created by Cloud Dataflow service.  Only retrieved with
@@ -418,6 +413,9 @@
             ],
             "displayData": [ # Transform-specific display data.
               { # Data provided with a pipeline or transform to provide descriptive info.
+                "key": "A String", # The key identifying the display data.
+                    # This is intended to be used as a label for the display data
+                    # when viewed in a dax monitoring system.
                 "shortStrValue": "A String", # A possible additional shorter value to display.
                     # For example a java_class_name_value of com.mypackage.MyDoFn
                     # will be stored with MyDoFn as the short_str_value and
@@ -425,7 +423,6 @@
                     # short_str_value can be displayed and java_class_name_value
                     # will be displayed as a tooltip.
                 "timestampValue": "A String", # Contains value if the data is of timestamp type.
-                "durationValue": "A String", # Contains value if the data is of duration type.
                 "url": "A String", # An optional full URL.
                 "floatValue": 3.14, # Contains value if the data is of float type.
                 "namespace": "A String", # The namespace for the key. This is usually a class name or programming
@@ -436,9 +433,7 @@
                 "label": "A String", # An optional label to display in a dax UI for the element.
                 "boolValue": True or False, # Contains value if the data is of a boolean type.
                 "strValue": "A String", # Contains value if the data is of string type.
-                "key": "A String", # The key identifying the display data.
-                    # This is intended to be used as a label for the display data
-                    # when viewed in a dax monitoring system.
+                "durationValue": "A String", # Contains value if the data is of duration type.
                 "int64Value": "A String", # Contains value if the data is of int64 type.
               },
             ],
@@ -450,6 +445,9 @@
         ],
         "displayData": [ # Pipeline level display data.
           { # Data provided with a pipeline or transform to provide descriptive info.
+            "key": "A String", # The key identifying the display data.
+                # This is intended to be used as a label for the display data
+                # when viewed in a dax monitoring system.
             "shortStrValue": "A String", # A possible additional shorter value to display.
                 # For example a java_class_name_value of com.mypackage.MyDoFn
                 # will be stored with MyDoFn as the short_str_value and
@@ -457,7 +455,6 @@
                 # short_str_value can be displayed and java_class_name_value
                 # will be displayed as a tooltip.
             "timestampValue": "A String", # Contains value if the data is of timestamp type.
-            "durationValue": "A String", # Contains value if the data is of duration type.
             "url": "A String", # An optional full URL.
             "floatValue": 3.14, # Contains value if the data is of float type.
             "namespace": "A String", # The namespace for the key. This is usually a class name or programming
@@ -468,9 +465,7 @@
             "label": "A String", # An optional label to display in a dax UI for the element.
             "boolValue": True or False, # Contains value if the data is of a boolean type.
             "strValue": "A String", # Contains value if the data is of string type.
-            "key": "A String", # The key identifying the display data.
-                # This is intended to be used as a label for the display data
-                # when viewed in a dax monitoring system.
+            "durationValue": "A String", # Contains value if the data is of duration type.
             "int64Value": "A String", # Contains value if the data is of int64 type.
           },
         ],
@@ -492,19 +487,19 @@
             "outputSource": [ # Output sources for this stage.
               { # Description of an input or output of an execution stage.
                 "userName": "A String", # Human-readable name for this source; may be user or system generated.
-                "sizeBytes": "A String", # Size of the source, if measurable.
-                "name": "A String", # Dataflow service generated name for this source.
                 "originalTransformOrCollection": "A String", # User name for the original user transform or collection with which this
                     # source is most closely associated.
+                "name": "A String", # Dataflow service generated name for this source.
+                "sizeBytes": "A String", # Size of the source, if measurable.
               },
             ],
             "inputSource": [ # Input sources for this stage.
               { # Description of an input or output of an execution stage.
                 "userName": "A String", # Human-readable name for this source; may be user or system generated.
-                "sizeBytes": "A String", # Size of the source, if measurable.
-                "name": "A String", # Dataflow service generated name for this source.
                 "originalTransformOrCollection": "A String", # User name for the original user transform or collection with which this
                     # source is most closely associated.
+                "name": "A String", # Dataflow service generated name for this source.
+                "sizeBytes": "A String", # Size of the source, if measurable.
               },
             ],
             "componentTransform": [ # Transforms that comprise this execution stage.
@@ -546,16 +541,26 @@
             # Note that the Cloud Dataflow service may be used to run many different
             # types of jobs, not just Map-Reduce.
           "kind": "A String", # The kind of step in the Cloud Dataflow job.
-          "name": "A String", # The name that identifies the step. This must be unique for each
-              # step with respect to all other steps in the Cloud Dataflow job.
           "properties": { # Named properties associated with the step. Each kind of
               # predefined step has its own required set of properties.
               # Must be provided on Create.  Only retrieved with JOB_VIEW_ALL.
             "a_key": "", # Properties of the object.
           },
+          "name": "A String", # The name that identifies the step. This must be unique for each
+              # step with respect to all other steps in the Cloud Dataflow job.
         },
       ],
-      "location": "A String", # The location that contains this job.
+      "currentState": "A String", # The current state of the job.
+          #
+          # Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise
+          # specified.
+          #
+          # A job in the `JOB_STATE_RUNNING` state may asynchronously enter a
+          # terminal state. After a job has reached a terminal state, no
+          # further state updates may be made.
+          #
+          # This field may be mutated by the Cloud Dataflow service;
+          # callers cannot mutate it.
       "tempFiles": [ # A set of files the system should be aware of that are used
           # for temporary storage. These temporary files will be
           # removed on job completion.
@@ -575,17 +580,12 @@
           #
           # This field is set by the Cloud Dataflow service when the Job is
           # created, and is immutable for the life of the job.
-      "currentState": "A String", # The current state of the job.
+      "replaceJobId": "A String", # If this job is an update of an existing job, this field is the job ID
+          # of the job it replaced.
           #
-          # Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise
-          # specified.
-          #
-          # A job in the `JOB_STATE_RUNNING` state may asynchronously enter a
-          # terminal state. After a job has reached a terminal state, no
-          # further state updates may be made.
-          #
-          # This field may be mutated by the Cloud Dataflow service;
-          # callers cannot mutate it.
+          # When sending a `CreateJobRequest`, you can update a job by specifying it
+          # here. The job named here is stopped, and its intermediate state is
+          # transferred to this job.
       "executionInfo": { # Additional information about how a Cloud Dataflow job will be executed that # Deprecated.
           # isn't contained in the submitted job.
         "stages": { # A mapping from each stage to the information about that stage.
@@ -719,15 +719,15 @@
 
 { # Parameters to provide to the template being launched.
     "environment": { # The environment values to set at runtime. # The runtime environment for the job.
-      "bypassTempDirValidation": True or False, # Whether to bypass the safety checks for the job's temporary directory.
-          # Use with caution.
+      "maxWorkers": 42, # The maximum number of Google Compute Engine instances to be made
+          # available to your pipeline during execution, from 1 to 1000.
       "tempLocation": "A String", # The Cloud Storage path to use for temporary files.
           # Must be a valid Cloud Storage URL, beginning with `gs://`.
       "serviceAccountEmail": "A String", # The email address of the service account to run the job as.
       "zone": "A String", # The Compute Engine [availability zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones)
           # for launching worker instances to run your pipeline.
-      "maxWorkers": 42, # The maximum number of Google Compute Engine instances to be made
-          # available to your pipeline during execution, from 1 to 1000.
+      "bypassTempDirValidation": True or False, # Whether to bypass the safety checks for the job's temporary directory.
+          # Use with caution.
     },
     "parameters": { # The runtime parameters to pass to the job.
       "a_key": "A String",
@@ -790,12 +790,13 @@
             # size.
           "a_key": "A String",
         },
+        "location": "A String", # The location that contains this job.
+        "createTime": "A String", # The timestamp when the job was initially created. Immutable and set by the
+            # Cloud Dataflow service.
         "transformNameMapping": { # The map of transform name prefixes of the job to be replaced to the
             # corresponding name prefixes of the new job.
           "a_key": "A String",
         },
-        "createTime": "A String", # The timestamp when the job was initially created. Immutable and set by the
-            # Cloud Dataflow service.
         "environment": { # Describes the environment in which a Dataflow Job runs. # The environment for the job.
           "version": { # A structure describing which components and their versions of the service
               # are required in order to run the job.
@@ -849,6 +850,49 @@
                 # multiple pools, in order to match the various computational
                 # requirements of the various stages of the job.
               "diskSourceImage": "A String", # Fully qualified source image for disks.
+              "ipConfiguration": "A String", # Configuration for VM IPs.
+              "kind": "A String", # The kind of the worker pool; currently only `harness` and `shuffle`
+                  # are supported.
+              "machineType": "A String", # Machine type (e.g. "n1-standard-1").  If empty or unspecified, the
+                  # service will attempt to choose a reasonable default.
+              "network": "A String", # Network to which VMs will be assigned.  If empty or unspecified,
+                  # the service will use the network "default".
+              "zone": "A String", # Zone to run the worker pools in.  If empty or unspecified, the service
+                  # will attempt to choose a reasonable default.
+              "diskSizeGb": 42, # Size of root disk for VMs, in GB.  If zero or unspecified, the service will
+                  # attempt to choose a reasonable default.
+              "metadata": { # Metadata to set on the Google Compute Engine VMs.
+                "a_key": "A String",
+              },
+              "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google
+                  # Compute Engine API.
+              "teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool.
+                  # Allowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and
+                  # `TEARDOWN_NEVER`.
+                  # `TEARDOWN_ALWAYS` means workers are always torn down regardless of whether
+                  # the job succeeds. `TEARDOWN_ON_SUCCESS` means workers are torn down
+                  # if the job succeeds. `TEARDOWN_NEVER` means the workers are never torn
+                  # down.
+                  #
+                  # If the workers are not torn down by the service, they will
+                  # continue to run and use Google Compute Engine VM resources in the
+                  # user's project until they are explicitly terminated by the user.
+                  # Because of this, Google recommends using the `TEARDOWN_ALWAYS`
+                  # policy except for small, manually supervised test jobs.
+                  #
+                  # If unknown or unspecified, the service will attempt to choose a reasonable
+                  # default.
+              "numThreadsPerWorker": 42, # The number of threads per worker harness. If empty or unspecified, the
+                  # service will choose a number of threads (according to the number of cores
+                  # on the selected machine type for batch, or 1 by convention for streaming).
+              "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired.  Expected to be of
+                  # the form "regions/REGION/subnetworks/SUBNETWORK".
+              "poolArgs": { # Extra arguments for this worker pool.
+                "a_key": "", # Properties of the object. Contains field @type with type URL.
+              },
+              "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to
+                  # execute the job.  If zero or unspecified, the service will
+                  # attempt to choose a reasonable default.
               "taskrunnerSettings": { # Taskrunner configuration settings. # Settings passed through to Google Compute Engine workers when
                   # using the standard Dataflow task runner.  Users should ignore
                   # this field.
@@ -925,16 +969,32 @@
                     #   storage.googleapis.com/{bucket}/{object}
                     #   bucket.storage.googleapis.com/{object}
               },
-              "kind": "A String", # The kind of the worker pool; currently only `harness` and `shuffle`
-                  # are supported.
-              "machineType": "A String", # Machine type (e.g. "n1-standard-1").  If empty or unspecified, the
-                  # service will attempt to choose a reasonable default.
-              "network": "A String", # Network to which VMs will be assigned.  If empty or unspecified,
-                  # the service will use the network "default".
-              "zone": "A String", # Zone to run the worker pools in.  If empty or unspecified, the service
-                  # will attempt to choose a reasonable default.
-              "diskSizeGb": 42, # Size of root disk for VMs, in GB.  If zero or unspecified, the service will
-                  # attempt to choose a reasonable default.
+              "defaultPackageSet": "A String", # The default package set to install.  This allows the service to
+                  # select a default set of packages which are useful to worker
+                  # harnesses written in a particular language.
+              "packages": [ # Packages to be installed on workers.
+                { # The packages that must be installed in order for a worker to run the
+                    # steps of the Cloud Dataflow job that will be assigned to its worker
+                    # pool.
+                    #
+                    # This is the mechanism by which the Cloud Dataflow SDK causes code to
+                    # be loaded onto the workers. For example, the Cloud Dataflow Java SDK
+                    # might use this to install jars containing the user's code and all of the
+                    # various dependencies (libraries, data files, etc.) required in order
+                    # for that code to run.
+                  "name": "A String", # The name of the package.
+                  "location": "A String", # The resource to read the package from. The supported resource type is:
+                      #
+                      # Google Cloud Storage:
+                      #
+                      #   storage.googleapis.com/{bucket}
+                      #   bucket.storage.googleapis.com/
+                },
+              ],
+              "autoscalingSettings": { # Settings for WorkerPool autoscaling. # Settings for autoscaling of this WorkerPool.
+                "maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
+                "algorithm": "A String", # The algorithm to use for autoscaling.
+              },
               "dataDisks": [ # Data disks that are used by a VM in this workflow.
                 { # Describes the data disk used by a workflow job.
                   "mountPoint": "A String", # Directory in a VM where disk is mounted.
@@ -960,78 +1020,13 @@
                       # compute.googleapis.com/projects/project-id/zones/zone/diskTypes/pd-standard
                 },
               ],
-              "teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool.
-                  # Allowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and
-                  # `TEARDOWN_NEVER`.
-                  # `TEARDOWN_ALWAYS` means workers are always torn down regardless of whether
-                  # the job succeeds. `TEARDOWN_ON_SUCCESS` means workers are torn down
-                  # if the job succeeds. `TEARDOWN_NEVER` means the workers are never torn
-                  # down.
-                  #
-                  # If the workers are not torn down by the service, they will
-                  # continue to run and use Google Compute Engine VM resources in the
-                  # user's project until they are explicitly terminated by the user.
-                  # Because of this, Google recommends using the `TEARDOWN_ALWAYS`
-                  # policy except for small, manually supervised test jobs.
-                  #
-                  # If unknown or unspecified, the service will attempt to choose a reasonable
-                  # default.
-              "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google
-                  # Compute Engine API.
-              "ipConfiguration": "A String", # Configuration for VM IPs.
-              "numThreadsPerWorker": 42, # The number of threads per worker harness. If empty or unspecified, the
-                  # service will choose a number of threads (according to the number of cores
-                  # on the selected machine type for batch, or 1 by convention for streaming).
-              "poolArgs": { # Extra arguments for this worker pool.
-                "a_key": "", # Properties of the object. Contains field @type with type URL.
-              },
-              "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to
-                  # execute the job.  If zero or unspecified, the service will
+              "diskType": "A String", # Type of root disk for VMs.  If empty or unspecified, the service will
                   # attempt to choose a reasonable default.
               "workerHarnessContainerImage": "A String", # Required. Docker container image that executes the Cloud Dataflow worker
                   # harness, residing in Google Container Registry.
-              "subnetwork": "A String", # Subnetwork to which VMs will be assigned, if desired.  Expected to be of
-                  # the form "regions/REGION/subnetworks/SUBNETWORK".
-              "packages": [ # Packages to be installed on workers.
-                { # The packages that must be installed in order for a worker to run the
-                    # steps of the Cloud Dataflow job that will be assigned to its worker
-                    # pool.
-                    #
-                    # This is the mechanism by which the Cloud Dataflow SDK causes code to
-                    # be loaded onto the workers. For example, the Cloud Dataflow Java SDK
-                    # might use this to install jars containing the user's code and all of the
-                    # various dependencies (libraries, data files, etc.) required in order
-                    # for that code to run.
-                  "location": "A String", # The resource to read the package from. The supported resource type is:
-                      #
-                      # Google Cloud Storage:
-                      #
-                      #   storage.googleapis.com/{bucket}
-                      #   bucket.storage.googleapis.com/
-                  "name": "A String", # The name of the package.
-                },
-              ],
-              "autoscalingSettings": { # Settings for WorkerPool autoscaling. # Settings for autoscaling of this WorkerPool.
-                "algorithm": "A String", # The algorithm to use for autoscaling.
-                "maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
-              },
-              "defaultPackageSet": "A String", # The default package set to install.  This allows the service to
-                  # select a default set of packages which are useful to worker
-                  # harnesses written in a particular language.
-              "diskType": "A String", # Type of root disk for VMs.  If empty or unspecified, the service will
-                  # attempt to choose a reasonable default.
-              "metadata": { # Metadata to set on the Google Compute Engine VMs.
-                "a_key": "A String",
-              },
             },
           ],
         },
-        "replaceJobId": "A String", # If this job is an update of an existing job, this field is the job ID
-            # of the job it replaced.
-            #
-            # When sending a `CreateJobRequest`, you can update a job by specifying it
-            # here. The job named here is stopped, and its intermediate state is
-            # transferred to this job.
         "pipelineDescription": { # A descriptive representation of submitted pipeline as well as the executed # Preliminary field: The format of this data may change at any time.
             # A description of the user pipeline and stages through which it is executed.
             # Created by Cloud Dataflow service.  Only retrieved with
@@ -1047,6 +1042,9 @@
               ],
               "displayData": [ # Transform-specific display data.
                 { # Data provided with a pipeline or transform to provide descriptive info.
+                  "key": "A String", # The key identifying the display data.
+                      # This is intended to be used as a label for the display data
+                      # when viewed in a dax monitoring system.
                   "shortStrValue": "A String", # A possible additional shorter value to display.
                       # For example a java_class_name_value of com.mypackage.MyDoFn
                       # will be stored with MyDoFn as the short_str_value and
@@ -1054,7 +1052,6 @@
                       # short_str_value can be displayed and java_class_name_value
                       # will be displayed as a tooltip.
                   "timestampValue": "A String", # Contains value if the data is of timestamp type.
-                  "durationValue": "A String", # Contains value if the data is of duration type.
                   "url": "A String", # An optional full URL.
                   "floatValue": 3.14, # Contains value if the data is of float type.
                   "namespace": "A String", # The namespace for the key. This is usually a class name or programming
@@ -1065,9 +1062,7 @@
                   "label": "A String", # An optional label to display in a dax UI for the element.
                   "boolValue": True or False, # Contains value if the data is of a boolean type.
                   "strValue": "A String", # Contains value if the data is of string type.
-                  "key": "A String", # The key identifying the display data.
-                      # This is intended to be used as a label for the display data
-                      # when viewed in a dax monitoring system.
+                  "durationValue": "A String", # Contains value if the data is of duration type.
                   "int64Value": "A String", # Contains value if the data is of int64 type.
                 },
               ],
@@ -1079,6 +1074,9 @@
           ],
           "displayData": [ # Pipeline level display data.
             { # Data provided with a pipeline or transform to provide descriptive info.
+              "key": "A String", # The key identifying the display data.
+                  # This is intended to be used as a label for the display data
+                  # when viewed in a dax monitoring system.
               "shortStrValue": "A String", # A possible additional shorter value to display.
                   # For example a java_class_name_value of com.mypackage.MyDoFn
                   # will be stored with MyDoFn as the short_str_value and
@@ -1086,7 +1084,6 @@
                   # short_str_value can be displayed and java_class_name_value
                   # will be displayed as a tooltip.
               "timestampValue": "A String", # Contains value if the data is of timestamp type.
-              "durationValue": "A String", # Contains value if the data is of duration type.
               "url": "A String", # An optional full URL.
               "floatValue": 3.14, # Contains value if the data is of float type.
               "namespace": "A String", # The namespace for the key. This is usually a class name or programming
@@ -1097,9 +1094,7 @@
               "label": "A String", # An optional label to display in a dax UI for the element.
               "boolValue": True or False, # Contains value if the data is of a boolean type.
               "strValue": "A String", # Contains value if the data is of string type.
-              "key": "A String", # The key identifying the display data.
-                  # This is intended to be used as a label for the display data
-                  # when viewed in a dax monitoring system.
+              "durationValue": "A String", # Contains value if the data is of duration type.
               "int64Value": "A String", # Contains value if the data is of int64 type.
             },
           ],
@@ -1121,19 +1116,19 @@
               "outputSource": [ # Output sources for this stage.
                 { # Description of an input or output of an execution stage.
                   "userName": "A String", # Human-readable name for this source; may be user or system generated.
-                  "sizeBytes": "A String", # Size of the source, if measurable.
-                  "name": "A String", # Dataflow service generated name for this source.
                   "originalTransformOrCollection": "A String", # User name for the original user transform or collection with which this
                       # source is most closely associated.
+                  "name": "A String", # Dataflow service generated name for this source.
+                  "sizeBytes": "A String", # Size of the source, if measurable.
                 },
               ],
               "inputSource": [ # Input sources for this stage.
                 { # Description of an input or output of an execution stage.
                   "userName": "A String", # Human-readable name for this source; may be user or system generated.
-                  "sizeBytes": "A String", # Size of the source, if measurable.
-                  "name": "A String", # Dataflow service generated name for this source.
                   "originalTransformOrCollection": "A String", # User name for the original user transform or collection with which this
                       # source is most closely associated.
+                  "name": "A String", # Dataflow service generated name for this source.
+                  "sizeBytes": "A String", # Size of the source, if measurable.
                 },
               ],
               "componentTransform": [ # Transforms that comprise this execution stage.
@@ -1175,16 +1170,26 @@
               # Note that the Cloud Dataflow service may be used to run many different
               # types of jobs, not just Map-Reduce.
             "kind": "A String", # The kind of step in the Cloud Dataflow job.
-            "name": "A String", # The name that identifies the step. This must be unique for each
-                # step with respect to all other steps in the Cloud Dataflow job.
             "properties": { # Named properties associated with the step. Each kind of
                 # predefined step has its own required set of properties.
                 # Must be provided on Create.  Only retrieved with JOB_VIEW_ALL.
               "a_key": "", # Properties of the object.
             },
+            "name": "A String", # The name that identifies the step. This must be unique for each
+                # step with respect to all other steps in the Cloud Dataflow job.
           },
         ],
-        "location": "A String", # The location that contains this job.
+        "currentState": "A String", # The current state of the job.
+            #
+            # Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise
+            # specified.
+            #
+            # A job in the `JOB_STATE_RUNNING` state may asynchronously enter a
+            # terminal state. After a job has reached a terminal state, no
+            # further state updates may be made.
+            #
+            # This field may be mutated by the Cloud Dataflow service;
+            # callers cannot mutate it.
         "tempFiles": [ # A set of files the system should be aware of that are used
             # for temporary storage. These temporary files will be
             # removed on job completion.
@@ -1204,17 +1209,12 @@
             #
             # This field is set by the Cloud Dataflow service when the Job is
             # created, and is immutable for the life of the job.
-        "currentState": "A String", # The current state of the job.
+        "replaceJobId": "A String", # If this job is an update of an existing job, this field is the job ID
+            # of the job it replaced.
             #
-            # Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise
-            # specified.
-            #
-            # A job in the `JOB_STATE_RUNNING` state may asynchronously enter a
-            # terminal state. After a job has reached a terminal state, no
-            # further state updates may be made.
-            #
-            # This field may be mutated by the Cloud Dataflow service;
-            # callers cannot mutate it.
+            # When sending a `CreateJobRequest`, you can update a job by specifying it
+            # here. The job named here is stopped, and its intermediate state is
+            # transferred to this job.
         "executionInfo": { # Additional information about how a Cloud Dataflow job will be executed that # Deprecated.
             # isn't contained in the submitted job.
           "stages": { # A mapping from each stage to the information about that stage.