Updated docs
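
Regenerated the dataflow_v1b3.projects.jobs reference. The schema changes picked up here: a new job-level tempFiles list, a new required workerHarnessContainerImage field on worker pools, a trimmed zone description (the hard-coded "us-central1-b" example is gone), and repaired google.dataflow.v1beta3.Step cross-reference comments.

For reviewers, a minimal sketch (not part of the generated HTML; assumes Application Default Credentials, and the project, bucket, and image paths are placeholders) of how the two new fields surface through projects.jobs.create:

    from googleapiclient.discovery import build

    # Assumes Application Default Credentials are available in the environment.
    service = build('dataflow', 'v1b3')

    job_body = {
        'name': 'example-job',
        'type': 'JOB_TYPE_BATCH',
        # New job-level field: temp storage files. No duplicates, no file
        # patterns; Google Cloud Storage locations only.
        'tempFiles': [
            'storage.googleapis.com/my-bucket/tmp/staging-file',  # placeholder
        ],
        'environment': {
            'workerPools': [{
                'kind': 'harness',
                'numWorkers': 1,
                # New required worker-pool field: the worker harness image in
                # Google Container Registry (placeholder path).
                'workerHarnessContainerImage': 'gcr.io/my-project/harness:latest',
                # 'zone' is omitted here; the service picks a default.
            }],
        },
    }

    response = service.projects().jobs().create(
        projectId='my-project', body=job_body).execute()
    print(response['id'])
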
diff --git a/docs/dyn/dataflow_v1b3.projects.jobs.html b/docs/dyn/dataflow_v1b3.projects.jobs.html
index fa98557..d26949e 100644
--- a/docs/dyn/dataflow_v1b3.projects.jobs.html
+++ b/docs/dyn/dataflow_v1b3.projects.jobs.html
@@ -177,7 +177,7 @@
           "kind": "A String", # The kind of the worker pool; currently only 'harness' and 'shuffle' are supported.
           "machineType": "A String", # Machine type (e.g. "n1-standard-1"). If empty or unspecified, the service will attempt to choose a reasonable default.
           "network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
-          "zone": "A String", # Zone to run the worker pools in (e.g. "us-central1-b"). If empty or unspecified, the service will attempt to choose a reasonable default.
+          "zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
           "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google Compute Engine API.
           "diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
           "teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool. Allowed values are: TEARDOWN_ALWAYS, TEARDOWN_ON_SUCCESS, and TEARDOWN_NEVER. TEARDOWN_ALWAYS means workers are always torn down regardless of whether the job succeeds. TEARDOWN_ON_SUCCESS means workers are torn down if the job succeeds. TEARDOWN_NEVER means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the TEARDOWN_ALWAYS policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
@@ -189,6 +189,7 @@
             "a_key": "", # Properties of the object. Contains field @ype with type URL.
           },
           "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
+          "workerHarnessContainerImage": "A String", # Docker container image that executes the Dataflow worker harness, residing in Google Container Registry. Required.
           "defaultPackageSet": "A String", # The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language.
           "packages": [ # Packages to be installed on workers.
             { # Packages that need to be installed in order for a worker to run the steps of the Dataflow job which will be assigned to its worker pool. This is the mechanism by which the SDK causes code to be loaded onto the workers. For example, the Dataflow Java SDK might use this to install jars containing the user's code and all of the various dependencies (libraries, data files, etc) required in order for that code to run.
@@ -222,12 +223,15 @@
       },
     ],
     "currentStateTime": "A String", # The timestamp associated with the current state.
+    "tempFiles": [ # A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. Supported locations are Google Cloud Storage: storage.googleapis.com/{bucket}/{object} or bucket.storage.googleapis.com/{object}
+      "A String",
+    ],
     "type": "A String", # The type of dataflow job.
     "id": "A String", # The unique ID of this job. This field is set by the Dataflow service when the Job is created, and is immutable for the life of the Job.
     "currentState": "A String", # The current state of the job. Jobs are created in the JOB_STATE_STOPPED state unless otherwise specified. A job in the JOB_STATE_RUNNING state may asynchronously enter a terminal state. Once a job has reached a terminal state, no further state updates may be made. This field may be mutated by the Dataflow service; callers cannot mutate it.
     "executionInfo": { # Additional information about how a Dataflow job will be executed which isn’t contained in the submitted job. # Information about how the Dataflow service will actually run the job.
       "stages": { # A mapping from each stage to the information about that stage.
-        "a_key": { # Contains information about how a particular [google.dataflow.v1beta3.Step][google.dataflow.v1beta3.Step] will be executed.
+        "a_key": { # Contains information about how a particular google.dataflow.v1beta3.Step will be executed.
           "stepName": [ # The steps associated with the execution stage. Note that stages may have several steps, and that a given step might be run by more than one stage.
             "A String",
           ],
@@ -308,7 +312,7 @@
             "kind": "A String", # The kind of the worker pool; currently only 'harness' and 'shuffle' are supported.
             "machineType": "A String", # Machine type (e.g. "n1-standard-1"). If empty or unspecified, the service will attempt to choose a reasonable default.
             "network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
-            "zone": "A String", # Zone to run the worker pools in (e.g. "us-central1-b"). If empty or unspecified, the service will attempt to choose a reasonable default.
+            "zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
             "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google Compute Engine API.
             "diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
             "teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool. Allowed values are: TEARDOWN_ALWAYS, TEARDOWN_ON_SUCCESS, and TEARDOWN_NEVER. TEARDOWN_ALWAYS means workers are always torn down regardless of whether the job succeeds. TEARDOWN_ON_SUCCESS means workers are torn down if the job succeeds. TEARDOWN_NEVER means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the TEARDOWN_ALWAYS policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
@@ -320,6 +324,7 @@
               "a_key": "", # Properties of the object. Contains field @ype with type URL.
             },
             "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
+            "workerHarnessContainerImage": "A String", # Docker container image that executes the Dataflow worker harness, residing in Google Container Registry. Required.
             "defaultPackageSet": "A String", # The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language.
             "packages": [ # Packages to be installed on workers.
               { # Packages that need to be installed in order for a worker to run the steps of the Dataflow job which will be assigned to its worker pool. This is the mechanism by which the SDK causes code to be loaded onto the workers. For example, the Dataflow Java SDK might use this to install jars containing the user's code and all of the various dependencies (libraries, data files, etc) required in order for that code to run.
@@ -353,12 +358,15 @@
         },
       ],
       "currentStateTime": "A String", # The timestamp associated with the current state.
+      "tempFiles": [ # A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. Supported locations are Google Cloud Storage: storage.googleapis.com/{bucket}/{object} or bucket.storage.googleapis.com/{object}
+        "A String",
+      ],
       "type": "A String", # The type of dataflow job.
       "id": "A String", # The unique ID of this job. This field is set by the Dataflow service when the Job is created, and is immutable for the life of the Job.
       "currentState": "A String", # The current state of the job. Jobs are created in the JOB_STATE_STOPPED state unless otherwise specified. A job in the JOB_STATE_RUNNING state may asynchronously enter a terminal state. Once a job has reached a terminal state, no further state updates may be made. This field may be mutated by the Dataflow service; callers cannot mutate it.
       "executionInfo": { # Additional information about how a Dataflow job will be executed which isn’t contained in the submitted job. # Information about how the Dataflow service will actually run the job.
         "stages": { # A mapping from each stage to the information about that stage.
-          "a_key": { # Contains information about how a particular [google.dataflow.v1beta3.Step][google.dataflow.v1beta3.Step] will be executed.
+          "a_key": { # Contains information about how a particular google.dataflow.v1beta3.Step will be executed.
             "stepName": [ # The steps associated with the execution stage. Note that stages may have several steps, and that a given step might be run by more than one stage.
               "A String",
             ],
@@ -446,7 +454,7 @@
             "kind": "A String", # The kind of the worker pool; currently only 'harness' and 'shuffle' are supported.
             "machineType": "A String", # Machine type (e.g. "n1-standard-1"). If empty or unspecified, the service will attempt to choose a reasonable default.
             "network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
-            "zone": "A String", # Zone to run the worker pools in (e.g. "us-central1-b"). If empty or unspecified, the service will attempt to choose a reasonable default.
+            "zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
             "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google Compute Engine API.
             "diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
             "teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool. Allowed values are: TEARDOWN_ALWAYS, TEARDOWN_ON_SUCCESS, and TEARDOWN_NEVER. TEARDOWN_ALWAYS means workers are always torn down regardless of whether the job succeeds. TEARDOWN_ON_SUCCESS means workers are torn down if the job succeeds. TEARDOWN_NEVER means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the TEARDOWN_ALWAYS policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
@@ -458,6 +466,7 @@
               "a_key": "", # Properties of the object. Contains field @ype with type URL.
             },
             "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
+            "workerHarnessContainerImage": "A String", # Docker container image that executes the Dataflow worker harness, residing in Google Container Registry. Required.
             "defaultPackageSet": "A String", # The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language.
             "packages": [ # Packages to be installed on workers.
               { # Packages that need to be installed in order for a worker to run the steps of the Dataflow job which will be assigned to its worker pool. This is the mechanism by which the SDK causes code to be loaded onto the workers. For example, the Dataflow Java SDK might use this to install jars containing the user's code and all of the various dependencies (libraries, data files, etc) required in order for that code to run.
@@ -491,12 +500,15 @@
         },
       ],
       "currentStateTime": "A String", # The timestamp associated with the current state.
+      "tempFiles": [ # A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. Supported locations are Google Cloud Storage: storage.googleapis.com/{bucket}/{object} or bucket.storage.googleapis.com/{object}
+        "A String",
+      ],
       "type": "A String", # The type of dataflow job.
       "id": "A String", # The unique ID of this job. This field is set by the Dataflow service when the Job is created, and is immutable for the life of the Job.
       "currentState": "A String", # The current state of the job. Jobs are created in the JOB_STATE_STOPPED state unless otherwise specified. A job in the JOB_STATE_RUNNING state may asynchronously enter a terminal state. Once a job has reached a terminal state, no further state updates may be made. This field may be mutated by the Dataflow service; callers cannot mutate it.
       "executionInfo": { # Additional information about how a Dataflow job will be executed which isn’t contained in the submitted job. # Information about how the Dataflow service will actually run the job.
         "stages": { # A mapping from each stage to the information about that stage.
-          "a_key": { # Contains information about how a particular [google.dataflow.v1beta3.Step][google.dataflow.v1beta3.Step] will be executed.
+          "a_key": { # Contains information about how a particular google.dataflow.v1beta3.Step will be executed.
             "stepName": [ # The steps associated with the execution stage. Note that stages may have several steps, and that a given step might be run by more than one stage.
               "A String",
             ],
@@ -626,7 +638,7 @@
                 "kind": "A String", # The kind of the worker pool; currently only 'harness' and 'shuffle' are supported.
                 "machineType": "A String", # Machine type (e.g. "n1-standard-1"). If empty or unspecified, the service will attempt to choose a reasonable default.
                 "network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
-                "zone": "A String", # Zone to run the worker pools in (e.g. "us-central1-b"). If empty or unspecified, the service will attempt to choose a reasonable default.
+                "zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
                 "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google Compute Engine API.
                 "diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
                 "teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool. Allowed values are: TEARDOWN_ALWAYS, TEARDOWN_ON_SUCCESS, and TEARDOWN_NEVER. TEARDOWN_ALWAYS means workers are always torn down regardless of whether the job succeeds. TEARDOWN_ON_SUCCESS means workers are torn down if the job succeeds. TEARDOWN_NEVER means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the TEARDOWN_ALWAYS policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
@@ -638,6 +650,7 @@
                   "a_key": "", # Properties of the object. Contains field @ype with type URL.
                 },
                 "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
+                "workerHarnessContainerImage": "A String", # Docker container image that executes the Dataflow worker harness, residing in Google Container Registry. Required.
                 "defaultPackageSet": "A String", # The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language.
                 "packages": [ # Packages to be installed on workers.
                   { # Packages that need to be installed in order for a worker to run the steps of the Dataflow job which will be assigned to its worker pool. This is the mechanism by which the SDK causes code to be loaded onto the workers. For example, the Dataflow Java SDK might use this to install jars containing the user's code and all of the various dependencies (libraries, data files, etc) required in order for that code to run.
@@ -671,12 +684,15 @@
             },
           ],
           "currentStateTime": "A String", # The timestamp associated with the current state.
+          "tempFiles": [ # A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. Supported locations are Google Cloud Storage: storage.googleapis.com/{bucket}/{object} or bucket.storage.googleapis.com/{object}
+            "A String",
+          ],
           "type": "A String", # The type of dataflow job.
           "id": "A String", # The unique ID of this job. This field is set by the Dataflow service when the Job is created, and is immutable for the life of the Job.
           "currentState": "A String", # The current state of the job. Jobs are created in the JOB_STATE_STOPPED state unless otherwise specified. A job in the JOB_STATE_RUNNING state may asynchronously enter a terminal state. Once a job has reached a terminal state, no further state updates may be made. This field may be mutated by the Dataflow service; callers cannot mutate it.
           "executionInfo": { # Additional information about how a Dataflow job will be executed which isn’t contained in the submitted job. # Information about how the Dataflow service will actually run the job.
             "stages": { # A mapping from each stage to the information about that stage.
-              "a_key": { # Contains information about how a particular [google.dataflow.v1beta3.Step][google.dataflow.v1beta3.Step] will be executed.
+              "a_key": { # Contains information about how a particular google.dataflow.v1beta3.Step will be executed.
                 "stepName": [ # The steps associated with the execution stage. Note that stages may have several steps, and that a given step might be run by more than one stage.
                   "A String",
                 ],
@@ -777,7 +793,7 @@
           "kind": "A String", # The kind of the worker pool; currently only 'harness' and 'shuffle' are supported.
           "machineType": "A String", # Machine type (e.g. "n1-standard-1"). If empty or unspecified, the service will attempt to choose a reasonable default.
           "network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
-          "zone": "A String", # Zone to run the worker pools in (e.g. "us-central1-b"). If empty or unspecified, the service will attempt to choose a reasonable default.
+          "zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
           "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google Compute Engine API.
           "diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
           "teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool. Allowed values are: TEARDOWN_ALWAYS, TEARDOWN_ON_SUCCESS, and TEARDOWN_NEVER. TEARDOWN_ALWAYS means workers are always torn down regardless of whether the job succeeds. TEARDOWN_ON_SUCCESS means workers are torn down if the job succeeds. TEARDOWN_NEVER means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the TEARDOWN_ALWAYS policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
@@ -789,6 +805,7 @@
             "a_key": "", # Properties of the object. Contains field @ype with type URL.
           },
           "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
+          "workerHarnessContainerImage": "A String", # Docker container image that executes the Dataflow worker harness, residing in Google Container Registry. Required.
           "defaultPackageSet": "A String", # The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language.
           "packages": [ # Packages to be installed on workers.
             { # Packages that need to be installed in order for a worker to run the steps of the Dataflow job which will be assigned to its worker pool. This is the mechanism by which the SDK causes code to be loaded onto the workers. For example, the Dataflow Java SDK might use this to install jars containing the user's code and all of the various dependencies (libraries, data files, etc) required in order for that code to run.
@@ -822,12 +839,15 @@
       },
     ],
     "currentStateTime": "A String", # The timestamp associated with the current state.
+    "tempFiles": [ # A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. Supported locations are Google Cloud Storage: storage.googleapis.com/{bucket}/{object} or bucket.storage.googleapis.com/{object}
+      "A String",
+    ],
     "type": "A String", # The type of dataflow job.
     "id": "A String", # The unique ID of this job. This field is set by the Dataflow service when the Job is created, and is immutable for the life of the Job.
     "currentState": "A String", # The current state of the job. Jobs are created in the JOB_STATE_STOPPED state unless otherwise specified. A job in the JOB_STATE_RUNNING state may asynchronously enter a terminal state. Once a job has reached a terminal state, no further state updates may be made. This field may be mutated by the Dataflow service; callers cannot mutate it.
     "executionInfo": { # Additional information about how a Dataflow job will be executed which isn’t contained in the submitted job. # Information about how the Dataflow service will actually run the job.
       "stages": { # A mapping from each stage to the information about that stage.
-        "a_key": { # Contains information about how a particular [google.dataflow.v1beta3.Step][google.dataflow.v1beta3.Step] will be executed.
+        "a_key": { # Contains information about how a particular google.dataflow.v1beta3.Step will be executed.
           "stepName": [ # The steps associated with the execution stage. Note that stages may have several steps, and that a given step might be run by more than one stage.
             "A String",
           ],
@@ -906,7 +926,7 @@
             "kind": "A String", # The kind of the worker pool; currently only 'harness' and 'shuffle' are supported.
             "machineType": "A String", # Machine type (e.g. "n1-standard-1"). If empty or unspecified, the service will attempt to choose a reasonable default.
             "network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
-            "zone": "A String", # Zone to run the worker pools in (e.g. "us-central1-b"). If empty or unspecified, the service will attempt to choose a reasonable default.
+            "zone": "A String", # Zone to run the worker pools in. If empty or unspecified, the service will attempt to choose a reasonable default.
             "onHostMaintenance": "A String", # The action to take on host maintenance, as defined by the Google Compute Engine API.
             "diskType": "A String", # Type of root disk for VMs. If empty or unspecified, the service will attempt to choose a reasonable default.
             "teardownPolicy": "A String", # Sets the policy for determining when to turndown worker pool. Allowed values are: TEARDOWN_ALWAYS, TEARDOWN_ON_SUCCESS, and TEARDOWN_NEVER. TEARDOWN_ALWAYS means workers are always torn down regardless of whether the job succeeds. TEARDOWN_ON_SUCCESS means workers are torn down if the job succeeds. TEARDOWN_NEVER means the workers are never torn down. If the workers are not torn down by the service, they will continue to run and use Google Compute Engine VM resources in the user's project until they are explicitly terminated by the user. Because of this, Google recommends using the TEARDOWN_ALWAYS policy except for small, manually supervised test jobs. If unknown or unspecified, the service will attempt to choose a reasonable default.
@@ -918,6 +938,7 @@
               "a_key": "", # Properties of the object. Contains field @ype with type URL.
             },
             "numWorkers": 42, # Number of Google Compute Engine workers in this pool needed to execute the job. If zero or unspecified, the service will attempt to choose a reasonable default.
+            "workerHarnessContainerImage": "A String", # Docker container image that executes the Dataflow worker harness, residing in Google Container Registry. Required.
             "defaultPackageSet": "A String", # The default package set to install. This allows the service to select a default set of packages which are useful to worker harnesses written in a particular language.
             "packages": [ # Packages to be installed on workers.
               { # Packages that need to be installed in order for a worker to run the steps of the Dataflow job which will be assigned to its worker pool. This is the mechanism by which the SDK causes code to be loaded onto the workers. For example, the Dataflow Java SDK might use this to install jars containing the user's code and all of the various dependencies (libraries, data files, etc) required in order for that code to run.
@@ -951,12 +972,15 @@
         },
       ],
       "currentStateTime": "A String", # The timestamp associated with the current state.
+      "tempFiles": [ # A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. Supported locations are Google Cloud Storage: storage.googleapis.com/{bucket}/{object} or bucket.storage.googleapis.com/{object}
+        "A String",
+      ],
       "type": "A String", # The type of dataflow job.
       "id": "A String", # The unique ID of this job. This field is set by the Dataflow service when the Job is created, and is immutable for the life of the Job.
       "currentState": "A String", # The current state of the job. Jobs are created in the JOB_STATE_STOPPED state unless otherwise specified. A job in the JOB_STATE_RUNNING state may asynchronously enter a terminal state. Once a job has reached a terminal state, no further state updates may be made. This field may be mutated by the Dataflow service; callers cannot mutate it.
       "executionInfo": { # Additional information about how a Dataflow job will be executed which isn’t contained in the submitted job. # Information about how the Dataflow service will actually run the job.
         "stages": { # A mapping from each stage to the information about that stage.
-          "a_key": { # Contains information about how a particular [google.dataflow.v1beta3.Step][google.dataflow.v1beta3.Step] will be executed.
+          "a_key": { # Contains information about how a particular google.dataflow.v1beta3.Step will be executed.
             "stepName": [ # The steps associated with the execution stage. Note that stages may have several steps, and that a given step might be run by more than one stage.
               "A String",
             ],