Release v1.6.0 (#324)

* Update version and changelog for v1.6.0
* Update docs (Dataproc v1 jobs: adds a `filter` parameter to `list`, a `labels` field on jobs, and output-only `yarnApplications`; see the usage sketch below)
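
For reference, a minimal sketch of the updated job surface. This assumes Application Default Credentials via oauth2client; `my-project`, `my-cluster`, and the GCS URI are placeholders, and only the parameter and field names (`filter`, `labels`, `jobStateMatcher`) come from the docs diff below.

```python
# Hedged sketch of the Dataproc v1 jobs surface updated in this release.
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials

credentials = GoogleCredentials.get_application_default()
dataproc = discovery.build('dataproc', 'v1', credentials=credentials)
jobs = dataproc.projects().regions().jobs()

# list() now accepts `filter`; jobStateMatcher defaults to matching ALL
# jobs when omitted.
request = jobs.list(
    projectId='my-project',   # placeholder project ID
    region='global',
    filter='status.state = ACTIVE AND labels.env = staging',
)
while request is not None:
    response = request.execute()
    for job in response.get('jobs', []):
        print(job['reference']['jobId'], job['status']['state'])
    # list_next() follows pageToken continuations for us.
    request = jobs.list_next(request, response)

# Jobs can now carry up to 32 RFC 1035-conformant labels at submission:
job_body = {
    'job': {
        'placement': {'clusterName': 'my-cluster'},            # placeholder
        'pigJob': {'queryFileUri': 'gs://my-bucket/query.pig'}, # placeholder
        'labels': {'env': 'staging'},
    }
}
# jobs.submit(projectId='my-project', region='global', body=job_body).execute()
```
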
diff --git a/docs/dyn/dataproc_v1.projects.regions.jobs.html b/docs/dyn/dataproc_v1.projects.regions.jobs.html
index e3f6df9..ab6e84e 100644
--- a/docs/dyn/dataproc_v1.projects.regions.jobs.html
+++ b/docs/dyn/dataproc_v1.projects.regions.jobs.html
@@ -84,7 +84,7 @@
   <code><a href="#get">get(projectId, region, jobId, x__xgafv=None)</a></code></p>
 <p class="firstline">Gets the resource representation for a job in a project.</p>
 <p class="toc_element">
-  <code><a href="#list">list(projectId, region, pageSize=None, x__xgafv=None, jobStateMatcher=None, pageToken=None, clusterName=None)</a></code></p>
+  <code><a href="#list">list(projectId, region, pageSize=None, x__xgafv=None, pageToken=None, clusterName=None, jobStateMatcher=None, filter=None)</a></code></p>
 <p class="firstline">Lists regions/{region}/jobs in a project.</p>
 <p class="toc_element">
   <code><a href="#list_next">list_next(previous_request, previous_response)</a></code></p>
@@ -114,11 +114,11 @@
 
     { # A Cloud Dataproc job resource.
     "status": { # Cloud Dataproc job status. # [Output-only] The job status. Additional application-specific status information may be contained in the type_job and yarn_applications fields.
-      "state": "A String", # [Required] A state message specifying the overall job state.
+      "state": "A String", # [Output-only] A state message specifying the overall job state.
       "stateStartTime": "A String", # [Output-only] The time when this state was entered.
-      "details": "A String", # [Optional] Job state details, such as an error description if the state is ERROR.
+      "details": "A String", # [Output-only] Optional job state details, such as an error description if the state is ERROR.
     },
-    "hadoopJob": { # A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN. # Job is a Hadoop job.
+    "hadoopJob": { # A Cloud Dataproc job for running [Apache Hadoop MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on [Apache Hadoop YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). # Job is a Hadoop job.
       "jarFileUris": [ # [Optional] Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
         "A String",
       ],
@@ -144,9 +144,9 @@
     },
     "statusHistory": [ # [Output-only] The previous job status.
       { # Cloud Dataproc job status.
-        "state": "A String", # [Required] A state message specifying the overall job state.
+        "state": "A String", # [Output-only] A state message specifying the overall job state.
         "stateStartTime": "A String", # [Output-only] The time when this state was entered.
-        "details": "A String", # [Optional] Job state details, such as an error description if the state is ERROR.
+        "details": "A String", # [Output-only] Optional job state details, such as an error description if the state is ERROR.
       },
     ],
     "placement": { # Cloud Dataproc job config. # [Required] Job information, including how, when, and where to run the job.
@@ -155,9 +155,9 @@
     },
     "reference": { # Encapsulates the full scoping used to reference a job. # [Optional] The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a job_id.
       "projectId": "A String", # [Required] The ID of the Google Cloud Platform project that the job belongs to.
-      "jobId": "A String", # [Required] The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 512 characters.
+      "jobId": "A String", # [Optional] The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 512 characters.
     },
-    "sparkSqlJob": { # A Cloud Dataproc job for running Spark SQL queries. # Job is a SparkSql job.
+    "sparkSqlJob": { # A Cloud Dataproc job for running [Apache Spark SQL](http://spark.apache.org/sql/) queries. # Job is a SparkSql job.
       "queryFileUri": "A String", # The HCFS URI of the script that contains SQL queries.
       "scriptVariables": { # [Optional] Mapping of query variable names to values (equivalent to the Spark SQL command: SET `name="value";`).
         "a_key": "A String",
@@ -179,7 +179,7 @@
         "a_key": "A String",
       },
     },
-    "pigJob": { # A Cloud Dataproc job for running Pig queries on YARN. # Job is a Pig job.
+    "pigJob": { # A Cloud Dataproc job for running [Apache Pig](https://pig.apache.org/) queries on YARN. # Job is a Pig job.
       "queryFileUri": "A String", # The HCFS URI of the script that contains the Pig queries.
       "scriptVariables": { # [Optional] Mapping of query variable names to values (equivalent to the Pig command: `name=[value]`).
         "a_key": "A String",
@@ -203,8 +203,11 @@
       },
     },
     "driverOutputResourceUri": "A String", # [Output-only] A URI pointing to the location of the stdout of the job's driver program.
+    "labels": { # [Optional] The labels to associate with this job. Label **keys** must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values** may be empty, but, if present, must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.
+      "a_key": "A String",
+    },
     "driverControlFilesUri": "A String", # [Output-only] If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as `driver_output_uri`.
-    "sparkJob": { # A Cloud Dataproc job for running Spark applications on YARN. # Job is a Spark job.
+    "sparkJob": { # A Cloud Dataproc job for running [Apache Spark](http://spark.apache.org/) applications on YARN. # Job is a Spark job.
       "jarFileUris": [ # [Optional] HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
         "A String",
       ],
@@ -228,7 +231,15 @@
         "a_key": "A String",
       },
     },
-    "pysparkJob": { # A Cloud Dataproc job for running PySpark applications on YARN. # Job is a Pyspark job.
+    "yarnApplications": [ # [Output-only] The collection of YARN applications spun up by this job. **Beta** Feature: This report is available for testing purposes only. It may be changed before final release.
+      { # A YARN application created by a job. Application information is a subset of org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. **Beta Feature**: This report is available for testing purposes only. It may be changed before final release.
+        "progress": 3.14, # [Required] The numerical progress of the application, from 1 to 100.
+        "state": "A String", # [Required] The application state.
+        "name": "A String", # [Required] The application name.
+        "trackingUrl": "A String", # [Optional] The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.
+      },
+    ],
+    "pysparkJob": { # A Cloud Dataproc job for running [Apache PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN. # Job is a Pyspark job.
       "mainPythonFileUri": "A String", # [Required] The HCFS URI of the main Python file to use as the driver. Must be a .py file.
       "args": [ # [Optional] The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
         "A String",
@@ -254,7 +265,7 @@
         "a_key": "A String",
       },
     },
-    "hiveJob": { # A Cloud Dataproc job for running Hive queries on YARN. # Job is a Hive job.
+    "hiveJob": { # A Cloud Dataproc job for running [Apache Hive](https://hive.apache.org/) queries on YARN. # Job is a Hive job.
       "queryFileUri": "A String", # The HCFS URI of the script that contains Hive queries.
       "scriptVariables": { # [Optional] Mapping of query variable names to values (equivalent to the Hive command: `SET name="value";`).
         "a_key": "A String",
@@ -307,11 +318,11 @@
 
     { # A Cloud Dataproc job resource.
     "status": { # Cloud Dataproc job status. # [Output-only] The job status. Additional application-specific status information may be contained in the type_job and yarn_applications fields.
-      "state": "A String", # [Required] A state message specifying the overall job state.
+      "state": "A String", # [Output-only] A state message specifying the overall job state.
       "stateStartTime": "A String", # [Output-only] The time when this state was entered.
-      "details": "A String", # [Optional] Job state details, such as an error description if the state is ERROR.
+      "details": "A String", # [Output-only] Optional job state details, such as an error description if the state is ERROR.
     },
-    "hadoopJob": { # A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN. # Job is a Hadoop job.
+    "hadoopJob": { # A Cloud Dataproc job for running [Apache Hadoop MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on [Apache Hadoop YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). # Job is a Hadoop job.
       "jarFileUris": [ # [Optional] Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
         "A String",
       ],
@@ -337,9 +348,9 @@
     },
     "statusHistory": [ # [Output-only] The previous job status.
       { # Cloud Dataproc job status.
-        "state": "A String", # [Required] A state message specifying the overall job state.
+        "state": "A String", # [Output-only] A state message specifying the overall job state.
         "stateStartTime": "A String", # [Output-only] The time when this state was entered.
-        "details": "A String", # [Optional] Job state details, such as an error description if the state is ERROR.
+        "details": "A String", # [Output-only] Optional job state details, such as an error description if the state is ERROR.
       },
     ],
     "placement": { # Cloud Dataproc job config. # [Required] Job information, including how, when, and where to run the job.
@@ -348,9 +359,9 @@
     },
     "reference": { # Encapsulates the full scoping used to reference a job. # [Optional] The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a job_id.
       "projectId": "A String", # [Required] The ID of the Google Cloud Platform project that the job belongs to.
-      "jobId": "A String", # [Required] The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 512 characters.
+      "jobId": "A String", # [Optional] The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 512 characters.
     },
-    "sparkSqlJob": { # A Cloud Dataproc job for running Spark SQL queries. # Job is a SparkSql job.
+    "sparkSqlJob": { # A Cloud Dataproc job for running [Apache Spark SQL](http://spark.apache.org/sql/) queries. # Job is a SparkSql job.
       "queryFileUri": "A String", # The HCFS URI of the script that contains SQL queries.
       "scriptVariables": { # [Optional] Mapping of query variable names to values (equivalent to the Spark SQL command: SET `name="value";`).
         "a_key": "A String",
@@ -372,7 +383,7 @@
         "a_key": "A String",
       },
     },
-    "pigJob": { # A Cloud Dataproc job for running Pig queries on YARN. # Job is a Pig job.
+    "pigJob": { # A Cloud Dataproc job for running [Apache Pig](https://pig.apache.org/) queries on YARN. # Job is a Pig job.
       "queryFileUri": "A String", # The HCFS URI of the script that contains the Pig queries.
       "scriptVariables": { # [Optional] Mapping of query variable names to values (equivalent to the Pig command: `name=[value]`).
         "a_key": "A String",
@@ -396,8 +407,11 @@
       },
     },
     "driverOutputResourceUri": "A String", # [Output-only] A URI pointing to the location of the stdout of the job's driver program.
+    "labels": { # [Optional] The labels to associate with this job. Label **keys** must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values** may be empty, but, if present, must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.
+      "a_key": "A String",
+    },
     "driverControlFilesUri": "A String", # [Output-only] If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as `driver_output_uri`.
-    "sparkJob": { # A Cloud Dataproc job for running Spark applications on YARN. # Job is a Spark job.
+    "sparkJob": { # A Cloud Dataproc job for running [Apache Spark](http://spark.apache.org/) applications on YARN. # Job is a Spark job.
       "jarFileUris": [ # [Optional] HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
         "A String",
       ],
@@ -421,7 +435,15 @@
         "a_key": "A String",
       },
     },
-    "pysparkJob": { # A Cloud Dataproc job for running PySpark applications on YARN. # Job is a Pyspark job.
+    "yarnApplications": [ # [Output-only] The collection of YARN applications spun up by this job. **Beta** Feature: This report is available for testing purposes only. It may be changed before final release.
+      { # A YARN application created by a job. Application information is a subset of org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. **Beta Feature**: This report is available for testing purposes only. It may be changed before final release.
+        "progress": 3.14, # [Required] The numerical progress of the application, from 1 to 100.
+        "state": "A String", # [Required] The application state.
+        "name": "A String", # [Required] The application name.
+        "trackingUrl": "A String", # [Optional] The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.
+      },
+    ],
+    "pysparkJob": { # A Cloud Dataproc job for running [Apache PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN. # Job is a Pyspark job.
       "mainPythonFileUri": "A String", # [Required] The HCFS URI of the main Python file to use as the driver. Must be a .py file.
       "args": [ # [Optional] The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
         "A String",
@@ -447,7 +469,7 @@
         "a_key": "A String",
       },
     },
-    "hiveJob": { # A Cloud Dataproc job for running Hive queries on YARN. # Job is a Hive job.
+    "hiveJob": { # A Cloud Dataproc job for running [Apache Hive](https://hive.apache.org/) queries on YARN. # Job is a Hive job.
       "queryFileUri": "A String", # The HCFS URI of the script that contains Hive queries.
       "scriptVariables": { # [Optional] Mapping of query variable names to values (equivalent to the Hive command: `SET name="value";`).
         "a_key": "A String",
@@ -469,7 +491,7 @@
 </div>
 
 <div class="method">
-    <code class="details" id="list">list(projectId, region, pageSize=None, x__xgafv=None, jobStateMatcher=None, pageToken=None, clusterName=None)</code>
+    <code class="details" id="list">list(projectId, region, pageSize=None, x__xgafv=None, pageToken=None, clusterName=None, jobStateMatcher=None, filter=None)</code>
   <pre>Lists regions/{region}/jobs in a project.
 
 Args:
@@ -477,9 +499,10 @@
   region: string, [Required] The Cloud Dataproc region in which to handle the request. (required)
   pageSize: integer, [Optional] The number of results to return in each response.
   x__xgafv: string, V1 error format.
-  jobStateMatcher: string, [Optional] Specifies enumerated categories of jobs to list.
   pageToken: string, [Optional] The page token, returned by a previous call, to request the next page of results.
   clusterName: string, [Optional] If set, the returned jobs list includes only jobs that were submitted to the named cluster.
+  jobStateMatcher: string, [Optional] Specifies enumerated categories of jobs to list (default = match ALL jobs).
+  filter: string, [Optional] A filter constraining the jobs to list. Filters are case-sensitive and have the following syntax: [field:value] ... or [field = value] AND [field [= value]] ... where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label key. **value** can be `*` to match all values. `status.state` can be either `ACTIVE` or `INACTIVE`. Only the logical `AND` operator is supported; space-separated items are treated as having an implicit `AND` operator. Example valid filters are: `status.state:ACTIVE labels.env:staging labels.starred:*` and `status.state = ACTIVE AND labels.env = staging AND labels.starred = *`.
 
 Returns:
   An object of the form:
@@ -489,11 +512,11 @@
     "jobs": [ # [Output-only] Jobs list.
       { # A Cloud Dataproc job resource.
         "status": { # Cloud Dataproc job status. # [Output-only] The job status. Additional application-specific status information may be contained in the type_job and yarn_applications fields.
-          "state": "A String", # [Required] A state message specifying the overall job state.
+          "state": "A String", # [Output-only] A state message specifying the overall job state.
           "stateStartTime": "A String", # [Output-only] The time when this state was entered.
-          "details": "A String", # [Optional] Job state details, such as an error description if the state is ERROR.
+          "details": "A String", # [Output-only] Optional job state details, such as an error description if the state is ERROR.
         },
-        "hadoopJob": { # A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN. # Job is a Hadoop job.
+        "hadoopJob": { # A Cloud Dataproc job for running [Apache Hadoop MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on [Apache Hadoop YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). # Job is a Hadoop job.
           "jarFileUris": [ # [Optional] Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
             "A String",
           ],
@@ -519,9 +542,9 @@
         },
         "statusHistory": [ # [Output-only] The previous job status.
           { # Cloud Dataproc job status.
-            "state": "A String", # [Required] A state message specifying the overall job state.
+            "state": "A String", # [Output-only] A state message specifying the overall job state.
             "stateStartTime": "A String", # [Output-only] The time when this state was entered.
-            "details": "A String", # [Optional] Job state details, such as an error description if the state is ERROR.
+            "details": "A String", # [Output-only] Optional job state details, such as an error description if the state is ERROR.
           },
         ],
         "placement": { # Cloud Dataproc job config. # [Required] Job information, including how, when, and where to run the job.
@@ -530,9 +553,9 @@
         },
         "reference": { # Encapsulates the full scoping used to reference a job. # [Optional] The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a job_id.
           "projectId": "A String", # [Required] The ID of the Google Cloud Platform project that the job belongs to.
-          "jobId": "A String", # [Required] The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 512 characters.
+          "jobId": "A String", # [Optional] The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 512 characters.
         },
-        "sparkSqlJob": { # A Cloud Dataproc job for running Spark SQL queries. # Job is a SparkSql job.
+        "sparkSqlJob": { # A Cloud Dataproc job for running [Apache Spark SQL](http://spark.apache.org/sql/) queries. # Job is a SparkSql job.
           "queryFileUri": "A String", # The HCFS URI of the script that contains SQL queries.
           "scriptVariables": { # [Optional] Mapping of query variable names to values (equivalent to the Spark SQL command: SET `name="value";`).
             "a_key": "A String",
@@ -554,7 +577,7 @@
             "a_key": "A String",
           },
         },
-        "pigJob": { # A Cloud Dataproc job for running Pig queries on YARN. # Job is a Pig job.
+        "pigJob": { # A Cloud Dataproc job for running [Apache Pig](https://pig.apache.org/) queries on YARN. # Job is a Pig job.
           "queryFileUri": "A String", # The HCFS URI of the script that contains the Pig queries.
           "scriptVariables": { # [Optional] Mapping of query variable names to values (equivalent to the Pig command: `name=[value]`).
             "a_key": "A String",
@@ -578,8 +601,11 @@
           },
         },
         "driverOutputResourceUri": "A String", # [Output-only] A URI pointing to the location of the stdout of the job's driver program.
+        "labels": { # [Optional] The labels to associate with this job. Label **keys** must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values** may be empty, but, if present, must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.
+          "a_key": "A String",
+        },
         "driverControlFilesUri": "A String", # [Output-only] If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as `driver_output_uri`.
-        "sparkJob": { # A Cloud Dataproc job for running Spark applications on YARN. # Job is a Spark job.
+        "sparkJob": { # A Cloud Dataproc job for running [Apache Spark](http://spark.apache.org/) applications on YARN. # Job is a Spark job.
           "jarFileUris": [ # [Optional] HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
             "A String",
           ],
@@ -603,7 +629,15 @@
             "a_key": "A String",
           },
         },
-        "pysparkJob": { # A Cloud Dataproc job for running PySpark applications on YARN. # Job is a Pyspark job.
+        "yarnApplications": [ # [Output-only] The collection of YARN applications spun up by this job. **Beta** Feature: This report is available for testing purposes only. It may be changed before final release.
+          { # A YARN application created by a job. Application information is a subset of org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. **Beta Feature**: This report is available for testing purposes only. It may be changed before final release.
+            "progress": 3.14, # [Required] The numerical progress of the application, from 1 to 100.
+            "state": "A String", # [Required] The application state.
+            "name": "A String", # [Required] The application name.
+            "trackingUrl": "A String", # [Optional] The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.
+          },
+        ],
+        "pysparkJob": { # A Cloud Dataproc job for running [Apache PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN. # Job is a Pyspark job.
           "mainPythonFileUri": "A String", # [Required] The HCFS URI of the main Python file to use as the driver. Must be a .py file.
           "args": [ # [Optional] The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
             "A String",
@@ -629,7 +663,7 @@
             "a_key": "A String",
           },
         },
-        "hiveJob": { # A Cloud Dataproc job for running Hive queries on YARN. # Job is a Hive job.
+        "hiveJob": { # A Cloud Dataproc job for running [Apache Hive](https://hive.apache.org/) queries on YARN. # Job is a Hive job.
           "queryFileUri": "A String", # The HCFS URI of the script that contains Hive queries.
           "scriptVariables": { # [Optional] Mapping of query variable names to values (equivalent to the Hive command: `SET name="value";`).
             "a_key": "A String",
@@ -679,11 +713,11 @@
 { # A request to submit a job.
     "job": { # A Cloud Dataproc job resource. # [Required] The job resource.
       "status": { # Cloud Dataproc job status. # [Output-only] The job status. Additional application-specific status information may be contained in the type_job and yarn_applications fields.
-        "state": "A String", # [Required] A state message specifying the overall job state.
+        "state": "A String", # [Output-only] A state message specifying the overall job state.
         "stateStartTime": "A String", # [Output-only] The time when this state was entered.
-        "details": "A String", # [Optional] Job state details, such as an error description if the state is ERROR.
+        "details": "A String", # [Output-only] Optional job state details, such as an error description if the state is ERROR.
       },
-      "hadoopJob": { # A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN. # Job is a Hadoop job.
+      "hadoopJob": { # A Cloud Dataproc job for running [Apache Hadoop MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on [Apache Hadoop YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). # Job is a Hadoop job.
         "jarFileUris": [ # [Optional] Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
           "A String",
         ],
@@ -709,9 +743,9 @@
       },
       "statusHistory": [ # [Output-only] The previous job status.
         { # Cloud Dataproc job status.
-          "state": "A String", # [Required] A state message specifying the overall job state.
+          "state": "A String", # [Output-only] A state message specifying the overall job state.
           "stateStartTime": "A String", # [Output-only] The time when this state was entered.
-          "details": "A String", # [Optional] Job state details, such as an error description if the state is ERROR.
+          "details": "A String", # [Output-only] Optional job state details, such as an error description if the state is ERROR.
         },
       ],
       "placement": { # Cloud Dataproc job config. # [Required] Job information, including how, when, and where to run the job.
@@ -720,9 +754,9 @@
       },
       "reference": { # Encapsulates the full scoping used to reference a job. # [Optional] The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a job_id.
         "projectId": "A String", # [Required] The ID of the Google Cloud Platform project that the job belongs to.
-        "jobId": "A String", # [Required] The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 512 characters.
+        "jobId": "A String", # [Optional] The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 512 characters.
       },
-      "sparkSqlJob": { # A Cloud Dataproc job for running Spark SQL queries. # Job is a SparkSql job.
+      "sparkSqlJob": { # A Cloud Dataproc job for running [Apache Spark SQL](http://spark.apache.org/sql/) queries. # Job is a SparkSql job.
         "queryFileUri": "A String", # The HCFS URI of the script that contains SQL queries.
         "scriptVariables": { # [Optional] Mapping of query variable names to values (equivalent to the Spark SQL command: SET `name="value";`).
           "a_key": "A String",
@@ -744,7 +778,7 @@
           "a_key": "A String",
         },
       },
-      "pigJob": { # A Cloud Dataproc job for running Pig queries on YARN. # Job is a Pig job.
+      "pigJob": { # A Cloud Dataproc job for running [Apache Pig](https://pig.apache.org/) queries on YARN. # Job is a Pig job.
         "queryFileUri": "A String", # The HCFS URI of the script that contains the Pig queries.
         "scriptVariables": { # [Optional] Mapping of query variable names to values (equivalent to the Pig command: `name=[value]`).
           "a_key": "A String",
@@ -768,8 +802,11 @@
         },
       },
       "driverOutputResourceUri": "A String", # [Output-only] A URI pointing to the location of the stdout of the job's driver program.
+      "labels": { # [Optional] The labels to associate with this job. Label **keys** must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values** may be empty, but, if present, must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.
+        "a_key": "A String",
+      },
       "driverControlFilesUri": "A String", # [Output-only] If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as `driver_output_uri`.
-      "sparkJob": { # A Cloud Dataproc job for running Spark applications on YARN. # Job is a Spark job.
+      "sparkJob": { # A Cloud Dataproc job for running [Apache Spark](http://spark.apache.org/) applications on YARN. # Job is a Spark job.
         "jarFileUris": [ # [Optional] HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
           "A String",
         ],
@@ -793,7 +830,15 @@
           "a_key": "A String",
         },
       },
-      "pysparkJob": { # A Cloud Dataproc job for running PySpark applications on YARN. # Job is a Pyspark job.
+      "yarnApplications": [ # [Output-only] The collection of YARN applications spun up by this job. **Beta** Feature: This report is available for testing purposes only. It may be changed before final release.
+        { # A YARN application created by a job. Application information is a subset of org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. **Beta Feature**: This report is available for testing purposes only. It may be changed before final release.
+          "progress": 3.14, # [Required] The numerical progress of the application, from 1 to 100.
+          "state": "A String", # [Required] The application state.
+          "name": "A String", # [Required] The application name.
+          "trackingUrl": "A String", # [Optional] The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.
+        },
+      ],
+      "pysparkJob": { # A Cloud Dataproc job for running [Apache PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN. # Job is a Pyspark job.
         "mainPythonFileUri": "A String", # [Required] The HCFS URI of the main Python file to use as the driver. Must be a .py file.
         "args": [ # [Optional] The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
           "A String",
@@ -819,7 +864,7 @@
           "a_key": "A String",
         },
       },
-      "hiveJob": { # A Cloud Dataproc job for running Hive queries on YARN. # Job is a Hive job.
+      "hiveJob": { # A Cloud Dataproc job for running [Apache Hive](https://hive.apache.org/) queries on YARN. # Job is a Hive job.
         "queryFileUri": "A String", # The HCFS URI of the script that contains Hive queries.
         "scriptVariables": { # [Optional] Mapping of query variable names to values (equivalent to the Hive command: `SET name="value";`).
           "a_key": "A String",
@@ -847,11 +892,11 @@
 
     { # A Cloud Dataproc job resource.
     "status": { # Cloud Dataproc job status. # [Output-only] The job status. Additional application-specific status information may be contained in the type_job and yarn_applications fields.
-      "state": "A String", # [Required] A state message specifying the overall job state.
+      "state": "A String", # [Output-only] A state message specifying the overall job state.
       "stateStartTime": "A String", # [Output-only] The time when this state was entered.
-      "details": "A String", # [Optional] Job state details, such as an error description if the state is ERROR.
+      "details": "A String", # [Output-only] Optional job state details, such as an error description if the state is ERROR.
     },
-    "hadoopJob": { # A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN. # Job is a Hadoop job.
+    "hadoopJob": { # A Cloud Dataproc job for running [Apache Hadoop MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on [Apache Hadoop YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). # Job is a Hadoop job.
       "jarFileUris": [ # [Optional] Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.
         "A String",
       ],
@@ -877,9 +922,9 @@
     },
     "statusHistory": [ # [Output-only] The previous job status.
       { # Cloud Dataproc job status.
-        "state": "A String", # [Required] A state message specifying the overall job state.
+        "state": "A String", # [Output-only] A state message specifying the overall job state.
         "stateStartTime": "A String", # [Output-only] The time when this state was entered.
-        "details": "A String", # [Optional] Job state details, such as an error description if the state is ERROR.
+        "details": "A String", # [Output-only] Optional job state details, such as an error description if the state is ERROR.
       },
     ],
     "placement": { # Cloud Dataproc job config. # [Required] Job information, including how, when, and where to run the job.
@@ -888,9 +933,9 @@
     },
     "reference": { # Encapsulates the full scoping used to reference a job. # [Optional] The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a job_id.
       "projectId": "A String", # [Required] The ID of the Google Cloud Platform project that the job belongs to.
-      "jobId": "A String", # [Required] The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 512 characters.
+      "jobId": "A String", # [Optional] The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 512 characters.
     },
-    "sparkSqlJob": { # A Cloud Dataproc job for running Spark SQL queries. # Job is a SparkSql job.
+    "sparkSqlJob": { # A Cloud Dataproc job for running [Apache Spark SQL](http://spark.apache.org/sql/) queries. # Job is a SparkSql job.
       "queryFileUri": "A String", # The HCFS URI of the script that contains SQL queries.
       "scriptVariables": { # [Optional] Mapping of query variable names to values (equivalent to the Spark SQL command: SET `name="value";`).
         "a_key": "A String",
@@ -912,7 +957,7 @@
         "a_key": "A String",
       },
     },
-    "pigJob": { # A Cloud Dataproc job for running Pig queries on YARN. # Job is a Pig job.
+    "pigJob": { # A Cloud Dataproc job for running [Apache Pig](https://pig.apache.org/) queries on YARN. # Job is a Pig job.
       "queryFileUri": "A String", # The HCFS URI of the script that contains the Pig queries.
       "scriptVariables": { # [Optional] Mapping of query variable names to values (equivalent to the Pig command: `name=[value]`).
         "a_key": "A String",
@@ -936,8 +981,11 @@
       },
     },
     "driverOutputResourceUri": "A String", # [Output-only] A URI pointing to the location of the stdout of the job's driver program.
+    "labels": { # [Optional] The labels to associate with this job. Label **keys** must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values** may be empty, but, if present, must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.
+      "a_key": "A String",
+    },
     "driverControlFilesUri": "A String", # [Output-only] If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as `driver_output_uri`.
-    "sparkJob": { # A Cloud Dataproc job for running Spark applications on YARN. # Job is a Spark job.
+    "sparkJob": { # A Cloud Dataproc job for running [Apache Spark](http://spark.apache.org/) applications on YARN. # Job is a Spark job.
       "jarFileUris": [ # [Optional] HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.
         "A String",
       ],
@@ -961,7 +1009,15 @@
         "a_key": "A String",
       },
     },
-    "pysparkJob": { # A Cloud Dataproc job for running PySpark applications on YARN. # Job is a Pyspark job.
+    "yarnApplications": [ # [Output-only] The collection of YARN applications spun up by this job. **Beta** Feature: This report is available for testing purposes only. It may be changed before final release.
+      { # A YARN application created by a job. Application information is a subset of org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. **Beta Feature**: This report is available for testing purposes only. It may be changed before final release.
+        "progress": 3.14, # [Required] The numerical progress of the application, from 1 to 100.
+        "state": "A String", # [Required] The application state.
+        "name": "A String", # [Required] The application name.
+        "trackingUrl": "A String", # [Optional] The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.
+      },
+    ],
+    "pysparkJob": { # A Cloud Dataproc job for running [Apache PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN. # Job is a Pyspark job.
       "mainPythonFileUri": "A String", # [Required] The HCFS URI of the main Python file to use as the driver. Must be a .py file.
       "args": [ # [Optional] The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
         "A String",
@@ -987,7 +1043,7 @@
         "a_key": "A String",
       },
     },
-    "hiveJob": { # A Cloud Dataproc job for running Hive queries on YARN. # Job is a Hive job.
+    "hiveJob": { # A Cloud Dataproc job for running [Apache Hive](https://hive.apache.org/) queries on YARN. # Job is a Hive job.
       "queryFileUri": "A String", # The HCFS URI of the script that contains Hive queries.
       "scriptVariables": { # [Optional] Mapping of query variable names to values (equivalent to the Hive command: `SET name="value";`).
         "a_key": "A String",