Update generated API docs
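
Usage sketch for the jobs methods documented in the regenerated reference
below, using google-api-python-client. This is a minimal illustration only:
the project ID, region, cluster name, and gs:// URI are placeholders, not
values taken from this change.

    from googleapiclient import discovery

    # Build the Dataproc v1 client (relies on application default credentials).
    dataproc = discovery.build('dataproc', 'v1')
    jobs = dataproc.projects().regions().jobs()

    # Submit a PySpark job. The request body wraps the job config under the
    # 'job' key as shown in the submit() docs; all identifiers below are
    # hypothetical examples.
    submitted = jobs.submit(
        projectId='example-project',
        region='global',
        body={
            'job': {
                'placement': {'clusterName': 'example-cluster'},
                'pysparkJob': {
                    'mainPythonFileUri': 'gs://example-bucket/word_count.py',
                },
            },
        },
    ).execute()
    job_id = submitted['reference']['jobId']

    # Start a cancellation request (the cancel body has no fields), then call
    # get() to inspect the job's state afterwards, as the cancel() docs note.
    jobs.cancel(projectId='example-project', region='global',
                jobId=job_id, body={}).execute()
    job = jobs.get(projectId='example-project', region='global',
                   jobId=job_id).execute()

As the cancel() description says, a cancelled job remains accessible through
jobs.list and jobs.get rather than being deleted.
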
diff --git a/docs/dyn/dataproc_v1.projects.regions.jobs.html b/docs/dyn/dataproc_v1.projects.regions.jobs.html
index 0b7aa51..e3f6df9 100644
--- a/docs/dyn/dataproc_v1.projects.regions.jobs.html
+++ b/docs/dyn/dataproc_v1.projects.regions.jobs.html
@@ -76,7 +76,7 @@
 <h2>Instance Methods</h2>
 <p class="toc_element">
   <code><a href="#cancel">cancel(projectId, region, jobId, body, x__xgafv=None)</a></code></p>
-<p class="firstline">Starts a job cancellation request. To access the job resource after cancellation, call [regions/{region}/jobs.list](/dataproc/reference/rest/v1/projects.regions/{region}/jobs/list) or [regions/{region}/jobs.get](/dataproc/reference/rest/v1/projects.regions/{region}/jobs/get).</p>
+<p class="firstline">Starts a job cancellation request. To access the job resource after cancellation, call [regions/{region}/jobs.list](/dataproc/reference/rest/v1/projects.regions.jobs/list) or [regions/{region}/jobs.get](/dataproc/reference/rest/v1/projects.regions.jobs/get).</p>
 <p class="toc_element">
   <code><a href="#delete">delete(projectId, region, jobId, x__xgafv=None)</a></code></p>
 <p class="firstline">Deletes the job from the project. If the job is active, the delete fails, and the response returns `FAILED_PRECONDITION`.</p>
@@ -95,11 +95,11 @@
 <h3>Method Details</h3>
 <div class="method">
     <code class="details" id="cancel">cancel(projectId, region, jobId, body, x__xgafv=None)</code>
-  <pre>Starts a job cancellation request. To access the job resource after cancellation, call [regions/{region}/jobs.list](/dataproc/reference/rest/v1/projects.regions/{region}/jobs/list) or [regions/{region}/jobs.get](/dataproc/reference/rest/v1/projects.regions/{region}/jobs/get).
+  <pre>Starts a job cancellation request. To access the job resource after cancellation, call [regions/{region}/jobs.list](/dataproc/reference/rest/v1/projects.regions.jobs/list) or [regions/{region}/jobs.get](/dataproc/reference/rest/v1/projects.regions.jobs/get).
 
 Args:
   projectId: string, [Required] The ID of the Google Cloud Platform project that the job belongs to. (required)
-  region: string, [Required] The Dataproc region in which to handle the request. (required)
+  region: string, [Required] The Cloud Dataproc region in which to handle the request. (required)
   jobId: string, [Required] The job ID. (required)
   body: object, The request body. (required)
     The object takes the form of:
@@ -130,14 +130,14 @@
       "args": [ # [Optional] The arguments to pass to the driver. Do not include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
         "A String",
       ],
-      "fileUris": [ # [Optional] HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
+      "fileUris": [ # [Optional] HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
         "A String",
       ],
       "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jar_file_uris`.
       "archiveUris": [ # [Optional] HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
         "A String",
       ],
-      "mainJarFileUri": "A String", # The Hadoop Compatible Filesystem (HCFS) URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
+      "mainJarFileUri": "A String", # The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
       "properties": { # [Optional] A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
         "a_key": "A String",
       },
@@ -151,7 +151,7 @@
     ],
     "placement": { # Cloud Dataproc job config. # [Required] Job information, including how, when, and where to run the job.
       "clusterName": "A String", # [Required] The name of the cluster where the job will be submitted.
-      "clusterUuid": "A String", # [Output-only] A cluster UUID generated by the Dataproc service when the job is submitted.
+      "clusterUuid": "A String", # [Output-only] A cluster UUID generated by the Cloud Dataproc service when the job is submitted.
     },
     "reference": { # Encapsulates the full scoping used to reference a job. # [Optional] The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a job_id.
       "projectId": "A String", # [Required] The ID of the Google Cloud Platform project that the job belongs to.
@@ -223,13 +223,13 @@
       "archiveUris": [ # [Optional] HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
         "A String",
       ],
-      "mainJarFileUri": "A String", # The Hadoop Compatible Filesystem (HCFS) URI of the jar file that contains the main class.
+      "mainJarFileUri": "A String", # The HCFS URI of the jar file that contains the main class.
       "properties": { # [Optional] A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
         "a_key": "A String",
       },
     },
     "pysparkJob": { # A Cloud Dataproc job for running PySpark applications on YARN. # Job is a Pyspark job.
-      "mainPythonFileUri": "A String", # [Required] The Hadoop Compatible Filesystem (HCFS) URI of the main Python file to use as the driver. Must be a .py file.
+      "mainPythonFileUri": "A String", # [Required] The HCFS URI of the main Python file to use as the driver. Must be a .py file.
       "args": [ # [Optional] The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
         "A String",
       ],
@@ -281,7 +281,7 @@
 
 Args:
   projectId: string, [Required] The ID of the Google Cloud Platform project that the job belongs to. (required)
-  region: string, [Required] The Dataproc region in which to handle the request. (required)
+  region: string, [Required] The Cloud Dataproc region in which to handle the request. (required)
   jobId: string, [Required] The job ID. (required)
   x__xgafv: string, V1 error format.
 
@@ -298,7 +298,7 @@
 
 Args:
   projectId: string, [Required] The ID of the Google Cloud Platform project that the job belongs to. (required)
-  region: string, [Required] The Dataproc region in which to handle the request. (required)
+  region: string, [Required] The Cloud Dataproc region in which to handle the request. (required)
   jobId: string, [Required] The job ID. (required)
   x__xgafv: string, V1 error format.
 
@@ -323,14 +323,14 @@
       "args": [ # [Optional] The arguments to pass to the driver. Do not include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
         "A String",
       ],
-      "fileUris": [ # [Optional] HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
+      "fileUris": [ # [Optional] HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
         "A String",
       ],
       "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jar_file_uris`.
       "archiveUris": [ # [Optional] HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
         "A String",
       ],
-      "mainJarFileUri": "A String", # The Hadoop Compatible Filesystem (HCFS) URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
+      "mainJarFileUri": "A String", # The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
       "properties": { # [Optional] A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
         "a_key": "A String",
       },
@@ -344,7 +344,7 @@
     ],
     "placement": { # Cloud Dataproc job config. # [Required] Job information, including how, when, and where to run the job.
       "clusterName": "A String", # [Required] The name of the cluster where the job will be submitted.
-      "clusterUuid": "A String", # [Output-only] A cluster UUID generated by the Dataproc service when the job is submitted.
+      "clusterUuid": "A String", # [Output-only] A cluster UUID generated by the Cloud Dataproc service when the job is submitted.
     },
     "reference": { # Encapsulates the full scoping used to reference a job. # [Optional] The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a job_id.
       "projectId": "A String", # [Required] The ID of the Google Cloud Platform project that the job belongs to.
@@ -416,13 +416,13 @@
       "archiveUris": [ # [Optional] HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
         "A String",
       ],
-      "mainJarFileUri": "A String", # The Hadoop Compatible Filesystem (HCFS) URI of the jar file that contains the main class.
+      "mainJarFileUri": "A String", # The HCFS URI of the jar file that contains the main class.
       "properties": { # [Optional] A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
         "a_key": "A String",
       },
     },
     "pysparkJob": { # A Cloud Dataproc job for running PySpark applications on YARN. # Job is a Pyspark job.
-      "mainPythonFileUri": "A String", # [Required] The Hadoop Compatible Filesystem (HCFS) URI of the main Python file to use as the driver. Must be a .py file.
+      "mainPythonFileUri": "A String", # [Required] The HCFS URI of the main Python file to use as the driver. Must be a .py file.
       "args": [ # [Optional] The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
         "A String",
       ],
@@ -474,7 +474,7 @@
 
 Args:
   projectId: string, [Required] The ID of the Google Cloud Platform project that the job belongs to. (required)
-  region: string, [Required] The Dataproc region in which to handle the request. (required)
+  region: string, [Required] The Cloud Dataproc region in which to handle the request. (required)
   pageSize: integer, [Optional] The number of results to return in each response.
   x__xgafv: string, V1 error format.
   jobStateMatcher: string, [Optional] Specifies enumerated categories of jobs to list.
@@ -505,14 +505,14 @@
           "args": [ # [Optional] The arguments to pass to the driver. Do not include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
             "A String",
           ],
-          "fileUris": [ # [Optional] HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
+          "fileUris": [ # [Optional] HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
             "A String",
           ],
           "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jar_file_uris`.
           "archiveUris": [ # [Optional] HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
             "A String",
           ],
-          "mainJarFileUri": "A String", # The Hadoop Compatible Filesystem (HCFS) URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
+          "mainJarFileUri": "A String", # The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
           "properties": { # [Optional] A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
             "a_key": "A String",
           },
@@ -526,7 +526,7 @@
         ],
         "placement": { # Cloud Dataproc job config. # [Required] Job information, including how, when, and where to run the job.
           "clusterName": "A String", # [Required] The name of the cluster where the job will be submitted.
-          "clusterUuid": "A String", # [Output-only] A cluster UUID generated by the Dataproc service when the job is submitted.
+          "clusterUuid": "A String", # [Output-only] A cluster UUID generated by the Cloud Dataproc service when the job is submitted.
         },
         "reference": { # Encapsulates the full scoping used to reference a job. # [Optional] The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a job_id.
           "projectId": "A String", # [Required] The ID of the Google Cloud Platform project that the job belongs to.
@@ -598,13 +598,13 @@
           "archiveUris": [ # [Optional] HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
             "A String",
           ],
-          "mainJarFileUri": "A String", # The Hadoop Compatible Filesystem (HCFS) URI of the jar file that contains the main class.
+          "mainJarFileUri": "A String", # The HCFS URI of the jar file that contains the main class.
           "properties": { # [Optional] A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
             "a_key": "A String",
           },
         },
         "pysparkJob": { # A Cloud Dataproc job for running PySpark applications on YARN. # Job is a Pyspark job.
-          "mainPythonFileUri": "A String", # [Required] The Hadoop Compatible Filesystem (HCFS) URI of the main Python file to use as the driver. Must be a .py file.
+          "mainPythonFileUri": "A String", # [Required] The HCFS URI of the main Python file to use as the driver. Must be a .py file.
           "args": [ # [Optional] The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
             "A String",
           ],
@@ -672,7 +672,7 @@
 
 Args:
   projectId: string, [Required] The ID of the Google Cloud Platform project that the job belongs to. (required)
-  region: string, [Required] The Dataproc region in which to handle the request. (required)
+  region: string, [Required] The Cloud Dataproc region in which to handle the request. (required)
   body: object, The request body. (required)
     The object takes the form of:
 
@@ -695,14 +695,14 @@
         "args": [ # [Optional] The arguments to pass to the driver. Do not include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
           "A String",
         ],
-        "fileUris": [ # [Optional] HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
+        "fileUris": [ # [Optional] HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
           "A String",
         ],
         "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jar_file_uris`.
         "archiveUris": [ # [Optional] HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
           "A String",
         ],
-        "mainJarFileUri": "A String", # The Hadoop Compatible Filesystem (HCFS) URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
+        "mainJarFileUri": "A String", # The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
         "properties": { # [Optional] A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
           "a_key": "A String",
         },
@@ -716,7 +716,7 @@
       ],
       "placement": { # Cloud Dataproc job config. # [Required] Job information, including how, when, and where to run the job.
         "clusterName": "A String", # [Required] The name of the cluster where the job will be submitted.
-        "clusterUuid": "A String", # [Output-only] A cluster UUID generated by the Dataproc service when the job is submitted.
+        "clusterUuid": "A String", # [Output-only] A cluster UUID generated by the Cloud Dataproc service when the job is submitted.
       },
       "reference": { # Encapsulates the full scoping used to reference a job. # [Optional] The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a job_id.
         "projectId": "A String", # [Required] The ID of the Google Cloud Platform project that the job belongs to.
@@ -788,13 +788,13 @@
         "archiveUris": [ # [Optional] HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
           "A String",
         ],
-        "mainJarFileUri": "A String", # The Hadoop Compatible Filesystem (HCFS) URI of the jar file that contains the main class.
+        "mainJarFileUri": "A String", # The HCFS URI of the jar file that contains the main class.
         "properties": { # [Optional] A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
           "a_key": "A String",
         },
       },
       "pysparkJob": { # A Cloud Dataproc job for running PySpark applications on YARN. # Job is a Pyspark job.
-        "mainPythonFileUri": "A String", # [Required] The Hadoop Compatible Filesystem (HCFS) URI of the main Python file to use as the driver. Must be a .py file.
+        "mainPythonFileUri": "A String", # [Required] The HCFS URI of the main Python file to use as the driver. Must be a .py file.
         "args": [ # [Optional] The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
           "A String",
         ],
@@ -863,14 +863,14 @@
       "args": [ # [Optional] The arguments to pass to the driver. Do not include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
         "A String",
       ],
-      "fileUris": [ # [Optional] HCFS URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
+      "fileUris": [ # [Optional] HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.
         "A String",
       ],
       "mainClass": "A String", # The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jar_file_uris`.
       "archiveUris": [ # [Optional] HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.
         "A String",
       ],
-      "mainJarFileUri": "A String", # The Hadoop Compatible Filesystem (HCFS) URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
+      "mainJarFileUri": "A String", # The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
       "properties": { # [Optional] A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.
         "a_key": "A String",
       },
@@ -884,7 +884,7 @@
     ],
     "placement": { # Cloud Dataproc job config. # [Required] Job information, including how, when, and where to run the job.
       "clusterName": "A String", # [Required] The name of the cluster where the job will be submitted.
-      "clusterUuid": "A String", # [Output-only] A cluster UUID generated by the Dataproc service when the job is submitted.
+      "clusterUuid": "A String", # [Output-only] A cluster UUID generated by the Cloud Dataproc service when the job is submitted.
     },
     "reference": { # Encapsulates the full scoping used to reference a job. # [Optional] The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a job_id.
       "projectId": "A String", # [Required] The ID of the Google Cloud Platform project that the job belongs to.
@@ -956,13 +956,13 @@
       "archiveUris": [ # [Optional] HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
         "A String",
       ],
-      "mainJarFileUri": "A String", # The Hadoop Compatible Filesystem (HCFS) URI of the jar file that contains the main class.
+      "mainJarFileUri": "A String", # The HCFS URI of the jar file that contains the main class.
       "properties": { # [Optional] A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.
         "a_key": "A String",
       },
     },
     "pysparkJob": { # A Cloud Dataproc job for running PySpark applications on YARN. # Job is a Pyspark job.
-      "mainPythonFileUri": "A String", # [Required] The Hadoop Compatible Filesystem (HCFS) URI of the main Python file to use as the driver. Must be a .py file.
+      "mainPythonFileUri": "A String", # [Required] The HCFS URI of the main Python file to use as the driver. Must be a .py file.
       "args": [ # [Optional] The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.
         "A String",
       ],