chore: Update discovery artifacts (#1291)

* chore: update docs/dyn/index.md
* chore(abusiveexperiencereport): update the api
* chore(acceleratedmobilepageurl): update the api
* chore(accessapproval): update the api
* chore(accesscontextmanager): update the api
* chore(adexchangebuyer2): update the api
* chore(adexperiencereport): update the api
* chore(admob): update the api
* chore(analytics): update the api
* chore(analyticsreporting): update the api
* chore(androiddeviceprovisioning): update the api
* chore(androidenterprise): update the api
* chore(androidpublisher): update the api
* chore(apigateway): update the api
* chore(artifactregistry): update the api
* chore(bigqueryconnection): update the api
* chore(bigquerydatatransfer): update the api
* chore(billingbudgets): update the api
* chore(binaryauthorization): update the api
* chore(blogger): update the api
* chore(books): update the api
* chore(calendar): update the api
* chore(chat): update the api
* chore(chromeuxreport): update the api
* chore(civicinfo): update the api
* chore(classroom): update the api
* chore(cloudbilling): update the api
* chore(cloudbuild): update the api
* chore(clouddebugger): update the api
* chore(clouderrorreporting): update the api
* chore(cloudfunctions): update the api
* chore(cloudidentity): update the api
* chore(cloudiot): update the api
* chore(cloudkms): update the api
* chore(cloudprofiler): update the api
* chore(cloudresourcemanager): update the api
* chore(cloudscheduler): update the api
* chore(cloudshell): update the api
* chore(cloudtasks): update the api
* chore(cloudtrace): update the api
* chore(composer): update the api
* chore(containeranalysis): update the api
* chore(content): update the api
* chore(customsearch): update the api
* chore(datacatalog): update the api
* chore(datafusion): update the api
* chore(datamigration): update the api
* chore(datastore): update the api
* chore(deploymentmanager): update the api
* chore(digitalassetlinks): update the api
* chore(displayvideo): update the api
* chore(dlp): update the api
* chore(dns): update the api
* chore(docs): update the api
* chore(domains): update the api
* chore(domainsrdap): update the api
* chore(doubleclickbidmanager): update the api
* chore(doubleclicksearch): update the api
* chore(drive): update the api
* chore(driveactivity): update the api
* chore(eventarc): update the api
* chore(factchecktools): update the api
* chore(fcm): update the api
* chore(file): update the api
* chore(firebase): update the api
* chore(firebasedatabase): update the api
* chore(firebasedynamiclinks): update the api
* chore(firebasehosting): update the api
* chore(firebaseml): update the api
* chore(firebaserules): update the api
* chore(firestore): update the api
* chore(fitness): update the api
* chore(gamesConfiguration): update the api
* chore(gamesManagement): update the api
* chore(gameservices): update the api
* chore(genomics): update the api
* chore(gmail): update the api
* chore(gmailpostmastertools): update the api
* chore(groupsmigration): update the api
* chore(groupssettings): update the api
* chore(healthcare): update the api
* chore(iam): update the api
* chore(iamcredentials): update the api
* chore(iap): update the api
* chore(identitytoolkit): update the api
* chore(indexing): update the api
* chore(jobs): update the api
* chore(kgsearch): update the api
* chore(language): update the api
* chore(libraryagent): update the api
* chore(licensing): update the api
* chore(lifesciences): update the api
* chore(logging): update the api
* chore(managedidentities): update the api
* chore(manufacturers): update the api
* chore(memcache): update the api
* chore(ml): update the api
* chore(monitoring): update the api
* chore(networkmanagement): update the api
* chore(osconfig): update the api
* chore(pagespeedonline): update the api
* chore(playablelocations): update the api
* chore(playcustomapp): update the api
* chore(policytroubleshooter): update the api
* chore(poly): update the api
* chore(privateca): update the api
* chore(prod_tt_sasportal): update the api
* chore(pubsub): update the api
* chore(pubsublite): update the api
* chore(realtimebidding): update the api
* chore(recommendationengine): update the api
* chore(recommender): update the api
* chore(redis): update the api
* chore(remotebuildexecution): update the api
* chore(reseller): update the api
* chore(runtimeconfig): update the api
* chore(safebrowsing): update the api
* chore(sasportal): update the api
* chore(script): update the api
* chore(searchconsole): update the api
* chore(secretmanager): update the api
* chore(servicecontrol): update the api
* chore(servicedirectory): update the api
* chore(siteVerification): update the api
* chore(slides): update the api
* chore(smartdevicemanagement): update the api
* chore(sourcerepo): update the api
* chore(sqladmin): update the api
* chore(storage): update the api
* chore(storagetransfer): update the api
* chore(streetviewpublish): update the api
* chore(sts): update the api
* chore(tagmanager): update the api
* chore(tasks): update the api
* chore(testing): update the api
* chore(texttospeech): update the api
* chore(toolresults): update the api
* chore(trafficdirector): update the api
* chore(transcoder): update the api
* chore(translate): update the api
* chore(vault): update the api
* chore(vectortile): update the api
* chore(verifiedaccess): update the api
* chore(videointelligence): update the api
* chore(vision): update the api
* chore(webfonts): update the api
* chore(webmasters): update the api
* chore(websecurityscanner): update the api
* chore(workflowexecutions): update the api
* chore(workflows): update the api
* chore(youtubeAnalytics): update the api
* chore(youtubereporting): update the api
* chore(docs): Add new discovery artifacts and reference documents

diff --git a/docs/dyn/dataflow_v1b3.projects.jobs.html b/docs/dyn/dataflow_v1b3.projects.jobs.html
index 09e8014..661cda1 100644
--- a/docs/dyn/dataflow_v1b3.projects.jobs.html
+++ b/docs/dyn/dataflow_v1b3.projects.jobs.html
@@ -284,8 +284,8 @@
       },
       "id": "A String", # The unique ID of this job. This field is set by the Cloud Dataflow service when the Job is created, and is immutable for the life of the job.
       "jobMetadata": { # Metadata available primarily for filtering jobs. Will be included in the ListJob response and Job SUMMARY view. # This field is populated by the Dataflow service to support filtering jobs by the metadata values provided here. Populated for ListJobs and all GetJob views SUMMARY and higher.
-        "bigTableDetails": [ # Identification of a BigTable source used in the Dataflow job.
-          { # Metadata for a BigTable connector used by the job.
+        "bigTableDetails": [ # Identification of a Cloud BigTable source used in the Dataflow job.
+          { # Metadata for a Cloud BigTable connector used by the job.
             "instanceId": "A String", # InstanceId accessed in the connection.
             "projectId": "A String", # ProjectId accessed in the connection.
             "tableId": "A String", # TableId accessed in the connection.
@@ -311,7 +311,7 @@
           },
         ],
         "pubsubDetails": [ # Identification of a PubSub source used in the Dataflow job.
-          { # Metadata for a PubSub connector used by the job.
+          { # Metadata for a Pub/Sub connector used by the job.
             "subscription": "A String", # Subscription used in the connection.
             "topic": "A String", # Topic accessed in the connection.
           },
@@ -376,7 +376,7 @@
                 "userName": "A String", # Human-readable name for this source; may be user or system generated.
               },
             ],
-            "kind": "A String", # Type of tranform this stage is executing.
+            "kind": "A String", # Type of transform this stage is executing.
             "name": "A String", # Dataflow service generated name for this stage.
             "outputSource": [ # Output sources for this stage.
               { # Description of an input or output of an execution stage.
@@ -443,7 +443,7 @@
           },
         },
       ],
-      "stepsLocation": "A String", # The GCS location where the steps are stored.
+      "stepsLocation": "A String", # The Cloud Storage location where the steps are stored.
       "tempFiles": [ # A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. The supported files are: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
         "A String",
       ],
@@ -613,8 +613,8 @@
   },
   "id": "A String", # The unique ID of this job. This field is set by the Cloud Dataflow service when the Job is created, and is immutable for the life of the job.
   "jobMetadata": { # Metadata available primarily for filtering jobs. Will be included in the ListJob response and Job SUMMARY view. # This field is populated by the Dataflow service to support filtering jobs by the metadata values provided here. Populated for ListJobs and all GetJob views SUMMARY and higher.
-    "bigTableDetails": [ # Identification of a BigTable source used in the Dataflow job.
-      { # Metadata for a BigTable connector used by the job.
+    "bigTableDetails": [ # Identification of a Cloud BigTable source used in the Dataflow job.
+      { # Metadata for a Cloud BigTable connector used by the job.
         "instanceId": "A String", # InstanceId accessed in the connection.
         "projectId": "A String", # ProjectId accessed in the connection.
         "tableId": "A String", # TableId accessed in the connection.
@@ -640,7 +640,7 @@
       },
     ],
     "pubsubDetails": [ # Identification of a PubSub source used in the Dataflow job.
-      { # Metadata for a PubSub connector used by the job.
+      { # Metadata for a Pub/Sub connector used by the job.
         "subscription": "A String", # Subscription used in the connection.
         "topic": "A String", # Topic accessed in the connection.
       },
@@ -705,7 +705,7 @@
             "userName": "A String", # Human-readable name for this source; may be user or system generated.
           },
         ],
-        "kind": "A String", # Type of tranform this stage is executing.
+        "kind": "A String", # Type of transform this stage is executing.
         "name": "A String", # Dataflow service generated name for this stage.
         "outputSource": [ # Output sources for this stage.
           { # Description of an input or output of an execution stage.
@@ -772,7 +772,7 @@
       },
     },
   ],
-  "stepsLocation": "A String", # The GCS location where the steps are stored.
+  "stepsLocation": "A String", # The Cloud Storage location where the steps are stored.
   "tempFiles": [ # A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. The supported files are: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
     "A String",
   ],
@@ -926,8 +926,8 @@
   },
   "id": "A String", # The unique ID of this job. This field is set by the Cloud Dataflow service when the Job is created, and is immutable for the life of the job.
   "jobMetadata": { # Metadata available primarily for filtering jobs. Will be included in the ListJob response and Job SUMMARY view. # This field is populated by the Dataflow service to support filtering jobs by the metadata values provided here. Populated for ListJobs and all GetJob views SUMMARY and higher.
-    "bigTableDetails": [ # Identification of a BigTable source used in the Dataflow job.
-      { # Metadata for a BigTable connector used by the job.
+    "bigTableDetails": [ # Identification of a Cloud BigTable source used in the Dataflow job.
+      { # Metadata for a Cloud BigTable connector used by the job.
         "instanceId": "A String", # InstanceId accessed in the connection.
         "projectId": "A String", # ProjectId accessed in the connection.
         "tableId": "A String", # TableId accessed in the connection.
@@ -953,7 +953,7 @@
       },
     ],
     "pubsubDetails": [ # Identification of a PubSub source used in the Dataflow job.
-      { # Metadata for a PubSub connector used by the job.
+      { # Metadata for a Pub/Sub connector used by the job.
         "subscription": "A String", # Subscription used in the connection.
         "topic": "A String", # Topic accessed in the connection.
       },
@@ -1018,7 +1018,7 @@
             "userName": "A String", # Human-readable name for this source; may be user or system generated.
           },
         ],
-        "kind": "A String", # Type of tranform this stage is executing.
+        "kind": "A String", # Type of transform this stage is executing.
         "name": "A String", # Dataflow service generated name for this stage.
         "outputSource": [ # Output sources for this stage.
           { # Description of an input or output of an execution stage.
@@ -1085,7 +1085,7 @@
       },
     },
   ],
-  "stepsLocation": "A String", # The GCS location where the steps are stored.
+  "stepsLocation": "A String", # The Cloud Storage location where the steps are stored.
   "tempFiles": [ # A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. The supported files are: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
     "A String",
   ],
@@ -1246,8 +1246,8 @@
   },
   "id": "A String", # The unique ID of this job. This field is set by the Cloud Dataflow service when the Job is created, and is immutable for the life of the job.
   "jobMetadata": { # Metadata available primarily for filtering jobs. Will be included in the ListJob response and Job SUMMARY view. # This field is populated by the Dataflow service to support filtering jobs by the metadata values provided here. Populated for ListJobs and all GetJob views SUMMARY and higher.
-    "bigTableDetails": [ # Identification of a BigTable source used in the Dataflow job.
-      { # Metadata for a BigTable connector used by the job.
+    "bigTableDetails": [ # Identification of a Cloud BigTable source used in the Dataflow job.
+      { # Metadata for a Cloud BigTable connector used by the job.
         "instanceId": "A String", # InstanceId accessed in the connection.
         "projectId": "A String", # ProjectId accessed in the connection.
         "tableId": "A String", # TableId accessed in the connection.
@@ -1273,7 +1273,7 @@
       },
     ],
     "pubsubDetails": [ # Identification of a PubSub source used in the Dataflow job.
-      { # Metadata for a PubSub connector used by the job.
+      { # Metadata for a Pub/Sub connector used by the job.
         "subscription": "A String", # Subscription used in the connection.
         "topic": "A String", # Topic accessed in the connection.
       },
@@ -1338,7 +1338,7 @@
             "userName": "A String", # Human-readable name for this source; may be user or system generated.
           },
         ],
-        "kind": "A String", # Type of tranform this stage is executing.
+        "kind": "A String", # Type of transform this stage is executing.
         "name": "A String", # Dataflow service generated name for this stage.
         "outputSource": [ # Output sources for this stage.
           { # Description of an input or output of an execution stage.
@@ -1405,7 +1405,7 @@
       },
     },
   ],
-  "stepsLocation": "A String", # The GCS location where the steps are stored.
+  "stepsLocation": "A String", # The Cloud Storage location where the steps are stored.
   "tempFiles": [ # A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. The supported files are: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
     "A String",
   ],
@@ -1623,8 +1623,8 @@
       },
       "id": "A String", # The unique ID of this job. This field is set by the Cloud Dataflow service when the Job is created, and is immutable for the life of the job.
       "jobMetadata": { # Metadata available primarily for filtering jobs. Will be included in the ListJob response and Job SUMMARY view. # This field is populated by the Dataflow service to support filtering jobs by the metadata values provided here. Populated for ListJobs and all GetJob views SUMMARY and higher.
-        "bigTableDetails": [ # Identification of a BigTable source used in the Dataflow job.
-          { # Metadata for a BigTable connector used by the job.
+        "bigTableDetails": [ # Identification of a Cloud BigTable source used in the Dataflow job.
+          { # Metadata for a Cloud BigTable connector used by the job.
             "instanceId": "A String", # InstanceId accessed in the connection.
             "projectId": "A String", # ProjectId accessed in the connection.
             "tableId": "A String", # TableId accessed in the connection.
@@ -1650,7 +1650,7 @@
           },
         ],
         "pubsubDetails": [ # Identification of a PubSub source used in the Dataflow job.
-          { # Metadata for a PubSub connector used by the job.
+          { # Metadata for a Pub/Sub connector used by the job.
             "subscription": "A String", # Subscription used in the connection.
             "topic": "A String", # Topic accessed in the connection.
           },
@@ -1715,7 +1715,7 @@
                 "userName": "A String", # Human-readable name for this source; may be user or system generated.
               },
             ],
-            "kind": "A String", # Type of tranform this stage is executing.
+            "kind": "A String", # Type of transform this stage is executing.
             "name": "A String", # Dataflow service generated name for this stage.
             "outputSource": [ # Output sources for this stage.
               { # Description of an input or output of an execution stage.
@@ -1782,7 +1782,7 @@
           },
         },
       ],
-      "stepsLocation": "A String", # The GCS location where the steps are stored.
+      "stepsLocation": "A String", # The Cloud Storage location where the steps are stored.
       "tempFiles": [ # A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. The supported files are: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
         "A String",
       ],
@@ -1992,8 +1992,8 @@
   },
   "id": "A String", # The unique ID of this job. This field is set by the Cloud Dataflow service when the Job is created, and is immutable for the life of the job.
   "jobMetadata": { # Metadata available primarily for filtering jobs. Will be included in the ListJob response and Job SUMMARY view. # This field is populated by the Dataflow service to support filtering jobs by the metadata values provided here. Populated for ListJobs and all GetJob views SUMMARY and higher.
-    "bigTableDetails": [ # Identification of a BigTable source used in the Dataflow job.
-      { # Metadata for a BigTable connector used by the job.
+    "bigTableDetails": [ # Identification of a Cloud BigTable source used in the Dataflow job.
+      { # Metadata for a Cloud BigTable connector used by the job.
         "instanceId": "A String", # InstanceId accessed in the connection.
         "projectId": "A String", # ProjectId accessed in the connection.
         "tableId": "A String", # TableId accessed in the connection.
@@ -2019,7 +2019,7 @@
       },
     ],
     "pubsubDetails": [ # Identification of a PubSub source used in the Dataflow job.
-      { # Metadata for a PubSub connector used by the job.
+      { # Metadata for a Pub/Sub connector used by the job.
         "subscription": "A String", # Subscription used in the connection.
         "topic": "A String", # Topic accessed in the connection.
       },
@@ -2084,7 +2084,7 @@
             "userName": "A String", # Human-readable name for this source; may be user or system generated.
           },
         ],
-        "kind": "A String", # Type of tranform this stage is executing.
+        "kind": "A String", # Type of transform this stage is executing.
         "name": "A String", # Dataflow service generated name for this stage.
         "outputSource": [ # Output sources for this stage.
           { # Description of an input or output of an execution stage.
@@ -2151,7 +2151,7 @@
       },
     },
   ],
-  "stepsLocation": "A String", # The GCS location where the steps are stored.
+  "stepsLocation": "A String", # The Cloud Storage location where the steps are stored.
   "tempFiles": [ # A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. The supported files are: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
     "A String",
   ],
@@ -2298,8 +2298,8 @@
   },
   "id": "A String", # The unique ID of this job. This field is set by the Cloud Dataflow service when the Job is created, and is immutable for the life of the job.
   "jobMetadata": { # Metadata available primarily for filtering jobs. Will be included in the ListJob response and Job SUMMARY view. # This field is populated by the Dataflow service to support filtering jobs by the metadata values provided here. Populated for ListJobs and all GetJob views SUMMARY and higher.
-    "bigTableDetails": [ # Identification of a BigTable source used in the Dataflow job.
-      { # Metadata for a BigTable connector used by the job.
+    "bigTableDetails": [ # Identification of a Cloud BigTable source used in the Dataflow job.
+      { # Metadata for a Cloud BigTable connector used by the job.
         "instanceId": "A String", # InstanceId accessed in the connection.
         "projectId": "A String", # ProjectId accessed in the connection.
         "tableId": "A String", # TableId accessed in the connection.
@@ -2325,7 +2325,7 @@
       },
     ],
     "pubsubDetails": [ # Identification of a PubSub source used in the Dataflow job.
-      { # Metadata for a PubSub connector used by the job.
+      { # Metadata for a Pub/Sub connector used by the job.
         "subscription": "A String", # Subscription used in the connection.
         "topic": "A String", # Topic accessed in the connection.
       },
@@ -2390,7 +2390,7 @@
             "userName": "A String", # Human-readable name for this source; may be user or system generated.
           },
         ],
-        "kind": "A String", # Type of tranform this stage is executing.
+        "kind": "A String", # Type of transform this stage is executing.
         "name": "A String", # Dataflow service generated name for this stage.
         "outputSource": [ # Output sources for this stage.
           { # Description of an input or output of an execution stage.
@@ -2457,7 +2457,7 @@
       },
     },
   ],
-  "stepsLocation": "A String", # The GCS location where the steps are stored.
+  "stepsLocation": "A String", # The Cloud Storage location where the steps are stored.
   "tempFiles": [ # A set of files the system should be aware of that are used for temporary storage. These temporary files will be removed on job completion. No duplicates are allowed. No file patterns are supported. The supported files are: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} bucket.storage.googleapis.com/{object}
     "A String",
   ],
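For reference, the jobMetadata fields shown in the diff above are returned by projects().jobs().list for the SUMMARY view and higher. A minimal sketch of reading them with this client library, assuming Application Default Credentials are configured; "my-project" is a placeholder project ID:

    # Sketch only: list Dataflow jobs and print the Pub/Sub connector
    # metadata documented above. "my-project" is a placeholder.
    from googleapiclient.discovery import build

    service = build("dataflow", "v1b3")

    # jobMetadata is populated for ListJobs and for GetJob views
    # SUMMARY and higher, per the reference documentation.
    response = (
        service.projects()
        .jobs()
        .list(projectId="my-project", view="JOB_VIEW_SUMMARY")
        .execute()
    )

    for job in response.get("jobs", []):
        for detail in job.get("jobMetadata", {}).get("pubsubDetails", []):
            print(job["id"], detail.get("topic"), detail.get("subscription"))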