chore: Update discovery artifacts (#1505)
## Deleted keys were detected in the following stable discovery artifacts:
bigquery v2 https://github.com/googleapis/google-api-python-client/commit/795df26b99759db8f2e45a876b9c1374e2fc14ab
managedidentities v1 https://github.com/googleapis/google-api-python-client/commit/9f85c13a423d972707db785c03c832948fc4ce31
ondemandscanning v1 https://github.com/googleapis/google-api-python-client/commit/11b0c0eb16bb850077fc97da2b6cc29ffe4378b0
## Deleted keys were detected in the following pre-stable discovery artifacts:
artifactregistry v1beta1 https://github.com/googleapis/google-api-python-client/commit/d8c9d9b57835594456c766421e29c9d94a9f09fc
artifactregistry v1beta2 https://github.com/googleapis/google-api-python-client/commit/d8c9d9b57835594456c766421e29c9d94a9f09fc
dataflow v1b3 https://github.com/googleapis/google-api-python-client/commit/a8b34005d6ef733ed7230890d8b515a3b80334ec
managedidentities v1alpha1 https://github.com/googleapis/google-api-python-client/commit/9f85c13a423d972707db785c03c832948fc4ce31
managedidentities v1beta1 https://github.com/googleapis/google-api-python-client/commit/9f85c13a423d972707db785c03c832948fc4ce31
ondemandscanning v1beta1 https://github.com/googleapis/google-api-python-client/commit/11b0c0eb16bb850077fc97da2b6cc29ffe4378b0
## Discovery Artifact Change Summary:
feat(analyticsdata): update the api https://github.com/googleapis/google-api-python-client/commit/197540040eaedf672e608435ab0783e4ec203376
feat(androidmanagement): update the api https://github.com/googleapis/google-api-python-client/commit/d253dad6da1a4e0ff1bfb225de9a93ec0b5bbbb5
feat(artifactregistry): update the api https://github.com/googleapis/google-api-python-client/commit/d8c9d9b57835594456c766421e29c9d94a9f09fc
feat(bigquery): update the api https://github.com/googleapis/google-api-python-client/commit/795df26b99759db8f2e45a876b9c1374e2fc14ab
feat(bigquerydatatransfer): update the api https://github.com/googleapis/google-api-python-client/commit/9044b191958b077b84fad16e3f8f93a03b021dce
feat(chat): update the api https://github.com/googleapis/google-api-python-client/commit/5fe2f9ccbacc1a34f16750eeca103124fb4df48c
feat(cloudidentity): update the api https://github.com/googleapis/google-api-python-client/commit/24da17a1f30d97483c3da5d4fffa209cdae5d445
feat(cloudkms): update the api https://github.com/googleapis/google-api-python-client/commit/bb6c83bbdcfa0867bddb2305c26bf5aced0a4fb9
feat(compute): update the api https://github.com/googleapis/google-api-python-client/commit/1c2b5b24e32a9587f84e7240e2ebc39576760841
feat(container): update the api https://github.com/googleapis/google-api-python-client/commit/f5e7af12443c6eb9130fd62257c78d8339e76a08
feat(containeranalysis): update the api https://github.com/googleapis/google-api-python-client/commit/bb9bbeaebed4bb164ec1894896e9011253cd65cf
feat(content): update the api https://github.com/googleapis/google-api-python-client/commit/c9ba1f1852ade510cb9c89c92134b5ea95f7b9e2
feat(dataflow): update the api https://github.com/googleapis/google-api-python-client/commit/a8b34005d6ef733ed7230890d8b515a3b80334ec
feat(dataproc): update the api https://github.com/googleapis/google-api-python-client/commit/0f222f72fef0fbad5d47c0b054cb695fc99317e1
feat(displayvideo): update the api https://github.com/googleapis/google-api-python-client/commit/fbfb3cae7338b252f45a9417d46e1842bf2651e3
feat(documentai): update the api https://github.com/googleapis/google-api-python-client/commit/c21f6c964fd613125c920dede0aa4ad46288096e
feat(file): update the api https://github.com/googleapis/google-api-python-client/commit/4ea8b8d219de81728f701ca600cf38cec237bc3e
feat(gkehub): update the api https://github.com/googleapis/google-api-python-client/commit/abde26f80fd8b856785c60bdcc8eac1233c67980
feat(logging): update the api https://github.com/googleapis/google-api-python-client/commit/55ba494a751bbbf6f8bbf79a68249b6b4e062748
feat(managedidentities): update the api https://github.com/googleapis/google-api-python-client/commit/9f85c13a423d972707db785c03c832948fc4ce31
feat(metastore): update the api https://github.com/googleapis/google-api-python-client/commit/69301238fcd311486ba88209d9a1f746b51c1451
feat(ondemandscanning): update the api https://github.com/googleapis/google-api-python-client/commit/11b0c0eb16bb850077fc97da2b6cc29ffe4378b0
feat(people): update the api https://github.com/googleapis/google-api-python-client/commit/8a6dc23bda9dc7ab2c39d22e307590fb1c6e15c0
feat(sqladmin): update the api https://github.com/googleapis/google-api-python-client/commit/552d62b418d122831f0b4a38f577b4e106c6a070
feat(sts): update the api https://github.com/googleapis/google-api-python-client/commit/701b09cd34bf5a16d912b967e72fd23da77c26ec
feat(workflowexecutions): update the api https://github.com/googleapis/google-api-python-client/commit/26e7f6db0d773b7940b6c072b1f383c6edefd9f2
feat(youtube): update the api https://github.com/googleapis/google-api-python-client/commit/805e784420807e19b6e90cda17b03e74c50e7185
diff --git a/docs/dyn/dataflow_v1b3.projects.locations.flexTemplates.html b/docs/dyn/dataflow_v1b3.projects.locations.flexTemplates.html
index eb6293c..273422e 100644
--- a/docs/dyn/dataflow_v1b3.projects.locations.flexTemplates.html
+++ b/docs/dyn/dataflow_v1b3.projects.locations.flexTemplates.html
@@ -106,16 +106,18 @@
"additionalUserLabels": { # Additional user labels to be specified for the job. Keys and values must follow the restrictions specified in the [labeling restrictions](https://cloud.google.com/compute/docs/labeling-resources#restrictions) page. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1kg", "count": "3" }.
"a_key": "A String",
},
+ "autoscalingAlgorithm": "A String", # The algorithm to use for autoscaling
"diskSizeGb": 42, # Worker disk size, in gigabytes.
+ "dumpHeapOnOom": True or False, # If true, save a heap dump before killing a thread or process which is GC thrashing or out of memory. The location of the heap file will either be echoed back to the user, or the user will be given the opportunity to download the heap file.
"enableStreamingEngine": True or False, # Whether to enable Streaming Engine for the job.
"flexrsGoal": "A String", # Set FlexRS goal for the job. https://cloud.google.com/dataflow/docs/guides/flexrs
"ipConfiguration": "A String", # Configuration for VM IPs.
"kmsKeyName": "A String", # Name for the Cloud KMS key for the job. Key format is: projects//locations//keyRings//cryptoKeys/
"machineType": "A String", # The machine type to use for the job. Defaults to the value from the template if not specified.
- "maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
"maxWorkers": 42, # The maximum number of Google Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
"network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
"numWorkers": 42, # The initial number of Google Compute Engine instances for the job.
+ "saveHeapDumpsToGcsPath": "A String", # Cloud Storage bucket (directory) to upload heap dumps to the given location. Enabling this implies that heap dumps should be generated on OOM (dump_heap_on_oom is set to true).
"sdkContainerImage": "A String", # Docker registry location of container image to use for the 'worker harness. Default is the container for the version of the SDK. Note this field is only valid for portable pipelines.
"serviceAccountEmail": "A String", # The email address of the service account to run the job as.
"stagingLocation": "A String", # The Cloud Storage path for staging local files. Must be a valid Cloud Storage URL, beginning with `gs://`.
@@ -158,16 +160,18 @@
"additionalUserLabels": { # Additional user labels to be specified for the job. Keys and values must follow the restrictions specified in the [labeling restrictions](https://cloud.google.com/compute/docs/labeling-resources#restrictions) page. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1kg", "count": "3" }.
"a_key": "A String",
},
+ "autoscalingAlgorithm": "A String", # The algorithm to use for autoscaling
"diskSizeGb": 42, # Worker disk size, in gigabytes.
+ "dumpHeapOnOom": True or False, # If true, save a heap dump before killing a thread or process which is GC thrashing or out of memory. The location of the heap file will either be echoed back to the user, or the user will be given the opportunity to download the heap file.
"enableStreamingEngine": True or False, # Whether to enable Streaming Engine for the job.
"flexrsGoal": "A String", # Set FlexRS goal for the job. https://cloud.google.com/dataflow/docs/guides/flexrs
"ipConfiguration": "A String", # Configuration for VM IPs.
"kmsKeyName": "A String", # Name for the Cloud KMS key for the job. Key format is: projects//locations//keyRings//cryptoKeys/
"machineType": "A String", # The machine type to use for the job. Defaults to the value from the template if not specified.
- "maxNumWorkers": 42, # The maximum number of workers to cap scaling at.
"maxWorkers": 42, # The maximum number of Google Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.
"network": "A String", # Network to which VMs will be assigned. If empty or unspecified, the service will use the network "default".
"numWorkers": 42, # The initial number of Google Compute Engine instances for the job.
+ "saveHeapDumpsToGcsPath": "A String", # Cloud Storage bucket (directory) to upload heap dumps to the given location. Enabling this implies that heap dumps should be generated on OOM (dump_heap_on_oom is set to true).
"sdkContainerImage": "A String", # Docker registry location of container image to use for the 'worker harness. Default is the container for the version of the SDK. Note this field is only valid for portable pipelines.
"serviceAccountEmail": "A String", # The email address of the service account to run the job as.
"stagingLocation": "A String", # The Cloud Storage path for staging local files. Must be a valid Cloud Storage URL, beginning with `gs://`.
@@ -329,8 +333,8 @@
},
"id": "A String", # The unique ID of this job. This field is set by the Cloud Dataflow service when the Job is created, and is immutable for the life of the job.
"jobMetadata": { # Metadata available primarily for filtering jobs. Will be included in the ListJob response and Job SUMMARY view. # This field is populated by the Dataflow service to support filtering jobs by the metadata values provided here. Populated for ListJobs and all GetJob views SUMMARY and higher.
- "bigTableDetails": [ # Identification of a Cloud BigTable source used in the Dataflow job.
- { # Metadata for a Cloud BigTable connector used by the job.
+ "bigTableDetails": [ # Identification of a Cloud Bigtable source used in the Dataflow job.
+ { # Metadata for a Cloud Bigtable connector used by the job.
"instanceId": "A String", # InstanceId accessed in the connection.
"projectId": "A String", # ProjectId accessed in the connection.
"tableId": "A String", # TableId accessed in the connection.
@@ -355,7 +359,7 @@
"filePattern": "A String", # File Pattern used to access files by the connector.
},
],
- "pubsubDetails": [ # Identification of a PubSub source used in the Dataflow job.
+ "pubsubDetails": [ # Identification of a Pub/Sub source used in the Dataflow job.
{ # Metadata for a Pub/Sub connector used by the job.
"subscription": "A String", # Subscription used in the connection.
"topic": "A String", # Topic accessed in the connection.