chore: Update discovery artifacts (#1195)
* chore(accesscontextmanager): update the api
* chore(adexchangebuyer2): update the api
* chore(admin): update the api
* chore(alertcenter): update the api
* chore(analyticsadmin): update the api
* chore(analyticsdata): update the api
* chore(androidmanagement): update the api
* chore(apigateway): update the api
* chore(apigee): update the api
* chore(appengine): update the api
* chore(area120tables): update the api
* chore(artifactregistry): update the api
* chore(bigquery): update the api
* chore(bigqueryconnection): update the api
* chore(bigqueryreservation): update the api
* chore(billingbudgets): update the api
* chore(binaryauthorization): update the api
* chore(blogger): update the api
* chore(calendar): update the api
* chore(chat): update the api
* chore(cloudasset): update the api
* chore(cloudbuild): update the api
* chore(cloudfunctions): update the api
* chore(cloudidentity): update the api
* chore(cloudkms): update the api
* chore(cloudresourcemanager): update the api
* chore(cloudscheduler): update the api
* chore(cloudtasks): update the api
* chore(composer): update the api
* chore(compute): update the api
* chore(container): update the api
* chore(containeranalysis): update the api
* chore(content): update the api
* chore(datacatalog): update the api
* chore(dataflow): update the api
* chore(datafusion): update the api
* chore(datamigration): update the api
* chore(dataproc): update the api
* chore(deploymentmanager): update the api
* chore(dialogflow): update the api
* chore(displayvideo): update the api
* chore(dlp): update the api
* chore(dns): update the api
* chore(documentai): update the api
* chore(eventarc): update the api
* chore(file): update the api
* chore(firebaseml): update the api
* chore(games): update the api
* chore(gameservices): update the api
* chore(genomics): update the api
* chore(healthcare): update the api
* chore(homegraph): update the api
* chore(iam): update the api
* chore(iap): update the api
* chore(jobs): update the api
* chore(lifesciences): update the api
* chore(localservices): update the api
* chore(managedidentities): update the api
* chore(manufacturers): update the api
* chore(memcache): update the api
* chore(ml): update the api
* chore(monitoring): update the api
* chore(notebooks): update the api
* chore(osconfig): update the api
* chore(pagespeedonline): update the api
* chore(people): update the api
* chore(privateca): update the api
* chore(prod_tt_sasportal): update the api
* chore(pubsub): update the api
* chore(pubsublite): update the api
* chore(recommender): update the api
* chore(remotebuildexecution): update the api
* chore(reseller): update the api
* chore(run): update the api
* chore(safebrowsing): update the api
* chore(sasportal): update the api
* chore(searchconsole): update the api
* chore(secretmanager): update the api
* chore(securitycenter): update the api
* chore(serviceconsumermanagement): update the api
* chore(servicecontrol): update the api
* chore(servicenetworking): update the api
* chore(serviceusage): update the api
* chore(sheets): update the api
* chore(slides): update the api
* chore(spanner): update the api
* chore(speech): update the api
* chore(sqladmin): update the api
* chore(storage): update the api
* chore(storagetransfer): update the api
* chore(sts): update the api
* chore(tagmanager): update the api
* chore(testing): update the api
* chore(toolresults): update the api
* chore(transcoder): update the api
* chore(vectortile): update the api
* chore(videointelligence): update the api
* chore(vision): update the api
* chore(webmasters): update the api
* chore(workflowexecutions): update the api
* chore(youtube): update the api
diff --git a/docs/dyn/dlp_v2.projects.jobTriggers.html b/docs/dyn/dlp_v2.projects.jobTriggers.html
index 68d1f7c..2cb1926 100644
--- a/docs/dyn/dlp_v2.projects.jobTriggers.html
+++ b/docs/dyn/dlp_v2.projects.jobTriggers.html
@@ -228,7 +228,7 @@
"name": "A String", # Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`.
},
],
- "limits": { # Configuration to control the number of findings returned. # Configuration to control the number of findings returned.
+ "limits": { # Configuration to control the number of findings returned. Cannot be set if de-identification is requested. # Configuration to control the number of findings returned.
"maxFindingsPerInfoType": [ # Configuration of findings limit given for specified infoTypes.
{ # Max findings configuration per infoType, per content item or long running DlpJob.
"infoType": { # Type of information detected by the API. # Type of information the findings limit applies to. Only one limit per info_type should be provided. If InfoTypeLimit does not have an info_type, the DLP API applies the limit against all info_types that are found but not specified in another InfoTypeLimit.
@@ -320,8 +320,8 @@
},
},
"cloudStorageOptions": { # Options defining a file or a set of files within a Google Cloud Storage bucket. # Google Cloud Storage options.
- "bytesLimitPerFile": "A String", # Max number of bytes to scan from a file. If a scanned file's size is bigger than this value then the rest of the bytes are omitted. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified.
- "bytesLimitPerFilePercent": 42, # Max percentage of bytes to scan from a file. The rest are omitted. The number of bytes scanned is rounded down. Must be between 0 and 100, inclusively. Both 0 and 100 means no limit. Defaults to 0. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified.
+ "bytesLimitPerFile": "A String", # Max number of bytes to scan from a file. If a scanned file's size is bigger than this value then the rest of the bytes are omitted. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified. Cannot be set if de-identification is requested.
+ "bytesLimitPerFilePercent": 42, # Max percentage of bytes to scan from a file. The rest are omitted. The number of bytes scanned is rounded down. Must be between 0 and 100, inclusively. Both 0 and 100 means no limit. Defaults to 0. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified. Cannot be set if de-identification is requested.
"fileSet": { # Set of files to scan. # The set of one or more files to scan.
"regexFileSet": { # Message representing a set of files in a Cloud Storage bucket. Regular expressions are used to allow fine-grained control over which files in the bucket to include. Included files are those that match at least one item in `include_regex` and do not match any items in `exclude_regex`. Note that a file that matches items from both lists will _not_ be included. For a match to occur, the entire file path (i.e., everything in the url after the bucket name) must match the regular expression. For example, given the input `{bucket_name: "mybucket", include_regex: ["directory1/.*"], exclude_regex: ["directory1/excluded.*"]}`: * `gs://mybucket/directory1/myfile` will be included * `gs://mybucket/directory1/directory2/myfile` will be included (`.*` matches across `/`) * `gs://mybucket/directory0/directory1/myfile` will _not_ be included (the full path doesn't match any items in `include_regex`) * `gs://mybucket/directory1/excludedfile` will _not_ be included (the path matches an item in `exclude_regex`) If `include_regex` is left empty, it will match all files by default (this is equivalent to setting `include_regex: [".*"]`). Some other common use cases: * `{bucket_name: "mybucket", exclude_regex: [".*\.pdf"]}` will include all files in `mybucket` except for .pdf files * `{bucket_name: "mybucket", include_regex: ["directory/[^/]+"]}` will include all files directly under `gs://mybucket/directory/`, without matching across `/` # The regex-filtered set of files to scan. Exactly one of `url` or `regex_file_set` must be set.
"bucketName": "A String", # The name of a Cloud Storage bucket. Required.
@@ -349,7 +349,7 @@
"projectId": "A String", # The ID of the project to which the entities belong.
},
},
- "hybridOptions": { # Configuration to control jobs where the content being inspected is outside of Google Cloud Platform. # Hybrid inspection options. Early access feature is in a pre-release state and might change or have limited support. For more information, see https://cloud.google.com/products#product-launch-stages.
+ "hybridOptions": { # Configuration to control jobs where the content being inspected is outside of Google Cloud Platform. # Hybrid inspection options.
"description": "A String", # A short description of where the data is coming from. Will be stored once in the job. 256 max length.
"labels": { # To organize findings, these labels will be added to each finding. Label keys must be between 1 and 63 characters long and must conform to the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`. Label values must be between 0 and 63 characters long and must conform to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. No more than 10 labels can be associated with a given finding. Examples: * `"environment" : "production"` * `"pipeline" : "etl"`
"a_key": "A String",
@@ -441,7 +441,7 @@
"name": "A String", # Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`.
},
],
- "limits": { # Configuration to control the number of findings returned. # Configuration to control the number of findings returned.
+ "limits": { # Configuration to control the number of findings returned. Cannot be set if de-identification is requested. # Configuration to control the number of findings returned.
"maxFindingsPerInfoType": [ # Configuration of findings limit given for specified infoTypes.
{ # Max findings configuration per infoType, per content item or long running DlpJob.
"infoType": { # Type of information detected by the API. # Type of information the findings limit applies to. Only one limit per info_type should be provided. If InfoTypeLimit does not have an info_type, the DLP API applies the limit against all info_types that are found but not specified in another InfoTypeLimit.
@@ -514,8 +514,8 @@
"updateTime": "A String", # Output only. The last update timestamp of an inspectTemplate.
},
},
- "result": { # All result fields mentioned below are updated while the job is processing. # A summary of the outcome of this inspect job.
- "hybridStats": { # Statistics related to processing hybrid inspect requests. # Statistics related to the processing of hybrid inspect. Early access feature is in a pre-release state and might change or have limited support. For more information, see https://cloud.google.com/products#product-launch-stages.
+ "result": { # All result fields mentioned below are updated while the job is processing. # A summary of the outcome of this inspection job.
+ "hybridStats": { # Statistics related to processing hybrid inspect requests. # Statistics related to the processing of hybrid inspect.
"abortedCount": "A String", # The number of hybrid inspection requests aborted because the job ran out of quota or was ended before they could be processed.
"pendingCount": "A String", # The number of hybrid requests currently being processed. Only populated when called via method `getDlpJob`. A burst of traffic may cause hybrid inspect requests to be enqueued. Processing will take place as quickly as possible, but resource limitations may impact how long a request is enqueued for.
"processedCount": "A String", # The number of hybrid inspection requests processed within this job.
@@ -1180,7 +1180,7 @@
"name": "A String", # Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`.
},
],
- "limits": { # Configuration to control the number of findings returned. # Configuration to control the number of findings returned.
+ "limits": { # Configuration to control the number of findings returned. Cannot be set if de-identification is requested. # Configuration to control the number of findings returned.
"maxFindingsPerInfoType": [ # Configuration of findings limit given for specified infoTypes.
{ # Max findings configuration per infoType, per content item or long running DlpJob.
"infoType": { # Type of information detected by the API. # Type of information the findings limit applies to. Only one limit per info_type should be provided. If InfoTypeLimit does not have an info_type, the DLP API applies the limit against all info_types that are found but not specified in another InfoTypeLimit.
@@ -1272,8 +1272,8 @@
},
},
"cloudStorageOptions": { # Options defining a file or a set of files within a Google Cloud Storage bucket. # Google Cloud Storage options.
- "bytesLimitPerFile": "A String", # Max number of bytes to scan from a file. If a scanned file's size is bigger than this value then the rest of the bytes are omitted. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified.
- "bytesLimitPerFilePercent": 42, # Max percentage of bytes to scan from a file. The rest are omitted. The number of bytes scanned is rounded down. Must be between 0 and 100, inclusively. Both 0 and 100 means no limit. Defaults to 0. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified.
+ "bytesLimitPerFile": "A String", # Max number of bytes to scan from a file. If a scanned file's size is bigger than this value then the rest of the bytes are omitted. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified. Cannot be set if de-identification is requested.
+ "bytesLimitPerFilePercent": 42, # Max percentage of bytes to scan from a file. The rest are omitted. The number of bytes scanned is rounded down. Must be between 0 and 100, inclusively. Both 0 and 100 means no limit. Defaults to 0. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified. Cannot be set if de-identification is requested.
"fileSet": { # Set of files to scan. # The set of one or more files to scan.
"regexFileSet": { # Message representing a set of files in a Cloud Storage bucket. Regular expressions are used to allow fine-grained control over which files in the bucket to include. Included files are those that match at least one item in `include_regex` and do not match any items in `exclude_regex`. Note that a file that matches items from both lists will _not_ be included. For a match to occur, the entire file path (i.e., everything in the url after the bucket name) must match the regular expression. For example, given the input `{bucket_name: "mybucket", include_regex: ["directory1/.*"], exclude_regex: ["directory1/excluded.*"]}`: * `gs://mybucket/directory1/myfile` will be included * `gs://mybucket/directory1/directory2/myfile` will be included (`.*` matches across `/`) * `gs://mybucket/directory0/directory1/myfile` will _not_ be included (the full path doesn't match any items in `include_regex`) * `gs://mybucket/directory1/excludedfile` will _not_ be included (the path matches an item in `exclude_regex`) If `include_regex` is left empty, it will match all files by default (this is equivalent to setting `include_regex: [".*"]`). Some other common use cases: * `{bucket_name: "mybucket", exclude_regex: [".*\.pdf"]}` will include all files in `mybucket` except for .pdf files * `{bucket_name: "mybucket", include_regex: ["directory/[^/]+"]}` will include all files directly under `gs://mybucket/directory/`, without matching across `/` # The regex-filtered set of files to scan. Exactly one of `url` or `regex_file_set` must be set.
"bucketName": "A String", # The name of a Cloud Storage bucket. Required.
@@ -1301,7 +1301,7 @@
"projectId": "A String", # The ID of the project to which the entities belong.
},
},
- "hybridOptions": { # Configuration to control jobs where the content being inspected is outside of Google Cloud Platform. # Hybrid inspection options. Early access feature is in a pre-release state and might change or have limited support. For more information, see https://cloud.google.com/products#product-launch-stages.
+ "hybridOptions": { # Configuration to control jobs where the content being inspected is outside of Google Cloud Platform. # Hybrid inspection options.
"description": "A String", # A short description of where the data is coming from. Will be stored once in the job. 256 max length.
"labels": { # To organize findings, these labels will be added to each finding. Label keys must be between 1 and 63 characters long and must conform to the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`. Label values must be between 0 and 63 characters long and must conform to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. No more than 10 labels can be associated with a given finding. Examples: * `"environment" : "production"` * `"pipeline" : "etl"`
"a_key": "A String",
@@ -1332,9 +1332,9 @@
"status": "A String", # Required. A status for this trigger.
"triggers": [ # A list of triggers which will be OR'ed together. Only one in the list needs to trigger for a job to be started. The list may contain only a single Schedule trigger and must have at least one object.
{ # What event needs to occur for a new job to be started.
- "manual": { # Job trigger option for hybrid jobs. Jobs must be manually created and finished. # For use with hybrid jobs. Jobs must be manually created and finished. Early access feature is in a pre-release state and might change or have limited support. For more information, see https://cloud.google.com/products#product-launch-stages.
+ "manual": { # Job trigger option for hybrid jobs. Jobs must be manually created and finished. # For use with hybrid jobs. Jobs must be manually created and finished.
},
- "schedule": { # Schedule for triggeredJobs. # Create a job on a repeating basis based on the elapse of time.
+ "schedule": { # Schedule for inspect job triggers. # Create a job on a repeating basis based on the elapse of time.
"recurrencePeriodDuration": "A String", # With this option a job is started a regular periodic basis. For example: every day (86400 seconds). A scheduled start time will be skipped if the previous execution has not ended when its scheduled time occurs. This value must be set to a time duration greater than or equal to 1 day and can be no longer than 60 days.
},
},
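(Aside: the hunks above drop the early-access caveat from hybrid inspection — hybridOptions, hybridStats, and the manual trigger. A minimal Python sketch, under the assumption of a hypothetical project and labels, of what a manually triggered hybrid job trigger body might look like; field names are taken from the schema shown in this diff:

    from googleapiclient import discovery

    dlp = discovery.build("dlp", "v2")  # uses application-default credentials

    hybrid_trigger_body = {
        "jobTrigger": {
            "displayName": "hybrid-inspection",  # hypothetical name
            "status": "HEALTHY",
            # Hybrid jobs are created and finished manually rather than on a schedule.
            "triggers": [{"manual": {}}],
            "inspectJob": {
                "storageConfig": {
                    "hybridOptions": {
                        "description": "Content inspected outside of Google Cloud",
                        # Labels are copied onto each finding; see the label constraints above.
                        "labels": {"pipeline": "etl"},
                    }
                },
                "inspectConfig": {"infoTypes": [{"name": "PHONE_NUMBER"}]},
            },
        }
    }

    created = (
        dlp.projects()
        .jobTriggers()
        .create(parent="projects/example-project", body=hybrid_trigger_body)
        .execute()
    )

Externally submitted content is then matched against this trigger, and the hybridStats block shown earlier — processedCount, pendingCount, abortedCount — summarizes how those requests were handled.)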
@@ -1461,7 +1461,7 @@
"name": "A String", # Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`.
},
],
- "limits": { # Configuration to control the number of findings returned. # Configuration to control the number of findings returned.
+ "limits": { # Configuration to control the number of findings returned. Cannot be set if de-identification is requested. # Configuration to control the number of findings returned.
"maxFindingsPerInfoType": [ # Configuration of findings limit given for specified infoTypes.
{ # Max findings configuration per infoType, per content item or long running DlpJob.
"infoType": { # Type of information detected by the API. # Type of information the findings limit applies to. Only one limit per info_type should be provided. If InfoTypeLimit does not have an info_type, the DLP API applies the limit against all info_types that are found but not specified in another InfoTypeLimit.
@@ -1553,8 +1553,8 @@
},
},
"cloudStorageOptions": { # Options defining a file or a set of files within a Google Cloud Storage bucket. # Google Cloud Storage options.
- "bytesLimitPerFile": "A String", # Max number of bytes to scan from a file. If a scanned file's size is bigger than this value then the rest of the bytes are omitted. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified.
- "bytesLimitPerFilePercent": 42, # Max percentage of bytes to scan from a file. The rest are omitted. The number of bytes scanned is rounded down. Must be between 0 and 100, inclusively. Both 0 and 100 means no limit. Defaults to 0. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified.
+ "bytesLimitPerFile": "A String", # Max number of bytes to scan from a file. If a scanned file's size is bigger than this value then the rest of the bytes are omitted. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified. Cannot be set if de-identification is requested.
+ "bytesLimitPerFilePercent": 42, # Max percentage of bytes to scan from a file. The rest are omitted. The number of bytes scanned is rounded down. Must be between 0 and 100, inclusively. Both 0 and 100 means no limit. Defaults to 0. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified. Cannot be set if de-identification is requested.
"fileSet": { # Set of files to scan. # The set of one or more files to scan.
"regexFileSet": { # Message representing a set of files in a Cloud Storage bucket. Regular expressions are used to allow fine-grained control over which files in the bucket to include. Included files are those that match at least one item in `include_regex` and do not match any items in `exclude_regex`. Note that a file that matches items from both lists will _not_ be included. For a match to occur, the entire file path (i.e., everything in the url after the bucket name) must match the regular expression. For example, given the input `{bucket_name: "mybucket", include_regex: ["directory1/.*"], exclude_regex: ["directory1/excluded.*"]}`: * `gs://mybucket/directory1/myfile` will be included * `gs://mybucket/directory1/directory2/myfile` will be included (`.*` matches across `/`) * `gs://mybucket/directory0/directory1/myfile` will _not_ be included (the full path doesn't match any items in `include_regex`) * `gs://mybucket/directory1/excludedfile` will _not_ be included (the path matches an item in `exclude_regex`) If `include_regex` is left empty, it will match all files by default (this is equivalent to setting `include_regex: [".*"]`). Some other common use cases: * `{bucket_name: "mybucket", exclude_regex: [".*\.pdf"]}` will include all files in `mybucket` except for .pdf files * `{bucket_name: "mybucket", include_regex: ["directory/[^/]+"]}` will include all files directly under `gs://mybucket/directory/`, without matching across `/` # The regex-filtered set of files to scan. Exactly one of `url` or `regex_file_set` must be set.
"bucketName": "A String", # The name of a Cloud Storage bucket. Required.
@@ -1582,7 +1582,7 @@
"projectId": "A String", # The ID of the project to which the entities belong.
},
},
- "hybridOptions": { # Configuration to control jobs where the content being inspected is outside of Google Cloud Platform. # Hybrid inspection options. Early access feature is in a pre-release state and might change or have limited support. For more information, see https://cloud.google.com/products#product-launch-stages.
+ "hybridOptions": { # Configuration to control jobs where the content being inspected is outside of Google Cloud Platform. # Hybrid inspection options.
"description": "A String", # A short description of where the data is coming from. Will be stored once in the job. 256 max length.
"labels": { # To organize findings, these labels will be added to each finding. Label keys must be between 1 and 63 characters long and must conform to the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`. Label values must be between 0 and 63 characters long and must conform to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. No more than 10 labels can be associated with a given finding. Examples: * `"environment" : "production"` * `"pipeline" : "etl"`
"a_key": "A String",
@@ -1613,9 +1613,9 @@
"status": "A String", # Required. A status for this trigger.
"triggers": [ # A list of triggers which will be OR'ed together. Only one in the list needs to trigger for a job to be started. The list may contain only a single Schedule trigger and must have at least one object.
{ # What event needs to occur for a new job to be started.
- "manual": { # Job trigger option for hybrid jobs. Jobs must be manually created and finished. # For use with hybrid jobs. Jobs must be manually created and finished. Early access feature is in a pre-release state and might change or have limited support. For more information, see https://cloud.google.com/products#product-launch-stages.
+ "manual": { # Job trigger option for hybrid jobs. Jobs must be manually created and finished. # For use with hybrid jobs. Jobs must be manually created and finished.
},
- "schedule": { # Schedule for triggeredJobs. # Create a job on a repeating basis based on the elapse of time.
+ "schedule": { # Schedule for inspect job triggers. # Create a job on a repeating basis based on the elapse of time.
"recurrencePeriodDuration": "A String", # With this option a job is started a regular periodic basis. For example: every day (86400 seconds). A scheduled start time will be skipped if the previous execution has not ended when its scheduled time occurs. This value must be set to a time duration greater than or equal to 1 day and can be no longer than 60 days.
},
},
@@ -1764,7 +1764,7 @@
"name": "A String", # Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`.
},
],
- "limits": { # Configuration to control the number of findings returned. # Configuration to control the number of findings returned.
+ "limits": { # Configuration to control the number of findings returned. Cannot be set if de-identification is requested. # Configuration to control the number of findings returned.
"maxFindingsPerInfoType": [ # Configuration of findings limit given for specified infoTypes.
{ # Max findings configuration per infoType, per content item or long running DlpJob.
"infoType": { # Type of information detected by the API. # Type of information the findings limit applies to. Only one limit per info_type should be provided. If InfoTypeLimit does not have an info_type, the DLP API applies the limit against all info_types that are found but not specified in another InfoTypeLimit.
@@ -1856,8 +1856,8 @@
},
},
"cloudStorageOptions": { # Options defining a file or a set of files within a Google Cloud Storage bucket. # Google Cloud Storage options.
- "bytesLimitPerFile": "A String", # Max number of bytes to scan from a file. If a scanned file's size is bigger than this value then the rest of the bytes are omitted. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified.
- "bytesLimitPerFilePercent": 42, # Max percentage of bytes to scan from a file. The rest are omitted. The number of bytes scanned is rounded down. Must be between 0 and 100, inclusively. Both 0 and 100 means no limit. Defaults to 0. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified.
+ "bytesLimitPerFile": "A String", # Max number of bytes to scan from a file. If a scanned file's size is bigger than this value then the rest of the bytes are omitted. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified. Cannot be set if de-identification is requested.
+ "bytesLimitPerFilePercent": 42, # Max percentage of bytes to scan from a file. The rest are omitted. The number of bytes scanned is rounded down. Must be between 0 and 100, inclusively. Both 0 and 100 means no limit. Defaults to 0. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified. Cannot be set if de-identification is requested.
"fileSet": { # Set of files to scan. # The set of one or more files to scan.
"regexFileSet": { # Message representing a set of files in a Cloud Storage bucket. Regular expressions are used to allow fine-grained control over which files in the bucket to include. Included files are those that match at least one item in `include_regex` and do not match any items in `exclude_regex`. Note that a file that matches items from both lists will _not_ be included. For a match to occur, the entire file path (i.e., everything in the url after the bucket name) must match the regular expression. For example, given the input `{bucket_name: "mybucket", include_regex: ["directory1/.*"], exclude_regex: ["directory1/excluded.*"]}`: * `gs://mybucket/directory1/myfile` will be included * `gs://mybucket/directory1/directory2/myfile` will be included (`.*` matches across `/`) * `gs://mybucket/directory0/directory1/myfile` will _not_ be included (the full path doesn't match any items in `include_regex`) * `gs://mybucket/directory1/excludedfile` will _not_ be included (the path matches an item in `exclude_regex`) If `include_regex` is left empty, it will match all files by default (this is equivalent to setting `include_regex: [".*"]`). Some other common use cases: * `{bucket_name: "mybucket", exclude_regex: [".*\.pdf"]}` will include all files in `mybucket` except for .pdf files * `{bucket_name: "mybucket", include_regex: ["directory/[^/]+"]}` will include all files directly under `gs://mybucket/directory/`, without matching across `/` # The regex-filtered set of files to scan. Exactly one of `url` or `regex_file_set` must be set.
"bucketName": "A String", # The name of a Cloud Storage bucket. Required.
@@ -1885,7 +1885,7 @@
"projectId": "A String", # The ID of the project to which the entities belong.
},
},
- "hybridOptions": { # Configuration to control jobs where the content being inspected is outside of Google Cloud Platform. # Hybrid inspection options. Early access feature is in a pre-release state and might change or have limited support. For more information, see https://cloud.google.com/products#product-launch-stages.
+ "hybridOptions": { # Configuration to control jobs where the content being inspected is outside of Google Cloud Platform. # Hybrid inspection options.
"description": "A String", # A short description of where the data is coming from. Will be stored once in the job. 256 max length.
"labels": { # To organize findings, these labels will be added to each finding. Label keys must be between 1 and 63 characters long and must conform to the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`. Label values must be between 0 and 63 characters long and must conform to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. No more than 10 labels can be associated with a given finding. Examples: * `"environment" : "production"` * `"pipeline" : "etl"`
"a_key": "A String",
@@ -1916,9 +1916,9 @@
"status": "A String", # Required. A status for this trigger.
"triggers": [ # A list of triggers which will be OR'ed together. Only one in the list needs to trigger for a job to be started. The list may contain only a single Schedule trigger and must have at least one object.
{ # What event needs to occur for a new job to be started.
- "manual": { # Job trigger option for hybrid jobs. Jobs must be manually created and finished. # For use with hybrid jobs. Jobs must be manually created and finished. Early access feature is in a pre-release state and might change or have limited support. For more information, see https://cloud.google.com/products#product-launch-stages.
+ "manual": { # Job trigger option for hybrid jobs. Jobs must be manually created and finished. # For use with hybrid jobs. Jobs must be manually created and finished.
},
- "schedule": { # Schedule for triggeredJobs. # Create a job on a repeating basis based on the elapse of time.
+ "schedule": { # Schedule for inspect job triggers. # Create a job on a repeating basis based on the elapse of time.
"recurrencePeriodDuration": "A String", # With this option a job is started a regular periodic basis. For example: every day (86400 seconds). A scheduled start time will be skipped if the previous execution has not ended when its scheduled time occurs. This value must be set to a time duration greater than or equal to 1 day and can be no longer than 60 days.
},
},
@@ -1933,7 +1933,7 @@
Args:
parent: string, Required. Parent resource name. The format of this value varies depending on whether you have [specified a processing location](https://cloud.google.com/dlp/docs/specifying-location): + Projects scope, location specified: `projects/`PROJECT_ID`/locations/`LOCATION_ID + Projects scope, no location specified (defaults to global): `projects/`PROJECT_ID The following example `parent` string specifies a parent project with the identifier `example-project`, and specifies the `europe-west3` location for processing data: parent=projects/example-project/locations/europe-west3 (required)
- filter: string, Allows filtering. Supported syntax: * Filter expressions are made up of one or more restrictions. * Restrictions can be combined by `AND` or `OR` logical operators. A sequence of restrictions implicitly uses `AND`. * A restriction has the form of `{field} {operator} {value}`. * Supported fields/values for inspect jobs: - `status` - HEALTHY|PAUSED|CANCELLED - `inspected_storage` - DATASTORE|CLOUD_STORAGE|BIGQUERY - 'last_run_time` - RFC 3339 formatted timestamp, surrounded by quotation marks. Nanoseconds are ignored. - 'error_count' - Number of errors that have occurred while running. * The operator must be `=` or `!=` for status and inspected_storage. Examples: * inspected_storage = cloud_storage AND status = HEALTHY * inspected_storage = cloud_storage OR inspected_storage = bigquery * inspected_storage = cloud_storage AND (state = PAUSED OR state = HEALTHY) * last_run_time > \"2017-12-12T00:00:00+00:00\" The length of this field should be no more than 500 characters.
+ filter: string, Allows filtering. Supported syntax: * Filter expressions are made up of one or more restrictions. * Restrictions can be combined by `AND` or `OR` logical operators. A sequence of restrictions implicitly uses `AND`. * A restriction has the form of `{field} {operator} {value}`. * Supported fields/values for inspect triggers: - `status` - HEALTHY|PAUSED|CANCELLED - `inspected_storage` - DATASTORE|CLOUD_STORAGE|BIGQUERY - 'last_run_time` - RFC 3339 formatted timestamp, surrounded by quotation marks. Nanoseconds are ignored. - 'error_count' - Number of errors that have occurred while running. * The operator must be `=` or `!=` for status and inspected_storage. Examples: * inspected_storage = cloud_storage AND status = HEALTHY * inspected_storage = cloud_storage OR inspected_storage = bigquery * inspected_storage = cloud_storage AND (state = PAUSED OR state = HEALTHY) * last_run_time > \"2017-12-12T00:00:00+00:00\" The length of this field should be no more than 500 characters.
locationId: string, Deprecated. This field has no effect.
orderBy: string, Comma separated list of triggeredJob fields to order by, followed by `asc` or `desc` postfix. This list is case-insensitive, default sorting order is ascending, redundant space characters are insignificant. Example: `name asc,update_time, create_time desc` Supported fields are: - `create_time`: corresponds to time the JobTrigger was created. - `update_time`: corresponds to time the JobTrigger was last updated. - `last_run_time`: corresponds to the last time the JobTrigger ran. - `name`: corresponds to JobTrigger's name. - `display_name`: corresponds to JobTrigger's display name. - `status`: corresponds to JobTrigger's status.
pageSize: integer, Size of the page, can be limited by a server.
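(Aside: a short sketch of how the filter and orderBy arguments documented above might be used from the Python client; the project ID is hypothetical and the filter string follows the syntax described in the docstring:

    from googleapiclient import discovery

    dlp = discovery.build("dlp", "v2")

    request = dlp.projects().jobTriggers().list(
        parent="projects/example-project",
        filter="inspected_storage = cloud_storage AND status = HEALTHY",
        orderBy="last_run_time desc",
        pageSize=50,
    )
    while request is not None:
        response = request.execute()
        for trigger in response.get("jobTriggers", []):
            print(trigger["name"], trigger.get("status"))
        # list_next() returns None once there are no further pages.
        request = dlp.projects().jobTriggers().list_next(request, response)
)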
@@ -2056,7 +2056,7 @@
"name": "A String", # Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`.
},
],
- "limits": { # Configuration to control the number of findings returned. # Configuration to control the number of findings returned.
+ "limits": { # Configuration to control the number of findings returned. Cannot be set if de-identification is requested. # Configuration to control the number of findings returned.
"maxFindingsPerInfoType": [ # Configuration of findings limit given for specified infoTypes.
{ # Max findings configuration per infoType, per content item or long running DlpJob.
"infoType": { # Type of information detected by the API. # Type of information the findings limit applies to. Only one limit per info_type should be provided. If InfoTypeLimit does not have an info_type, the DLP API applies the limit against all info_types that are found but not specified in another InfoTypeLimit.
@@ -2148,8 +2148,8 @@
},
},
"cloudStorageOptions": { # Options defining a file or a set of files within a Google Cloud Storage bucket. # Google Cloud Storage options.
- "bytesLimitPerFile": "A String", # Max number of bytes to scan from a file. If a scanned file's size is bigger than this value then the rest of the bytes are omitted. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified.
- "bytesLimitPerFilePercent": 42, # Max percentage of bytes to scan from a file. The rest are omitted. The number of bytes scanned is rounded down. Must be between 0 and 100, inclusively. Both 0 and 100 means no limit. Defaults to 0. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified.
+ "bytesLimitPerFile": "A String", # Max number of bytes to scan from a file. If a scanned file's size is bigger than this value then the rest of the bytes are omitted. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified. Cannot be set if de-identification is requested.
+ "bytesLimitPerFilePercent": 42, # Max percentage of bytes to scan from a file. The rest are omitted. The number of bytes scanned is rounded down. Must be between 0 and 100, inclusively. Both 0 and 100 means no limit. Defaults to 0. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified. Cannot be set if de-identification is requested.
"fileSet": { # Set of files to scan. # The set of one or more files to scan.
"regexFileSet": { # Message representing a set of files in a Cloud Storage bucket. Regular expressions are used to allow fine-grained control over which files in the bucket to include. Included files are those that match at least one item in `include_regex` and do not match any items in `exclude_regex`. Note that a file that matches items from both lists will _not_ be included. For a match to occur, the entire file path (i.e., everything in the url after the bucket name) must match the regular expression. For example, given the input `{bucket_name: "mybucket", include_regex: ["directory1/.*"], exclude_regex: ["directory1/excluded.*"]}`: * `gs://mybucket/directory1/myfile` will be included * `gs://mybucket/directory1/directory2/myfile` will be included (`.*` matches across `/`) * `gs://mybucket/directory0/directory1/myfile` will _not_ be included (the full path doesn't match any items in `include_regex`) * `gs://mybucket/directory1/excludedfile` will _not_ be included (the path matches an item in `exclude_regex`) If `include_regex` is left empty, it will match all files by default (this is equivalent to setting `include_regex: [".*"]`). Some other common use cases: * `{bucket_name: "mybucket", exclude_regex: [".*\.pdf"]}` will include all files in `mybucket` except for .pdf files * `{bucket_name: "mybucket", include_regex: ["directory/[^/]+"]}` will include all files directly under `gs://mybucket/directory/`, without matching across `/` # The regex-filtered set of files to scan. Exactly one of `url` or `regex_file_set` must be set.
"bucketName": "A String", # The name of a Cloud Storage bucket. Required.
@@ -2177,7 +2177,7 @@
"projectId": "A String", # The ID of the project to which the entities belong.
},
},
- "hybridOptions": { # Configuration to control jobs where the content being inspected is outside of Google Cloud Platform. # Hybrid inspection options. Early access feature is in a pre-release state and might change or have limited support. For more information, see https://cloud.google.com/products#product-launch-stages.
+ "hybridOptions": { # Configuration to control jobs where the content being inspected is outside of Google Cloud Platform. # Hybrid inspection options.
"description": "A String", # A short description of where the data is coming from. Will be stored once in the job. 256 max length.
"labels": { # To organize findings, these labels will be added to each finding. Label keys must be between 1 and 63 characters long and must conform to the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`. Label values must be between 0 and 63 characters long and must conform to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. No more than 10 labels can be associated with a given finding. Examples: * `"environment" : "production"` * `"pipeline" : "etl"`
"a_key": "A String",
@@ -2208,9 +2208,9 @@
"status": "A String", # Required. A status for this trigger.
"triggers": [ # A list of triggers which will be OR'ed together. Only one in the list needs to trigger for a job to be started. The list may contain only a single Schedule trigger and must have at least one object.
{ # What event needs to occur for a new job to be started.
- "manual": { # Job trigger option for hybrid jobs. Jobs must be manually created and finished. # For use with hybrid jobs. Jobs must be manually created and finished. Early access feature is in a pre-release state and might change or have limited support. For more information, see https://cloud.google.com/products#product-launch-stages.
+ "manual": { # Job trigger option for hybrid jobs. Jobs must be manually created and finished. # For use with hybrid jobs. Jobs must be manually created and finished.
},
- "schedule": { # Schedule for triggeredJobs. # Create a job on a repeating basis based on the elapse of time.
+ "schedule": { # Schedule for inspect job triggers. # Create a job on a repeating basis based on the elapse of time.
"recurrencePeriodDuration": "A String", # With this option a job is started a regular periodic basis. For example: every day (86400 seconds). A scheduled start time will be skipped if the previous execution has not ended when its scheduled time occurs. This value must be set to a time duration greater than or equal to 1 day and can be no longer than 60 days.
},
},
@@ -2354,7 +2354,7 @@
"name": "A String", # Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`.
},
],
- "limits": { # Configuration to control the number of findings returned. # Configuration to control the number of findings returned.
+ "limits": { # Configuration to control the number of findings returned. Cannot be set if de-identification is requested. # Configuration to control the number of findings returned.
"maxFindingsPerInfoType": [ # Configuration of findings limit given for specified infoTypes.
{ # Max findings configuration per infoType, per content item or long running DlpJob.
"infoType": { # Type of information detected by the API. # Type of information the findings limit applies to. Only one limit per info_type should be provided. If InfoTypeLimit does not have an info_type, the DLP API applies the limit against all info_types that are found but not specified in another InfoTypeLimit.
@@ -2446,8 +2446,8 @@
},
},
"cloudStorageOptions": { # Options defining a file or a set of files within a Google Cloud Storage bucket. # Google Cloud Storage options.
- "bytesLimitPerFile": "A String", # Max number of bytes to scan from a file. If a scanned file's size is bigger than this value then the rest of the bytes are omitted. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified.
- "bytesLimitPerFilePercent": 42, # Max percentage of bytes to scan from a file. The rest are omitted. The number of bytes scanned is rounded down. Must be between 0 and 100, inclusively. Both 0 and 100 means no limit. Defaults to 0. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified.
+ "bytesLimitPerFile": "A String", # Max number of bytes to scan from a file. If a scanned file's size is bigger than this value then the rest of the bytes are omitted. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified. Cannot be set if de-identification is requested.
+ "bytesLimitPerFilePercent": 42, # Max percentage of bytes to scan from a file. The rest are omitted. The number of bytes scanned is rounded down. Must be between 0 and 100, inclusively. Both 0 and 100 means no limit. Defaults to 0. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified. Cannot be set if de-identification is requested.
"fileSet": { # Set of files to scan. # The set of one or more files to scan.
"regexFileSet": { # Message representing a set of files in a Cloud Storage bucket. Regular expressions are used to allow fine-grained control over which files in the bucket to include. Included files are those that match at least one item in `include_regex` and do not match any items in `exclude_regex`. Note that a file that matches items from both lists will _not_ be included. For a match to occur, the entire file path (i.e., everything in the url after the bucket name) must match the regular expression. For example, given the input `{bucket_name: "mybucket", include_regex: ["directory1/.*"], exclude_regex: ["directory1/excluded.*"]}`: * `gs://mybucket/directory1/myfile` will be included * `gs://mybucket/directory1/directory2/myfile` will be included (`.*` matches across `/`) * `gs://mybucket/directory0/directory1/myfile` will _not_ be included (the full path doesn't match any items in `include_regex`) * `gs://mybucket/directory1/excludedfile` will _not_ be included (the path matches an item in `exclude_regex`) If `include_regex` is left empty, it will match all files by default (this is equivalent to setting `include_regex: [".*"]`). Some other common use cases: * `{bucket_name: "mybucket", exclude_regex: [".*\.pdf"]}` will include all files in `mybucket` except for .pdf files * `{bucket_name: "mybucket", include_regex: ["directory/[^/]+"]}` will include all files directly under `gs://mybucket/directory/`, without matching across `/` # The regex-filtered set of files to scan. Exactly one of `url` or `regex_file_set` must be set.
"bucketName": "A String", # The name of a Cloud Storage bucket. Required.
@@ -2475,7 +2475,7 @@
"projectId": "A String", # The ID of the project to which the entities belong.
},
},
- "hybridOptions": { # Configuration to control jobs where the content being inspected is outside of Google Cloud Platform. # Hybrid inspection options. Early access feature is in a pre-release state and might change or have limited support. For more information, see https://cloud.google.com/products#product-launch-stages.
+ "hybridOptions": { # Configuration to control jobs where the content being inspected is outside of Google Cloud Platform. # Hybrid inspection options.
"description": "A String", # A short description of where the data is coming from. Will be stored once in the job. 256 max length.
"labels": { # To organize findings, these labels will be added to each finding. Label keys must be between 1 and 63 characters long and must conform to the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`. Label values must be between 0 and 63 characters long and must conform to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. No more than 10 labels can be associated with a given finding. Examples: * `"environment" : "production"` * `"pipeline" : "etl"`
"a_key": "A String",
@@ -2506,9 +2506,9 @@
"status": "A String", # Required. A status for this trigger.
"triggers": [ # A list of triggers which will be OR'ed together. Only one in the list needs to trigger for a job to be started. The list may contain only a single Schedule trigger and must have at least one object.
{ # What event needs to occur for a new job to be started.
- "manual": { # Job trigger option for hybrid jobs. Jobs must be manually created and finished. # For use with hybrid jobs. Jobs must be manually created and finished. Early access feature is in a pre-release state and might change or have limited support. For more information, see https://cloud.google.com/products#product-launch-stages.
+ "manual": { # Job trigger option for hybrid jobs. Jobs must be manually created and finished. # For use with hybrid jobs. Jobs must be manually created and finished.
},
- "schedule": { # Schedule for triggeredJobs. # Create a job on a repeating basis based on the elapse of time.
+ "schedule": { # Schedule for inspect job triggers. # Create a job on a repeating basis based on the elapse of time.
"recurrencePeriodDuration": "A String", # With this option a job is started a regular periodic basis. For example: every day (86400 seconds). A scheduled start time will be skipped if the previous execution has not ended when its scheduled time occurs. This value must be set to a time duration greater than or equal to 1 day and can be no longer than 60 days.
},
},
@@ -2634,7 +2634,7 @@
"name": "A String", # Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`.
},
],
- "limits": { # Configuration to control the number of findings returned. # Configuration to control the number of findings returned.
+ "limits": { # Configuration to control the number of findings returned. Cannot be set if de-identification is requested. # Configuration to control the number of findings returned.
"maxFindingsPerInfoType": [ # Configuration of findings limit given for specified infoTypes.
{ # Max findings configuration per infoType, per content item or long running DlpJob.
"infoType": { # Type of information detected by the API. # Type of information the findings limit applies to. Only one limit per info_type should be provided. If InfoTypeLimit does not have an info_type, the DLP API applies the limit against all info_types that are found but not specified in another InfoTypeLimit.
@@ -2726,8 +2726,8 @@
},
},
"cloudStorageOptions": { # Options defining a file or a set of files within a Google Cloud Storage bucket. # Google Cloud Storage options.
- "bytesLimitPerFile": "A String", # Max number of bytes to scan from a file. If a scanned file's size is bigger than this value then the rest of the bytes are omitted. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified.
- "bytesLimitPerFilePercent": 42, # Max percentage of bytes to scan from a file. The rest are omitted. The number of bytes scanned is rounded down. Must be between 0 and 100, inclusively. Both 0 and 100 means no limit. Defaults to 0. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified.
+ "bytesLimitPerFile": "A String", # Max number of bytes to scan from a file. If a scanned file's size is bigger than this value then the rest of the bytes are omitted. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified. Cannot be set if de-identification is requested.
+ "bytesLimitPerFilePercent": 42, # Max percentage of bytes to scan from a file. The rest are omitted. The number of bytes scanned is rounded down. Must be between 0 and 100, inclusively. Both 0 and 100 means no limit. Defaults to 0. Only one of bytes_limit_per_file and bytes_limit_per_file_percent can be specified. Cannot be set if de-identification is requested.
"fileSet": { # Set of files to scan. # The set of one or more files to scan.
"regexFileSet": { # Message representing a set of files in a Cloud Storage bucket. Regular expressions are used to allow fine-grained control over which files in the bucket to include. Included files are those that match at least one item in `include_regex` and do not match any items in `exclude_regex`. Note that a file that matches items from both lists will _not_ be included. For a match to occur, the entire file path (i.e., everything in the url after the bucket name) must match the regular expression. For example, given the input `{bucket_name: "mybucket", include_regex: ["directory1/.*"], exclude_regex: ["directory1/excluded.*"]}`: * `gs://mybucket/directory1/myfile` will be included * `gs://mybucket/directory1/directory2/myfile` will be included (`.*` matches across `/`) * `gs://mybucket/directory0/directory1/myfile` will _not_ be included (the full path doesn't match any items in `include_regex`) * `gs://mybucket/directory1/excludedfile` will _not_ be included (the path matches an item in `exclude_regex`) If `include_regex` is left empty, it will match all files by default (this is equivalent to setting `include_regex: [".*"]`). Some other common use cases: * `{bucket_name: "mybucket", exclude_regex: [".*\.pdf"]}` will include all files in `mybucket` except for .pdf files * `{bucket_name: "mybucket", include_regex: ["directory/[^/]+"]}` will include all files directly under `gs://mybucket/directory/`, without matching across `/` # The regex-filtered set of files to scan. Exactly one of `url` or `regex_file_set` must be set.
"bucketName": "A String", # The name of a Cloud Storage bucket. Required.
@@ -2755,7 +2755,7 @@
"projectId": "A String", # The ID of the project to which the entities belong.
},
},
- "hybridOptions": { # Configuration to control jobs where the content being inspected is outside of Google Cloud Platform. # Hybrid inspection options. Early access feature is in a pre-release state and might change or have limited support. For more information, see https://cloud.google.com/products#product-launch-stages.
+ "hybridOptions": { # Configuration to control jobs where the content being inspected is outside of Google Cloud Platform. # Hybrid inspection options.
"description": "A String", # A short description of where the data is coming from. Will be stored once in the job. 256 max length.
"labels": { # To organize findings, these labels will be added to each finding. Label keys must be between 1 and 63 characters long and must conform to the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`. Label values must be between 0 and 63 characters long and must conform to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`. No more than 10 labels can be associated with a given finding. Examples: * `"environment" : "production"` * `"pipeline" : "etl"`
"a_key": "A String",
@@ -2786,9 +2786,9 @@
"status": "A String", # Required. A status for this trigger.
"triggers": [ # A list of triggers which will be OR'ed together. Only one in the list needs to trigger for a job to be started. The list may contain only a single Schedule trigger and must have at least one object.
{ # What event needs to occur for a new job to be started.
- "manual": { # Job trigger option for hybrid jobs. Jobs must be manually created and finished. # For use with hybrid jobs. Jobs must be manually created and finished. Early access feature is in a pre-release state and might change or have limited support. For more information, see https://cloud.google.com/products#product-launch-stages.
+ "manual": { # Job trigger option for hybrid jobs. Jobs must be manually created and finished. # For use with hybrid jobs. Jobs must be manually created and finished.
},
- "schedule": { # Schedule for triggeredJobs. # Create a job on a repeating basis based on the elapse of time.
+ "schedule": { # Schedule for inspect job triggers. # Create a job on a repeating basis based on the elapse of time.
"recurrencePeriodDuration": "A String", # With this option a job is started a regular periodic basis. For example: every day (86400 seconds). A scheduled start time will be skipped if the previous execution has not ended when its scheduled time occurs. This value must be set to a time duration greater than or equal to 1 day and can be no longer than 60 days.
},
},
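Several of the hunks above add the same constraint — `limits`, `bytesLimitPerFile`, and `bytesLimitPerFilePercent` cannot be set when de-identification is requested — and reword the Schedule description to cover inspect job triggers. Below is a hedged sketch, not an official sample, of a scheduled Cloud Storage trigger that uses those fields; the project, bucket, and values are hypothetical, and these fields should be omitted entirely if the job also requests de-identification:

    from googleapiclient import discovery

    dlp = discovery.build("dlp", "v2")  # uses application-default credentials

    scheduled_trigger_body = {
        "jobTrigger": {
            "displayName": "nightly-gcs-scan",  # hypothetical name
            "status": "HEALTHY",
            "triggers": [
                # Run once per day; the duration must be between 1 and 60 days.
                {"schedule": {"recurrencePeriodDuration": "86400s"}}
            ],
            "inspectJob": {
                "storageConfig": {
                    "cloudStorageOptions": {
                        "fileSet": {"url": "gs://example-bucket/**"},
                        # Only one of bytesLimitPerFile / bytesLimitPerFilePercent may be set,
                        # and per this diff neither may be combined with de-identification.
                        "bytesLimitPerFilePercent": 10,
                    }
                },
                "inspectConfig": {
                    "infoTypes": [{"name": "EMAIL_ADDRESS"}],
                    # Also incompatible with de-identification, per the diff above.
                    "limits": {"maxFindingsPerInfoType": [{"maxFindings": 100}]},
                },
            },
        }
    }

    created = (
        dlp.projects()
        .jobTriggers()
        .create(parent="projects/example-project", body=scheduled_trigger_body)
        .execute()
    )
    print(created["name"])

Because the InfoTypeLimit above omits infoType, the limit applies to every detected infoType not covered by another InfoTypeLimit, matching the field description in the diff.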