chore: Update discovery artifacts (#1195)
* chore(accesscontextmanager): update the api
* chore(adexchangebuyer2): update the api
* chore(admin): update the api
* chore(alertcenter): update the api
* chore(analyticsadmin): update the api
* chore(analyticsdata): update the api
* chore(androidmanagement): update the api
* chore(apigateway): update the api
* chore(apigee): update the api
* chore(appengine): update the api
* chore(area120tables): update the api
* chore(artifactregistry): update the api
* chore(bigquery): update the api
* chore(bigqueryconnection): update the api
* chore(bigqueryreservation): update the api
* chore(billingbudgets): update the api
* chore(binaryauthorization): update the api
* chore(blogger): update the api
* chore(calendar): update the api
* chore(chat): update the api
* chore(cloudasset): update the api
* chore(cloudbuild): update the api
* chore(cloudfunctions): update the api
* chore(cloudidentity): update the api
* chore(cloudkms): update the api
* chore(cloudresourcemanager): update the api
* chore(cloudscheduler): update the api
* chore(cloudtasks): update the api
* chore(composer): update the api
* chore(compute): update the api
* chore(container): update the api
* chore(containeranalysis): update the api
* chore(content): update the api
* chore(datacatalog): update the api
* chore(dataflow): update the api
* chore(datafusion): update the api
* chore(datamigration): update the api
* chore(dataproc): update the api
* chore(deploymentmanager): update the api
* chore(dialogflow): update the api
* chore(displayvideo): update the api
* chore(dlp): update the api
* chore(dns): update the api
* chore(documentai): update the api
* chore(eventarc): update the api
* chore(file): update the api
* chore(firebaseml): update the api
* chore(games): update the api
* chore(gameservices): update the api
* chore(genomics): update the api
* chore(healthcare): update the api
* chore(homegraph): update the api
* chore(iam): update the api
* chore(iap): update the api
* chore(jobs): update the api
* chore(lifesciences): update the api
* chore(localservices): update the api
* chore(managedidentities): update the api
* chore(manufacturers): update the api
* chore(memcache): update the api
* chore(ml): update the api
* chore(monitoring): update the api
* chore(notebooks): update the api
* chore(osconfig): update the api
* chore(pagespeedonline): update the api
* chore(people): update the api
* chore(privateca): update the api
* chore(prod_tt_sasportal): update the api
* chore(pubsub): update the api
* chore(pubsublite): update the api
* chore(recommender): update the api
* chore(remotebuildexecution): update the api
* chore(reseller): update the api
* chore(run): update the api
* chore(safebrowsing): update the api
* chore(sasportal): update the api
* chore(searchconsole): update the api
* chore(secretmanager): update the api
* chore(securitycenter): update the api
* chore(serviceconsumermanagement): update the api
* chore(servicecontrol): update the api
* chore(servicenetworking): update the api
* chore(serviceusage): update the api
* chore(sheets): update the api
* chore(slides): update the api
* chore(spanner): update the api
* chore(speech): update the api
* chore(sqladmin): update the api
* chore(storage): update the api
* chore(storagetransfer): update the api
* chore(sts): update the api
* chore(tagmanager): update the api
* chore(testing): update the api
* chore(toolresults): update the api
* chore(transcoder): update the api
* chore(vectortile): update the api
* chore(videointelligence): update the api
* chore(vision): update the api
* chore(webmasters): update the api
* chore(workflowexecutions): update the api
* chore(youtube): update the api
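The diff below shows the sqladmin v1beta4 portion of this update: the Operation resource now documents a `backupContext` object, the export `fileType` description gains a *BAK* value for SQL Server, and the mangled `uri` descriptions are repaired into the plain `gs://bucketName/fileName` form. As a minimal sketch of reading the new field with this library (the project and operation names are placeholders, and credentials are assumed to come from Application Default Credentials):

```python
# A minimal sketch, assuming Application Default Credentials and a
# hypothetical project/operation id, of reading the new backupContext
# field from a sqladmin v1beta4 Operation.
from googleapiclient.discovery import build

service = build("sqladmin", "v1beta4")

operation = (
    service.operations()
    .get(project="my-project", operation="my-operation-id")
    .execute()
)

# backupContext is only populated for backup operations; other
# operations simply omit the field.
backup = operation.get("backupContext")
if backup is not None:
    # kind is always "sql#backupContext" per the updated docs.
    print(backup["backupId"], backup["kind"])
```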
diff --git a/docs/dyn/sqladmin_v1beta4.databases.html b/docs/dyn/sqladmin_v1beta4.databases.html
index 9af2d39..9dc55bd 100644
--- a/docs/dyn/sqladmin_v1beta4.databases.html
+++ b/docs/dyn/sqladmin_v1beta4.databases.html
@@ -117,7 +117,11 @@
Returns:
An object of the form:
- { # An Operation resource. For successful operations that return an Operation resource, only the fields relevant to the operation are populated in the resource.
+ { # An Operation resource. For successful operations that return an Operation resource, only the fields relevant to the operation are populated in the resource. Next field: 18
+ "backupContext": { # Backup context. # The context for backup operation, if applicable.
+ "backupId": "A String", # The identifier of the backup.
+ "kind": "A String", # This is always *sql#backupContext*.
+ },
"endTime": "A String", # The time this operation finished in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
"error": { # Database instance operation errors list wrapper. # If errors occurred during processing of this operation, this field will be populated.
"errors": [ # The list of errors encountered while processing this operation.
@@ -130,25 +134,25 @@
"kind": "A String", # This is always *sql#operationErrors*.
},
"exportContext": { # Database instance export context. # The context for export operation, if applicable.
- "csvExportOptions": { # Options for exporting data as CSV.
+ "csvExportOptions": { # Options for exporting data as CSV. *MySQL* and *PostgreSQL* instances only.
"selectQuery": "A String", # The select query used to extract the data.
},
"databases": [ # Databases to be exported. *MySQL instances:* If *fileType* is *SQL* and no database is specified, all databases are exported, except for the *mysql* system database. If *fileType* is *CSV*, you can specify one database, either by using this property or by using the *csvExportOptions.selectQuery* property, which takes precedence over this property. *PostgreSQL instances:* You must specify one database to be exported. If *fileType* is *CSV*, this database must match the one specified in the *csvExportOptions.selectQuery* property.
"A String",
],
- "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data.
+ "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data. *BAK*: The file contains backup data for a SQL Server instance.
"kind": "A String", # This is always *sql#exportContext*.
"offload": True or False, # Option for export offload.
"sqlExportOptions": { # Options for exporting data as SQL statements.
"mysqlExportOptions": { # Options for exporting from MySQL.
- "masterData": 42, # Option to include SQL statement required to set up replication. If set to *1*, the dump file includes a CHANGE MASTER TO statement with the binary log coordinates. If set to *2*, the CHANGE MASTER TO statement is written as a SQL comment, and has no effect. All other values are ignored.
+ "masterData": 42, # Option to include SQL statement required to set up replication. If set to *1*, the dump file includes a CHANGE MASTER TO statement with the binary log coordinates, and --set-gtid-purged is set to ON. If set to *2*, the CHANGE MASTER TO statement is written as a SQL comment and has no effect. If set to any value other than *1*, --set-gtid-purged is set to OFF.
},
"schemaOnly": True or False, # Export only schemas.
"tables": [ # Tables to export, or that were exported, from the specified database. If you specify tables, specify one and only one database. For PostgreSQL instances, you can specify only one table.
"A String",
],
},
- "uri": "A String", # The path to the file in Google Cloud Storage where the export will be stored. The URI is in the form *gs: //bucketName/fileName*. If the file already exists, the requests // succeeds, but the operation fails. If *fileType* is // *SQL* and the filename ends with .gz, the contents are // compressed.
+ "uri": "A String", # The path to the file in Google Cloud Storage where the export will be stored. The URI is in the form *gs://bucketName/fileName*. If the file already exists, the request succeeds, but the operation fails. If *fileType* is *SQL* and the filename ends with .gz, the contents are compressed.
},
"importContext": { # Database instance import context. # The context for import operation, if applicable.
"bakImportOptions": { # Import parameters specific to SQL Server .BAK files
@@ -168,7 +172,7 @@
"fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data.
"importUser": "A String", # The PostgreSQL user for this import operation. PostgreSQL instances only.
"kind": "A String", # This is always *sql#importContext*.
- "uri": "A String", # Path to the import file in Cloud Storage, in the form *gs: //bucketName/fileName*. Compressed gzip files (.gz) are supported // when *fileType* is *SQL*. The instance must have // write permissions to the bucket and read access to the file.
+ "uri": "A String", # Path to the import file in Cloud Storage, in the form *gs://bucketName/fileName*. Compressed gzip files (.gz) are supported when *fileType* is *SQL*. The instance must have write permissions to the bucket and read access to the file.
},
"insertTime": "A String", # The time this operation was enqueued in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
"kind": "A String", # This is always *sql#operation*.
@@ -249,7 +253,11 @@
Returns:
An object of the form:
- { # An Operation resource. For successful operations that return an Operation resource, only the fields relevant to the operation are populated in the resource.
+ { # An Operation resource. For successful operations that return an Operation resource, only the fields relevant to the operation are populated in the resource. Next field: 18
+ "backupContext": { # Backup context. # The context for backup operation, if applicable.
+ "backupId": "A String", # The identifier of the backup.
+ "kind": "A String", # This is always *sql#backupContext*.
+ },
"endTime": "A String", # The time this operation finished in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
"error": { # Database instance operation errors list wrapper. # If errors occurred during processing of this operation, this field will be populated.
"errors": [ # The list of errors encountered while processing this operation.
@@ -262,25 +270,25 @@
"kind": "A String", # This is always *sql#operationErrors*.
},
"exportContext": { # Database instance export context. # The context for export operation, if applicable.
- "csvExportOptions": { # Options for exporting data as CSV.
+ "csvExportOptions": { # Options for exporting data as CSV. *MySQL* and *PostgreSQL* instances only.
"selectQuery": "A String", # The select query used to extract the data.
},
"databases": [ # Databases to be exported. *MySQL instances:* If *fileType* is *SQL* and no database is specified, all databases are exported, except for the *mysql* system database. If *fileType* is *CSV*, you can specify one database, either by using this property or by using the *csvExportOptions.selectQuery* property, which takes precedence over this property. *PostgreSQL instances:* You must specify one database to be exported. If *fileType* is *CSV*, this database must match the one specified in the *csvExportOptions.selectQuery* property.
"A String",
],
- "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data.
+ "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data. *BAK*: The file contains backup data for a SQL Server instance.
"kind": "A String", # This is always *sql#exportContext*.
"offload": True or False, # Option for export offload.
"sqlExportOptions": { # Options for exporting data as SQL statements.
"mysqlExportOptions": { # Options for exporting from MySQL.
- "masterData": 42, # Option to include SQL statement required to set up replication. If set to *1*, the dump file includes a CHANGE MASTER TO statement with the binary log coordinates. If set to *2*, the CHANGE MASTER TO statement is written as a SQL comment, and has no effect. All other values are ignored.
+ "masterData": 42, # Option to include SQL statement required to set up replication. If set to *1*, the dump file includes a CHANGE MASTER TO statement with the binary log coordinates, and --set-gtid-purged is set to ON. If set to *2*, the CHANGE MASTER TO statement is written as a SQL comment and has no effect. If set to any value other than *1*, --set-gtid-purged is set to OFF.
},
"schemaOnly": True or False, # Export only schemas.
"tables": [ # Tables to export, or that were exported, from the specified database. If you specify tables, specify one and only one database. For PostgreSQL instances, you can specify only one table.
"A String",
],
},
- "uri": "A String", # The path to the file in Google Cloud Storage where the export will be stored. The URI is in the form *gs: //bucketName/fileName*. If the file already exists, the requests // succeeds, but the operation fails. If *fileType* is // *SQL* and the filename ends with .gz, the contents are // compressed.
+ "uri": "A String", # The path to the file in Google Cloud Storage where the export will be stored. The URI is in the form *gs://bucketName/fileName*. If the file already exists, the request succeeds, but the operation fails. If *fileType* is *SQL* and the filename ends with .gz, the contents are compressed.
},
"importContext": { # Database instance import context. # The context for import operation, if applicable.
"bakImportOptions": { # Import parameters specific to SQL Server .BAK files
@@ -300,7 +308,7 @@
"fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data.
"importUser": "A String", # The PostgreSQL user for this import operation. PostgreSQL instances only.
"kind": "A String", # This is always *sql#importContext*.
- "uri": "A String", # Path to the import file in Cloud Storage, in the form *gs: //bucketName/fileName*. Compressed gzip files (.gz) are supported // when *fileType* is *SQL*. The instance must have // write permissions to the bucket and read access to the file.
+ "uri": "A String", # Path to the import file in Cloud Storage, in the form *gs://bucketName/fileName*. Compressed gzip files (.gz) are supported when *fileType* is *SQL*. The instance must have write permissions to the bucket and read access to the file.
},
"insertTime": "A String", # The time this operation was enqueued in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
"kind": "A String", # This is always *sql#operation*.
@@ -386,7 +394,11 @@
Returns:
An object of the form:
- { # An Operation resource. For successful operations that return an Operation resource, only the fields relevant to the operation are populated in the resource.
+ { # An Operation resource. For successful operations that return an Operation resource, only the fields relevant to the operation are populated in the resource. Next field: 18
+ "backupContext": { # Backup context. # The context for backup operation, if applicable.
+ "backupId": "A String", # The identifier of the backup.
+ "kind": "A String", # This is always *sql#backupContext*.
+ },
"endTime": "A String", # The time this operation finished in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
"error": { # Database instance operation errors list wrapper. # If errors occurred during processing of this operation, this field will be populated.
"errors": [ # The list of errors encountered while processing this operation.
@@ -399,25 +411,25 @@
"kind": "A String", # This is always *sql#operationErrors*.
},
"exportContext": { # Database instance export context. # The context for export operation, if applicable.
- "csvExportOptions": { # Options for exporting data as CSV.
+ "csvExportOptions": { # Options for exporting data as CSV. *MySQL* and *PostgreSQL* instances only.
"selectQuery": "A String", # The select query used to extract the data.
},
"databases": [ # Databases to be exported. *MySQL instances:* If *fileType* is *SQL* and no database is specified, all databases are exported, except for the *mysql* system database. If *fileType* is *CSV*, you can specify one database, either by using this property or by using the *csvExportOptions.selectQuery* property, which takes precedence over this property. *PostgreSQL instances:* You must specify one database to be exported. If *fileType* is *CSV*, this database must match the one specified in the *csvExportOptions.selectQuery* property.
"A String",
],
- "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data.
+ "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data. *BAK*: The file contains backup data for a SQL Server instance.
"kind": "A String", # This is always *sql#exportContext*.
"offload": True or False, # Option for export offload.
"sqlExportOptions": { # Options for exporting data as SQL statements.
"mysqlExportOptions": { # Options for exporting from MySQL.
- "masterData": 42, # Option to include SQL statement required to set up replication. If set to *1*, the dump file includes a CHANGE MASTER TO statement with the binary log coordinates. If set to *2*, the CHANGE MASTER TO statement is written as a SQL comment, and has no effect. All other values are ignored.
+ "masterData": 42, # Option to include SQL statement required to set up replication. If set to *1*, the dump file includes a CHANGE MASTER TO statement with the binary log coordinates, and --set-gtid-purged is set to ON. If set to *2*, the CHANGE MASTER TO statement is written as a SQL comment and has no effect. If set to any value other than *1*, --set-gtid-purged is set to OFF.
},
"schemaOnly": True or False, # Export only schemas.
"tables": [ # Tables to export, or that were exported, from the specified database. If you specify tables, specify one and only one database. For PostgreSQL instances, you can specify only one table.
"A String",
],
},
- "uri": "A String", # The path to the file in Google Cloud Storage where the export will be stored. The URI is in the form *gs: //bucketName/fileName*. If the file already exists, the requests // succeeds, but the operation fails. If *fileType* is // *SQL* and the filename ends with .gz, the contents are // compressed.
+ "uri": "A String", # The path to the file in Google Cloud Storage where the export will be stored. The URI is in the form *gs://bucketName/fileName*. If the file already exists, the request succeeds, but the operation fails. If *fileType* is *SQL* and the filename ends with .gz, the contents are compressed.
},
"importContext": { # Database instance import context. # The context for import operation, if applicable.
"bakImportOptions": { # Import parameters specific to SQL Server .BAK files
@@ -437,7 +449,7 @@
"fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data.
"importUser": "A String", # The PostgreSQL user for this import operation. PostgreSQL instances only.
"kind": "A String", # This is always *sql#importContext*.
- "uri": "A String", # Path to the import file in Cloud Storage, in the form *gs: //bucketName/fileName*. Compressed gzip files (.gz) are supported // when *fileType* is *SQL*. The instance must have // write permissions to the bucket and read access to the file.
+ "uri": "A String", # Path to the import file in Cloud Storage, in the form *gs://bucketName/fileName*. Compressed gzip files (.gz) are supported when *fileType* is *SQL*. The instance must have write permissions to the bucket and read access to the file.
},
"insertTime": "A String", # The time this operation was enqueued in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
"kind": "A String", # This is always *sql#operation*.
@@ -487,7 +499,11 @@
Returns:
An object of the form:
- { # An Operation resource. For successful operations that return an Operation resource, only the fields relevant to the operation are populated in the resource.
+ { # An Operation resource. For successful operations that return an Operation resource, only the fields relevant to the operation are populated in the resource. Next field: 18
+ "backupContext": { # Backup context. # The context for backup operation, if applicable.
+ "backupId": "A String", # The identifier of the backup.
+ "kind": "A String", # This is always *sql#backupContext*.
+ },
"endTime": "A String", # The time this operation finished in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
"error": { # Database instance operation errors list wrapper. # If errors occurred during processing of this operation, this field will be populated.
"errors": [ # The list of errors encountered while processing this operation.
@@ -500,25 +516,25 @@
"kind": "A String", # This is always *sql#operationErrors*.
},
"exportContext": { # Database instance export context. # The context for export operation, if applicable.
- "csvExportOptions": { # Options for exporting data as CSV.
+ "csvExportOptions": { # Options for exporting data as CSV. *MySQL* and *PostgreSQL* instances only.
"selectQuery": "A String", # The select query used to extract the data.
},
"databases": [ # Databases to be exported. *MySQL instances:* If *fileType* is *SQL* and no database is specified, all databases are exported, except for the *mysql* system database. If *fileType* is *CSV*, you can specify one database, either by using this property or by using the *csvExportOptions.selectQuery* property, which takes precedence over this property. *PostgreSQL instances:* You must specify one database to be exported. If *fileType* is *CSV*, this database must match the one specified in the *csvExportOptions.selectQuery* property.
"A String",
],
- "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data.
+ "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data. *BAK*: The file contains backup data for a SQL Server instance.
"kind": "A String", # This is always *sql#exportContext*.
"offload": True or False, # Option for export offload.
"sqlExportOptions": { # Options for exporting data as SQL statements.
"mysqlExportOptions": { # Options for exporting from MySQL.
- "masterData": 42, # Option to include SQL statement required to set up replication. If set to *1*, the dump file includes a CHANGE MASTER TO statement with the binary log coordinates. If set to *2*, the CHANGE MASTER TO statement is written as a SQL comment, and has no effect. All other values are ignored.
+ "masterData": 42, # Option to include SQL statement required to set up replication. If set to *1*, the dump file includes a CHANGE MASTER TO statement with the binary log coordinates, and --set-gtid-purged is set to ON. If set to *2*, the CHANGE MASTER TO statement is written as a SQL comment and has no effect. If set to any value other than *1*, --set-gtid-purged is set to OFF.
},
"schemaOnly": True or False, # Export only schemas.
"tables": [ # Tables to export, or that were exported, from the specified database. If you specify tables, specify one and only one database. For PostgreSQL instances, you can specify only one table.
"A String",
],
},
- "uri": "A String", # The path to the file in Google Cloud Storage where the export will be stored. The URI is in the form *gs: //bucketName/fileName*. If the file already exists, the requests // succeeds, but the operation fails. If *fileType* is // *SQL* and the filename ends with .gz, the contents are // compressed.
+ "uri": "A String", # The path to the file in Google Cloud Storage where the export will be stored. The URI is in the form *gs://bucketName/fileName*. If the file already exists, the request succeeds, but the operation fails. If *fileType* is *SQL* and the filename ends with .gz, the contents are compressed.
},
"importContext": { # Database instance import context. # The context for import operation, if applicable.
"bakImportOptions": { # Import parameters specific to SQL Server .BAK files
@@ -538,7 +554,7 @@
"fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data.
"importUser": "A String", # The PostgreSQL user for this import operation. PostgreSQL instances only.
"kind": "A String", # This is always *sql#importContext*.
- "uri": "A String", # Path to the import file in Cloud Storage, in the form *gs: //bucketName/fileName*. Compressed gzip files (.gz) are supported // when *fileType* is *SQL*. The instance must have // write permissions to the bucket and read access to the file.
+ "uri": "A String", # Path to the import file in Cloud Storage, in the form *gs://bucketName/fileName*. Compressed gzip files (.gz) are supported when *fileType* is *SQL*. The instance must have write permissions to the bucket and read access to the file.
},
"insertTime": "A String", # The time this operation was enqueued in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
"kind": "A String", # This is always *sql#operation*.