chore: update docs/dyn (#1162)
This PR was generated using Autosynth. :rainbow:
Synth log will be available here:
https://source.cloud.google.com/results/invocations/b5e48daa-1759-436b-9fe7-ffce1482b520/targets
- [ ] To automatically regenerate this PR, check this box.
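For reference, the regenerated page reorders the keyword parameters of `users().update()` (now `body`, `host`, `name`) and alphabetizes the documented fields of the `Operation` and `User` resources. Below is a minimal sketch of calling the method through the generated client; the project, instance, and user values are placeholders, and it assumes Application Default Credentials are configured. Because the generated method takes these as keyword arguments, the reordering is cosmetic for callers that already pass `host` and `name` by name.

```python
# Minimal sketch of users().update() with the keyword parameters documented
# in the regenerated page. All identifiers below are placeholders.
from googleapiclient import discovery

service = discovery.build("sqladmin", "v1beta4")

user_body = {
    "name": "app_user",      # user to update; also passed as the `name` query parameter
    "host": "%",             # MySQL host pattern; not updatable after insertion
    "password": "new-password",
}

operation = (
    service.users()
    .update(
        project="my-project",      # placeholder project ID
        instance="my-instance",    # placeholder Cloud SQL instance name
        host="%",                  # optional, per the regenerated docs
        name="app_user",
        body=user_body,
    )
    .execute()
)

# Per the Operation resource in the diff below, status is one of
# PENDING, RUNNING, or DONE.
print(operation["status"])
```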
diff --git a/docs/dyn/sqladmin_v1beta4.users.html b/docs/dyn/sqladmin_v1beta4.users.html
index ccac361..132c862 100644
--- a/docs/dyn/sqladmin_v1beta4.users.html
+++ b/docs/dyn/sqladmin_v1beta4.users.html
@@ -87,7 +87,7 @@
<code><a href="#list">list(project, instance, x__xgafv=None)</a></code></p>
<p class="firstline">Lists users in the specified Cloud SQL instance.</p>
<p class="toc_element">
- <code><a href="#update">update(project, instance, body=None, name=None, host=None, x__xgafv=None)</a></code></p>
+ <code><a href="#update">update(project, instance, body=None, host=None, name=None, x__xgafv=None)</a></code></p>
<p class="firstline">Updates an existing user in a Cloud SQL instance.</p>
<h3>Method Details</h3>
<div class="method">
@@ -112,75 +112,71 @@
Returns:
An object of the form:
- { # An Operation resource. For successful operations that return an Operation resource, only the fields relevant to the operation are populated in the resource. Next field: 18
- "error": { # Database instance operation errors list wrapper. # If errors occurred during processing of this operation, this field will be populated.
- "errors": [ # The list of errors encountered while processing this operation.
- { # Database instance operation error.
- "code": "A String", # Identifies the specific error that occurred.
- "kind": "A String", # This is always *sql#operationError*.
- "message": "A String", # Additional information about the error encountered.
- },
- ],
- "kind": "A String", # This is always *sql#operationErrors*.
+ { # An Operation resource. For successful operations that return an Operation resource, only the fields relevant to the operation are populated in the resource.
+ "endTime": "A String", # The time this operation finished in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
+ "error": { # Database instance operation errors list wrapper. # If errors occurred during processing of this operation, this field will be populated.
+ "errors": [ # The list of errors encountered while processing this operation.
+ { # Database instance operation error.
+ "code": "A String", # Identifies the specific error that occurred.
+ "kind": "A String", # This is always *sql#operationError*.
+ "message": "A String", # Additional information about the error encountered.
+ },
+ ],
+ "kind": "A String", # This is always *sql#operationErrors*.
+ },
+ "exportContext": { # Database instance export context. # The context for export operation, if applicable.
+ "csvExportOptions": { # Options for exporting data as CSV.
+ "selectQuery": "A String", # The select query used to extract the data.
},
- "selfLink": "A String", # The URI of this resource.
- "exportContext": { # Database instance export context. # The context for export operation, if applicable.
- "csvExportOptions": { # Options for exporting data as CSV. *MySQL* and *PostgreSQL* instances only.
- "selectQuery": "A String", # The select query used to extract the data.
+ "databases": [ # Databases to be exported. *MySQL instances:* If *fileType* is *SQL* and no database is specified, all databases are exported, except for the *mysql* system database. If *fileType* is *CSV*, you can specify one database, either by using this property or by using the *csvExportOptions.selectQuery* property, which takes precedence over this property. *PostgreSQL instances:* You must specify one database to be exported. If *fileType* is *CSV*, this database must match the one specified in the *csvExportOptions.selectQuery* property.
+ "A String",
+ ],
+ "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data.
+ "kind": "A String", # This is always *sql#exportContext*.
+ "offload": True or False, # Option for export offload.
+ "sqlExportOptions": { # Options for exporting data as SQL statements.
+ "mysqlExportOptions": { # Options for exporting from MySQL.
+ "masterData": 42, # Option to include SQL statement required to set up replication. If set to *1*, the dump file includes a CHANGE MASTER TO statement with the binary log coordinates. If set to *2*, the CHANGE MASTER TO statement is written as a SQL comment, and has no effect. All other values are ignored.
},
- "offload": True or False, # Option for export offload.
- "sqlExportOptions": { # Options for exporting data as SQL statements.
- "schemaOnly": True or False, # Export only schemas.
- "mysqlExportOptions": { # Options for exporting from MySQL.
- "masterData": 42, # Option to include SQL statement required to set up replication. If set to *1*, the dump file includes a CHANGE MASTER TO statement with the binary log coordinates, and --set-gtid-purged is set to ON. If set to *2*, the CHANGE MASTER TO statement is written as a SQL comment and has no effect. If set to any value other than *1*, --set-gtid-purged is set to OFF.
- },
- "tables": [ # Tables to export, or that were exported, from the specified database. If you specify tables, specify one and only one database. For PostgreSQL instances, you can specify only one table.
- "A String",
- ],
- },
- "kind": "A String", # This is always *sql#exportContext*.
- "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data. *BAK*: The file contains backup data for a SQL Server instance.
- "uri": "A String", # The path to the file in Google Cloud Storage where the export will be stored. The URI is in the form *gs: //bucketName/fileName*. If the file already exists, the requests // succeeds, but the operation fails. If *fileType* is // *SQL* and the filename ends with .gz, the contents are // compressed.
- "databases": [ # Databases to be exported. *MySQL instances:* If *fileType* is *SQL* and no database is specified, all databases are exported, except for the *mysql* system database. If *fileType* is *CSV*, you can specify one database, either by using this property or by using the *csvExportOptions.selectQuery* property, which takes precedence over this property. *PostgreSQL instances:* You must specify one database to be exported. If *fileType* is *CSV*, this database must match the one specified in the *csvExportOptions.selectQuery* property.
+ "schemaOnly": True or False, # Export only schemas.
+ "tables": [ # Tables to export, or that were exported, from the specified database. If you specify tables, specify one and only one database. For PostgreSQL instances, you can specify only one table.
"A String",
],
},
- "endTime": "A String", # The time this operation finished in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
- "targetProject": "A String", # The project ID of the target instance related to this operation.
- "name": "A String", # An identifier that uniquely identifies the operation. You can use this identifier to retrieve the Operations resource that has information about the operation.
- "insertTime": "A String", # The time this operation was enqueued in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
- "kind": "A String", # This is always *sql#operation*.
- "startTime": "A String", # The time this operation actually started in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
- "user": "A String", # The email address of the user who initiated this operation.
- "backupContext": { # Backup context. # The context for backup operation, if applicable.
- "kind": "A String", # This is always *sql#backupContext*.
- "backupId": "A String", # The identifier of the backup.
- },
- "status": "A String", # The status of an operation. Valid values are: *PENDING* *RUNNING* *DONE* *SQL_OPERATION_STATUS_UNSPECIFIED*
- "targetLink": "A String",
- "operationType": "A String", # The type of the operation. Valid values are: *CREATE* *DELETE* *UPDATE* *RESTART* *IMPORT* *EXPORT* *BACKUP_VOLUME* *RESTORE_VOLUME* *CREATE_USER* *DELETE_USER* *CREATE_DATABASE* *DELETE_DATABASE*
- "targetId": "A String", # Name of the database instance related to this operation.
- "importContext": { # Database instance import context. # The context for import operation, if applicable.
- "importUser": "A String", # The PostgreSQL user for this import operation. PostgreSQL instances only.
- "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data.
- "csvImportOptions": { # Options for importing data as CSV.
- "table": "A String", # The table to which CSV data is imported.
- "columns": [ # The columns to which CSV data is imported. If not specified, all columns of the database table are loaded with CSV data.
- "A String",
- ],
- },
- "kind": "A String", # This is always *sql#importContext*.
- "uri": "A String", # Path to the import file in Cloud Storage, in the form *gs: //bucketName/fileName*. Compressed gzip files (.gz) are supported // when *fileType* is *SQL*. The instance must have // write permissions to the bucket and read access to the file.
- "database": "A String", # The target database for the import. If *fileType* is *SQL*, this field is required only if the import file does not specify a database, and is overridden by any database specification in the import file. If *fileType* is *CSV*, one database must be specified.
- "bakImportOptions": { # Import parameters specific to SQL Server .BAK files
- "encryptionOptions": {
- "pvkPassword": "A String", # Password that encrypts the private key
- "pvkPath": "A String", # Path to the Certificate Private Key (.pvk) in Cloud Storage, in the form *gs://bucketName/fileName*. The instance must have write permissions to the bucket and read access to the file.
- "certPath": "A String", # Path to the Certificate (.cer) in Cloud Storage, in the form *gs://bucketName/fileName*. The instance must have write permissions to the bucket and read access to the file.
- },
+ "uri": "A String", # The path to the file in Google Cloud Storage where the export will be stored. The URI is in the form *gs: //bucketName/fileName*. If the file already exists, the requests // succeeds, but the operation fails. If *fileType* is // *SQL* and the filename ends with .gz, the contents are // compressed.
+ },
+ "importContext": { # Database instance import context. # The context for import operation, if applicable.
+ "bakImportOptions": { # Import parameters specific to SQL Server .BAK files
+ "encryptionOptions": {
+ "certPath": "A String", # Path to the Certificate (.cer) in Cloud Storage, in the form *gs://bucketName/fileName*. The instance must have write permissions to the bucket and read access to the file.
+ "pvkPassword": "A String", # Password that encrypts the private key
+ "pvkPath": "A String", # Path to the Certificate Private Key (.pvk) in Cloud Storage, in the form *gs://bucketName/fileName*. The instance must have write permissions to the bucket and read access to the file.
},
},
- }</pre>
+ "csvImportOptions": { # Options for importing data as CSV.
+ "columns": [ # The columns to which CSV data is imported. If not specified, all columns of the database table are loaded with CSV data.
+ "A String",
+ ],
+ "table": "A String", # The table to which CSV data is imported.
+ },
+ "database": "A String", # The target database for the import. If *fileType* is *SQL*, this field is required only if the import file does not specify a database, and is overridden by any database specification in the import file. If *fileType* is *CSV*, one database must be specified.
+ "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data.
+ "importUser": "A String", # The PostgreSQL user for this import operation. PostgreSQL instances only.
+ "kind": "A String", # This is always *sql#importContext*.
+ "uri": "A String", # Path to the import file in Cloud Storage, in the form *gs: //bucketName/fileName*. Compressed gzip files (.gz) are supported // when *fileType* is *SQL*. The instance must have // write permissions to the bucket and read access to the file.
+ },
+ "insertTime": "A String", # The time this operation was enqueued in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
+ "kind": "A String", # This is always *sql#operation*.
+ "name": "A String", # An identifier that uniquely identifies the operation. You can use this identifier to retrieve the Operations resource that has information about the operation.
+ "operationType": "A String", # The type of the operation. Valid values are: *CREATE* *DELETE* *UPDATE* *RESTART* *IMPORT* *EXPORT* *BACKUP_VOLUME* *RESTORE_VOLUME* *CREATE_USER* *DELETE_USER* *CREATE_DATABASE* *DELETE_DATABASE*
+ "selfLink": "A String", # The URI of this resource.
+ "startTime": "A String", # The time this operation actually started in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
+ "status": "A String", # The status of an operation. Valid values are: *PENDING* *RUNNING* *DONE* *SQL_OPERATION_STATUS_UNSPECIFIED*
+ "targetId": "A String", # Name of the database instance related to this operation.
+ "targetLink": "A String",
+ "targetProject": "A String", # The project ID of the target instance related to this operation.
+ "user": "A String", # The email address of the user who initiated this operation.
+}</pre>
</div>
<div class="method">
@@ -194,21 +190,21 @@
The object takes the form of:
{ # A Cloud SQL user resource.
- "host": "A String", # The host name from which the user can connect. For *insert* operations, host defaults to an empty string. For *update* operations, host is specified as part of the request URL. The host name cannot be updated after insertion.
- "password": "A String", # The password for the user.
- "project": "A String", # The project ID of the project containing the Cloud SQL database. The Google apps domain is prefixed if applicable. Can be omitted for *update* since it is already specified on the URL.
- "etag": "A String", # This field is deprecated and will be removed from a future version of the API.
- "type": "A String", # The user type. It determines the method to authenticate the user during login. The default is the database's built-in user type.
- "sqlserverUserDetails": { # Represents a Sql Server user on the Cloud SQL instance.
- "serverRoles": [ # The server roles for this user
- "A String",
- ],
- "disabled": True or False, # If the user has been disabled
- },
- "instance": "A String", # The name of the Cloud SQL instance. This does not include the project ID. Can be omitted for *update* since it is already specified on the URL.
- "kind": "A String", # This is always *sql#user*.
- "name": "A String", # The name of the user in the Cloud SQL instance. Can be omitted for *update* since it is already specified in the URL.
- }
+ "etag": "A String", # This field is deprecated and will be removed from a future version of the API.
+ "host": "A String", # The host name from which the user can connect. For *insert* operations, host defaults to an empty string. For *update* operations, host is specified as part of the request URL. The host name cannot be updated after insertion.
+ "instance": "A String", # The name of the Cloud SQL instance. This does not include the project ID. Can be omitted for *update* since it is already specified on the URL.
+ "kind": "A String", # This is always *sql#user*.
+ "name": "A String", # The name of the user in the Cloud SQL instance. Can be omitted for *update* since it is already specified in the URL.
+ "password": "A String", # The password for the user.
+ "project": "A String", # The project ID of the project containing the Cloud SQL database. The Google apps domain is prefixed if applicable. Can be omitted for *update* since it is already specified on the URL.
+ "sqlserverUserDetails": { # Represents a Sql Server user on the Cloud SQL instance.
+ "disabled": True or False, # If the user has been disabled
+ "serverRoles": [ # The server roles for this user
+ "A String",
+ ],
+ },
+ "type": "A String", # The user type. It determines the method to authenticate the user during login. The default is the database's built-in user type.
+}
x__xgafv: string, V1 error format.
Allowed values
@@ -218,75 +214,71 @@
Returns:
An object of the form:
- { # An Operation resource. For successful operations that return an Operation resource, only the fields relevant to the operation are populated in the resource. Next field: 18
- "error": { # Database instance operation errors list wrapper. # If errors occurred during processing of this operation, this field will be populated.
- "errors": [ # The list of errors encountered while processing this operation.
- { # Database instance operation error.
- "code": "A String", # Identifies the specific error that occurred.
- "kind": "A String", # This is always *sql#operationError*.
- "message": "A String", # Additional information about the error encountered.
- },
- ],
- "kind": "A String", # This is always *sql#operationErrors*.
+ { # An Operation resource. For successful operations that return an Operation resource, only the fields relevant to the operation are populated in the resource.
+ "endTime": "A String", # The time this operation finished in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
+ "error": { # Database instance operation errors list wrapper. # If errors occurred during processing of this operation, this field will be populated.
+ "errors": [ # The list of errors encountered while processing this operation.
+ { # Database instance operation error.
+ "code": "A String", # Identifies the specific error that occurred.
+ "kind": "A String", # This is always *sql#operationError*.
+ "message": "A String", # Additional information about the error encountered.
+ },
+ ],
+ "kind": "A String", # This is always *sql#operationErrors*.
+ },
+ "exportContext": { # Database instance export context. # The context for export operation, if applicable.
+ "csvExportOptions": { # Options for exporting data as CSV.
+ "selectQuery": "A String", # The select query used to extract the data.
},
- "selfLink": "A String", # The URI of this resource.
- "exportContext": { # Database instance export context. # The context for export operation, if applicable.
- "csvExportOptions": { # Options for exporting data as CSV. *MySQL* and *PostgreSQL* instances only.
- "selectQuery": "A String", # The select query used to extract the data.
+ "databases": [ # Databases to be exported. *MySQL instances:* If *fileType* is *SQL* and no database is specified, all databases are exported, except for the *mysql* system database. If *fileType* is *CSV*, you can specify one database, either by using this property or by using the *csvExportOptions.selectQuery* property, which takes precedence over this property. *PostgreSQL instances:* You must specify one database to be exported. If *fileType* is *CSV*, this database must match the one specified in the *csvExportOptions.selectQuery* property.
+ "A String",
+ ],
+ "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data.
+ "kind": "A String", # This is always *sql#exportContext*.
+ "offload": True or False, # Option for export offload.
+ "sqlExportOptions": { # Options for exporting data as SQL statements.
+ "mysqlExportOptions": { # Options for exporting from MySQL.
+ "masterData": 42, # Option to include SQL statement required to set up replication. If set to *1*, the dump file includes a CHANGE MASTER TO statement with the binary log coordinates. If set to *2*, the CHANGE MASTER TO statement is written as a SQL comment, and has no effect. All other values are ignored.
},
- "offload": True or False, # Option for export offload.
- "sqlExportOptions": { # Options for exporting data as SQL statements.
- "schemaOnly": True or False, # Export only schemas.
- "mysqlExportOptions": { # Options for exporting from MySQL.
- "masterData": 42, # Option to include SQL statement required to set up replication. If set to *1*, the dump file includes a CHANGE MASTER TO statement with the binary log coordinates, and --set-gtid-purged is set to ON. If set to *2*, the CHANGE MASTER TO statement is written as a SQL comment and has no effect. If set to any value other than *1*, --set-gtid-purged is set to OFF.
- },
- "tables": [ # Tables to export, or that were exported, from the specified database. If you specify tables, specify one and only one database. For PostgreSQL instances, you can specify only one table.
- "A String",
- ],
- },
- "kind": "A String", # This is always *sql#exportContext*.
- "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data. *BAK*: The file contains backup data for a SQL Server instance.
- "uri": "A String", # The path to the file in Google Cloud Storage where the export will be stored. The URI is in the form *gs: //bucketName/fileName*. If the file already exists, the requests // succeeds, but the operation fails. If *fileType* is // *SQL* and the filename ends with .gz, the contents are // compressed.
- "databases": [ # Databases to be exported. *MySQL instances:* If *fileType* is *SQL* and no database is specified, all databases are exported, except for the *mysql* system database. If *fileType* is *CSV*, you can specify one database, either by using this property or by using the *csvExportOptions.selectQuery* property, which takes precedence over this property. *PostgreSQL instances:* You must specify one database to be exported. If *fileType* is *CSV*, this database must match the one specified in the *csvExportOptions.selectQuery* property.
+ "schemaOnly": True or False, # Export only schemas.
+ "tables": [ # Tables to export, or that were exported, from the specified database. If you specify tables, specify one and only one database. For PostgreSQL instances, you can specify only one table.
"A String",
],
},
- "endTime": "A String", # The time this operation finished in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
- "targetProject": "A String", # The project ID of the target instance related to this operation.
- "name": "A String", # An identifier that uniquely identifies the operation. You can use this identifier to retrieve the Operations resource that has information about the operation.
- "insertTime": "A String", # The time this operation was enqueued in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
- "kind": "A String", # This is always *sql#operation*.
- "startTime": "A String", # The time this operation actually started in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
- "user": "A String", # The email address of the user who initiated this operation.
- "backupContext": { # Backup context. # The context for backup operation, if applicable.
- "kind": "A String", # This is always *sql#backupContext*.
- "backupId": "A String", # The identifier of the backup.
- },
- "status": "A String", # The status of an operation. Valid values are: *PENDING* *RUNNING* *DONE* *SQL_OPERATION_STATUS_UNSPECIFIED*
- "targetLink": "A String",
- "operationType": "A String", # The type of the operation. Valid values are: *CREATE* *DELETE* *UPDATE* *RESTART* *IMPORT* *EXPORT* *BACKUP_VOLUME* *RESTORE_VOLUME* *CREATE_USER* *DELETE_USER* *CREATE_DATABASE* *DELETE_DATABASE*
- "targetId": "A String", # Name of the database instance related to this operation.
- "importContext": { # Database instance import context. # The context for import operation, if applicable.
- "importUser": "A String", # The PostgreSQL user for this import operation. PostgreSQL instances only.
- "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data.
- "csvImportOptions": { # Options for importing data as CSV.
- "table": "A String", # The table to which CSV data is imported.
- "columns": [ # The columns to which CSV data is imported. If not specified, all columns of the database table are loaded with CSV data.
- "A String",
- ],
- },
- "kind": "A String", # This is always *sql#importContext*.
- "uri": "A String", # Path to the import file in Cloud Storage, in the form *gs: //bucketName/fileName*. Compressed gzip files (.gz) are supported // when *fileType* is *SQL*. The instance must have // write permissions to the bucket and read access to the file.
- "database": "A String", # The target database for the import. If *fileType* is *SQL*, this field is required only if the import file does not specify a database, and is overridden by any database specification in the import file. If *fileType* is *CSV*, one database must be specified.
- "bakImportOptions": { # Import parameters specific to SQL Server .BAK files
- "encryptionOptions": {
- "pvkPassword": "A String", # Password that encrypts the private key
- "pvkPath": "A String", # Path to the Certificate Private Key (.pvk) in Cloud Storage, in the form *gs://bucketName/fileName*. The instance must have write permissions to the bucket and read access to the file.
- "certPath": "A String", # Path to the Certificate (.cer) in Cloud Storage, in the form *gs://bucketName/fileName*. The instance must have write permissions to the bucket and read access to the file.
- },
+ "uri": "A String", # The path to the file in Google Cloud Storage where the export will be stored. The URI is in the form *gs: //bucketName/fileName*. If the file already exists, the requests // succeeds, but the operation fails. If *fileType* is // *SQL* and the filename ends with .gz, the contents are // compressed.
+ },
+ "importContext": { # Database instance import context. # The context for import operation, if applicable.
+ "bakImportOptions": { # Import parameters specific to SQL Server .BAK files
+ "encryptionOptions": {
+ "certPath": "A String", # Path to the Certificate (.cer) in Cloud Storage, in the form *gs://bucketName/fileName*. The instance must have write permissions to the bucket and read access to the file.
+ "pvkPassword": "A String", # Password that encrypts the private key
+ "pvkPath": "A String", # Path to the Certificate Private Key (.pvk) in Cloud Storage, in the form *gs://bucketName/fileName*. The instance must have write permissions to the bucket and read access to the file.
},
},
- }</pre>
+ "csvImportOptions": { # Options for importing data as CSV.
+ "columns": [ # The columns to which CSV data is imported. If not specified, all columns of the database table are loaded with CSV data.
+ "A String",
+ ],
+ "table": "A String", # The table to which CSV data is imported.
+ },
+ "database": "A String", # The target database for the import. If *fileType* is *SQL*, this field is required only if the import file does not specify a database, and is overridden by any database specification in the import file. If *fileType* is *CSV*, one database must be specified.
+ "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data.
+ "importUser": "A String", # The PostgreSQL user for this import operation. PostgreSQL instances only.
+ "kind": "A String", # This is always *sql#importContext*.
+ "uri": "A String", # Path to the import file in Cloud Storage, in the form *gs: //bucketName/fileName*. Compressed gzip files (.gz) are supported // when *fileType* is *SQL*. The instance must have // write permissions to the bucket and read access to the file.
+ },
+ "insertTime": "A String", # The time this operation was enqueued in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
+ "kind": "A String", # This is always *sql#operation*.
+ "name": "A String", # An identifier that uniquely identifies the operation. You can use this identifier to retrieve the Operations resource that has information about the operation.
+ "operationType": "A String", # The type of the operation. Valid values are: *CREATE* *DELETE* *UPDATE* *RESTART* *IMPORT* *EXPORT* *BACKUP_VOLUME* *RESTORE_VOLUME* *CREATE_USER* *DELETE_USER* *CREATE_DATABASE* *DELETE_DATABASE*
+ "selfLink": "A String", # The URI of this resource.
+ "startTime": "A String", # The time this operation actually started in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
+ "status": "A String", # The status of an operation. Valid values are: *PENDING* *RUNNING* *DONE* *SQL_OPERATION_STATUS_UNSPECIFIED*
+ "targetId": "A String", # Name of the database instance related to this operation.
+ "targetLink": "A String",
+ "targetProject": "A String", # The project ID of the target instance related to this operation.
+ "user": "A String", # The email address of the user who initiated this operation.
+}</pre>
</div>
<div class="method">
@@ -305,31 +297,31 @@
An object of the form:
{ # User list response.
- "nextPageToken": "A String", # An identifier that uniquely identifies the operation. You can use this identifier to retrieve the Operations resource that has information about the operation.
- "items": [ # List of user resources in the instance.
- { # A Cloud SQL user resource.
- "host": "A String", # The host name from which the user can connect. For *insert* operations, host defaults to an empty string. For *update* operations, host is specified as part of the request URL. The host name cannot be updated after insertion.
- "password": "A String", # The password for the user.
- "project": "A String", # The project ID of the project containing the Cloud SQL database. The Google apps domain is prefixed if applicable. Can be omitted for *update* since it is already specified on the URL.
- "etag": "A String", # This field is deprecated and will be removed from a future version of the API.
- "type": "A String", # The user type. It determines the method to authenticate the user during login. The default is the database's built-in user type.
- "sqlserverUserDetails": { # Represents a Sql Server user on the Cloud SQL instance.
- "serverRoles": [ # The server roles for this user
- "A String",
- ],
- "disabled": True or False, # If the user has been disabled
- },
- "instance": "A String", # The name of the Cloud SQL instance. This does not include the project ID. Can be omitted for *update* since it is already specified on the URL.
- "kind": "A String", # This is always *sql#user*.
- "name": "A String", # The name of the user in the Cloud SQL instance. Can be omitted for *update* since it is already specified in the URL.
- },
- ],
- "kind": "A String", # This is always *sql#usersList*.
- }</pre>
+ "items": [ # List of user resources in the instance.
+ { # A Cloud SQL user resource.
+ "etag": "A String", # This field is deprecated and will be removed from a future version of the API.
+ "host": "A String", # The host name from which the user can connect. For *insert* operations, host defaults to an empty string. For *update* operations, host is specified as part of the request URL. The host name cannot be updated after insertion.
+ "instance": "A String", # The name of the Cloud SQL instance. This does not include the project ID. Can be omitted for *update* since it is already specified on the URL.
+ "kind": "A String", # This is always *sql#user*.
+ "name": "A String", # The name of the user in the Cloud SQL instance. Can be omitted for *update* since it is already specified in the URL.
+ "password": "A String", # The password for the user.
+ "project": "A String", # The project ID of the project containing the Cloud SQL database. The Google apps domain is prefixed if applicable. Can be omitted for *update* since it is already specified on the URL.
+ "sqlserverUserDetails": { # Represents a Sql Server user on the Cloud SQL instance.
+ "disabled": True or False, # If the user has been disabled
+ "serverRoles": [ # The server roles for this user
+ "A String",
+ ],
+ },
+ "type": "A String", # The user type. It determines the method to authenticate the user during login. The default is the database's built-in user type.
+ },
+ ],
+ "kind": "A String", # This is always *sql#usersList*.
+ "nextPageToken": "A String", # An identifier that uniquely identifies the operation. You can use this identifier to retrieve the Operations resource that has information about the operation.
+}</pre>
</div>
<div class="method">
- <code class="details" id="update">update(project, instance, body=None, name=None, host=None, x__xgafv=None)</code>
+ <code class="details" id="update">update(project, instance, body=None, host=None, name=None, x__xgafv=None)</code>
<pre>Updates an existing user in a Cloud SQL instance.
Args:
@@ -339,24 +331,24 @@
The object takes the form of:
{ # A Cloud SQL user resource.
- "host": "A String", # The host name from which the user can connect. For *insert* operations, host defaults to an empty string. For *update* operations, host is specified as part of the request URL. The host name cannot be updated after insertion.
- "password": "A String", # The password for the user.
- "project": "A String", # The project ID of the project containing the Cloud SQL database. The Google apps domain is prefixed if applicable. Can be omitted for *update* since it is already specified on the URL.
- "etag": "A String", # This field is deprecated and will be removed from a future version of the API.
- "type": "A String", # The user type. It determines the method to authenticate the user during login. The default is the database's built-in user type.
- "sqlserverUserDetails": { # Represents a Sql Server user on the Cloud SQL instance.
- "serverRoles": [ # The server roles for this user
- "A String",
- ],
- "disabled": True or False, # If the user has been disabled
- },
- "instance": "A String", # The name of the Cloud SQL instance. This does not include the project ID. Can be omitted for *update* since it is already specified on the URL.
- "kind": "A String", # This is always *sql#user*.
- "name": "A String", # The name of the user in the Cloud SQL instance. Can be omitted for *update* since it is already specified in the URL.
- }
+ "etag": "A String", # This field is deprecated and will be removed from a future version of the API.
+ "host": "A String", # The host name from which the user can connect. For *insert* operations, host defaults to an empty string. For *update* operations, host is specified as part of the request URL. The host name cannot be updated after insertion.
+ "instance": "A String", # The name of the Cloud SQL instance. This does not include the project ID. Can be omitted for *update* since it is already specified on the URL.
+ "kind": "A String", # This is always *sql#user*.
+ "name": "A String", # The name of the user in the Cloud SQL instance. Can be omitted for *update* since it is already specified in the URL.
+ "password": "A String", # The password for the user.
+ "project": "A String", # The project ID of the project containing the Cloud SQL database. The Google apps domain is prefixed if applicable. Can be omitted for *update* since it is already specified on the URL.
+ "sqlserverUserDetails": { # Represents a Sql Server user on the Cloud SQL instance.
+ "disabled": True or False, # If the user has been disabled
+ "serverRoles": [ # The server roles for this user
+ "A String",
+ ],
+ },
+ "type": "A String", # The user type. It determines the method to authenticate the user during login. The default is the database's built-in user type.
+}
- name: string, Name of the user in the instance.
host: string, Optional. Host of the user in the instance.
+ name: string, Name of the user in the instance.
x__xgafv: string, V1 error format.
Allowed values
1 - v1 error format
@@ -365,75 +357,71 @@
Returns:
An object of the form:
- { # An Operation resource. For successful operations that return an Operation resource, only the fields relevant to the operation are populated in the resource. Next field: 18
- "error": { # Database instance operation errors list wrapper. # If errors occurred during processing of this operation, this field will be populated.
- "errors": [ # The list of errors encountered while processing this operation.
- { # Database instance operation error.
- "code": "A String", # Identifies the specific error that occurred.
- "kind": "A String", # This is always *sql#operationError*.
- "message": "A String", # Additional information about the error encountered.
- },
- ],
- "kind": "A String", # This is always *sql#operationErrors*.
+ { # An Operation resource. For successful operations that return an Operation resource, only the fields relevant to the operation are populated in the resource.
+ "endTime": "A String", # The time this operation finished in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
+ "error": { # Database instance operation errors list wrapper. # If errors occurred during processing of this operation, this field will be populated.
+ "errors": [ # The list of errors encountered while processing this operation.
+ { # Database instance operation error.
+ "code": "A String", # Identifies the specific error that occurred.
+ "kind": "A String", # This is always *sql#operationError*.
+ "message": "A String", # Additional information about the error encountered.
+ },
+ ],
+ "kind": "A String", # This is always *sql#operationErrors*.
+ },
+ "exportContext": { # Database instance export context. # The context for export operation, if applicable.
+ "csvExportOptions": { # Options for exporting data as CSV.
+ "selectQuery": "A String", # The select query used to extract the data.
},
- "selfLink": "A String", # The URI of this resource.
- "exportContext": { # Database instance export context. # The context for export operation, if applicable.
- "csvExportOptions": { # Options for exporting data as CSV. *MySQL* and *PostgreSQL* instances only.
- "selectQuery": "A String", # The select query used to extract the data.
+ "databases": [ # Databases to be exported. *MySQL instances:* If *fileType* is *SQL* and no database is specified, all databases are exported, except for the *mysql* system database. If *fileType* is *CSV*, you can specify one database, either by using this property or by using the *csvExportOptions.selectQuery* property, which takes precedence over this property. *PostgreSQL instances:* You must specify one database to be exported. If *fileType* is *CSV*, this database must match the one specified in the *csvExportOptions.selectQuery* property.
+ "A String",
+ ],
+ "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data.
+ "kind": "A String", # This is always *sql#exportContext*.
+ "offload": True or False, # Option for export offload.
+ "sqlExportOptions": { # Options for exporting data as SQL statements.
+ "mysqlExportOptions": { # Options for exporting from MySQL.
+ "masterData": 42, # Option to include SQL statement required to set up replication. If set to *1*, the dump file includes a CHANGE MASTER TO statement with the binary log coordinates. If set to *2*, the CHANGE MASTER TO statement is written as a SQL comment, and has no effect. All other values are ignored.
},
- "offload": True or False, # Option for export offload.
- "sqlExportOptions": { # Options for exporting data as SQL statements.
- "schemaOnly": True or False, # Export only schemas.
- "mysqlExportOptions": { # Options for exporting from MySQL.
- "masterData": 42, # Option to include SQL statement required to set up replication. If set to *1*, the dump file includes a CHANGE MASTER TO statement with the binary log coordinates, and --set-gtid-purged is set to ON. If set to *2*, the CHANGE MASTER TO statement is written as a SQL comment and has no effect. If set to any value other than *1*, --set-gtid-purged is set to OFF.
- },
- "tables": [ # Tables to export, or that were exported, from the specified database. If you specify tables, specify one and only one database. For PostgreSQL instances, you can specify only one table.
- "A String",
- ],
- },
- "kind": "A String", # This is always *sql#exportContext*.
- "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data. *BAK*: The file contains backup data for a SQL Server instance.
- "uri": "A String", # The path to the file in Google Cloud Storage where the export will be stored. The URI is in the form *gs: //bucketName/fileName*. If the file already exists, the requests // succeeds, but the operation fails. If *fileType* is // *SQL* and the filename ends with .gz, the contents are // compressed.
- "databases": [ # Databases to be exported. *MySQL instances:* If *fileType* is *SQL* and no database is specified, all databases are exported, except for the *mysql* system database. If *fileType* is *CSV*, you can specify one database, either by using this property or by using the *csvExportOptions.selectQuery* property, which takes precedence over this property. *PostgreSQL instances:* You must specify one database to be exported. If *fileType* is *CSV*, this database must match the one specified in the *csvExportOptions.selectQuery* property.
+ "schemaOnly": True or False, # Export only schemas.
+ "tables": [ # Tables to export, or that were exported, from the specified database. If you specify tables, specify one and only one database. For PostgreSQL instances, you can specify only one table.
"A String",
],
},
- "endTime": "A String", # The time this operation finished in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
- "targetProject": "A String", # The project ID of the target instance related to this operation.
- "name": "A String", # An identifier that uniquely identifies the operation. You can use this identifier to retrieve the Operations resource that has information about the operation.
- "insertTime": "A String", # The time this operation was enqueued in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
- "kind": "A String", # This is always *sql#operation*.
- "startTime": "A String", # The time this operation actually started in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
- "user": "A String", # The email address of the user who initiated this operation.
- "backupContext": { # Backup context. # The context for backup operation, if applicable.
- "kind": "A String", # This is always *sql#backupContext*.
- "backupId": "A String", # The identifier of the backup.
- },
- "status": "A String", # The status of an operation. Valid values are: *PENDING* *RUNNING* *DONE* *SQL_OPERATION_STATUS_UNSPECIFIED*
- "targetLink": "A String",
- "operationType": "A String", # The type of the operation. Valid values are: *CREATE* *DELETE* *UPDATE* *RESTART* *IMPORT* *EXPORT* *BACKUP_VOLUME* *RESTORE_VOLUME* *CREATE_USER* *DELETE_USER* *CREATE_DATABASE* *DELETE_DATABASE*
- "targetId": "A String", # Name of the database instance related to this operation.
- "importContext": { # Database instance import context. # The context for import operation, if applicable.
- "importUser": "A String", # The PostgreSQL user for this import operation. PostgreSQL instances only.
- "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data.
- "csvImportOptions": { # Options for importing data as CSV.
- "table": "A String", # The table to which CSV data is imported.
- "columns": [ # The columns to which CSV data is imported. If not specified, all columns of the database table are loaded with CSV data.
- "A String",
- ],
- },
- "kind": "A String", # This is always *sql#importContext*.
- "uri": "A String", # Path to the import file in Cloud Storage, in the form *gs: //bucketName/fileName*. Compressed gzip files (.gz) are supported // when *fileType* is *SQL*. The instance must have // write permissions to the bucket and read access to the file.
- "database": "A String", # The target database for the import. If *fileType* is *SQL*, this field is required only if the import file does not specify a database, and is overridden by any database specification in the import file. If *fileType* is *CSV*, one database must be specified.
- "bakImportOptions": { # Import parameters specific to SQL Server .BAK files
- "encryptionOptions": {
- "pvkPassword": "A String", # Password that encrypts the private key
- "pvkPath": "A String", # Path to the Certificate Private Key (.pvk) in Cloud Storage, in the form *gs://bucketName/fileName*. The instance must have write permissions to the bucket and read access to the file.
- "certPath": "A String", # Path to the Certificate (.cer) in Cloud Storage, in the form *gs://bucketName/fileName*. The instance must have write permissions to the bucket and read access to the file.
- },
+ "uri": "A String", # The path to the file in Google Cloud Storage where the export will be stored. The URI is in the form *gs: //bucketName/fileName*. If the file already exists, the requests // succeeds, but the operation fails. If *fileType* is // *SQL* and the filename ends with .gz, the contents are // compressed.
+ },
+ "importContext": { # Database instance import context. # The context for import operation, if applicable.
+ "bakImportOptions": { # Import parameters specific to SQL Server .BAK files
+ "encryptionOptions": {
+ "certPath": "A String", # Path to the Certificate (.cer) in Cloud Storage, in the form *gs://bucketName/fileName*. The instance must have write permissions to the bucket and read access to the file.
+ "pvkPassword": "A String", # Password that encrypts the private key
+ "pvkPath": "A String", # Path to the Certificate Private Key (.pvk) in Cloud Storage, in the form *gs://bucketName/fileName*. The instance must have write permissions to the bucket and read access to the file.
},
},
- }</pre>
+ "csvImportOptions": { # Options for importing data as CSV.
+ "columns": [ # The columns to which CSV data is imported. If not specified, all columns of the database table are loaded with CSV data.
+ "A String",
+ ],
+ "table": "A String", # The table to which CSV data is imported.
+ },
+ "database": "A String", # The target database for the import. If *fileType* is *SQL*, this field is required only if the import file does not specify a database, and is overridden by any database specification in the import file. If *fileType* is *CSV*, one database must be specified.
+ "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data.
+ "importUser": "A String", # The PostgreSQL user for this import operation. PostgreSQL instances only.
+ "kind": "A String", # This is always *sql#importContext*.
+ "uri": "A String", # Path to the import file in Cloud Storage, in the form *gs: //bucketName/fileName*. Compressed gzip files (.gz) are supported // when *fileType* is *SQL*. The instance must have // write permissions to the bucket and read access to the file.
+ },
+ "insertTime": "A String", # The time this operation was enqueued in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
+ "kind": "A String", # This is always *sql#operation*.
+ "name": "A String", # An identifier that uniquely identifies the operation. You can use this identifier to retrieve the Operations resource that has information about the operation.
+ "operationType": "A String", # The type of the operation. Valid values are: *CREATE* *DELETE* *UPDATE* *RESTART* *IMPORT* *EXPORT* *BACKUP_VOLUME* *RESTORE_VOLUME* *CREATE_USER* *DELETE_USER* *CREATE_DATABASE* *DELETE_DATABASE*
+ "selfLink": "A String", # The URI of this resource.
+ "startTime": "A String", # The time this operation actually started in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
+ "status": "A String", # The status of an operation. Valid values are: *PENDING* *RUNNING* *DONE* *SQL_OPERATION_STATUS_UNSPECIFIED*
+ "targetId": "A String", # Name of the database instance related to this operation.
+ "targetLink": "A String",
+ "targetProject": "A String", # The project ID of the target instance related to this operation.
+ "user": "A String", # The email address of the user who initiated this operation.
+}</pre>
</div>
</body></html>
\ No newline at end of file
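The user mutation methods documented above return the `Operation` resource shown in the diff, whose `status` moves through *PENDING*, *RUNNING*, and *DONE*. A small polling sketch follows, reusing the `service` object from the earlier example; `operations().get()` and its `operation` parameter are assumed from the same sqladmin_v1beta4 surface, and the identifiers are placeholders.

```python
# Hedged sketch: poll a Cloud SQL Operation until it reaches DONE.
import time

def wait_for_operation(service, project, operation_name, poll_seconds=2):
    """Poll a Cloud SQL operation until its status is DONE, then return it."""
    while True:
        op = (
            service.operations()
            .get(project=project, operation=operation_name)
            .execute()
        )
        if op.get("status") == "DONE":
            # Per the docs above, error.errors is populated only on failure.
            if "error" in op:
                raise RuntimeError(op["error"].get("errors", []))
            return op
        time.sleep(poll_seconds)

# Example usage (placeholders):
# completed = wait_for_operation(service, "my-project", operation["name"])
```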