chore: update docs/dyn (#1106)
diff --git a/docs/dyn/sqladmin_v1beta4.users.html b/docs/dyn/sqladmin_v1beta4.users.html
index d6e8b29..e339f1e 100644
--- a/docs/dyn/sqladmin_v1beta4.users.html
+++ b/docs/dyn/sqladmin_v1beta4.users.html
@@ -78,7 +78,7 @@
<code><a href="#close">close()</a></code></p>
<p class="firstline">Close httplib2 connections.</p>
<p class="toc_element">
- <code><a href="#delete">delete(project, instance, name=None, host=None, x__xgafv=None)</a></code></p>
+ <code><a href="#delete">delete(project, instance, host=None, name=None, x__xgafv=None)</a></code></p>
<p class="firstline">Deletes a user from a Cloud SQL instance.</p>
<p class="toc_element">
<code><a href="#insert">insert(project, instance, body=None, x__xgafv=None)</a></code></p>
@@ -96,14 +96,14 @@
</div>
<div class="method">
- <code class="details" id="delete">delete(project, instance, name=None, host=None, x__xgafv=None)</code>
+ <code class="details" id="delete">delete(project, instance, host=None, name=None, x__xgafv=None)</code>
<pre>Deletes a user from a Cloud SQL instance.
Args:
project: string, Project ID of the project that contains the instance. (required)
instance: string, Database instance ID. This does not include the project ID. (required)
- name: string, Name of the user in the instance.
host: string, Host of the user in the instance.
+ name: string, Name of the user in the instance.
x__xgafv: string, V1 error format.
Allowed values
1 - v1 error format
@@ -112,10 +112,11 @@
Returns:
An object of the form:
- { # An Operation resource. For successful operations that return an Operation resource, only the fields relevant to the operation are populated in the resource.
- "status": "A String", # The status of an operation. Valid values are: *PENDING* *RUNNING* *DONE* *SQL_OPERATION_STATUS_UNSPECIFIED*
+ { # An Operation resource. For successful operations that return an Operation resource, only the fields relevant to the operation are populated in the resource. Next field: 18
+ "user": "A String", # The email address of the user who initiated this operation.
+ "selfLink": "A String", # The URI of this resource.
+ "name": "A String", # An identifier that uniquely identifies the operation. You can use this identifier to retrieve the Operations resource that has information about the operation.
"error": { # Database instance operation errors list wrapper. # If errors occurred during processing of this operation, this field will be populated.
- "kind": "A String", # This is always *sql#operationErrors*.
"errors": [ # The list of errors encountered while processing this operation.
{ # Database instance operation error.
"code": "A String", # Identifies the specific error that occurred.
@@ -123,49 +124,9 @@
"kind": "A String", # This is always *sql#operationError*.
},
],
+ "kind": "A String", # This is always *sql#operationErrors*.
},
- "endTime": "A String", # The time this operation finished in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
- "targetLink": "A String",
- "name": "A String", # An identifier that uniquely identifies the operation. You can use this identifier to retrieve the Operations resource that has information about the operation.
- "exportContext": { # Database instance export context. # The context for export operation, if applicable.
- "offload": True or False, # Option for export offload.
- "csvExportOptions": { # Options for exporting data as CSV.
- "selectQuery": "A String", # The select query used to extract the data.
- },
- "databases": [ # Databases to be exported. *MySQL instances:* If *fileType* is *SQL* and no database is specified, all databases are exported, except for the *mysql* system database. If *fileType* is *CSV*, you can specify one database, either by using this property or by using the *csvExportOptions.selectQuery* property, which takes precedence over this property. *PostgreSQL instances:* You must specify one database to be exported. If *fileType* is *CSV*, this database must match the one specified in the *csvExportOptions.selectQuery* property.
- "A String",
- ],
- "kind": "A String", # This is always *sql#exportContext*.
- "sqlExportOptions": { # Options for exporting data as SQL statements.
- "schemaOnly": True or False, # Export only schemas.
- "tables": [ # Tables to export, or that were exported, from the specified database. If you specify tables, specify one and only one database. For PostgreSQL instances, you can specify only one table.
- "A String",
- ],
- "mysqlExportOptions": { # Options for exporting from MySQL.
- "masterData": 42, # Option to include SQL statement required to set up replication. If set to *1*, the dump file includes a CHANGE MASTER TO statement with the binary log coordinates. If set to *2*, the CHANGE MASTER TO statement is written as a SQL comment, and has no effect. All other values are ignored.
- },
- },
- "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data.
- "uri": "A String", # The path to the file in Google Cloud Storage where the export will be stored. The URI is in the form *gs: //bucketName/fileName*. If the file already exists, the requests // succeeds, but the operation fails. If *fileType* is // *SQL* and the filename ends with .gz, the contents are // compressed.
- },
- "operationType": "A String", # The type of the operation. Valid values are: *CREATE* *DELETE* *UPDATE* *RESTART* *IMPORT* *EXPORT* *BACKUP_VOLUME* *RESTORE_VOLUME* *CREATE_USER* *DELETE_USER* *CREATE_DATABASE* *DELETE_DATABASE*
- "startTime": "A String", # The time this operation actually started in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
- "selfLink": "A String", # The URI of this resource.
- "kind": "A String", # This is always *sql#operation*.
- "insertTime": "A String", # The time this operation was enqueued in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
- "targetProject": "A String", # The project ID of the target instance related to this operation.
"importContext": { # Database instance import context. # The context for import operation, if applicable.
- "uri": "A String", # Path to the import file in Cloud Storage, in the form *gs: //bucketName/fileName*. Compressed gzip files (.gz) are supported // when *fileType* is *SQL*. The instance must have // write permissions to the bucket and read access to the file.
- "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data.
- "csvImportOptions": { # Options for importing data as CSV.
- "columns": [ # The columns to which CSV data is imported. If not specified, all columns of the database table are loaded with CSV data.
- "A String",
- ],
- "table": "A String", # The table to which CSV data is imported.
- },
- "database": "A String", # The target database for the import. If *fileType* is *SQL*, this field is required only if the import file does not specify a database, and is overridden by any database specification in the import file. If *fileType* is *CSV*, one database must be specified.
- "importUser": "A String", # The PostgreSQL user for this import operation. PostgreSQL instances only.
- "kind": "A String", # This is always *sql#importContext*.
"bakImportOptions": { # Import parameters specific to SQL Server .BAK files
"encryptionOptions": {
"certPath": "A String", # Path to the Certificate (.cer) in Cloud Storage, in the form *gs://bucketName/fileName*. The instance must have write permissions to the bucket and read access to the file.
@@ -173,9 +134,52 @@
"pvkPath": "A String", # Path to the Certificate Private Key (.pvk) in Cloud Storage, in the form *gs://bucketName/fileName*. The instance must have write permissions to the bucket and read access to the file.
},
},
+ "kind": "A String", # This is always *sql#importContext*.
+ "uri": "A String", # Path to the import file in Cloud Storage, in the form *gs: //bucketName/fileName*. Compressed gzip files (.gz) are supported // when *fileType* is *SQL*. The instance must have // write permissions to the bucket and read access to the file.
+ "database": "A String", # The target database for the import. If *fileType* is *SQL*, this field is required only if the import file does not specify a database, and is overridden by any database specification in the import file. If *fileType* is *CSV*, one database must be specified.
+ "csvImportOptions": { # Options for importing data as CSV.
+ "table": "A String", # The table to which CSV data is imported.
+ "columns": [ # The columns to which CSV data is imported. If not specified, all columns of the database table are loaded with CSV data.
+ "A String",
+ ],
+ },
+ "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data.
+ "importUser": "A String", # The PostgreSQL user for this import operation. PostgreSQL instances only.
},
- "user": "A String", # The email address of the user who initiated this operation.
+ "status": "A String", # The status of an operation. Valid values are: *PENDING* *RUNNING* *DONE* *SQL_OPERATION_STATUS_UNSPECIFIED*
+ "targetLink": "A String",
"targetId": "A String", # Name of the database instance related to this operation.
+ "endTime": "A String", # The time this operation finished in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
+ "operationType": "A String", # The type of the operation. Valid values are: *CREATE* *DELETE* *UPDATE* *RESTART* *IMPORT* *EXPORT* *BACKUP_VOLUME* *RESTORE_VOLUME* *CREATE_USER* *DELETE_USER* *CREATE_DATABASE* *DELETE_DATABASE*
+ "kind": "A String", # This is always *sql#operation*.
+ "backupContext": { # Backup context. # The context for backup operation, if applicable.
+ "backupId": "A String", # The identifier of the backup.
+ "kind": "A String", # This is always *sql#backupContext*.
+ },
+ "exportContext": { # Database instance export context. # The context for export operation, if applicable.
+ "sqlExportOptions": { # Options for exporting data as SQL statements.
+ "schemaOnly": True or False, # Export only schemas.
+ "mysqlExportOptions": { # Options for exporting from MySQL.
+ "masterData": 42, # Option to include SQL statement required to set up replication. If set to *1*, the dump file includes a CHANGE MASTER TO statement with the binary log coordinates, and --set-gtid-purged is set to ON. If set to *2*, the CHANGE MASTER TO statement is written as a SQL comment and has no effect. If set to any value other than *1*, --set-gtid-purged is set to OFF.
+ },
+ "tables": [ # Tables to export, or that were exported, from the specified database. If you specify tables, specify one and only one database. For PostgreSQL instances, you can specify only one table.
+ "A String",
+ ],
+ },
+ "kind": "A String", # This is always *sql#exportContext*.
+ "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data. *BAK*: The file contains backup data for a SQL Server instance.
+ "csvExportOptions": { # Options for exporting data as CSV. *MySQL* and *PostgreSQL* instances only.
+ "selectQuery": "A String", # The select query used to extract the data.
+ },
+ "offload": True or False, # Option for export offload.
+ "uri": "A String", # The path to the file in Google Cloud Storage where the export will be stored. The URI is in the form *gs: //bucketName/fileName*. If the file already exists, the requests // succeeds, but the operation fails. If *fileType* is // *SQL* and the filename ends with .gz, the contents are // compressed.
+ "databases": [ # Databases to be exported. *MySQL instances:* If *fileType* is *SQL* and no database is specified, all databases are exported, except for the *mysql* system database. If *fileType* is *CSV*, you can specify one database, either by using this property or by using the *csvExportOptions.selectQuery* property, which takes precedence over this property. *PostgreSQL instances:* You must specify one database to be exported. If *fileType* is *CSV*, this database must match the one specified in the *csvExportOptions.selectQuery* property.
+ "A String",
+ ],
+ },
+ "startTime": "A String", # The time this operation actually started in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
+ "insertTime": "A String", # The time this operation was enqueued in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
+ "targetProject": "A String", # The project ID of the target instance related to this operation.
}</pre>
</div>
@@ -190,21 +194,21 @@
The object takes the form of:
{ # A Cloud SQL user resource.
- "password": "A String", # The password for the user.
- "sqlserverUserDetails": { # Represents a Sql Server user on the Cloud SQL instance.
- "serverRoles": [ # The server roles for this user
- "A String",
- ],
- "disabled": True or False, # If the user has been disabled
- },
- "kind": "A String", # This is always *sql#user*.
- "name": "A String", # The name of the user in the Cloud SQL instance. Can be omitted for *update* since it is already specified in the URL.
- "project": "A String", # The project ID of the project containing the Cloud SQL database. The Google apps domain is prefixed if applicable. Can be omitted for *update* since it is already specified on the URL.
- "instance": "A String", # The name of the Cloud SQL instance. This does not include the project ID. Can be omitted for *update* since it is already specified on the URL.
- "type": "A String", # The user type. It determines the method to authenticate the user during login. The default is the database's built-in user type.
- "etag": "A String", # This field is deprecated and will be removed from a future version of the API.
- "host": "A String", # The host name from which the user can connect. For *insert* operations, host defaults to an empty string. For *update* operations, host is specified as part of the request URL. The host name cannot be updated after insertion.
- }
+ "project": "A String", # The project ID of the project containing the Cloud SQL database. The Google apps domain is prefixed if applicable. Can be omitted for *update* since it is already specified on the URL.
+ "password": "A String", # The password for the user.
+ "host": "A String", # The host name from which the user can connect. For *insert* operations, host defaults to an empty string. For *update* operations, host is specified as part of the request URL. The host name cannot be updated after insertion.
+ "name": "A String", # The name of the user in the Cloud SQL instance. Can be omitted for *update* since it is already specified in the URL.
+ "sqlserverUserDetails": { # Represents a Sql Server user on the Cloud SQL instance.
+ "disabled": True or False, # If the user has been disabled
+ "serverRoles": [ # The server roles for this user
+ "A String",
+ ],
+ },
+ "type": "A String", # The user type. It determines the method to authenticate the user during login. The default is the database's built-in user type.
+ "etag": "A String", # This field is deprecated and will be removed from a future version of the API.
+ "kind": "A String", # This is always *sql#user*.
+ "instance": "A String", # The name of the Cloud SQL instance. This does not include the project ID. Can be omitted for *update* since it is already specified on the URL.
+}
x__xgafv: string, V1 error format.
Allowed values
@@ -214,10 +218,11 @@
Returns:
An object of the form:
- { # An Operation resource. For successful operations that return an Operation resource, only the fields relevant to the operation are populated in the resource.
- "status": "A String", # The status of an operation. Valid values are: *PENDING* *RUNNING* *DONE* *SQL_OPERATION_STATUS_UNSPECIFIED*
+ { # An Operation resource. For successful operations that return an Operation resource, only the fields relevant to the operation are populated in the resource. Next field: 18
+ "user": "A String", # The email address of the user who initiated this operation.
+ "selfLink": "A String", # The URI of this resource.
+ "name": "A String", # An identifier that uniquely identifies the operation. You can use this identifier to retrieve the Operations resource that has information about the operation.
"error": { # Database instance operation errors list wrapper. # If errors occurred during processing of this operation, this field will be populated.
- "kind": "A String", # This is always *sql#operationErrors*.
"errors": [ # The list of errors encountered while processing this operation.
{ # Database instance operation error.
"code": "A String", # Identifies the specific error that occurred.
@@ -225,49 +230,9 @@
"kind": "A String", # This is always *sql#operationError*.
},
],
+ "kind": "A String", # This is always *sql#operationErrors*.
},
- "endTime": "A String", # The time this operation finished in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
- "targetLink": "A String",
- "name": "A String", # An identifier that uniquely identifies the operation. You can use this identifier to retrieve the Operations resource that has information about the operation.
- "exportContext": { # Database instance export context. # The context for export operation, if applicable.
- "offload": True or False, # Option for export offload.
- "csvExportOptions": { # Options for exporting data as CSV.
- "selectQuery": "A String", # The select query used to extract the data.
- },
- "databases": [ # Databases to be exported. *MySQL instances:* If *fileType* is *SQL* and no database is specified, all databases are exported, except for the *mysql* system database. If *fileType* is *CSV*, you can specify one database, either by using this property or by using the *csvExportOptions.selectQuery* property, which takes precedence over this property. *PostgreSQL instances:* You must specify one database to be exported. If *fileType* is *CSV*, this database must match the one specified in the *csvExportOptions.selectQuery* property.
- "A String",
- ],
- "kind": "A String", # This is always *sql#exportContext*.
- "sqlExportOptions": { # Options for exporting data as SQL statements.
- "schemaOnly": True or False, # Export only schemas.
- "tables": [ # Tables to export, or that were exported, from the specified database. If you specify tables, specify one and only one database. For PostgreSQL instances, you can specify only one table.
- "A String",
- ],
- "mysqlExportOptions": { # Options for exporting from MySQL.
- "masterData": 42, # Option to include SQL statement required to set up replication. If set to *1*, the dump file includes a CHANGE MASTER TO statement with the binary log coordinates. If set to *2*, the CHANGE MASTER TO statement is written as a SQL comment, and has no effect. All other values are ignored.
- },
- },
- "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data.
- "uri": "A String", # The path to the file in Google Cloud Storage where the export will be stored. The URI is in the form *gs: //bucketName/fileName*. If the file already exists, the requests // succeeds, but the operation fails. If *fileType* is // *SQL* and the filename ends with .gz, the contents are // compressed.
- },
- "operationType": "A String", # The type of the operation. Valid values are: *CREATE* *DELETE* *UPDATE* *RESTART* *IMPORT* *EXPORT* *BACKUP_VOLUME* *RESTORE_VOLUME* *CREATE_USER* *DELETE_USER* *CREATE_DATABASE* *DELETE_DATABASE*
- "startTime": "A String", # The time this operation actually started in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
- "selfLink": "A String", # The URI of this resource.
- "kind": "A String", # This is always *sql#operation*.
- "insertTime": "A String", # The time this operation was enqueued in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
- "targetProject": "A String", # The project ID of the target instance related to this operation.
"importContext": { # Database instance import context. # The context for import operation, if applicable.
- "uri": "A String", # Path to the import file in Cloud Storage, in the form *gs: //bucketName/fileName*. Compressed gzip files (.gz) are supported // when *fileType* is *SQL*. The instance must have // write permissions to the bucket and read access to the file.
- "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data.
- "csvImportOptions": { # Options for importing data as CSV.
- "columns": [ # The columns to which CSV data is imported. If not specified, all columns of the database table are loaded with CSV data.
- "A String",
- ],
- "table": "A String", # The table to which CSV data is imported.
- },
- "database": "A String", # The target database for the import. If *fileType* is *SQL*, this field is required only if the import file does not specify a database, and is overridden by any database specification in the import file. If *fileType* is *CSV*, one database must be specified.
- "importUser": "A String", # The PostgreSQL user for this import operation. PostgreSQL instances only.
- "kind": "A String", # This is always *sql#importContext*.
"bakImportOptions": { # Import parameters specific to SQL Server .BAK files
"encryptionOptions": {
"certPath": "A String", # Path to the Certificate (.cer) in Cloud Storage, in the form *gs://bucketName/fileName*. The instance must have write permissions to the bucket and read access to the file.
@@ -275,9 +240,52 @@
"pvkPath": "A String", # Path to the Certificate Private Key (.pvk) in Cloud Storage, in the form *gs://bucketName/fileName*. The instance must have write permissions to the bucket and read access to the file.
},
},
+ "kind": "A String", # This is always *sql#importContext*.
+ "uri": "A String", # Path to the import file in Cloud Storage, in the form *gs: //bucketName/fileName*. Compressed gzip files (.gz) are supported // when *fileType* is *SQL*. The instance must have // write permissions to the bucket and read access to the file.
+ "database": "A String", # The target database for the import. If *fileType* is *SQL*, this field is required only if the import file does not specify a database, and is overridden by any database specification in the import file. If *fileType* is *CSV*, one database must be specified.
+ "csvImportOptions": { # Options for importing data as CSV.
+ "table": "A String", # The table to which CSV data is imported.
+ "columns": [ # The columns to which CSV data is imported. If not specified, all columns of the database table are loaded with CSV data.
+ "A String",
+ ],
+ },
+ "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data.
+ "importUser": "A String", # The PostgreSQL user for this import operation. PostgreSQL instances only.
},
- "user": "A String", # The email address of the user who initiated this operation.
+ "status": "A String", # The status of an operation. Valid values are: *PENDING* *RUNNING* *DONE* *SQL_OPERATION_STATUS_UNSPECIFIED*
+ "targetLink": "A String",
"targetId": "A String", # Name of the database instance related to this operation.
+ "endTime": "A String", # The time this operation finished in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
+ "operationType": "A String", # The type of the operation. Valid values are: *CREATE* *DELETE* *UPDATE* *RESTART* *IMPORT* *EXPORT* *BACKUP_VOLUME* *RESTORE_VOLUME* *CREATE_USER* *DELETE_USER* *CREATE_DATABASE* *DELETE_DATABASE*
+ "kind": "A String", # This is always *sql#operation*.
+ "backupContext": { # Backup context. # The context for backup operation, if applicable.
+ "backupId": "A String", # The identifier of the backup.
+ "kind": "A String", # This is always *sql#backupContext*.
+ },
+ "exportContext": { # Database instance export context. # The context for export operation, if applicable.
+ "sqlExportOptions": { # Options for exporting data as SQL statements.
+ "schemaOnly": True or False, # Export only schemas.
+ "mysqlExportOptions": { # Options for exporting from MySQL.
+ "masterData": 42, # Option to include SQL statement required to set up replication. If set to *1*, the dump file includes a CHANGE MASTER TO statement with the binary log coordinates, and --set-gtid-purged is set to ON. If set to *2*, the CHANGE MASTER TO statement is written as a SQL comment and has no effect. If set to any value other than *1*, --set-gtid-purged is set to OFF.
+ },
+ "tables": [ # Tables to export, or that were exported, from the specified database. If you specify tables, specify one and only one database. For PostgreSQL instances, you can specify only one table.
+ "A String",
+ ],
+ },
+ "kind": "A String", # This is always *sql#exportContext*.
+ "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data. *BAK*: The file contains backup data for a SQL Server instance.
+ "csvExportOptions": { # Options for exporting data as CSV. *MySQL* and *PostgreSQL* instances only.
+ "selectQuery": "A String", # The select query used to extract the data.
+ },
+ "offload": True or False, # Option for export offload.
+ "uri": "A String", # The path to the file in Google Cloud Storage where the export will be stored. The URI is in the form *gs: //bucketName/fileName*. If the file already exists, the requests // succeeds, but the operation fails. If *fileType* is // *SQL* and the filename ends with .gz, the contents are // compressed.
+ "databases": [ # Databases to be exported. *MySQL instances:* If *fileType* is *SQL* and no database is specified, all databases are exported, except for the *mysql* system database. If *fileType* is *CSV*, you can specify one database, either by using this property or by using the *csvExportOptions.selectQuery* property, which takes precedence over this property. *PostgreSQL instances:* You must specify one database to be exported. If *fileType* is *CSV*, this database must match the one specified in the *csvExportOptions.selectQuery* property.
+ "A String",
+ ],
+ },
+ "startTime": "A String", # The time this operation actually started in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
+ "insertTime": "A String", # The time this operation was enqueued in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
+ "targetProject": "A String", # The project ID of the target instance related to this operation.
}</pre>
</div>
@@ -297,26 +305,26 @@
An object of the form:
{ # User list response.
+ "nextPageToken": "A String", # An identifier that uniquely identifies the operation. You can use this identifier to retrieve the Operations resource that has information about the operation.
"items": [ # List of user resources in the instance.
{ # A Cloud SQL user resource.
- "password": "A String", # The password for the user.
- "sqlserverUserDetails": { # Represents a Sql Server user on the Cloud SQL instance.
- "serverRoles": [ # The server roles for this user
- "A String",
- ],
- "disabled": True or False, # If the user has been disabled
- },
- "kind": "A String", # This is always *sql#user*.
- "name": "A String", # The name of the user in the Cloud SQL instance. Can be omitted for *update* since it is already specified in the URL.
- "project": "A String", # The project ID of the project containing the Cloud SQL database. The Google apps domain is prefixed if applicable. Can be omitted for *update* since it is already specified on the URL.
- "instance": "A String", # The name of the Cloud SQL instance. This does not include the project ID. Can be omitted for *update* since it is already specified on the URL.
- "type": "A String", # The user type. It determines the method to authenticate the user during login. The default is the database's built-in user type.
- "etag": "A String", # This field is deprecated and will be removed from a future version of the API.
- "host": "A String", # The host name from which the user can connect. For *insert* operations, host defaults to an empty string. For *update* operations, host is specified as part of the request URL. The host name cannot be updated after insertion.
+ "project": "A String", # The project ID of the project containing the Cloud SQL database. The Google apps domain is prefixed if applicable. Can be omitted for *update* since it is already specified on the URL.
+ "password": "A String", # The password for the user.
+ "host": "A String", # The host name from which the user can connect. For *insert* operations, host defaults to an empty string. For *update* operations, host is specified as part of the request URL. The host name cannot be updated after insertion.
+ "name": "A String", # The name of the user in the Cloud SQL instance. Can be omitted for *update* since it is already specified in the URL.
+ "sqlserverUserDetails": { # Represents a Sql Server user on the Cloud SQL instance.
+ "disabled": True or False, # If the user has been disabled
+ "serverRoles": [ # The server roles for this user
+ "A String",
+ ],
},
+ "type": "A String", # The user type. It determines the method to authenticate the user during login. The default is the database's built-in user type.
+ "etag": "A String", # This field is deprecated and will be removed from a future version of the API.
+ "kind": "A String", # This is always *sql#user*.
+ "instance": "A String", # The name of the Cloud SQL instance. This does not include the project ID. Can be omitted for *update* since it is already specified on the URL.
+ },
],
"kind": "A String", # This is always *sql#usersList*.
- "nextPageToken": "A String", # An identifier that uniquely identifies the operation. You can use this identifier to retrieve the Operations resource that has information about the operation.
}</pre>
</div>
@@ -331,21 +339,21 @@
The object takes the form of:
{ # A Cloud SQL user resource.
- "password": "A String", # The password for the user.
- "sqlserverUserDetails": { # Represents a Sql Server user on the Cloud SQL instance.
- "serverRoles": [ # The server roles for this user
- "A String",
- ],
- "disabled": True or False, # If the user has been disabled
- },
- "kind": "A String", # This is always *sql#user*.
- "name": "A String", # The name of the user in the Cloud SQL instance. Can be omitted for *update* since it is already specified in the URL.
- "project": "A String", # The project ID of the project containing the Cloud SQL database. The Google apps domain is prefixed if applicable. Can be omitted for *update* since it is already specified on the URL.
- "instance": "A String", # The name of the Cloud SQL instance. This does not include the project ID. Can be omitted for *update* since it is already specified on the URL.
- "type": "A String", # The user type. It determines the method to authenticate the user during login. The default is the database's built-in user type.
- "etag": "A String", # This field is deprecated and will be removed from a future version of the API.
- "host": "A String", # The host name from which the user can connect. For *insert* operations, host defaults to an empty string. For *update* operations, host is specified as part of the request URL. The host name cannot be updated after insertion.
- }
+ "project": "A String", # The project ID of the project containing the Cloud SQL database. The Google apps domain is prefixed if applicable. Can be omitted for *update* since it is already specified on the URL.
+ "password": "A String", # The password for the user.
+ "host": "A String", # The host name from which the user can connect. For *insert* operations, host defaults to an empty string. For *update* operations, host is specified as part of the request URL. The host name cannot be updated after insertion.
+ "name": "A String", # The name of the user in the Cloud SQL instance. Can be omitted for *update* since it is already specified in the URL.
+ "sqlserverUserDetails": { # Represents a Sql Server user on the Cloud SQL instance.
+ "disabled": True or False, # If the user has been disabled
+ "serverRoles": [ # The server roles for this user
+ "A String",
+ ],
+ },
+ "type": "A String", # The user type. It determines the method to authenticate the user during login. The default is the database's built-in user type.
+ "etag": "A String", # This field is deprecated and will be removed from a future version of the API.
+ "kind": "A String", # This is always *sql#user*.
+ "instance": "A String", # The name of the Cloud SQL instance. This does not include the project ID. Can be omitted for *update* since it is already specified on the URL.
+}
host: string, Optional. Host of the user in the instance.
name: string, Name of the user in the instance.
@@ -357,10 +365,11 @@
Returns:
An object of the form:
- { # An Operation resource. For successful operations that return an Operation resource, only the fields relevant to the operation are populated in the resource.
- "status": "A String", # The status of an operation. Valid values are: *PENDING* *RUNNING* *DONE* *SQL_OPERATION_STATUS_UNSPECIFIED*
+ { # An Operation resource. For successful operations that return an Operation resource, only the fields relevant to the operation are populated in the resource. Next field: 18
+ "user": "A String", # The email address of the user who initiated this operation.
+ "selfLink": "A String", # The URI of this resource.
+ "name": "A String", # An identifier that uniquely identifies the operation. You can use this identifier to retrieve the Operations resource that has information about the operation.
"error": { # Database instance operation errors list wrapper. # If errors occurred during processing of this operation, this field will be populated.
- "kind": "A String", # This is always *sql#operationErrors*.
"errors": [ # The list of errors encountered while processing this operation.
{ # Database instance operation error.
"code": "A String", # Identifies the specific error that occurred.
@@ -368,49 +377,9 @@
"kind": "A String", # This is always *sql#operationError*.
},
],
+ "kind": "A String", # This is always *sql#operationErrors*.
},
- "endTime": "A String", # The time this operation finished in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
- "targetLink": "A String",
- "name": "A String", # An identifier that uniquely identifies the operation. You can use this identifier to retrieve the Operations resource that has information about the operation.
- "exportContext": { # Database instance export context. # The context for export operation, if applicable.
- "offload": True or False, # Option for export offload.
- "csvExportOptions": { # Options for exporting data as CSV.
- "selectQuery": "A String", # The select query used to extract the data.
- },
- "databases": [ # Databases to be exported. *MySQL instances:* If *fileType* is *SQL* and no database is specified, all databases are exported, except for the *mysql* system database. If *fileType* is *CSV*, you can specify one database, either by using this property or by using the *csvExportOptions.selectQuery* property, which takes precedence over this property. *PostgreSQL instances:* You must specify one database to be exported. If *fileType* is *CSV*, this database must match the one specified in the *csvExportOptions.selectQuery* property.
- "A String",
- ],
- "kind": "A String", # This is always *sql#exportContext*.
- "sqlExportOptions": { # Options for exporting data as SQL statements.
- "schemaOnly": True or False, # Export only schemas.
- "tables": [ # Tables to export, or that were exported, from the specified database. If you specify tables, specify one and only one database. For PostgreSQL instances, you can specify only one table.
- "A String",
- ],
- "mysqlExportOptions": { # Options for exporting from MySQL.
- "masterData": 42, # Option to include SQL statement required to set up replication. If set to *1*, the dump file includes a CHANGE MASTER TO statement with the binary log coordinates. If set to *2*, the CHANGE MASTER TO statement is written as a SQL comment, and has no effect. All other values are ignored.
- },
- },
- "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data.
- "uri": "A String", # The path to the file in Google Cloud Storage where the export will be stored. The URI is in the form *gs: //bucketName/fileName*. If the file already exists, the requests // succeeds, but the operation fails. If *fileType* is // *SQL* and the filename ends with .gz, the contents are // compressed.
- },
- "operationType": "A String", # The type of the operation. Valid values are: *CREATE* *DELETE* *UPDATE* *RESTART* *IMPORT* *EXPORT* *BACKUP_VOLUME* *RESTORE_VOLUME* *CREATE_USER* *DELETE_USER* *CREATE_DATABASE* *DELETE_DATABASE*
- "startTime": "A String", # The time this operation actually started in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
- "selfLink": "A String", # The URI of this resource.
- "kind": "A String", # This is always *sql#operation*.
- "insertTime": "A String", # The time this operation was enqueued in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
- "targetProject": "A String", # The project ID of the target instance related to this operation.
"importContext": { # Database instance import context. # The context for import operation, if applicable.
- "uri": "A String", # Path to the import file in Cloud Storage, in the form *gs: //bucketName/fileName*. Compressed gzip files (.gz) are supported // when *fileType* is *SQL*. The instance must have // write permissions to the bucket and read access to the file.
- "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data.
- "csvImportOptions": { # Options for importing data as CSV.
- "columns": [ # The columns to which CSV data is imported. If not specified, all columns of the database table are loaded with CSV data.
- "A String",
- ],
- "table": "A String", # The table to which CSV data is imported.
- },
- "database": "A String", # The target database for the import. If *fileType* is *SQL*, this field is required only if the import file does not specify a database, and is overridden by any database specification in the import file. If *fileType* is *CSV*, one database must be specified.
- "importUser": "A String", # The PostgreSQL user for this import operation. PostgreSQL instances only.
- "kind": "A String", # This is always *sql#importContext*.
"bakImportOptions": { # Import parameters specific to SQL Server .BAK files
"encryptionOptions": {
"certPath": "A String", # Path to the Certificate (.cer) in Cloud Storage, in the form *gs://bucketName/fileName*. The instance must have write permissions to the bucket and read access to the file.
@@ -418,9 +387,52 @@
"pvkPath": "A String", # Path to the Certificate Private Key (.pvk) in Cloud Storage, in the form *gs://bucketName/fileName*. The instance must have write permissions to the bucket and read access to the file.
},
},
+ "kind": "A String", # This is always *sql#importContext*.
+ "uri": "A String", # Path to the import file in Cloud Storage, in the form *gs: //bucketName/fileName*. Compressed gzip files (.gz) are supported // when *fileType* is *SQL*. The instance must have // write permissions to the bucket and read access to the file.
+ "database": "A String", # The target database for the import. If *fileType* is *SQL*, this field is required only if the import file does not specify a database, and is overridden by any database specification in the import file. If *fileType* is *CSV*, one database must be specified.
+ "csvImportOptions": { # Options for importing data as CSV.
+ "table": "A String", # The table to which CSV data is imported.
+ "columns": [ # The columns to which CSV data is imported. If not specified, all columns of the database table are loaded with CSV data.
+ "A String",
+ ],
+ },
+ "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data.
+ "importUser": "A String", # The PostgreSQL user for this import operation. PostgreSQL instances only.
},
- "user": "A String", # The email address of the user who initiated this operation.
+ "status": "A String", # The status of an operation. Valid values are: *PENDING* *RUNNING* *DONE* *SQL_OPERATION_STATUS_UNSPECIFIED*
+ "targetLink": "A String",
"targetId": "A String", # Name of the database instance related to this operation.
+ "endTime": "A String", # The time this operation finished in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
+ "operationType": "A String", # The type of the operation. Valid values are: *CREATE* *DELETE* *UPDATE* *RESTART* *IMPORT* *EXPORT* *BACKUP_VOLUME* *RESTORE_VOLUME* *CREATE_USER* *DELETE_USER* *CREATE_DATABASE* *DELETE_DATABASE*
+ "kind": "A String", # This is always *sql#operation*.
+ "backupContext": { # Backup context. # The context for backup operation, if applicable.
+ "backupId": "A String", # The identifier of the backup.
+ "kind": "A String", # This is always *sql#backupContext*.
+ },
+ "exportContext": { # Database instance export context. # The context for export operation, if applicable.
+ "sqlExportOptions": { # Options for exporting data as SQL statements.
+ "schemaOnly": True or False, # Export only schemas.
+ "mysqlExportOptions": { # Options for exporting from MySQL.
+ "masterData": 42, # Option to include SQL statement required to set up replication. If set to *1*, the dump file includes a CHANGE MASTER TO statement with the binary log coordinates, and --set-gtid-purged is set to ON. If set to *2*, the CHANGE MASTER TO statement is written as a SQL comment and has no effect. If set to any value other than *1*, --set-gtid-purged is set to OFF.
+ },
+ "tables": [ # Tables to export, or that were exported, from the specified database. If you specify tables, specify one and only one database. For PostgreSQL instances, you can specify only one table.
+ "A String",
+ ],
+ },
+ "kind": "A String", # This is always *sql#exportContext*.
+ "fileType": "A String", # The file type for the specified uri. *SQL*: The file contains SQL statements. *CSV*: The file contains CSV data. *BAK*: The file contains backup data for a SQL Server instance.
+ "csvExportOptions": { # Options for exporting data as CSV. *MySQL* and *PostgreSQL* instances only.
+ "selectQuery": "A String", # The select query used to extract the data.
+ },
+ "offload": True or False, # Option for export offload.
+ "uri": "A String", # The path to the file in Google Cloud Storage where the export will be stored. The URI is in the form *gs: //bucketName/fileName*. If the file already exists, the requests // succeeds, but the operation fails. If *fileType* is // *SQL* and the filename ends with .gz, the contents are // compressed.
+ "databases": [ # Databases to be exported. *MySQL instances:* If *fileType* is *SQL* and no database is specified, all databases are exported, except for the *mysql* system database. If *fileType* is *CSV*, you can specify one database, either by using this property or by using the *csvExportOptions.selectQuery* property, which takes precedence over this property. *PostgreSQL instances:* You must specify one database to be exported. If *fileType* is *CSV*, this database must match the one specified in the *csvExportOptions.selectQuery* property.
+ "A String",
+ ],
+ },
+ "startTime": "A String", # The time this operation actually started in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
+ "insertTime": "A String", # The time this operation was enqueued in UTC timezone in RFC 3339 format, for example *2012-11-15T16:19:00.094Z*.
+ "targetProject": "A String", # The project ID of the target instance related to this operation.
}</pre>
</div>