Regen all docs. (#700)
* Stop recursing if discovery == {} (see the sketch below)
* Generate docs with 'make docs'.
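
A minimal sketch of the guard in the first bullet, assuming a hypothetical recursive generator; the function names, the emit_collection_page stub, and the arguments below are illustrative stand-ins, not the actual describe.py helpers.

```python
# Hypothetical sketch of the "stop recursing if discovery == {}" guard.
# emit_collection_page and document_collection_recursive are illustrative
# stand-ins, not the real describe.py functions.

def emit_collection_page(path, discovery):
    # Stand-in for the step that renders one docs/dyn/<path>html page.
    print("would document %shtml" % path)

def document_collection_recursive(path, discovery):
    if discovery == {}:
        # An empty discovery fragment has nothing to document, so bail out
        # instead of recursing into an empty mapping.
        return
    emit_collection_page(path, discovery)
    # Recurse into nested collections, e.g. sqladmin_v1beta4 -> users.
    for name, sub in discovery.get("resources", {}).items():
        document_collection_recursive(path + name + ".", sub)

# Example: the empty "users" fragment is skipped by the guard.
document_collection_recursive("sqladmin_v1beta4.", {"resources": {"users": {}}})
```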
diff --git a/docs/dyn/sqladmin_v1beta4.users.html b/docs/dyn/sqladmin_v1beta4.users.html
index c5f6665..b58dfb3 100644
--- a/docs/dyn/sqladmin_v1beta4.users.html
+++ b/docs/dyn/sqladmin_v1beta4.users.html
@@ -72,7 +72,7 @@
</style>
-<h1><a href="sqladmin_v1beta4.html">Cloud SQL Administration API</a> . <a href="sqladmin_v1beta4.users.html">users</a></h1>
+<h1><a href="sqladmin_v1beta4.html">Cloud SQL Admin API</a> . <a href="sqladmin_v1beta4.users.html">users</a></h1>
<h2>Instance Methods</h2>
<p class="toc_element">
<code><a href="#delete">delete(project, instance, host, name)</a></code></p>
@@ -84,7 +84,7 @@
<code><a href="#list">list(project, instance)</a></code></p>
<p class="firstline">Lists users in the specified Cloud SQL instance.</p>
<p class="toc_element">
- <code><a href="#update">update(project, instance, host, name, body)</a></code></p>
+ <code><a href="#update">update(project, instance, name, body, host=None)</a></code></p>
<p class="firstline">Updates an existing user in a Cloud SQL instance.</p>
<h3>Method Details</h3>
<div class="method">
@@ -100,16 +100,16 @@
Returns:
An object of the form:
- { # An Operations resource contains information about database instance operations such as create, delete, and restart. Operations resources are created in response to operations that were initiated; you never create them directly.
+ { # An Operation resource. For successful operations that return an Operation resource, only the fields relevant to the operation are populated in the resource.
"status": "A String", # The status of an operation. Valid values are PENDING, RUNNING, DONE, UNKNOWN.
"importContext": { # Database instance import context. # The context for import operation, if applicable.
"kind": "sql#importContext", # This is always sql#importContext.
- "database": "A String", # The database (for example, guestbook) to which the import is made. If fileType is SQL and no database is specified, it is assumed that the database is specified in the file to be imported. If fileType is CSV, it must be specified.
+ "database": "A String", # The target database for the import. If fileType is SQL, this field is required only if the import file does not specify a database, and is overridden by any database specification in the import file. If fileType is CSV, one database must be specified.
"fileType": "A String", # The file type for the specified uri.
# SQL: The file contains SQL statements.
# CSV: The file contains CSV data.
- "uri": "A String", # A path to the file in Google Cloud Storage from which the import is made. The URI is in the form gs://bucketName/fileName. Compressed gzip files (.gz) are supported when fileType is SQL.
- "importUser": "A String", # The PostgreSQL user to use for this import operation. Defaults to cloudsqlsuperuser. Does not apply to MySQL instances.
+ "uri": "A String", # Path to the import file in Cloud Storage, in the form gs://bucketName/fileName. Compressed gzip files (.gz) are supported when fileType is SQL. The instance must have write permissions to the bucket and read access to the file.
+ "importUser": "A String", # The PostgreSQL user for this import operation. PostgreSQL instances only.
"csvImportOptions": { # Options for importing data as CSV.
"table": "A String", # The table to which CSV data is imported.
"columns": [ # The columns to which CSV data is imported. If not specified, all columns of the database table are loaded with CSV data.
@@ -124,17 +124,22 @@
"fileType": "A String", # The file type for the specified uri.
# SQL: The file contains SQL statements.
# CSV: The file contains CSV data.
- "uri": "A String", # The path to the file in Google Cloud Storage where the export will be stored. The URI is in the form gs://bucketName/fileName. If the file already exists, the operation fails. If fileType is SQL and the filename ends with .gz, the contents are compressed.
+   "uri": "A String", # The path to the file in Google Cloud Storage where the export will be stored. The URI is in the form gs://bucketName/fileName. If the file already exists, the request succeeds, but the operation fails. If fileType is SQL and the filename ends with .gz, the contents are compressed.
"csvExportOptions": { # Options for exporting data as CSV.
"selectQuery": "A String", # The select query used to extract the data.
},
- "databases": [ # Databases (for example, guestbook) from which the export is made. If fileType is SQL and no database is specified, all databases are exported. If fileType is CSV, you can optionally specify at most one database to export. If csvExportOptions.selectQuery also specifies the database, this field will be ignored.
+ "databases": [ # Databases to be exported.
+ # MySQL instances: If fileType is SQL and no database is specified, all databases are exported, except for the mysql system database. If fileType is CSV, you can specify one database, either by using this property or by using the csvExportOptions.selectQuery property, which takes precedence over this property.
+ # PostgreSQL instances: Specify exactly one database to be exported. If fileType is CSV, this database must match the database used in the csvExportOptions.selectQuery property.
"A String",
],
"sqlExportOptions": { # Options for exporting data as SQL statements.
- "tables": [ # Tables to export, or that were exported, from the specified database. If you specify tables, specify one and only one database.
+ "tables": [ # Tables to export, or that were exported, from the specified database. If you specify tables, specify one and only one database. For PostgreSQL instances, you can specify only one table.
"A String",
],
+ "mysqlExportOptions": { # Options for exporting from MySQL.
+     "masterData": 42, # Option to include the SQL statement required to set up replication. If set to 1, the dump file includes a CHANGE MASTER TO statement with the binary log coordinates. If set to 2, the CHANGE MASTER TO statement is written as a SQL comment, and has no effect. All other values are ignored.
+ },
"schemaOnly": True or False, # Export only schemas.
},
},
@@ -172,11 +177,11 @@
{ # A Cloud SQL user resource.
"kind": "sql#user", # This is always sql#user.
- "name": "A String", # The name of the user in the Cloud SQL instance. Can be omitted for update since it is already specified on the URL.
+ "name": "A String", # The name of the user in the Cloud SQL instance. Can be omitted for update since it is already specified in the URL.
"project": "A String", # The project ID of the project containing the Cloud SQL database. The Google apps domain is prefixed if applicable. Can be omitted for update since it is already specified on the URL.
"instance": "A String", # The name of the Cloud SQL instance. This does not include the project ID. Can be omitted for update since it is already specified on the URL.
"host": "A String", # The host name from which the user can connect. For insert operations, host defaults to an empty string. For update operations, host is specified as part of the request URL. The host name cannot be updated after insertion.
- "etag": "A String", # HTTP 1.1 Entity tag for the resource.
+ "etag": "A String", # This field is deprecated and will be removed from a future version of the API.
"password": "A String", # The password for the user.
}
@@ -184,16 +189,16 @@
Returns:
An object of the form:
- { # An Operations resource contains information about database instance operations such as create, delete, and restart. Operations resources are created in response to operations that were initiated; you never create them directly.
+ { # An Operation resource. For successful operations that return an Operation resource, only the fields relevant to the operation are populated in the resource.
"status": "A String", # The status of an operation. Valid values are PENDING, RUNNING, DONE, UNKNOWN.
"importContext": { # Database instance import context. # The context for import operation, if applicable.
"kind": "sql#importContext", # This is always sql#importContext.
- "database": "A String", # The database (for example, guestbook) to which the import is made. If fileType is SQL and no database is specified, it is assumed that the database is specified in the file to be imported. If fileType is CSV, it must be specified.
+ "database": "A String", # The target database for the import. If fileType is SQL, this field is required only if the import file does not specify a database, and is overridden by any database specification in the import file. If fileType is CSV, one database must be specified.
"fileType": "A String", # The file type for the specified uri.
# SQL: The file contains SQL statements.
# CSV: The file contains CSV data.
- "uri": "A String", # A path to the file in Google Cloud Storage from which the import is made. The URI is in the form gs://bucketName/fileName. Compressed gzip files (.gz) are supported when fileType is SQL.
- "importUser": "A String", # The PostgreSQL user to use for this import operation. Defaults to cloudsqlsuperuser. Does not apply to MySQL instances.
+ "uri": "A String", # Path to the import file in Cloud Storage, in the form gs://bucketName/fileName. Compressed gzip files (.gz) are supported when fileType is SQL. The instance must have write permissions to the bucket and read access to the file.
+ "importUser": "A String", # The PostgreSQL user for this import operation. PostgreSQL instances only.
"csvImportOptions": { # Options for importing data as CSV.
"table": "A String", # The table to which CSV data is imported.
"columns": [ # The columns to which CSV data is imported. If not specified, all columns of the database table are loaded with CSV data.
@@ -208,17 +213,22 @@
"fileType": "A String", # The file type for the specified uri.
# SQL: The file contains SQL statements.
# CSV: The file contains CSV data.
- "uri": "A String", # The path to the file in Google Cloud Storage where the export will be stored. The URI is in the form gs://bucketName/fileName. If the file already exists, the operation fails. If fileType is SQL and the filename ends with .gz, the contents are compressed.
+   "uri": "A String", # The path to the file in Google Cloud Storage where the export will be stored. The URI is in the form gs://bucketName/fileName. If the file already exists, the request succeeds, but the operation fails. If fileType is SQL and the filename ends with .gz, the contents are compressed.
"csvExportOptions": { # Options for exporting data as CSV.
"selectQuery": "A String", # The select query used to extract the data.
},
- "databases": [ # Databases (for example, guestbook) from which the export is made. If fileType is SQL and no database is specified, all databases are exported. If fileType is CSV, you can optionally specify at most one database to export. If csvExportOptions.selectQuery also specifies the database, this field will be ignored.
+ "databases": [ # Databases to be exported.
+ # MySQL instances: If fileType is SQL and no database is specified, all databases are exported, except for the mysql system database. If fileType is CSV, you can specify one database, either by using this property or by using the csvExportOptions.selectQuery property, which takes precedence over this property.
+ # PostgreSQL instances: Specify exactly one database to be exported. If fileType is CSV, this database must match the database used in the csvExportOptions.selectQuery property.
"A String",
],
"sqlExportOptions": { # Options for exporting data as SQL statements.
- "tables": [ # Tables to export, or that were exported, from the specified database. If you specify tables, specify one and only one database.
+ "tables": [ # Tables to export, or that were exported, from the specified database. If you specify tables, specify one and only one database. For PostgreSQL instances, you can specify only one table.
"A String",
],
+ "mysqlExportOptions": { # Options for exporting from MySQL.
+     "masterData": 42, # Option to include the SQL statement required to set up replication. If set to 1, the dump file includes a CHANGE MASTER TO statement with the binary log coordinates. If set to 2, the CHANGE MASTER TO statement is written as a SQL comment, and has no effect. All other values are ignored.
+ },
"schemaOnly": True or False, # Export only schemas.
},
},
@@ -260,11 +270,11 @@
"items": [ # List of user resources in the instance.
{ # A Cloud SQL user resource.
"kind": "sql#user", # This is always sql#user.
- "name": "A String", # The name of the user in the Cloud SQL instance. Can be omitted for update since it is already specified on the URL.
+ "name": "A String", # The name of the user in the Cloud SQL instance. Can be omitted for update since it is already specified in the URL.
"project": "A String", # The project ID of the project containing the Cloud SQL database. The Google apps domain is prefixed if applicable. Can be omitted for update since it is already specified on the URL.
"instance": "A String", # The name of the Cloud SQL instance. This does not include the project ID. Can be omitted for update since it is already specified on the URL.
"host": "A String", # The host name from which the user can connect. For insert operations, host defaults to an empty string. For update operations, host is specified as part of the request URL. The host name cannot be updated after insertion.
- "etag": "A String", # HTTP 1.1 Entity tag for the resource.
+ "etag": "A String", # This field is deprecated and will be removed from a future version of the API.
"password": "A String", # The password for the user.
},
],
@@ -273,41 +283,41 @@
</div>
<div class="method">
- <code class="details" id="update">update(project, instance, host, name, body)</code>
+ <code class="details" id="update">update(project, instance, name, body, host=None)</code>
<pre>Updates an existing user in a Cloud SQL instance.
Args:
project: string, Project ID of the project that contains the instance. (required)
instance: string, Database instance ID. This does not include the project ID. (required)
- host: string, Host of the user in the instance. (required)
name: string, Name of the user in the instance. (required)
body: object, The request body. (required)
The object takes the form of:
{ # A Cloud SQL user resource.
"kind": "sql#user", # This is always sql#user.
- "name": "A String", # The name of the user in the Cloud SQL instance. Can be omitted for update since it is already specified on the URL.
+ "name": "A String", # The name of the user in the Cloud SQL instance. Can be omitted for update since it is already specified in the URL.
"project": "A String", # The project ID of the project containing the Cloud SQL database. The Google apps domain is prefixed if applicable. Can be omitted for update since it is already specified on the URL.
"instance": "A String", # The name of the Cloud SQL instance. This does not include the project ID. Can be omitted for update since it is already specified on the URL.
"host": "A String", # The host name from which the user can connect. For insert operations, host defaults to an empty string. For update operations, host is specified as part of the request URL. The host name cannot be updated after insertion.
- "etag": "A String", # HTTP 1.1 Entity tag for the resource.
+ "etag": "A String", # This field is deprecated and will be removed from a future version of the API.
"password": "A String", # The password for the user.
}
+ host: string, Host of the user in the instance.
Returns:
An object of the form:
- { # An Operations resource contains information about database instance operations such as create, delete, and restart. Operations resources are created in response to operations that were initiated; you never create them directly.
+ { # An Operation resource. For successful operations that return an Operation resource, only the fields relevant to the operation are populated in the resource.
"status": "A String", # The status of an operation. Valid values are PENDING, RUNNING, DONE, UNKNOWN.
"importContext": { # Database instance import context. # The context for import operation, if applicable.
"kind": "sql#importContext", # This is always sql#importContext.
- "database": "A String", # The database (for example, guestbook) to which the import is made. If fileType is SQL and no database is specified, it is assumed that the database is specified in the file to be imported. If fileType is CSV, it must be specified.
+ "database": "A String", # The target database for the import. If fileType is SQL, this field is required only if the import file does not specify a database, and is overridden by any database specification in the import file. If fileType is CSV, one database must be specified.
"fileType": "A String", # The file type for the specified uri.
# SQL: The file contains SQL statements.
# CSV: The file contains CSV data.
- "uri": "A String", # A path to the file in Google Cloud Storage from which the import is made. The URI is in the form gs://bucketName/fileName. Compressed gzip files (.gz) are supported when fileType is SQL.
- "importUser": "A String", # The PostgreSQL user to use for this import operation. Defaults to cloudsqlsuperuser. Does not apply to MySQL instances.
+ "uri": "A String", # Path to the import file in Cloud Storage, in the form gs://bucketName/fileName. Compressed gzip files (.gz) are supported when fileType is SQL. The instance must have write permissions to the bucket and read access to the file.
+ "importUser": "A String", # The PostgreSQL user for this import operation. PostgreSQL instances only.
"csvImportOptions": { # Options for importing data as CSV.
"table": "A String", # The table to which CSV data is imported.
"columns": [ # The columns to which CSV data is imported. If not specified, all columns of the database table are loaded with CSV data.
@@ -322,17 +332,22 @@
"fileType": "A String", # The file type for the specified uri.
# SQL: The file contains SQL statements.
# CSV: The file contains CSV data.
- "uri": "A String", # The path to the file in Google Cloud Storage where the export will be stored. The URI is in the form gs://bucketName/fileName. If the file already exists, the operation fails. If fileType is SQL and the filename ends with .gz, the contents are compressed.
+   "uri": "A String", # The path to the file in Google Cloud Storage where the export will be stored. The URI is in the form gs://bucketName/fileName. If the file already exists, the request succeeds, but the operation fails. If fileType is SQL and the filename ends with .gz, the contents are compressed.
"csvExportOptions": { # Options for exporting data as CSV.
"selectQuery": "A String", # The select query used to extract the data.
},
- "databases": [ # Databases (for example, guestbook) from which the export is made. If fileType is SQL and no database is specified, all databases are exported. If fileType is CSV, you can optionally specify at most one database to export. If csvExportOptions.selectQuery also specifies the database, this field will be ignored.
+ "databases": [ # Databases to be exported.
+ # MySQL instances: If fileType is SQL and no database is specified, all databases are exported, except for the mysql system database. If fileType is CSV, you can specify one database, either by using this property or by using the csvExportOptions.selectQuery property, which takes precedence over this property.
+ # PostgreSQL instances: Specify exactly one database to be exported. If fileType is CSV, this database must match the database used in the csvExportOptions.selectQuery property.
"A String",
],
"sqlExportOptions": { # Options for exporting data as SQL statements.
- "tables": [ # Tables to export, or that were exported, from the specified database. If you specify tables, specify one and only one database.
+ "tables": [ # Tables to export, or that were exported, from the specified database. If you specify tables, specify one and only one database. For PostgreSQL instances, you can specify only one table.
"A String",
],
+ "mysqlExportOptions": { # Options for exporting from MySQL.
+     "masterData": 42, # Option to include the SQL statement required to set up replication. If set to 1, the dump file includes a CHANGE MASTER TO statement with the binary log coordinates. If set to 2, the CHANGE MASTER TO statement is written as a SQL comment, and has no effect. All other values are ignored.
+ },
"schemaOnly": True or False, # Export only schemas.
},
},
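
For reference, a hedged usage sketch of the regenerated update() signature shown in this page, where host moves from a required positional parameter to an optional keyword; the project, instance, user name, and password values are placeholders.

```python
# Usage sketch against the regenerated sqladmin_v1beta4 client; the project,
# instance, user name, and password below are placeholders.
from googleapiclient.discovery import build

service = build("sqladmin", "v1beta4")

body = {
    "name": "app-user",         # may be omitted; already given in the URL
    "password": "new-password",
}

# host is now optional: leave it off (host=None) unless the user is scoped
# to a specific host, e.g. host="%".
operation = service.users().update(
    project="my-project",
    instance="my-instance",
    name="app-user",
    body=body,
).execute()

print(operation["status"])  # PENDING, RUNNING, DONE, or UNKNOWN
```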