chore: regens API reference docs (#889)
diff --git a/docs/dyn/spanner_v1.projects.instances.databases.sessions.html b/docs/dyn/spanner_v1.projects.instances.databases.sessions.html
index 5794f13..59ece63 100644
--- a/docs/dyn/spanner_v1.projects.instances.databases.sessions.html
+++ b/docs/dyn/spanner_v1.projects.instances.databases.sessions.html
@@ -75,25 +75,28 @@
<h1><a href="spanner_v1.html">Cloud Spanner API</a> . <a href="spanner_v1.projects.html">projects</a> . <a href="spanner_v1.projects.instances.html">instances</a> . <a href="spanner_v1.projects.instances.databases.html">databases</a> . <a href="spanner_v1.projects.instances.databases.sessions.html">sessions</a></h1>
<h2>Instance Methods</h2>
<p class="toc_element">
- <code><a href="#beginTransaction">beginTransaction(session, body, x__xgafv=None)</a></code></p>
+ <code><a href="#batchCreate">batchCreate(database, body=None, x__xgafv=None)</a></code></p>
+<p class="firstline">Creates multiple new sessions.</p>
+<p class="toc_element">
+ <code><a href="#beginTransaction">beginTransaction(session, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Begins a new transaction. This step can often be skipped:</p>
<p class="toc_element">
- <code><a href="#commit">commit(session, body, x__xgafv=None)</a></code></p>
+ <code><a href="#commit">commit(session, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Commits a transaction. The request includes the mutations to be</p>
<p class="toc_element">
- <code><a href="#create">create(database, body, x__xgafv=None)</a></code></p>
+ <code><a href="#create">create(database, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Creates a new session. A session can be used to perform</p>
<p class="toc_element">
<code><a href="#delete">delete(name, x__xgafv=None)</a></code></p>
<p class="firstline">Ends a session, releasing server resources associated with it. This will</p>
<p class="toc_element">
- <code><a href="#executeBatchDml">executeBatchDml(session, body, x__xgafv=None)</a></code></p>
+ <code><a href="#executeBatchDml">executeBatchDml(session, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Executes a batch of SQL DML statements. This method allows many statements</p>
<p class="toc_element">
- <code><a href="#executeSql">executeSql(session, body, x__xgafv=None)</a></code></p>
+ <code><a href="#executeSql">executeSql(session, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Executes an SQL statement, returning all results in a single reply. This</p>
<p class="toc_element">
- <code><a href="#executeStreamingSql">executeStreamingSql(session, body, x__xgafv=None)</a></code></p>
+ <code><a href="#executeStreamingSql">executeStreamingSql(session, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Like ExecuteSql, except returns the result</p>
<p class="toc_element">
<code><a href="#get">get(name, x__xgafv=None)</a></code></p>
@@ -105,23 +108,93 @@
<code><a href="#list_next">list_next(previous_request, previous_response)</a></code></p>
<p class="firstline">Retrieves the next page of results.</p>
<p class="toc_element">
- <code><a href="#partitionQuery">partitionQuery(session, body, x__xgafv=None)</a></code></p>
+ <code><a href="#partitionQuery">partitionQuery(session, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Creates a set of partition tokens that can be used to execute a query</p>
<p class="toc_element">
- <code><a href="#partitionRead">partitionRead(session, body, x__xgafv=None)</a></code></p>
+ <code><a href="#partitionRead">partitionRead(session, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Creates a set of partition tokens that can be used to execute a read</p>
<p class="toc_element">
- <code><a href="#read">read(session, body, x__xgafv=None)</a></code></p>
+ <code><a href="#read">read(session, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Reads rows from the database using key lookups and scans, as a</p>
<p class="toc_element">
- <code><a href="#rollback">rollback(session, body, x__xgafv=None)</a></code></p>
+ <code><a href="#rollback">rollback(session, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Rolls back a transaction, releasing any locks it holds. It is a good</p>
<p class="toc_element">
- <code><a href="#streamingRead">streamingRead(session, body, x__xgafv=None)</a></code></p>
+ <code><a href="#streamingRead">streamingRead(session, body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Like Read, except returns the result set as a</p>
<h3>Method Details</h3>
<div class="method">
- <code class="details" id="beginTransaction">beginTransaction(session, body, x__xgafv=None)</code>
+ <code class="details" id="batchCreate">batchCreate(database, body=None, x__xgafv=None)</code>
+ <pre>Creates multiple new sessions.
+
+This API can be used to initialize a session cache on the clients.
+See https://goo.gl/TgSFN2 for best practices on session cache management.
+
+Args:
+ database: string, Required. The database in which the new sessions are created. (required)
+ body: object, The request body.
+ The object takes the form of:
+
+{ # The request for BatchCreateSessions.
+ "sessionTemplate": { # A session in the Cloud Spanner API. # Parameters to be applied to each created session.
+ "labels": { # The labels for the session.
+ #
+ # * Label keys must be between 1 and 63 characters long and must conform to
+ # the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.
+ # * Label values must be between 0 and 63 characters long and must conform
+ # to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
+ # * No more than 64 labels can be associated with a given session.
+ #
+ # See https://goo.gl/xmQnxf for more information on and examples of labels.
+ "a_key": "A String",
+ },
+ "name": "A String", # The name of the session. This is always system-assigned; values provided
+ # when creating a session are ignored.
+ "approximateLastUseTime": "A String", # Output only. The approximate timestamp when the session is last used. It is
+ # typically earlier than the actual last use time.
+ "createTime": "A String", # Output only. The timestamp when the session is created.
+ },
+ "sessionCount": 42, # Required. The number of sessions to be created in this batch call.
+ # The API may return fewer than the requested number of sessions. If a
+ # specific number of sessions are desired, the client can make additional
+ # calls to BatchCreateSessions (adjusting
+ # session_count as necessary).
+ }
+
+ x__xgafv: string, V1 error format.
+ Allowed values
+ 1 - v1 error format
+ 2 - v2 error format
+
+Returns:
+ An object of the form:
+
+ { # The response for BatchCreateSessions.
+ "session": [ # The freshly created sessions.
+ { # A session in the Cloud Spanner API.
+ "labels": { # The labels for the session.
+ #
+ # * Label keys must be between 1 and 63 characters long and must conform to
+ # the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.
+ # * Label values must be between 0 and 63 characters long and must conform
+ # to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
+ # * No more than 64 labels can be associated with a given session.
+ #
+ # See https://goo.gl/xmQnxf for more information on and examples of labels.
+ "a_key": "A String",
+ },
+ "name": "A String", # The name of the session. This is always system-assigned; values provided
+ # when creating a session are ignored.
+ "approximateLastUseTime": "A String", # Output only. The approximate timestamp when the session is last used. It is
+ # typically earlier than the actual last use time.
+ "createTime": "A String", # Output only. The timestamp when the session is created.
+ },
+ ],
+ }</pre>
+</div>
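A minimal sketch of calling the new `batchCreate` method through the generated Python client; the project, instance, and database names are placeholders, and an authorized client from `googleapiclient.discovery.build` is assumed:

```python
from googleapiclient import discovery

# Assumes application-default credentials are available in the environment.
service = discovery.build('spanner', 'v1')
sessions = service.projects().instances().databases().sessions()

# Placeholder database resource name.
database = 'projects/my-project/instances/my-instance/databases/my-db'

# Seed a small client-side session cache with one batch call.
resp = sessions.batchCreate(
    database=database,
    body={'sessionCount': 3},
).execute()
session_names = [s['name'] for s in resp.get('session', [])]
```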
+
+<div class="method">
+ <code class="details" id="beginTransaction">beginTransaction(session, body=None, x__xgafv=None)</code>
<pre>Begins a new transaction. This step can often be skipped:
Read, ExecuteSql and
Commit can begin a new transaction as a
@@ -129,7 +202,7 @@
Args:
session: string, Required. The session in which the transaction runs. (required)
- body: object, The request body. (required)
+ body: object, The request body.
The object takes the form of:
{ # The request for BeginTransaction.
@@ -297,11 +370,11 @@
# These timestamp bounds execute reads at a user-specified
# timestamp. Reads at a timestamp are guaranteed to see a consistent
# prefix of the global transaction history: they observe
- # modifications done by all transactions with a commit timestamp <=
+ # modifications done by all transactions with a commit timestamp &lt;=
# the read timestamp, and observe none of the modifications done by
# transactions with a larger commit timestamp. They will block until
# all conflicting transactions that may be assigned commit timestamps
- # <= the read timestamp have finished.
+ # &lt;= the read timestamp have finished.
#
# The timestamp can either be expressed as an absolute Cloud Spanner commit
# timestamp or a staleness relative to the current time.
@@ -422,7 +495,7 @@
# Authorization to begin a read-only transaction requires
# `spanner.databases.beginReadOnlyTransaction` permission
# on the `session` resource.
- "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
+ "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
#
# This is useful for requesting fresher data than some previous
# read, or data that is fresh enough to observe the effects of some
@@ -432,9 +505,19 @@
#
# A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
# Example: `"2014-10-02T15:01:23.045123456Z"`.
- "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
- # the Transaction message that describes the transaction.
- "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
+ "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
+ # reads at a specific timestamp are repeatable; the same read at
+ # the same timestamp always returns the same data. If the
+ # timestamp is in the future, the read will block until the
+ # specified timestamp, modulo the read's deadline.
+ #
+ # Useful for large scale consistent reads such as mapreduces, or
+ # for coordinating many reads against a consistent snapshot of the
+ # data.
+ #
+ # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
+ # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
# seconds. Guarantees that all writes that have committed more
# than the specified number of seconds ago are visible. Because
# Cloud Spanner chooses the exact timestamp, this mode works even if
@@ -458,18 +541,8 @@
#
# Useful for reading at nearby replicas without the distributed
# timestamp negotiation overhead of `max_staleness`.
- "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
- # reads at a specific timestamp are repeatable; the same read at
- # the same timestamp always returns the same data. If the
- # timestamp is in the future, the read will block until the
- # specified timestamp, modulo the read's deadline.
- #
- # Useful for large scale consistent reads such as mapreduces, or
- # for coordinating many reads against a consistent snapshot of the
- # data.
- #
- # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
- # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
+ # the Transaction message that describes the transaction.
"strong": True or False, # Read at a timestamp where all previously committed transactions
# are visible.
},
@@ -509,7 +582,7 @@
</div>
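A sketch of explicitly beginning a strong read-only transaction with the same generated client (the session name is a placeholder):

```python
from googleapiclient import discovery

service = discovery.build('spanner', 'v1')  # assumes default credentials
sessions = service.projects().instances().databases().sessions()
session = 'projects/my-project/instances/my-instance/databases/my-db/sessions/my-session'  # placeholder

# Begin a strong read-only transaction; the returned id can be passed to later
# read/executeSql calls through their transaction selector.
txn = sessions.beginTransaction(
    session=session,
    body={'options': {'readOnly': {'strong': True}}},
).execute()
txn_id = txn['id']
```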
<div class="method">
- <code class="details" id="commit">commit(session, body, x__xgafv=None)</code>
+ <code class="details" id="commit">commit(session, body=None, x__xgafv=None)</code>
<pre>Commits a transaction. The request includes the mutations to be
applied to rows in the database.
@@ -521,7 +594,7 @@
Args:
session: string, Required. The session in which the transaction to be committed is running. (required)
- body: object, The request body. (required)
+ body: object, The request body.
The object takes the form of:
{ # The request for Commit.
@@ -560,6 +633,11 @@
# deleted, and the column values provided are inserted
# instead. Unlike insert_or_update, this means any values not
# explicitly written become `NULL`.
+ #
+ # In an interleaved table, if you create the child table with the
+ # `ON DELETE CASCADE` annotation, then replacing a parent row
+ # also deletes the child rows. Otherwise, you must delete the
+ # child rows before you replace the parent row.
# replace operations.
"table": "A String", # Required. The table whose rows will be written.
"values": [ # The values to be written. `values` can contain more than one
@@ -585,6 +663,10 @@
"insertOrUpdate": { # Arguments to insert, update, insert_or_update, and # Like insert, except that if the row already exists, then
# its column values are overwritten with the ones provided. Any
# column values not explicitly written are preserved.
+ #
+ # When using insert_or_update, just as when using insert, all `NOT
+ # NULL` columns in the table must be given a value. This holds true
+ # even when the row already exists and will therefore actually be updated.
# replace operations.
"table": "A String", # Required. The table whose rows will be written.
"values": [ # The values to be written. `values` can contain more than one
@@ -634,7 +716,10 @@
"delete": { # Arguments to delete operations. # Delete rows from a table. Succeeds whether or not the named
# rows were present.
"table": "A String", # Required. The table whose rows will be deleted.
- "keySet": { # `KeySet` defines a collection of Cloud Spanner keys and/or key ranges. All # Required. The primary keys of the rows within table to delete.
+ "keySet": { # `KeySet` defines a collection of Cloud Spanner keys and/or key ranges. All # Required. The primary keys of the rows within table to delete. The
+ # primary keys must be specified in the order in which they appear in the
+ # `PRIMARY KEY()` clause of the table's equivalent DDL statement (the DDL
+ # statement used to create the table).
# Delete is idempotent. The transaction will succeed even if some or all
# rows do not exist.
# the keys are expected to be in the same table or index. The keys need
@@ -937,11 +1022,11 @@
# These timestamp bounds execute reads at a user-specified
# timestamp. Reads at a timestamp are guaranteed to see a consistent
# prefix of the global transaction history: they observe
- # modifications done by all transactions with a commit timestamp <=
+ # modifications done by all transactions with a commit timestamp &lt;=
# the read timestamp, and observe none of the modifications done by
# transactions with a larger commit timestamp. They will block until
# all conflicting transactions that may be assigned commit timestamps
- # <= the read timestamp have finished.
+ # &lt;= the read timestamp have finished.
#
# The timestamp can either be expressed as an absolute Cloud Spanner commit
# timestamp or a staleness relative to the current time.
@@ -1062,7 +1147,7 @@
# Authorization to begin a read-only transaction requires
# `spanner.databases.beginReadOnlyTransaction` permission
# on the `session` resource.
- "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
+ "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
#
# This is useful for requesting fresher data than some previous
# read, or data that is fresh enough to observe the effects of some
@@ -1072,9 +1157,19 @@
#
# A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
# Example: `"2014-10-02T15:01:23.045123456Z"`.
- "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
- # the Transaction message that describes the transaction.
- "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
+ "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
+ # reads at a specific timestamp are repeatable; the same read at
+ # the same timestamp always returns the same data. If the
+ # timestamp is in the future, the read will block until the
+ # specified timestamp, modulo the read's deadline.
+ #
+ # Useful for large scale consistent reads such as mapreduces, or
+ # for coordinating many reads against a consistent snapshot of the
+ # data.
+ #
+ # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
+ # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
# seconds. Guarantees that all writes that have committed more
# than the specified number of seconds ago are visible. Because
# Cloud Spanner chooses the exact timestamp, this mode works even if
@@ -1098,18 +1193,8 @@
#
# Useful for reading at nearby replicas without the distributed
# timestamp negotiation overhead of `max_staleness`.
- "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
- # reads at a specific timestamp are repeatable; the same read at
- # the same timestamp always returns the same data. If the
- # timestamp is in the future, the read will block until the
- # specified timestamp, modulo the read's deadline.
- #
- # Useful for large scale consistent reads such as mapreduces, or
- # for coordinating many reads against a consistent snapshot of the
- # data.
- #
- # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
- # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
+ # the Transaction message that describes the transaction.
"strong": True or False, # Read at a timestamp where all previously committed transactions
# are visible.
},
@@ -1136,7 +1221,7 @@
</div>
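A sketch of committing a single-use read-write transaction carrying one `insertOrUpdate` mutation; the table, columns, and values are illustrative only, and INT64 values are sent as strings in the JSON encoding:

```python
from googleapiclient import discovery

service = discovery.build('spanner', 'v1')  # assumes default credentials
sessions = service.projects().instances().databases().sessions()
session = 'projects/my-project/instances/my-instance/databases/my-db/sessions/my-session'  # placeholder

sessions.commit(
    session=session,
    body={
        'singleUseTransaction': {'readWrite': {}},
        'mutations': [{
            'insertOrUpdate': {
                'table': 'Singers',
                'columns': ['SingerId', 'FirstName'],
                'values': [['1', 'Marc']],  # INT64 keys travel as strings
            },
        }],
    },
).execute()
```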
<div class="method">
- <code class="details" id="create">create(database, body, x__xgafv=None)</code>
+ <code class="details" id="create">create(database, body=None, x__xgafv=None)</code>
<pre>Creates a new session. A session can be used to perform
transactions that read and/or modify data in a Cloud Spanner database.
Sessions are meant to be reused for many consecutive
@@ -1148,9 +1233,9 @@
transaction internally, and count toward the one transaction
limit.
-Cloud Spanner limits the number of sessions that can exist at any given
-time; thus, it is a good idea to delete idle and/or unneeded sessions.
-Aside from explicit deletes, Cloud Spanner can delete sessions for which no
+Active sessions use additional server resources, so it is a good idea to
+delete idle and unneeded sessions.
+Aside from explicit deletes, Cloud Spanner may delete sessions for which no
operations are sent for more than an hour. If a session is deleted,
requests to it return `NOT_FOUND`.
@@ -1159,7 +1244,7 @@
Args:
database: string, Required. The database in which the new session is created. (required)
- body: object, The request body. (required)
+ body: object, The request body.
The object takes the form of:
{ # The request for CreateSession.
@@ -1240,40 +1325,38 @@
</div>
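A sketch of creating a single session; the labels are optional and shown only as an illustration:

```python
from googleapiclient import discovery

service = discovery.build('spanner', 'v1')  # assumes default credentials
sessions = service.projects().instances().databases().sessions()
database = 'projects/my-project/instances/my-instance/databases/my-db'  # placeholder

created = sessions.create(
    database=database,
    body={'session': {'labels': {'env': 'test'}}},
).execute()
session_name = created['name']  # system-assigned; any name in the request is ignored
```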
<div class="method">
- <code class="details" id="executeBatchDml">executeBatchDml(session, body, x__xgafv=None)</code>
+ <code class="details" id="executeBatchDml">executeBatchDml(session, body=None, x__xgafv=None)</code>
<pre>Executes a batch of SQL DML statements. This method allows many statements
to be run with lower latency than submitting them sequentially with
ExecuteSql.
-Statements are executed in order, sequentially.
-ExecuteBatchDmlResponse will contain a
-ResultSet for each DML statement that has successfully executed. If a
-statement fails, its error status will be returned as part of the
-ExecuteBatchDmlResponse. Execution will
-stop at the first failed statement; the remaining statements will not run.
+Statements are executed in sequential order. A request can succeed even if
+a statement fails. The ExecuteBatchDmlResponse.status field in the
+response provides information about the statement that failed. Clients must
+inspect this field to determine whether an error occurred.
-ExecuteBatchDml is expected to return an OK status with a response even if
-there was an error while processing one of the DML statements. Clients must
-inspect response.status to determine if there were any errors while
-processing the request.
-
-See more details in
-ExecuteBatchDmlRequest and
-ExecuteBatchDmlResponse.
+Execution stops after the first failed statement; the remaining statements
+are not executed.
Args:
session: string, Required. The session in which the DML statements should be performed. (required)
- body: object, The request body. (required)
+ body: object, The request body.
The object takes the form of:
-{ # The request for ExecuteBatchDml
- "seqno": "A String", # A per-transaction sequence number used to identify this request. This is
- # used in the same space as the seqno in
- # ExecuteSqlRequest. See more details
- # in ExecuteSqlRequest.
- "transaction": { # This message is used to select the transaction in which a # The transaction to use. A ReadWrite transaction is required. Single-use
- # transactions are not supported (to avoid replay). The caller must either
- # supply an existing transaction ID or begin a new transaction.
+{ # The request for ExecuteBatchDml.
+ "seqno": "A String", # Required. A per-transaction sequence number used to identify this request. This field
+ # makes each request idempotent such that if the request is received multiple
+ # times, at most one will succeed.
+ #
+ # The sequence number must be monotonically increasing within the
+ # transaction. If a request arrives for the first time with an out-of-order
+ # sequence number, the transaction may be aborted. Replays of previously
+ # handled requests will yield the same response as the first execution.
+ "transaction": { # This message is used to select the transaction in which a # Required. The transaction to use. Must be a read-write transaction.
+ #
+ # To protect against replays, single-use transactions are not supported. The
+ # caller must either supply an existing transaction ID or begin a new
+ # transaction.
# Read or
# ExecuteSql call runs.
#
@@ -1444,11 +1527,11 @@
# These timestamp bounds execute reads at a user-specified
# timestamp. Reads at a timestamp are guaranteed to see a consistent
# prefix of the global transaction history: they observe
- # modifications done by all transactions with a commit timestamp <=
+ # modifications done by all transactions with a commit timestamp &lt;=
# the read timestamp, and observe none of the modifications done by
# transactions with a larger commit timestamp. They will block until
# all conflicting transactions that may be assigned commit timestamps
- # <= the read timestamp have finished.
+ # &lt;= the read timestamp have finished.
#
# The timestamp can either be expressed as an absolute Cloud Spanner commit
# timestamp or a staleness relative to the current time.
@@ -1569,7 +1652,7 @@
# Authorization to begin a read-only transaction requires
# `spanner.databases.beginReadOnlyTransaction` permission
# on the `session` resource.
- "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
+ "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
#
# This is useful for requesting fresher data than some previous
# read, or data that is fresh enough to observe the effects of some
@@ -1579,9 +1662,19 @@
#
# A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
# Example: `"2014-10-02T15:01:23.045123456Z"`.
- "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
- # the Transaction message that describes the transaction.
- "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
+ "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
+ # reads at a specific timestamp are repeatable; the same read at
+ # the same timestamp always returns the same data. If the
+ # timestamp is in the future, the read will block until the
+ # specified timestamp, modulo the read's deadline.
+ #
+ # Useful for large scale consistent reads such as mapreduces, or
+ # for coordinating many reads against a consistent snapshot of the
+ # data.
+ #
+ # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
+ # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
# seconds. Guarantees that all writes that have committed more
# than the specified number of seconds ago are visible. Because
# Cloud Spanner chooses the exact timestamp, this mode works even if
@@ -1605,18 +1698,8 @@
#
# Useful for reading at nearby replicas without the distributed
# timestamp negotiation overhead of `max_staleness`.
- "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
- # reads at a specific timestamp are repeatable; the same read at
- # the same timestamp always returns the same data. If the
- # timestamp is in the future, the read will block until the
- # specified timestamp, modulo the read's deadline.
- #
- # Useful for large scale consistent reads such as mapreduces, or
- # for coordinating many reads against a consistent snapshot of the
- # data.
- #
- # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
- # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
+ # the Transaction message that describes the transaction.
"strong": True or False, # Read at a timestamp where all previously committed transactions
# are visible.
},
@@ -1793,11 +1876,11 @@
# These timestamp bounds execute reads at a user-specified
# timestamp. Reads at a timestamp are guaranteed to see a consistent
# prefix of the global transaction history: they observe
- # modifications done by all transactions with a commit timestamp <=
+ # modifications done by all transactions with a commit timestamp &lt;=
# the read timestamp, and observe none of the modifications done by
# transactions with a larger commit timestamp. They will block until
# all conflicting transactions that may be assigned commit timestamps
- # <= the read timestamp have finished.
+ # &lt;= the read timestamp have finished.
#
# The timestamp can either be expressed as an absolute Cloud Spanner commit
# timestamp or a staleness relative to the current time.
@@ -1918,7 +2001,7 @@
# Authorization to begin a read-only transaction requires
# `spanner.databases.beginReadOnlyTransaction` permission
# on the `session` resource.
- "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
+ "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
#
# This is useful for requesting fresher data than some previous
# read, or data that is fresh enough to observe the effects of some
@@ -1928,9 +2011,19 @@
#
# A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
# Example: `"2014-10-02T15:01:23.045123456Z"`.
- "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
- # the Transaction message that describes the transaction.
- "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
+ "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
+ # reads at a specific timestamp are repeatable; the same read at
+ # the same timestamp always returns the same data. If the
+ # timestamp is in the future, the read will block until the
+ # specified timestamp, modulo the read's deadline.
+ #
+ # Useful for large scale consistent reads such as mapreduces, or
+ # for coordinating many reads against a consistent snapshot of the
+ # data.
+ #
+ # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
+ # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
# seconds. Guarantees that all writes that have committed more
# than the specified number of seconds ago are visible. Because
# Cloud Spanner chooses the exact timestamp, this mode works even if
@@ -1954,18 +2047,8 @@
#
# Useful for reading at nearby replicas without the distributed
# timestamp negotiation overhead of `max_staleness`.
- "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
- # reads at a specific timestamp are repeatable; the same read at
- # the same timestamp always returns the same data. If the
- # timestamp is in the future, the read will block until the
- # specified timestamp, modulo the read's deadline.
- #
- # Useful for large scale consistent reads such as mapreduces, or
- # for coordinating many reads against a consistent snapshot of the
- # data.
- #
- # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
- # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
+ # the Transaction message that describes the transaction.
"strong": True or False, # Read at a timestamp where all previously committed transactions
# are visible.
},
@@ -1978,12 +2061,12 @@
},
"id": "A String", # Execute the read or SQL query in a previously-started transaction.
},
- "statements": [ # The list of statements to execute in this batch. Statements are executed
- # serially, such that the effects of statement i are visible to statement
- # i+1. Each statement must be a DML statement. Execution will stop at the
- # first failed statement; the remaining statements will not run.
+ "statements": [ # Required. The list of statements to execute in this batch. Statements are executed
+ # serially, such that the effects of statement `i` are visible to statement
+ # `i+1`. Each statement must be a DML statement. Execution stops at the
+ # first failed statement; the remaining statements are not executed.
#
- # REQUIRES: `statements_size()` > 0.
+ # Callers must provide at least one statement.
{ # A single DML statement.
"paramTypes": { # It is not always possible for Cloud Spanner to infer the right SQL type
# from a JSON value. For example, values of type `BYTES` and values
@@ -1995,27 +2078,43 @@
# about SQL types.
"a_key": { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a
# table cell or returned from an SQL query.
- "structType": # Object with schema name: StructType # If code == STRUCT, then `struct_type`
+ "structType": { # `StructType` defines the fields of a STRUCT type. # If code == STRUCT, then `struct_type`
# provides type information for the struct's fields.
+ "fields": [ # The list of fields that make up this struct. Order is
+ # significant, because values of this struct type are represented as
+ # lists, where the order of field values matches the order of
+ # fields in the StructType. In turn, the order of fields
+ # matches the order of columns in a read request, or the order of
+ # fields in the `SELECT` clause of a query.
+ { # Message representing a single field of a struct.
+ "type": # Object with schema name: Type # The type of the field.
+ "name": "A String", # The name of the field. For reads, this is the column name. For
+ # SQL queries, it is the column alias (e.g., `"Word"` in the
+ # query `"SELECT 'hello' AS Word"`), or the column name (e.g.,
+ # `"ColName"` in the query `"SELECT ColName FROM Table"`). Some
+ # columns might have an empty name (e.g., !"SELECT
+ # UPPER(ColName)"`). Note that a query result can contain
+ # multiple fields with the same name.
+ },
+ ],
+ },
"code": "A String", # Required. The TypeCode for this type.
"arrayElementType": # Object with schema name: Type # If code == ARRAY, then `array_element_type`
# is the type of the array elements.
},
},
- "params": { # The DML string can contain parameter placeholders. A parameter
- # placeholder consists of `'@'` followed by the parameter
- # name. Parameter names consist of any combination of letters,
- # numbers, and underscores.
+ "params": { # Parameter names and values that bind to placeholders in the DML string.
+ #
+ # A parameter placeholder consists of the `@` character followed by the
+ # parameter name (for example, `@firstName`). Parameter names can contain
+ # letters, numbers, and underscores.
#
# Parameters can appear anywhere that a literal value is expected. The
# same parameter name can be used more than once, for example:
- # `"WHERE id > @msg_id AND id < @msg_id + 100"`
#
- # It is an error to execute an SQL statement with unbound parameters.
+ # `"WHERE id > @msg_id AND id < @msg_id + 100"`
#
- # Parameter values are specified using `params`, which is a JSON
- # object whose keys are parameter names, and whose values are the
- # corresponding parameter values.
+ # It is an error to execute a SQL statement with unbound parameters.
"a_key": "", # Properties of the object.
},
"sql": "A String", # Required. The DML string.
@@ -2032,79 +2131,37 @@
An object of the form:
{ # The response for ExecuteBatchDml. Contains a list
- # of ResultSet, one for each DML statement that has successfully executed.
- # If a statement fails, the error is returned as part of the response payload.
- # Clients can determine whether all DML statements have run successfully, or if
- # a statement failed, using one of the following approaches:
+ # of ResultSet messages, one for each DML statement that has successfully
+ # executed, in the same order as the statements in the request. If a statement
+ # fails, the status in the response body identifies the cause of the failure.
#
- # 1. Check if `'status'` field is `OkStatus`.
- # 2. Check if `result_sets_size()` equals the number of statements in
- # ExecuteBatchDmlRequest.
+ # To check for DML statements that failed, use the following approach:
#
- # Example 1: A request with 5 DML statements, all executed successfully.
+ # 1. Check the status in the response message. The google.rpc.Code enum
+ # value `OK` indicates that all statements were executed successfully.
+ # 2. If the status was not `OK`, check the number of result sets in the
+ # response. If the response contains `N` ResultSet messages, then
+ # statement `N+1` in the request failed.
#
- # Result: A response with 5 ResultSets, one for each statement in the same
- # order, and an `OkStatus`.
+ # Example 1:
#
- # Example 2: A request with 5 DML statements. The 3rd statement has a syntax
- # error.
+ # * Request: 5 DML statements, all executed successfully.
+ # * Response: 5 ResultSet messages, with the status `OK`.
#
- # Result: A response with 2 ResultSets, for the first 2 statements that
- # run successfully, and a syntax error (`INVALID_ARGUMENT`) status. From
- # `result_set_size()` client can determine that the 3rd statement has failed.
- "status": { # The `Status` type defines a logical error model that is suitable for # If all DML statements are executed successfully, status will be OK.
+ # Example 2:
+ #
+ # * Request: 5 DML statements. The third statement has a syntax error.
+ # * Response: 2 ResultSet messages, and a syntax error (`INVALID_ARGUMENT`)
+ # status. The number of ResultSet messages indicates that the third
+ # statement failed, and the fourth and fifth statements were not executed.
+ "status": { # The `Status` type defines a logical error model that is suitable for # If all DML statements are executed successfully, the status is `OK`.
# Otherwise, the error status of the first failed statement.
# different programming environments, including REST APIs and RPC APIs. It is
- # used by [gRPC](https://github.com/grpc). The error model is designed to be:
+ # used by [gRPC](https://github.com/grpc). Each `Status` message contains
+ # three pieces of data: error code, error message, and error details.
#
- # - Simple to use and understand for most users
- # - Flexible enough to meet unexpected needs
- #
- # # Overview
- #
- # The `Status` message contains three pieces of data: error code, error
- # message, and error details. The error code should be an enum value of
- # google.rpc.Code, but it may accept additional error codes if needed. The
- # error message should be a developer-facing English message that helps
- # developers *understand* and *resolve* the error. If a localized user-facing
- # error message is needed, put the localized message in the error details or
- # localize it in the client. The optional error details may contain arbitrary
- # information about the error. There is a predefined set of error detail types
- # in the package `google.rpc` that can be used for common error conditions.
- #
- # # Language mapping
- #
- # The `Status` message is the logical representation of the error model, but it
- # is not necessarily the actual wire format. When the `Status` message is
- # exposed in different client libraries and different wire protocols, it can be
- # mapped differently. For example, it will likely be mapped to some exceptions
- # in Java, but more likely mapped to some error codes in C.
- #
- # # Other uses
- #
- # The error model and the `Status` message can be used in a variety of
- # environments, either with or without APIs, to provide a
- # consistent developer experience across different environments.
- #
- # Example uses of this error model include:
- #
- # - Partial errors. If a service needs to return partial errors to the client,
- # it may embed the `Status` in the normal response to indicate the partial
- # errors.
- #
- # - Workflow errors. A typical workflow has multiple steps. Each step may
- # have a `Status` message for error reporting.
- #
- # - Batch operations. If a client uses batch request and batch response, the
- # `Status` message should be used directly inside batch response, one for
- # each error sub-response.
- #
- # - Asynchronous operations. If an API call embeds asynchronous operation
- # results in its response, the status of those operations should be
- # represented directly using the `Status` message.
- #
- # - Logging. If some API errors are stored in logs, the message `Status` could
- # be used directly after any stripping needed for security/privacy reasons.
+ # You can find out more about this error model and how to work with it in the
+ # [API Design Guide](https://cloud.google.com/apis/design/errors).
"message": "A String", # A developer-facing error message, which should be in English. Any
# user-facing error message should be localized and sent in the
# google.rpc.Status.details field, or localized by the client.
@@ -2116,12 +2173,12 @@
},
],
},
- "resultSets": [ # ResultSets, one for each statement in the request that ran successfully, in
- # the same order as the statements in the request. Each ResultSet will
- # not contain any rows. The ResultSetStats in each ResultSet will
- # contain the number of rows modified by the statement.
+ "resultSets": [ # One ResultSet for each statement in the request that ran successfully,
+ # in the same order as the statements in the request. Each ResultSet does
+ # not contain any rows. The ResultSetStats in each ResultSet contain
+ # the number of rows modified by the statement.
#
- # Only the first ResultSet in the response contains a valid
+ # Only the first ResultSet in the response contains valid
# ResultSetMetadata.
{ # Results from Read or
# ExecuteSql.
@@ -2175,16 +2232,16 @@
# created for each column that is read by the operator. The corresponding
# `variable` fields will be set to the variable names assigned to the
# columns.
- "childIndex": 42, # The node to which the link points.
"type": "A String", # The type of the link. For example, in Hash Joins this could be used to
# distinguish between the build child and the probe child, or in the case
# of the child being an output variable, to represent the tag associated
# with the output variable.
+ "childIndex": 42, # The node to which the link points.
},
],
"shortRepresentation": { # Condensed representation of a node and its subtree. Only present for # Condensed representation for SCALAR nodes.
# `SCALAR` PlanNode(s).
- "subqueries": { # A mapping of (subquery variable name) -> (subquery node id) for cases
+ "subqueries": { # A mapping of (subquery variable name) -> (subquery node id) for cases
# where the `description` string of this node references a `SCALAR`
# subquery contained in the expression subtree rooted at this node. The
# referenced `SCALAR` subquery may not necessarily be a direct child of
@@ -2234,14 +2291,7 @@
# matches the order of columns in a read request, or the order of
# fields in the `SELECT` clause of a query.
{ # Message representing a single field of a struct.
- "type": { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a # The type of the field.
- # table cell or returned from an SQL query.
- "structType": # Object with schema name: StructType # If code == STRUCT, then `struct_type`
- # provides type information for the struct's fields.
- "code": "A String", # Required. The TypeCode for this type.
- "arrayElementType": # Object with schema name: Type # If code == ARRAY, then `array_element_type`
- # is the type of the array elements.
- },
+ "type": # Object with schema name: Type # The type of the field.
"name": "A String", # The name of the field. For reads, this is the column name. For
# SQL queries, it is the column alias (e.g., `"Word"` in the
# query `"SELECT 'hello' AS Word"`), or the column name (e.g.,
@@ -2276,7 +2326,7 @@
</div>
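A sketch of running two DML statements with one `executeBatchDml` call inside a read-write transaction and then checking the per-request status; statement text and resource names are placeholders:

```python
from googleapiclient import discovery

service = discovery.build('spanner', 'v1')  # assumes default credentials
sessions = service.projects().instances().databases().sessions()
session = 'projects/my-project/instances/my-instance/databases/my-db/sessions/my-session'  # placeholder

txn = sessions.beginTransaction(
    session=session, body={'options': {'readWrite': {}}}).execute()

resp = sessions.executeBatchDml(
    session=session,
    body={
        'transaction': {'id': txn['id']},
        'seqno': '1',  # must increase monotonically within the transaction
        'statements': [
            {'sql': "UPDATE Singers SET FirstName = 'Marc' WHERE SingerId = 1"},
            {'sql': 'DELETE FROM Singers WHERE SingerId = 2'},
        ],
    },
).execute()

# Status code 0 (OK, usually omitted from the JSON) means every statement ran;
# otherwise len(resultSets) is the index of the first failed statement.
if resp.get('status', {}).get('code', 0) != 0:
    first_failed = len(resp.get('resultSets', []))

# Commit whatever work succeeded (or call rollback instead).
sessions.commit(session=session, body={'transactionId': txn['id']}).execute()
```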
<div class="method">
- <code class="details" id="executeSql">executeSql(session, body, x__xgafv=None)</code>
+ <code class="details" id="executeSql">executeSql(session, body=None, x__xgafv=None)</code>
<pre>Executes an SQL statement, returning all results in a single reply. This
method cannot be used to return a result set larger than 10 MiB;
if the query yields more data than that, the query fails with
@@ -2291,24 +2341,21 @@
Args:
session: string, Required. The session in which the SQL query should be performed. (required)
- body: object, The request body. (required)
+ body: object, The request body.
The object takes the form of:
{ # The request for ExecuteSql and
# ExecuteStreamingSql.
- "transaction": { # This message is used to select the transaction in which a # The transaction to use. If none is provided, the default is a
- # temporary read-only transaction with strong concurrency.
- #
- # The transaction to use.
+ "transaction": { # This message is used to select the transaction in which a # The transaction to use.
#
# For queries, if none is provided, the default is a temporary read-only
# transaction with strong concurrency.
#
- # Standard DML statements require a ReadWrite transaction. Single-use
- # transactions are not supported (to avoid replay). The caller must
- # either supply an existing transaction ID or begin a new transaction.
+ # Standard DML statements require a read-write transaction. To protect
+ # against replays, single-use transactions are not supported. The caller
+ # must either supply an existing transaction ID or begin a new transaction.
#
- # Partitioned DML requires an existing PartitionedDml transaction ID.
+ # Partitioned DML requires an existing Partitioned DML transaction ID.
# Read or
# ExecuteSql call runs.
#
@@ -2479,11 +2526,11 @@
# These timestamp bounds execute reads at a user-specified
# timestamp. Reads at a timestamp are guaranteed to see a consistent
# prefix of the global transaction history: they observe
- # modifications done by all transactions with a commit timestamp <=
+ # modifications done by all transactions with a commit timestamp &lt;=
# the read timestamp, and observe none of the modifications done by
# transactions with a larger commit timestamp. They will block until
# all conflicting transactions that may be assigned commit timestamps
- # <= the read timestamp have finished.
+ # &lt;= the read timestamp have finished.
#
# The timestamp can either be expressed as an absolute Cloud Spanner commit
# timestamp or a staleness relative to the current time.
@@ -2604,7 +2651,7 @@
# Authorization to begin a read-only transaction requires
# `spanner.databases.beginReadOnlyTransaction` permission
# on the `session` resource.
- "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
+ "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
#
# This is useful for requesting fresher data than some previous
# read, or data that is fresh enough to observe the effects of some
@@ -2614,9 +2661,19 @@
#
# A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
# Example: `"2014-10-02T15:01:23.045123456Z"`.
- "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
- # the Transaction message that describes the transaction.
- "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
+ "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
+ # reads at a specific timestamp are repeatable; the same read at
+ # the same timestamp always returns the same data. If the
+ # timestamp is in the future, the read will block until the
+ # specified timestamp, modulo the read's deadline.
+ #
+ # Useful for large scale consistent reads such as mapreduces, or
+ # for coordinating many reads against a consistent snapshot of the
+ # data.
+ #
+ # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
+ # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
# seconds. Guarantees that all writes that have committed more
# than the specified number of seconds ago are visible. Because
# Cloud Spanner chooses the exact timestamp, this mode works even if
@@ -2640,18 +2697,8 @@
#
# Useful for reading at nearby replicas without the distributed
# timestamp negotiation overhead of `max_staleness`.
- "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
- # reads at a specific timestamp are repeatable; the same read at
- # the same timestamp always returns the same data. If the
- # timestamp is in the future, the read will block until the
- # specified timestamp, modulo the read's deadline.
- #
- # Useful for large scale consistent reads such as mapreduces, or
- # for coordinating many reads against a consistent snapshot of the
- # data.
- #
- # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
- # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
+ # the Transaction message that describes the transaction.
"strong": True or False, # Read at a timestamp where all previously committed transactions
# are visible.
},
@@ -2828,11 +2875,11 @@
# These timestamp bounds execute reads at a user-specified
# timestamp. Reads at a timestamp are guaranteed to see a consistent
# prefix of the global transaction history: they observe
- # modifications done by all transactions with a commit timestamp <=
+ # modifications done by all transactions with a commit timestamp &lt;=
# the read timestamp, and observe none of the modifications done by
# transactions with a larger commit timestamp. They will block until
# all conflicting transactions that may be assigned commit timestamps
- # <= the read timestamp have finished.
+ # &lt;= the read timestamp have finished.
#
# The timestamp can either be expressed as an absolute Cloud Spanner commit
# timestamp or a staleness relative to the current time.
@@ -2953,7 +3000,7 @@
# Authorization to begin a read-only transaction requires
# `spanner.databases.beginReadOnlyTransaction` permission
# on the `session` resource.
- "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
+ "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
#
# This is useful for requesting fresher data than some previous
# read, or data that is fresh enough to observe the effects of some
@@ -2963,9 +3010,19 @@
#
# A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
# Example: `"2014-10-02T15:01:23.045123456Z"`.
- "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
- # the Transaction message that describes the transaction.
- "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
+ "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
+ # reads at a specific timestamp are repeatable; the same read at
+ # the same timestamp always returns the same data. If the
+ # timestamp is in the future, the read will block until the
+ # specified timestamp, modulo the read's deadline.
+ #
+ # Useful for large scale consistent reads such as mapreduces, or
+ # for coordinating many reads against a consistent snapshot of the
+ # data.
+ #
+ # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
+ # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
# seconds. Guarantees that all writes that have committed more
# than the specified number of seconds ago are visible. Because
# Cloud Spanner chooses the exact timestamp, this mode works even if
@@ -2989,18 +3046,8 @@
#
# Useful for reading at nearby replicas without the distributed
# timestamp negotiation overhead of `max_staleness`.
- "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
- # reads at a specific timestamp are repeatable; the same read at
- # the same timestamp always returns the same data. If the
- # timestamp is in the future, the read will block until the
- # specified timestamp, modulo the read's deadline.
- #
- # Useful for large scale consistent reads such as mapreduces, or
- # for coordinating many reads against a consistent snapshot of the
- # data.
- #
- # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
- # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
+ # the Transaction message that describes the transaction.
"strong": True or False, # Read at a timestamp where all previously committed transactions
# are visible.
},
@@ -3013,7 +3060,7 @@
},
"id": "A String", # Execute the read or SQL query in a previously-started transaction.
},
- "seqno": "A String", # A per-transaction sequence number used to identify this request. This
+ "seqno": "A String", # A per-transaction sequence number used to identify this request. This field
# makes each request idempotent such that if the request is received multiple
# times, at most one will succeed.
#
@@ -3043,33 +3090,67 @@
# about SQL types.
"a_key": { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a
# table cell or returned from an SQL query.
- "structType": # Object with schema name: StructType # If code == STRUCT, then `struct_type`
+ "structType": { # `StructType` defines the fields of a STRUCT type. # If code == STRUCT, then `struct_type`
# provides type information for the struct's fields.
+ "fields": [ # The list of fields that make up this struct. Order is
+ # significant, because values of this struct type are represented as
+ # lists, where the order of field values matches the order of
+ # fields in the StructType. In turn, the order of fields
+ # matches the order of columns in a read request, or the order of
+ # fields in the `SELECT` clause of a query.
+ { # Message representing a single field of a struct.
+ "type": # Object with schema name: Type # The type of the field.
+ "name": "A String", # The name of the field. For reads, this is the column name. For
+ # SQL queries, it is the column alias (e.g., `"Word"` in the
+ # query `"SELECT 'hello' AS Word"`), or the column name (e.g.,
+ # `"ColName"` in the query `"SELECT ColName FROM Table"`). Some
+ # columns might have an empty name (e.g., !"SELECT
+ # UPPER(ColName)"`). Note that a query result can contain
+ # multiple fields with the same name.
+ },
+ ],
+ },
"code": "A String", # Required. The TypeCode for this type.
"arrayElementType": # Object with schema name: Type # If code == ARRAY, then `array_element_type`
# is the type of the array elements.
},
},
- "queryMode": "A String", # Used to control the amount of debugging information returned in
- # ResultSetStats. If partition_token is set, query_mode can only
- # be set to QueryMode.NORMAL.
- "sql": "A String", # Required. The SQL string.
- "params": { # The SQL string can contain parameter placeholders. A parameter
- # placeholder consists of `'@'` followed by the parameter
- # name. Parameter names consist of any combination of letters,
- # numbers, and underscores.
+ "queryOptions": { # Query optimizer configuration. # Query optimizer configuration to use for the given query.
+ "optimizerVersion": "A String", # An option to control the selection of optimizer version.
+ #
+ # This parameter allows individual queries to pick different query
+ # optimizer versions.
+ #
+ # Specifying "latest" as a value instructs Cloud Spanner to use the
+ # latest supported query optimizer version. If not specified, Cloud Spanner
+ # uses optimizer version set at the database level options. Any other
+ # positive integer (from the list of supported optimizer versions)
+ # overrides the default optimizer version for query execution.
+ # The list of supported optimizer versions can be queried from
+ # SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS. Executing a SQL statement
+ # with an invalid optimizer version will fail with a syntax error
+ # (`INVALID_ARGUMENT`) status.
+ #
+ # The `optimizer_version` statement hint has precedence over this setting.
+ },
+ "params": { # Parameter names and values that bind to placeholders in the SQL string.
+ #
+ # A parameter placeholder consists of the `@` character followed by the
+ # parameter name (for example, `@firstName`). Parameter names can contain
+ # letters, numbers, and underscores.
#
# Parameters can appear anywhere that a literal value is expected. The same
# parameter name can be used more than once, for example:
- # `"WHERE id > @msg_id AND id < @msg_id + 100"`
#
- # It is an error to execute an SQL statement with unbound parameters.
+ # `"WHERE id > @msg_id AND id < @msg_id + 100"`
#
- # Parameter values are specified using `params`, which is a JSON
- # object whose keys are parameter names, and whose values are the
- # corresponding parameter values.
+ # It is an error to execute a SQL statement with unbound parameters.
"a_key": "", # Properties of the object.
},
+ "sql": "A String", # Required. The SQL string.
+ "queryMode": "A String", # Used to control the amount of debugging information returned in
+ # ResultSetStats. If partition_token is set, query_mode can only
+ # be set to QueryMode.NORMAL.
}
x__xgafv: string, V1 error format.
@@ -3132,16 +3213,16 @@
# created for each column that is read by the operator. The corresponding
# `variable` fields will be set to the variable names assigned to the
# columns.
- "childIndex": 42, # The node to which the link points.
"type": "A String", # The type of the link. For example, in Hash Joins this could be used to
# distinguish between the build child and the probe child, or in the case
# of the child being an output variable, to represent the tag associated
# with the output variable.
+ "childIndex": 42, # The node to which the link points.
},
],
"shortRepresentation": { # Condensed representation of a node and its subtree. Only present for # Condensed representation for SCALAR nodes.
# `SCALAR` PlanNode(s).
- "subqueries": { # A mapping of (subquery variable name) -> (subquery node id) for cases
+ "subqueries": { # A mapping of (subquery variable name) -> (subquery node id) for cases
# where the `description` string of this node references a `SCALAR`
# subquery contained in the expression subtree rooted at this node. The
# referenced `SCALAR` subquery may not necessarily be a direct child of
@@ -3191,14 +3272,7 @@
# matches the order of columns in a read request, or the order of
# fields in the `SELECT` clause of a query.
{ # Message representing a single field of a struct.
- "type": { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a # The type of the field.
- # table cell or returned from an SQL query.
- "structType": # Object with schema name: StructType # If code == STRUCT, then `struct_type`
- # provides type information for the struct's fields.
- "code": "A String", # Required. The TypeCode for this type.
- "arrayElementType": # Object with schema name: Type # If code == ARRAY, then `array_element_type`
- # is the type of the array elements.
- },
+ "type": # Object with schema name: Type # The type of the field.
"name": "A String", # The name of the field. For reads, this is the column name. For
# SQL queries, it is the column alias (e.g., `"Word"` in the
# query `"SELECT 'hello' AS Word"`), or the column name (e.g.,
@@ -3231,7 +3305,7 @@
</div>
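<p>As a concrete illustration of the <code>ExecuteSql</code> request documented in this section, the sketch below binds a parameter, declares its type, and overrides the optimizer version through the discovery-based Python client. This is a minimal sketch: the project, instance, database, table, and column names are placeholders, and <code>build()</code> is assumed to pick up Application Default Credentials.</p>
<pre>
from googleapiclient.discovery import build

spanner = build('spanner', 'v1')
sessions = spanner.projects().instances().databases().sessions()

# Placeholder resource name -- substitute your own project/instance/database.
database = 'projects/my-project/instances/my-instance/databases/my-db'
session = sessions.create(database=database, body={}).execute()

body = {
    'sql': 'SELECT SingerId, FirstName FROM Singers WHERE FirstName = @firstName',
    'params': {'firstName': 'Alice'},                 # binds @firstName in the SQL string
    'paramTypes': {'firstName': {'code': 'STRING'}},  # optional explicit type declaration
    'queryOptions': {'optimizerVersion': 'latest'},   # or a specific supported version
    'queryMode': 'NORMAL',
}
result = sessions.executeSql(session=session['name'], body=body).execute()
for row in result.get('rows', []):
    print(row)
</pre>
<p>Leaving out <code>transaction</code> runs the query in the default temporary read-only transaction with strong concurrency, as described above.</p>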
<div class="method">
- <code class="details" id="executeStreamingSql">executeStreamingSql(session, body, x__xgafv=None)</code>
+ <code class="details" id="executeStreamingSql">executeStreamingSql(session, body=None, x__xgafv=None)</code>
<pre>Like ExecuteSql, except returns the result
set as a stream. Unlike ExecuteSql, there
is no limit on the size of the returned result set. However, no
@@ -3240,24 +3314,21 @@
Args:
session: string, Required. The session in which the SQL query should be performed. (required)
- body: object, The request body. (required)
+ body: object, The request body.
The object takes the form of:
{ # The request for ExecuteSql and
# ExecuteStreamingSql.
- "transaction": { # This message is used to select the transaction in which a # The transaction to use. If none is provided, the default is a
- # temporary read-only transaction with strong concurrency.
- #
- # The transaction to use.
+ "transaction": { # This message is used to select the transaction in which a # The transaction to use.
#
# For queries, if none is provided, the default is a temporary read-only
# transaction with strong concurrency.
#
- # Standard DML statements require a ReadWrite transaction. Single-use
- # transactions are not supported (to avoid replay). The caller must
- # either supply an existing transaction ID or begin a new transaction.
+ # Standard DML statements require a read-write transaction. To protect
+ # against replays, single-use transactions are not supported. The caller
+ # must either supply an existing transaction ID or begin a new transaction.
#
- # Partitioned DML requires an existing PartitionedDml transaction ID.
+ # Partitioned DML requires an existing Partitioned DML transaction ID.
# Read or
# ExecuteSql call runs.
#
@@ -3428,11 +3499,11 @@
# These timestamp bounds execute reads at a user-specified
# timestamp. Reads at a timestamp are guaranteed to see a consistent
# prefix of the global transaction history: they observe
- # modifications done by all transactions with a commit timestamp <=
+ # modifications done by all transactions with a commit timestamp <=
# the read timestamp, and observe none of the modifications done by
# transactions with a larger commit timestamp. They will block until
# all conflicting transactions that may be assigned commit timestamps
- # <= the read timestamp have finished.
+ # <= the read timestamp have finished.
#
# The timestamp can either be expressed as an absolute Cloud Spanner commit
# timestamp or a staleness relative to the current time.
@@ -3553,7 +3624,7 @@
# Authorization to begin a read-only transaction requires
# `spanner.databases.beginReadOnlyTransaction` permission
# on the `session` resource.
- "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
+ "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
#
# This is useful for requesting fresher data than some previous
# read, or data that is fresh enough to observe the effects of some
@@ -3563,9 +3634,19 @@
#
# A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
# Example: `"2014-10-02T15:01:23.045123456Z"`.
- "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
- # the Transaction message that describes the transaction.
- "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
+ "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
+ # reads at a specific timestamp are repeatable; the same read at
+ # the same timestamp always returns the same data. If the
+ # timestamp is in the future, the read will block until the
+ # specified timestamp, modulo the read's deadline.
+ #
+ # Useful for large scale consistent reads such as mapreduces, or
+ # for coordinating many reads against a consistent snapshot of the
+ # data.
+ #
+ # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
+ # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
# seconds. Guarantees that all writes that have committed more
# than the specified number of seconds ago are visible. Because
# Cloud Spanner chooses the exact timestamp, this mode works even if
@@ -3589,18 +3670,8 @@
#
# Useful for reading at nearby replicas without the distributed
# timestamp negotiation overhead of `max_staleness`.
- "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
- # reads at a specific timestamp are repeatable; the same read at
- # the same timestamp always returns the same data. If the
- # timestamp is in the future, the read will block until the
- # specified timestamp, modulo the read's deadline.
- #
- # Useful for large scale consistent reads such as mapreduces, or
- # for coordinating many reads against a consistent snapshot of the
- # data.
- #
- # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
- # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
+ # the Transaction message that describes the transaction.
"strong": True or False, # Read at a timestamp where all previously committed transactions
# are visible.
},
@@ -3777,11 +3848,11 @@
# These timestamp bounds execute reads at a user-specified
# timestamp. Reads at a timestamp are guaranteed to see a consistent
# prefix of the global transaction history: they observe
- # modifications done by all transactions with a commit timestamp <=
+ # modifications done by all transactions with a commit timestamp <=
# the read timestamp, and observe none of the modifications done by
# transactions with a larger commit timestamp. They will block until
# all conflicting transactions that may be assigned commit timestamps
- # <= the read timestamp have finished.
+ # <= the read timestamp have finished.
#
# The timestamp can either be expressed as an absolute Cloud Spanner commit
# timestamp or a staleness relative to the current time.
@@ -3902,7 +3973,7 @@
# Authorization to begin a read-only transaction requires
# `spanner.databases.beginReadOnlyTransaction` permission
# on the `session` resource.
- "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
+ "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
#
# This is useful for requesting fresher data than some previous
# read, or data that is fresh enough to observe the effects of some
@@ -3912,9 +3983,19 @@
#
# A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
# Example: `"2014-10-02T15:01:23.045123456Z"`.
- "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
- # the Transaction message that describes the transaction.
- "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
+ "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
+ # reads at a specific timestamp are repeatable; the same read at
+ # the same timestamp always returns the same data. If the
+ # timestamp is in the future, the read will block until the
+ # specified timestamp, modulo the read's deadline.
+ #
+ # Useful for large scale consistent reads such as mapreduces, or
+ # for coordinating many reads against a consistent snapshot of the
+ # data.
+ #
+ # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
+ # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
# seconds. Guarantees that all writes that have committed more
# than the specified number of seconds ago are visible. Because
# Cloud Spanner chooses the exact timestamp, this mode works even if
@@ -3938,18 +4019,8 @@
#
# Useful for reading at nearby replicas without the distributed
# timestamp negotiation overhead of `max_staleness`.
- "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
- # reads at a specific timestamp are repeatable; the same read at
- # the same timestamp always returns the same data. If the
- # timestamp is in the future, the read will block until the
- # specified timestamp, modulo the read's deadline.
- #
- # Useful for large scale consistent reads such as mapreduces, or
- # for coordinating many reads against a consistent snapshot of the
- # data.
- #
- # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
- # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
+ # the Transaction message that describes the transaction.
"strong": True or False, # Read at a timestamp where all previously committed transactions
# are visible.
},
@@ -3962,7 +4033,7 @@
},
"id": "A String", # Execute the read or SQL query in a previously-started transaction.
},
- "seqno": "A String", # A per-transaction sequence number used to identify this request. This
+ "seqno": "A String", # A per-transaction sequence number used to identify this request. This field
# makes each request idempotent such that if the request is received multiple
# times, at most one will succeed.
#
@@ -3992,33 +4063,67 @@
# about SQL types.
"a_key": { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a
# table cell or returned from an SQL query.
- "structType": # Object with schema name: StructType # If code == STRUCT, then `struct_type`
+ "structType": { # `StructType` defines the fields of a STRUCT type. # If code == STRUCT, then `struct_type`
# provides type information for the struct's fields.
+ "fields": [ # The list of fields that make up this struct. Order is
+ # significant, because values of this struct type are represented as
+ # lists, where the order of field values matches the order of
+ # fields in the StructType. In turn, the order of fields
+ # matches the order of columns in a read request, or the order of
+ # fields in the `SELECT` clause of a query.
+ { # Message representing a single field of a struct.
+ "type": # Object with schema name: Type # The type of the field.
+ "name": "A String", # The name of the field. For reads, this is the column name. For
+ # SQL queries, it is the column alias (e.g., `"Word"` in the
+ # query `"SELECT 'hello' AS Word"`), or the column name (e.g.,
+ # `"ColName"` in the query `"SELECT ColName FROM Table"`). Some
+              # columns might have an empty name (e.g., `"SELECT
+ # UPPER(ColName)"`). Note that a query result can contain
+ # multiple fields with the same name.
+ },
+ ],
+ },
"code": "A String", # Required. The TypeCode for this type.
"arrayElementType": # Object with schema name: Type # If code == ARRAY, then `array_element_type`
# is the type of the array elements.
},
},
- "queryMode": "A String", # Used to control the amount of debugging information returned in
- # ResultSetStats. If partition_token is set, query_mode can only
- # be set to QueryMode.NORMAL.
- "sql": "A String", # Required. The SQL string.
- "params": { # The SQL string can contain parameter placeholders. A parameter
- # placeholder consists of `'@'` followed by the parameter
- # name. Parameter names consist of any combination of letters,
- # numbers, and underscores.
+ "queryOptions": { # Query optimizer configuration. # Query optimizer configuration to use for the given query.
+ "optimizerVersion": "A String", # An option to control the selection of optimizer version.
+ #
+ # This parameter allows individual queries to pick different query
+ # optimizer versions.
+ #
+ # Specifying "latest" as a value instructs Cloud Spanner to use the
+ # latest supported query optimizer version. If not specified, Cloud Spanner
+ # uses optimizer version set at the database level options. Any other
+ # positive integer (from the list of supported optimizer versions)
+ # overrides the default optimizer version for query execution.
+ # The list of supported optimizer versions can be queried from
+ # SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS. Executing a SQL statement
+ # with an invalid optimizer version will fail with a syntax error
+ # (`INVALID_ARGUMENT`) status.
+ #
+ # The `optimizer_version` statement hint has precedence over this setting.
+ },
+ "params": { # Parameter names and values that bind to placeholders in the SQL string.
+ #
+ # A parameter placeholder consists of the `@` character followed by the
+ # parameter name (for example, `@firstName`). Parameter names can contain
+ # letters, numbers, and underscores.
#
# Parameters can appear anywhere that a literal value is expected. The same
# parameter name can be used more than once, for example:
- # `"WHERE id > @msg_id AND id < @msg_id + 100"`
#
- # It is an error to execute an SQL statement with unbound parameters.
+ # `"WHERE id > @msg_id AND id < @msg_id + 100"`
#
- # Parameter values are specified using `params`, which is a JSON
- # object whose keys are parameter names, and whose values are the
- # corresponding parameter values.
+ # It is an error to execute a SQL statement with unbound parameters.
"a_key": "", # Properties of the object.
},
+ "sql": "A String", # Required. The SQL string.
+ "queryMode": "A String", # Used to control the amount of debugging information returned in
+ # ResultSetStats. If partition_token is set, query_mode can only
+ # be set to QueryMode.NORMAL.
}
x__xgafv: string, V1 error format.
@@ -4067,28 +4172,28 @@
# Some examples of merging:
#
# # Strings are concatenated.
- # "foo", "bar" => "foobar"
+ # "foo", "bar" => "foobar"
#
# # Lists of non-strings are concatenated.
- # [2, 3], [4] => [2, 3, 4]
+ # [2, 3], [4] => [2, 3, 4]
#
# # Lists are concatenated, but the last and first elements are merged
# # because they are strings.
- # ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
+ # ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
#
# # Lists are concatenated, but the last and first elements are merged
# # because they are lists. Recursively, the last and first elements
# # of the inner lists are merged because they are strings.
- # ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
+ # ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
#
# # Non-overlapping object fields are combined.
- # {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"}
+      #     {"a": "1"}, {"b": "2"} => {"a": "1", "b": "2"}
#
# # Overlapping object fields are merged.
- # {"a": "1"}, {"a": "2"} => {"a": "12"}
+ # {"a": "1"}, {"a": "2"} => {"a": "12"}
#
# # Examples of merging objects containing lists of strings.
- # {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
+ # {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
#
# For a more complete example, suppose a streaming SQL query is
# yielding a result set whose rows contain a single string
@@ -4153,16 +4258,16 @@
# created for each column that is read by the operator. The corresponding
# `variable` fields will be set to the variable names assigned to the
# columns.
- "childIndex": 42, # The node to which the link points.
"type": "A String", # The type of the link. For example, in Hash Joins this could be used to
# distinguish between the build child and the probe child, or in the case
# of the child being an output variable, to represent the tag associated
# with the output variable.
+ "childIndex": 42, # The node to which the link points.
},
],
"shortRepresentation": { # Condensed representation of a node and its subtree. Only present for # Condensed representation for SCALAR nodes.
# `SCALAR` PlanNode(s).
- "subqueries": { # A mapping of (subquery variable name) -> (subquery node id) for cases
+ "subqueries": { # A mapping of (subquery variable name) -> (subquery node id) for cases
# where the `description` string of this node references a `SCALAR`
# subquery contained in the expression subtree rooted at this node. The
# referenced `SCALAR` subquery may not necessarily be a direct child of
@@ -4213,14 +4318,7 @@
# matches the order of columns in a read request, or the order of
# fields in the `SELECT` clause of a query.
{ # Message representing a single field of a struct.
- "type": { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a # The type of the field.
- # table cell or returned from an SQL query.
- "structType": # Object with schema name: StructType # If code == STRUCT, then `struct_type`
- # provides type information for the struct's fields.
- "code": "A String", # Required. The TypeCode for this type.
- "arrayElementType": # Object with schema name: Type # If code == ARRAY, then `array_element_type`
- # is the type of the array elements.
- },
+ "type": # Object with schema name: Type # The type of the field.
"name": "A String", # The name of the field. For reads, this is the column name. For
# SQL queries, it is the column alias (e.g., `"Word"` in the
# query `"SELECT 'hello' AS Word"`), or the column name (e.g.,
@@ -4303,8 +4401,8 @@
Some examples of using filters are:
- * `labels.env:*` --> The session has the label "env".
- * `labels.env:dev` --> The session has the label "env" and the value of
+ * `labels.env:*` --> The session has the label "env".
+ * `labels.env:dev` --> The session has the label "env" and the value of
the label contains the string "dev".
pageToken: string, If non-empty, `page_token` should contain a
next_page_token from a previous
@@ -4359,7 +4457,7 @@
</div>
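<p>The label filters shown above can be exercised with <code>list</code> and the <code>list_next</code> pagination helper. A minimal sketch, assuming the <code>filter</code> keyword argument matches the query parameter described above; the database path is a placeholder.</p>
<pre>
from googleapiclient.discovery import build

spanner = build('spanner', 'v1')
sessions = spanner.projects().instances().databases().sessions()
database = 'projects/my-project/instances/my-instance/databases/my-db'

# Only sessions carrying the label env=dev, paging through every result.
request = sessions.list(database=database, filter='labels.env:dev')
while request is not None:
    response = request.execute()
    for s in response.get('sessions', []):
        print(s['name'], s.get('labels', {}))
    request = sessions.list_next(previous_request=request, previous_response=response)
</pre>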
<div class="method">
- <code class="details" id="partitionQuery">partitionQuery(session, body, x__xgafv=None)</code>
+ <code class="details" id="partitionQuery">partitionQuery(session, body=None, x__xgafv=None)</code>
<pre>Creates a set of partition tokens that can be used to execute a query
operation in parallel. Each of the returned partition tokens can be used
by ExecuteStreamingSql to specify a subset
@@ -4374,7 +4472,7 @@
Args:
session: string, Required. The session used to create the partitions. (required)
- body: object, The request body. (required)
+ body: object, The request body.
The object takes the form of:
{ # The request for PartitionQuery
@@ -4388,8 +4486,26 @@
# about SQL types.
"a_key": { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a
# table cell or returned from an SQL query.
- "structType": # Object with schema name: StructType # If code == STRUCT, then `struct_type`
+ "structType": { # `StructType` defines the fields of a STRUCT type. # If code == STRUCT, then `struct_type`
# provides type information for the struct's fields.
+ "fields": [ # The list of fields that make up this struct. Order is
+ # significant, because values of this struct type are represented as
+ # lists, where the order of field values matches the order of
+ # fields in the StructType. In turn, the order of fields
+ # matches the order of columns in a read request, or the order of
+ # fields in the `SELECT` clause of a query.
+ { # Message representing a single field of a struct.
+ "type": # Object with schema name: Type # The type of the field.
+ "name": "A String", # The name of the field. For reads, this is the column name. For
+ # SQL queries, it is the column alias (e.g., `"Word"` in the
+ # query `"SELECT 'hello' AS Word"`), or the column name (e.g.,
+ # `"ColName"` in the query `"SELECT ColName FROM Table"`). Some
+              # columns might have an empty name (e.g., `"SELECT
+ # UPPER(ColName)"`). Note that a query result can contain
+ # multiple fields with the same name.
+ },
+ ],
+ },
"code": "A String", # Required. The TypeCode for this type.
"arrayElementType": # Object with schema name: Type # If code == ARRAY, then `array_element_type`
# is the type of the array elements.
@@ -4584,11 +4700,11 @@
# These timestamp bounds execute reads at a user-specified
# timestamp. Reads at a timestamp are guaranteed to see a consistent
# prefix of the global transaction history: they observe
- # modifications done by all transactions with a commit timestamp <=
+ # modifications done by all transactions with a commit timestamp <=
# the read timestamp, and observe none of the modifications done by
# transactions with a larger commit timestamp. They will block until
# all conflicting transactions that may be assigned commit timestamps
- # <= the read timestamp have finished.
+ # <= the read timestamp have finished.
#
# The timestamp can either be expressed as an absolute Cloud Spanner commit
# timestamp or a staleness relative to the current time.
@@ -4709,7 +4825,7 @@
# Authorization to begin a read-only transaction requires
# `spanner.databases.beginReadOnlyTransaction` permission
# on the `session` resource.
- "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
+ "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
#
# This is useful for requesting fresher data than some previous
# read, or data that is fresh enough to observe the effects of some
@@ -4719,9 +4835,19 @@
#
# A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
# Example: `"2014-10-02T15:01:23.045123456Z"`.
- "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
- # the Transaction message that describes the transaction.
- "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
+ "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
+ # reads at a specific timestamp are repeatable; the same read at
+ # the same timestamp always returns the same data. If the
+ # timestamp is in the future, the read will block until the
+ # specified timestamp, modulo the read's deadline.
+ #
+ # Useful for large scale consistent reads such as mapreduces, or
+ # for coordinating many reads against a consistent snapshot of the
+ # data.
+ #
+ # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
+ # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
# seconds. Guarantees that all writes that have committed more
# than the specified number of seconds ago are visible. Because
# Cloud Spanner chooses the exact timestamp, this mode works even if
@@ -4745,18 +4871,8 @@
#
# Useful for reading at nearby replicas without the distributed
# timestamp negotiation overhead of `max_staleness`.
- "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
- # reads at a specific timestamp are repeatable; the same read at
- # the same timestamp always returns the same data. If the
- # timestamp is in the future, the read will block until the
- # specified timestamp, modulo the read's deadline.
- #
- # Useful for large scale consistent reads such as mapreduces, or
- # for coordinating many reads against a consistent snapshot of the
- # data.
- #
- # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
- # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
+ # the Transaction message that describes the transaction.
"strong": True or False, # Read at a timestamp where all previously committed transactions
# are visible.
},
@@ -4933,11 +5049,11 @@
# These timestamp bounds execute reads at a user-specified
# timestamp. Reads at a timestamp are guaranteed to see a consistent
# prefix of the global transaction history: they observe
- # modifications done by all transactions with a commit timestamp <=
+ # modifications done by all transactions with a commit timestamp <=
# the read timestamp, and observe none of the modifications done by
# transactions with a larger commit timestamp. They will block until
# all conflicting transactions that may be assigned commit timestamps
- # <= the read timestamp have finished.
+ # <= the read timestamp have finished.
#
# The timestamp can either be expressed as an absolute Cloud Spanner commit
# timestamp or a staleness relative to the current time.
@@ -5058,7 +5174,7 @@
# Authorization to begin a read-only transaction requires
# `spanner.databases.beginReadOnlyTransaction` permission
# on the `session` resource.
- "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
+ "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
#
# This is useful for requesting fresher data than some previous
# read, or data that is fresh enough to observe the effects of some
@@ -5068,9 +5184,19 @@
#
# A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
# Example: `"2014-10-02T15:01:23.045123456Z"`.
- "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
- # the Transaction message that describes the transaction.
- "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
+ "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
+ # reads at a specific timestamp are repeatable; the same read at
+ # the same timestamp always returns the same data. If the
+ # timestamp is in the future, the read will block until the
+ # specified timestamp, modulo the read's deadline.
+ #
+ # Useful for large scale consistent reads such as mapreduces, or
+ # for coordinating many reads against a consistent snapshot of the
+ # data.
+ #
+ # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
+ # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
# seconds. Guarantees that all writes that have committed more
# than the specified number of seconds ago are visible. Because
# Cloud Spanner chooses the exact timestamp, this mode works even if
@@ -5094,18 +5220,8 @@
#
# Useful for reading at nearby replicas without the distributed
# timestamp negotiation overhead of `max_staleness`.
- "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
- # reads at a specific timestamp are repeatable; the same read at
- # the same timestamp always returns the same data. If the
- # timestamp is in the future, the read will block until the
- # specified timestamp, modulo the read's deadline.
- #
- # Useful for large scale consistent reads such as mapreduces, or
- # for coordinating many reads against a consistent snapshot of the
- # data.
- #
- # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
- # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
+ # the Transaction message that describes the transaction.
"strong": True or False, # Read at a timestamp where all previously committed transactions
# are visible.
},
@@ -5118,23 +5234,21 @@
},
"id": "A String", # Execute the read or SQL query in a previously-started transaction.
},
- "params": { # The SQL query string can contain parameter placeholders. A parameter
- # placeholder consists of `'@'` followed by the parameter
- # name. Parameter names consist of any combination of letters,
- # numbers, and underscores.
+ "params": { # Parameter names and values that bind to placeholders in the SQL string.
+ #
+ # A parameter placeholder consists of the `@` character followed by the
+ # parameter name (for example, `@firstName`). Parameter names can contain
+ # letters, numbers, and underscores.
#
# Parameters can appear anywhere that a literal value is expected. The same
# parameter name can be used more than once, for example:
- # `"WHERE id > @msg_id AND id < @msg_id + 100"`
#
- # It is an error to execute an SQL query with unbound parameters.
+ # `"WHERE id > @msg_id AND id < @msg_id + 100"`
#
- # Parameter values are specified using `params`, which is a JSON
- # object whose keys are parameter names, and whose values are the
- # corresponding parameter values.
+ # It is an error to execute a SQL statement with unbound parameters.
"a_key": "", # Properties of the object.
},
- "sql": "A String", # The query request to generate partitions for. The request will fail if
+ "sql": "A String", # Required. The query request to generate partitions for. The request will fail if
# the query is not root partitionable. The query plan of a root
# partitionable query has a single distributed union operator. A distributed
# union operator conceptually divides one or more tables into multiple
@@ -5184,7 +5298,7 @@
</div>
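<p>Putting <code>partitionQuery</code> to work end to end: begin a read-only transaction, request partition tokens for a root-partitionable query, and hand each token back to <code>ExecuteStreamingSql</code> together with the same transaction and SQL (<code>partitionRead</code> follows the same pattern for key-range reads). A minimal sketch; resource names and the query are placeholders, and the <code>partitions</code>/<code>partitionToken</code> response fields are assumed from this API's <code>PartitionResponse</code>.</p>
<pre>
from googleapiclient.discovery import build

spanner = build('spanner', 'v1')
sessions = spanner.projects().instances().databases().sessions()
database = 'projects/my-project/instances/my-instance/databases/my-db'
session = sessions.create(database=database, body={}).execute()

# Partitioning requires an existing read-only transaction that is reused for
# every partition; single-use transactions are not allowed here.
txn = sessions.beginTransaction(
    session=session['name'],
    body={'options': {'readOnly': {'strong': True}}}).execute()

sql = 'SELECT SingerId, FirstName FROM Singers'   # must be root partitionable
parts = sessions.partitionQuery(
    session=session['name'],
    body={'sql': sql, 'transaction': {'id': txn['id']}}).execute()

for part in parts.get('partitions', []):
    # Each token is consumed by ExecuteStreamingSql with the same transaction and
    # SQL; in practice these calls are fanned out to parallel workers.
    stream_body = {'sql': sql,
                   'transaction': {'id': txn['id']},
                   'partitionToken': part['partitionToken']}
    sessions.executeStreamingSql(session=session['name'], body=stream_body).execute()
</pre>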
<div class="method">
- <code class="details" id="partitionRead">partitionRead(session, body, x__xgafv=None)</code>
+ <code class="details" id="partitionRead">partitionRead(session, body=None, x__xgafv=None)</code>
<pre>Creates a set of partition tokens that can be used to execute a read
operation in parallel. Each of the returned partition tokens can be used
by StreamingRead to specify a subset of the read
@@ -5201,7 +5315,7 @@
Args:
session: string, Required. The session used to create the partitions. (required)
- body: object, The request body. (required)
+ body: object, The request body.
The object takes the form of:
{ # The request for PartitionRead
@@ -5380,11 +5494,11 @@
# These timestamp bounds execute reads at a user-specified
# timestamp. Reads at a timestamp are guaranteed to see a consistent
# prefix of the global transaction history: they observe
- # modifications done by all transactions with a commit timestamp <=
+ # modifications done by all transactions with a commit timestamp <=
# the read timestamp, and observe none of the modifications done by
# transactions with a larger commit timestamp. They will block until
# all conflicting transactions that may be assigned commit timestamps
- # <= the read timestamp have finished.
+ # <= the read timestamp have finished.
#
# The timestamp can either be expressed as an absolute Cloud Spanner commit
# timestamp or a staleness relative to the current time.
@@ -5505,7 +5619,7 @@
# Authorization to begin a read-only transaction requires
# `spanner.databases.beginReadOnlyTransaction` permission
# on the `session` resource.
- "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
+ "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
#
# This is useful for requesting fresher data than some previous
# read, or data that is fresh enough to observe the effects of some
@@ -5515,9 +5629,19 @@
#
# A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
# Example: `"2014-10-02T15:01:23.045123456Z"`.
- "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
- # the Transaction message that describes the transaction.
- "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
+ "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
+ # reads at a specific timestamp are repeatable; the same read at
+ # the same timestamp always returns the same data. If the
+ # timestamp is in the future, the read will block until the
+ # specified timestamp, modulo the read's deadline.
+ #
+ # Useful for large scale consistent reads such as mapreduces, or
+ # for coordinating many reads against a consistent snapshot of the
+ # data.
+ #
+ # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
+ # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
# seconds. Guarantees that all writes that have committed more
# than the specified number of seconds ago are visible. Because
# Cloud Spanner chooses the exact timestamp, this mode works even if
@@ -5541,18 +5665,8 @@
#
# Useful for reading at nearby replicas without the distributed
# timestamp negotiation overhead of `max_staleness`.
- "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
- # reads at a specific timestamp are repeatable; the same read at
- # the same timestamp always returns the same data. If the
- # timestamp is in the future, the read will block until the
- # specified timestamp, modulo the read's deadline.
- #
- # Useful for large scale consistent reads such as mapreduces, or
- # for coordinating many reads against a consistent snapshot of the
- # data.
- #
- # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
- # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
+ # the Transaction message that describes the transaction.
"strong": True or False, # Read at a timestamp where all previously committed transactions
# are visible.
},
@@ -5729,11 +5843,11 @@
# These timestamp bounds execute reads at a user-specified
# timestamp. Reads at a timestamp are guaranteed to see a consistent
# prefix of the global transaction history: they observe
- # modifications done by all transactions with a commit timestamp <=
+ # modifications done by all transactions with a commit timestamp <=
# the read timestamp, and observe none of the modifications done by
# transactions with a larger commit timestamp. They will block until
# all conflicting transactions that may be assigned commit timestamps
- # <= the read timestamp have finished.
+ # <= the read timestamp have finished.
#
# The timestamp can either be expressed as an absolute Cloud Spanner commit
# timestamp or a staleness relative to the current time.
@@ -5854,7 +5968,7 @@
# Authorization to begin a read-only transaction requires
# `spanner.databases.beginReadOnlyTransaction` permission
# on the `session` resource.
- "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
+ "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
#
# This is useful for requesting fresher data than some previous
# read, or data that is fresh enough to observe the effects of some
@@ -5864,9 +5978,19 @@
#
# A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
# Example: `"2014-10-02T15:01:23.045123456Z"`.
- "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
- # the Transaction message that describes the transaction.
- "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
+ "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
+ # reads at a specific timestamp are repeatable; the same read at
+ # the same timestamp always returns the same data. If the
+ # timestamp is in the future, the read will block until the
+ # specified timestamp, modulo the read's deadline.
+ #
+ # Useful for large scale consistent reads such as mapreduces, or
+ # for coordinating many reads against a consistent snapshot of the
+ # data.
+ #
+ # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
+ # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
# seconds. Guarantees that all writes that have committed more
# than the specified number of seconds ago are visible. Because
# Cloud Spanner chooses the exact timestamp, this mode works even if
@@ -5890,18 +6014,8 @@
#
# Useful for reading at nearby replicas without the distributed
# timestamp negotiation overhead of `max_staleness`.
- "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
- # reads at a specific timestamp are repeatable; the same read at
- # the same timestamp always returns the same data. If the
- # timestamp is in the future, the read will block until the
- # specified timestamp, modulo the read's deadline.
- #
- # Useful for large scale consistent reads such as mapreduces, or
- # for coordinating many reads against a consistent snapshot of the
- # data.
- #
- # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
- # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
+ # the Transaction message that describes the transaction.
"strong": True or False, # Read at a timestamp where all previously committed transactions
# are visible.
},
@@ -6108,7 +6222,7 @@
</div>
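<p>Both <code>ExecuteStreamingSql</code> and <code>StreamingRead</code> may split a single value across consecutive <code>PartialResultSet</code> chunks; the merging rules quoted in the streaming return-value documentation (strings concatenate, adjoining string or list elements of concatenated lists merge recursively, object fields are combined or merged) can be implemented as below. This is a minimal sketch for JSON-decoded values only; the official client libraries perform this merging for you.</p>
<pre>
def merge_chunks(a, b):
    """Merge two halves of a chunked value, following the documented rules."""
    if isinstance(a, str) and isinstance(b, str):
        return a + b                                   # strings are concatenated
    if isinstance(a, list) and isinstance(b, list):
        if (a and b and isinstance(a[-1], (str, list))
                and type(a[-1]) is type(b[0])):
            # Last element of the first list and first element of the second are
            # themselves strings or lists, so they merge recursively.
            return a[:-1] + [merge_chunks(a[-1], b[0])] + b[1:]
        return a + b                                   # plain concatenation otherwise
    if isinstance(a, dict) and isinstance(b, dict):
        merged = dict(a)                               # non-overlapping fields combine
        for key, value in b.items():
            merged[key] = merge_chunks(merged[key], value) if key in merged else value
        return merged
    raise TypeError('cannot merge chunks of incompatible types')

assert merge_chunks('foo', 'bar') == 'foobar'
assert merge_chunks([2, 3], [4]) == [2, 3, 4]
assert merge_chunks(['a', ['b', 'c']], [['d'], 'e']) == ['a', ['b', 'cd'], 'e']
assert merge_chunks({'a': ['1']}, {'a': ['2']}) == {'a': ['12']}
</pre>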
<div class="method">
- <code class="details" id="read">read(session, body, x__xgafv=None)</code>
+ <code class="details" id="read">read(session, body=None, x__xgafv=None)</code>
<pre>Reads rows from the database using key lookups and scans, as a
simple key/value style alternative to
ExecuteSql. This method cannot be used to
@@ -6125,7 +6239,7 @@
Args:
session: string, Required. The session in which the read should be performed. (required)
- body: object, The request body. (required)
+ body: object, The request body.
The object takes the form of:
{ # The request for Read and
@@ -6305,11 +6419,11 @@
# These timestamp bounds execute reads at a user-specified
# timestamp. Reads at a timestamp are guaranteed to see a consistent
# prefix of the global transaction history: they observe
- # modifications done by all transactions with a commit timestamp <=
+ # modifications done by all transactions with a commit timestamp <=
# the read timestamp, and observe none of the modifications done by
# transactions with a larger commit timestamp. They will block until
# all conflicting transactions that may be assigned commit timestamps
- # <= the read timestamp have finished.
+ # <= the read timestamp have finished.
#
# The timestamp can either be expressed as an absolute Cloud Spanner commit
# timestamp or a staleness relative to the current time.
@@ -6430,7 +6544,7 @@
# Authorization to begin a read-only transaction requires
# `spanner.databases.beginReadOnlyTransaction` permission
# on the `session` resource.
- "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
+ "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
#
# This is useful for requesting fresher data than some previous
# read, or data that is fresh enough to observe the effects of some
@@ -6440,9 +6554,19 @@
#
# A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
# Example: `"2014-10-02T15:01:23.045123456Z"`.
- "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
- # the Transaction message that describes the transaction.
- "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
+ "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
+ # reads at a specific timestamp are repeatable; the same read at
+ # the same timestamp always returns the same data. If the
+ # timestamp is in the future, the read will block until the
+ # specified timestamp, modulo the read's deadline.
+ #
+ # Useful for large scale consistent reads such as mapreduces, or
+ # for coordinating many reads against a consistent snapshot of the
+ # data.
+ #
+ # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
+ # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
# seconds. Guarantees that all writes that have committed more
# than the specified number of seconds ago are visible. Because
# Cloud Spanner chooses the exact timestamp, this mode works even if
@@ -6466,18 +6590,8 @@
#
# Useful for reading at nearby replicas without the distributed
# timestamp negotiation overhead of `max_staleness`.
- "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
- # reads at a specific timestamp are repeatable; the same read at
- # the same timestamp always returns the same data. If the
- # timestamp is in the future, the read will block until the
- # specified timestamp, modulo the read's deadline.
- #
- # Useful for large scale consistent reads such as mapreduces, or
- # for coordinating many reads against a consistent snapshot of the
- # data.
- #
- # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
- # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
+ # the Transaction message that describes the transaction.
"strong": True or False, # Read at a timestamp where all previously committed transactions
# are visible.
},
@@ -6654,11 +6768,11 @@
# These timestamp bounds execute reads at a user-specified
# timestamp. Reads at a timestamp are guaranteed to see a consistent
# prefix of the global transaction history: they observe
- # modifications done by all transactions with a commit timestamp <=
+ # modifications done by all transactions with a commit timestamp <=
# the read timestamp, and observe none of the modifications done by
# transactions with a larger commit timestamp. They will block until
# all conflicting transactions that may be assigned commit timestamps
- # <= the read timestamp have finished.
+ # <= the read timestamp have finished.
#
# The timestamp can either be expressed as an absolute Cloud Spanner commit
# timestamp or a staleness relative to the current time.
@@ -6779,7 +6893,7 @@
# Authorization to begin a read-only transaction requires
# `spanner.databases.beginReadOnlyTransaction` permission
# on the `session` resource.
- "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
+ "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
#
# This is useful for requesting fresher data than some previous
# read, or data that is fresh enough to observe the effects of some
@@ -6789,9 +6903,19 @@
#
# A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
# Example: `"2014-10-02T15:01:23.045123456Z"`.
- "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
- # the Transaction message that describes the transaction.
- "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
+ "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
+ # reads at a specific timestamp are repeatable; the same read at
+ # the same timestamp always returns the same data. If the
+ # timestamp is in the future, the read will block until the
+ # specified timestamp, modulo the read's deadline.
+ #
+ # Useful for large scale consistent reads such as mapreduces, or
+ # for coordinating many reads against a consistent snapshot of the
+ # data.
+ #
+ # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
+ # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
# seconds. Guarantees that all writes that have committed more
# than the specified number of seconds ago are visible. Because
# Cloud Spanner chooses the exact timestamp, this mode works even if
@@ -6815,18 +6939,8 @@
#
# Useful for reading at nearby replicas without the distributed
# timestamp negotiation overhead of `max_staleness`.
- "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
- # reads at a specific timestamp are repeatable; the same read at
- # the same timestamp always returns the same data. If the
- # timestamp is in the future, the read will block until the
- # specified timestamp, modulo the read's deadline.
- #
- # Useful for large scale consistent reads such as mapreduces, or
- # for coordinating many reads against a consistent snapshot of the
- # data.
- #
- # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
- # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
+ # the Transaction message that describes the transaction.
"strong": True or False, # Read at a timestamp where all previously committed transactions
# are visible.
},
@@ -6990,7 +7104,7 @@
# is zero, the default is no limit. A limit cannot be specified if
# `partition_token` is set.
"table": "A String", # Required. The name of the table in the database to be read.
- "columns": [ # The columns of table to be returned for each row matching
+ "columns": [ # Required. The columns of table to be returned for each row matching
# this request.
"A String",
],
@@ -7056,16 +7170,16 @@
# created for each column that is read by the operator. The corresponding
# `variable` fields will be set to the variable names assigned to the
# columns.
- "childIndex": 42, # The node to which the link points.
"type": "A String", # The type of the link. For example, in Hash Joins this could be used to
# distinguish between the build child and the probe child, or in the case
# of the child being an output variable, to represent the tag associated
# with the output variable.
+ "childIndex": 42, # The node to which the link points.
},
],
"shortRepresentation": { # Condensed representation of a node and its subtree. Only present for # Condensed representation for SCALAR nodes.
# `SCALAR` PlanNode(s).
- "subqueries": { # A mapping of (subquery variable name) -> (subquery node id) for cases
+ "subqueries": { # A mapping of (subquery variable name) -> (subquery node id) for cases
# where the `description` string of this node references a `SCALAR`
# subquery contained in the expression subtree rooted at this node. The
# referenced `SCALAR` subquery may not necessarily be a direct child of
@@ -7115,14 +7229,7 @@
# matches the order of columns in a read request, or the order of
# fields in the `SELECT` clause of a query.
{ # Message representing a single field of a struct.
- "type": { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a # The type of the field.
- # table cell or returned from an SQL query.
- "structType": # Object with schema name: StructType # If code == STRUCT, then `struct_type`
- # provides type information for the struct's fields.
- "code": "A String", # Required. The TypeCode for this type.
- "arrayElementType": # Object with schema name: Type # If code == ARRAY, then `array_element_type`
- # is the type of the array elements.
- },
+ "type": # Object with schema name: Type # The type of the field.
"name": "A String", # The name of the field. For reads, this is the column name. For
# SQL queries, it is the column alias (e.g., `"Word"` in the
# query `"SELECT 'hello' AS Word"`), or the column name (e.g.,
@@ -7155,7 +7262,7 @@
</div>
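<p>A minimal sketch of the <code>read</code> request documented above: fetch a few rows by primary key, letting the default single-use, strong read-only transaction apply. Resource, table, column, and key values are placeholders, and the <code>keySet</code> list-of-key-lists shape is assumed from this API's <code>KeySet</code> message.</p>
<pre>
from googleapiclient.discovery import build

spanner = build('spanner', 'v1')
sessions = spanner.projects().instances().databases().sessions()
database = 'projects/my-project/instances/my-instance/databases/my-db'
session = sessions.create(database=database, body={}).execute()

body = {
    'table': 'Singers',                      # required: table to read from
    'columns': ['SingerId', 'FirstName'],    # required: columns to return per row
    'keySet': {'keys': [['1'], ['2']]},      # one key list per requested row
    # No 'transaction' given: the default is a temporary strong read-only transaction.
}
result = sessions.read(session=session['name'], body=body).execute()
for row in result.get('rows', []):
    print(row)
</pre>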
<div class="method">
- <code class="details" id="rollback">rollback(session, body, x__xgafv=None)</code>
+ <code class="details" id="rollback">rollback(session, body=None, x__xgafv=None)</code>
<pre>Rolls back a transaction, releasing any locks it holds. It is a good
idea to call this for any transaction that includes one or more
Read or ExecuteSql requests and
@@ -7167,7 +7274,7 @@
Args:
session: string, Required. The session in which the transaction to roll back is running. (required)
- body: object, The request body. (required)
+ body: object, The request body.
The object takes the form of:
{ # The request for Rollback.
@@ -7195,7 +7302,7 @@
</div>
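<p>For completeness, the begin/rollback round trip as a minimal sketch: start a read-write transaction and abandon it. The <code>transactionId</code> field name is assumed from this API's <code>RollbackRequest</code>; resource names are placeholders.</p>
<pre>
from googleapiclient.discovery import build

spanner = build('spanner', 'v1')
sessions = spanner.projects().instances().databases().sessions()
database = 'projects/my-project/instances/my-instance/databases/my-db'
session = sessions.create(database=database, body={}).execute()

txn = sessions.beginTransaction(
    session=session['name'],
    body={'options': {'readWrite': {}}}).execute()

# ... issue ExecuteSql / Read calls under txn['id'], then decide not to commit ...

sessions.rollback(session=session['name'],
                  body={'transactionId': txn['id']}).execute()
</pre>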
<div class="method">
- <code class="details" id="streamingRead">streamingRead(session, body, x__xgafv=None)</code>
+ <code class="details" id="streamingRead">streamingRead(session, body=None, x__xgafv=None)</code>
<pre>Like Read, except returns the result set as a
stream. Unlike Read, there is no limit on the
size of the returned result set. However, no individual row in
@@ -7204,7 +7311,7 @@
Args:
session: string, Required. The session in which the read should be performed. (required)
- body: object, The request body. (required)
+ body: object, The request body.
The object takes the form of:
{ # The request for Read and
@@ -7384,11 +7491,11 @@
# These timestamp bounds execute reads at a user-specified
# timestamp. Reads at a timestamp are guaranteed to see a consistent
# prefix of the global transaction history: they observe
- # modifications done by all transactions with a commit timestamp <=
+ # modifications done by all transactions with a commit timestamp <=
# the read timestamp, and observe none of the modifications done by
# transactions with a larger commit timestamp. They will block until
# all conflicting transactions that may be assigned commit timestamps
- # <= the read timestamp have finished.
+ # <= the read timestamp have finished.
#
# The timestamp can either be expressed as an absolute Cloud Spanner commit
# timestamp or a staleness relative to the current time.
@@ -7509,7 +7616,7 @@
# Authorization to begin a read-only transaction requires
# `spanner.databases.beginReadOnlyTransaction` permission
# on the `session` resource.
- "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
+ "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
#
# This is useful for requesting fresher data than some previous
# read, or data that is fresh enough to observe the effects of some
@@ -7519,9 +7626,19 @@
#
# A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
# Example: `"2014-10-02T15:01:23.045123456Z"`.
- "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
- # the Transaction message that describes the transaction.
- "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
+ "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
+ # reads at a specific timestamp are repeatable; the same read at
+ # the same timestamp always returns the same data. If the
+ # timestamp is in the future, the read will block until the
+ # specified timestamp, modulo the read's deadline.
+ #
+ # Useful for large scale consistent reads such as mapreduces, or
+ # for coordinating many reads against a consistent snapshot of the
+ # data.
+ #
+ # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
+ # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
# seconds. Guarantees that all writes that have committed more
# than the specified number of seconds ago are visible. Because
# Cloud Spanner chooses the exact timestamp, this mode works even if
@@ -7545,18 +7662,8 @@
#
# Useful for reading at nearby replicas without the distributed
# timestamp negotiation overhead of `max_staleness`.
- "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
- # reads at a specific timestamp are repeatable; the same read at
- # the same timestamp always returns the same data. If the
- # timestamp is in the future, the read will block until the
- # specified timestamp, modulo the read's deadline.
- #
- # Useful for large scale consistent reads such as mapreduces, or
- # for coordinating many reads against a consistent snapshot of the
- # data.
- #
- # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
- # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
+ # the Transaction message that describes the transaction.
"strong": True or False, # Read at a timestamp where all previously committed transactions
# are visible.
},
@@ -7733,11 +7840,11 @@
# These timestamp bounds execute reads at a user-specified
# timestamp. Reads at a timestamp are guaranteed to see a consistent
# prefix of the global transaction history: they observe
- # modifications done by all transactions with a commit timestamp <=
+ # modifications done by all transactions with a commit timestamp <=
# the read timestamp, and observe none of the modifications done by
# transactions with a larger commit timestamp. They will block until
# all conflicting transactions that may be assigned commit timestamps
- # <= the read timestamp have finished.
+ # <= the read timestamp have finished.
#
# The timestamp can either be expressed as an absolute Cloud Spanner commit
# timestamp or a staleness relative to the current time.
@@ -7858,7 +7965,7 @@
# Authorization to begin a read-only transaction requires
# `spanner.databases.beginReadOnlyTransaction` permission
# on the `session` resource.
- "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
+ "minReadTimestamp": "A String", # Executes all reads at a timestamp >= `min_read_timestamp`.
#
# This is useful for requesting fresher data than some previous
# read, or data that is fresh enough to observe the effects of some
@@ -7868,9 +7975,19 @@
#
# A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
# Example: `"2014-10-02T15:01:23.045123456Z"`.
- "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
- # the Transaction message that describes the transaction.
- "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
+ "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
+ # reads at a specific timestamp are repeatable; the same read at
+ # the same timestamp always returns the same data. If the
+ # timestamp is in the future, the read will block until the
+ # specified timestamp, modulo the read's deadline.
+ #
+ # Useful for large scale consistent reads such as mapreduces, or
+ # for coordinating many reads against a consistent snapshot of the
+ # data.
+ #
+ # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
+ # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "maxStaleness": "A String", # Read data at a timestamp >= `NOW - max_staleness`
# seconds. Guarantees that all writes that have committed more
# than the specified number of seconds ago are visible. Because
# Cloud Spanner chooses the exact timestamp, this mode works even if
@@ -7894,18 +8011,8 @@
#
# Useful for reading at nearby replicas without the distributed
# timestamp negotiation overhead of `max_staleness`.
- "readTimestamp": "A String", # Executes all reads at the given timestamp. Unlike other modes,
- # reads at a specific timestamp are repeatable; the same read at
- # the same timestamp always returns the same data. If the
- # timestamp is in the future, the read will block until the
- # specified timestamp, modulo the read's deadline.
- #
- # Useful for large scale consistent reads such as mapreduces, or
- # for coordinating many reads against a consistent snapshot of the
- # data.
- #
- # A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
- # Example: `"2014-10-02T15:01:23.045123456Z"`.
+ "returnReadTimestamp": True or False, # If true, the Cloud Spanner-selected read timestamp is included in
+ # the Transaction message that describes the transaction.
"strong": True or False, # Read at a timestamp where all previously committed transactions
# are visible.
},
@@ -8069,7 +8176,7 @@
# is zero, the default is no limit. A limit cannot be specified if
# `partition_token` is set.
"table": "A String", # Required. The name of the table in the database to be read.
- "columns": [ # The columns of table to be returned for each row matching
+ "columns": [ # Required. The columns of table to be returned for each row matching
# this request.
"A String",
],
@@ -8121,28 +8228,28 @@
# Some examples of merging:
#
# # Strings are concatenated.
- # "foo", "bar" => "foobar"
+ # "foo", "bar" => "foobar"
#
# # Lists of non-strings are concatenated.
- # [2, 3], [4] => [2, 3, 4]
+ # [2, 3], [4] => [2, 3, 4]
#
# # Lists are concatenated, but the last and first elements are merged
# # because they are strings.
- # ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
+ # ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
#
# # Lists are concatenated, but the last and first elements are merged
# # because they are lists. Recursively, the last and first elements
# # of the inner lists are merged because they are strings.
- # ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
+ # ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
#
# # Non-overlapping object fields are combined.
- # {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"}
+      #     {"a": "1"}, {"b": "2"} => {"a": "1", "b": "2"}
#
# # Overlapping object fields are merged.
- # {"a": "1"}, {"a": "2"} => {"a": "12"}
+ # {"a": "1"}, {"a": "2"} => {"a": "12"}
#
# # Examples of merging objects containing lists of strings.
- # {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
+ # {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
#
# For a more complete example, suppose a streaming SQL query is
# yielding a result set whose rows contain a single string
@@ -8207,16 +8314,16 @@
# created for each column that is read by the operator. The corresponding
# `variable` fields will be set to the variable names assigned to the
# columns.
- "childIndex": 42, # The node to which the link points.
"type": "A String", # The type of the link. For example, in Hash Joins this could be used to
# distinguish between the build child and the probe child, or in the case
# of the child being an output variable, to represent the tag associated
# with the output variable.
+ "childIndex": 42, # The node to which the link points.
},
],
"shortRepresentation": { # Condensed representation of a node and its subtree. Only present for # Condensed representation for SCALAR nodes.
# `SCALAR` PlanNode(s).
- "subqueries": { # A mapping of (subquery variable name) -> (subquery node id) for cases
+ "subqueries": { # A mapping of (subquery variable name) -> (subquery node id) for cases
# where the `description` string of this node references a `SCALAR`
# subquery contained in the expression subtree rooted at this node. The
# referenced `SCALAR` subquery may not necessarily be a direct child of
@@ -8267,14 +8374,7 @@
# matches the order of columns in a read request, or the order of
# fields in the `SELECT` clause of a query.
{ # Message representing a single field of a struct.
- "type": { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a # The type of the field.
- # table cell or returned from an SQL query.
- "structType": # Object with schema name: StructType # If code == STRUCT, then `struct_type`
- # provides type information for the struct's fields.
- "code": "A String", # Required. The TypeCode for this type.
- "arrayElementType": # Object with schema name: Type # If code == ARRAY, then `array_element_type`
- # is the type of the array elements.
- },
+ "type": # Object with schema name: Type # The type of the field.
"name": "A String", # The name of the field. For reads, this is the column name. For
# SQL queries, it is the column alias (e.g., `"Word"` in the
# query `"SELECT 'hello' AS Word"`), or the column name (e.g.,