blob: 064238a4ae9328e40793d8d6f0f476cdf8788e73 [file] [log] [blame]
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04001<html><body>
2<style>
3
4body, h1, h2, h3, div, span, p, pre, a {
5 margin: 0;
6 padding: 0;
7 border: 0;
8 font-weight: inherit;
9 font-style: inherit;
10 font-size: 100%;
11 font-family: inherit;
12 vertical-align: baseline;
13}
14
15body {
16 font-size: 13px;
17 padding: 1em;
18}
19
20h1 {
21 font-size: 26px;
22 margin-bottom: 1em;
23}
24
25h2 {
26 font-size: 24px;
27 margin-bottom: 1em;
28}
29
30h3 {
31 font-size: 20px;
32 margin-bottom: 1em;
33 margin-top: 1em;
34}
35
36pre, code {
37 line-height: 1.5;
38 font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
39}
40
41pre {
42 margin-top: 0.5em;
43}
44
45h1, h2, h3, p {
46  font-family: Arial, sans-serif;
47}
48
49h1, h2, h3 {
50 border-bottom: solid #CCC 1px;
51}
52
53.toc_element {
54 margin-top: 0.5em;
55}
56
57.firstline {
58  margin-left: 2em;
59}
60
61.method {
62 margin-top: 1em;
63 border: solid 1px #CCC;
64 padding: 1em;
65 background: #EEE;
66}
67
68.details {
69 font-weight: bold;
70 font-size: 14px;
71}
72
73</style>
74
75<h1><a href="spanner_v1.html">Cloud Spanner API</a> . <a href="spanner_v1.projects.html">projects</a> . <a href="spanner_v1.projects.instances.html">instances</a> . <a href="spanner_v1.projects.instances.databases.html">databases</a> . <a href="spanner_v1.projects.instances.databases.sessions.html">sessions</a></h1>
76<h2>Instance Methods</h2>
77<p class="toc_element">
Dan O'Mearadd494642020-05-01 07:42:23 -070078 <code><a href="#batchCreate">batchCreate(database, body=None, x__xgafv=None)</a></code></p>
79<p class="firstline">Creates multiple new sessions.</p>
80<p class="toc_element">
81 <code><a href="#beginTransaction">beginTransaction(session, body=None, x__xgafv=None)</a></code></p>
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -040082<p class="firstline">Begins a new transaction. This step can often be skipped:</p>
83<p class="toc_element">
Dan O'Mearadd494642020-05-01 07:42:23 -070084 <code><a href="#commit">commit(session, body=None, x__xgafv=None)</a></code></p>
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -040085<p class="firstline">Commits a transaction. The request includes the mutations to be</p>
86<p class="toc_element">
Dan O'Mearadd494642020-05-01 07:42:23 -070087 <code><a href="#create">create(database, body=None, x__xgafv=None)</a></code></p>
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -040088<p class="firstline">Creates a new session. A session can be used to perform</p>
89<p class="toc_element">
90 <code><a href="#delete">delete(name, x__xgafv=None)</a></code></p>
Bu Sun Kim715bd7f2019-06-14 16:50:42 -070091<p class="firstline">Ends a session, releasing server resources associated with it. This will</p>
92<p class="toc_element">
Dan O'Mearadd494642020-05-01 07:42:23 -070093 <code><a href="#executeBatchDml">executeBatchDml(session, body=None, x__xgafv=None)</a></code></p>
Bu Sun Kim715bd7f2019-06-14 16:50:42 -070094<p class="firstline">Executes a batch of SQL DML statements. This method allows many statements</p>
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -040095<p class="toc_element">
Dan O'Mearadd494642020-05-01 07:42:23 -070096 <code><a href="#executeSql">executeSql(session, body=None, x__xgafv=None)</a></code></p>
Bu Sun Kim715bd7f2019-06-14 16:50:42 -070097<p class="firstline">Executes an SQL statement, returning all results in a single reply. This</p>
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -040098<p class="toc_element">
Dan O'Mearadd494642020-05-01 07:42:23 -070099 <code><a href="#executeStreamingSql">executeStreamingSql(session, body=None, x__xgafv=None)</a></code></p>
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400100<p class="firstline">Like ExecuteSql, except returns the result</p>
101<p class="toc_element">
102 <code><a href="#get">get(name, x__xgafv=None)</a></code></p>
103<p class="firstline">Gets a session. Returns `NOT_FOUND` if the session does not exist.</p>
104<p class="toc_element">
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700105 <code><a href="#list">list(database, filter=None, pageSize=None, pageToken=None, x__xgafv=None)</a></code></p>
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700106<p class="firstline">Lists all sessions in a given database.</p>
107<p class="toc_element">
108 <code><a href="#list_next">list_next(previous_request, previous_response)</a></code></p>
109<p class="firstline">Retrieves the next page of results.</p>
110<p class="toc_element">
Dan O'Mearadd494642020-05-01 07:42:23 -0700111 <code><a href="#partitionQuery">partitionQuery(session, body=None, x__xgafv=None)</a></code></p>
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700112<p class="firstline">Creates a set of partition tokens that can be used to execute a query</p>
113<p class="toc_element">
Dan O'Mearadd494642020-05-01 07:42:23 -0700114 <code><a href="#partitionRead">partitionRead(session, body=None, x__xgafv=None)</a></code></p>
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700115<p class="firstline">Creates a set of partition tokens that can be used to execute a read</p>
116<p class="toc_element">
Dan O'Mearadd494642020-05-01 07:42:23 -0700117 <code><a href="#read">read(session, body=None, x__xgafv=None)</a></code></p>
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400118<p class="firstline">Reads rows from the database using key lookups and scans, as a</p>
119<p class="toc_element">
Dan O'Mearadd494642020-05-01 07:42:23 -0700120 <code><a href="#rollback">rollback(session, body=None, x__xgafv=None)</a></code></p>
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400121<p class="firstline">Rolls back a transaction, releasing any locks it holds. It is a good</p>
122<p class="toc_element">
Dan O'Mearadd494642020-05-01 07:42:23 -0700123 <code><a href="#streamingRead">streamingRead(session, body=None, x__xgafv=None)</a></code></p>
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400124<p class="firstline">Like Read, except returns the result set as a</p>
125<h3>Method Details</h3>
126<div class="method">
Dan O'Mearadd494642020-05-01 07:42:23 -0700127 <code class="details" id="batchCreate">batchCreate(database, body=None, x__xgafv=None)</code>
128 <pre>Creates multiple new sessions.
129
130This API can be used to initialize a session cache on the clients.
131See https://goo.gl/TgSFN2 for best practices on session cache management.
132
133Args:
134 database: string, Required. The database in which the new sessions are created. (required)
135 body: object, The request body.
136 The object takes the form of:
137
138{ # The request for BatchCreateSessions.
Bu Sun Kim65020912020-05-20 12:08:20 -0700139 &quot;sessionTemplate&quot;: { # A session in the Cloud Spanner API. # Parameters to be applied to each created session.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700140 &quot;createTime&quot;: &quot;A String&quot;, # Output only. The timestamp when the session is created.
Bu Sun Kim65020912020-05-20 12:08:20 -0700141 &quot;name&quot;: &quot;A String&quot;, # The name of the session. This is always system-assigned; values provided
142 # when creating a session are ignored.
Bu Sun Kim65020912020-05-20 12:08:20 -0700143 &quot;labels&quot;: { # The labels for the session.
Dan O'Mearadd494642020-05-01 07:42:23 -0700144 #
145 # * Label keys must be between 1 and 63 characters long and must conform to
146 # the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.
147 # * Label values must be between 0 and 63 characters long and must conform
148 # to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
149 # * No more than 64 labels can be associated with a given session.
150 #
151 # See https://goo.gl/xmQnxf for more information on and examples of labels.
Bu Sun Kim65020912020-05-20 12:08:20 -0700152 &quot;a_key&quot;: &quot;A String&quot;,
Dan O'Mearadd494642020-05-01 07:42:23 -0700153 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700154 &quot;approximateLastUseTime&quot;: &quot;A String&quot;, # Output only. The approximate timestamp when the session is last used. It is
155 # typically earlier than the actual last use time.
Dan O'Mearadd494642020-05-01 07:42:23 -0700156 },
Bu Sun Kim65020912020-05-20 12:08:20 -0700157 &quot;sessionCount&quot;: 42, # Required. The number of sessions to be created in this batch call.
Dan O'Mearadd494642020-05-01 07:42:23 -0700158 # The API may return fewer than the requested number of sessions. If a
159 # specific number of sessions are desired, the client can make additional
160 # calls to BatchCreateSessions (adjusting
161 # session_count as necessary).
162 }
163
164 x__xgafv: string, V1 error format.
165 Allowed values
166 1 - v1 error format
167 2 - v2 error format
168
169Returns:
170 An object of the form:
171
172 { # The response for BatchCreateSessions.
Bu Sun Kim65020912020-05-20 12:08:20 -0700173 &quot;session&quot;: [ # The freshly created sessions.
Dan O'Mearadd494642020-05-01 07:42:23 -0700174 { # A session in the Cloud Spanner API.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700175 &quot;createTime&quot;: &quot;A String&quot;, # Output only. The timestamp when the session is created.
Bu Sun Kim65020912020-05-20 12:08:20 -0700176 &quot;name&quot;: &quot;A String&quot;, # The name of the session. This is always system-assigned; values provided
177 # when creating a session are ignored.
Bu Sun Kim65020912020-05-20 12:08:20 -0700178 &quot;labels&quot;: { # The labels for the session.
Dan O'Mearadd494642020-05-01 07:42:23 -0700179 #
180 # * Label keys must be between 1 and 63 characters long and must conform to
181 # the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.
182 # * Label values must be between 0 and 63 characters long and must conform
183 # to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
184 # * No more than 64 labels can be associated with a given session.
185 #
186 # See https://goo.gl/xmQnxf for more information on and examples of labels.
Bu Sun Kim65020912020-05-20 12:08:20 -0700187 &quot;a_key&quot;: &quot;A String&quot;,
Dan O'Mearadd494642020-05-01 07:42:23 -0700188 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700189 &quot;approximateLastUseTime&quot;: &quot;A String&quot;, # Output only. The approximate timestamp when the session is last used. It is
190 # typically earlier than the actual last use time.
Dan O'Mearadd494642020-05-01 07:42:23 -0700191 },
192 ],
193 }</pre>
194</div>
195
196<div class="method">
197 <code class="details" id="beginTransaction">beginTransaction(session, body=None, x__xgafv=None)</code>
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400198 <pre>Begins a new transaction. This step can often be skipped:
199Read, ExecuteSql and
200Commit can begin a new transaction as a
201side-effect.
202
203Args:
204 session: string, Required. The session in which the transaction runs. (required)
Dan O'Mearadd494642020-05-01 07:42:23 -0700205 body: object, The request body.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400206 The object takes the form of:
207
208{ # The request for BeginTransaction.
Bu Sun Kim65020912020-05-20 12:08:20 -0700209 &quot;options&quot;: { # # Transactions # Required. Options for the new transaction.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400210 #
211 #
212 # Each session can have at most one active transaction at a time. After the
213 # active transaction is completed, the session can immediately be
214 # re-used for the next transaction. It is not necessary to create a
215 # new session for each transaction.
216 #
217 # # Transaction Modes
218 #
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700219 # Cloud Spanner supports three transaction modes:
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400220 #
221 # 1. Locking read-write. This type of transaction is the only way
222 # to write data into Cloud Spanner. These transactions rely on
223 # pessimistic locking and, if necessary, two-phase commit.
224 # Locking read-write transactions may abort, requiring the
225 # application to retry.
226 #
227 # 2. Snapshot read-only. This transaction type provides guaranteed
228 # consistency across several reads, but does not allow
229 # writes. Snapshot read-only transactions can be configured to
230 # read at timestamps in the past. Snapshot read-only
231 # transactions do not need to be committed.
232 #
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700233 # 3. Partitioned DML. This type of transaction is used to execute
234 # a single Partitioned DML statement. Partitioned DML partitions
235 # the key space and runs the DML statement over each partition
236 # in parallel using separate, internal transactions that commit
237 # independently. Partitioned DML transactions do not need to be
238 # committed.
239 #
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400240 # For transactions that only read, snapshot read-only transactions
241 # provide simpler semantics and are almost always faster. In
242 # particular, read-only transactions do not take locks, so they do
243 # not conflict with read-write transactions. As a consequence of not
244 # taking locks, they also do not abort, so retry loops are not needed.
245 #
246 # Transactions may only read/write data in a single database. They
247 # may, however, read/write data in different tables within that
248 # database.
249 #
250 # ## Locking Read-Write Transactions
251 #
252 # Locking transactions may be used to atomically read-modify-write
253 # data anywhere in a database. This type of transaction is externally
254 # consistent.
255 #
256 # Clients should attempt to minimize the amount of time a transaction
257 # is active. Faster transactions commit with higher probability
258 # and cause less contention. Cloud Spanner attempts to keep read locks
259 # active as long as the transaction continues to do reads, and the
260 # transaction has not been terminated by
261 # Commit or
262 # Rollback. Long periods of
263 # inactivity at the client may cause Cloud Spanner to release a
Bu Sun Kim65020912020-05-20 12:08:20 -0700264 # transaction&#x27;s locks and abort it.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400265 #
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400266 # Conceptually, a read-write transaction consists of zero or more
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700267 # reads or SQL statements followed by
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400268 # Commit. At any time before
269 # Commit, the client can send a
270 # Rollback request to abort the
271 # transaction.
272 #
273 # ### Semantics
274 #
275 # Cloud Spanner can commit the transaction if all read locks it acquired
276 # are still valid at commit time, and it is able to acquire write
277 # locks for all writes. Cloud Spanner can abort the transaction for any
278 # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
279 # that the transaction has not modified any user data in Cloud Spanner.
280 #
281 # Unless the transaction commits, Cloud Spanner makes no guarantees about
Bu Sun Kim65020912020-05-20 12:08:20 -0700282 # how long the transaction&#x27;s locks were held for. It is an error to
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400283 # use Cloud Spanner locks for any sort of mutual exclusion other than
284 # between Cloud Spanner transactions themselves.
285 #
286 # ### Retrying Aborted Transactions
287 #
288 # When a transaction aborts, the application can choose to retry the
289 # whole transaction again. To maximize the chances of successfully
290 # committing the retry, the client should execute the retry in the
Bu Sun Kim65020912020-05-20 12:08:20 -0700291 # same session as the original attempt. The original session&#x27;s lock
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400292 # priority increases with each consecutive abort, meaning that each
293 # attempt has a slightly better chance of success than the previous.
294 #
295 # Under some circumstances (e.g., many transactions attempting to
296 # modify the same row(s)), a transaction can abort many times in a
297 # short period before successfully committing. Thus, it is not a good
298 # idea to cap the number of retries a transaction can attempt;
299 # instead, it is better to limit the total amount of wall time spent
300 # retrying.
301 #
302 # ### Idle Transactions
303 #
304 # A transaction is considered idle if it has no outstanding reads or
305 # SQL queries and has not started a read or SQL query within the last 10
306 # seconds. Idle transactions can be aborted by Cloud Spanner so that they
Bu Sun Kim65020912020-05-20 12:08:20 -0700307 # don&#x27;t hold on to locks indefinitely. In that case, the commit will
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400308 # fail with error `ABORTED`.
309 #
310 # If this behavior is undesirable, periodically executing a simple
311 # SQL query in the transaction (e.g., `SELECT 1`) prevents the
312 # transaction from becoming idle.
313 #
314 # ## Snapshot Read-Only Transactions
315 #
316 # Snapshot read-only transactions provides a simpler method than
317 # locking read-write transactions for doing several consistent
318 # reads. However, this type of transaction does not support writes.
319 #
320 # Snapshot transactions do not take locks. Instead, they work by
321 # choosing a Cloud Spanner timestamp, then executing all reads at that
322 # timestamp. Since they do not acquire locks, they do not block
323 # concurrent read-write transactions.
324 #
325 # Unlike locking read-write transactions, snapshot read-only
326 # transactions never abort. They can fail if the chosen read
327 # timestamp is garbage collected; however, the default garbage
328 # collection policy is generous enough that most applications do not
329 # need to worry about this in practice.
330 #
331 # Snapshot read-only transactions do not need to call
332 # Commit or
333 # Rollback (and in fact are not
334 # permitted to do so).
335 #
336 # To execute a snapshot transaction, the client specifies a timestamp
337 # bound, which tells Cloud Spanner how to choose a read timestamp.
338 #
339 # The types of timestamp bound are:
340 #
341 # - Strong (the default).
342 # - Bounded staleness.
343 # - Exact staleness.
344 #
345 # If the Cloud Spanner database to be read is geographically distributed,
346 # stale read-only transactions can execute more quickly than strong
347 # or read-write transaction, because they are able to execute far
348 # from the leader replica.
349 #
350 # Each type of timestamp bound is discussed in detail below.
351 #
352 # ### Strong
353 #
354 # Strong reads are guaranteed to see the effects of all transactions
355 # that have committed before the start of the read. Furthermore, all
356 # rows yielded by a single read are consistent with each other -- if
357 # any part of the read observes a transaction, all parts of the read
358 # see the transaction.
359 #
360 # Strong reads are not repeatable: two consecutive strong read-only
361 # transactions might return inconsistent results if there are
362 # concurrent writes. If consistency across reads is required, the
363 # reads should be executed within a transaction or at an exact read
364 # timestamp.
365 #
366 # See TransactionOptions.ReadOnly.strong.
367 #
368 # ### Exact Staleness
369 #
370 # These timestamp bounds execute reads at a user-specified
371 # timestamp. Reads at a timestamp are guaranteed to see a consistent
372 # prefix of the global transaction history: they observe
Dan O'Mearadd494642020-05-01 07:42:23 -0700373 # modifications done by all transactions with a commit timestamp &lt;=
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400374 # the read timestamp, and observe none of the modifications done by
375 # transactions with a larger commit timestamp. They will block until
376 # all conflicting transactions that may be assigned commit timestamps
Dan O'Mearadd494642020-05-01 07:42:23 -0700377 # &lt;= the read timestamp have finished.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400378 #
379 # The timestamp can either be expressed as an absolute Cloud Spanner commit
380 # timestamp or a staleness relative to the current time.
381 #
Bu Sun Kim65020912020-05-20 12:08:20 -0700382 # These modes do not require a &quot;negotiation phase&quot; to pick a
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400383 # timestamp. As a result, they execute slightly faster than the
384 # equivalent boundedly stale concurrency modes. On the other hand,
385 # boundedly stale reads usually return fresher results.
386 #
387 # See TransactionOptions.ReadOnly.read_timestamp and
388 # TransactionOptions.ReadOnly.exact_staleness.
389 #
390 # ### Bounded Staleness
391 #
392 # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
393 # subject to a user-provided staleness bound. Cloud Spanner chooses the
394 # newest timestamp within the staleness bound that allows execution
395 # of the reads at the closest available replica without blocking.
396 #
397 # All rows yielded are consistent with each other -- if any part of
398 # the read observes a transaction, all parts of the read see the
399 # transaction. Boundedly stale reads are not repeatable: two stale
400 # reads, even if they use the same staleness bound, can execute at
401 # different timestamps and thus return inconsistent results.
402 #
403 # Boundedly stale reads execute in two phases: the first phase
404 # negotiates a timestamp among all replicas needed to serve the
405 # read. In the second phase, reads are executed at the negotiated
406 # timestamp.
407 #
408 # As a result of the two phase execution, bounded staleness reads are
409 # usually a little slower than comparable exact staleness
410 # reads. However, they are typically able to return fresher
411 # results, and are more likely to execute at the closest replica.
412 #
413 # Because the timestamp negotiation requires up-front knowledge of
414 # which rows will be read, it can only be used with single-use
415 # read-only transactions.
416 #
417 # See TransactionOptions.ReadOnly.max_staleness and
418 # TransactionOptions.ReadOnly.min_read_timestamp.
419 #
420 # ### Old Read Timestamps and Garbage Collection
421 #
422 # Cloud Spanner continuously garbage collects deleted and overwritten data
423 # in the background to reclaim storage space. This process is known
Bu Sun Kim65020912020-05-20 12:08:20 -0700424 # as &quot;version GC&quot;. By default, version GC reclaims versions after they
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400425 # are one hour old. Because of this, Cloud Spanner cannot perform reads
426 # at read timestamps more than one hour in the past. This
427 # restriction also applies to in-progress reads and/or SQL queries whose
428 # timestamp become too old while executing. Reads and SQL queries with
429 # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700430 #
431 # ## Partitioned DML Transactions
432 #
433 # Partitioned DML transactions are used to execute DML statements with a
434 # different execution strategy that provides different, and often better,
435 # scalability properties for large, table-wide operations than DML in a
436 # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
437 # should prefer using ReadWrite transactions.
438 #
439 # Partitioned DML partitions the keyspace and runs the DML statement on each
440 # partition in separate, internal transactions. These transactions commit
441 # automatically when complete, and run independently from one another.
442 #
443 # To reduce lock contention, this execution strategy only acquires read locks
444 # on rows that match the WHERE clause of the statement. Additionally, the
445 # smaller per-partition transactions hold locks for less time.
446 #
447 # That said, Partitioned DML is not a drop-in replacement for standard DML used
448 # in ReadWrite transactions.
449 #
450 # - The DML statement must be fully-partitionable. Specifically, the statement
451 # must be expressible as the union of many statements which each access only
452 # a single row of the table.
453 #
454 # - The statement is not applied atomically to all rows of the table. Rather,
455 # the statement is applied atomically to partitions of the table, in
456 # independent transactions. Secondary index rows are updated atomically
457 # with the base table rows.
458 #
459 # - Partitioned DML does not guarantee exactly-once execution semantics
460 # against a partition. The statement will be applied at least once to each
461 # partition. It is strongly recommended that the DML statement should be
462 # idempotent to avoid unexpected results. For instance, it is potentially
463 # dangerous to run a statement such as
464 # `UPDATE table SET column = column + 1` as it could be run multiple times
465 # against some rows.
466 #
467 # - The partitions are committed automatically - there is no support for
468 # Commit or Rollback. If the call returns an error, or if the client issuing
469 # the ExecuteSql call dies, it is possible that some rows had the statement
470 # executed on them successfully. It is also possible that statement was
471 # never executed against other rows.
472 #
473 # - Partitioned DML transactions may only contain the execution of a single
474 # DML statement via ExecuteSql or ExecuteStreamingSql.
475 #
476 # - If any error is encountered during the execution of the partitioned DML
477 # operation (for instance, a UNIQUE INDEX violation, division by zero, or a
478 # value that cannot be stored due to schema constraints), then the
479 # operation is stopped at that point and an error is returned. It is
480 # possible that at this point, some partitions have been committed (or even
481 # committed multiple times), and other partitions have not been run at all.
482 #
483 # Given the above, Partitioned DML is good fit for large, database-wide,
484 # operations that are idempotent, such as deleting old rows from a very large
485 # table.
Bu Sun Kim65020912020-05-20 12:08:20 -0700486 &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400487 #
Bu Sun Kim65020912020-05-20 12:08:20 -0700488 # Authorization to begin a Partitioned DML transaction requires
489 # `spanner.databases.beginPartitionedDmlTransaction` permission
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400490 # on the `session` resource.
491 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700492 &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
493 #
494 # Authorization to begin a read-write transaction requires
495 # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
496 # on the `session` resource.
497 # transaction type has no options.
498 },
Bu Sun Kim65020912020-05-20 12:08:20 -0700499 &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400500 #
501 # Authorization to begin a read-only transaction requires
502 # `spanner.databases.beginReadOnlyTransaction` permission
503 # on the `session` resource.
Bu Sun Kim65020912020-05-20 12:08:20 -0700504 &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400505 # seconds. Guarantees that all writes that have committed more
506 # than the specified number of seconds ago are visible. Because
507 # Cloud Spanner chooses the exact timestamp, this mode works even if
Bu Sun Kim65020912020-05-20 12:08:20 -0700508 # the client&#x27;s local clock is substantially skewed from Cloud Spanner
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400509 # commit timestamps.
510 #
511 # Useful for reading the freshest data available at a nearby
512 # replica, while bounding the possible staleness if the local
513 # replica has fallen behind.
514 #
515 # Note that this option can only be used in single-use
516 # transactions.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700517 &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
518 #
519 # This is useful for requesting fresher data than some previous
520 # read, or data that is fresh enough to observe the effects of some
521 # previously committed transaction whose timestamp is known.
522 #
523 # Note that this option can only be used in single-use transactions.
524 #
525 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
526 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
527 &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
528 # are visible.
Bu Sun Kim65020912020-05-20 12:08:20 -0700529 &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
530 # the Transaction message that describes the transaction.
531 &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400532 # old. The timestamp is chosen soon after the read is started.
533 #
534 # Guarantees that all writes that have committed more than the
535 # specified number of seconds ago are visible. Because Cloud Spanner
Bu Sun Kim65020912020-05-20 12:08:20 -0700536 # chooses the exact timestamp, this mode works even if the client&#x27;s
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400537 # local clock is substantially skewed from Cloud Spanner commit
538 # timestamps.
539 #
540 # Useful for reading at nearby replicas without the distributed
541 # timestamp negotiation overhead of `max_staleness`.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700542 &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
543 # reads at a specific timestamp are repeatable; the same read at
544 # the same timestamp always returns the same data. If the
545 # timestamp is in the future, the read will block until the
546 # specified timestamp, modulo the read&#x27;s deadline.
547 #
548 # Useful for large scale consistent reads such as mapreduces, or
549 # for coordinating many reads against a consistent snapshot of the
550 # data.
551 #
552 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
553 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700554 },
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400555 },
556 }
557
558 x__xgafv: string, V1 error format.
559 Allowed values
560 1 - v1 error format
561 2 - v2 error format
562
563Returns:
564 An object of the form:
565
566 { # A transaction.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700567 &quot;readTimestamp&quot;: &quot;A String&quot;, # For snapshot read-only transactions, the read timestamp chosen
568 # for the transaction. Not returned by default: see
569 # TransactionOptions.ReadOnly.return_read_timestamp.
570 #
571 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
572 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
Bu Sun Kim65020912020-05-20 12:08:20 -0700573 &quot;id&quot;: &quot;A String&quot;, # `id` may be used to identify the transaction in subsequent
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400574 # Read,
575 # ExecuteSql,
576 # Commit, or
577 # Rollback calls.
578 #
579 # Single-use read-only transactions do not have IDs, because
580 # single-use transactions do not support multiple requests.
581 }</pre>
582</div>
583
584<div class="method">
Dan O'Mearadd494642020-05-01 07:42:23 -0700585 <code class="details" id="commit">commit(session, body=None, x__xgafv=None)</code>
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400586 <pre>Commits a transaction. The request includes the mutations to be
587applied to rows in the database.
588
589`Commit` might return an `ABORTED` error. This can occur at any time;
590commonly, the cause is conflicts with concurrent
591transactions. However, it can also happen for a variety of other
592reasons. If `Commit` returns `ABORTED`, the caller should re-attempt
593the transaction from the beginning, re-using the same session.
594
595Args:
596 session: string, Required. The session in which the transaction to be committed is running. (required)
Dan O'Mearadd494642020-05-01 07:42:23 -0700597 body: object, The request body.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400598 The object takes the form of:
599
600{ # The request for Commit.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700601 &quot;transactionId&quot;: &quot;A String&quot;, # Commit a previously-started transaction.
Bu Sun Kim65020912020-05-20 12:08:20 -0700602 &quot;singleUseTransaction&quot;: { # # Transactions # Execute mutations in a temporary transaction. Note that unlike
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400603 # commit of a previously-started transaction, commit with a
604 # temporary transaction is non-idempotent. That is, if the
605 # `CommitRequest` is sent to Cloud Spanner more than once (for
606 # instance, due to retries in the application, or in the
607 # transport library), it is possible that the mutations are
608 # executed more than once. If this is undesirable, use
609 # BeginTransaction and
610 # Commit instead.
611 #
612 #
613 # Each session can have at most one active transaction at a time. After the
614 # active transaction is completed, the session can immediately be
615 # re-used for the next transaction. It is not necessary to create a
616 # new session for each transaction.
617 #
618 # # Transaction Modes
619 #
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700620 # Cloud Spanner supports three transaction modes:
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400621 #
622 # 1. Locking read-write. This type of transaction is the only way
623 # to write data into Cloud Spanner. These transactions rely on
624 # pessimistic locking and, if necessary, two-phase commit.
625 # Locking read-write transactions may abort, requiring the
626 # application to retry.
627 #
628 # 2. Snapshot read-only. This transaction type provides guaranteed
629 # consistency across several reads, but does not allow
630 # writes. Snapshot read-only transactions can be configured to
631 # read at timestamps in the past. Snapshot read-only
632 # transactions do not need to be committed.
633 #
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700634 # 3. Partitioned DML. This type of transaction is used to execute
635 # a single Partitioned DML statement. Partitioned DML partitions
636 # the key space and runs the DML statement over each partition
637 # in parallel using separate, internal transactions that commit
638 # independently. Partitioned DML transactions do not need to be
639 # committed.
640 #
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400641 # For transactions that only read, snapshot read-only transactions
642 # provide simpler semantics and are almost always faster. In
643 # particular, read-only transactions do not take locks, so they do
644 # not conflict with read-write transactions. As a consequence of not
645 # taking locks, they also do not abort, so retry loops are not needed.
646 #
647 # Transactions may only read/write data in a single database. They
648 # may, however, read/write data in different tables within that
649 # database.
650 #
651 # ## Locking Read-Write Transactions
652 #
653 # Locking transactions may be used to atomically read-modify-write
654 # data anywhere in a database. This type of transaction is externally
655 # consistent.
656 #
657 # Clients should attempt to minimize the amount of time a transaction
658 # is active. Faster transactions commit with higher probability
659 # and cause less contention. Cloud Spanner attempts to keep read locks
660 # active as long as the transaction continues to do reads, and the
661 # transaction has not been terminated by
662 # Commit or
663 # Rollback. Long periods of
664 # inactivity at the client may cause Cloud Spanner to release a
Bu Sun Kim65020912020-05-20 12:08:20 -0700665 # transaction&#x27;s locks and abort it.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400666 #
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400667 # Conceptually, a read-write transaction consists of zero or more
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700668 # reads or SQL statements followed by
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400669 # Commit. At any time before
670 # Commit, the client can send a
671 # Rollback request to abort the
672 # transaction.
673 #
674 # ### Semantics
675 #
676 # Cloud Spanner can commit the transaction if all read locks it acquired
677 # are still valid at commit time, and it is able to acquire write
678 # locks for all writes. Cloud Spanner can abort the transaction for any
679 # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
680 # that the transaction has not modified any user data in Cloud Spanner.
681 #
682 # Unless the transaction commits, Cloud Spanner makes no guarantees about
Bu Sun Kim65020912020-05-20 12:08:20 -0700683 # how long the transaction&#x27;s locks were held for. It is an error to
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400684 # use Cloud Spanner locks for any sort of mutual exclusion other than
685 # between Cloud Spanner transactions themselves.
686 #
687 # ### Retrying Aborted Transactions
688 #
689 # When a transaction aborts, the application can choose to retry the
690 # whole transaction again. To maximize the chances of successfully
691 # committing the retry, the client should execute the retry in the
Bu Sun Kim65020912020-05-20 12:08:20 -0700692 # same session as the original attempt. The original session&#x27;s lock
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400693 # priority increases with each consecutive abort, meaning that each
694 # attempt has a slightly better chance of success than the previous.
695 #
696 # Under some circumstances (e.g., many transactions attempting to
697 # modify the same row(s)), a transaction can abort many times in a
698 # short period before successfully committing. Thus, it is not a good
699 # idea to cap the number of retries a transaction can attempt;
700 # instead, it is better to limit the total amount of wall time spent
701 # retrying.
702 #
703 # ### Idle Transactions
704 #
705 # A transaction is considered idle if it has no outstanding reads or
706 # SQL queries and has not started a read or SQL query within the last 10
707 # seconds. Idle transactions can be aborted by Cloud Spanner so that they
Bu Sun Kim65020912020-05-20 12:08:20 -0700708 # don&#x27;t hold on to locks indefinitely. In that case, the commit will
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400709 # fail with error `ABORTED`.
710 #
711 # If this behavior is undesirable, periodically executing a simple
712 # SQL query in the transaction (e.g., `SELECT 1`) prevents the
713 # transaction from becoming idle.
714 #
715 # ## Snapshot Read-Only Transactions
716 #
717 # Snapshot read-only transactions provide a simpler method than
718 # locking read-write transactions for doing several consistent
719 # reads. However, this type of transaction does not support writes.
720 #
721 # Snapshot transactions do not take locks. Instead, they work by
722 # choosing a Cloud Spanner timestamp, then executing all reads at that
723 # timestamp. Since they do not acquire locks, they do not block
724 # concurrent read-write transactions.
725 #
726 # Unlike locking read-write transactions, snapshot read-only
727 # transactions never abort. They can fail if the chosen read
728 # timestamp is garbage collected; however, the default garbage
729 # collection policy is generous enough that most applications do not
730 # need to worry about this in practice.
731 #
732 # Snapshot read-only transactions do not need to call
733 # Commit or
734 # Rollback (and in fact are not
735 # permitted to do so).
736 #
737 # To execute a snapshot transaction, the client specifies a timestamp
738 # bound, which tells Cloud Spanner how to choose a read timestamp.
739 #
740 # The types of timestamp bound are:
741 #
742 # - Strong (the default).
743 # - Bounded staleness.
744 # - Exact staleness.
745 #
746 # If the Cloud Spanner database to be read is geographically distributed,
747 # stale read-only transactions can execute more quickly than strong
748 # or read-write transactions, because they are able to execute far
749 # from the leader replica.
750 #
751 # Each type of timestamp bound is discussed in detail below.
752 #
753 # ### Strong
754 #
755 # Strong reads are guaranteed to see the effects of all transactions
756 # that have committed before the start of the read. Furthermore, all
757 # rows yielded by a single read are consistent with each other -- if
758 # any part of the read observes a transaction, all parts of the read
759 # see the transaction.
760 #
761 # Strong reads are not repeatable: two consecutive strong read-only
762 # transactions might return inconsistent results if there are
763 # concurrent writes. If consistency across reads is required, the
764 # reads should be executed within a transaction or at an exact read
765 # timestamp.
766 #
767 # See TransactionOptions.ReadOnly.strong.
768 #
769 # ### Exact Staleness
770 #
771 # These timestamp bounds execute reads at a user-specified
772 # timestamp. Reads at a timestamp are guaranteed to see a consistent
773 # prefix of the global transaction history: they observe
Dan O'Mearadd494642020-05-01 07:42:23 -0700774 # modifications done by all transactions with a commit timestamp &lt;=
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400775 # the read timestamp, and observe none of the modifications done by
776 # transactions with a larger commit timestamp. They will block until
777 # all conflicting transactions that may be assigned commit timestamps
Dan O'Mearadd494642020-05-01 07:42:23 -0700778 # &lt;= the read timestamp have finished.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400779 #
780 # The timestamp can either be expressed as an absolute Cloud Spanner commit
781 # timestamp or a staleness relative to the current time.
782 #
Bu Sun Kim65020912020-05-20 12:08:20 -0700783 # These modes do not require a &quot;negotiation phase&quot; to pick a
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400784 # timestamp. As a result, they execute slightly faster than the
785 # equivalent boundedly stale concurrency modes. On the other hand,
786 # boundedly stale reads usually return fresher results.
787 #
788 # See TransactionOptions.ReadOnly.read_timestamp and
789 # TransactionOptions.ReadOnly.exact_staleness.
790 #
791 # ### Bounded Staleness
792 #
793 # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
794 # subject to a user-provided staleness bound. Cloud Spanner chooses the
795 # newest timestamp within the staleness bound that allows execution
796 # of the reads at the closest available replica without blocking.
797 #
798 # All rows yielded are consistent with each other -- if any part of
799 # the read observes a transaction, all parts of the read see the
800 # transaction. Boundedly stale reads are not repeatable: two stale
801 # reads, even if they use the same staleness bound, can execute at
802 # different timestamps and thus return inconsistent results.
803 #
804 # Boundedly stale reads execute in two phases: the first phase
805 # negotiates a timestamp among all replicas needed to serve the
806 # read. In the second phase, reads are executed at the negotiated
807 # timestamp.
808 #
809 # As a result of the two phase execution, bounded staleness reads are
810 # usually a little slower than comparable exact staleness
811 # reads. However, they are typically able to return fresher
812 # results, and are more likely to execute at the closest replica.
813 #
814 # Because the timestamp negotiation requires up-front knowledge of
815 # which rows will be read, it can only be used with single-use
816 # read-only transactions.
817 #
818 # See TransactionOptions.ReadOnly.max_staleness and
819 # TransactionOptions.ReadOnly.min_read_timestamp.
820 #
821 # ### Old Read Timestamps and Garbage Collection
822 #
823 # Cloud Spanner continuously garbage collects deleted and overwritten data
824 # in the background to reclaim storage space. This process is known
Bu Sun Kim65020912020-05-20 12:08:20 -0700825 # as &quot;version GC&quot;. By default, version GC reclaims versions after they
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400826 # are one hour old. Because of this, Cloud Spanner cannot perform reads
827 # at read timestamps more than one hour in the past. This
828 # restriction also applies to in-progress reads and/or SQL queries whose
829 # timestamp become too old while executing. Reads and SQL queries with
830 # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700831 #
832 # ## Partitioned DML Transactions
833 #
834 # Partitioned DML transactions are used to execute DML statements with a
835 # different execution strategy that provides different, and often better,
836 # scalability properties for large, table-wide operations than DML in a
837 # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
838 # should prefer using ReadWrite transactions.
839 #
840 # Partitioned DML partitions the keyspace and runs the DML statement on each
841 # partition in separate, internal transactions. These transactions commit
842 # automatically when complete, and run independently from one another.
843 #
844 # To reduce lock contention, this execution strategy only acquires read locks
845 # on rows that match the WHERE clause of the statement. Additionally, the
846 # smaller per-partition transactions hold locks for less time.
847 #
848 # That said, Partitioned DML is not a drop-in replacement for standard DML used
849 # in ReadWrite transactions.
850 #
851 # - The DML statement must be fully-partitionable. Specifically, the statement
852 # must be expressible as the union of many statements which each access only
853 # a single row of the table.
854 #
855 # - The statement is not applied atomically to all rows of the table. Rather,
856 # the statement is applied atomically to partitions of the table, in
857 # independent transactions. Secondary index rows are updated atomically
858 # with the base table rows.
859 #
860 # - Partitioned DML does not guarantee exactly-once execution semantics
861 # against a partition. The statement will be applied at least once to each
862 # partition. It is strongly recommended that the DML statement should be
863 # idempotent to avoid unexpected results. For instance, it is potentially
864 # dangerous to run a statement such as
865 # `UPDATE table SET column = column + 1` as it could be run multiple times
866 # against some rows.
867 #
868 # - The partitions are committed automatically - there is no support for
869 # Commit or Rollback. If the call returns an error, or if the client issuing
870 # the ExecuteSql call dies, it is possible that some rows had the statement
871 # executed on them successfully. It is also possible that statement was
872 # never executed against other rows.
873 #
874 # - Partitioned DML transactions may only contain the execution of a single
875 # DML statement via ExecuteSql or ExecuteStreamingSql.
876 #
877 # - If any error is encountered during the execution of the partitioned DML
878 # operation (for instance, a UNIQUE INDEX violation, division by zero, or a
879 # value that cannot be stored due to schema constraints), then the
880 # operation is stopped at that point and an error is returned. It is
881 # possible that at this point, some partitions have been committed (or even
882 # committed multiple times), and other partitions have not been run at all.
883 #
884 # Given the above, Partitioned DML is a good fit for large, database-wide,
885 # operations that are idempotent, such as deleting old rows from a very large
886 # table.
Bu Sun Kim65020912020-05-20 12:08:20 -0700887 &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400888 #
Bu Sun Kim65020912020-05-20 12:08:20 -0700889 # Authorization to begin a Partitioned DML transaction requires
890 # `spanner.databases.beginPartitionedDmlTransaction` permission
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400891 # on the `session` resource.
892 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700893 &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
894 #
895 # Authorization to begin a read-write transaction requires
896 # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
897 # on the `session` resource.
898 # transaction type has no options.
899 },
Bu Sun Kim65020912020-05-20 12:08:20 -0700900 &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400901 #
902 # Authorization to begin a read-only transaction requires
903 # `spanner.databases.beginReadOnlyTransaction` permission
904 # on the `session` resource.
Bu Sun Kim65020912020-05-20 12:08:20 -0700905 &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400906 # seconds. Guarantees that all writes that have committed more
907 # than the specified number of seconds ago are visible. Because
908 # Cloud Spanner chooses the exact timestamp, this mode works even if
Bu Sun Kim65020912020-05-20 12:08:20 -0700909 # the client&#x27;s local clock is substantially skewed from Cloud Spanner
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400910 # commit timestamps.
911 #
912 # Useful for reading the freshest data available at a nearby
913 # replica, while bounding the possible staleness if the local
914 # replica has fallen behind.
915 #
916 # Note that this option can only be used in single-use
917 # transactions.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700918 &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
919 #
920 # This is useful for requesting fresher data than some previous
921 # read, or data that is fresh enough to observe the effects of some
922 # previously committed transaction whose timestamp is known.
923 #
924 # Note that this option can only be used in single-use transactions.
925 #
926 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
927 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
928 &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
929 # are visible.
Bu Sun Kim65020912020-05-20 12:08:20 -0700930 &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
931 # the Transaction message that describes the transaction.
932 &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400933 # old. The timestamp is chosen soon after the read is started.
934 #
935 # Guarantees that all writes that have committed more than the
936 # specified number of seconds ago are visible. Because Cloud Spanner
Bu Sun Kim65020912020-05-20 12:08:20 -0700937 # chooses the exact timestamp, this mode works even if the client&#x27;s
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400938 # local clock is substantially skewed from Cloud Spanner commit
939 # timestamps.
940 #
941 # Useful for reading at nearby replicas without the distributed
942 # timestamp negotiation overhead of `max_staleness`.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700943 &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
944 # reads at a specific timestamp are repeatable; the same read at
945 # the same timestamp always returns the same data. If the
946 # timestamp is in the future, the read will block until the
947 # specified timestamp, modulo the read&#x27;s deadline.
948 #
949 # Useful for large scale consistent reads such as mapreduces, or
950 # for coordinating many reads against a consistent snapshot of the
951 # data.
952 #
953 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
954 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -0700955 },
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -0400956 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -0700957 &quot;mutations&quot;: [ # The mutations to be executed when this transaction commits. All
958 # mutations are applied atomically, in the order they appear in
959 # this list.
960 { # A modification to one or more Cloud Spanner rows. Mutations can be
961 # applied to a Cloud Spanner database by sending them in a
962 # Commit call.
963 &quot;insert&quot;: { # Arguments to insert, update, insert_or_update, and # Insert new rows in a table. If any of the rows already exist,
964 # the write or transaction fails with error `ALREADY_EXISTS`.
965 # replace operations.
966 &quot;table&quot;: &quot;A String&quot;, # Required. The table whose rows will be written.
967 &quot;values&quot;: [ # The values to be written. `values` can contain more than one
968 # list of values. If it does, then multiple rows are written, one
969 # for each entry in `values`. Each list in `values` must have
970 # exactly as many entries as there are entries in columns
971 # above. Sending multiple lists is equivalent to sending multiple
972 # `Mutation`s, each containing one `values` entry and repeating
973 # table and columns. Individual values in each list are
974 # encoded as described here.
975 [
976 &quot;&quot;,
977 ],
978 ],
979 &quot;columns&quot;: [ # The names of the columns in table to be written.
980 #
981 # The list of columns must contain enough columns to allow
982 # Cloud Spanner to derive values for all primary key columns in the
983 # row(s) to be modified.
984 &quot;A String&quot;,
985 ],
986 },
987 &quot;delete&quot;: { # Arguments to delete operations. # Delete rows from a table. Succeeds whether or not the named
988 # rows were present.
989 &quot;table&quot;: &quot;A String&quot;, # Required. The table whose rows will be deleted.
990 &quot;keySet&quot;: { # `KeySet` defines a collection of Cloud Spanner keys and/or key ranges. All # Required. The primary keys of the rows within table to delete. The
991 # primary keys must be specified in the order in which they appear in the
992 # `PRIMARY KEY()` clause of the table&#x27;s equivalent DDL statement (the DDL
993 # statement used to create the table).
994 # Delete is idempotent. The transaction will succeed even if some or all
995 # rows do not exist.
996 # the keys are expected to be in the same table or index. The keys need
997 # not be sorted in any particular way.
998 #
999 # If the same key is specified multiple times in the set (for example
1000 # if two ranges, two keys, or a key and a range overlap), Cloud Spanner
1001 # behaves as if the key were only specified once.
1002 &quot;ranges&quot;: [ # A list of key ranges. See KeyRange for more information about
1003 # key range specifications.
1004 { # KeyRange represents a range of rows in a table or index.
1005 #
1006 # A range has a start key and an end key. These keys can be open or
1007 # closed, indicating if the range includes rows with that key.
1008 #
1009 # Keys are represented by lists, where the ith value in the list
1010 # corresponds to the ith component of the table or index primary key.
1011 # Individual values are encoded as described
1012 # here.
1013 #
1014 # For example, consider the following table definition:
1015 #
1016 # CREATE TABLE UserEvents (
1017 # UserName STRING(MAX),
1018 # EventDate STRING(10)
1019 # ) PRIMARY KEY(UserName, EventDate);
1020 #
1021 # The following keys name rows in this table:
1022 #
1023 # &quot;Bob&quot;, &quot;2014-09-23&quot;
1024 #
1025 # Since the `UserEvents` table&#x27;s `PRIMARY KEY` clause names two
1026 # columns, each `UserEvents` key has two elements; the first is the
1027 # `UserName`, and the second is the `EventDate`.
1028 #
1029 # Key ranges with multiple components are interpreted
1030 # lexicographically by component using the table or index key&#x27;s declared
1031 # sort order. For example, the following range returns all events for
1032 # user `&quot;Bob&quot;` that occurred in the year 2015:
1033 #
1034 # &quot;start_closed&quot;: [&quot;Bob&quot;, &quot;2015-01-01&quot;]
1035 # &quot;end_closed&quot;: [&quot;Bob&quot;, &quot;2015-12-31&quot;]
1036 #
1037 # Start and end keys can omit trailing key components. This affects the
1038 # inclusion and exclusion of rows that exactly match the provided key
1039 # components: if the key is closed, then rows that exactly match the
1040 # provided components are included; if the key is open, then rows
1041 # that exactly match are not included.
1042 #
1043 # For example, the following range includes all events for `&quot;Bob&quot;` that
1044 # occurred during and after the year 2000:
1045 #
1046 # &quot;start_closed&quot;: [&quot;Bob&quot;, &quot;2000-01-01&quot;]
1047 # &quot;end_closed&quot;: [&quot;Bob&quot;]
1048 #
1049 # The next example retrieves all events for `&quot;Bob&quot;`:
1050 #
1051 # &quot;start_closed&quot;: [&quot;Bob&quot;]
1052 # &quot;end_closed&quot;: [&quot;Bob&quot;]
1053 #
1054 # To retrieve events before the year 2000:
1055 #
1056 # &quot;start_closed&quot;: [&quot;Bob&quot;]
1057 # &quot;end_open&quot;: [&quot;Bob&quot;, &quot;2000-01-01&quot;]
1058 #
1059 # The following range includes all rows in the table:
1060 #
1061 # &quot;start_closed&quot;: []
1062 # &quot;end_closed&quot;: []
1063 #
1064 # This range returns all users whose `UserName` begins with any
1065 # character from A to C:
1066 #
1067 # &quot;start_closed&quot;: [&quot;A&quot;]
1068 # &quot;end_open&quot;: [&quot;D&quot;]
1069 #
1070 # This range returns all users whose `UserName` begins with B:
1071 #
1072 # &quot;start_closed&quot;: [&quot;B&quot;]
1073 # &quot;end_open&quot;: [&quot;C&quot;]
1074 #
1075 # Key ranges honor column sort order. For example, suppose a table is
1076 # defined as follows:
1077 #
1078 # CREATE TABLE DescendingSortedTable {
1079 # Key INT64,
1080 # ...
1081 # ) PRIMARY KEY(Key DESC);
1082 #
1083 # The following range retrieves all rows with key values between 1
1084 # and 100 inclusive:
1085 #
1086 # &quot;start_closed&quot;: [&quot;100&quot;]
1087 # &quot;end_closed&quot;: [&quot;1&quot;]
1088 #
1089 # Note that 100 is passed as the start, and 1 is passed as the end,
1090 # because `Key` is a descending column in the schema.
1091 &quot;startOpen&quot;: [ # If the start is open, then the range excludes rows whose first
1092 # `len(start_open)` key columns exactly match `start_open`.
1093 &quot;&quot;,
1094 ],
1095 &quot;endClosed&quot;: [ # If the end is closed, then the range includes all rows whose
1096 # first `len(end_closed)` key columns exactly match `end_closed`.
1097 &quot;&quot;,
1098 ],
1099 &quot;endOpen&quot;: [ # If the end is open, then the range excludes rows whose first
1100 # `len(end_open)` key columns exactly match `end_open`.
1101 &quot;&quot;,
1102 ],
1103 &quot;startClosed&quot;: [ # If the start is closed, then the range includes all rows whose
1104 # first `len(start_closed)` key columns exactly match `start_closed`.
1105 &quot;&quot;,
1106 ],
1107 },
1108 ],
1109 &quot;keys&quot;: [ # A list of specific keys. Entries in `keys` should have exactly as
1110 # many elements as there are columns in the primary or index key
1111 # with which this `KeySet` is used. Individual key values are
1112 # encoded as described here.
1113 [
1114 &quot;&quot;,
1115 ],
1116 ],
1117 &quot;all&quot;: True or False, # For convenience `all` can be set to `true` to indicate that this
1118 # `KeySet` matches all keys in the table or index. Note that any keys
1119 # specified in `keys` or `ranges` are only yielded once.
1120 },
1121 },
1122 &quot;replace&quot;: { # Arguments to insert, update, insert_or_update, and # Like insert, except that if the row already exists, it is
1123 # deleted, and the column values provided are inserted
1124 # instead. Unlike insert_or_update, this means any values not
1125 # explicitly written become `NULL`.
1126 #
1127 # In an interleaved table, if you create the child table with the
1128 # `ON DELETE CASCADE` annotation, then replacing a parent row
1129 # also deletes the child rows. Otherwise, you must delete the
1130 # child rows before you replace the parent row.
1131 # replace operations.
1132 &quot;table&quot;: &quot;A String&quot;, # Required. The table whose rows will be written.
1133 &quot;values&quot;: [ # The values to be written. `values` can contain more than one
1134 # list of values. If it does, then multiple rows are written, one
1135 # for each entry in `values`. Each list in `values` must have
1136 # exactly as many entries as there are entries in columns
1137 # above. Sending multiple lists is equivalent to sending multiple
1138 # `Mutation`s, each containing one `values` entry and repeating
1139 # table and columns. Individual values in each list are
1140 # encoded as described here.
1141 [
1142 &quot;&quot;,
1143 ],
1144 ],
1145 &quot;columns&quot;: [ # The names of the columns in table to be written.
1146 #
1147 # The list of columns must contain enough columns to allow
1148 # Cloud Spanner to derive values for all primary key columns in the
1149 # row(s) to be modified.
1150 &quot;A String&quot;,
1151 ],
1152 },
1153 &quot;insertOrUpdate&quot;: { # Arguments to insert, update, insert_or_update, and # Like insert, except that if the row already exists, then
1154 # its column values are overwritten with the ones provided. Any
1155 # column values not explicitly written are preserved.
1156 #
1157 # When using insert_or_update, just as when using insert, all `NOT
1158 # NULL` columns in the table must be given a value. This holds true
1159 # even when the row already exists and will therefore actually be updated.
1160 # replace operations.
1161 &quot;table&quot;: &quot;A String&quot;, # Required. The table whose rows will be written.
1162 &quot;values&quot;: [ # The values to be written. `values` can contain more than one
1163 # list of values. If it does, then multiple rows are written, one
1164 # for each entry in `values`. Each list in `values` must have
1165 # exactly as many entries as there are entries in columns
1166 # above. Sending multiple lists is equivalent to sending multiple
1167 # `Mutation`s, each containing one `values` entry and repeating
1168 # table and columns. Individual values in each list are
1169 # encoded as described here.
1170 [
1171 &quot;&quot;,
1172 ],
1173 ],
1174 &quot;columns&quot;: [ # The names of the columns in table to be written.
1175 #
1176 # The list of columns must contain enough columns to allow
1177 # Cloud Spanner to derive values for all primary key columns in the
1178 # row(s) to be modified.
1179 &quot;A String&quot;,
1180 ],
1181 },
1182 &quot;update&quot;: { # Arguments to insert, update, insert_or_update, and # Update existing rows in a table. If any of the rows does not
1183 # already exist, the transaction fails with error `NOT_FOUND`.
1184 # replace operations.
1185 &quot;table&quot;: &quot;A String&quot;, # Required. The table whose rows will be written.
1186 &quot;values&quot;: [ # The values to be written. `values` can contain more than one
1187 # list of values. If it does, then multiple rows are written, one
1188 # for each entry in `values`. Each list in `values` must have
1189 # exactly as many entries as there are entries in columns
1190 # above. Sending multiple lists is equivalent to sending multiple
1191 # `Mutation`s, each containing one `values` entry and repeating
1192 # table and columns. Individual values in each list are
1193 # encoded as described here.
1194 [
1195 &quot;&quot;,
1196 ],
1197 ],
1198 &quot;columns&quot;: [ # The names of the columns in table to be written.
1199 #
1200 # The list of columns must contain enough columns to allow
1201 # Cloud Spanner to derive values for all primary key columns in the
1202 # row(s) to be modified.
1203 &quot;A String&quot;,
1204 ],
1205 },
1206 },
1207 ],
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04001208 }
1209
1210 x__xgafv: string, V1 error format.
1211 Allowed values
1212 1 - v1 error format
1213 2 - v2 error format
1214
1215Returns:
1216 An object of the form:
1217
1218 { # The response for Commit.
Bu Sun Kim65020912020-05-20 12:08:20 -07001219 &quot;commitTimestamp&quot;: &quot;A String&quot;, # The Cloud Spanner timestamp at which the transaction committed.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04001220 }</pre>
1221</div>
1222
1223<div class="method">
Dan O'Mearadd494642020-05-01 07:42:23 -07001224 <code class="details" id="create">create(database, body=None, x__xgafv=None)</code>
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04001225 <pre>Creates a new session. A session can be used to perform
1226transactions that read and/or modify data in a Cloud Spanner database.
1227Sessions are meant to be reused for many consecutive
1228transactions.
1229
1230Sessions can only execute one transaction at a time. To execute
1231multiple concurrent read-write/write-only transactions, create
1232multiple sessions. Note that standalone reads and queries use a
1233transaction internally, and count toward the one transaction
1234limit.
1235
Dan O'Mearadd494642020-05-01 07:42:23 -07001236Active sessions use additional server resources, so it is a good idea to
1237delete idle and unneeded sessions.
1238Aside from explicit deletes, Cloud Spanner may delete sessions for which no
Sai Cheemalapatie833b792017-03-24 15:06:46 -07001239operations are sent for more than an hour. If a session is deleted,
1240requests to it return `NOT_FOUND`.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04001241
1242Idle sessions can be kept alive by sending a trivial SQL query
Bu Sun Kim65020912020-05-20 12:08:20 -07001243periodically, e.g., `&quot;SELECT 1&quot;`.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04001244
1245Args:
1246 database: string, Required. The database in which the new session is created. (required)
Dan O'Mearadd494642020-05-01 07:42:23 -07001247 body: object, The request body.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07001248 The object takes the form of:
1249
1250{ # The request for CreateSession.
Bu Sun Kim65020912020-05-20 12:08:20 -07001251 &quot;session&quot;: { # A session in the Cloud Spanner API. # The session to create.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001252 &quot;createTime&quot;: &quot;A String&quot;, # Output only. The timestamp when the session is created.
Bu Sun Kim65020912020-05-20 12:08:20 -07001253 &quot;name&quot;: &quot;A String&quot;, # The name of the session. This is always system-assigned; values provided
1254 # when creating a session are ignored.
Bu Sun Kim65020912020-05-20 12:08:20 -07001255 &quot;labels&quot;: { # The labels for the session.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07001256 #
1257 # * Label keys must be between 1 and 63 characters long and must conform to
1258 # the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.
1259 # * Label values must be between 0 and 63 characters long and must conform
1260 # to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
1261 # * No more than 64 labels can be associated with a given session.
1262 #
1263 # See https://goo.gl/xmQnxf for more information on and examples of labels.
Bu Sun Kim65020912020-05-20 12:08:20 -07001264 &quot;a_key&quot;: &quot;A String&quot;,
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07001265 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001266 &quot;approximateLastUseTime&quot;: &quot;A String&quot;, # Output only. The approximate timestamp when the session is last used. It is
1267 # typically earlier than the actual last use time.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07001268 },
1269 }
1270
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04001271 x__xgafv: string, V1 error format.
1272 Allowed values
1273 1 - v1 error format
1274 2 - v2 error format
1275
1276Returns:
1277 An object of the form:
1278
1279 { # A session in the Cloud Spanner API.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001280 &quot;createTime&quot;: &quot;A String&quot;, # Output only. The timestamp when the session is created.
Bu Sun Kim65020912020-05-20 12:08:20 -07001281 &quot;name&quot;: &quot;A String&quot;, # The name of the session. This is always system-assigned; values provided
1282 # when creating a session are ignored.
Bu Sun Kim65020912020-05-20 12:08:20 -07001283 &quot;labels&quot;: { # The labels for the session.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07001284 #
1285 # * Label keys must be between 1 and 63 characters long and must conform to
1286 # the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.
1287 # * Label values must be between 0 and 63 characters long and must conform
1288 # to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
1289 # * No more than 64 labels can be associated with a given session.
1290 #
1291 # See https://goo.gl/xmQnxf for more information on and examples of labels.
Bu Sun Kim65020912020-05-20 12:08:20 -07001292 &quot;a_key&quot;: &quot;A String&quot;,
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07001293 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001294 &quot;approximateLastUseTime&quot;: &quot;A String&quot;, # Output only. The approximate timestamp when the session is last used. It is
1295 # typically earlier than the actual last use time.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04001296 }</pre>
1297</div>
1298
1299<div class="method">
1300 <code class="details" id="delete">delete(name, x__xgafv=None)</code>
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07001301 <pre>Ends a session, releasing server resources associated with it. This will
1302asynchronously trigger cancellation of any operations that are running with
1303this session.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04001304
1305Args:
1306 name: string, Required. The name of the session to delete. (required)
1307 x__xgafv: string, V1 error format.
1308 Allowed values
1309 1 - v1 error format
1310 2 - v2 error format
1311
1312Returns:
1313 An object of the form:
1314
1315 { # A generic empty message that you can re-use to avoid defining duplicated
1316 # empty messages in your APIs. A typical example is to use it as the request
1317 # or the response type of an API method. For instance:
1318 #
1319 # service Foo {
1320 # rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
1321 # }
1322 #
1323 # The JSON representation for `Empty` is empty JSON object `{}`.
1324 }</pre>
1325</div>
1326
1327<div class="method">
Dan O'Mearadd494642020-05-01 07:42:23 -07001328 <code class="details" id="executeBatchDml">executeBatchDml(session, body=None, x__xgafv=None)</code>
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07001329 <pre>Executes a batch of SQL DML statements. This method allows many statements
1330to be run with lower latency than submitting them sequentially with
1331ExecuteSql.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04001332
Dan O'Mearadd494642020-05-01 07:42:23 -07001333Statements are executed in sequential order. A request can succeed even if
1334a statement fails. The ExecuteBatchDmlResponse.status field in the
1335response provides information about the statement that failed. Clients must
1336inspect this field to determine whether an error occurred.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04001337
Dan O'Mearadd494642020-05-01 07:42:23 -07001338Execution stops after the first failed statement; the remaining statements
1339are not executed.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04001340
1341Args:
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07001342 session: string, Required. The session in which the DML statements should be performed. (required)
Dan O'Mearadd494642020-05-01 07:42:23 -07001343 body: object, The request body.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04001344 The object takes the form of:
1345
Dan O'Mearadd494642020-05-01 07:42:23 -07001346{ # The request for ExecuteBatchDml.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001347 &quot;statements&quot;: [ # Required. The list of statements to execute in this batch. Statements are executed
1348 # serially, such that the effects of statement `i` are visible to statement
1349 # `i+1`. Each statement must be a DML statement. Execution stops at the
1350 # first failed statement; the remaining statements are not executed.
1351 #
1352 # Callers must provide at least one statement.
1353 { # A single DML statement.
1354 &quot;params&quot;: { # Parameter names and values that bind to placeholders in the DML string.
1355 #
1356 # A parameter placeholder consists of the `@` character followed by the
1357 # parameter name (for example, `@firstName`). Parameter names can contain
1358 # letters, numbers, and underscores.
1359 #
1360 # Parameters can appear anywhere that a literal value is expected. The
1361 # same parameter name can be used more than once, for example:
1362 #
1363 # `&quot;WHERE id &gt; @msg_id AND id &lt; @msg_id + 100&quot;`
1364 #
1365 # It is an error to execute a SQL statement with unbound parameters.
1366 &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
1367 },
1368 &quot;sql&quot;: &quot;A String&quot;, # Required. The DML string.
1369 &quot;paramTypes&quot;: { # It is not always possible for Cloud Spanner to infer the right SQL type
1370 # from a JSON value. For example, values of type `BYTES` and values
1371 # of type `STRING` both appear in params as JSON strings.
1372 #
1373 # In these cases, `param_types` can be used to specify the exact
1374 # SQL type for some or all of the SQL statement parameters. See the
1375 # definition of Type for more information
1376 # about SQL types.
1377 &quot;a_key&quot;: { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a
1378 # table cell or returned from an SQL query.
1379 &quot;arrayElementType&quot;: # Object with schema name: Type # If code == ARRAY, then `array_element_type`
1380 # is the type of the array elements.
1381 &quot;code&quot;: &quot;A String&quot;, # Required. The TypeCode for this type.
1382 &quot;structType&quot;: { # `StructType` defines the fields of a STRUCT type. # If code == STRUCT, then `struct_type`
1383 # provides type information for the struct&#x27;s fields.
1384 &quot;fields&quot;: [ # The list of fields that make up this struct. Order is
1385 # significant, because values of this struct type are represented as
1386 # lists, where the order of field values matches the order of
1387 # fields in the StructType. In turn, the order of fields
1388 # matches the order of columns in a read request, or the order of
1389 # fields in the `SELECT` clause of a query.
1390 { # Message representing a single field of a struct.
1391 &quot;name&quot;: &quot;A String&quot;, # The name of the field. For reads, this is the column name. For
1392 # SQL queries, it is the column alias (e.g., `&quot;Word&quot;` in the
1393 # query `&quot;SELECT &#x27;hello&#x27; AS Word&quot;`), or the column name (e.g.,
1394 # `&quot;ColName&quot;` in the query `&quot;SELECT ColName FROM Table&quot;`). Some
1395 # columns might have an empty name (e.g., `&quot;SELECT
1396 # UPPER(ColName)&quot;`). Note that a query result can contain
1397 # multiple fields with the same name.
1398 &quot;type&quot;: # Object with schema name: Type # The type of the field.
1399 },
1400 ],
1401 },
1402 },
1403 },
1404 },
1405 ],
1406 &quot;seqno&quot;: &quot;A String&quot;, # Required. A per-transaction sequence number used to identify this request. This field
1407 # makes each request idempotent such that if the request is received multiple
1408 # times, at most one will succeed.
1409 #
1410 # The sequence number must be monotonically increasing within the
1411 # transaction. If a request arrives for the first time with an out-of-order
1412 # sequence number, the transaction may be aborted. Replays of previously
1413 # handled requests will yield the same response as the first execution.
Bu Sun Kim65020912020-05-20 12:08:20 -07001414 &quot;transaction&quot;: { # This message is used to select the transaction in which a # Required. The transaction to use. Must be a read-write transaction.
Dan O'Mearadd494642020-05-01 07:42:23 -07001415 #
1416 # To protect against replays, single-use transactions are not supported. The
1417 # caller must either supply an existing transaction ID or begin a new
1418 # transaction.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04001419 # Read or
1420 # ExecuteSql call runs.
1421 #
1422 # See TransactionOptions for more information about transactions.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001423 &quot;id&quot;: &quot;A String&quot;, # Execute the read or SQL query in a previously-started transaction.
Bu Sun Kim65020912020-05-20 12:08:20 -07001424 &quot;begin&quot;: { # # Transactions # Begin a new transaction and execute this read or SQL query in
1425 # it. The transaction ID of the new transaction is returned in
1426 # ResultSetMetadata.transaction, which is a Transaction.
1427 #
1428 #
1429 # Each session can have at most one active transaction at a time. After the
1430 # active transaction is completed, the session can immediately be
1431 # re-used for the next transaction. It is not necessary to create a
1432 # new session for each transaction.
1433 #
1434 # # Transaction Modes
1435 #
1436 # Cloud Spanner supports three transaction modes:
1437 #
1438 # 1. Locking read-write. This type of transaction is the only way
1439 # to write data into Cloud Spanner. These transactions rely on
1440 # pessimistic locking and, if necessary, two-phase commit.
1441 # Locking read-write transactions may abort, requiring the
1442 # application to retry.
1443 #
1444 # 2. Snapshot read-only. This transaction type provides guaranteed
1445 # consistency across several reads, but does not allow
1446 # writes. Snapshot read-only transactions can be configured to
1447 # read at timestamps in the past. Snapshot read-only
1448 # transactions do not need to be committed.
1449 #
1450 # 3. Partitioned DML. This type of transaction is used to execute
1451 # a single Partitioned DML statement. Partitioned DML partitions
1452 # the key space and runs the DML statement over each partition
1453 # in parallel using separate, internal transactions that commit
1454 # independently. Partitioned DML transactions do not need to be
1455 # committed.
1456 #
1457 # For transactions that only read, snapshot read-only transactions
1458 # provide simpler semantics and are almost always faster. In
1459 # particular, read-only transactions do not take locks, so they do
1460 # not conflict with read-write transactions. As a consequence of not
1461 # taking locks, they also do not abort, so retry loops are not needed.
1462 #
1463 # Transactions may only read/write data in a single database. They
1464 # may, however, read/write data in different tables within that
1465 # database.
1466 #
1467 # ## Locking Read-Write Transactions
1468 #
1469 # Locking transactions may be used to atomically read-modify-write
1470 # data anywhere in a database. This type of transaction is externally
1471 # consistent.
1472 #
1473 # Clients should attempt to minimize the amount of time a transaction
1474 # is active. Faster transactions commit with higher probability
1475 # and cause less contention. Cloud Spanner attempts to keep read locks
1476 # active as long as the transaction continues to do reads, and the
1477 # transaction has not been terminated by
1478 # Commit or
1479 # Rollback. Long periods of
1480 # inactivity at the client may cause Cloud Spanner to release a
1481 # transaction&#x27;s locks and abort it.
1482 #
1483 # Conceptually, a read-write transaction consists of zero or more
1484 # reads or SQL statements followed by
1485 # Commit. At any time before
1486 # Commit, the client can send a
1487 # Rollback request to abort the
1488 # transaction.
1489 #
1490 # ### Semantics
1491 #
1492 # Cloud Spanner can commit the transaction if all read locks it acquired
1493 # are still valid at commit time, and it is able to acquire write
1494 # locks for all writes. Cloud Spanner can abort the transaction for any
1495 # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
1496 # that the transaction has not modified any user data in Cloud Spanner.
1497 #
1498 # Unless the transaction commits, Cloud Spanner makes no guarantees about
1499 # how long the transaction&#x27;s locks were held for. It is an error to
1500 # use Cloud Spanner locks for any sort of mutual exclusion other than
1501 # between Cloud Spanner transactions themselves.
1502 #
1503 # ### Retrying Aborted Transactions
1504 #
1505 # When a transaction aborts, the application can choose to retry the
1506 # whole transaction again. To maximize the chances of successfully
1507 # committing the retry, the client should execute the retry in the
1508 # same session as the original attempt. The original session&#x27;s lock
1509 # priority increases with each consecutive abort, meaning that each
1510 # attempt has a slightly better chance of success than the previous.
1511 #
1512 # Under some circumstances (e.g., many transactions attempting to
1513 # modify the same row(s)), a transaction can abort many times in a
1514 # short period before successfully committing. Thus, it is not a good
1515 # idea to cap the number of retries a transaction can attempt;
1516 # instead, it is better to limit the total amount of wall time spent
1517 # retrying.
1518 #
1519 # ### Idle Transactions
1520 #
1521 # A transaction is considered idle if it has no outstanding reads or
1522 # SQL queries and has not started a read or SQL query within the last 10
1523 # seconds. Idle transactions can be aborted by Cloud Spanner so that they
1524 # don&#x27;t hold on to locks indefinitely. In that case, the commit will
1525 # fail with error `ABORTED`.
1526 #
1527 # If this behavior is undesirable, periodically executing a simple
1528 # SQL query in the transaction (e.g., `SELECT 1`) prevents the
1529 # transaction from becoming idle.
1530 #
1531 # ## Snapshot Read-Only Transactions
1532 #
1533 # Snapshot read-only transactions provide a simpler method than
1534 # locking read-write transactions for doing several consistent
1535 # reads. However, this type of transaction does not support writes.
1536 #
1537 # Snapshot transactions do not take locks. Instead, they work by
1538 # choosing a Cloud Spanner timestamp, then executing all reads at that
1539 # timestamp. Since they do not acquire locks, they do not block
1540 # concurrent read-write transactions.
1541 #
1542 # Unlike locking read-write transactions, snapshot read-only
1543 # transactions never abort. They can fail if the chosen read
1544 # timestamp is garbage collected; however, the default garbage
1545 # collection policy is generous enough that most applications do not
1546 # need to worry about this in practice.
1547 #
1548 # Snapshot read-only transactions do not need to call
1549 # Commit or
1550 # Rollback (and in fact are not
1551 # permitted to do so).
1552 #
1553 # To execute a snapshot transaction, the client specifies a timestamp
1554 # bound, which tells Cloud Spanner how to choose a read timestamp.
1555 #
1556 # The types of timestamp bound are:
1557 #
1558 # - Strong (the default).
1559 # - Bounded staleness.
1560 # - Exact staleness.
1561 #
1562 # If the Cloud Spanner database to be read is geographically distributed,
1563 # stale read-only transactions can execute more quickly than strong
1564 # or read-write transactions, because they are able to execute far
1565 # from the leader replica.
1566 #
1567 # Each type of timestamp bound is discussed in detail below.
1568 #
1569 # ### Strong
1570 #
1571 # Strong reads are guaranteed to see the effects of all transactions
1572 # that have committed before the start of the read. Furthermore, all
1573 # rows yielded by a single read are consistent with each other -- if
1574 # any part of the read observes a transaction, all parts of the read
1575 # see the transaction.
1576 #
1577 # Strong reads are not repeatable: two consecutive strong read-only
1578 # transactions might return inconsistent results if there are
1579 # concurrent writes. If consistency across reads is required, the
1580 # reads should be executed within a transaction or at an exact read
1581 # timestamp.
1582 #
1583 # See TransactionOptions.ReadOnly.strong.
1584 #
1585 # ### Exact Staleness
1586 #
1587 # These timestamp bounds execute reads at a user-specified
1588 # timestamp. Reads at a timestamp are guaranteed to see a consistent
1589 # prefix of the global transaction history: they observe
1590 # modifications done by all transactions with a commit timestamp &lt;=
1591 # the read timestamp, and observe none of the modifications done by
1592 # transactions with a larger commit timestamp. They will block until
1593 # all conflicting transactions that may be assigned commit timestamps
1594 # &lt;= the read timestamp have finished.
1595 #
1596 # The timestamp can either be expressed as an absolute Cloud Spanner commit
1597 # timestamp or a staleness relative to the current time.
1598 #
1599 # These modes do not require a &quot;negotiation phase&quot; to pick a
1600 # timestamp. As a result, they execute slightly faster than the
1601 # equivalent boundedly stale concurrency modes. On the other hand,
1602 # boundedly stale reads usually return fresher results.
1603 #
1604 # See TransactionOptions.ReadOnly.read_timestamp and
1605 # TransactionOptions.ReadOnly.exact_staleness.
1606 #
1607 # ### Bounded Staleness
1608 #
1609 # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
1610 # subject to a user-provided staleness bound. Cloud Spanner chooses the
1611 # newest timestamp within the staleness bound that allows execution
1612 # of the reads at the closest available replica without blocking.
1613 #
1614 # All rows yielded are consistent with each other -- if any part of
1615 # the read observes a transaction, all parts of the read see the
1616 # transaction. Boundedly stale reads are not repeatable: two stale
1617 # reads, even if they use the same staleness bound, can execute at
1618 # different timestamps and thus return inconsistent results.
1619 #
1620 # Boundedly stale reads execute in two phases: the first phase
1621 # negotiates a timestamp among all replicas needed to serve the
1622 # read. In the second phase, reads are executed at the negotiated
1623 # timestamp.
1624 #
1625 # As a result of the two phase execution, bounded staleness reads are
1626 # usually a little slower than comparable exact staleness
1627 # reads. However, they are typically able to return fresher
1628 # results, and are more likely to execute at the closest replica.
1629 #
1630 # Because the timestamp negotiation requires up-front knowledge of
1631 # which rows will be read, it can only be used with single-use
1632 # read-only transactions.
1633 #
1634 # See TransactionOptions.ReadOnly.max_staleness and
1635 # TransactionOptions.ReadOnly.min_read_timestamp.
1636 #
1637 # ### Old Read Timestamps and Garbage Collection
1638 #
1639 # Cloud Spanner continuously garbage collects deleted and overwritten data
1640 # in the background to reclaim storage space. This process is known
1641 # as &quot;version GC&quot;. By default, version GC reclaims versions after they
1642 # are one hour old. Because of this, Cloud Spanner cannot perform reads
1643 # at read timestamps more than one hour in the past. This
1644 # restriction also applies to in-progress reads and/or SQL queries whose
1645 # timestamp become too old while executing. Reads and SQL queries with
1646 # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
1647 #
1648 # ## Partitioned DML Transactions
1649 #
1650 # Partitioned DML transactions are used to execute DML statements with a
1651 # different execution strategy that provides different, and often better,
1652 # scalability properties for large, table-wide operations than DML in a
1653 # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
1654 # should prefer using ReadWrite transactions.
1655 #
1656 # Partitioned DML partitions the keyspace and runs the DML statement on each
1657 # partition in separate, internal transactions. These transactions commit
1658 # automatically when complete, and run independently from one another.
1659 #
1660 # To reduce lock contention, this execution strategy only acquires read locks
1661 # on rows that match the WHERE clause of the statement. Additionally, the
1662 # smaller per-partition transactions hold locks for less time.
1663 #
1664 # That said, Partitioned DML is not a drop-in replacement for standard DML used
1665 # in ReadWrite transactions.
1666 #
1667 # - The DML statement must be fully-partitionable. Specifically, the statement
1668 # must be expressible as the union of many statements which each access only
1669 # a single row of the table.
1670 #
1671 # - The statement is not applied atomically to all rows of the table. Rather,
1672 # the statement is applied atomically to partitions of the table, in
1673 # independent transactions. Secondary index rows are updated atomically
1674 # with the base table rows.
1675 #
1676 # - Partitioned DML does not guarantee exactly-once execution semantics
1677 # against a partition. The statement will be applied at least once to each
1678 # partition. It is strongly recommended that the DML statement should be
1679 # idempotent to avoid unexpected results. For instance, it is potentially
1680 # dangerous to run a statement such as
1681 # `UPDATE table SET column = column + 1` as it could be run multiple times
1682 # against some rows.
1683 #
1684 # - The partitions are committed automatically - there is no support for
1685 # Commit or Rollback. If the call returns an error, or if the client issuing
1686 # the ExecuteSql call dies, it is possible that some rows had the statement
1687 # executed on them successfully. It is also possible that statement was
1688 # never executed against other rows.
1689 #
1690 # - Partitioned DML transactions may only contain the execution of a single
1691 # DML statement via ExecuteSql or ExecuteStreamingSql.
1692 #
1693 # - If any error is encountered during the execution of the partitioned DML
1694 # operation (for instance, a UNIQUE INDEX violation, division by zero, or a
1695 # value that cannot be stored due to schema constraints), then the
1696 # operation is stopped at that point and an error is returned. It is
1697 # possible that at this point, some partitions have been committed (or even
1698 # committed multiple times), and other partitions have not been run at all.
1699 #
1700 # Given the above, Partitioned DML is a good fit for large, database-wide
1701 # operations that are idempotent, such as deleting old rows from a very large
1702 # table.
1703 &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07001704 #
1705 # Authorization to begin a Partitioned DML transaction requires
1706 # `spanner.databases.beginPartitionedDmlTransaction` permission
1707 # on the `session` resource.
1708 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001709 &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
1710 #
1711 # Authorization to begin a read-write transaction requires
1712 # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
1713 # on the `session` resource.
1714 # transaction type has no options.
1715 },
Bu Sun Kim65020912020-05-20 12:08:20 -07001716 &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
1717 #
1718 # Authorization to begin a read-only transaction requires
1719 # `spanner.databases.beginReadOnlyTransaction` permission
1720 # on the `session` resource.
Bu Sun Kim65020912020-05-20 12:08:20 -07001721 &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
1722 # seconds. Guarantees that all writes that have committed more
1723 # than the specified number of seconds ago are visible. Because
1724 # Cloud Spanner chooses the exact timestamp, this mode works even if
1725 # the client&#x27;s local clock is substantially skewed from Cloud Spanner
1726 # commit timestamps.
1727 #
1728 # Useful for reading the freshest data available at a nearby
1729 # replica, while bounding the possible staleness if the local
1730 # replica has fallen behind.
1731 #
1732 # Note that this option can only be used in single-use
1733 # transactions.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001734 &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
1735 #
1736 # This is useful for requesting fresher data than some previous
1737 # read, or data that is fresh enough to observe the effects of some
1738 # previously committed transaction whose timestamp is known.
1739 #
1740 # Note that this option can only be used in single-use transactions.
1741 #
1742 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
1743 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
1744 &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
1745 # are visible.
Bu Sun Kim65020912020-05-20 12:08:20 -07001746 &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
1747 # the Transaction message that describes the transaction.
1748 &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
1749 # old. The timestamp is chosen soon after the read is started.
1750 #
1751 # Guarantees that all writes that have committed more than the
1752 # specified number of seconds ago are visible. Because Cloud Spanner
1753 # chooses the exact timestamp, this mode works even if the client&#x27;s
1754 # local clock is substantially skewed from Cloud Spanner commit
1755 # timestamps.
1756 #
1757 # Useful for reading at nearby replicas without the distributed
1758 # timestamp negotiation overhead of `max_staleness`.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07001759 &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
1760 # reads at a specific timestamp are repeatable; the same read at
1761 # the same timestamp always returns the same data. If the
1762 # timestamp is in the future, the read will block until the
1763 # specified timestamp, modulo the read&#x27;s deadline.
1764 #
1765 # Useful for large scale consistent reads such as mapreduces, or
1766 # for coordinating many reads against a consistent snapshot of the
1767 # data.
1768 #
1769 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
1770 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
1771 },
1772 },
1773 &quot;singleUse&quot;: { # # Transactions # Execute the read or SQL query in a temporary transaction.
1774 # This is the most efficient way to execute a transaction that
1775 # consists of a single SQL query.
1776 #
1777 #
1778 # Each session can have at most one active transaction at a time. After the
1779 # active transaction is completed, the session can immediately be
1780 # re-used for the next transaction. It is not necessary to create a
1781 # new session for each transaction.
1782 #
1783 # # Transaction Modes
1784 #
1785 # Cloud Spanner supports three transaction modes:
1786 #
1787 # 1. Locking read-write. This type of transaction is the only way
1788 # to write data into Cloud Spanner. These transactions rely on
1789 # pessimistic locking and, if necessary, two-phase commit.
1790 # Locking read-write transactions may abort, requiring the
1791 # application to retry.
1792 #
1793 # 2. Snapshot read-only. This transaction type provides guaranteed
1794 # consistency across several reads, but does not allow
1795 # writes. Snapshot read-only transactions can be configured to
1796 # read at timestamps in the past. Snapshot read-only
1797 # transactions do not need to be committed.
1798 #
1799 # 3. Partitioned DML. This type of transaction is used to execute
1800 # a single Partitioned DML statement. Partitioned DML partitions
1801 # the key space and runs the DML statement over each partition
1802 # in parallel using separate, internal transactions that commit
1803 # independently. Partitioned DML transactions do not need to be
1804 # committed.
1805 #
1806 # For transactions that only read, snapshot read-only transactions
1807 # provide simpler semantics and are almost always faster. In
1808 # particular, read-only transactions do not take locks, so they do
1809 # not conflict with read-write transactions. As a consequence of not
1810 # taking locks, they also do not abort, so retry loops are not needed.
1811 #
1812 # Transactions may only read/write data in a single database. They
1813 # may, however, read/write data in different tables within that
1814 # database.
1815 #
1816 # ## Locking Read-Write Transactions
1817 #
1818 # Locking transactions may be used to atomically read-modify-write
1819 # data anywhere in a database. This type of transaction is externally
1820 # consistent.
1821 #
1822 # Clients should attempt to minimize the amount of time a transaction
1823 # is active. Faster transactions commit with higher probability
1824 # and cause less contention. Cloud Spanner attempts to keep read locks
1825 # active as long as the transaction continues to do reads, and the
1826 # transaction has not been terminated by
1827 # Commit or
1828 # Rollback. Long periods of
1829 # inactivity at the client may cause Cloud Spanner to release a
1830 # transaction&#x27;s locks and abort it.
1831 #
1832 # Conceptually, a read-write transaction consists of zero or more
1833 # reads or SQL statements followed by
1834 # Commit. At any time before
1835 # Commit, the client can send a
1836 # Rollback request to abort the
1837 # transaction.
1838 #
1839 # ### Semantics
1840 #
1841 # Cloud Spanner can commit the transaction if all read locks it acquired
1842 # are still valid at commit time, and it is able to acquire write
1843 # locks for all writes. Cloud Spanner can abort the transaction for any
1844 # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
1845 # that the transaction has not modified any user data in Cloud Spanner.
1846 #
1847 # Unless the transaction commits, Cloud Spanner makes no guarantees about
1848 # how long the transaction&#x27;s locks were held for. It is an error to
1849 # use Cloud Spanner locks for any sort of mutual exclusion other than
1850 # between Cloud Spanner transactions themselves.
1851 #
1852 # ### Retrying Aborted Transactions
1853 #
1854 # When a transaction aborts, the application can choose to retry the
1855 # whole transaction again. To maximize the chances of successfully
1856 # committing the retry, the client should execute the retry in the
1857 # same session as the original attempt. The original session&#x27;s lock
1858 # priority increases with each consecutive abort, meaning that each
1859 # attempt has a slightly better chance of success than the previous.
1860 #
1861 # Under some circumstances (e.g., many transactions attempting to
1862 # modify the same row(s)), a transaction can abort many times in a
1863 # short period before successfully committing. Thus, it is not a good
1864 # idea to cap the number of retries a transaction can attempt;
1865 # instead, it is better to limit the total amount of wall time spent
1866 # retrying.
1867 #
1868 # ### Idle Transactions
1869 #
1870 # A transaction is considered idle if it has no outstanding reads or
1871 # SQL queries and has not started a read or SQL query within the last 10
1872 # seconds. Idle transactions can be aborted by Cloud Spanner so that they
1873 # don&#x27;t hold on to locks indefinitely. In that case, the commit will
1874 # fail with error `ABORTED`.
1875 #
1876 # If this behavior is undesirable, periodically executing a simple
1877 # SQL query in the transaction (e.g., `SELECT 1`) prevents the
1878 # transaction from becoming idle.
1879 #
1880 # ## Snapshot Read-Only Transactions
1881 #
1882 # Snapshot read-only transactions provides a simpler method than
1883 # locking read-write transactions for doing several consistent
1884 # reads. However, this type of transaction does not support writes.
1885 #
1886 # Snapshot transactions do not take locks. Instead, they work by
1887 # choosing a Cloud Spanner timestamp, then executing all reads at that
1888 # timestamp. Since they do not acquire locks, they do not block
1889 # concurrent read-write transactions.
1890 #
1891 # Unlike locking read-write transactions, snapshot read-only
1892 # transactions never abort. They can fail if the chosen read
1893 # timestamp is garbage collected; however, the default garbage
1894 # collection policy is generous enough that most applications do not
1895 # need to worry about this in practice.
1896 #
1897 # Snapshot read-only transactions do not need to call
1898 # Commit or
1899 # Rollback (and in fact are not
1900 # permitted to do so).
1901 #
1902 # To execute a snapshot transaction, the client specifies a timestamp
1903 # bound, which tells Cloud Spanner how to choose a read timestamp.
1904 #
1905 # The types of timestamp bound are:
1906 #
1907 # - Strong (the default).
1908 # - Bounded staleness.
1909 # - Exact staleness.
1910 #
1911 # If the Cloud Spanner database to be read is geographically distributed,
1912 # stale read-only transactions can execute more quickly than strong
1913 # or read-write transaction, because they are able to execute far
1914 # from the leader replica.
1915 #
1916 # Each type of timestamp bound is discussed in detail below.
1917 #
1918 # ### Strong
1919 #
1920 # Strong reads are guaranteed to see the effects of all transactions
1921 # that have committed before the start of the read. Furthermore, all
1922 # rows yielded by a single read are consistent with each other -- if
1923 # any part of the read observes a transaction, all parts of the read
1924 # see the transaction.
1925 #
1926 # Strong reads are not repeatable: two consecutive strong read-only
1927 # transactions might return inconsistent results if there are
1928 # concurrent writes. If consistency across reads is required, the
1929 # reads should be executed within a transaction or at an exact read
1930 # timestamp.
1931 #
1932 # See TransactionOptions.ReadOnly.strong.
1933 #
1934 # ### Exact Staleness
1935 #
1936 # These timestamp bounds execute reads at a user-specified
1937 # timestamp. Reads at a timestamp are guaranteed to see a consistent
1938 # prefix of the global transaction history: they observe
1939 # modifications done by all transactions with a commit timestamp &lt;=
1940 # the read timestamp, and observe none of the modifications done by
1941 # transactions with a larger commit timestamp. They will block until
1942 # all conflicting transactions that may be assigned commit timestamps
1943 # &lt;= the read timestamp have finished.
1944 #
1945 # The timestamp can either be expressed as an absolute Cloud Spanner commit
1946 # timestamp or a staleness relative to the current time.
1947 #
1948 # These modes do not require a &quot;negotiation phase&quot; to pick a
1949 # timestamp. As a result, they execute slightly faster than the
1950 # equivalent boundedly stale concurrency modes. On the other hand,
1951 # boundedly stale reads usually return fresher results.
1952 #
1953 # See TransactionOptions.ReadOnly.read_timestamp and
1954 # TransactionOptions.ReadOnly.exact_staleness.
1955 #
1956 # ### Bounded Staleness
1957 #
1958 # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
1959 # subject to a user-provided staleness bound. Cloud Spanner chooses the
1960 # newest timestamp within the staleness bound that allows execution
1961 # of the reads at the closest available replica without blocking.
1962 #
1963 # All rows yielded are consistent with each other -- if any part of
1964 # the read observes a transaction, all parts of the read see the
1965 # transaction. Boundedly stale reads are not repeatable: two stale
1966 # reads, even if they use the same staleness bound, can execute at
1967 # different timestamps and thus return inconsistent results.
1968 #
1969 # Boundedly stale reads execute in two phases: the first phase
1970 # negotiates a timestamp among all replicas needed to serve the
1971 # read. In the second phase, reads are executed at the negotiated
1972 # timestamp.
1973 #
1974 # As a result of the two phase execution, bounded staleness reads are
1975 # usually a little slower than comparable exact staleness
1976 # reads. However, they are typically able to return fresher
1977 # results, and are more likely to execute at the closest replica.
1978 #
1979 # Because the timestamp negotiation requires up-front knowledge of
1980 # which rows will be read, it can only be used with single-use
1981 # read-only transactions.
1982 #
1983 # See TransactionOptions.ReadOnly.max_staleness and
1984 # TransactionOptions.ReadOnly.min_read_timestamp.
1985 #
1986 # ### Old Read Timestamps and Garbage Collection
1987 #
1988 # Cloud Spanner continuously garbage collects deleted and overwritten data
1989 # in the background to reclaim storage space. This process is known
1990 # as &quot;version GC&quot;. By default, version GC reclaims versions after they
1991 # are one hour old. Because of this, Cloud Spanner cannot perform reads
1992 # at read timestamps more than one hour in the past. This
1993 # restriction also applies to in-progress reads and/or SQL queries whose
1994 # timestamp become too old while executing. Reads and SQL queries with
1995 # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
1996 #
1997 # ## Partitioned DML Transactions
1998 #
1999 # Partitioned DML transactions are used to execute DML statements with a
2000 # different execution strategy that provides different, and often better,
2001 # scalability properties for large, table-wide operations than DML in a
2002 # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
2003 # should prefer using ReadWrite transactions.
2004 #
2005 # Partitioned DML partitions the keyspace and runs the DML statement on each
2006 # partition in separate, internal transactions. These transactions commit
2007 # automatically when complete, and run independently from one another.
2008 #
2009 # To reduce lock contention, this execution strategy only acquires read locks
2010 # on rows that match the WHERE clause of the statement. Additionally, the
2011 # smaller per-partition transactions hold locks for less time.
2012 #
2013 # That said, Partitioned DML is not a drop-in replacement for standard DML used
2014 # in ReadWrite transactions.
2015 #
2016 # - The DML statement must be fully-partitionable. Specifically, the statement
2017 # must be expressible as the union of many statements which each access only
2018 # a single row of the table.
2019 #
2020 # - The statement is not applied atomically to all rows of the table. Rather,
2021 # the statement is applied atomically to partitions of the table, in
2022 # independent transactions. Secondary index rows are updated atomically
2023 # with the base table rows.
2024 #
2025 # - Partitioned DML does not guarantee exactly-once execution semantics
2026 # against a partition. The statement will be applied at least once to each
2027 # partition. It is strongly recommended that the DML statement should be
2028 # idempotent to avoid unexpected results. For instance, it is potentially
2029 # dangerous to run a statement such as
2030 # `UPDATE table SET column = column + 1` as it could be run multiple times
2031 # against some rows.
2032 #
2033 # - The partitions are committed automatically - there is no support for
2034 # Commit or Rollback. If the call returns an error, or if the client issuing
2035 # the ExecuteSql call dies, it is possible that some rows had the statement
2036 # executed on them successfully. It is also possible that statement was
2037 # never executed against other rows.
2038 #
2039 # - Partitioned DML transactions may only contain the execution of a single
2040 # DML statement via ExecuteSql or ExecuteStreamingSql.
2041 #
2042 # - If any error is encountered during the execution of the partitioned DML
2043 # operation (for instance, a UNIQUE INDEX violation, division by zero, or a
2044 # value that cannot be stored due to schema constraints), then the
2045 # operation is stopped at that point and an error is returned. It is
2046 # possible that at this point, some partitions have been committed (or even
2047 # committed multiple times), and other partitions have not been run at all.
2048 #
2049 # Given the above, Partitioned DML is good fit for large, database-wide,
2050 # operations that are idempotent, such as deleting old rows from a very large
2051 # table.
2052 &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
2053 #
2054 # Authorization to begin a Partitioned DML transaction requires
2055 # `spanner.databases.beginPartitionedDmlTransaction` permission
2056 # on the `session` resource.
Bu Sun Kim65020912020-05-20 12:08:20 -07002057 },
2058 &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
2059 #
2060 # Authorization to begin a read-write transaction requires
2061 # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
2062 # on the `session` resource.
2063 # transaction type has no options.
2064 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07002065 &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
2066 #
2067 # Authorization to begin a read-only transaction requires
2068 # `spanner.databases.beginReadOnlyTransaction` permission
2069 # on the `session` resource.
2070 &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
2071 # seconds. Guarantees that all writes that have committed more
2072 # than the specified number of seconds ago are visible. Because
2073 # Cloud Spanner chooses the exact timestamp, this mode works even if
2074 # the client&#x27;s local clock is substantially skewed from Cloud Spanner
2075 # commit timestamps.
2076 #
2077 # Useful for reading the freshest data available at a nearby
2078 # replica, while bounding the possible staleness if the local
2079 # replica has fallen behind.
2080 #
2081 # Note that this option can only be used in single-use
2082 # transactions.
2083 &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
2084 #
2085 # This is useful for requesting fresher data than some previous
2086 # read, or data that is fresh enough to observe the effects of some
2087 # previously committed transaction whose timestamp is known.
2088 #
2089 # Note that this option can only be used in single-use transactions.
2090 #
2091 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
2092 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
2093 &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
2094 # are visible.
2095 &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
2096 # the Transaction message that describes the transaction.
2097 &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
2098 # old. The timestamp is chosen soon after the read is started.
2099 #
2100 # Guarantees that all writes that have committed more than the
2101 # specified number of seconds ago are visible. Because Cloud Spanner
2102 # chooses the exact timestamp, this mode works even if the client&#x27;s
2103 # local clock is substantially skewed from Cloud Spanner commit
2104 # timestamps.
2105 #
2106 # Useful for reading at nearby replicas without the distributed
2107 # timestamp negotiation overhead of `max_staleness`.
2108 &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
2109 # reads at a specific timestamp are repeatable; the same read at
2110 # the same timestamp always returns the same data. If the
2111 # timestamp is in the future, the read will block until the
2112 # specified timestamp, modulo the read&#x27;s deadline.
2113 #
2114 # Useful for large scale consistent reads such as mapreduces, or
2115 # for coordinating many reads against a consistent snapshot of the
2116 # data.
2117 #
2118 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
2119 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
2120 },
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04002121 },
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04002122 },
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07002123 }
2124
2125 x__xgafv: string, V1 error format.
2126 Allowed values
2127 1 - v1 error format
2128 2 - v2 error format
2129
2130Returns:
2131 An object of the form:
2132
2133 { # The response for ExecuteBatchDml. Contains a list
Dan O'Mearadd494642020-05-01 07:42:23 -07002134 # of ResultSet messages, one for each DML statement that has successfully
2135 # executed, in the same order as the statements in the request. If a statement
2136 # fails, the status in the response body identifies the cause of the failure.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07002137 #
Dan O'Mearadd494642020-05-01 07:42:23 -07002138 # To check for DML statements that failed, use the following approach:
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07002139 #
Dan O'Mearadd494642020-05-01 07:42:23 -07002140 # 1. Check the status in the response message. The google.rpc.Code enum
2141 # value `OK` indicates that all statements were executed successfully.
2142 # 2. If the status was not `OK`, check the number of result sets in the
2143 # response. If the response contains `N` ResultSet messages, then
2144 # statement `N+1` in the request failed.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07002145 #
Dan O'Mearadd494642020-05-01 07:42:23 -07002146 # Example 1:
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07002147 #
Dan O'Mearadd494642020-05-01 07:42:23 -07002148 # * Request: 5 DML statements, all executed successfully.
2149 # * Response: 5 ResultSet messages, with the status `OK`.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07002150 #
Dan O'Mearadd494642020-05-01 07:42:23 -07002151 # Example 2:
2152 #
2153 # * Request: 5 DML statements. The third statement has a syntax error.
2154 # * Response: 2 ResultSet messages, and a syntax error (`INVALID_ARGUMENT`)
2155 # status. The number of ResultSet messages indicates that the third
2156 # statement failed, and the fourth and fifth statements were not executed.
Bu Sun Kim65020912020-05-20 12:08:20 -07002157 &quot;status&quot;: { # The `Status` type defines a logical error model that is suitable for # If all DML statements are executed successfully, the status is `OK`.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07002158 # Otherwise, the error status of the first failed statement.
2159 # different programming environments, including REST APIs and RPC APIs. It is
Dan O'Mearadd494642020-05-01 07:42:23 -07002160 # used by [gRPC](https://github.com/grpc). Each `Status` message contains
2161 # three pieces of data: error code, error message, and error details.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07002162 #
Dan O'Mearadd494642020-05-01 07:42:23 -07002163 # You can find out more about this error model and how to work with it in the
2164 # [API Design Guide](https://cloud.google.com/apis/design/errors).
Bu Sun Kim65020912020-05-20 12:08:20 -07002165 &quot;details&quot;: [ # A list of messages that carry the error details. There is a common set of
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07002166 # message types for APIs to use.
2167 {
Bu Sun Kim65020912020-05-20 12:08:20 -07002168 &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07002169 },
2170 ],
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07002171 &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any
2172 # user-facing error message should be localized and sent in the
2173 # google.rpc.Status.details field, or localized by the client.
2174 &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07002175 },
Bu Sun Kim65020912020-05-20 12:08:20 -07002176 &quot;resultSets&quot;: [ # One ResultSet for each statement in the request that ran successfully,
Dan O'Mearadd494642020-05-01 07:42:23 -07002177 # in the same order as the statements in the request. Each ResultSet does
2178 # not contain any rows. The ResultSetStats in each ResultSet contain
2179 # the number of rows modified by the statement.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07002180 #
Dan O'Mearadd494642020-05-01 07:42:23 -07002181 # Only the first ResultSet in the response contains valid
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07002182 # ResultSetMetadata.
2183 { # Results from Read or
2184 # ExecuteSql.
Bu Sun Kim65020912020-05-20 12:08:20 -07002185 &quot;metadata&quot;: { # Metadata about a ResultSet or PartialResultSet. # Metadata about the result set, such as row type information.
2186 &quot;rowType&quot;: { # `StructType` defines the fields of a STRUCT type. # Indicates the field names and types for the rows in the result
2187 # set. For example, a SQL query like `&quot;SELECT UserId, UserName FROM
2188 # Users&quot;` could return a `row_type` value like:
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07002189 #
Bu Sun Kim65020912020-05-20 12:08:20 -07002190 # &quot;fields&quot;: [
2191 # { &quot;name&quot;: &quot;UserId&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;INT64&quot; } },
2192 # { &quot;name&quot;: &quot;UserName&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;STRING&quot; } },
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07002193 # ]
Bu Sun Kim65020912020-05-20 12:08:20 -07002194 &quot;fields&quot;: [ # The list of fields that make up this struct. Order is
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07002195 # significant, because values of this struct type are represented as
2196 # lists, where the order of field values matches the order of
2197 # fields in the StructType. In turn, the order of fields
2198 # matches the order of columns in a read request, or the order of
2199 # fields in the `SELECT` clause of a query.
2200 { # Message representing a single field of a struct.
Bu Sun Kim65020912020-05-20 12:08:20 -07002201 &quot;name&quot;: &quot;A String&quot;, # The name of the field. For reads, this is the column name. For
2202 # SQL queries, it is the column alias (e.g., `&quot;Word&quot;` in the
2203 # query `&quot;SELECT &#x27;hello&#x27; AS Word&quot;`), or the column name (e.g.,
2204 # `&quot;ColName&quot;` in the query `&quot;SELECT ColName FROM Table&quot;`). Some
2205 # columns might have an empty name (e.g., !&quot;SELECT
2206 # UPPER(ColName)&quot;`). Note that a query result can contain
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07002207 # multiple fields with the same name.
Bu Sun Kim65020912020-05-20 12:08:20 -07002208 &quot;type&quot;: # Object with schema name: Type # The type of the field.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07002209 },
2210 ],
2211 },
Bu Sun Kim65020912020-05-20 12:08:20 -07002212 &quot;transaction&quot;: { # A transaction. # If the read or SQL query began a transaction as a side-effect, the
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07002213 # information about the new transaction is yielded here.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07002214 &quot;readTimestamp&quot;: &quot;A String&quot;, # For snapshot read-only transactions, the read timestamp chosen
2215 # for the transaction. Not returned by default: see
2216 # TransactionOptions.ReadOnly.return_read_timestamp.
2217 #
2218 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
2219 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
Bu Sun Kim65020912020-05-20 12:08:20 -07002220 &quot;id&quot;: &quot;A String&quot;, # `id` may be used to identify the transaction in subsequent
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07002221 # Read,
2222 # ExecuteSql,
2223 # Commit, or
2224 # Rollback calls.
2225 #
2226 # Single-use read-only transactions do not have IDs, because
2227 # single-use transactions do not support multiple requests.
2228 },
2229 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07002230 &quot;stats&quot;: { # Additional statistics about a ResultSet or PartialResultSet. # Query plan and execution statistics for the SQL statement that
2231 # produced this result set. These can be requested by setting
2232 # ExecuteSqlRequest.query_mode.
2233 # DML statements always produce stats containing the number of rows
2234 # modified, unless executed using the
2235 # ExecuteSqlRequest.QueryMode.PLAN ExecuteSqlRequest.query_mode.
2236 # Other fields may or may not be populated, based on the
2237 # ExecuteSqlRequest.query_mode.
2238 &quot;rowCountLowerBound&quot;: &quot;A String&quot;, # Partitioned DML does not offer exactly-once semantics, so it
2239 # returns a lower bound of the rows modified.
2240 &quot;queryPlan&quot;: { # Contains an ordered list of nodes appearing in the query plan. # QueryPlan for the query associated with this result.
2241 &quot;planNodes&quot;: [ # The nodes in the query plan. Plan nodes are returned in pre-order starting
2242 # with the plan root. Each PlanNode&#x27;s `id` corresponds to its index in
2243 # `plan_nodes`.
2244 { # Node information for nodes appearing in a QueryPlan.plan_nodes.
2245 &quot;displayName&quot;: &quot;A String&quot;, # The display name for the node.
2246 &quot;executionStats&quot;: { # The execution statistics associated with the node, contained in a group of
2247 # key-value pairs. Only present if the plan was returned as a result of a
2248 # profile query. For example, number of executions, number of rows/time per
2249 # execution etc.
2250 &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
2251 },
2252 &quot;shortRepresentation&quot;: { # Condensed representation of a node and its subtree. Only present for # Condensed representation for SCALAR nodes.
2253 # `SCALAR` PlanNode(s).
2254 &quot;subqueries&quot;: { # A mapping of (subquery variable name) -&gt; (subquery node id) for cases
2255 # where the `description` string of this node references a `SCALAR`
2256 # subquery contained in the expression subtree rooted at this node. The
2257 # referenced `SCALAR` subquery may not necessarily be a direct child of
2258 # this node.
2259 &quot;a_key&quot;: 42,
2260 },
2261 &quot;description&quot;: &quot;A String&quot;, # A string representation of the expression subtree rooted at this node.
2262 },
2263 &quot;metadata&quot;: { # Attributes relevant to the node contained in a group of key-value pairs.
2264 # For example, a Parameter Reference node could have the following
2265 # information in its metadata:
2266 #
2267 # {
2268 # &quot;parameter_reference&quot;: &quot;param1&quot;,
2269 # &quot;parameter_type&quot;: &quot;array&quot;
2270 # }
2271 &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
2272 },
2273 &quot;childLinks&quot;: [ # List of child node `index`es and their relationship to this parent.
2274 { # Metadata associated with a parent-child relationship appearing in a
2275 # PlanNode.
2276 &quot;type&quot;: &quot;A String&quot;, # The type of the link. For example, in Hash Joins this could be used to
2277 # distinguish between the build child and the probe child, or in the case
2278 # of the child being an output variable, to represent the tag associated
2279 # with the output variable.
2280 &quot;variable&quot;: &quot;A String&quot;, # Only present if the child node is SCALAR and corresponds
2281 # to an output variable of the parent node. The field carries the name of
2282 # the output variable.
2283 # For example, a `TableScan` operator that reads rows from a table will
2284 # have child links to the `SCALAR` nodes representing the output variables
2285 # created for each column that is read by the operator. The corresponding
2286 # `variable` fields will be set to the variable names assigned to the
2287 # columns.
2288 &quot;childIndex&quot;: 42, # The node to which the link points.
2289 },
2290 ],
2291 &quot;index&quot;: 42, # The `PlanNode`&#x27;s index in node list.
2292 &quot;kind&quot;: &quot;A String&quot;, # Used to determine the type of node. May be needed for visualizing
2293 # different kinds of nodes differently. For example, If the node is a
2294 # SCALAR node, it will have a condensed representation
2295 # which can be used to directly embed a description of the node in its
2296 # parent.
2297 },
2298 ],
2299 },
2300 &quot;rowCountExact&quot;: &quot;A String&quot;, # Standard DML returns an exact count of rows that were modified.
2301 &quot;queryStats&quot;: { # Aggregated statistics from the execution of the query. Only present when
2302 # the query is profiled. For example, a query could return the statistics as
2303 # follows:
2304 #
2305 # {
2306 # &quot;rows_returned&quot;: &quot;3&quot;,
2307 # &quot;elapsed_time&quot;: &quot;1.22 secs&quot;,
2308 # &quot;cpu_time&quot;: &quot;1.19 secs&quot;
2309 # }
2310 &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
2311 },
2312 },
2313 &quot;rows&quot;: [ # Each element in `rows` is a row whose format is defined by
2314 # metadata.row_type. The ith element
2315 # in each row matches the ith field in
2316 # metadata.row_type. Elements are
2317 # encoded based on type as described
2318 # here.
2319 [
2320 &quot;&quot;,
2321 ],
2322 ],
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07002323 },
2324 ],
2325 }</pre>
2326</div>
2327
2328<div class="method">
Dan O'Mearadd494642020-05-01 07:42:23 -07002329 <code class="details" id="executeSql">executeSql(session, body=None, x__xgafv=None)</code>
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07002330 <pre>Executes an SQL statement, returning all results in a single reply. This
2331method cannot be used to return a result set larger than 10 MiB;
2332if the query yields more data than that, the query fails with
2333a `FAILED_PRECONDITION` error.
2334
2335Operations inside read-write transactions might return `ABORTED`. If
2336this occurs, the application should restart the transaction from
2337the beginning. See Transaction for more details.
2338
2339Larger result sets can be fetched in streaming fashion by calling
2340ExecuteStreamingSql instead.
2341
2342Args:
2343 session: string, Required. The session in which the SQL query should be performed. (required)
Dan O'Mearadd494642020-05-01 07:42:23 -07002344 body: object, The request body.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07002345 The object takes the form of:
2346
2347{ # The request for ExecuteSql and
2348 # ExecuteStreamingSql.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07002349 &quot;seqno&quot;: &quot;A String&quot;, # A per-transaction sequence number used to identify this request. This field
2350 # makes each request idempotent such that if the request is received multiple
2351 # times, at most one will succeed.
2352 #
2353 # The sequence number must be monotonically increasing within the
2354 # transaction. If a request arrives for the first time with an out-of-order
2355 # sequence number, the transaction may be aborted. Replays of previously
2356 # handled requests will yield the same response as the first execution.
2357 #
2358 # Required for DML statements. Ignored for queries.
Bu Sun Kim65020912020-05-20 12:08:20 -07002359 &quot;transaction&quot;: { # This message is used to select the transaction in which a # The transaction to use.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07002360 #
2361 # For queries, if none is provided, the default is a temporary read-only
2362 # transaction with strong concurrency.
2363 #
Dan O'Mearadd494642020-05-01 07:42:23 -07002364 # Standard DML statements require a read-write transaction. To protect
2365 # against replays, single-use transactions are not supported. The caller
2366 # must either supply an existing transaction ID or begin a new transaction.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07002367 #
Dan O'Mearadd494642020-05-01 07:42:23 -07002368 # Partitioned DML requires an existing Partitioned DML transaction ID.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07002369 # Read or
2370 # ExecuteSql call runs.
2371 #
2372 # See TransactionOptions for more information about transactions.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07002373 &quot;id&quot;: &quot;A String&quot;, # Execute the read or SQL query in a previously-started transaction.
Bu Sun Kim65020912020-05-20 12:08:20 -07002374 &quot;begin&quot;: { # # Transactions # Begin a new transaction and execute this read or SQL query in
2375 # it. The transaction ID of the new transaction is returned in
2376 # ResultSetMetadata.transaction, which is a Transaction.
2377 #
2378 #
2379 # Each session can have at most one active transaction at a time. After the
2380 # active transaction is completed, the session can immediately be
2381 # re-used for the next transaction. It is not necessary to create a
2382 # new session for each transaction.
2383 #
2384 # # Transaction Modes
2385 #
2386 # Cloud Spanner supports three transaction modes:
2387 #
2388 # 1. Locking read-write. This type of transaction is the only way
2389 # to write data into Cloud Spanner. These transactions rely on
2390 # pessimistic locking and, if necessary, two-phase commit.
2391 # Locking read-write transactions may abort, requiring the
2392 # application to retry.
2393 #
2394 # 2. Snapshot read-only. This transaction type provides guaranteed
2395 # consistency across several reads, but does not allow
2396 # writes. Snapshot read-only transactions can be configured to
2397 # read at timestamps in the past. Snapshot read-only
2398 # transactions do not need to be committed.
2399 #
2400 # 3. Partitioned DML. This type of transaction is used to execute
2401 # a single Partitioned DML statement. Partitioned DML partitions
2402 # the key space and runs the DML statement over each partition
2403 # in parallel using separate, internal transactions that commit
2404 # independently. Partitioned DML transactions do not need to be
2405 # committed.
2406 #
2407 # For transactions that only read, snapshot read-only transactions
2408 # provide simpler semantics and are almost always faster. In
2409 # particular, read-only transactions do not take locks, so they do
2410 # not conflict with read-write transactions. As a consequence of not
2411 # taking locks, they also do not abort, so retry loops are not needed.
2412 #
2413 # Transactions may only read/write data in a single database. They
2414 # may, however, read/write data in different tables within that
2415 # database.
2416 #
2417 # ## Locking Read-Write Transactions
2418 #
2419 # Locking transactions may be used to atomically read-modify-write
2420 # data anywhere in a database. This type of transaction is externally
2421 # consistent.
2422 #
2423 # Clients should attempt to minimize the amount of time a transaction
2424 # is active. Faster transactions commit with higher probability
2425 # and cause less contention. Cloud Spanner attempts to keep read locks
2426 # active as long as the transaction continues to do reads, and the
2427 # transaction has not been terminated by
2428 # Commit or
2429 # Rollback. Long periods of
2430 # inactivity at the client may cause Cloud Spanner to release a
2431 # transaction&#x27;s locks and abort it.
2432 #
2433 # Conceptually, a read-write transaction consists of zero or more
2434 # reads or SQL statements followed by
2435 # Commit. At any time before
2436 # Commit, the client can send a
2437 # Rollback request to abort the
2438 # transaction.
2439 #
2440 # ### Semantics
2441 #
2442 # Cloud Spanner can commit the transaction if all read locks it acquired
2443 # are still valid at commit time, and it is able to acquire write
2444 # locks for all writes. Cloud Spanner can abort the transaction for any
2445 # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
2446 # that the transaction has not modified any user data in Cloud Spanner.
2447 #
2448 # Unless the transaction commits, Cloud Spanner makes no guarantees about
2449 # how long the transaction&#x27;s locks were held for. It is an error to
2450 # use Cloud Spanner locks for any sort of mutual exclusion other than
2451 # between Cloud Spanner transactions themselves.
2452 #
2453 # ### Retrying Aborted Transactions
2454 #
2455 # When a transaction aborts, the application can choose to retry the
2456 # whole transaction again. To maximize the chances of successfully
2457 # committing the retry, the client should execute the retry in the
2458 # same session as the original attempt. The original session&#x27;s lock
2459 # priority increases with each consecutive abort, meaning that each
2460 # attempt has a slightly better chance of success than the previous.
2461 #
2462 # Under some circumstances (e.g., many transactions attempting to
2463 # modify the same row(s)), a transaction can abort many times in a
2464 # short period before successfully committing. Thus, it is not a good
2465 # idea to cap the number of retries a transaction can attempt;
2466 # instead, it is better to limit the total amount of wall time spent
2467 # retrying.
2468 #
2469 # ### Idle Transactions
2470 #
2471 # A transaction is considered idle if it has no outstanding reads or
2472 # SQL queries and has not started a read or SQL query within the last 10
2473 # seconds. Idle transactions can be aborted by Cloud Spanner so that they
2474 # don&#x27;t hold on to locks indefinitely. In that case, the commit will
2475 # fail with error `ABORTED`.
2476 #
2477 # If this behavior is undesirable, periodically executing a simple
2478 # SQL query in the transaction (e.g., `SELECT 1`) prevents the
2479 # transaction from becoming idle.
2480 #
2481 # ## Snapshot Read-Only Transactions
2482 #
2483 # Snapshot read-only transactions provide a simpler method than
2484 # locking read-write transactions for doing several consistent
2485 # reads. However, this type of transaction does not support writes.
2486 #
2487 # Snapshot transactions do not take locks. Instead, they work by
2488 # choosing a Cloud Spanner timestamp, then executing all reads at that
2489 # timestamp. Since they do not acquire locks, they do not block
2490 # concurrent read-write transactions.
2491 #
2492 # Unlike locking read-write transactions, snapshot read-only
2493 # transactions never abort. They can fail if the chosen read
2494 # timestamp is garbage collected; however, the default garbage
2495 # collection policy is generous enough that most applications do not
2496 # need to worry about this in practice.
2497 #
2498 # Snapshot read-only transactions do not need to call
2499 # Commit or
2500 # Rollback (and in fact are not
2501 # permitted to do so).
2502 #
2503 # To execute a snapshot transaction, the client specifies a timestamp
2504 # bound, which tells Cloud Spanner how to choose a read timestamp.
2505 #
2506 # The types of timestamp bound are:
2507 #
2508 # - Strong (the default).
2509 # - Bounded staleness.
2510 # - Exact staleness.
2511 #
2512 # If the Cloud Spanner database to be read is geographically distributed,
2513 # stale read-only transactions can execute more quickly than strong
2514 # or read-write transactions, because they are able to execute far
2515 # from the leader replica.
2516 #
2517 # Each type of timestamp bound is discussed in detail below.
2518 #
2519 # ### Strong
2520 #
2521 # Strong reads are guaranteed to see the effects of all transactions
2522 # that have committed before the start of the read. Furthermore, all
2523 # rows yielded by a single read are consistent with each other -- if
2524 # any part of the read observes a transaction, all parts of the read
2525 # see the transaction.
2526 #
2527 # Strong reads are not repeatable: two consecutive strong read-only
2528 # transactions might return inconsistent results if there are
2529 # concurrent writes. If consistency across reads is required, the
2530 # reads should be executed within a transaction or at an exact read
2531 # timestamp.
2532 #
2533 # See TransactionOptions.ReadOnly.strong.
2534 #
2535 # ### Exact Staleness
2536 #
2537 # These timestamp bounds execute reads at a user-specified
2538 # timestamp. Reads at a timestamp are guaranteed to see a consistent
2539 # prefix of the global transaction history: they observe
2540 # modifications done by all transactions with a commit timestamp &lt;=
2541 # the read timestamp, and observe none of the modifications done by
2542 # transactions with a larger commit timestamp. They will block until
2543 # all conflicting transactions that may be assigned commit timestamps
2544 # &lt;= the read timestamp have finished.
2545 #
2546 # The timestamp can either be expressed as an absolute Cloud Spanner commit
2547 # timestamp or a staleness relative to the current time.
2548 #
2549 # These modes do not require a &quot;negotiation phase&quot; to pick a
2550 # timestamp. As a result, they execute slightly faster than the
2551 # equivalent boundedly stale concurrency modes. On the other hand,
2552 # boundedly stale reads usually return fresher results.
2553 #
2554 # See TransactionOptions.ReadOnly.read_timestamp and
2555 # TransactionOptions.ReadOnly.exact_staleness.
2556 #
2557 # ### Bounded Staleness
2558 #
2559 # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
2560 # subject to a user-provided staleness bound. Cloud Spanner chooses the
2561 # newest timestamp within the staleness bound that allows execution
2562 # of the reads at the closest available replica without blocking.
2563 #
2564 # All rows yielded are consistent with each other -- if any part of
2565 # the read observes a transaction, all parts of the read see the
2566 # transaction. Boundedly stale reads are not repeatable: two stale
2567 # reads, even if they use the same staleness bound, can execute at
2568 # different timestamps and thus return inconsistent results.
2569 #
2570 # Boundedly stale reads execute in two phases: the first phase
2571 # negotiates a timestamp among all replicas needed to serve the
2572 # read. In the second phase, reads are executed at the negotiated
2573 # timestamp.
2574 #
2575 # As a result of the two phase execution, bounded staleness reads are
2576 # usually a little slower than comparable exact staleness
2577 # reads. However, they are typically able to return fresher
2578 # results, and are more likely to execute at the closest replica.
2579 #
2580 # Because the timestamp negotiation requires up-front knowledge of
2581 # which rows will be read, it can only be used with single-use
2582 # read-only transactions.
2583 #
2584 # See TransactionOptions.ReadOnly.max_staleness and
2585 # TransactionOptions.ReadOnly.min_read_timestamp.
2586 #
2587 # ### Old Read Timestamps and Garbage Collection
2588 #
2589 # Cloud Spanner continuously garbage collects deleted and overwritten data
2590 # in the background to reclaim storage space. This process is known
2591 # as &quot;version GC&quot;. By default, version GC reclaims versions after they
2592 # are one hour old. Because of this, Cloud Spanner cannot perform reads
2593 # at read timestamps more than one hour in the past. This
2594 # restriction also applies to in-progress reads and/or SQL queries whose
2595 # timestamp becomes too old while executing. Reads and SQL queries with
2596 # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
2597 #
2598 # ## Partitioned DML Transactions
2599 #
2600 # Partitioned DML transactions are used to execute DML statements with a
2601 # different execution strategy that provides different, and often better,
2602 # scalability properties for large, table-wide operations than DML in a
2603 # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
2604 # should prefer using ReadWrite transactions.
2605 #
2606 # Partitioned DML partitions the keyspace and runs the DML statement on each
2607 # partition in separate, internal transactions. These transactions commit
2608 # automatically when complete, and run independently from one another.
2609 #
2610 # To reduce lock contention, this execution strategy only acquires read locks
2611 # on rows that match the WHERE clause of the statement. Additionally, the
2612 # smaller per-partition transactions hold locks for less time.
2613 #
2614 # That said, Partitioned DML is not a drop-in replacement for standard DML used
2615 # in ReadWrite transactions.
2616 #
2617 # - The DML statement must be fully-partitionable. Specifically, the statement
2618 # must be expressible as the union of many statements which each access only
2619 # a single row of the table.
2620 #
2621 # - The statement is not applied atomically to all rows of the table. Rather,
2622 # the statement is applied atomically to partitions of the table, in
2623 # independent transactions. Secondary index rows are updated atomically
2624 # with the base table rows.
2625 #
2626 # - Partitioned DML does not guarantee exactly-once execution semantics
2627 # against a partition. The statement will be applied at least once to each
2628 # partition. It is strongly recommended that the DML statement should be
2629 # idempotent to avoid unexpected results. For instance, it is potentially
2630 # dangerous to run a statement such as
2631 # `UPDATE table SET column = column + 1` as it could be run multiple times
2632 # against some rows.
2633 #
2634 # - The partitions are committed automatically - there is no support for
2635 # Commit or Rollback. If the call returns an error, or if the client issuing
2636 # the ExecuteSql call dies, it is possible that some rows had the statement
2637 # executed on them successfully. It is also possible that statement was
2638 # never executed against other rows.
2639 #
2640 # - Partitioned DML transactions may only contain the execution of a single
2641 # DML statement via ExecuteSql or ExecuteStreamingSql.
2642 #
2643 # - If any error is encountered during the execution of the partitioned DML
2644 # operation (for instance, a UNIQUE INDEX violation, division by zero, or a
2645 # value that cannot be stored due to schema constraints), then the
2646 # operation is stopped at that point and an error is returned. It is
2647 # possible that at this point, some partitions have been committed (or even
2648 # committed multiple times), and other partitions have not been run at all.
2649 #
2650 # Given the above, Partitioned DML is a good fit for large, database-wide
2651 # operations that are idempotent, such as deleting old rows from a very large
2652 # table.
2653 &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07002654 #
2655 # Authorization to begin a Partitioned DML transaction requires
2656 # `spanner.databases.beginPartitionedDmlTransaction` permission
2657 # on the `session` resource.
2658 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07002659 &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
2660 #
2661 # Authorization to begin a read-write transaction requires
2662 # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
2663 # on the `session` resource.
2664 # transaction type has no options.
2665 },
Bu Sun Kim65020912020-05-20 12:08:20 -07002666 &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
2667 #
2668 # Authorization to begin a read-only transaction requires
2669 # `spanner.databases.beginReadOnlyTransaction` permission
2670 # on the `session` resource.
Bu Sun Kim65020912020-05-20 12:08:20 -07002671 &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
2672 # seconds. Guarantees that all writes that have committed more
2673 # than the specified number of seconds ago are visible. Because
2674 # Cloud Spanner chooses the exact timestamp, this mode works even if
2675 # the client&#x27;s local clock is substantially skewed from Cloud Spanner
2676 # commit timestamps.
2677 #
2678 # Useful for reading the freshest data available at a nearby
2679 # replica, while bounding the possible staleness if the local
2680 # replica has fallen behind.
2681 #
2682 # Note that this option can only be used in single-use
2683 # transactions.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07002684 &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
2685 #
2686 # This is useful for requesting fresher data than some previous
2687 # read, or data that is fresh enough to observe the effects of some
2688 # previously committed transaction whose timestamp is known.
2689 #
2690 # Note that this option can only be used in single-use transactions.
2691 #
2692 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
2693 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
2694 &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
2695 # are visible.
Bu Sun Kim65020912020-05-20 12:08:20 -07002696 &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
2697 # the Transaction message that describes the transaction.
2698 &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
2699 # old. The timestamp is chosen soon after the read is started.
2700 #
2701 # Guarantees that all writes that have committed more than the
2702 # specified number of seconds ago are visible. Because Cloud Spanner
2703 # chooses the exact timestamp, this mode works even if the client&#x27;s
2704 # local clock is substantially skewed from Cloud Spanner commit
2705 # timestamps.
2706 #
2707 # Useful for reading at nearby replicas without the distributed
2708 # timestamp negotiation overhead of `max_staleness`.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07002709 &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
2710 # reads at a specific timestamp are repeatable; the same read at
2711 # the same timestamp always returns the same data. If the
2712 # timestamp is in the future, the read will block until the
2713 # specified timestamp, modulo the read&#x27;s deadline.
2714 #
2715 # Useful for large scale consistent reads such as mapreduces, or
2716 # for coordinating many reads against a consistent snapshot of the
2717 # data.
2718 #
2719 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
2720 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
2721 },
2722 },
2723 &quot;singleUse&quot;: { # # Transactions # Execute the read or SQL query in a temporary transaction.
2724 # This is the most efficient way to execute a transaction that
2725 # consists of a single SQL query.
2726 #
2727 #
2728 # Each session can have at most one active transaction at a time. After the
2729 # active transaction is completed, the session can immediately be
2730 # re-used for the next transaction. It is not necessary to create a
2731 # new session for each transaction.
2732 #
2733 # # Transaction Modes
2734 #
2735 # Cloud Spanner supports three transaction modes:
2736 #
2737 # 1. Locking read-write. This type of transaction is the only way
2738 # to write data into Cloud Spanner. These transactions rely on
2739 # pessimistic locking and, if necessary, two-phase commit.
2740 # Locking read-write transactions may abort, requiring the
2741 # application to retry.
2742 #
2743 # 2. Snapshot read-only. This transaction type provides guaranteed
2744 # consistency across several reads, but does not allow
2745 # writes. Snapshot read-only transactions can be configured to
2746 # read at timestamps in the past. Snapshot read-only
2747 # transactions do not need to be committed.
2748 #
2749 # 3. Partitioned DML. This type of transaction is used to execute
2750 # a single Partitioned DML statement. Partitioned DML partitions
2751 # the key space and runs the DML statement over each partition
2752 # in parallel using separate, internal transactions that commit
2753 # independently. Partitioned DML transactions do not need to be
2754 # committed.
2755 #
2756 # For transactions that only read, snapshot read-only transactions
2757 # provide simpler semantics and are almost always faster. In
2758 # particular, read-only transactions do not take locks, so they do
2759 # not conflict with read-write transactions. As a consequence of not
2760 # taking locks, they also do not abort, so retry loops are not needed.
2761 #
2762 # Transactions may only read/write data in a single database. They
2763 # may, however, read/write data in different tables within that
2764 # database.
2765 #
2766 # ## Locking Read-Write Transactions
2767 #
2768 # Locking transactions may be used to atomically read-modify-write
2769 # data anywhere in a database. This type of transaction is externally
2770 # consistent.
2771 #
2772 # Clients should attempt to minimize the amount of time a transaction
2773 # is active. Faster transactions commit with higher probability
2774 # and cause less contention. Cloud Spanner attempts to keep read locks
2775 # active as long as the transaction continues to do reads, and the
2776 # transaction has not been terminated by
2777 # Commit or
2778 # Rollback. Long periods of
2779 # inactivity at the client may cause Cloud Spanner to release a
2780 # transaction&#x27;s locks and abort it.
2781 #
2782 # Conceptually, a read-write transaction consists of zero or more
2783 # reads or SQL statements followed by
2784 # Commit. At any time before
2785 # Commit, the client can send a
2786 # Rollback request to abort the
2787 # transaction.
2788 #
2789 # ### Semantics
2790 #
2791 # Cloud Spanner can commit the transaction if all read locks it acquired
2792 # are still valid at commit time, and it is able to acquire write
2793 # locks for all writes. Cloud Spanner can abort the transaction for any
2794 # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
2795 # that the transaction has not modified any user data in Cloud Spanner.
2796 #
2797 # Unless the transaction commits, Cloud Spanner makes no guarantees about
2798 # how long the transaction&#x27;s locks were held for. It is an error to
2799 # use Cloud Spanner locks for any sort of mutual exclusion other than
2800 # between Cloud Spanner transactions themselves.
2801 #
2802 # ### Retrying Aborted Transactions
2803 #
2804 # When a transaction aborts, the application can choose to retry the
2805 # whole transaction again. To maximize the chances of successfully
2806 # committing the retry, the client should execute the retry in the
2807 # same session as the original attempt. The original session&#x27;s lock
2808 # priority increases with each consecutive abort, meaning that each
2809 # attempt has a slightly better chance of success than the previous.
2810 #
2811 # Under some circumstances (e.g., many transactions attempting to
2812 # modify the same row(s)), a transaction can abort many times in a
2813 # short period before successfully committing. Thus, it is not a good
2814 # idea to cap the number of retries a transaction can attempt;
2815 # instead, it is better to limit the total amount of wall time spent
2816 # retrying.
2817 #
2818 # ### Idle Transactions
2819 #
2820 # A transaction is considered idle if it has no outstanding reads or
2821 # SQL queries and has not started a read or SQL query within the last 10
2822 # seconds. Idle transactions can be aborted by Cloud Spanner so that they
2823 # don&#x27;t hold on to locks indefinitely. In that case, the commit will
2824 # fail with error `ABORTED`.
2825 #
2826 # If this behavior is undesirable, periodically executing a simple
2827 # SQL query in the transaction (e.g., `SELECT 1`) prevents the
2828 # transaction from becoming idle.
2829 #
2830 # ## Snapshot Read-Only Transactions
2831 #
2832 # Snapshot read-only transactions provide a simpler method than
2833 # locking read-write transactions for doing several consistent
2834 # reads. However, this type of transaction does not support writes.
2835 #
2836 # Snapshot transactions do not take locks. Instead, they work by
2837 # choosing a Cloud Spanner timestamp, then executing all reads at that
2838 # timestamp. Since they do not acquire locks, they do not block
2839 # concurrent read-write transactions.
2840 #
2841 # Unlike locking read-write transactions, snapshot read-only
2842 # transactions never abort. They can fail if the chosen read
2843 # timestamp is garbage collected; however, the default garbage
2844 # collection policy is generous enough that most applications do not
2845 # need to worry about this in practice.
2846 #
2847 # Snapshot read-only transactions do not need to call
2848 # Commit or
2849 # Rollback (and in fact are not
2850 # permitted to do so).
2851 #
2852 # To execute a snapshot transaction, the client specifies a timestamp
2853 # bound, which tells Cloud Spanner how to choose a read timestamp.
2854 #
2855 # The types of timestamp bound are:
2856 #
2857 # - Strong (the default).
2858 # - Bounded staleness.
2859 # - Exact staleness.
2860 #
2861 # If the Cloud Spanner database to be read is geographically distributed,
2862 # stale read-only transactions can execute more quickly than strong
2863 # or read-write transactions, because they are able to execute far
2864 # from the leader replica.
2865 #
2866 # Each type of timestamp bound is discussed in detail below.
2867 #
2868 # ### Strong
2869 #
2870 # Strong reads are guaranteed to see the effects of all transactions
2871 # that have committed before the start of the read. Furthermore, all
2872 # rows yielded by a single read are consistent with each other -- if
2873 # any part of the read observes a transaction, all parts of the read
2874 # see the transaction.
2875 #
2876 # Strong reads are not repeatable: two consecutive strong read-only
2877 # transactions might return inconsistent results if there are
2878 # concurrent writes. If consistency across reads is required, the
2879 # reads should be executed within a transaction or at an exact read
2880 # timestamp.
2881 #
2882 # See TransactionOptions.ReadOnly.strong.
2883 #
2884 # ### Exact Staleness
2885 #
2886 # These timestamp bounds execute reads at a user-specified
2887 # timestamp. Reads at a timestamp are guaranteed to see a consistent
2888 # prefix of the global transaction history: they observe
2889 # modifications done by all transactions with a commit timestamp &lt;=
2890 # the read timestamp, and observe none of the modifications done by
2891 # transactions with a larger commit timestamp. They will block until
2892 # all conflicting transactions that may be assigned commit timestamps
2893 # &lt;= the read timestamp have finished.
2894 #
2895 # The timestamp can either be expressed as an absolute Cloud Spanner commit
2896 # timestamp or a staleness relative to the current time.
2897 #
2898 # These modes do not require a &quot;negotiation phase&quot; to pick a
2899 # timestamp. As a result, they execute slightly faster than the
2900 # equivalent boundedly stale concurrency modes. On the other hand,
2901 # boundedly stale reads usually return fresher results.
2902 #
2903 # See TransactionOptions.ReadOnly.read_timestamp and
2904 # TransactionOptions.ReadOnly.exact_staleness.
2905 #
2906 # ### Bounded Staleness
2907 #
2908 # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
2909 # subject to a user-provided staleness bound. Cloud Spanner chooses the
2910 # newest timestamp within the staleness bound that allows execution
2911 # of the reads at the closest available replica without blocking.
2912 #
2913 # All rows yielded are consistent with each other -- if any part of
2914 # the read observes a transaction, all parts of the read see the
2915 # transaction. Boundedly stale reads are not repeatable: two stale
2916 # reads, even if they use the same staleness bound, can execute at
2917 # different timestamps and thus return inconsistent results.
2918 #
2919 # Boundedly stale reads execute in two phases: the first phase
2920 # negotiates a timestamp among all replicas needed to serve the
2921 # read. In the second phase, reads are executed at the negotiated
2922 # timestamp.
2923 #
2924 # As a result of the two phase execution, bounded staleness reads are
2925 # usually a little slower than comparable exact staleness
2926 # reads. However, they are typically able to return fresher
2927 # results, and are more likely to execute at the closest replica.
2928 #
2929 # Because the timestamp negotiation requires up-front knowledge of
2930 # which rows will be read, it can only be used with single-use
2931 # read-only transactions.
2932 #
2933 # See TransactionOptions.ReadOnly.max_staleness and
2934 # TransactionOptions.ReadOnly.min_read_timestamp.
2935 #
2936 # ### Old Read Timestamps and Garbage Collection
2937 #
2938 # Cloud Spanner continuously garbage collects deleted and overwritten data
2939 # in the background to reclaim storage space. This process is known
2940 # as &quot;version GC&quot;. By default, version GC reclaims versions after they
2941 # are one hour old. Because of this, Cloud Spanner cannot perform reads
2942 # at read timestamps more than one hour in the past. This
2943 # restriction also applies to in-progress reads and/or SQL queries whose
2944 # timestamp becomes too old while executing. Reads and SQL queries with
2945 # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
2946 #
2947 # ## Partitioned DML Transactions
2948 #
2949 # Partitioned DML transactions are used to execute DML statements with a
2950 # different execution strategy that provides different, and often better,
2951 # scalability properties for large, table-wide operations than DML in a
2952 # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
2953 # should prefer using ReadWrite transactions.
2954 #
2955 # Partitioned DML partitions the keyspace and runs the DML statement on each
2956 # partition in separate, internal transactions. These transactions commit
2957 # automatically when complete, and run independently from one another.
2958 #
2959 # To reduce lock contention, this execution strategy only acquires read locks
2960 # on rows that match the WHERE clause of the statement. Additionally, the
2961 # smaller per-partition transactions hold locks for less time.
2962 #
2963 # That said, Partitioned DML is not a drop-in replacement for standard DML used
2964 # in ReadWrite transactions.
2965 #
2966 # - The DML statement must be fully-partitionable. Specifically, the statement
2967 # must be expressible as the union of many statements which each access only
2968 # a single row of the table.
2969 #
2970 # - The statement is not applied atomically to all rows of the table. Rather,
2971 # the statement is applied atomically to partitions of the table, in
2972 # independent transactions. Secondary index rows are updated atomically
2973 # with the base table rows.
2974 #
2975 # - Partitioned DML does not guarantee exactly-once execution semantics
2976 # against a partition. The statement will be applied at least once to each
2977 # partition. It is strongly recommended that the DML statement should be
2978 # idempotent to avoid unexpected results. For instance, it is potentially
2979 # dangerous to run a statement such as
2980 # `UPDATE table SET column = column + 1` as it could be run multiple times
2981 # against some rows.
2982 #
2983 # - The partitions are committed automatically - there is no support for
2984 # Commit or Rollback. If the call returns an error, or if the client issuing
2985 # the ExecuteSql call dies, it is possible that some rows had the statement
2986 # executed on them successfully. It is also possible that statement was
2987 # never executed against other rows.
2988 #
2989 # - Partitioned DML transactions may only contain the execution of a single
2990 # DML statement via ExecuteSql or ExecuteStreamingSql.
2991 #
2992 # - If any error is encountered during the execution of the partitioned DML
2993 # operation (for instance, a UNIQUE INDEX violation, division by zero, or a
2994 # value that cannot be stored due to schema constraints), then the
2995 # operation is stopped at that point and an error is returned. It is
2996 # possible that at this point, some partitions have been committed (or even
2997 # committed multiple times), and other partitions have not been run at all.
2998 #
2999 # Given the above, Partitioned DML is a good fit for large, database-wide
3000 # operations that are idempotent, such as deleting old rows from a very large
3001 # table.
3002 &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
3003 #
3004 # Authorization to begin a Partitioned DML transaction requires
3005 # `spanner.databases.beginPartitionedDmlTransaction` permission
3006 # on the `session` resource.
Bu Sun Kim65020912020-05-20 12:08:20 -07003007 },
3008 &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
3009 #
3010 # Authorization to begin a read-write transaction requires
3011 # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
3012 # on the `session` resource.
3013 # transaction type has no options.
3014 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07003015 &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
3016 #
3017 # Authorization to begin a read-only transaction requires
3018 # `spanner.databases.beginReadOnlyTransaction` permission
3019 # on the `session` resource.
3020 &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
3021 # seconds. Guarantees that all writes that have committed more
3022 # than the specified number of seconds ago are visible. Because
3023 # Cloud Spanner chooses the exact timestamp, this mode works even if
3024 # the client&#x27;s local clock is substantially skewed from Cloud Spanner
3025 # commit timestamps.
3026 #
3027 # Useful for reading the freshest data available at a nearby
3028 # replica, while bounding the possible staleness if the local
3029 # replica has fallen behind.
3030 #
3031 # Note that this option can only be used in single-use
3032 # transactions.
3033 &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
3034 #
3035 # This is useful for requesting fresher data than some previous
3036 # read, or data that is fresh enough to observe the effects of some
3037 # previously committed transaction whose timestamp is known.
3038 #
3039 # Note that this option can only be used in single-use transactions.
3040 #
3041 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
3042 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
3043 &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
3044 # are visible.
3045 &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
3046 # the Transaction message that describes the transaction.
3047 &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
3048 # old. The timestamp is chosen soon after the read is started.
3049 #
3050 # Guarantees that all writes that have committed more than the
3051 # specified number of seconds ago are visible. Because Cloud Spanner
3052 # chooses the exact timestamp, this mode works even if the client&#x27;s
3053 # local clock is substantially skewed from Cloud Spanner commit
3054 # timestamps.
3055 #
3056 # Useful for reading at nearby replicas without the distributed
3057 # timestamp negotiation overhead of `max_staleness`.
3058 &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
3059 # reads at a specific timestamp are repeatable; the same read at
3060 # the same timestamp always returns the same data. If the
3061 # timestamp is in the future, the read will block until the
3062 # specified timestamp, modulo the read&#x27;s deadline.
3063 #
3064 # Useful for large scale consistent reads such as mapreduces, or
3065 # for coordinating many reads against a consistent snapshot of the
3066 # data.
3067 #
3068 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
3069 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
3070 },
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07003071 },
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07003072 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07003073 &quot;queryMode&quot;: &quot;A String&quot;, # Used to control the amount of debugging information returned in
3074 # ResultSetStats. If partition_token is set, query_mode can only
3075 # be set to QueryMode.NORMAL.
3076 &quot;partitionToken&quot;: &quot;A String&quot;, # If present, results will be restricted to the specified partition
3077 # previously created using PartitionQuery(). There must be an exact
3078 # match for the values of fields common to this message and the
3079 # PartitionQueryRequest message used to create this partition_token.
Bu Sun Kim65020912020-05-20 12:08:20 -07003080 &quot;resumeToken&quot;: &quot;A String&quot;, # If this request is resuming a previously interrupted SQL statement
3081 # execution, `resume_token` should be copied from the last
3082 # PartialResultSet yielded before the interruption. Doing this
3083 # enables the new SQL statement execution to resume where the last one left
3084 # off. The rest of the request parameters must exactly match the
3085 # request that yielded this token.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07003086 &quot;queryOptions&quot;: { # Query optimizer configuration. # Query optimizer configuration to use for the given query.
3087 &quot;optimizerVersion&quot;: &quot;A String&quot;, # An option to control the selection of optimizer version.
3088 #
3089 # This parameter allows individual queries to pick different query
3090 # optimizer versions.
3091 #
3092 # Specifying &quot;latest&quot; as a value instructs Cloud Spanner to use the
3093 # latest supported query optimizer version. If not specified, Cloud Spanner
3094 # uses optimizer version set at the database level options. Any other
3095 # positive integer (from the list of supported optimizer versions)
3096 # overrides the default optimizer version for query execution.
3097 # The list of supported optimizer versions can be queried from
3098 # SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS. Executing a SQL statement
3099 # with an invalid optimizer version will fail with a syntax error
3100 # (`INVALID_ARGUMENT`) status.
3101 # See
3102 # https://cloud.google.com/spanner/docs/query-optimizer/manage-query-optimizer
3103 # for more information on managing the query optimizer.
3104 #
3105 # The `optimizer_version` statement hint has precedence over this setting.
3106 },
3107 &quot;params&quot;: { # Parameter names and values that bind to placeholders in the SQL string.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07003108 #
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07003109 # A parameter placeholder consists of the `@` character followed by the
3110 # parameter name (for example, `@firstName`). Parameter names can contain
3111 # letters, numbers, and underscores.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07003112 #
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07003113 # Parameters can appear anywhere that a literal value is expected. The same
3114 # parameter name can be used more than once, for example:
3115 #
3116 # `&quot;WHERE id &gt; @msg_id AND id &lt; @msg_id + 100&quot;`
3117 #
3118 # It is an error to execute a SQL statement with unbound parameters.
3119 &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
3120 },
3121 &quot;sql&quot;: &quot;A String&quot;, # Required. The SQL string.
Bu Sun Kim65020912020-05-20 12:08:20 -07003122 &quot;paramTypes&quot;: { # It is not always possible for Cloud Spanner to infer the right SQL type
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04003123 # from a JSON value. For example, values of type `BYTES` and values
3124 # of type `STRING` both appear in params as JSON strings.
3125 #
3126 # In these cases, `param_types` can be used to specify the exact
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07003127 # SQL type for some or all of the SQL statement parameters. See the
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04003128 # definition of Type for more information
3129 # about SQL types.
Bu Sun Kim65020912020-05-20 12:08:20 -07003130 &quot;a_key&quot;: { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04003131 # table cell or returned from an SQL query.
Bu Sun Kim65020912020-05-20 12:08:20 -07003132 &quot;arrayElementType&quot;: # Object with schema name: Type # If code == ARRAY, then `array_element_type`
3133 # is the type of the array elements.
3134 &quot;code&quot;: &quot;A String&quot;, # Required. The TypeCode for this type.
3135 &quot;structType&quot;: { # `StructType` defines the fields of a STRUCT type. # If code == STRUCT, then `struct_type`
3136 # provides type information for the struct&#x27;s fields.
3137 &quot;fields&quot;: [ # The list of fields that make up this struct. Order is
Dan O'Mearadd494642020-05-01 07:42:23 -07003138 # significant, because values of this struct type are represented as
3139 # lists, where the order of field values matches the order of
3140 # fields in the StructType. In turn, the order of fields
3141 # matches the order of columns in a read request, or the order of
3142 # fields in the `SELECT` clause of a query.
3143 { # Message representing a single field of a struct.
Bu Sun Kim65020912020-05-20 12:08:20 -07003144 &quot;name&quot;: &quot;A String&quot;, # The name of the field. For reads, this is the column name. For
3145 # SQL queries, it is the column alias (e.g., `&quot;Word&quot;` in the
3146 # query `&quot;SELECT &#x27;hello&#x27; AS Word&quot;`), or the column name (e.g.,
3147 # `&quot;ColName&quot;` in the query `&quot;SELECT ColName FROM Table&quot;`). Some
3148 # columns might have an empty name (e.g., `&quot;SELECT
3149 # UPPER(ColName)&quot;`). Note that a query result can contain
Dan O'Mearadd494642020-05-01 07:42:23 -07003150 # multiple fields with the same name.
Bu Sun Kim65020912020-05-20 12:08:20 -07003151 &quot;type&quot;: # Object with schema name: Type # The type of the field.
Dan O'Mearadd494642020-05-01 07:42:23 -07003152 },
3153 ],
3154 },
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04003155 },
3156 },
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04003157 }
3158
3159 x__xgafv: string, V1 error format.
3160 Allowed values
3161 1 - v1 error format
3162 2 - v2 error format
3163
3164Returns:
3165 An object of the form:
3166
3167 { # Results from Read or
3168 # ExecuteSql.
Bu Sun Kim65020912020-05-20 12:08:20 -07003169 &quot;metadata&quot;: { # Metadata about a ResultSet or PartialResultSet. # Metadata about the result set, such as row type information.
3170 &quot;rowType&quot;: { # `StructType` defines the fields of a STRUCT type. # Indicates the field names and types for the rows in the result
3171 # set. For example, a SQL query like `&quot;SELECT UserId, UserName FROM
3172 # Users&quot;` could return a `row_type` value like:
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04003173 #
Bu Sun Kim65020912020-05-20 12:08:20 -07003174 # &quot;fields&quot;: [
3175 # { &quot;name&quot;: &quot;UserId&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;INT64&quot; } },
3176 # { &quot;name&quot;: &quot;UserName&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;STRING&quot; } },
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04003177 # ]
Bu Sun Kim65020912020-05-20 12:08:20 -07003178 &quot;fields&quot;: [ # The list of fields that make up this struct. Order is
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04003179 # significant, because values of this struct type are represented as
3180 # lists, where the order of field values matches the order of
3181 # fields in the StructType. In turn, the order of fields
3182 # matches the order of columns in a read request, or the order of
3183 # fields in the `SELECT` clause of a query.
3184 { # Message representing a single field of a struct.
Bu Sun Kim65020912020-05-20 12:08:20 -07003185 &quot;name&quot;: &quot;A String&quot;, # The name of the field. For reads, this is the column name. For
3186 # SQL queries, it is the column alias (e.g., `&quot;Word&quot;` in the
3187 # query `&quot;SELECT &#x27;hello&#x27; AS Word&quot;`), or the column name (e.g.,
3188 # `&quot;ColName&quot;` in the query `&quot;SELECT ColName FROM Table&quot;`). Some
3189 # columns might have an empty name (e.g., `&quot;SELECT
3190 # UPPER(ColName)&quot;`). Note that a query result can contain
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04003191 # multiple fields with the same name.
Bu Sun Kim65020912020-05-20 12:08:20 -07003192 &quot;type&quot;: # Object with schema name: Type # The type of the field.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04003193 },
3194 ],
3195 },
Bu Sun Kim65020912020-05-20 12:08:20 -07003196 &quot;transaction&quot;: { # A transaction. # If the read or SQL query began a transaction as a side-effect, the
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04003197 # information about the new transaction is yielded here.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07003198 &quot;readTimestamp&quot;: &quot;A String&quot;, # For snapshot read-only transactions, the read timestamp chosen
3199 # for the transaction. Not returned by default: see
3200 # TransactionOptions.ReadOnly.return_read_timestamp.
3201 #
3202 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
3203 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
Bu Sun Kim65020912020-05-20 12:08:20 -07003204 &quot;id&quot;: &quot;A String&quot;, # `id` may be used to identify the transaction in subsequent
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04003205 # Read,
3206 # ExecuteSql,
3207 # Commit, or
3208 # Rollback calls.
3209 #
3210 # Single-use read-only transactions do not have IDs, because
3211 # single-use transactions do not support multiple requests.
3212 },
3213 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07003214 &quot;stats&quot;: { # Additional statistics about a ResultSet or PartialResultSet. # Query plan and execution statistics for the SQL statement that
3215 # produced this result set. These can be requested by setting
3216 # ExecuteSqlRequest.query_mode.
3217 # DML statements always produce stats containing the number of rows
3218 # modified, unless executed using the
3219 # ExecuteSqlRequest.QueryMode.PLAN ExecuteSqlRequest.query_mode.
3220 # Other fields may or may not be populated, based on the
3221 # ExecuteSqlRequest.query_mode.
3222 &quot;rowCountLowerBound&quot;: &quot;A String&quot;, # Partitioned DML does not offer exactly-once semantics, so it
3223 # returns a lower bound of the rows modified.
3224 &quot;queryPlan&quot;: { # Contains an ordered list of nodes appearing in the query plan. # QueryPlan for the query associated with this result.
3225 &quot;planNodes&quot;: [ # The nodes in the query plan. Plan nodes are returned in pre-order starting
3226 # with the plan root. Each PlanNode&#x27;s `id` corresponds to its index in
3227 # `plan_nodes`.
3228 { # Node information for nodes appearing in a QueryPlan.plan_nodes.
3229 &quot;displayName&quot;: &quot;A String&quot;, # The display name for the node.
3230 &quot;executionStats&quot;: { # The execution statistics associated with the node, contained in a group of
3231 # key-value pairs. Only present if the plan was returned as a result of a
3232 # profile query. For example, number of executions, number of rows/time per
3233 # execution etc.
3234 &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
3235 },
3236 &quot;shortRepresentation&quot;: { # Condensed representation of a node and its subtree. Only present for # Condensed representation for SCALAR nodes.
3237 # `SCALAR` PlanNode(s).
3238 &quot;subqueries&quot;: { # A mapping of (subquery variable name) -&gt; (subquery node id) for cases
3239 # where the `description` string of this node references a `SCALAR`
3240 # subquery contained in the expression subtree rooted at this node. The
3241 # referenced `SCALAR` subquery may not necessarily be a direct child of
3242 # this node.
3243 &quot;a_key&quot;: 42,
3244 },
3245 &quot;description&quot;: &quot;A String&quot;, # A string representation of the expression subtree rooted at this node.
3246 },
3247 &quot;metadata&quot;: { # Attributes relevant to the node contained in a group of key-value pairs.
3248 # For example, a Parameter Reference node could have the following
3249 # information in its metadata:
3250 #
3251 # {
3252 # &quot;parameter_reference&quot;: &quot;param1&quot;,
3253 # &quot;parameter_type&quot;: &quot;array&quot;
3254 # }
3255 &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
3256 },
3257 &quot;childLinks&quot;: [ # List of child node `index`es and their relationship to this parent.
3258 { # Metadata associated with a parent-child relationship appearing in a
3259 # PlanNode.
3260 &quot;type&quot;: &quot;A String&quot;, # The type of the link. For example, in Hash Joins this could be used to
3261 # distinguish between the build child and the probe child, or in the case
3262 # of the child being an output variable, to represent the tag associated
3263 # with the output variable.
3264 &quot;variable&quot;: &quot;A String&quot;, # Only present if the child node is SCALAR and corresponds
3265 # to an output variable of the parent node. The field carries the name of
3266 # the output variable.
3267 # For example, a `TableScan` operator that reads rows from a table will
3268 # have child links to the `SCALAR` nodes representing the output variables
3269 # created for each column that is read by the operator. The corresponding
3270 # `variable` fields will be set to the variable names assigned to the
3271 # columns.
3272 &quot;childIndex&quot;: 42, # The node to which the link points.
3273 },
3274 ],
3275 &quot;index&quot;: 42, # The `PlanNode`&#x27;s index in node list.
3276 &quot;kind&quot;: &quot;A String&quot;, # Used to determine the type of node. May be needed for visualizing
3277 # different kinds of nodes differently. For example, If the node is a
3278 # SCALAR node, it will have a condensed representation
3279 # which can be used to directly embed a description of the node in its
3280 # parent.
3281 },
3282 ],
3283 },
3284 &quot;rowCountExact&quot;: &quot;A String&quot;, # Standard DML returns an exact count of rows that were modified.
3285 &quot;queryStats&quot;: { # Aggregated statistics from the execution of the query. Only present when
3286 # the query is profiled. For example, a query could return the statistics as
3287 # follows:
3288 #
3289 # {
3290 # &quot;rows_returned&quot;: &quot;3&quot;,
3291 # &quot;elapsed_time&quot;: &quot;1.22 secs&quot;,
3292 # &quot;cpu_time&quot;: &quot;1.19 secs&quot;
3293 # }
3294 &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
3295 },
3296 },
3297 &quot;rows&quot;: [ # Each element in `rows` is a row whose format is defined by
3298 # metadata.row_type. The ith element
3299 # in each row matches the ith field in
3300 # metadata.row_type. Elements are
3301 # encoded based on type as described
3302 # here.
3303 [
3304 &quot;&quot;,
3305 ],
3306 ],
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04003307 }</pre>
3308</div>
3309
3310<div class="method">
Dan O'Mearadd494642020-05-01 07:42:23 -07003311 <code class="details" id="executeStreamingSql">executeStreamingSql(session, body=None, x__xgafv=None)</code>
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04003312 <pre>Like ExecuteSql, except returns the result
3313set as a stream. Unlike ExecuteSql, there
3314is no limit on the size of the returned result set. However, no
3315individual row in the result set can exceed 100 MiB, and no
3316column value can exceed 10 MiB.
3317
3318Args:
3319 session: string, Required. The session in which the SQL query should be performed. (required)
Dan O'Mearadd494642020-05-01 07:42:23 -07003320 body: object, The request body.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04003321 The object takes the form of:
3322
3323{ # The request for ExecuteSql and
3324 # ExecuteStreamingSql.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07003325 &quot;seqno&quot;: &quot;A String&quot;, # A per-transaction sequence number used to identify this request. This field
3326 # makes each request idempotent such that if the request is received multiple
3327 # times, at most one will succeed.
3328 #
3329 # The sequence number must be monotonically increasing within the
3330 # transaction. If a request arrives for the first time with an out-of-order
3331 # sequence number, the transaction may be aborted. Replays of previously
3332 # handled requests will yield the same response as the first execution.
3333 #
3334 # Required for DML statements. Ignored for queries.
Bu Sun Kim65020912020-05-20 12:08:20 -07003335 &quot;transaction&quot;: { # This message is used to select the transaction in which a # The transaction to use.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07003336 #
3337 # For queries, if none is provided, the default is a temporary read-only
3338 # transaction with strong concurrency.
3339 #
Dan O'Mearadd494642020-05-01 07:42:23 -07003340 # Standard DML statements require a read-write transaction. To protect
3341 # against replays, single-use transactions are not supported. The caller
3342 # must either supply an existing transaction ID or begin a new transaction.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07003343 #
Dan O'Mearadd494642020-05-01 07:42:23 -07003344 # Partitioned DML requires an existing Partitioned DML transaction ID.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04003345 # Read or
3346 # ExecuteSql call runs.
3347 #
3348 # See TransactionOptions for more information about transactions.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07003349 &quot;id&quot;: &quot;A String&quot;, # Execute the read or SQL query in a previously-started transaction.
Bu Sun Kim65020912020-05-20 12:08:20 -07003350 &quot;begin&quot;: { # # Transactions # Begin a new transaction and execute this read or SQL query in
3351 # it. The transaction ID of the new transaction is returned in
3352 # ResultSetMetadata.transaction, which is a Transaction.
3353 #
3354 #
3355 # Each session can have at most one active transaction at a time. After the
3356 # active transaction is completed, the session can immediately be
3357 # re-used for the next transaction. It is not necessary to create a
3358 # new session for each transaction.
3359 #
3360 # # Transaction Modes
3361 #
3362 # Cloud Spanner supports three transaction modes:
3363 #
3364 # 1. Locking read-write. This type of transaction is the only way
3365 # to write data into Cloud Spanner. These transactions rely on
3366 # pessimistic locking and, if necessary, two-phase commit.
3367 # Locking read-write transactions may abort, requiring the
3368 # application to retry.
3369 #
3370 # 2. Snapshot read-only. This transaction type provides guaranteed
3371 # consistency across several reads, but does not allow
3372 # writes. Snapshot read-only transactions can be configured to
3373 # read at timestamps in the past. Snapshot read-only
3374 # transactions do not need to be committed.
3375 #
3376 # 3. Partitioned DML. This type of transaction is used to execute
3377 # a single Partitioned DML statement. Partitioned DML partitions
3378 # the key space and runs the DML statement over each partition
3379 # in parallel using separate, internal transactions that commit
3380 # independently. Partitioned DML transactions do not need to be
3381 # committed.
3382 #
3383 # For transactions that only read, snapshot read-only transactions
3384 # provide simpler semantics and are almost always faster. In
3385 # particular, read-only transactions do not take locks, so they do
3386 # not conflict with read-write transactions. As a consequence of not
3387 # taking locks, they also do not abort, so retry loops are not needed.
3388 #
3389 # Transactions may only read/write data in a single database. They
3390 # may, however, read/write data in different tables within that
3391 # database.
3392 #
3393 # ## Locking Read-Write Transactions
3394 #
3395 # Locking transactions may be used to atomically read-modify-write
3396 # data anywhere in a database. This type of transaction is externally
3397 # consistent.
3398 #
3399 # Clients should attempt to minimize the amount of time a transaction
3400 # is active. Faster transactions commit with higher probability
3401 # and cause less contention. Cloud Spanner attempts to keep read locks
3402 # active as long as the transaction continues to do reads, and the
3403 # transaction has not been terminated by
3404 # Commit or
3405 # Rollback. Long periods of
3406 # inactivity at the client may cause Cloud Spanner to release a
3407 # transaction&#x27;s locks and abort it.
3408 #
3409 # Conceptually, a read-write transaction consists of zero or more
3410 # reads or SQL statements followed by
3411 # Commit. At any time before
3412 # Commit, the client can send a
3413 # Rollback request to abort the
3414 # transaction.
3415 #
3416 # ### Semantics
3417 #
3418 # Cloud Spanner can commit the transaction if all read locks it acquired
3419 # are still valid at commit time, and it is able to acquire write
3420 # locks for all writes. Cloud Spanner can abort the transaction for any
3421 # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
3422 # that the transaction has not modified any user data in Cloud Spanner.
3423 #
3424 # Unless the transaction commits, Cloud Spanner makes no guarantees about
3425 # how long the transaction&#x27;s locks were held for. It is an error to
3426 # use Cloud Spanner locks for any sort of mutual exclusion other than
3427 # between Cloud Spanner transactions themselves.
3428 #
3429 # ### Retrying Aborted Transactions
3430 #
3431 # When a transaction aborts, the application can choose to retry the
3432 # whole transaction again. To maximize the chances of successfully
3433 # committing the retry, the client should execute the retry in the
3434 # same session as the original attempt. The original session&#x27;s lock
3435 # priority increases with each consecutive abort, meaning that each
3436 # attempt has a slightly better chance of success than the previous.
3437 #
3438 # Under some circumstances (e.g., many transactions attempting to
3439 # modify the same row(s)), a transaction can abort many times in a
3440 # short period before successfully committing. Thus, it is not a good
3441 # idea to cap the number of retries a transaction can attempt;
3442 # instead, it is better to limit the total amount of wall time spent
3443 # retrying.
3444 #
3445 # ### Idle Transactions
3446 #
3447 # A transaction is considered idle if it has no outstanding reads or
3448 # SQL queries and has not started a read or SQL query within the last 10
3449 # seconds. Idle transactions can be aborted by Cloud Spanner so that they
3450 # don&#x27;t hold on to locks indefinitely. In that case, the commit will
3451 # fail with error `ABORTED`.
3452 #
3453 # If this behavior is undesirable, periodically executing a simple
3454 # SQL query in the transaction (e.g., `SELECT 1`) prevents the
3455 # transaction from becoming idle.
3456 #
3457 # ## Snapshot Read-Only Transactions
3458 #
3459 # Snapshot read-only transactions provide a simpler method than
3460 # locking read-write transactions for doing several consistent
3461 # reads. However, this type of transaction does not support writes.
3462 #
3463 # Snapshot transactions do not take locks. Instead, they work by
3464 # choosing a Cloud Spanner timestamp, then executing all reads at that
3465 # timestamp. Since they do not acquire locks, they do not block
3466 # concurrent read-write transactions.
3467 #
3468 # Unlike locking read-write transactions, snapshot read-only
3469 # transactions never abort. They can fail if the chosen read
3470 # timestamp is garbage collected; however, the default garbage
3471 # collection policy is generous enough that most applications do not
3472 # need to worry about this in practice.
3473 #
3474 # Snapshot read-only transactions do not need to call
3475 # Commit or
3476 # Rollback (and in fact are not
3477 # permitted to do so).
3478 #
3479 # To execute a snapshot transaction, the client specifies a timestamp
3480 # bound, which tells Cloud Spanner how to choose a read timestamp.
3481 #
3482 # The types of timestamp bound are:
3483 #
3484 # - Strong (the default).
3485 # - Bounded staleness.
3486 # - Exact staleness.
3487 #
3488 # If the Cloud Spanner database to be read is geographically distributed,
3489 # stale read-only transactions can execute more quickly than strong
3490 # or read-write transactions, because they are able to execute far
3491 # from the leader replica.
3492 #
3493 # Each type of timestamp bound is discussed in detail below.
3494 #
3495 # ### Strong
3496 #
3497 # Strong reads are guaranteed to see the effects of all transactions
3498 # that have committed before the start of the read. Furthermore, all
3499 # rows yielded by a single read are consistent with each other -- if
3500 # any part of the read observes a transaction, all parts of the read
3501 # see the transaction.
3502 #
3503 # Strong reads are not repeatable: two consecutive strong read-only
3504 # transactions might return inconsistent results if there are
3505 # concurrent writes. If consistency across reads is required, the
3506 # reads should be executed within a transaction or at an exact read
3507 # timestamp.
3508 #
3509 # See TransactionOptions.ReadOnly.strong.
3510 #
3511 # ### Exact Staleness
3512 #
3513 # These timestamp bounds execute reads at a user-specified
3514 # timestamp. Reads at a timestamp are guaranteed to see a consistent
3515 # prefix of the global transaction history: they observe
3516 # modifications done by all transactions with a commit timestamp &lt;=
3517 # the read timestamp, and observe none of the modifications done by
3518 # transactions with a larger commit timestamp. They will block until
3519 # all conflicting transactions that may be assigned commit timestamps
3520 # &lt;= the read timestamp have finished.
3521 #
3522 # The timestamp can either be expressed as an absolute Cloud Spanner commit
3523 # timestamp or a staleness relative to the current time.
3524 #
3525 # These modes do not require a &quot;negotiation phase&quot; to pick a
3526 # timestamp. As a result, they execute slightly faster than the
3527 # equivalent boundedly stale concurrency modes. On the other hand,
3528 # boundedly stale reads usually return fresher results.
3529 #
3530 # See TransactionOptions.ReadOnly.read_timestamp and
3531 # TransactionOptions.ReadOnly.exact_staleness.
3532 #
3533 # ### Bounded Staleness
3534 #
3535 # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
3536 # subject to a user-provided staleness bound. Cloud Spanner chooses the
3537 # newest timestamp within the staleness bound that allows execution
3538 # of the reads at the closest available replica without blocking.
3539 #
3540 # All rows yielded are consistent with each other -- if any part of
3541 # the read observes a transaction, all parts of the read see the
3542 # transaction. Boundedly stale reads are not repeatable: two stale
3543 # reads, even if they use the same staleness bound, can execute at
3544 # different timestamps and thus return inconsistent results.
3545 #
3546 # Boundedly stale reads execute in two phases: the first phase
3547 # negotiates a timestamp among all replicas needed to serve the
3548 # read. In the second phase, reads are executed at the negotiated
3549 # timestamp.
3550 #
3551 # As a result of the two phase execution, bounded staleness reads are
3552 # usually a little slower than comparable exact staleness
3553 # reads. However, they are typically able to return fresher
3554 # results, and are more likely to execute at the closest replica.
3555 #
3556 # Because the timestamp negotiation requires up-front knowledge of
3557 # which rows will be read, it can only be used with single-use
3558 # read-only transactions.
3559 #
3560 # See TransactionOptions.ReadOnly.max_staleness and
3561 # TransactionOptions.ReadOnly.min_read_timestamp.
3562 #
3563 # ### Old Read Timestamps and Garbage Collection
3564 #
3565 # Cloud Spanner continuously garbage collects deleted and overwritten data
3566 # in the background to reclaim storage space. This process is known
3567 # as &quot;version GC&quot;. By default, version GC reclaims versions after they
3568 # are one hour old. Because of this, Cloud Spanner cannot perform reads
3569 # at read timestamps more than one hour in the past. This
3570 # restriction also applies to in-progress reads and/or SQL queries whose
3571 # timestamp become too old while executing. Reads and SQL queries with
3572 # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
3573 #
3574 # ## Partitioned DML Transactions
3575 #
3576 # Partitioned DML transactions are used to execute DML statements with a
3577 # different execution strategy that provides different, and often better,
3578 # scalability properties for large, table-wide operations than DML in a
3579 # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
3580 # should prefer using ReadWrite transactions.
3581 #
3582 # Partitioned DML partitions the keyspace and runs the DML statement on each
3583 # partition in separate, internal transactions. These transactions commit
3584 # automatically when complete, and run independently from one another.
3585 #
3586 # To reduce lock contention, this execution strategy only acquires read locks
3587 # on rows that match the WHERE clause of the statement. Additionally, the
3588 # smaller per-partition transactions hold locks for less time.
3589 #
3590 # That said, Partitioned DML is not a drop-in replacement for standard DML used
3591 # in ReadWrite transactions.
3592 #
3593 # - The DML statement must be fully-partitionable. Specifically, the statement
3594 # must be expressible as the union of many statements which each access only
3595 # a single row of the table.
3596 #
3597 # - The statement is not applied atomically to all rows of the table. Rather,
3598 # the statement is applied atomically to partitions of the table, in
3599 # independent transactions. Secondary index rows are updated atomically
3600 # with the base table rows.
3601 #
3602 # - Partitioned DML does not guarantee exactly-once execution semantics
3603 # against a partition. The statement will be applied at least once to each
3604 # partition. It is strongly recommended that the DML statement should be
3605 # idempotent to avoid unexpected results. For instance, it is potentially
3606 # dangerous to run a statement such as
3607 # `UPDATE table SET column = column + 1` as it could be run multiple times
3608 # against some rows.
3609 #
3610 # - The partitions are committed automatically - there is no support for
3611 # Commit or Rollback. If the call returns an error, or if the client issuing
3612 # the ExecuteSql call dies, it is possible that some rows had the statement
3613 # executed on them successfully. It is also possible that statement was
3614 # never executed against other rows.
3615 #
3616 # - Partitioned DML transactions may only contain the execution of a single
3617 # DML statement via ExecuteSql or ExecuteStreamingSql.
3618 #
3619 # - If any error is encountered during the execution of the partitioned DML
3620 # operation (for instance, a UNIQUE INDEX violation, division by zero, or a
3621 # value that cannot be stored due to schema constraints), then the
3622 # operation is stopped at that point and an error is returned. It is
3623 # possible that at this point, some partitions have been committed (or even
3624 # committed multiple times), and other partitions have not been run at all.
3625 #
3626 # Given the above, Partitioned DML is good fit for large, database-wide,
3627 # operations that are idempotent, such as deleting old rows from a very large
3628 # table.
3629 &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07003630 #
3631 # Authorization to begin a Partitioned DML transaction requires
3632 # `spanner.databases.beginPartitionedDmlTransaction` permission
3633 # on the `session` resource.
3634 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07003635 &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
3636 #
3637 # Authorization to begin a read-write transaction requires
3638 # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
3639 # on the `session` resource.
3640 # transaction type has no options.
3641 },
Bu Sun Kim65020912020-05-20 12:08:20 -07003642 &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
3643 #
3644 # Authorization to begin a read-only transaction requires
3645 # `spanner.databases.beginReadOnlyTransaction` permission
3646 # on the `session` resource.
Bu Sun Kim65020912020-05-20 12:08:20 -07003647 &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
3648 # seconds. Guarantees that all writes that have committed more
3649 # than the specified number of seconds ago are visible. Because
3650 # Cloud Spanner chooses the exact timestamp, this mode works even if
3651 # the client&#x27;s local clock is substantially skewed from Cloud Spanner
3652 # commit timestamps.
3653 #
3654 # Useful for reading the freshest data available at a nearby
3655 # replica, while bounding the possible staleness if the local
3656 # replica has fallen behind.
3657 #
3658 # Note that this option can only be used in single-use
3659 # transactions.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07003660 &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
3661 #
3662 # This is useful for requesting fresher data than some previous
3663 # read, or data that is fresh enough to observe the effects of some
3664 # previously committed transaction whose timestamp is known.
3665 #
3666 # Note that this option can only be used in single-use transactions.
3667 #
3668 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
3669 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
3670 &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
3671 # are visible.
Bu Sun Kim65020912020-05-20 12:08:20 -07003672 &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
3673 # the Transaction message that describes the transaction.
3674 &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
3675 # old. The timestamp is chosen soon after the read is started.
3676 #
3677 # Guarantees that all writes that have committed more than the
3678 # specified number of seconds ago are visible. Because Cloud Spanner
3679 # chooses the exact timestamp, this mode works even if the client&#x27;s
3680 # local clock is substantially skewed from Cloud Spanner commit
3681 # timestamps.
3682 #
3683 # Useful for reading at nearby replicas without the distributed
3684 # timestamp negotiation overhead of `max_staleness`.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07003685 &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
3686 # reads at a specific timestamp are repeatable; the same read at
3687 # the same timestamp always returns the same data. If the
3688 # timestamp is in the future, the read will block until the
3689 # specified timestamp, modulo the read&#x27;s deadline.
3690 #
3691 # Useful for large scale consistent reads such as mapreduces, or
3692 # for coordinating many reads against a consistent snapshot of the
3693 # data.
3694 #
3695 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
3696 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
3697 },
3698 },
3699 &quot;singleUse&quot;: { # # Transactions # Execute the read or SQL query in a temporary transaction.
3700 # This is the most efficient way to execute a transaction that
3701 # consists of a single SQL query.
3702 #
3703 #
3704 # Each session can have at most one active transaction at a time. After the
3705 # active transaction is completed, the session can immediately be
3706 # re-used for the next transaction. It is not necessary to create a
3707 # new session for each transaction.
3708 #
3709 # # Transaction Modes
3710 #
3711 # Cloud Spanner supports three transaction modes:
3712 #
3713 # 1. Locking read-write. This type of transaction is the only way
3714 # to write data into Cloud Spanner. These transactions rely on
3715 # pessimistic locking and, if necessary, two-phase commit.
3716 # Locking read-write transactions may abort, requiring the
3717 # application to retry.
3718 #
3719 # 2. Snapshot read-only. This transaction type provides guaranteed
3720 # consistency across several reads, but does not allow
3721 # writes. Snapshot read-only transactions can be configured to
3722 # read at timestamps in the past. Snapshot read-only
3723 # transactions do not need to be committed.
3724 #
3725 # 3. Partitioned DML. This type of transaction is used to execute
3726 # a single Partitioned DML statement. Partitioned DML partitions
3727 # the key space and runs the DML statement over each partition
3728 # in parallel using separate, internal transactions that commit
3729 # independently. Partitioned DML transactions do not need to be
3730 # committed.
3731 #
3732 # For transactions that only read, snapshot read-only transactions
3733 # provide simpler semantics and are almost always faster. In
3734 # particular, read-only transactions do not take locks, so they do
3735 # not conflict with read-write transactions. As a consequence of not
3736 # taking locks, they also do not abort, so retry loops are not needed.
3737 #
3738 # Transactions may only read/write data in a single database. They
3739 # may, however, read/write data in different tables within that
3740 # database.
3741 #
3742 # ## Locking Read-Write Transactions
3743 #
3744 # Locking transactions may be used to atomically read-modify-write
3745 # data anywhere in a database. This type of transaction is externally
3746 # consistent.
3747 #
3748 # Clients should attempt to minimize the amount of time a transaction
3749 # is active. Faster transactions commit with higher probability
3750 # and cause less contention. Cloud Spanner attempts to keep read locks
3751 # active as long as the transaction continues to do reads, and the
3752 # transaction has not been terminated by
3753 # Commit or
3754 # Rollback. Long periods of
3755 # inactivity at the client may cause Cloud Spanner to release a
3756 # transaction&#x27;s locks and abort it.
3757 #
3758 # Conceptually, a read-write transaction consists of zero or more
3759 # reads or SQL statements followed by
3760 # Commit. At any time before
3761 # Commit, the client can send a
3762 # Rollback request to abort the
3763 # transaction.
3764 #
3765 # ### Semantics
3766 #
3767 # Cloud Spanner can commit the transaction if all read locks it acquired
3768 # are still valid at commit time, and it is able to acquire write
3769 # locks for all writes. Cloud Spanner can abort the transaction for any
3770 # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
3771 # that the transaction has not modified any user data in Cloud Spanner.
3772 #
3773 # Unless the transaction commits, Cloud Spanner makes no guarantees about
3774 # how long the transaction&#x27;s locks were held for. It is an error to
3775 # use Cloud Spanner locks for any sort of mutual exclusion other than
3776 # between Cloud Spanner transactions themselves.
3777 #
3778 # ### Retrying Aborted Transactions
3779 #
3780 # When a transaction aborts, the application can choose to retry the
3781 # whole transaction again. To maximize the chances of successfully
3782 # committing the retry, the client should execute the retry in the
3783 # same session as the original attempt. The original session&#x27;s lock
3784 # priority increases with each consecutive abort, meaning that each
3785 # attempt has a slightly better chance of success than the previous.
3786 #
3787 # Under some circumstances (e.g., many transactions attempting to
3788 # modify the same row(s)), a transaction can abort many times in a
3789 # short period before successfully committing. Thus, it is not a good
3790 # idea to cap the number of retries a transaction can attempt;
3791 # instead, it is better to limit the total amount of wall time spent
3792 # retrying.
3793 #
3794 # ### Idle Transactions
3795 #
3796 # A transaction is considered idle if it has no outstanding reads or
3797 # SQL queries and has not started a read or SQL query within the last 10
3798 # seconds. Idle transactions can be aborted by Cloud Spanner so that they
3799 # don&#x27;t hold on to locks indefinitely. In that case, the commit will
3800 # fail with error `ABORTED`.
3801 #
3802 # If this behavior is undesirable, periodically executing a simple
3803 # SQL query in the transaction (e.g., `SELECT 1`) prevents the
3804 # transaction from becoming idle.
3805 #
3806 # ## Snapshot Read-Only Transactions
3807 #
3808 # Snapshot read-only transactions provides a simpler method than
3809 # locking read-write transactions for doing several consistent
3810 # reads. However, this type of transaction does not support writes.
3811 #
3812 # Snapshot transactions do not take locks. Instead, they work by
3813 # choosing a Cloud Spanner timestamp, then executing all reads at that
3814 # timestamp. Since they do not acquire locks, they do not block
3815 # concurrent read-write transactions.
3816 #
3817 # Unlike locking read-write transactions, snapshot read-only
3818 # transactions never abort. They can fail if the chosen read
3819 # timestamp is garbage collected; however, the default garbage
3820 # collection policy is generous enough that most applications do not
3821 # need to worry about this in practice.
3822 #
3823 # Snapshot read-only transactions do not need to call
3824 # Commit or
3825 # Rollback (and in fact are not
3826 # permitted to do so).
3827 #
3828 # To execute a snapshot transaction, the client specifies a timestamp
3829 # bound, which tells Cloud Spanner how to choose a read timestamp.
3830 #
3831 # The types of timestamp bound are:
3832 #
3833 # - Strong (the default).
3834 # - Bounded staleness.
3835 # - Exact staleness.
3836 #
3837 # If the Cloud Spanner database to be read is geographically distributed,
3838 # stale read-only transactions can execute more quickly than strong
3839 # or read-write transaction, because they are able to execute far
3840 # from the leader replica.
3841 #
3842 # Each type of timestamp bound is discussed in detail below.
3843 #
3844 # ### Strong
3845 #
3846 # Strong reads are guaranteed to see the effects of all transactions
3847 # that have committed before the start of the read. Furthermore, all
3848 # rows yielded by a single read are consistent with each other -- if
3849 # any part of the read observes a transaction, all parts of the read
3850 # see the transaction.
3851 #
3852 # Strong reads are not repeatable: two consecutive strong read-only
3853 # transactions might return inconsistent results if there are
3854 # concurrent writes. If consistency across reads is required, the
3855 # reads should be executed within a transaction or at an exact read
3856 # timestamp.
3857 #
3858 # See TransactionOptions.ReadOnly.strong.
3859 #
3860 # ### Exact Staleness
3861 #
3862 # These timestamp bounds execute reads at a user-specified
3863 # timestamp. Reads at a timestamp are guaranteed to see a consistent
3864 # prefix of the global transaction history: they observe
3865 # modifications done by all transactions with a commit timestamp &lt;=
3866 # the read timestamp, and observe none of the modifications done by
3867 # transactions with a larger commit timestamp. They will block until
3868 # all conflicting transactions that may be assigned commit timestamps
3869 # &lt;= the read timestamp have finished.
3870 #
3871 # The timestamp can either be expressed as an absolute Cloud Spanner commit
3872 # timestamp or a staleness relative to the current time.
3873 #
3874 # These modes do not require a &quot;negotiation phase&quot; to pick a
3875 # timestamp. As a result, they execute slightly faster than the
3876 # equivalent boundedly stale concurrency modes. On the other hand,
3877 # boundedly stale reads usually return fresher results.
3878 #
3879 # See TransactionOptions.ReadOnly.read_timestamp and
3880 # TransactionOptions.ReadOnly.exact_staleness.
3881 #
3882 # ### Bounded Staleness
3883 #
3884 # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
3885 # subject to a user-provided staleness bound. Cloud Spanner chooses the
3886 # newest timestamp within the staleness bound that allows execution
3887 # of the reads at the closest available replica without blocking.
3888 #
3889 # All rows yielded are consistent with each other -- if any part of
3890 # the read observes a transaction, all parts of the read see the
3891 # transaction. Boundedly stale reads are not repeatable: two stale
3892 # reads, even if they use the same staleness bound, can execute at
3893 # different timestamps and thus return inconsistent results.
3894 #
3895 # Boundedly stale reads execute in two phases: the first phase
3896 # negotiates a timestamp among all replicas needed to serve the
3897 # read. In the second phase, reads are executed at the negotiated
3898 # timestamp.
3899 #
3900 # As a result of the two phase execution, bounded staleness reads are
3901 # usually a little slower than comparable exact staleness
3902 # reads. However, they are typically able to return fresher
3903 # results, and are more likely to execute at the closest replica.
3904 #
3905 # Because the timestamp negotiation requires up-front knowledge of
3906 # which rows will be read, it can only be used with single-use
3907 # read-only transactions.
3908 #
3909 # See TransactionOptions.ReadOnly.max_staleness and
3910 # TransactionOptions.ReadOnly.min_read_timestamp.
3911 #
3912 # ### Old Read Timestamps and Garbage Collection
3913 #
3914 # Cloud Spanner continuously garbage collects deleted and overwritten data
3915 # in the background to reclaim storage space. This process is known
3916 # as &quot;version GC&quot;. By default, version GC reclaims versions after they
3917 # are one hour old. Because of this, Cloud Spanner cannot perform reads
3918 # at read timestamps more than one hour in the past. This
3919 # restriction also applies to in-progress reads and/or SQL queries whose
3920 # timestamp become too old while executing. Reads and SQL queries with
3921 # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
3922 #
3923 # ## Partitioned DML Transactions
3924 #
3925 # Partitioned DML transactions are used to execute DML statements with a
3926 # different execution strategy that provides different, and often better,
3927 # scalability properties for large, table-wide operations than DML in a
3928 # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
3929 # should prefer using ReadWrite transactions.
3930 #
3931 # Partitioned DML partitions the keyspace and runs the DML statement on each
3932 # partition in separate, internal transactions. These transactions commit
3933 # automatically when complete, and run independently from one another.
3934 #
3935 # To reduce lock contention, this execution strategy only acquires read locks
3936 # on rows that match the WHERE clause of the statement. Additionally, the
3937 # smaller per-partition transactions hold locks for less time.
3938 #
3939 # That said, Partitioned DML is not a drop-in replacement for standard DML used
3940 # in ReadWrite transactions.
3941 #
3942 # - The DML statement must be fully-partitionable. Specifically, the statement
3943 # must be expressible as the union of many statements which each access only
3944 # a single row of the table.
3945 #
3946 # - The statement is not applied atomically to all rows of the table. Rather,
3947 # the statement is applied atomically to partitions of the table, in
3948 # independent transactions. Secondary index rows are updated atomically
3949 # with the base table rows.
3950 #
3951 # - Partitioned DML does not guarantee exactly-once execution semantics
3952 # against a partition. The statement will be applied at least once to each
3953 # partition. It is strongly recommended that the DML statement should be
3954 # idempotent to avoid unexpected results. For instance, it is potentially
3955 # dangerous to run a statement such as
3956 # `UPDATE table SET column = column + 1` as it could be run multiple times
3957 # against some rows.
3958 #
3959 # - The partitions are committed automatically - there is no support for
3960 # Commit or Rollback. If the call returns an error, or if the client issuing
3961 # the ExecuteSql call dies, it is possible that some rows had the statement
3962 # executed on them successfully. It is also possible that statement was
3963 # never executed against other rows.
3964 #
3965 # - Partitioned DML transactions may only contain the execution of a single
3966 # DML statement via ExecuteSql or ExecuteStreamingSql.
3967 #
3968 # - If any error is encountered during the execution of the partitioned DML
3969 # operation (for instance, a UNIQUE INDEX violation, division by zero, or a
3970 # value that cannot be stored due to schema constraints), then the
3971 # operation is stopped at that point and an error is returned. It is
3972 # possible that at this point, some partitions have been committed (or even
3973 # committed multiple times), and other partitions have not been run at all.
3974 #
3975 # Given the above, Partitioned DML is good fit for large, database-wide,
3976 # operations that are idempotent, such as deleting old rows from a very large
3977 # table.
3978 &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
3979 #
3980 # Authorization to begin a Partitioned DML transaction requires
3981 # `spanner.databases.beginPartitionedDmlTransaction` permission
3982 # on the `session` resource.
Bu Sun Kim65020912020-05-20 12:08:20 -07003983 },
3984 &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
3985 #
3986 # Authorization to begin a read-write transaction requires
3987 # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
3988 # on the `session` resource.
3989 # transaction type has no options.
3990 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07003991 &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
3992 #
3993 # Authorization to begin a read-only transaction requires
3994 # `spanner.databases.beginReadOnlyTransaction` permission
3995 # on the `session` resource.
3996 &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
3997 # seconds. Guarantees that all writes that have committed more
3998 # than the specified number of seconds ago are visible. Because
3999 # Cloud Spanner chooses the exact timestamp, this mode works even if
4000 # the client&#x27;s local clock is substantially skewed from Cloud Spanner
4001 # commit timestamps.
4002 #
4003 # Useful for reading the freshest data available at a nearby
4004 # replica, while bounding the possible staleness if the local
4005 # replica has fallen behind.
4006 #
4007 # Note that this option can only be used in single-use
4008 # transactions.
4009 &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
4010 #
4011 # This is useful for requesting fresher data than some previous
4012 # read, or data that is fresh enough to observe the effects of some
4013 # previously committed transaction whose timestamp is known.
4014 #
4015 # Note that this option can only be used in single-use transactions.
4016 #
4017 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
4018 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
4019 &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
4020 # are visible.
4021 &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
4022 # the Transaction message that describes the transaction.
4023 &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
4024 # old. The timestamp is chosen soon after the read is started.
4025 #
4026 # Guarantees that all writes that have committed more than the
4027 # specified number of seconds ago are visible. Because Cloud Spanner
4028 # chooses the exact timestamp, this mode works even if the client&#x27;s
4029 # local clock is substantially skewed from Cloud Spanner commit
4030 # timestamps.
4031 #
4032 # Useful for reading at nearby replicas without the distributed
4033 # timestamp negotiation overhead of `max_staleness`.
4034 &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
4035 # reads at a specific timestamp are repeatable; the same read at
4036 # the same timestamp always returns the same data. If the
4037 # timestamp is in the future, the read will block until the
4038 # specified timestamp, modulo the read&#x27;s deadline.
4039 #
4040 # Useful for large scale consistent reads such as mapreduces, or
4041 # for coordinating many reads against a consistent snapshot of the
4042 # data.
4043 #
4044 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
4045 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
4046 },
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04004047 },
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04004048 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07004049 &quot;queryMode&quot;: &quot;A String&quot;, # Used to control the amount of debugging information returned in
4050 # ResultSetStats. If partition_token is set, query_mode can only
4051 # be set to QueryMode.NORMAL.
4052 &quot;partitionToken&quot;: &quot;A String&quot;, # If present, results will be restricted to the specified partition
4053 # previously created using PartitionQuery(). There must be an exact
4054 # match for the values of fields common to this message and the
4055 # PartitionQueryRequest message used to create this partition_token.
Bu Sun Kim65020912020-05-20 12:08:20 -07004056 &quot;resumeToken&quot;: &quot;A String&quot;, # If this request is resuming a previously interrupted SQL statement
4057 # execution, `resume_token` should be copied from the last
4058 # PartialResultSet yielded before the interruption. Doing this
4059 # enables the new SQL statement execution to resume where the last one left
4060 # off. The rest of the request parameters must exactly match the
4061 # request that yielded this token.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07004062 &quot;queryOptions&quot;: { # Query optimizer configuration. # Query optimizer configuration to use for the given query.
4063 &quot;optimizerVersion&quot;: &quot;A String&quot;, # An option to control the selection of optimizer version.
4064 #
4065 # This parameter allows individual queries to pick different query
4066 # optimizer versions.
4067 #
4068 # Specifying &quot;latest&quot; as a value instructs Cloud Spanner to use the
4069 # latest supported query optimizer version. If not specified, Cloud Spanner
4070 # uses optimizer version set at the database level options. Any other
4071 # positive integer (from the list of supported optimizer versions)
4072 # overrides the default optimizer version for query execution.
4073 # The list of supported optimizer versions can be queried from
4074 # SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS. Executing a SQL statement
4075 # with an invalid optimizer version will fail with a syntax error
4076 # (`INVALID_ARGUMENT`) status.
4077 # See
4078 # https://cloud.google.com/spanner/docs/query-optimizer/manage-query-optimizer
4079 # for more information on managing the query optimizer.
4080 #
4081 # The `optimizer_version` statement hint has precedence over this setting.
4082 },
4083 &quot;params&quot;: { # Parameter names and values that bind to placeholders in the SQL string.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07004084 #
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07004085 # A parameter placeholder consists of the `@` character followed by the
4086 # parameter name (for example, `@firstName`). Parameter names can contain
4087 # letters, numbers, and underscores.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07004088 #
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07004089 # Parameters can appear anywhere that a literal value is expected. The same
4090 # parameter name can be used more than once, for example:
4091 #
4092 # `&quot;WHERE id &gt; @msg_id AND id &lt; @msg_id + 100&quot;`
4093 #
4094 # It is an error to execute a SQL statement with unbound parameters.
4095 &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
4096 },
4097 &quot;sql&quot;: &quot;A String&quot;, # Required. The SQL string.
Bu Sun Kim65020912020-05-20 12:08:20 -07004098 &quot;paramTypes&quot;: { # It is not always possible for Cloud Spanner to infer the right SQL type
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04004099 # from a JSON value. For example, values of type `BYTES` and values
4100 # of type `STRING` both appear in params as JSON strings.
4101 #
4102 # In these cases, `param_types` can be used to specify the exact
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07004103 # SQL type for some or all of the SQL statement parameters. See the
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04004104 # definition of Type for more information
4105 # about SQL types.
Bu Sun Kim65020912020-05-20 12:08:20 -07004106 &quot;a_key&quot;: { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04004107 # table cell or returned from an SQL query.
Bu Sun Kim65020912020-05-20 12:08:20 -07004108 &quot;arrayElementType&quot;: # Object with schema name: Type # If code == ARRAY, then `array_element_type`
4109 # is the type of the array elements.
4110 &quot;code&quot;: &quot;A String&quot;, # Required. The TypeCode for this type.
4111 &quot;structType&quot;: { # `StructType` defines the fields of a STRUCT type. # If code == STRUCT, then `struct_type`
4112 # provides type information for the struct&#x27;s fields.
4113 &quot;fields&quot;: [ # The list of fields that make up this struct. Order is
Dan O'Mearadd494642020-05-01 07:42:23 -07004114 # significant, because values of this struct type are represented as
4115 # lists, where the order of field values matches the order of
4116 # fields in the StructType. In turn, the order of fields
4117 # matches the order of columns in a read request, or the order of
4118 # fields in the `SELECT` clause of a query.
4119 { # Message representing a single field of a struct.
Bu Sun Kim65020912020-05-20 12:08:20 -07004120 &quot;name&quot;: &quot;A String&quot;, # The name of the field. For reads, this is the column name. For
4121 # SQL queries, it is the column alias (e.g., `&quot;Word&quot;` in the
4122 # query `&quot;SELECT &#x27;hello&#x27; AS Word&quot;`), or the column name (e.g.,
4123 # `&quot;ColName&quot;` in the query `&quot;SELECT ColName FROM Table&quot;`). Some
4124 # columns might have an empty name (e.g., !&quot;SELECT
4125 # UPPER(ColName)&quot;`). Note that a query result can contain
Dan O'Mearadd494642020-05-01 07:42:23 -07004126 # multiple fields with the same name.
Bu Sun Kim65020912020-05-20 12:08:20 -07004127 &quot;type&quot;: # Object with schema name: Type # The type of the field.
Dan O'Mearadd494642020-05-01 07:42:23 -07004128 },
4129 ],
4130 },
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04004131 },
4132 },
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04004133 }
4134
4135 x__xgafv: string, V1 error format.
4136 Allowed values
4137 1 - v1 error format
4138 2 - v2 error format
4139
4140Returns:
4141 An object of the form:
4142
4143 { # Partial results from a streaming read or SQL query. Streaming reads and
4144 # SQL queries better tolerate large result sets, large rows, and large
4145 # values, but are a little trickier to consume.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07004146 &quot;stats&quot;: { # Additional statistics about a ResultSet or PartialResultSet. # Query plan and execution statistics for the statement that produced this
4147 # streaming result set. These can be requested by setting
4148 # ExecuteSqlRequest.query_mode and are sent
4149 # only once with the last response in the stream.
4150 # This field will also be present in the last response for DML
4151 # statements.
4152 &quot;rowCountLowerBound&quot;: &quot;A String&quot;, # Partitioned DML does not offer exactly-once semantics, so it
4153 # returns a lower bound of the rows modified.
4154 &quot;queryPlan&quot;: { # Contains an ordered list of nodes appearing in the query plan. # QueryPlan for the query associated with this result.
4155 &quot;planNodes&quot;: [ # The nodes in the query plan. Plan nodes are returned in pre-order starting
4156 # with the plan root. Each PlanNode&#x27;s `id` corresponds to its index in
4157 # `plan_nodes`.
4158 { # Node information for nodes appearing in a QueryPlan.plan_nodes.
4159 &quot;displayName&quot;: &quot;A String&quot;, # The display name for the node.
4160 &quot;executionStats&quot;: { # The execution statistics associated with the node, contained in a group of
4161 # key-value pairs. Only present if the plan was returned as a result of a
4162 # profile query. For example, number of executions, number of rows/time per
4163 # execution etc.
4164 &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
4165 },
4166 &quot;shortRepresentation&quot;: { # Condensed representation of a node and its subtree. Only present for # Condensed representation for SCALAR nodes.
4167 # `SCALAR` PlanNode(s).
4168 &quot;subqueries&quot;: { # A mapping of (subquery variable name) -&gt; (subquery node id) for cases
4169 # where the `description` string of this node references a `SCALAR`
4170 # subquery contained in the expression subtree rooted at this node. The
4171 # referenced `SCALAR` subquery may not necessarily be a direct child of
4172 # this node.
4173 &quot;a_key&quot;: 42,
4174 },
4175 &quot;description&quot;: &quot;A String&quot;, # A string representation of the expression subtree rooted at this node.
4176 },
4177 &quot;metadata&quot;: { # Attributes relevant to the node contained in a group of key-value pairs.
4178 # For example, a Parameter Reference node could have the following
4179 # information in its metadata:
4180 #
4181 # {
4182 # &quot;parameter_reference&quot;: &quot;param1&quot;,
4183 # &quot;parameter_type&quot;: &quot;array&quot;
4184 # }
4185 &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
4186 },
4187 &quot;childLinks&quot;: [ # List of child node `index`es and their relationship to this parent.
4188 { # Metadata associated with a parent-child relationship appearing in a
4189 # PlanNode.
4190 &quot;type&quot;: &quot;A String&quot;, # The type of the link. For example, in Hash Joins this could be used to
4191 # distinguish between the build child and the probe child, or in the case
4192 # of the child being an output variable, to represent the tag associated
4193 # with the output variable.
4194 &quot;variable&quot;: &quot;A String&quot;, # Only present if the child node is SCALAR and corresponds
4195 # to an output variable of the parent node. The field carries the name of
4196 # the output variable.
4197 # For example, a `TableScan` operator that reads rows from a table will
4198 # have child links to the `SCALAR` nodes representing the output variables
4199 # created for each column that is read by the operator. The corresponding
4200 # `variable` fields will be set to the variable names assigned to the
4201 # columns.
4202 &quot;childIndex&quot;: 42, # The node to which the link points.
4203 },
4204 ],
4205 &quot;index&quot;: 42, # The `PlanNode`&#x27;s index in node list.
4206 &quot;kind&quot;: &quot;A String&quot;, # Used to determine the type of node. May be needed for visualizing
4207 # different kinds of nodes differently. For example, If the node is a
4208 # SCALAR node, it will have a condensed representation
4209 # which can be used to directly embed a description of the node in its
4210 # parent.
4211 },
4212 ],
4213 },
4214 &quot;rowCountExact&quot;: &quot;A String&quot;, # Standard DML returns an exact count of rows that were modified.
4215 &quot;queryStats&quot;: { # Aggregated statistics from the execution of the query. Only present when
4216 # the query is profiled. For example, a query could return the statistics as
4217 # follows:
4218 #
4219 # {
4220 # &quot;rows_returned&quot;: &quot;3&quot;,
4221 # &quot;elapsed_time&quot;: &quot;1.22 secs&quot;,
4222 # &quot;cpu_time&quot;: &quot;1.19 secs&quot;
4223 # }
4224 &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
4225 },
4226 },
4227 &quot;chunkedValue&quot;: True or False, # If true, then the final value in values is chunked, and must
4228 # be combined with more values from subsequent `PartialResultSet`s
4229 # to obtain a complete field value.
Bu Sun Kim65020912020-05-20 12:08:20 -07004230 &quot;values&quot;: [ # A streamed result set consists of a stream of values, which might
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04004231 # be split into many `PartialResultSet` messages to accommodate
4232 # large rows and/or large values. Every N complete values defines a
4233 # row, where N is equal to the number of entries in
4234 # metadata.row_type.fields.
4235 #
4236 # Most values are encoded based on type as described
4237 # here.
4238 #
Bu Sun Kim65020912020-05-20 12:08:20 -07004239 # It is possible that the last value in values is &quot;chunked&quot;,
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04004240 # meaning that the rest of the value is sent in subsequent
4241 # `PartialResultSet`(s). This is denoted by the chunked_value
4242 # field. Two or more chunked values can be merged to form a
4243 # complete value as follows:
4244 #
4245 # * `bool/number/null`: cannot be chunked
4246 # * `string`: concatenate the strings
4247 # * `list`: concatenate the lists. If the last element in a list is a
4248 # `string`, `list`, or `object`, merge it with the first element in
4249 # the next list by applying these rules recursively.
4250 # * `object`: concatenate the (field name, field value) pairs. If a
4251 # field name is duplicated, then apply these rules recursively
4252 # to merge the field values.
4253 #
4254 # Some examples of merging:
4255 #
4256 # # Strings are concatenated.
Bu Sun Kim65020912020-05-20 12:08:20 -07004257 # &quot;foo&quot;, &quot;bar&quot; =&gt; &quot;foobar&quot;
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04004258 #
4259 # # Lists of non-strings are concatenated.
Dan O'Mearadd494642020-05-01 07:42:23 -07004260 # [2, 3], [4] =&gt; [2, 3, 4]
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04004261 #
4262 # # Lists are concatenated, but the last and first elements are merged
4263 # # because they are strings.
Bu Sun Kim65020912020-05-20 12:08:20 -07004264 # [&quot;a&quot;, &quot;b&quot;], [&quot;c&quot;, &quot;d&quot;] =&gt; [&quot;a&quot;, &quot;bc&quot;, &quot;d&quot;]
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04004265 #
4266 # # Lists are concatenated, but the last and first elements are merged
4267 # # because they are lists. Recursively, the last and first elements
4268 # # of the inner lists are merged because they are strings.
Bu Sun Kim65020912020-05-20 12:08:20 -07004269 # [&quot;a&quot;, [&quot;b&quot;, &quot;c&quot;]], [[&quot;d&quot;], &quot;e&quot;] =&gt; [&quot;a&quot;, [&quot;b&quot;, &quot;cd&quot;], &quot;e&quot;]
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04004270 #
4271 # # Non-overlapping object fields are combined.
Bu Sun Kim65020912020-05-20 12:08:20 -07004272 # {&quot;a&quot;: &quot;1&quot;}, {&quot;b&quot;: &quot;2&quot;} =&gt; {&quot;a&quot;: &quot;1&quot;, &quot;b&quot;: 2&quot;}
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04004273 #
4274 # # Overlapping object fields are merged.
Bu Sun Kim65020912020-05-20 12:08:20 -07004275 # {&quot;a&quot;: &quot;1&quot;}, {&quot;a&quot;: &quot;2&quot;} =&gt; {&quot;a&quot;: &quot;12&quot;}
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04004276 #
4277 # # Examples of merging objects containing lists of strings.
Bu Sun Kim65020912020-05-20 12:08:20 -07004278 # {&quot;a&quot;: [&quot;1&quot;]}, {&quot;a&quot;: [&quot;2&quot;]} =&gt; {&quot;a&quot;: [&quot;12&quot;]}
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04004279 #
4280 # For a more complete example, suppose a streaming SQL query is
4281 # yielding a result set whose rows contain a single string
4282 # field. The following `PartialResultSet`s might be yielded:
4283 #
4284 # {
Bu Sun Kim65020912020-05-20 12:08:20 -07004285 # &quot;metadata&quot;: { ... }
4286 # &quot;values&quot;: [&quot;Hello&quot;, &quot;W&quot;]
4287 # &quot;chunked_value&quot;: true
4288 # &quot;resume_token&quot;: &quot;Af65...&quot;
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04004289 # }
4290 # {
Bu Sun Kim65020912020-05-20 12:08:20 -07004291 # &quot;values&quot;: [&quot;orl&quot;]
4292 # &quot;chunked_value&quot;: true
4293 # &quot;resume_token&quot;: &quot;Bqp2...&quot;
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04004294 # }
4295 # {
Bu Sun Kim65020912020-05-20 12:08:20 -07004296 # &quot;values&quot;: [&quot;d&quot;]
4297 # &quot;resume_token&quot;: &quot;Zx1B...&quot;
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04004298 # }
4299 #
4300 # This sequence of `PartialResultSet`s encodes two rows, one
Bu Sun Kim65020912020-05-20 12:08:20 -07004301 # containing the field value `&quot;Hello&quot;`, and a second containing the
4302 # field value `&quot;World&quot; = &quot;W&quot; + &quot;orl&quot; + &quot;d&quot;`.
4303 &quot;&quot;,
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04004304 ],
Bu Sun Kim65020912020-05-20 12:08:20 -07004305 &quot;metadata&quot;: { # Metadata about a ResultSet or PartialResultSet. # Metadata about the result set, such as row type information.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04004306 # Only present in the first response.
Bu Sun Kim65020912020-05-20 12:08:20 -07004307 &quot;rowType&quot;: { # `StructType` defines the fields of a STRUCT type. # Indicates the field names and types for the rows in the result
4308 # set. For example, a SQL query like `&quot;SELECT UserId, UserName FROM
4309 # Users&quot;` could return a `row_type` value like:
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04004310 #
Bu Sun Kim65020912020-05-20 12:08:20 -07004311 # &quot;fields&quot;: [
4312 # { &quot;name&quot;: &quot;UserId&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;INT64&quot; } },
4313 # { &quot;name&quot;: &quot;UserName&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;STRING&quot; } },
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04004314 # ]
Bu Sun Kim65020912020-05-20 12:08:20 -07004315 &quot;fields&quot;: [ # The list of fields that make up this struct. Order is
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04004316 # significant, because values of this struct type are represented as
4317 # lists, where the order of field values matches the order of
4318 # fields in the StructType. In turn, the order of fields
4319 # matches the order of columns in a read request, or the order of
4320 # fields in the `SELECT` clause of a query.
4321 { # Message representing a single field of a struct.
Bu Sun Kim65020912020-05-20 12:08:20 -07004322 &quot;name&quot;: &quot;A String&quot;, # The name of the field. For reads, this is the column name. For
4323 # SQL queries, it is the column alias (e.g., `&quot;Word&quot;` in the
4324 # query `&quot;SELECT &#x27;hello&#x27; AS Word&quot;`), or the column name (e.g.,
4325 # `&quot;ColName&quot;` in the query `&quot;SELECT ColName FROM Table&quot;`). Some
4326 # columns might have an empty name (e.g., !&quot;SELECT
4327 # UPPER(ColName)&quot;`). Note that a query result can contain
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04004328 # multiple fields with the same name.
Bu Sun Kim65020912020-05-20 12:08:20 -07004329 &quot;type&quot;: # Object with schema name: Type # The type of the field.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04004330 },
4331 ],
4332 },
Bu Sun Kim65020912020-05-20 12:08:20 -07004333 &quot;transaction&quot;: { # A transaction. # If the read or SQL query began a transaction as a side-effect, the
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04004334 # information about the new transaction is yielded here.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07004335 &quot;readTimestamp&quot;: &quot;A String&quot;, # For snapshot read-only transactions, the read timestamp chosen
4336 # for the transaction. Not returned by default: see
4337 # TransactionOptions.ReadOnly.return_read_timestamp.
4338 #
4339 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
4340 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
Bu Sun Kim65020912020-05-20 12:08:20 -07004341 &quot;id&quot;: &quot;A String&quot;, # `id` may be used to identify the transaction in subsequent
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04004342 # Read,
4343 # ExecuteSql,
4344 # Commit, or
4345 # Rollback calls.
4346 #
4347 # Single-use read-only transactions do not have IDs, because
4348 # single-use transactions do not support multiple requests.
4349 },
4350 },
Bu Sun Kim65020912020-05-20 12:08:20 -07004351 &quot;resumeToken&quot;: &quot;A String&quot;, # Streaming calls might be interrupted for a variety of reasons, such
4352 # as TCP connection loss. If this occurs, the stream of results can
4353 # be resumed by re-sending the original request and including
4354 # `resume_token`. Note that executing any other transaction in the
4355 # same session invalidates the token.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04004356 }</pre>
4357</div>
4358
4359<div class="method">
4360 <code class="details" id="get">get(name, x__xgafv=None)</code>
4361 <pre>Gets a session. Returns `NOT_FOUND` if the session does not exist.
4362This is mainly useful for determining whether a session is still
4363alive.
4364
4365Args:
4366 name: string, Required. The name of the session to retrieve. (required)
4367 x__xgafv: string, V1 error format.
4368 Allowed values
4369 1 - v1 error format
4370 2 - v2 error format
4371
4372Returns:
4373 An object of the form:
4374
4375 { # A session in the Cloud Spanner API.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07004376 &quot;createTime&quot;: &quot;A String&quot;, # Output only. The timestamp when the session is created.
Bu Sun Kim65020912020-05-20 12:08:20 -07004377 &quot;name&quot;: &quot;A String&quot;, # The name of the session. This is always system-assigned; values provided
4378 # when creating a session are ignored.
Bu Sun Kim65020912020-05-20 12:08:20 -07004379 &quot;labels&quot;: { # The labels for the session.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07004380 #
4381 # * Label keys must be between 1 and 63 characters long and must conform to
4382 # the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.
4383 # * Label values must be between 0 and 63 characters long and must conform
4384 # to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
4385 # * No more than 64 labels can be associated with a given session.
4386 #
4387 # See https://goo.gl/xmQnxf for more information on and examples of labels.
Bu Sun Kim65020912020-05-20 12:08:20 -07004388 &quot;a_key&quot;: &quot;A String&quot;,
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07004389 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07004390 &quot;approximateLastUseTime&quot;: &quot;A String&quot;, # Output only. The approximate timestamp when the session is last used. It is
4391 # typically earlier than the actual last use time.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07004392 }</pre>
4393</div>
4394
4395<div class="method">
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07004396 <code class="details" id="list">list(database, filter=None, pageSize=None, pageToken=None, x__xgafv=None)</code>
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07004397 <pre>Lists all sessions in a given database.
4398
4399Args:
4400 database: string, Required. The database in which to list sessions. (required)
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07004401 filter: string, An expression for filtering the results of the request. Filter rules are
4402case insensitive. The fields eligible for filtering are:
4403
4404 * `labels.key` where key is the name of a label
4405
4406Some examples of using filters are:
4407
Bu Sun Kim65020912020-05-20 12:08:20 -07004408 * `labels.env:*` --&gt; The session has the label &quot;env&quot;.
4409 * `labels.env:dev` --&gt; The session has the label &quot;env&quot; and the value of
4410 the label contains the string &quot;dev&quot;.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07004411 pageSize: integer, Number of sessions to be returned in the response. If 0 or less, defaults
4412to the server&#x27;s maximum allowed page size.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07004413 pageToken: string, If non-empty, `page_token` should contain a
4414next_page_token from a previous
4415ListSessionsResponse.
4416 x__xgafv: string, V1 error format.
4417 Allowed values
4418 1 - v1 error format
4419 2 - v2 error format
4420
4421Returns:
4422 An object of the form:
4423
4424 { # The response for ListSessions.
Bu Sun Kim65020912020-05-20 12:08:20 -07004425 &quot;sessions&quot;: [ # The list of requested sessions.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07004426 { # A session in the Cloud Spanner API.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07004427 &quot;createTime&quot;: &quot;A String&quot;, # Output only. The timestamp when the session is created.
Bu Sun Kim65020912020-05-20 12:08:20 -07004428 &quot;name&quot;: &quot;A String&quot;, # The name of the session. This is always system-assigned; values provided
4429 # when creating a session are ignored.
Bu Sun Kim65020912020-05-20 12:08:20 -07004430 &quot;labels&quot;: { # The labels for the session.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07004431 #
4432 # * Label keys must be between 1 and 63 characters long and must conform to
4433 # the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.
4434 # * Label values must be between 0 and 63 characters long and must conform
4435 # to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
4436 # * No more than 64 labels can be associated with a given session.
4437 #
4438 # See https://goo.gl/xmQnxf for more information on and examples of labels.
Bu Sun Kim65020912020-05-20 12:08:20 -07004439 &quot;a_key&quot;: &quot;A String&quot;,
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07004440 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07004441 &quot;approximateLastUseTime&quot;: &quot;A String&quot;, # Output only. The approximate timestamp when the session is last used. It is
4442 # typically earlier than the actual last use time.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07004443 },
4444 ],
Bu Sun Kim65020912020-05-20 12:08:20 -07004445 &quot;nextPageToken&quot;: &quot;A String&quot;, # `next_page_token` can be sent in a subsequent
4446 # ListSessions call to fetch more of the matching
4447 # sessions.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07004448 }</pre>
4449</div>
4450
4451<div class="method">
4452 <code class="details" id="list_next">list_next(previous_request, previous_response)</code>
4453 <pre>Retrieves the next page of results.
4454
4455Args:
4456 previous_request: The request for the previous page. (required)
4457 previous_response: The response from the request for the previous page. (required)
4458
4459Returns:
Bu Sun Kim65020912020-05-20 12:08:20 -07004460 A request object that you can call &#x27;execute()&#x27; on to request the next
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07004461 page. Returns None if there are no more items in the collection.
4462 </pre>
4463</div>
4464
4465<div class="method">
Dan O'Mearadd494642020-05-01 07:42:23 -07004466 <code class="details" id="partitionQuery">partitionQuery(session, body=None, x__xgafv=None)</code>
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07004467 <pre>Creates a set of partition tokens that can be used to execute a query
4468operation in parallel. Each of the returned partition tokens can be used
4469by ExecuteStreamingSql to specify a subset
4470of the query result to read. The same session and read-only transaction
4471must be used by the PartitionQueryRequest used to create the
4472partition tokens and the ExecuteSqlRequests that use the partition tokens.
4473
4474Partition tokens become invalid when the session used to create them
4475is deleted, is idle for too long, begins a new transaction, or becomes too
4476old. When any of these happen, it is not possible to resume the query, and
4477the whole operation must be restarted from the beginning.
4478
4479Args:
4480 session: string, Required. The session used to create the partitions. (required)
Dan O'Mearadd494642020-05-01 07:42:23 -07004481 body: object, The request body.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07004482 The object takes the form of:
4483
4484{ # The request for PartitionQuery
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07004485 &quot;params&quot;: { # Parameter names and values that bind to placeholders in the SQL string.
4486 #
4487 # A parameter placeholder consists of the `@` character followed by the
4488 # parameter name (for example, `@firstName`). Parameter names can contain
4489 # letters, numbers, and underscores.
4490 #
4491 # Parameters can appear anywhere that a literal value is expected. The same
4492 # parameter name can be used more than once, for example:
4493 #
4494 # `&quot;WHERE id &gt; @msg_id AND id &lt; @msg_id + 100&quot;`
4495 #
4496 # It is an error to execute a SQL statement with unbound parameters.
4497 &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
4498 },
4499 &quot;partitionOptions&quot;: { # Options for a PartitionQueryRequest and # Additional options that affect how many partitions are created.
4500 # PartitionReadRequest.
4501 &quot;maxPartitions&quot;: &quot;A String&quot;, # **Note:** This hint is currently ignored by PartitionQuery and
4502 # PartitionRead requests.
4503 #
4504 # The desired maximum number of partitions to return. For example, this may
4505 # be set to the number of workers available. The default for this option
4506 # is currently 10,000. The maximum value is currently 200,000. This is only
4507 # a hint. The actual number of partitions returned may be smaller or larger
4508 # than this maximum count request.
4509 &quot;partitionSizeBytes&quot;: &quot;A String&quot;, # **Note:** This hint is currently ignored by PartitionQuery and
4510 # PartitionRead requests.
4511 #
4512 # The desired data size for each partition generated. The default for this
4513 # option is currently 1 GiB. This is only a hint. The actual size of each
4514 # partition may be smaller or larger than this size request.
4515 },
Bu Sun Kim65020912020-05-20 12:08:20 -07004516 &quot;sql&quot;: &quot;A String&quot;, # Required. The query request to generate partitions for. The request will fail if
4517 # the query is not root partitionable. The query plan of a root
4518 # partitionable query has a single distributed union operator. A distributed
4519 # union operator conceptually divides one or more tables into multiple
4520 # splits, remotely evaluates a subquery independently on each split, and
4521 # then unions all results.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07004522 #
Bu Sun Kim65020912020-05-20 12:08:20 -07004523 # This must not contain DML commands, such as INSERT, UPDATE, or
4524 # DELETE. Use ExecuteStreamingSql with a
4525 # PartitionedDml transaction for large, partition-friendly DML operations.
Bu Sun Kim65020912020-05-20 12:08:20 -07004526 &quot;transaction&quot;: { # This message is used to select the transaction in which a # Read only snapshot transactions are supported, read/write and single use
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07004527 # transactions are not.
4528 # Read or
4529 # ExecuteSql call runs.
4530 #
4531 # See TransactionOptions for more information about transactions.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07004532 &quot;id&quot;: &quot;A String&quot;, # Execute the read or SQL query in a previously-started transaction.
Bu Sun Kim65020912020-05-20 12:08:20 -07004533 &quot;begin&quot;: { # # Transactions # Begin a new transaction and execute this read or SQL query in
4534 # it. The transaction ID of the new transaction is returned in
4535 # ResultSetMetadata.transaction, which is a Transaction.
4536 #
4537 #
4538 # Each session can have at most one active transaction at a time. After the
4539 # active transaction is completed, the session can immediately be
4540 # re-used for the next transaction. It is not necessary to create a
4541 # new session for each transaction.
4542 #
4543 # # Transaction Modes
4544 #
4545 # Cloud Spanner supports three transaction modes:
4546 #
4547 # 1. Locking read-write. This type of transaction is the only way
4548 # to write data into Cloud Spanner. These transactions rely on
4549 # pessimistic locking and, if necessary, two-phase commit.
4550 # Locking read-write transactions may abort, requiring the
4551 # application to retry.
4552 #
4553 # 2. Snapshot read-only. This transaction type provides guaranteed
4554 # consistency across several reads, but does not allow
4555 # writes. Snapshot read-only transactions can be configured to
4556 # read at timestamps in the past. Snapshot read-only
4557 # transactions do not need to be committed.
4558 #
4559 # 3. Partitioned DML. This type of transaction is used to execute
4560 # a single Partitioned DML statement. Partitioned DML partitions
4561 # the key space and runs the DML statement over each partition
4562 # in parallel using separate, internal transactions that commit
4563 # independently. Partitioned DML transactions do not need to be
4564 # committed.
4565 #
4566 # For transactions that only read, snapshot read-only transactions
4567 # provide simpler semantics and are almost always faster. In
4568 # particular, read-only transactions do not take locks, so they do
4569 # not conflict with read-write transactions. As a consequence of not
4570 # taking locks, they also do not abort, so retry loops are not needed.
4571 #
4572 # Transactions may only read/write data in a single database. They
4573 # may, however, read/write data in different tables within that
4574 # database.
4575 #
4576 # ## Locking Read-Write Transactions
4577 #
4578 # Locking transactions may be used to atomically read-modify-write
4579 # data anywhere in a database. This type of transaction is externally
4580 # consistent.
4581 #
4582 # Clients should attempt to minimize the amount of time a transaction
4583 # is active. Faster transactions commit with higher probability
4584 # and cause less contention. Cloud Spanner attempts to keep read locks
4585 # active as long as the transaction continues to do reads, and the
4586 # transaction has not been terminated by
4587 # Commit or
4588 # Rollback. Long periods of
4589 # inactivity at the client may cause Cloud Spanner to release a
4590 # transaction&#x27;s locks and abort it.
4591 #
4592 # Conceptually, a read-write transaction consists of zero or more
4593 # reads or SQL statements followed by
4594 # Commit. At any time before
4595 # Commit, the client can send a
4596 # Rollback request to abort the
4597 # transaction.
4598 #
4599 # ### Semantics
4600 #
4601 # Cloud Spanner can commit the transaction if all read locks it acquired
4602 # are still valid at commit time, and it is able to acquire write
4603 # locks for all writes. Cloud Spanner can abort the transaction for any
4604 # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
4605 # that the transaction has not modified any user data in Cloud Spanner.
4606 #
4607 # Unless the transaction commits, Cloud Spanner makes no guarantees about
4608 # how long the transaction&#x27;s locks were held for. It is an error to
4609 # use Cloud Spanner locks for any sort of mutual exclusion other than
4610 # between Cloud Spanner transactions themselves.
4611 #
4612 # ### Retrying Aborted Transactions
4613 #
4614 # When a transaction aborts, the application can choose to retry the
4615 # whole transaction again. To maximize the chances of successfully
4616 # committing the retry, the client should execute the retry in the
4617 # same session as the original attempt. The original session&#x27;s lock
4618 # priority increases with each consecutive abort, meaning that each
4619 # attempt has a slightly better chance of success than the previous.
4620 #
4621 # Under some circumstances (e.g., many transactions attempting to
4622 # modify the same row(s)), a transaction can abort many times in a
4623 # short period before successfully committing. Thus, it is not a good
4624 # idea to cap the number of retries a transaction can attempt;
4625 # instead, it is better to limit the total amount of wall time spent
4626 # retrying.
4627 #
4628 # ### Idle Transactions
4629 #
4630 # A transaction is considered idle if it has no outstanding reads or
4631 # SQL queries and has not started a read or SQL query within the last 10
4632 # seconds. Idle transactions can be aborted by Cloud Spanner so that they
4633 # don&#x27;t hold on to locks indefinitely. In that case, the commit will
4634 # fail with error `ABORTED`.
4635 #
4636 # If this behavior is undesirable, periodically executing a simple
4637 # SQL query in the transaction (e.g., `SELECT 1`) prevents the
4638 # transaction from becoming idle.
4639 #
4640 # ## Snapshot Read-Only Transactions
4641 #
4642 # Snapshot read-only transactions provides a simpler method than
4643 # locking read-write transactions for doing several consistent
4644 # reads. However, this type of transaction does not support writes.
4645 #
4646 # Snapshot transactions do not take locks. Instead, they work by
4647 # choosing a Cloud Spanner timestamp, then executing all reads at that
4648 # timestamp. Since they do not acquire locks, they do not block
4649 # concurrent read-write transactions.
4650 #
4651 # Unlike locking read-write transactions, snapshot read-only
4652 # transactions never abort. They can fail if the chosen read
4653 # timestamp is garbage collected; however, the default garbage
4654 # collection policy is generous enough that most applications do not
4655 # need to worry about this in practice.
4656 #
4657 # Snapshot read-only transactions do not need to call
4658 # Commit or
4659 # Rollback (and in fact are not
4660 # permitted to do so).
4661 #
4662 # To execute a snapshot transaction, the client specifies a timestamp
4663 # bound, which tells Cloud Spanner how to choose a read timestamp.
4664 #
4665 # The types of timestamp bound are:
4666 #
4667 # - Strong (the default).
4668 # - Bounded staleness.
4669 # - Exact staleness.
4670 #
4671 # If the Cloud Spanner database to be read is geographically distributed,
4672 # stale read-only transactions can execute more quickly than strong
4673 # or read-write transactions, because they are able to execute far
4674 # from the leader replica.
4675 #
4676 # Each type of timestamp bound is discussed in detail below.
4677 #
4678 # ### Strong
4679 #
4680 # Strong reads are guaranteed to see the effects of all transactions
4681 # that have committed before the start of the read. Furthermore, all
4682 # rows yielded by a single read are consistent with each other -- if
4683 # any part of the read observes a transaction, all parts of the read
4684 # see the transaction.
4685 #
4686 # Strong reads are not repeatable: two consecutive strong read-only
4687 # transactions might return inconsistent results if there are
4688 # concurrent writes. If consistency across reads is required, the
4689 # reads should be executed within a transaction or at an exact read
4690 # timestamp.
4691 #
4692 # See TransactionOptions.ReadOnly.strong.
4693 #
4694 # ### Exact Staleness
4695 #
4696 # These timestamp bounds execute reads at a user-specified
4697 # timestamp. Reads at a timestamp are guaranteed to see a consistent
4698 # prefix of the global transaction history: they observe
4699 # modifications done by all transactions with a commit timestamp &lt;=
4700 # the read timestamp, and observe none of the modifications done by
4701 # transactions with a larger commit timestamp. They will block until
4702 # all conflicting transactions that may be assigned commit timestamps
4703 # &lt;= the read timestamp have finished.
4704 #
4705 # The timestamp can either be expressed as an absolute Cloud Spanner commit
4706 # timestamp or a staleness relative to the current time.
4707 #
4708 # These modes do not require a &quot;negotiation phase&quot; to pick a
4709 # timestamp. As a result, they execute slightly faster than the
4710 # equivalent boundedly stale concurrency modes. On the other hand,
4711 # boundedly stale reads usually return fresher results.
4712 #
4713 # See TransactionOptions.ReadOnly.read_timestamp and
4714 # TransactionOptions.ReadOnly.exact_staleness.
4715 #
4716 # ### Bounded Staleness
4717 #
4718 # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
4719 # subject to a user-provided staleness bound. Cloud Spanner chooses the
4720 # newest timestamp within the staleness bound that allows execution
4721 # of the reads at the closest available replica without blocking.
4722 #
4723 # All rows yielded are consistent with each other -- if any part of
4724 # the read observes a transaction, all parts of the read see the
4725 # transaction. Boundedly stale reads are not repeatable: two stale
4726 # reads, even if they use the same staleness bound, can execute at
4727 # different timestamps and thus return inconsistent results.
4728 #
4729 # Boundedly stale reads execute in two phases: the first phase
4730 # negotiates a timestamp among all replicas needed to serve the
4731 # read. In the second phase, reads are executed at the negotiated
4732 # timestamp.
4733 #
4734 # As a result of the two phase execution, bounded staleness reads are
4735 # usually a little slower than comparable exact staleness
4736 # reads. However, they are typically able to return fresher
4737 # results, and are more likely to execute at the closest replica.
4738 #
4739 # Because the timestamp negotiation requires up-front knowledge of
4740 # which rows will be read, it can only be used with single-use
4741 # read-only transactions.
4742 #
4743 # See TransactionOptions.ReadOnly.max_staleness and
4744 # TransactionOptions.ReadOnly.min_read_timestamp.
4745 #
4746 # ### Old Read Timestamps and Garbage Collection
4747 #
4748 # Cloud Spanner continuously garbage collects deleted and overwritten data
4749 # in the background to reclaim storage space. This process is known
4750 # as &quot;version GC&quot;. By default, version GC reclaims versions after they
4751 # are one hour old. Because of this, Cloud Spanner cannot perform reads
4752 # at read timestamps more than one hour in the past. This
4753 # restriction also applies to in-progress reads and/or SQL queries whose
4754 # timestamps become too old while executing. Reads and SQL queries with
4755 # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
4756 #
4757 # ## Partitioned DML Transactions
4758 #
4759 # Partitioned DML transactions are used to execute DML statements with a
4760 # different execution strategy that provides different, and often better,
4761 # scalability properties for large, table-wide operations than DML in a
4762 # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
4763 # should prefer using ReadWrite transactions.
4764 #
4765 # Partitioned DML partitions the keyspace and runs the DML statement on each
4766 # partition in separate, internal transactions. These transactions commit
4767 # automatically when complete, and run independently from one another.
4768 #
4769 # To reduce lock contention, this execution strategy only acquires read locks
4770 # on rows that match the WHERE clause of the statement. Additionally, the
4771 # smaller per-partition transactions hold locks for less time.
4772 #
4773 # That said, Partitioned DML is not a drop-in replacement for standard DML used
4774 # in ReadWrite transactions.
4775 #
4776 # - The DML statement must be fully-partitionable. Specifically, the statement
4777 # must be expressible as the union of many statements which each access only
4778 # a single row of the table.
4779 #
4780 # - The statement is not applied atomically to all rows of the table. Rather,
4781 # the statement is applied atomically to partitions of the table, in
4782 # independent transactions. Secondary index rows are updated atomically
4783 # with the base table rows.
4784 #
4785 # - Partitioned DML does not guarantee exactly-once execution semantics
4786 # against a partition. The statement will be applied at least once to each
4787 # partition. It is strongly recommended that the DML statement should be
4788 # idempotent to avoid unexpected results. For instance, it is potentially
4789 # dangerous to run a statement such as
4790 # `UPDATE table SET column = column + 1` as it could be run multiple times
4791 # against some rows.
4792 #
4793 # - The partitions are committed automatically - there is no support for
4794 # Commit or Rollback. If the call returns an error, or if the client issuing
4795 # the ExecuteSql call dies, it is possible that some rows had the statement
4796 # executed on them successfully. It is also possible that statement was
4797 # never executed against other rows.
4798 #
4799 # - Partitioned DML transactions may only contain the execution of a single
4800 # DML statement via ExecuteSql or ExecuteStreamingSql.
4801 #
4802 # - If any error is encountered during the execution of the partitioned DML
4803 # operation (for instance, a UNIQUE INDEX violation, division by zero, or a
4804 # value that cannot be stored due to schema constraints), then the
4805 # operation is stopped at that point and an error is returned. It is
4806 # possible that at this point, some partitions have been committed (or even
4807 # committed multiple times), and other partitions have not been run at all.
4808 #
4809 # Given the above, Partitioned DML is a good fit for large, database-wide
4810 # operations that are idempotent, such as deleting old rows from a very large
4811 # table.
4812 &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07004813 #
4814 # Authorization to begin a Partitioned DML transaction requires
4815 # `spanner.databases.beginPartitionedDmlTransaction` permission
4816 # on the `session` resource.
4817 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07004818 &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
4819 #
4820 # Authorization to begin a read-write transaction requires
4821 # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
4822 # on the `session` resource.
4823 # transaction type has no options.
4824 },
Bu Sun Kim65020912020-05-20 12:08:20 -07004825 &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
4826 #
4827 # Authorization to begin a read-only transaction requires
4828 # `spanner.databases.beginReadOnlyTransaction` permission
4829 # on the `session` resource.
Bu Sun Kim65020912020-05-20 12:08:20 -07004830 &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
4831 # seconds. Guarantees that all writes that have committed more
4832 # than the specified number of seconds ago are visible. Because
4833 # Cloud Spanner chooses the exact timestamp, this mode works even if
4834 # the client&#x27;s local clock is substantially skewed from Cloud Spanner
4835 # commit timestamps.
4836 #
4837 # Useful for reading the freshest data available at a nearby
4838 # replica, while bounding the possible staleness if the local
4839 # replica has fallen behind.
4840 #
4841 # Note that this option can only be used in single-use
4842 # transactions.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07004843 &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
4844 #
4845 # This is useful for requesting fresher data than some previous
4846 # read, or data that is fresh enough to observe the effects of some
4847 # previously committed transaction whose timestamp is known.
4848 #
4849 # Note that this option can only be used in single-use transactions.
4850 #
4851 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
4852 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
4853 &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
4854 # are visible.
Bu Sun Kim65020912020-05-20 12:08:20 -07004855 &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
4856 # the Transaction message that describes the transaction.
4857 &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
4858 # old. The timestamp is chosen soon after the read is started.
4859 #
4860 # Guarantees that all writes that have committed more than the
4861 # specified number of seconds ago are visible. Because Cloud Spanner
4862 # chooses the exact timestamp, this mode works even if the client&#x27;s
4863 # local clock is substantially skewed from Cloud Spanner commit
4864 # timestamps.
4865 #
4866 # Useful for reading at nearby replicas without the distributed
4867 # timestamp negotiation overhead of `max_staleness`.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07004868 &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
4869 # reads at a specific timestamp are repeatable; the same read at
4870 # the same timestamp always returns the same data. If the
4871 # timestamp is in the future, the read will block until the
4872 # specified timestamp, modulo the read&#x27;s deadline.
4873 #
4874 # Useful for large scale consistent reads such as mapreduces, or
4875 # for coordinating many reads against a consistent snapshot of the
4876 # data.
4877 #
4878 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
4879 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
4880 },
4881 },
4882 &quot;singleUse&quot;: { # # Transactions # Execute the read or SQL query in a temporary transaction.
4883 # This is the most efficient way to execute a transaction that
4884 # consists of a single SQL query.
4885 #
4886 #
4887 # Each session can have at most one active transaction at a time. After the
4888 # active transaction is completed, the session can immediately be
4889 # re-used for the next transaction. It is not necessary to create a
4890 # new session for each transaction.
4891 #
4892 # # Transaction Modes
4893 #
4894 # Cloud Spanner supports three transaction modes:
4895 #
4896 # 1. Locking read-write. This type of transaction is the only way
4897 # to write data into Cloud Spanner. These transactions rely on
4898 # pessimistic locking and, if necessary, two-phase commit.
4899 # Locking read-write transactions may abort, requiring the
4900 # application to retry.
4901 #
4902 # 2. Snapshot read-only. This transaction type provides guaranteed
4903 # consistency across several reads, but does not allow
4904 # writes. Snapshot read-only transactions can be configured to
4905 # read at timestamps in the past. Snapshot read-only
4906 # transactions do not need to be committed.
4907 #
4908 # 3. Partitioned DML. This type of transaction is used to execute
4909 # a single Partitioned DML statement. Partitioned DML partitions
4910 # the key space and runs the DML statement over each partition
4911 # in parallel using separate, internal transactions that commit
4912 # independently. Partitioned DML transactions do not need to be
4913 # committed.
4914 #
4915 # For transactions that only read, snapshot read-only transactions
4916 # provide simpler semantics and are almost always faster. In
4917 # particular, read-only transactions do not take locks, so they do
4918 # not conflict with read-write transactions. As a consequence of not
4919 # taking locks, they also do not abort, so retry loops are not needed.
4920 #
4921 # Transactions may only read/write data in a single database. They
4922 # may, however, read/write data in different tables within that
4923 # database.
4924 #
4925 # ## Locking Read-Write Transactions
4926 #
4927 # Locking transactions may be used to atomically read-modify-write
4928 # data anywhere in a database. This type of transaction is externally
4929 # consistent.
4930 #
4931 # Clients should attempt to minimize the amount of time a transaction
4932 # is active. Faster transactions commit with higher probability
4933 # and cause less contention. Cloud Spanner attempts to keep read locks
4934 # active as long as the transaction continues to do reads, and the
4935 # transaction has not been terminated by
4936 # Commit or
4937 # Rollback. Long periods of
4938 # inactivity at the client may cause Cloud Spanner to release a
4939 # transaction&#x27;s locks and abort it.
4940 #
4941 # Conceptually, a read-write transaction consists of zero or more
4942 # reads or SQL statements followed by
4943 # Commit. At any time before
4944 # Commit, the client can send a
4945 # Rollback request to abort the
4946 # transaction.
4947 #
4948 # ### Semantics
4949 #
4950 # Cloud Spanner can commit the transaction if all read locks it acquired
4951 # are still valid at commit time, and it is able to acquire write
4952 # locks for all writes. Cloud Spanner can abort the transaction for any
4953 # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
4954 # that the transaction has not modified any user data in Cloud Spanner.
4955 #
4956 # Unless the transaction commits, Cloud Spanner makes no guarantees about
4957 # how long the transaction&#x27;s locks were held for. It is an error to
4958 # use Cloud Spanner locks for any sort of mutual exclusion other than
4959 # between Cloud Spanner transactions themselves.
4960 #
4961 # ### Retrying Aborted Transactions
4962 #
4963 # When a transaction aborts, the application can choose to retry the
4964 # whole transaction again. To maximize the chances of successfully
4965 # committing the retry, the client should execute the retry in the
4966 # same session as the original attempt. The original session&#x27;s lock
4967 # priority increases with each consecutive abort, meaning that each
4968 # attempt has a slightly better chance of success than the previous.
4969 #
4970 # Under some circumstances (e.g., many transactions attempting to
4971 # modify the same row(s)), a transaction can abort many times in a
4972 # short period before successfully committing. Thus, it is not a good
4973 # idea to cap the number of retries a transaction can attempt;
4974 # instead, it is better to limit the total amount of wall time spent
4975 # retrying.
4976 #
4977 # ### Idle Transactions
4978 #
4979 # A transaction is considered idle if it has no outstanding reads or
4980 # SQL queries and has not started a read or SQL query within the last 10
4981 # seconds. Idle transactions can be aborted by Cloud Spanner so that they
4982 # don&#x27;t hold on to locks indefinitely. In that case, the commit will
4983 # fail with error `ABORTED`.
4984 #
4985 # If this behavior is undesirable, periodically executing a simple
4986 # SQL query in the transaction (e.g., `SELECT 1`) prevents the
4987 # transaction from becoming idle.
4988 #
4989 # ## Snapshot Read-Only Transactions
4990 #
4991 # Snapshot read-only transactions provide a simpler method than
4992 # locking read-write transactions for doing several consistent
4993 # reads. However, this type of transaction does not support writes.
4994 #
4995 # Snapshot transactions do not take locks. Instead, they work by
4996 # choosing a Cloud Spanner timestamp, then executing all reads at that
4997 # timestamp. Since they do not acquire locks, they do not block
4998 # concurrent read-write transactions.
4999 #
5000 # Unlike locking read-write transactions, snapshot read-only
5001 # transactions never abort. They can fail if the chosen read
5002 # timestamp is garbage collected; however, the default garbage
5003 # collection policy is generous enough that most applications do not
5004 # need to worry about this in practice.
5005 #
5006 # Snapshot read-only transactions do not need to call
5007 # Commit or
5008 # Rollback (and in fact are not
5009 # permitted to do so).
5010 #
5011 # To execute a snapshot transaction, the client specifies a timestamp
5012 # bound, which tells Cloud Spanner how to choose a read timestamp.
5013 #
5014 # The types of timestamp bound are:
5015 #
5016 # - Strong (the default).
5017 # - Bounded staleness.
5018 # - Exact staleness.
5019 #
5020 # If the Cloud Spanner database to be read is geographically distributed,
5021 # stale read-only transactions can execute more quickly than strong
5022 # or read-write transactions, because they are able to execute far
5023 # from the leader replica.
5024 #
5025 # Each type of timestamp bound is discussed in detail below.
5026 #
5027 # ### Strong
5028 #
5029 # Strong reads are guaranteed to see the effects of all transactions
5030 # that have committed before the start of the read. Furthermore, all
5031 # rows yielded by a single read are consistent with each other -- if
5032 # any part of the read observes a transaction, all parts of the read
5033 # see the transaction.
5034 #
5035 # Strong reads are not repeatable: two consecutive strong read-only
5036 # transactions might return inconsistent results if there are
5037 # concurrent writes. If consistency across reads is required, the
5038 # reads should be executed within a transaction or at an exact read
5039 # timestamp.
5040 #
5041 # See TransactionOptions.ReadOnly.strong.
5042 #
5043 # ### Exact Staleness
5044 #
5045 # These timestamp bounds execute reads at a user-specified
5046 # timestamp. Reads at a timestamp are guaranteed to see a consistent
5047 # prefix of the global transaction history: they observe
5048 # modifications done by all transactions with a commit timestamp &lt;=
5049 # the read timestamp, and observe none of the modifications done by
5050 # transactions with a larger commit timestamp. They will block until
5051 # all conflicting transactions that may be assigned commit timestamps
5052 # &lt;= the read timestamp have finished.
5053 #
5054 # The timestamp can either be expressed as an absolute Cloud Spanner commit
5055 # timestamp or a staleness relative to the current time.
5056 #
5057 # These modes do not require a &quot;negotiation phase&quot; to pick a
5058 # timestamp. As a result, they execute slightly faster than the
5059 # equivalent boundedly stale concurrency modes. On the other hand,
5060 # boundedly stale reads usually return fresher results.
5061 #
5062 # See TransactionOptions.ReadOnly.read_timestamp and
5063 # TransactionOptions.ReadOnly.exact_staleness.
5064 #
5065 # ### Bounded Staleness
5066 #
5067 # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
5068 # subject to a user-provided staleness bound. Cloud Spanner chooses the
5069 # newest timestamp within the staleness bound that allows execution
5070 # of the reads at the closest available replica without blocking.
5071 #
5072 # All rows yielded are consistent with each other -- if any part of
5073 # the read observes a transaction, all parts of the read see the
5074 # transaction. Boundedly stale reads are not repeatable: two stale
5075 # reads, even if they use the same staleness bound, can execute at
5076 # different timestamps and thus return inconsistent results.
5077 #
5078 # Boundedly stale reads execute in two phases: the first phase
5079 # negotiates a timestamp among all replicas needed to serve the
5080 # read. In the second phase, reads are executed at the negotiated
5081 # timestamp.
5082 #
5083 # As a result of the two phase execution, bounded staleness reads are
5084 # usually a little slower than comparable exact staleness
5085 # reads. However, they are typically able to return fresher
5086 # results, and are more likely to execute at the closest replica.
5087 #
5088 # Because the timestamp negotiation requires up-front knowledge of
5089 # which rows will be read, it can only be used with single-use
5090 # read-only transactions.
5091 #
5092 # See TransactionOptions.ReadOnly.max_staleness and
5093 # TransactionOptions.ReadOnly.min_read_timestamp.
5094 #
5095 # ### Old Read Timestamps and Garbage Collection
5096 #
5097 # Cloud Spanner continuously garbage collects deleted and overwritten data
5098 # in the background to reclaim storage space. This process is known
5099 # as &quot;version GC&quot;. By default, version GC reclaims versions after they
5100 # are one hour old. Because of this, Cloud Spanner cannot perform reads
5101 # at read timestamps more than one hour in the past. This
5102 # restriction also applies to in-progress reads and/or SQL queries whose
5103 # timestamps become too old while executing. Reads and SQL queries with
5104 # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
5105 #
5106 # ## Partitioned DML Transactions
5107 #
5108 # Partitioned DML transactions are used to execute DML statements with a
5109 # different execution strategy that provides different, and often better,
5110 # scalability properties for large, table-wide operations than DML in a
5111 # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
5112 # should prefer using ReadWrite transactions.
5113 #
5114 # Partitioned DML partitions the keyspace and runs the DML statement on each
5115 # partition in separate, internal transactions. These transactions commit
5116 # automatically when complete, and run independently from one another.
5117 #
5118 # To reduce lock contention, this execution strategy only acquires read locks
5119 # on rows that match the WHERE clause of the statement. Additionally, the
5120 # smaller per-partition transactions hold locks for less time.
5121 #
5122 # That said, Partitioned DML is not a drop-in replacement for standard DML used
5123 # in ReadWrite transactions.
5124 #
5125 # - The DML statement must be fully-partitionable. Specifically, the statement
5126 # must be expressible as the union of many statements which each access only
5127 # a single row of the table.
5128 #
5129 # - The statement is not applied atomically to all rows of the table. Rather,
5130 # the statement is applied atomically to partitions of the table, in
5131 # independent transactions. Secondary index rows are updated atomically
5132 # with the base table rows.
5133 #
5134 # - Partitioned DML does not guarantee exactly-once execution semantics
5135 # against a partition. The statement will be applied at least once to each
5136 # partition. It is strongly recommended that the DML statement should be
5137 # idempotent to avoid unexpected results. For instance, it is potentially
5138 # dangerous to run a statement such as
5139 # `UPDATE table SET column = column + 1` as it could be run multiple times
5140 # against some rows.
5141 #
5142 # - The partitions are committed automatically - there is no support for
5143 # Commit or Rollback. If the call returns an error, or if the client issuing
5144 # the ExecuteSql call dies, it is possible that some rows had the statement
5145 # executed on them successfully. It is also possible that statement was
5146 # never executed against other rows.
5147 #
5148 # - Partitioned DML transactions may only contain the execution of a single
5149 # DML statement via ExecuteSql or ExecuteStreamingSql.
5150 #
5151 # - If any error is encountered during the execution of the partitioned DML
5152 # operation (for instance, a UNIQUE INDEX violation, division by zero, or a
5153 # value that cannot be stored due to schema constraints), then the
5154 # operation is stopped at that point and an error is returned. It is
5155 # possible that at this point, some partitions have been committed (or even
5156 # committed multiple times), and other partitions have not been run at all.
5157 #
5158 # Given the above, Partitioned DML is a good fit for large, database-wide
5159 # operations that are idempotent, such as deleting old rows from a very large
5160 # table.
5161 &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
5162 #
5163 # Authorization to begin a Partitioned DML transaction requires
5164 # `spanner.databases.beginPartitionedDmlTransaction` permission
5165 # on the `session` resource.
Bu Sun Kim65020912020-05-20 12:08:20 -07005166 },
5167 &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
5168 #
5169 # Authorization to begin a read-write transaction requires
5170 # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
5171 # on the `session` resource.
5172 # transaction type has no options.
5173 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07005174 &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
5175 #
5176 # Authorization to begin a read-only transaction requires
5177 # `spanner.databases.beginReadOnlyTransaction` permission
5178 # on the `session` resource.
5179 &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
5180 # seconds. Guarantees that all writes that have committed more
5181 # than the specified number of seconds ago are visible. Because
5182 # Cloud Spanner chooses the exact timestamp, this mode works even if
5183 # the client&#x27;s local clock is substantially skewed from Cloud Spanner
5184 # commit timestamps.
5185 #
5186 # Useful for reading the freshest data available at a nearby
5187 # replica, while bounding the possible staleness if the local
5188 # replica has fallen behind.
5189 #
5190 # Note that this option can only be used in single-use
5191 # transactions.
5192 &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
5193 #
5194 # This is useful for requesting fresher data than some previous
5195 # read, or data that is fresh enough to observe the effects of some
5196 # previously committed transaction whose timestamp is known.
5197 #
5198 # Note that this option can only be used in single-use transactions.
5199 #
5200 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
5201 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
5202 &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
5203 # are visible.
5204 &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
5205 # the Transaction message that describes the transaction.
5206 &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
5207 # old. The timestamp is chosen soon after the read is started.
5208 #
5209 # Guarantees that all writes that have committed more than the
5210 # specified number of seconds ago are visible. Because Cloud Spanner
5211 # chooses the exact timestamp, this mode works even if the client&#x27;s
5212 # local clock is substantially skewed from Cloud Spanner commit
5213 # timestamps.
5214 #
5215 # Useful for reading at nearby replicas without the distributed
5216 # timestamp negotiation overhead of `max_staleness`.
5217 &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
5218 # reads at a specific timestamp are repeatable; the same read at
5219 # the same timestamp always returns the same data. If the
5220 # timestamp is in the future, the read will block until the
5221 # specified timestamp, modulo the read&#x27;s deadline.
5222 #
5223 # Useful for large scale consistent reads such as mapreduces, or
5224 # for coordinating many reads against a consistent snapshot of the
5225 # data.
5226 #
5227 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
5228 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
5229 },
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005230 },
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005231 },
Bu Sun Kim65020912020-05-20 12:08:20 -07005232 &quot;paramTypes&quot;: { # It is not always possible for Cloud Spanner to infer the right SQL type
5233 # from a JSON value. For example, values of type `BYTES` and values
5234 # of type `STRING` both appear in params as JSON strings.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005235 #
Bu Sun Kim65020912020-05-20 12:08:20 -07005236 # In these cases, `param_types` can be used to specify the exact
5237 # SQL type for some or all of the SQL query parameters. See the
5238 # definition of Type for more information
5239 # about SQL types.
5240 &quot;a_key&quot;: { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a
5241 # table cell or returned from an SQL query.
5242 &quot;arrayElementType&quot;: # Object with schema name: Type # If code == ARRAY, then `array_element_type`
5243 # is the type of the array elements.
5244 &quot;code&quot;: &quot;A String&quot;, # Required. The TypeCode for this type.
5245 &quot;structType&quot;: { # `StructType` defines the fields of a STRUCT type. # If code == STRUCT, then `struct_type`
5246 # provides type information for the struct&#x27;s fields.
5247 &quot;fields&quot;: [ # The list of fields that make up this struct. Order is
5248 # significant, because values of this struct type are represented as
5249 # lists, where the order of field values matches the order of
5250 # fields in the StructType. In turn, the order of fields
5251 # matches the order of columns in a read request, or the order of
5252 # fields in the `SELECT` clause of a query.
5253 { # Message representing a single field of a struct.
5254 &quot;name&quot;: &quot;A String&quot;, # The name of the field. For reads, this is the column name. For
5255 # SQL queries, it is the column alias (e.g., `&quot;Word&quot;` in the
5256 # query `&quot;SELECT &#x27;hello&#x27; AS Word&quot;`), or the column name (e.g.,
5257 # `&quot;ColName&quot;` in the query `&quot;SELECT ColName FROM Table&quot;`). Some
5258 # columns might have an empty name (e.g., `&quot;SELECT
5259 # UPPER(ColName)&quot;`). Note that a query result can contain
5260 # multiple fields with the same name.
5261 &quot;type&quot;: # Object with schema name: Type # The type of the field.
5262 },
5263 ],
5264 },
5265 },
5266 },
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005267 }
5268
5269 x__xgafv: string, V1 error format.
5270 Allowed values
5271 1 - v1 error format
5272 2 - v2 error format
5273
5274Returns:
5275 An object of the form:
5276
5277 { # The response for PartitionQuery
5278 # or PartitionRead
Bu Sun Kim65020912020-05-20 12:08:20 -07005279 &quot;partitions&quot;: [ # Partitions created by this request.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005280 { # Information returned for each partition returned in a
5281 # PartitionResponse.
Bu Sun Kim65020912020-05-20 12:08:20 -07005282 &quot;partitionToken&quot;: &quot;A String&quot;, # This token can be passed to Read, StreamingRead, ExecuteSql, or
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005283 # ExecuteStreamingSql requests to restrict the results to those identified by
5284 # this partition token.
5285 },
5286 ],
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07005287 &quot;transaction&quot;: { # A transaction. # Transaction created by this request.
5288 &quot;readTimestamp&quot;: &quot;A String&quot;, # For snapshot read-only transactions, the read timestamp chosen
5289 # for the transaction. Not returned by default: see
5290 # TransactionOptions.ReadOnly.return_read_timestamp.
5291 #
5292 # A timestamp in RFC3339 UTC &quot;Zulu&quot; format, accurate to nanoseconds.
5293 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
5294 &quot;id&quot;: &quot;A String&quot;, # `id` may be used to identify the transaction in subsequent
5295 # Read,
5296 # ExecuteSql,
5297 # Commit, or
5298 # Rollback calls.
5299 #
5300 # Single-use read-only transactions do not have IDs, because
5301 # single-use transactions do not support multiple requests.
5302 },
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005303 }</pre>
5304</div>
5305
5306<div class="method">
Dan O'Mearadd494642020-05-01 07:42:23 -07005307 <code class="details" id="partitionRead">partitionRead(session, body=None, x__xgafv=None)</code>
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005308 <pre>Creates a set of partition tokens that can be used to execute a read
5309operation in parallel. Each of the returned partition tokens can be used
5310by StreamingRead to specify a subset of the read
5311result to read. The same session and read-only transaction must be used by
5312the PartitionReadRequest used to create the partition tokens and the
5313ReadRequests that use the partition tokens. There are no ordering
5314guarantees on rows returned among the returned partition tokens, or even
5315within each individual StreamingRead call issued with a partition_token.
5316
5317Partition tokens become invalid when the session used to create them
5318is deleted, is idle for too long, begins a new transaction, or becomes too
5319old. When any of these happen, it is not possible to resume the read, and
5320the whole operation must be restarted from the beginning.
5321
5322Args:
5323 session: string, Required. The session used to create the partitions. (required)
Dan O'Mearadd494642020-05-01 07:42:23 -07005324 body: object, The request body.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005325 The object takes the form of:
5326
5327{ # The request for PartitionRead
Bu Sun Kim65020912020-05-20 12:08:20 -07005328 &quot;index&quot;: &quot;A String&quot;, # If non-empty, the name of an index on table. This index is
5329 # used instead of the table primary key when interpreting key_set
5330 # and sorting result rows. See key_set for further information.
5331 &quot;keySet&quot;: { # `KeySet` defines a collection of Cloud Spanner keys and/or key ranges. All # Required. `key_set` identifies the rows to be yielded. `key_set` names the
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005332 # primary keys of the rows in table to be yielded, unless index
5333 # is present. If index is present, then key_set instead names
5334 # index keys in index.
5335 #
5336 # It is not an error for the `key_set` to name rows that do not
5337 # exist in the database. Read yields nothing for nonexistent rows.
5338 # the keys are expected to be in the same table or index. The keys need
5339 # not be sorted in any particular way.
5340 #
5341 # If the same key is specified multiple times in the set (for example
5342 # if two ranges, two keys, or a key and a range overlap), Cloud Spanner
5343 # behaves as if the key were only specified once.
Bu Sun Kim65020912020-05-20 12:08:20 -07005344 &quot;ranges&quot;: [ # A list of key ranges. See KeyRange for more information about
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005345 # key range specifications.
5346 { # KeyRange represents a range of rows in a table or index.
5347 #
5348 # A range has a start key and an end key. These keys can be open or
5349 # closed, indicating if the range includes rows with that key.
5350 #
5351 # Keys are represented by lists, where the ith value in the list
5352 # corresponds to the ith component of the table or index primary key.
5353 # Individual values are encoded as described
5354 # here.
5355 #
5356 # For example, consider the following table definition:
5357 #
5358 # CREATE TABLE UserEvents (
5359 # UserName STRING(MAX),
5360 # EventDate STRING(10)
5361 # ) PRIMARY KEY(UserName, EventDate);
5362 #
5363 # The following keys name rows in this table:
5364 #
Bu Sun Kim65020912020-05-20 12:08:20 -07005365 # &quot;Bob&quot;, &quot;2014-09-23&quot;
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005366 #
Bu Sun Kim65020912020-05-20 12:08:20 -07005367 # Since the `UserEvents` table&#x27;s `PRIMARY KEY` clause names two
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005368 # columns, each `UserEvents` key has two elements; the first is the
5369 # `UserName`, and the second is the `EventDate`.
5370 #
5371 # Key ranges with multiple components are interpreted
Bu Sun Kim65020912020-05-20 12:08:20 -07005372 # lexicographically by component using the table or index key&#x27;s declared
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005373 # sort order. For example, the following range returns all events for
Bu Sun Kim65020912020-05-20 12:08:20 -07005374 # user `&quot;Bob&quot;` that occurred in the year 2015:
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005375 #
Bu Sun Kim65020912020-05-20 12:08:20 -07005376 # &quot;start_closed&quot;: [&quot;Bob&quot;, &quot;2015-01-01&quot;]
5377 # &quot;end_closed&quot;: [&quot;Bob&quot;, &quot;2015-12-31&quot;]
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005378 #
5379 # Start and end keys can omit trailing key components. This affects the
5380 # inclusion and exclusion of rows that exactly match the provided key
5381 # components: if the key is closed, then rows that exactly match the
5382 # provided components are included; if the key is open, then rows
5383 # that exactly match are not included.
5384 #
Bu Sun Kim65020912020-05-20 12:08:20 -07005385 # For example, the following range includes all events for `&quot;Bob&quot;` that
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005386 # occurred during and after the year 2000:
5387 #
Bu Sun Kim65020912020-05-20 12:08:20 -07005388 # &quot;start_closed&quot;: [&quot;Bob&quot;, &quot;2000-01-01&quot;]
5389 # &quot;end_closed&quot;: [&quot;Bob&quot;]
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005390 #
Bu Sun Kim65020912020-05-20 12:08:20 -07005391 # The next example retrieves all events for `&quot;Bob&quot;`:
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005392 #
Bu Sun Kim65020912020-05-20 12:08:20 -07005393 # &quot;start_closed&quot;: [&quot;Bob&quot;]
5394 # &quot;end_closed&quot;: [&quot;Bob&quot;]
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005395 #
5396 # To retrieve events before the year 2000:
5397 #
Bu Sun Kim65020912020-05-20 12:08:20 -07005398 # &quot;start_closed&quot;: [&quot;Bob&quot;]
5399 # &quot;end_open&quot;: [&quot;Bob&quot;, &quot;2000-01-01&quot;]
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005400 #
5401 # The following range includes all rows in the table:
5402 #
Bu Sun Kim65020912020-05-20 12:08:20 -07005403 # &quot;start_closed&quot;: []
5404 # &quot;end_closed&quot;: []
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005405 #
5406 # This range returns all users whose `UserName` begins with any
5407 # character from A to C:
5408 #
Bu Sun Kim65020912020-05-20 12:08:20 -07005409 # &quot;start_closed&quot;: [&quot;A&quot;]
5410 # &quot;end_open&quot;: [&quot;D&quot;]
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005411 #
5412 # This range returns all users whose `UserName` begins with B:
5413 #
Bu Sun Kim65020912020-05-20 12:08:20 -07005414 # &quot;start_closed&quot;: [&quot;B&quot;]
5415 # &quot;end_open&quot;: [&quot;C&quot;]
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005416 #
5417 # Key ranges honor column sort order. For example, suppose a table is
5418 # defined as follows:
5419 #
5420 # CREATE TABLE DescendingSortedTable (
5421 # Key INT64,
5422 # ...
5423 # ) PRIMARY KEY(Key DESC);
5424 #
5425 # The following range retrieves all rows with key values between 1
5426 # and 100 inclusive:
5427 #
Bu Sun Kim65020912020-05-20 12:08:20 -07005428 # &quot;start_closed&quot;: [&quot;100&quot;]
5429 # &quot;end_closed&quot;: [&quot;1&quot;]
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005430 #
5431 # Note that 100 is passed as the start, and 1 is passed as the end,
5432 # because `Key` is a descending column in the schema.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07005433 &quot;startOpen&quot;: [ # If the start is open, then the range excludes rows whose first
5434 # `len(start_open)` key columns exactly match `start_open`.
Bu Sun Kim65020912020-05-20 12:08:20 -07005435 &quot;&quot;,
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005436 ],
Bu Sun Kim65020912020-05-20 12:08:20 -07005437 &quot;endClosed&quot;: [ # If the end is closed, then the range includes all rows whose
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005438 # first `len(end_closed)` key columns exactly match `end_closed`.
Bu Sun Kim65020912020-05-20 12:08:20 -07005439 &quot;&quot;,
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005440 ],
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07005441 &quot;endOpen&quot;: [ # If the end is open, then the range excludes rows whose first
5442 # `len(end_open)` key columns exactly match `end_open`.
Bu Sun Kim65020912020-05-20 12:08:20 -07005443 &quot;&quot;,
5444 ],
5445 &quot;startClosed&quot;: [ # If the start is closed, then the range includes all rows whose
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005446 # first `len(start_closed)` key columns exactly match `start_closed`.
Bu Sun Kim65020912020-05-20 12:08:20 -07005447 &quot;&quot;,
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005448 ],
5449 },
5450 ],
Bu Sun Kim65020912020-05-20 12:08:20 -07005451 &quot;keys&quot;: [ # A list of specific keys. Entries in `keys` should have exactly as
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005452 # many elements as there are columns in the primary or index key
5453 # with which this `KeySet` is used. Individual key values are
5454 # encoded as described here.
5455 [
Bu Sun Kim65020912020-05-20 12:08:20 -07005456 &quot;&quot;,
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005457 ],
5458 ],
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07005459 &quot;all&quot;: True or False, # For convenience `all` can be set to `true` to indicate that this
5460 # `KeySet` matches all keys in the table or index. Note that any keys
5461 # specified in `keys` or `ranges` are only yielded once.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07005462 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07005463 &quot;partitionOptions&quot;: { # Options for a PartitionQueryRequest and # Additional options that affect how many partitions are created.
5464 # PartitionReadRequest.
5465 &quot;maxPartitions&quot;: &quot;A String&quot;, # **Note:** This hint is currently ignored by PartitionQuery and
5466 # PartitionRead requests.
5467 #
5468 # The desired maximum number of partitions to return. For example, this may
5469 # be set to the number of workers available. The default for this option
5470 # is currently 10,000. The maximum value is currently 200,000. This is only
5471 # a hint. The actual number of partitions returned may be smaller or larger
5472 # than this maximum count request.
5473 &quot;partitionSizeBytes&quot;: &quot;A String&quot;, # **Note:** This hint is currently ignored by PartitionQuery and
5474 # PartitionRead requests.
5475 #
5476 # The desired data size for each partition generated. The default for this
5477 # option is currently 1 GiB. This is only a hint. The actual size of each
5478 # partition may be smaller or larger than this size request.
5479 },
5480 &quot;transaction&quot;: { # This message is used to select the transaction in which a # Read only snapshot transactions are supported, read/write and single use
5481 # transactions are not.
5482 # Read or
5483 # ExecuteSql call runs.
5484 #
5485 # See TransactionOptions for more information about transactions.
5486 &quot;id&quot;: &quot;A String&quot;, # Execute the read or SQL query in a previously-started transaction.
5487 &quot;begin&quot;: { # # Transactions # Begin a new transaction and execute this read or SQL query in
5488 # it. The transaction ID of the new transaction is returned in
5489 # ResultSetMetadata.transaction, which is a Transaction.
5490 #
5491 #
5492 # Each session can have at most one active transaction at a time. After the
5493 # active transaction is completed, the session can immediately be
5494 # re-used for the next transaction. It is not necessary to create a
5495 # new session for each transaction.
5496 #
5497 # # Transaction Modes
5498 #
5499 # Cloud Spanner supports three transaction modes:
5500 #
5501 # 1. Locking read-write. This type of transaction is the only way
5502 # to write data into Cloud Spanner. These transactions rely on
5503 # pessimistic locking and, if necessary, two-phase commit.
5504 # Locking read-write transactions may abort, requiring the
5505 # application to retry.
5506 #
5507 # 2. Snapshot read-only. This transaction type provides guaranteed
5508 # consistency across several reads, but does not allow
5509 # writes. Snapshot read-only transactions can be configured to
5510 # read at timestamps in the past. Snapshot read-only
5511 # transactions do not need to be committed.
5512 #
5513 # 3. Partitioned DML. This type of transaction is used to execute
5514 # a single Partitioned DML statement. Partitioned DML partitions
5515 # the key space and runs the DML statement over each partition
5516 # in parallel using separate, internal transactions that commit
5517 # independently. Partitioned DML transactions do not need to be
5518 # committed.
5519 #
5520 # For transactions that only read, snapshot read-only transactions
5521 # provide simpler semantics and are almost always faster. In
5522 # particular, read-only transactions do not take locks, so they do
5523 # not conflict with read-write transactions. As a consequence of not
5524 # taking locks, they also do not abort, so retry loops are not needed.
5525 #
5526 # Transactions may only read/write data in a single database. They
5527 # may, however, read/write data in different tables within that
5528 # database.
5529 #
5530 # ## Locking Read-Write Transactions
5531 #
5532 # Locking transactions may be used to atomically read-modify-write
5533 # data anywhere in a database. This type of transaction is externally
5534 # consistent.
5535 #
5536 # Clients should attempt to minimize the amount of time a transaction
5537 # is active. Faster transactions commit with higher probability
5538 # and cause less contention. Cloud Spanner attempts to keep read locks
5539 # active as long as the transaction continues to do reads, and the
5540 # transaction has not been terminated by
5541 # Commit or
5542 # Rollback. Long periods of
5543 # inactivity at the client may cause Cloud Spanner to release a
5544 # transaction&#x27;s locks and abort it.
5545 #
5546 # Conceptually, a read-write transaction consists of zero or more
5547 # reads or SQL statements followed by
5548 # Commit. At any time before
5549 # Commit, the client can send a
5550 # Rollback request to abort the
5551 # transaction.
5552 #
5553 # ### Semantics
5554 #
5555 # Cloud Spanner can commit the transaction if all read locks it acquired
5556 # are still valid at commit time, and it is able to acquire write
5557 # locks for all writes. Cloud Spanner can abort the transaction for any
5558 # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
5559 # that the transaction has not modified any user data in Cloud Spanner.
5560 #
5561 # Unless the transaction commits, Cloud Spanner makes no guarantees about
5562 # how long the transaction&#x27;s locks were held for. It is an error to
5563 # use Cloud Spanner locks for any sort of mutual exclusion other than
5564 # between Cloud Spanner transactions themselves.
5565 #
5566 # ### Retrying Aborted Transactions
5567 #
5568 # When a transaction aborts, the application can choose to retry the
5569 # whole transaction again. To maximize the chances of successfully
5570 # committing the retry, the client should execute the retry in the
5571 # same session as the original attempt. The original session&#x27;s lock
5572 # priority increases with each consecutive abort, meaning that each
5573 # attempt has a slightly better chance of success than the previous.
5574 #
5575 # Under some circumstances (e.g., many transactions attempting to
5576 # modify the same row(s)), a transaction can abort many times in a
5577 # short period before successfully committing. Thus, it is not a good
5578 # idea to cap the number of retries a transaction can attempt;
5579 # instead, it is better to limit the total amount of wall time spent
5580 # retrying.
5581 #
5582 # ### Idle Transactions
5583 #
5584 # A transaction is considered idle if it has no outstanding reads or
5585 # SQL queries and has not started a read or SQL query within the last 10
5586 # seconds. Idle transactions can be aborted by Cloud Spanner so that they
5587 # don&#x27;t hold on to locks indefinitely. In that case, the commit will
5588 # fail with error `ABORTED`.
5589 #
5590 # If this behavior is undesirable, periodically executing a simple
5591 # SQL query in the transaction (e.g., `SELECT 1`) prevents the
5592 # transaction from becoming idle.
5593 #
5594 # ## Snapshot Read-Only Transactions
5595 #
5596 # Snapshot read-only transactions provides a simpler method than
5597 # locking read-write transactions for doing several consistent
5598 # reads. However, this type of transaction does not support writes.
5599 #
5600 # Snapshot transactions do not take locks. Instead, they work by
5601 # choosing a Cloud Spanner timestamp, then executing all reads at that
5602 # timestamp. Since they do not acquire locks, they do not block
5603 # concurrent read-write transactions.
5604 #
5605 # Unlike locking read-write transactions, snapshot read-only
5606 # transactions never abort. They can fail if the chosen read
5607 # timestamp is garbage collected; however, the default garbage
5608 # collection policy is generous enough that most applications do not
5609 # need to worry about this in practice.
5610 #
5611 # Snapshot read-only transactions do not need to call
5612 # Commit or
5613 # Rollback (and in fact are not
5614 # permitted to do so).
5615 #
5616 # To execute a snapshot transaction, the client specifies a timestamp
5617 # bound, which tells Cloud Spanner how to choose a read timestamp.
5618 #
5619 # The types of timestamp bound are:
5620 #
5621 # - Strong (the default).
5622 # - Bounded staleness.
5623 # - Exact staleness.
5624 #
5625 # If the Cloud Spanner database to be read is geographically distributed,
5626 # stale read-only transactions can execute more quickly than strong
5627 # or read-write transaction, because they are able to execute far
5628 # from the leader replica.
5629 #
5630 # Each type of timestamp bound is discussed in detail below.
5631 #
5632 # ### Strong
5633 #
5634 # Strong reads are guaranteed to see the effects of all transactions
5635 # that have committed before the start of the read. Furthermore, all
5636 # rows yielded by a single read are consistent with each other -- if
5637 # any part of the read observes a transaction, all parts of the read
5638 # see the transaction.
5639 #
5640 # Strong reads are not repeatable: two consecutive strong read-only
5641 # transactions might return inconsistent results if there are
5642 # concurrent writes. If consistency across reads is required, the
5643 # reads should be executed within a transaction or at an exact read
5644 # timestamp.
5645 #
5646 # See TransactionOptions.ReadOnly.strong.
5647 #
5648 # ### Exact Staleness
5649 #
5650 # These timestamp bounds execute reads at a user-specified
5651 # timestamp. Reads at a timestamp are guaranteed to see a consistent
5652 # prefix of the global transaction history: they observe
5653 # modifications done by all transactions with a commit timestamp &lt;=
5654 # the read timestamp, and observe none of the modifications done by
5655 # transactions with a larger commit timestamp. They will block until
5656 # all conflicting transactions that may be assigned commit timestamps
5657 # &lt;= the read timestamp have finished.
5658 #
5659 # The timestamp can either be expressed as an absolute Cloud Spanner commit
5660 # timestamp or a staleness relative to the current time.
5661 #
5662 # These modes do not require a &quot;negotiation phase&quot; to pick a
5663 # timestamp. As a result, they execute slightly faster than the
5664 # equivalent boundedly stale concurrency modes. On the other hand,
5665 # boundedly stale reads usually return fresher results.
5666 #
5667 # See TransactionOptions.ReadOnly.read_timestamp and
5668 # TransactionOptions.ReadOnly.exact_staleness.
5669 #
5670 # ### Bounded Staleness
5671 #
5672 # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
5673 # subject to a user-provided staleness bound. Cloud Spanner chooses the
5674 # newest timestamp within the staleness bound that allows execution
5675 # of the reads at the closest available replica without blocking.
5676 #
5677 # All rows yielded are consistent with each other -- if any part of
5678 # the read observes a transaction, all parts of the read see the
5679 # transaction. Boundedly stale reads are not repeatable: two stale
5680 # reads, even if they use the same staleness bound, can execute at
5681 # different timestamps and thus return inconsistent results.
5682 #
5683 # Boundedly stale reads execute in two phases: the first phase
5684 # negotiates a timestamp among all replicas needed to serve the
5685 # read. In the second phase, reads are executed at the negotiated
5686 # timestamp.
5687 #
5688 # As a result of the two phase execution, bounded staleness reads are
5689 # usually a little slower than comparable exact staleness
5690 # reads. However, they are typically able to return fresher
5691 # results, and are more likely to execute at the closest replica.
5692 #
5693 # Because the timestamp negotiation requires up-front knowledge of
5694 # which rows will be read, it can only be used with single-use
5695 # read-only transactions.
5696 #
5697 # See TransactionOptions.ReadOnly.max_staleness and
5698 # TransactionOptions.ReadOnly.min_read_timestamp.
5699 #
5700 # ### Old Read Timestamps and Garbage Collection
5701 #
5702 # Cloud Spanner continuously garbage collects deleted and overwritten data
5703 # in the background to reclaim storage space. This process is known
5704 # as &quot;version GC&quot;. By default, version GC reclaims versions after they
5705 # are one hour old. Because of this, Cloud Spanner cannot perform reads
5706 # at read timestamps more than one hour in the past. This
5707 # restriction also applies to in-progress reads and/or SQL queries whose
5708 # timestamp become too old while executing. Reads and SQL queries with
5709 # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
5710 #
5711 # ## Partitioned DML Transactions
5712 #
5713 # Partitioned DML transactions are used to execute DML statements with a
5714 # different execution strategy that provides different, and often better,
5715 # scalability properties for large, table-wide operations than DML in a
5716 # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
5717 # should prefer using ReadWrite transactions.
5718 #
5719 # Partitioned DML partitions the keyspace and runs the DML statement on each
5720 # partition in separate, internal transactions. These transactions commit
5721 # automatically when complete, and run independently from one another.
5722 #
5723 # To reduce lock contention, this execution strategy only acquires read locks
5724 # on rows that match the WHERE clause of the statement. Additionally, the
5725 # smaller per-partition transactions hold locks for less time.
5726 #
5727 # That said, Partitioned DML is not a drop-in replacement for standard DML used
5728 # in ReadWrite transactions.
5729 #
5730 # - The DML statement must be fully-partitionable. Specifically, the statement
5731 # must be expressible as the union of many statements which each access only
5732 # a single row of the table.
5733 #
5734 # - The statement is not applied atomically to all rows of the table. Rather,
5735 # the statement is applied atomically to partitions of the table, in
5736 # independent transactions. Secondary index rows are updated atomically
5737 # with the base table rows.
5738 #
5739 # - Partitioned DML does not guarantee exactly-once execution semantics
5740 # against a partition. The statement will be applied at least once to each
5741 # partition. It is strongly recommended that the DML statement should be
5742 # idempotent to avoid unexpected results. For instance, it is potentially
5743 # dangerous to run a statement such as
5744 # `UPDATE table SET column = column + 1` as it could be run multiple times
5745 # against some rows.
5746 #
5747 # - The partitions are committed automatically - there is no support for
5748 # Commit or Rollback. If the call returns an error, or if the client issuing
5749 # the ExecuteSql call dies, it is possible that some rows had the statement
5750 # executed on them successfully. It is also possible that statement was
5751 # never executed against other rows.
5752 #
5753 # - Partitioned DML transactions may only contain the execution of a single
5754 # DML statement via ExecuteSql or ExecuteStreamingSql.
5755 #
5756 # - If any error is encountered during the execution of the partitioned DML
5757 # operation (for instance, a UNIQUE INDEX violation, division by zero, or a
5758 # value that cannot be stored due to schema constraints), then the
5759 # operation is stopped at that point and an error is returned. It is
5760 # possible that at this point, some partitions have been committed (or even
5761 # committed multiple times), and other partitions have not been run at all.
5762 #
5763 # Given the above, Partitioned DML is good fit for large, database-wide,
5764 # operations that are idempotent, such as deleting old rows from a very large
5765 # table.
5766 &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
5767 #
5768 # Authorization to begin a Partitioned DML transaction requires
5769 # `spanner.databases.beginPartitionedDmlTransaction` permission
5770 # on the `session` resource.
5771 },
5772 &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
5773 #
5774 # Authorization to begin a read-write transaction requires
5775 # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
5776 # on the `session` resource.
5777 # transaction type has no options.
5778 },
5779 &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
5780 #
5781 # Authorization to begin a read-only transaction requires
5782 # `spanner.databases.beginReadOnlyTransaction` permission
5783 # on the `session` resource.
5784 &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
5785 # seconds. Guarantees that all writes that have committed more
5786 # than the specified number of seconds ago are visible. Because
5787 # Cloud Spanner chooses the exact timestamp, this mode works even if
5788 # the client&#x27;s local clock is substantially skewed from Cloud Spanner
5789 # commit timestamps.
5790 #
5791 # Useful for reading the freshest data available at a nearby
5792 # replica, while bounding the possible staleness if the local
5793 # replica has fallen behind.
5794 #
5795 # Note that this option can only be used in single-use
5796 # transactions.
5797 &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
5798 #
5799 # This is useful for requesting fresher data than some previous
5800 # read, or data that is fresh enough to observe the effects of some
5801 # previously committed transaction whose timestamp is known.
5802 #
5803 # Note that this option can only be used in single-use transactions.
5804 #
5805 # A timestamp in RFC3339 UTC &quot;Zulu&quot; format, accurate to nanoseconds.
5806 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
5807 &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
5808 # are visible.
5809 &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
5810 # the Transaction message that describes the transaction.
5811 &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
5812 # old. The timestamp is chosen soon after the read is started.
5813 #
5814 # Guarantees that all writes that have committed more than the
5815 # specified number of seconds ago are visible. Because Cloud Spanner
5816 # chooses the exact timestamp, this mode works even if the client&#x27;s
5817 # local clock is substantially skewed from Cloud Spanner commit
5818 # timestamps.
5819 #
5820 # Useful for reading at nearby replicas without the distributed
5821 # timestamp negotiation overhead of `max_staleness`.
5822 &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
5823 # reads at a specific timestamp are repeatable; the same read at
5824 # the same timestamp always returns the same data. If the
5825 # timestamp is in the future, the read will block until the
5826 # specified timestamp, modulo the read&#x27;s deadline.
5827 #
5828 # Useful for large scale consistent reads such as mapreduces, or
5829 # for coordinating many reads against a consistent snapshot of the
5830 # data.
5831 #
5832 # A timestamp in RFC3339 UTC &quot;Zulu&quot; format, accurate to nanoseconds.
5833 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
5834 },
5835 },
5836 &quot;singleUse&quot;: { # # Transactions # Execute the read or SQL query in a temporary transaction.
5837 # This is the most efficient way to execute a transaction that
5838 # consists of a single SQL query.
5839 #
5840 #
5841 # Each session can have at most one active transaction at a time. After the
5842 # active transaction is completed, the session can immediately be
5843 # re-used for the next transaction. It is not necessary to create a
5844 # new session for each transaction.
5845 #
5846 # # Transaction Modes
5847 #
5848 # Cloud Spanner supports three transaction modes:
5849 #
5850 # 1. Locking read-write. This type of transaction is the only way
5851 # to write data into Cloud Spanner. These transactions rely on
5852 # pessimistic locking and, if necessary, two-phase commit.
5853 # Locking read-write transactions may abort, requiring the
5854 # application to retry.
5855 #
5856 # 2. Snapshot read-only. This transaction type provides guaranteed
5857 # consistency across several reads, but does not allow
5858 # writes. Snapshot read-only transactions can be configured to
5859 # read at timestamps in the past. Snapshot read-only
5860 # transactions do not need to be committed.
5861 #
5862 # 3. Partitioned DML. This type of transaction is used to execute
5863 # a single Partitioned DML statement. Partitioned DML partitions
5864 # the key space and runs the DML statement over each partition
5865 # in parallel using separate, internal transactions that commit
5866 # independently. Partitioned DML transactions do not need to be
5867 # committed.
5868 #
5869 # For transactions that only read, snapshot read-only transactions
5870 # provide simpler semantics and are almost always faster. In
5871 # particular, read-only transactions do not take locks, so they do
5872 # not conflict with read-write transactions. As a consequence of not
5873 # taking locks, they also do not abort, so retry loops are not needed.
5874 #
5875 # Transactions may only read/write data in a single database. They
5876 # may, however, read/write data in different tables within that
5877 # database.
5878 #
5879 # ## Locking Read-Write Transactions
5880 #
5881 # Locking transactions may be used to atomically read-modify-write
5882 # data anywhere in a database. This type of transaction is externally
5883 # consistent.
5884 #
5885 # Clients should attempt to minimize the amount of time a transaction
5886 # is active. Faster transactions commit with higher probability
5887 # and cause less contention. Cloud Spanner attempts to keep read locks
5888 # active as long as the transaction continues to do reads, and the
5889 # transaction has not been terminated by
5890 # Commit or
5891 # Rollback. Long periods of
5892 # inactivity at the client may cause Cloud Spanner to release a
5893 # transaction&#x27;s locks and abort it.
5894 #
5895 # Conceptually, a read-write transaction consists of zero or more
5896 # reads or SQL statements followed by
5897 # Commit. At any time before
5898 # Commit, the client can send a
5899 # Rollback request to abort the
5900 # transaction.
5901 #
5902 # ### Semantics
5903 #
5904 # Cloud Spanner can commit the transaction if all read locks it acquired
5905 # are still valid at commit time, and it is able to acquire write
5906 # locks for all writes. Cloud Spanner can abort the transaction for any
5907 # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
5908 # that the transaction has not modified any user data in Cloud Spanner.
5909 #
5910 # Unless the transaction commits, Cloud Spanner makes no guarantees about
5911 # how long the transaction&#x27;s locks were held for. It is an error to
5912 # use Cloud Spanner locks for any sort of mutual exclusion other than
5913 # between Cloud Spanner transactions themselves.
5914 #
5915 # ### Retrying Aborted Transactions
5916 #
5917 # When a transaction aborts, the application can choose to retry the
5918 # whole transaction again. To maximize the chances of successfully
5919 # committing the retry, the client should execute the retry in the
5920 # same session as the original attempt. The original session&#x27;s lock
5921 # priority increases with each consecutive abort, meaning that each
5922 # attempt has a slightly better chance of success than the previous.
5923 #
5924 # Under some circumstances (e.g., many transactions attempting to
5925 # modify the same row(s)), a transaction can abort many times in a
5926 # short period before successfully committing. Thus, it is not a good
5927 # idea to cap the number of retries a transaction can attempt;
5928 # instead, it is better to limit the total amount of wall time spent
5929 # retrying.
5930 #
5931 # ### Idle Transactions
5932 #
5933 # A transaction is considered idle if it has no outstanding reads or
5934 # SQL queries and has not started a read or SQL query within the last 10
5935 # seconds. Idle transactions can be aborted by Cloud Spanner so that they
5936 # don&#x27;t hold on to locks indefinitely. In that case, the commit will
5937 # fail with error `ABORTED`.
5938 #
5939 # If this behavior is undesirable, periodically executing a simple
5940 # SQL query in the transaction (e.g., `SELECT 1`) prevents the
5941 # transaction from becoming idle.
5942 #
5943 # ## Snapshot Read-Only Transactions
5944 #
5945 # Snapshot read-only transactions provide a simpler method than
5946 # locking read-write transactions for doing several consistent
5947 # reads. However, this type of transaction does not support writes.
5948 #
5949 # Snapshot transactions do not take locks. Instead, they work by
5950 # choosing a Cloud Spanner timestamp, then executing all reads at that
5951 # timestamp. Since they do not acquire locks, they do not block
5952 # concurrent read-write transactions.
5953 #
5954 # Unlike locking read-write transactions, snapshot read-only
5955 # transactions never abort. They can fail if the chosen read
5956 # timestamp is garbage collected; however, the default garbage
5957 # collection policy is generous enough that most applications do not
5958 # need to worry about this in practice.
5959 #
5960 # Snapshot read-only transactions do not need to call
5961 # Commit or
5962 # Rollback (and in fact are not
5963 # permitted to do so).
5964 #
5965 # To execute a snapshot transaction, the client specifies a timestamp
5966 # bound, which tells Cloud Spanner how to choose a read timestamp.
5967 #
5968 # The types of timestamp bound are:
5969 #
5970 # - Strong (the default).
5971 # - Bounded staleness.
5972 # - Exact staleness.
5973 #
5974 # If the Cloud Spanner database to be read is geographically distributed,
5975 # stale read-only transactions can execute more quickly than strong
5976 # or read-write transactions, because they are able to execute far
5977 # from the leader replica.
5978 #
5979 # Each type of timestamp bound is discussed in detail below.
5980 #
5981 # ### Strong
5982 #
5983 # Strong reads are guaranteed to see the effects of all transactions
5984 # that have committed before the start of the read. Furthermore, all
5985 # rows yielded by a single read are consistent with each other -- if
5986 # any part of the read observes a transaction, all parts of the read
5987 # see the transaction.
5988 #
5989 # Strong reads are not repeatable: two consecutive strong read-only
5990 # transactions might return inconsistent results if there are
5991 # concurrent writes. If consistency across reads is required, the
5992 # reads should be executed within a transaction or at an exact read
5993 # timestamp.
5994 #
5995 # See TransactionOptions.ReadOnly.strong.
5996 #
5997 # ### Exact Staleness
5998 #
5999 # These timestamp bounds execute reads at a user-specified
6000 # timestamp. Reads at a timestamp are guaranteed to see a consistent
6001 # prefix of the global transaction history: they observe
6002 # modifications done by all transactions with a commit timestamp &lt;=
6003 # the read timestamp, and observe none of the modifications done by
6004 # transactions with a larger commit timestamp. They will block until
6005 # all conflicting transactions that may be assigned commit timestamps
6006 # &lt;= the read timestamp have finished.
6007 #
6008 # The timestamp can either be expressed as an absolute Cloud Spanner commit
6009 # timestamp or a staleness relative to the current time.
6010 #
6011 # These modes do not require a &quot;negotiation phase&quot; to pick a
6012 # timestamp. As a result, they execute slightly faster than the
6013 # equivalent boundedly stale concurrency modes. On the other hand,
6014 # boundedly stale reads usually return fresher results.
6015 #
6016 # See TransactionOptions.ReadOnly.read_timestamp and
6017 # TransactionOptions.ReadOnly.exact_staleness.
6018 #
6019 # ### Bounded Staleness
6020 #
6021 # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
6022 # subject to a user-provided staleness bound. Cloud Spanner chooses the
6023 # newest timestamp within the staleness bound that allows execution
6024 # of the reads at the closest available replica without blocking.
6025 #
6026 # All rows yielded are consistent with each other -- if any part of
6027 # the read observes a transaction, all parts of the read see the
6028 # transaction. Boundedly stale reads are not repeatable: two stale
6029 # reads, even if they use the same staleness bound, can execute at
6030 # different timestamps and thus return inconsistent results.
6031 #
6032 # Boundedly stale reads execute in two phases: the first phase
6033 # negotiates a timestamp among all replicas needed to serve the
6034 # read. In the second phase, reads are executed at the negotiated
6035 # timestamp.
6036 #
6037 # As a result of the two phase execution, bounded staleness reads are
6038 # usually a little slower than comparable exact staleness
6039 # reads. However, they are typically able to return fresher
6040 # results, and are more likely to execute at the closest replica.
6041 #
6042 # Because the timestamp negotiation requires up-front knowledge of
6043 # which rows will be read, it can only be used with single-use
6044 # read-only transactions.
6045 #
6046 # See TransactionOptions.ReadOnly.max_staleness and
6047 # TransactionOptions.ReadOnly.min_read_timestamp.
6048 #
6049 # ### Old Read Timestamps and Garbage Collection
6050 #
6051 # Cloud Spanner continuously garbage collects deleted and overwritten data
6052 # in the background to reclaim storage space. This process is known
6053 # as &quot;version GC&quot;. By default, version GC reclaims versions after they
6054 # are one hour old. Because of this, Cloud Spanner cannot perform reads
6055 # at read timestamps more than one hour in the past. This
6056 # restriction also applies to in-progress reads and/or SQL queries whose
6057 # timestamps become too old while executing. Reads and SQL queries with
6058 # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
6059 #
6060 # ## Partitioned DML Transactions
6061 #
6062 # Partitioned DML transactions are used to execute DML statements with a
6063 # different execution strategy that provides different, and often better,
6064 # scalability properties for large, table-wide operations than DML in a
6065 # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
6066 # should prefer using ReadWrite transactions.
6067 #
6068 # Partitioned DML partitions the keyspace and runs the DML statement on each
6069 # partition in separate, internal transactions. These transactions commit
6070 # automatically when complete, and run independently from one another.
6071 #
6072 # To reduce lock contention, this execution strategy only acquires read locks
6073 # on rows that match the WHERE clause of the statement. Additionally, the
6074 # smaller per-partition transactions hold locks for less time.
6075 #
6076 # That said, Partitioned DML is not a drop-in replacement for standard DML used
6077 # in ReadWrite transactions.
6078 #
6079 # - The DML statement must be fully-partitionable. Specifically, the statement
6080 # must be expressible as the union of many statements which each access only
6081 # a single row of the table.
6082 #
6083 # - The statement is not applied atomically to all rows of the table. Rather,
6084 # the statement is applied atomically to partitions of the table, in
6085 # independent transactions. Secondary index rows are updated atomically
6086 # with the base table rows.
6087 #
6088 # - Partitioned DML does not guarantee exactly-once execution semantics
6089 # against a partition. The statement will be applied at least once to each
6090 # partition. It is strongly recommended that the DML statement should be
6091 # idempotent to avoid unexpected results. For instance, it is potentially
6092 # dangerous to run a statement such as
6093 # `UPDATE table SET column = column + 1` as it could be run multiple times
6094 # against some rows.
6095 #
6096 # - The partitions are committed automatically - there is no support for
6097 # Commit or Rollback. If the call returns an error, or if the client issuing
6098 # the ExecuteSql call dies, it is possible that some rows had the statement
6099 # executed on them successfully. It is also possible that statement was
6100 # never executed against other rows.
6101 #
6102 # - Partitioned DML transactions may only contain the execution of a single
6103 # DML statement via ExecuteSql or ExecuteStreamingSql.
6104 #
6105 # - If any error is encountered during the execution of the partitioned DML
6106 # operation (for instance, a UNIQUE INDEX violation, division by zero, or a
6107 # value that cannot be stored due to schema constraints), then the
6108 # operation is stopped at that point and an error is returned. It is
6109 # possible that at this point, some partitions have been committed (or even
6110 # committed multiple times), and other partitions have not been run at all.
6111 #
6112 # Given the above, Partitioned DML is a good fit for large, database-wide
6113 # operations that are idempotent, such as deleting old rows from a very large
6114 # table.
6115 &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
6116 #
6117 # Authorization to begin a Partitioned DML transaction requires
6118 # `spanner.databases.beginPartitionedDmlTransaction` permission
6119 # on the `session` resource.
6120 },
6121 &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
6122 #
6123 # Authorization to begin a read-write transaction requires
6124 # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
6125 # on the `session` resource.
6126 # transaction type has no options.
6127 },
6128 &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
6129 #
6130 # Authorization to begin a read-only transaction requires
6131 # `spanner.databases.beginReadOnlyTransaction` permission
6132 # on the `session` resource.
6133 &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
6134 # seconds. Guarantees that all writes that have committed more
6135 # than the specified number of seconds ago are visible. Because
6136 # Cloud Spanner chooses the exact timestamp, this mode works even if
6137 # the client&#x27;s local clock is substantially skewed from Cloud Spanner
6138 # commit timestamps.
6139 #
6140 # Useful for reading the freshest data available at a nearby
6141 # replica, while bounding the possible staleness if the local
6142 # replica has fallen behind.
6143 #
6144 # Note that this option can only be used in single-use
6145 # transactions.
6146 &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
6147 #
6148 # This is useful for requesting fresher data than some previous
6149 # read, or data that is fresh enough to observe the effects of some
6150 # previously committed transaction whose timestamp is known.
6151 #
6152 # Note that this option can only be used in single-use transactions.
6153 #
6154 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
6155 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
6156 &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
6157 # are visible.
6158 &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
6159 # the Transaction message that describes the transaction.
6160 &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
6161 # old. The timestamp is chosen soon after the read is started.
6162 #
6163 # Guarantees that all writes that have committed more than the
6164 # specified number of seconds ago are visible. Because Cloud Spanner
6165 # chooses the exact timestamp, this mode works even if the client&#x27;s
6166 # local clock is substantially skewed from Cloud Spanner commit
6167 # timestamps.
6168 #
6169 # Useful for reading at nearby replicas without the distributed
6170 # timestamp negotiation overhead of `max_staleness`.
6171 &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
6172 # reads at a specific timestamp are repeatable; the same read at
6173 # the same timestamp always returns the same data. If the
6174 # timestamp is in the future, the read will block until the
6175 # specified timestamp, modulo the read&#x27;s deadline.
6176 #
6177 # Useful for large scale consistent reads such as mapreduces, or
6178 # for coordinating many reads against a consistent snapshot of the
6179 # data.
6180 #
6181 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
6182 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
6183 },
6184 },
6185 },
6186 &quot;columns&quot;: [ # The columns of table to be returned for each row matching
6187 # this request.
6188 &quot;A String&quot;,
6189 ],
6190 &quot;table&quot;: &quot;A String&quot;, # Required. The name of the table in the database to be read.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07006191 }
6192
6193 x__xgafv: string, V1 error format.
6194 Allowed values
6195 1 - v1 error format
6196 2 - v2 error format
6197
6198Returns:
6199 An object of the form:
6200
6201 { # The response for PartitionQuery
6202 # or PartitionRead
Bu Sun Kim65020912020-05-20 12:08:20 -07006203 &quot;partitions&quot;: [ # Partitions created by this request.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07006204 { # Information returned for each partition returned in a
6205 # PartitionResponse.
Bu Sun Kim65020912020-05-20 12:08:20 -07006206 &quot;partitionToken&quot;: &quot;A String&quot;, # This token can be passed to Read, StreamingRead, ExecuteSql, or
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07006207 # ExecuteStreamingSql requests to restrict the results to those identified by
6208 # this partition token.
6209 },
6210 ],
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07006211 &quot;transaction&quot;: { # A transaction. # Transaction created by this request.
6212 &quot;readTimestamp&quot;: &quot;A String&quot;, # For snapshot read-only transactions, the read timestamp chosen
6213 # for the transaction. Not returned by default: see
6214 # TransactionOptions.ReadOnly.return_read_timestamp.
6215 #
6216 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
6217 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
6218 &quot;id&quot;: &quot;A String&quot;, # `id` may be used to identify the transaction in subsequent
6219 # Read,
6220 # ExecuteSql,
6221 # Commit, or
6222 # Rollback calls.
6223 #
6224 # Single-use read-only transactions do not have IDs, because
6225 # single-use transactions do not support multiple requests.
6226 },
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04006227 }</pre>
6228</div>
6229
6230<div class="method">
Dan O'Mearadd494642020-05-01 07:42:23 -07006231 <code class="details" id="read">read(session, body=None, x__xgafv=None)</code>
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04006232 <pre>Reads rows from the database using key lookups and scans, as a
6233simple key/value style alternative to
6234ExecuteSql. This method cannot be used to
6235return a result set larger than 10 MiB; if the read matches more
6236data than that, the read fails with a `FAILED_PRECONDITION`
6237error.
6238
6239Reads inside read-write transactions might return `ABORTED`. If
6240this occurs, the application should restart the transaction from
6241the beginning. See Transaction for more details.
6242
6243Larger result sets can be yielded in streaming fashion by calling
6244StreamingRead instead.
6245
6246Args:
6247 session: string, Required. The session in which the read should be performed. (required)
Dan O'Mearadd494642020-05-01 07:42:23 -07006248 body: object, The request body.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04006249 The object takes the form of:
6250
6251{ # The request for Read and
6252 # StreamingRead.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07006253 &quot;transaction&quot;: { # This message is used to select the transaction in which a # The transaction to use. If none is provided, the default is a
6254 # temporary read-only transaction with strong concurrency.
6255 # Read or
6256 # ExecuteSql call runs.
6257 #
6258 # See TransactionOptions for more information about transactions.
6259 &quot;id&quot;: &quot;A String&quot;, # Execute the read or SQL query in a previously-started transaction.
6260 &quot;begin&quot;: { # # Transactions # Begin a new transaction and execute this read or SQL query in
6261 # it. The transaction ID of the new transaction is returned in
6262 # ResultSetMetadata.transaction, which is a Transaction.
6263 #
6264 #
6265 # Each session can have at most one active transaction at a time. After the
6266 # active transaction is completed, the session can immediately be
6267 # re-used for the next transaction. It is not necessary to create a
6268 # new session for each transaction.
6269 #
6270 # # Transaction Modes
6271 #
6272 # Cloud Spanner supports three transaction modes:
6273 #
6274 # 1. Locking read-write. This type of transaction is the only way
6275 # to write data into Cloud Spanner. These transactions rely on
6276 # pessimistic locking and, if necessary, two-phase commit.
6277 # Locking read-write transactions may abort, requiring the
6278 # application to retry.
6279 #
6280 # 2. Snapshot read-only. This transaction type provides guaranteed
6281 # consistency across several reads, but does not allow
6282 # writes. Snapshot read-only transactions can be configured to
6283 # read at timestamps in the past. Snapshot read-only
6284 # transactions do not need to be committed.
6285 #
6286 # 3. Partitioned DML. This type of transaction is used to execute
6287 # a single Partitioned DML statement. Partitioned DML partitions
6288 # the key space and runs the DML statement over each partition
6289 # in parallel using separate, internal transactions that commit
6290 # independently. Partitioned DML transactions do not need to be
6291 # committed.
6292 #
6293 # For transactions that only read, snapshot read-only transactions
6294 # provide simpler semantics and are almost always faster. In
6295 # particular, read-only transactions do not take locks, so they do
6296 # not conflict with read-write transactions. As a consequence of not
6297 # taking locks, they also do not abort, so retry loops are not needed.
6298 #
6299 # Transactions may only read/write data in a single database. They
6300 # may, however, read/write data in different tables within that
6301 # database.
6302 #
6303 # ## Locking Read-Write Transactions
6304 #
6305 # Locking transactions may be used to atomically read-modify-write
6306 # data anywhere in a database. This type of transaction is externally
6307 # consistent.
6308 #
6309 # Clients should attempt to minimize the amount of time a transaction
6310 # is active. Faster transactions commit with higher probability
6311 # and cause less contention. Cloud Spanner attempts to keep read locks
6312 # active as long as the transaction continues to do reads, and the
6313 # transaction has not been terminated by
6314 # Commit or
6315 # Rollback. Long periods of
6316 # inactivity at the client may cause Cloud Spanner to release a
6317 # transaction&#x27;s locks and abort it.
6318 #
6319 # Conceptually, a read-write transaction consists of zero or more
6320 # reads or SQL statements followed by
6321 # Commit. At any time before
6322 # Commit, the client can send a
6323 # Rollback request to abort the
6324 # transaction.
6325 #
6326 # ### Semantics
6327 #
6328 # Cloud Spanner can commit the transaction if all read locks it acquired
6329 # are still valid at commit time, and it is able to acquire write
6330 # locks for all writes. Cloud Spanner can abort the transaction for any
6331 # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
6332 # that the transaction has not modified any user data in Cloud Spanner.
6333 #
6334 # Unless the transaction commits, Cloud Spanner makes no guarantees about
6335 # how long the transaction&#x27;s locks were held for. It is an error to
6336 # use Cloud Spanner locks for any sort of mutual exclusion other than
6337 # between Cloud Spanner transactions themselves.
6338 #
6339 # ### Retrying Aborted Transactions
6340 #
6341 # When a transaction aborts, the application can choose to retry the
6342 # whole transaction again. To maximize the chances of successfully
6343 # committing the retry, the client should execute the retry in the
6344 # same session as the original attempt. The original session&#x27;s lock
6345 # priority increases with each consecutive abort, meaning that each
6346 # attempt has a slightly better chance of success than the previous.
6347 #
6348 # Under some circumstances (e.g., many transactions attempting to
6349 # modify the same row(s)), a transaction can abort many times in a
6350 # short period before successfully committing. Thus, it is not a good
6351 # idea to cap the number of retries a transaction can attempt;
6352 # instead, it is better to limit the total amount of wall time spent
6353 # retrying.
6354 #
6355 # ### Idle Transactions
6356 #
6357 # A transaction is considered idle if it has no outstanding reads or
6358 # SQL queries and has not started a read or SQL query within the last 10
6359 # seconds. Idle transactions can be aborted by Cloud Spanner so that they
6360 # don&#x27;t hold on to locks indefinitely. In that case, the commit will
6361 # fail with error `ABORTED`.
6362 #
6363 # If this behavior is undesirable, periodically executing a simple
6364 # SQL query in the transaction (e.g., `SELECT 1`) prevents the
6365 # transaction from becoming idle.
6366 #
6367 # ## Snapshot Read-Only Transactions
6368 #
6369 # Snapshot read-only transactions provide a simpler method than
6370 # locking read-write transactions for doing several consistent
6371 # reads. However, this type of transaction does not support writes.
6372 #
6373 # Snapshot transactions do not take locks. Instead, they work by
6374 # choosing a Cloud Spanner timestamp, then executing all reads at that
6375 # timestamp. Since they do not acquire locks, they do not block
6376 # concurrent read-write transactions.
6377 #
6378 # Unlike locking read-write transactions, snapshot read-only
6379 # transactions never abort. They can fail if the chosen read
6380 # timestamp is garbage collected; however, the default garbage
6381 # collection policy is generous enough that most applications do not
6382 # need to worry about this in practice.
6383 #
6384 # Snapshot read-only transactions do not need to call
6385 # Commit or
6386 # Rollback (and in fact are not
6387 # permitted to do so).
6388 #
6389 # To execute a snapshot transaction, the client specifies a timestamp
6390 # bound, which tells Cloud Spanner how to choose a read timestamp.
6391 #
6392 # The types of timestamp bound are:
6393 #
6394 # - Strong (the default).
6395 # - Bounded staleness.
6396 # - Exact staleness.
6397 #
6398 # If the Cloud Spanner database to be read is geographically distributed,
6399 # stale read-only transactions can execute more quickly than strong
6400 # or read-write transactions, because they are able to execute far
6401 # from the leader replica.
6402 #
6403 # Each type of timestamp bound is discussed in detail below.
6404 #
6405 # ### Strong
6406 #
6407 # Strong reads are guaranteed to see the effects of all transactions
6408 # that have committed before the start of the read. Furthermore, all
6409 # rows yielded by a single read are consistent with each other -- if
6410 # any part of the read observes a transaction, all parts of the read
6411 # see the transaction.
6412 #
6413 # Strong reads are not repeatable: two consecutive strong read-only
6414 # transactions might return inconsistent results if there are
6415 # concurrent writes. If consistency across reads is required, the
6416 # reads should be executed within a transaction or at an exact read
6417 # timestamp.
6418 #
6419 # See TransactionOptions.ReadOnly.strong.
6420 #
6421 # ### Exact Staleness
6422 #
6423 # These timestamp bounds execute reads at a user-specified
6424 # timestamp. Reads at a timestamp are guaranteed to see a consistent
6425 # prefix of the global transaction history: they observe
6426 # modifications done by all transactions with a commit timestamp &lt;=
6427 # the read timestamp, and observe none of the modifications done by
6428 # transactions with a larger commit timestamp. They will block until
6429 # all conflicting transactions that may be assigned commit timestamps
6430 # &lt;= the read timestamp have finished.
6431 #
6432 # The timestamp can either be expressed as an absolute Cloud Spanner commit
6433 # timestamp or a staleness relative to the current time.
6434 #
6435 # These modes do not require a &quot;negotiation phase&quot; to pick a
6436 # timestamp. As a result, they execute slightly faster than the
6437 # equivalent boundedly stale concurrency modes. On the other hand,
6438 # boundedly stale reads usually return fresher results.
6439 #
6440 # See TransactionOptions.ReadOnly.read_timestamp and
6441 # TransactionOptions.ReadOnly.exact_staleness.
6442 #
6443 # ### Bounded Staleness
6444 #
6445 # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
6446 # subject to a user-provided staleness bound. Cloud Spanner chooses the
6447 # newest timestamp within the staleness bound that allows execution
6448 # of the reads at the closest available replica without blocking.
6449 #
6450 # All rows yielded are consistent with each other -- if any part of
6451 # the read observes a transaction, all parts of the read see the
6452 # transaction. Boundedly stale reads are not repeatable: two stale
6453 # reads, even if they use the same staleness bound, can execute at
6454 # different timestamps and thus return inconsistent results.
6455 #
6456 # Boundedly stale reads execute in two phases: the first phase
6457 # negotiates a timestamp among all replicas needed to serve the
6458 # read. In the second phase, reads are executed at the negotiated
6459 # timestamp.
6460 #
6461 # As a result of the two phase execution, bounded staleness reads are
6462 # usually a little slower than comparable exact staleness
6463 # reads. However, they are typically able to return fresher
6464 # results, and are more likely to execute at the closest replica.
6465 #
6466 # Because the timestamp negotiation requires up-front knowledge of
6467 # which rows will be read, it can only be used with single-use
6468 # read-only transactions.
6469 #
6470 # See TransactionOptions.ReadOnly.max_staleness and
6471 # TransactionOptions.ReadOnly.min_read_timestamp.
6472 #
6473 # ### Old Read Timestamps and Garbage Collection
6474 #
6475 # Cloud Spanner continuously garbage collects deleted and overwritten data
6476 # in the background to reclaim storage space. This process is known
6477 # as &quot;version GC&quot;. By default, version GC reclaims versions after they
6478 # are one hour old. Because of this, Cloud Spanner cannot perform reads
6479 # at read timestamps more than one hour in the past. This
6480 # restriction also applies to in-progress reads and/or SQL queries whose
6481 # timestamps become too old while executing. Reads and SQL queries with
6482 # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
6483 #
6484 # ## Partitioned DML Transactions
6485 #
6486 # Partitioned DML transactions are used to execute DML statements with a
6487 # different execution strategy that provides different, and often better,
6488 # scalability properties for large, table-wide operations than DML in a
6489 # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
6490 # should prefer using ReadWrite transactions.
6491 #
6492 # Partitioned DML partitions the keyspace and runs the DML statement on each
6493 # partition in separate, internal transactions. These transactions commit
6494 # automatically when complete, and run independently from one another.
6495 #
6496 # To reduce lock contention, this execution strategy only acquires read locks
6497 # on rows that match the WHERE clause of the statement. Additionally, the
6498 # smaller per-partition transactions hold locks for less time.
6499 #
6500 # That said, Partitioned DML is not a drop-in replacement for standard DML used
6501 # in ReadWrite transactions.
6502 #
6503 # - The DML statement must be fully-partitionable. Specifically, the statement
6504 # must be expressible as the union of many statements which each access only
6505 # a single row of the table.
6506 #
6507 # - The statement is not applied atomically to all rows of the table. Rather,
6508 # the statement is applied atomically to partitions of the table, in
6509 # independent transactions. Secondary index rows are updated atomically
6510 # with the base table rows.
6511 #
6512 # - Partitioned DML does not guarantee exactly-once execution semantics
6513 # against a partition. The statement will be applied at least once to each
6514 # partition. It is strongly recommended that the DML statement should be
6515 # idempotent to avoid unexpected results. For instance, it is potentially
6516 # dangerous to run a statement such as
6517 # `UPDATE table SET column = column + 1` as it could be run multiple times
6518 # against some rows.
6519 #
6520 # - The partitions are committed automatically - there is no support for
6521 # Commit or Rollback. If the call returns an error, or if the client issuing
6522 # the ExecuteSql call dies, it is possible that some rows had the statement
6523 # executed on them successfully. It is also possible that statement was
6524 # never executed against other rows.
6525 #
6526 # - Partitioned DML transactions may only contain the execution of a single
6527 # DML statement via ExecuteSql or ExecuteStreamingSql.
6528 #
6529 # - If any error is encountered during the execution of the partitioned DML
6530 # operation (for instance, a UNIQUE INDEX violation, division by zero, or a
6531 # value that cannot be stored due to schema constraints), then the
6532 # operation is stopped at that point and an error is returned. It is
6533 # possible that at this point, some partitions have been committed (or even
6534 # committed multiple times), and other partitions have not been run at all.
6535 #
6536 # Given the above, Partitioned DML is a good fit for large, database-wide
6537 # operations that are idempotent, such as deleting old rows from a very large
6538 # table.
6539 &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
6540 #
6541 # Authorization to begin a Partitioned DML transaction requires
6542 # `spanner.databases.beginPartitionedDmlTransaction` permission
6543 # on the `session` resource.
6544 },
6545 &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
6546 #
6547 # Authorization to begin a read-write transaction requires
6548 # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
6549 # on the `session` resource.
6550 # transaction type has no options.
6551 },
6552 &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
6553 #
6554 # Authorization to begin a read-only transaction requires
6555 # `spanner.databases.beginReadOnlyTransaction` permission
6556 # on the `session` resource.
6557 &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
6558 # seconds. Guarantees that all writes that have committed more
6559 # than the specified number of seconds ago are visible. Because
6560 # Cloud Spanner chooses the exact timestamp, this mode works even if
6561 # the client&#x27;s local clock is substantially skewed from Cloud Spanner
6562 # commit timestamps.
6563 #
6564 # Useful for reading the freshest data available at a nearby
6565 # replica, while bounding the possible staleness if the local
6566 # replica has fallen behind.
6567 #
6568 # Note that this option can only be used in single-use
6569 # transactions.
6570 &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
6571 #
6572 # This is useful for requesting fresher data than some previous
6573 # read, or data that is fresh enough to observe the effects of some
6574 # previously committed transaction whose timestamp is known.
6575 #
6576 # Note that this option can only be used in single-use transactions.
6577 #
6578 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
6579 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
6580 &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
6581 # are visible.
6582 &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
6583 # the Transaction message that describes the transaction.
6584 &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
6585 # old. The timestamp is chosen soon after the read is started.
6586 #
6587 # Guarantees that all writes that have committed more than the
6588 # specified number of seconds ago are visible. Because Cloud Spanner
6589 # chooses the exact timestamp, this mode works even if the client&#x27;s
6590 # local clock is substantially skewed from Cloud Spanner commit
6591 # timestamps.
6592 #
6593 # Useful for reading at nearby replicas without the distributed
6594 # timestamp negotiation overhead of `max_staleness`.
6595 &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
6596 # reads at a specific timestamp are repeatable; the same read at
6597 # the same timestamp always returns the same data. If the
6598 # timestamp is in the future, the read will block until the
6599 # specified timestamp, modulo the read&#x27;s deadline.
6600 #
6601 # Useful for large scale consistent reads such as mapreduces, or
6602 # for coordinating many reads against a consistent snapshot of the
6603 # data.
6604 #
6605 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
6606 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
6607 },
6608 },
6609 &quot;singleUse&quot;: { # # Transactions # Execute the read or SQL query in a temporary transaction.
6610 # This is the most efficient way to execute a transaction that
6611 # consists of a single SQL query.
6612 #
6613 #
6614 # Each session can have at most one active transaction at a time. After the
6615 # active transaction is completed, the session can immediately be
6616 # re-used for the next transaction. It is not necessary to create a
6617 # new session for each transaction.
6618 #
6619 # # Transaction Modes
6620 #
6621 # Cloud Spanner supports three transaction modes:
6622 #
6623 # 1. Locking read-write. This type of transaction is the only way
6624 # to write data into Cloud Spanner. These transactions rely on
6625 # pessimistic locking and, if necessary, two-phase commit.
6626 # Locking read-write transactions may abort, requiring the
6627 # application to retry.
6628 #
6629 # 2. Snapshot read-only. This transaction type provides guaranteed
6630 # consistency across several reads, but does not allow
6631 # writes. Snapshot read-only transactions can be configured to
6632 # read at timestamps in the past. Snapshot read-only
6633 # transactions do not need to be committed.
6634 #
6635 # 3. Partitioned DML. This type of transaction is used to execute
6636 # a single Partitioned DML statement. Partitioned DML partitions
6637 # the key space and runs the DML statement over each partition
6638 # in parallel using separate, internal transactions that commit
6639 # independently. Partitioned DML transactions do not need to be
6640 # committed.
6641 #
6642 # For transactions that only read, snapshot read-only transactions
6643 # provide simpler semantics and are almost always faster. In
6644 # particular, read-only transactions do not take locks, so they do
6645 # not conflict with read-write transactions. As a consequence of not
6646 # taking locks, they also do not abort, so retry loops are not needed.
6647 #
6648 # Transactions may only read/write data in a single database. They
6649 # may, however, read/write data in different tables within that
6650 # database.
6651 #
6652 # ## Locking Read-Write Transactions
6653 #
6654 # Locking transactions may be used to atomically read-modify-write
6655 # data anywhere in a database. This type of transaction is externally
6656 # consistent.
6657 #
6658 # Clients should attempt to minimize the amount of time a transaction
6659 # is active. Faster transactions commit with higher probability
6660 # and cause less contention. Cloud Spanner attempts to keep read locks
6661 # active as long as the transaction continues to do reads, and the
6662 # transaction has not been terminated by
6663 # Commit or
6664 # Rollback. Long periods of
6665 # inactivity at the client may cause Cloud Spanner to release a
6666 # transaction&#x27;s locks and abort it.
6667 #
6668 # Conceptually, a read-write transaction consists of zero or more
6669 # reads or SQL statements followed by
6670 # Commit. At any time before
6671 # Commit, the client can send a
6672 # Rollback request to abort the
6673 # transaction.
6674 #
6675 # ### Semantics
6676 #
6677 # Cloud Spanner can commit the transaction if all read locks it acquired
6678 # are still valid at commit time, and it is able to acquire write
6679 # locks for all writes. Cloud Spanner can abort the transaction for any
6680 # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
6681 # that the transaction has not modified any user data in Cloud Spanner.
6682 #
6683 # Unless the transaction commits, Cloud Spanner makes no guarantees about
6684 # how long the transaction&#x27;s locks were held for. It is an error to
6685 # use Cloud Spanner locks for any sort of mutual exclusion other than
6686 # between Cloud Spanner transactions themselves.
6687 #
6688 # ### Retrying Aborted Transactions
6689 #
6690 # When a transaction aborts, the application can choose to retry the
6691 # whole transaction again. To maximize the chances of successfully
6692 # committing the retry, the client should execute the retry in the
6693 # same session as the original attempt. The original session&#x27;s lock
6694 # priority increases with each consecutive abort, meaning that each
6695 # attempt has a slightly better chance of success than the previous.
6696 #
6697 # Under some circumstances (e.g., many transactions attempting to
6698 # modify the same row(s)), a transaction can abort many times in a
6699 # short period before successfully committing. Thus, it is not a good
6700 # idea to cap the number of retries a transaction can attempt;
6701 # instead, it is better to limit the total amount of wall time spent
6702 # retrying.
6703 #
6704 # ### Idle Transactions
6705 #
6706 # A transaction is considered idle if it has no outstanding reads or
6707 # SQL queries and has not started a read or SQL query within the last 10
6708 # seconds. Idle transactions can be aborted by Cloud Spanner so that they
6709 # don&#x27;t hold on to locks indefinitely. In that case, the commit will
6710 # fail with error `ABORTED`.
6711 #
6712 # If this behavior is undesirable, periodically executing a simple
6713 # SQL query in the transaction (e.g., `SELECT 1`) prevents the
6714 # transaction from becoming idle.
6715 #
6716 # ## Snapshot Read-Only Transactions
6717 #
6718 # Snapshot read-only transactions provide a simpler method than
6719 # locking read-write transactions for doing several consistent
6720 # reads. However, this type of transaction does not support writes.
6721 #
6722 # Snapshot transactions do not take locks. Instead, they work by
6723 # choosing a Cloud Spanner timestamp, then executing all reads at that
6724 # timestamp. Since they do not acquire locks, they do not block
6725 # concurrent read-write transactions.
6726 #
6727 # Unlike locking read-write transactions, snapshot read-only
6728 # transactions never abort. They can fail if the chosen read
6729 # timestamp is garbage collected; however, the default garbage
6730 # collection policy is generous enough that most applications do not
6731 # need to worry about this in practice.
6732 #
6733 # Snapshot read-only transactions do not need to call
6734 # Commit or
6735 # Rollback (and in fact are not
6736 # permitted to do so).
6737 #
6738 # To execute a snapshot transaction, the client specifies a timestamp
6739 # bound, which tells Cloud Spanner how to choose a read timestamp.
6740 #
6741 # The types of timestamp bound are:
6742 #
6743 # - Strong (the default).
6744 # - Bounded staleness.
6745 # - Exact staleness.
6746 #
6747 # If the Cloud Spanner database to be read is geographically distributed,
6748 # stale read-only transactions can execute more quickly than strong
6749 # or read-write transactions, because they are able to execute far
6750 # from the leader replica.
6751 #
6752 # Each type of timestamp bound is discussed in detail below.
6753 #
6754 # ### Strong
6755 #
6756 # Strong reads are guaranteed to see the effects of all transactions
6757 # that have committed before the start of the read. Furthermore, all
6758 # rows yielded by a single read are consistent with each other -- if
6759 # any part of the read observes a transaction, all parts of the read
6760 # see the transaction.
6761 #
6762 # Strong reads are not repeatable: two consecutive strong read-only
6763 # transactions might return inconsistent results if there are
6764 # concurrent writes. If consistency across reads is required, the
6765 # reads should be executed within a transaction or at an exact read
6766 # timestamp.
6767 #
6768 # See TransactionOptions.ReadOnly.strong.
6769 #
6770 # ### Exact Staleness
6771 #
6772 # These timestamp bounds execute reads at a user-specified
6773 # timestamp. Reads at a timestamp are guaranteed to see a consistent
6774 # prefix of the global transaction history: they observe
6775 # modifications done by all transactions with a commit timestamp &lt;=
6776 # the read timestamp, and observe none of the modifications done by
6777 # transactions with a larger commit timestamp. They will block until
6778 # all conflicting transactions that may be assigned commit timestamps
6779 # &lt;= the read timestamp have finished.
6780 #
6781 # The timestamp can either be expressed as an absolute Cloud Spanner commit
6782 # timestamp or a staleness relative to the current time.
6783 #
6784 # These modes do not require a &quot;negotiation phase&quot; to pick a
6785 # timestamp. As a result, they execute slightly faster than the
6786 # equivalent boundedly stale concurrency modes. On the other hand,
6787 # boundedly stale reads usually return fresher results.
6788 #
6789 # See TransactionOptions.ReadOnly.read_timestamp and
6790 # TransactionOptions.ReadOnly.exact_staleness.
6791 #
6792 # ### Bounded Staleness
6793 #
6794 # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
6795 # subject to a user-provided staleness bound. Cloud Spanner chooses the
6796 # newest timestamp within the staleness bound that allows execution
6797 # of the reads at the closest available replica without blocking.
6798 #
6799 # All rows yielded are consistent with each other -- if any part of
6800 # the read observes a transaction, all parts of the read see the
6801 # transaction. Boundedly stale reads are not repeatable: two stale
6802 # reads, even if they use the same staleness bound, can execute at
6803 # different timestamps and thus return inconsistent results.
6804 #
6805 # Boundedly stale reads execute in two phases: the first phase
6806 # negotiates a timestamp among all replicas needed to serve the
6807 # read. In the second phase, reads are executed at the negotiated
6808 # timestamp.
6809 #
6810 # As a result of the two-phase execution, bounded staleness reads are
6811 # usually a little slower than comparable exact staleness
6812 # reads. However, they are typically able to return fresher
6813 # results, and are more likely to execute at the closest replica.
6814 #
6815 # Because the timestamp negotiation requires up-front knowledge of
6816 # which rows will be read, it can only be used with single-use
6817 # read-only transactions.
6818 #
6819 # See TransactionOptions.ReadOnly.max_staleness and
6820 # TransactionOptions.ReadOnly.min_read_timestamp.
6821 #
6822 # ### Old Read Timestamps and Garbage Collection
6823 #
6824 # Cloud Spanner continuously garbage collects deleted and overwritten data
6825 # in the background to reclaim storage space. This process is known
6826 # as &quot;version GC&quot;. By default, version GC reclaims versions after they
6827 # are one hour old. Because of this, Cloud Spanner cannot perform reads
6828 # at read timestamps more than one hour in the past. This
6829 # restriction also applies to in-progress reads and/or SQL queries whose
6830 # timestamps become too old while executing. Reads and SQL queries with
6831 # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
6832 #
6833 # ## Partitioned DML Transactions
6834 #
6835 # Partitioned DML transactions are used to execute DML statements with a
6836 # different execution strategy that provides different, and often better,
6837 # scalability properties for large, table-wide operations than DML in a
6838 # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
6839 # should prefer using ReadWrite transactions.
6840 #
6841 # Partitioned DML partitions the keyspace and runs the DML statement on each
6842 # partition in separate, internal transactions. These transactions commit
6843 # automatically when complete, and run independently from one another.
6844 #
6845 # To reduce lock contention, this execution strategy only acquires read locks
6846 # on rows that match the WHERE clause of the statement. Additionally, the
6847 # smaller per-partition transactions hold locks for less time.
6848 #
6849 # That said, Partitioned DML is not a drop-in replacement for standard DML used
6850 # in ReadWrite transactions.
6851 #
6852 # - The DML statement must be fully-partitionable. Specifically, the statement
6853 # must be expressible as the union of many statements which each access only
6854 # a single row of the table.
6855 #
6856 # - The statement is not applied atomically to all rows of the table. Rather,
6857 # the statement is applied atomically to partitions of the table, in
6858 # independent transactions. Secondary index rows are updated atomically
6859 # with the base table rows.
6860 #
6861 # - Partitioned DML does not guarantee exactly-once execution semantics
6862 # against a partition. The statement will be applied at least once to each
6863 # partition. It is strongly recommended that the DML statement should be
6864 # idempotent to avoid unexpected results. For instance, it is potentially
6865 # dangerous to run a statement such as
6866 # `UPDATE table SET column = column + 1` as it could be run multiple times
6867 # against some rows.
6868 #
6869 # - The partitions are committed automatically - there is no support for
6870 # Commit or Rollback. If the call returns an error, or if the client issuing
6871 # the ExecuteSql call dies, it is possible that some rows had the statement
6872 # executed on them successfully. It is also possible that statement was
6873 # never executed against other rows.
6874 #
6875 # - Partitioned DML transactions may only contain the execution of a single
6876 # DML statement via ExecuteSql or ExecuteStreamingSql.
6877 #
6878 # - If any error is encountered during the execution of the partitioned DML
6879 # operation (for instance, a UNIQUE INDEX violation, division by zero, or a
6880 # value that cannot be stored due to schema constraints), then the
6881 # operation is stopped at that point and an error is returned. It is
6882 # possible that at this point, some partitions have been committed (or even
6883 # committed multiple times), and other partitions have not been run at all.
6884 #
6885 # Given the above, Partitioned DML is a good fit for large, database-wide
6886 # operations that are idempotent, such as deleting old rows from a very large
6887 # table.
6888 &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
6889 #
6890 # Authorization to begin a Partitioned DML transaction requires
6891 # `spanner.databases.beginPartitionedDmlTransaction` permission
6892 # on the `session` resource.
6893 },
6894 &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
6895 #
6896 # Authorization to begin a read-write transaction requires
6897 # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
6898 # on the `session` resource.
6899 # transaction type has no options.
6900 },
6901 &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
6902 #
6903 # Authorization to begin a read-only transaction requires
6904 # `spanner.databases.beginReadOnlyTransaction` permission
6905 # on the `session` resource.
6906 &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
6907 # seconds. Guarantees that all writes that have committed more
6908 # than the specified number of seconds ago are visible. Because
6909 # Cloud Spanner chooses the exact timestamp, this mode works even if
6910 # the client&#x27;s local clock is substantially skewed from Cloud Spanner
6911 # commit timestamps.
6912 #
6913 # Useful for reading the freshest data available at a nearby
6914 # replica, while bounding the possible staleness if the local
6915 # replica has fallen behind.
6916 #
6917 # Note that this option can only be used in single-use
6918 # transactions.
6919 &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
6920 #
6921 # This is useful for requesting fresher data than some previous
6922 # read, or data that is fresh enough to observe the effects of some
6923 # previously committed transaction whose timestamp is known.
6924 #
6925 # Note that this option can only be used in single-use transactions.
6926 #
6927 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
6928 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
6929 &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
6930 # are visible.
6931 &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
6932 # the Transaction message that describes the transaction.
6933 &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
6934 # old. The timestamp is chosen soon after the read is started.
6935 #
6936 # Guarantees that all writes that have committed more than the
6937 # specified number of seconds ago are visible. Because Cloud Spanner
6938 # chooses the exact timestamp, this mode works even if the client&#x27;s
6939 # local clock is substantially skewed from Cloud Spanner commit
6940 # timestamps.
6941 #
6942 # Useful for reading at nearby replicas without the distributed
6943 # timestamp negotiation overhead of `max_staleness`.
6944 &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
6945 # reads at a specific timestamp are repeatable; the same read at
6946 # the same timestamp always returns the same data. If the
6947 # timestamp is in the future, the read will block until the
6948 # specified timestamp, modulo the read&#x27;s deadline.
6949 #
6950 # Useful for large scale consistent reads such as mapreduces, or
6951 # for coordinating many reads against a consistent snapshot of the
6952 # data.
6953 #
6954 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
6955 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
6956 },
6957 },
6958 },
6959 &quot;partitionToken&quot;: &quot;A String&quot;, # If present, results will be restricted to the specified partition
6960 # previously created using PartitionRead(). There must be an exact
6961 # match for the values of fields common to this message and the
6962 # PartitionReadRequest message used to create this partition_token.
6963 &quot;columns&quot;: [ # Required. The columns of table to be returned for each row matching
6964 # this request.
6965 &quot;A String&quot;,
6966 ],
6967 &quot;limit&quot;: &quot;A String&quot;, # If greater than zero, only the first `limit` rows are yielded. If `limit`
6968 # is zero, the default is no limit. A limit cannot be specified if
6969 # `partition_token` is set.
6970 &quot;table&quot;: &quot;A String&quot;, # Required. The name of the table in the database to be read.
6971 &quot;resumeToken&quot;: &quot;A String&quot;, # If this request is resuming a previously interrupted read,
6972 # `resume_token` should be copied from the last
6973 # PartialResultSet yielded before the interruption. Doing this
6974 # enables the new read to resume where the last read left off. The
6975 # rest of the request parameters must exactly match the request
6976 # that yielded this token.
Bu Sun Kim65020912020-05-20 12:08:20 -07006977 &quot;keySet&quot;: { # `KeySet` defines a collection of Cloud Spanner keys and/or key ranges. All # Required. `key_set` identifies the rows to be yielded. `key_set` names the
6978 # primary keys of the rows in table to be yielded, unless index
6979 # is present. If index is present, then key_set instead names
6980 # index keys in index.
6981 #
6982 # If the partition_token field is empty, rows are yielded
6983 # in table primary key order (if index is empty) or index key order
6984 # (if index is non-empty). If the partition_token field is not
6985 # empty, rows will be yielded in an unspecified order.
6986 #
6987 # It is not an error for the `key_set` to name rows that do not
6988 # exist in the database. Read yields nothing for nonexistent rows.
6989 # the keys are expected to be in the same table or index. The keys need
6990 # not be sorted in any particular way.
6991 #
6992 # If the same key is specified multiple times in the set (for example
6993 # if two ranges, two keys, or a key and a range overlap), Cloud Spanner
6994 # behaves as if the key were only specified once.
6995 &quot;ranges&quot;: [ # A list of key ranges. See KeyRange for more information about
6996 # key range specifications.
6997 { # KeyRange represents a range of rows in a table or index.
6998 #
6999 # A range has a start key and an end key. These keys can be open or
7000 # closed, indicating if the range includes rows with that key.
7001 #
7002 # Keys are represented by lists, where the ith value in the list
7003 # corresponds to the ith component of the table or index primary key.
7004 # Individual values are encoded as described
7005 # here.
7006 #
7007 # For example, consider the following table definition:
7008 #
7009 # CREATE TABLE UserEvents (
7010 # UserName STRING(MAX),
7011 # EventDate STRING(10)
7012 # ) PRIMARY KEY(UserName, EventDate);
7013 #
7014 # The following keys name rows in this table:
7015 #
7016 # &quot;Bob&quot;, &quot;2014-09-23&quot;
7017 #
7018 # Since the `UserEvents` table&#x27;s `PRIMARY KEY` clause names two
7019 # columns, each `UserEvents` key has two elements; the first is the
7020 # `UserName`, and the second is the `EventDate`.
7021 #
7022 # Key ranges with multiple components are interpreted
7023 # lexicographically by component using the table or index key&#x27;s declared
7024 # sort order. For example, the following range returns all events for
7025 # user `&quot;Bob&quot;` that occurred in the year 2015:
7026 #
7027 # &quot;start_closed&quot;: [&quot;Bob&quot;, &quot;2015-01-01&quot;]
7028 # &quot;end_closed&quot;: [&quot;Bob&quot;, &quot;2015-12-31&quot;]
7029 #
7030 # Start and end keys can omit trailing key components. This affects the
7031 # inclusion and exclusion of rows that exactly match the provided key
7032 # components: if the key is closed, then rows that exactly match the
7033 # provided components are included; if the key is open, then rows
7034 # that exactly match are not included.
7035 #
7036 # For example, the following range includes all events for `&quot;Bob&quot;` that
7037 # occurred during and after the year 2000:
7038 #
7039 # &quot;start_closed&quot;: [&quot;Bob&quot;, &quot;2000-01-01&quot;]
7040 # &quot;end_closed&quot;: [&quot;Bob&quot;]
7041 #
7042 # The next example retrieves all events for `&quot;Bob&quot;`:
7043 #
7044 # &quot;start_closed&quot;: [&quot;Bob&quot;]
7045 # &quot;end_closed&quot;: [&quot;Bob&quot;]
7046 #
7047 # To retrieve events before the year 2000:
7048 #
7049 # &quot;start_closed&quot;: [&quot;Bob&quot;]
7050 # &quot;end_open&quot;: [&quot;Bob&quot;, &quot;2000-01-01&quot;]
7051 #
7052 # The following range includes all rows in the table:
7053 #
7054 # &quot;start_closed&quot;: []
7055 # &quot;end_closed&quot;: []
7056 #
7057 # This range returns all users whose `UserName` begins with any
7058 # character from A to C:
7059 #
7060 # &quot;start_closed&quot;: [&quot;A&quot;]
7061 # &quot;end_open&quot;: [&quot;D&quot;]
7062 #
7063 # This range returns all users whose `UserName` begins with B:
7064 #
7065 # &quot;start_closed&quot;: [&quot;B&quot;]
7066 # &quot;end_open&quot;: [&quot;C&quot;]
7067 #
7068 # Key ranges honor column sort order. For example, suppose a table is
7069 # defined as follows:
7070 #
7071 # CREATE TABLE DescendingSortedTable {
7072 # Key INT64,
7073 # ...
7074 # ) PRIMARY KEY(Key DESC);
7075 #
7076 # The following range retrieves all rows with key values between 1
7077 # and 100 inclusive:
7078 #
7079 # &quot;start_closed&quot;: [&quot;100&quot;]
7080 # &quot;end_closed&quot;: [&quot;1&quot;]
7081 #
7082 # Note that 100 is passed as the start, and 1 is passed as the end,
7083 # because `Key` is a descending column in the schema.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07007084 &quot;startOpen&quot;: [ # If the start is open, then the range excludes rows whose first
7085 # `len(start_open)` key columns exactly match `start_open`.
Bu Sun Kim65020912020-05-20 12:08:20 -07007086 &quot;&quot;,
7087 ],
7088 &quot;endClosed&quot;: [ # If the end is closed, then the range includes all rows whose
7089 # first `len(end_closed)` key columns exactly match `end_closed`.
7090 &quot;&quot;,
7091 ],
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07007092 &quot;endOpen&quot;: [ # If the end is open, then the range excludes rows whose first
7093 # `len(end_open)` key columns exactly match `end_open`.
Bu Sun Kim65020912020-05-20 12:08:20 -07007094 &quot;&quot;,
7095 ],
7096 &quot;startClosed&quot;: [ # If the start is closed, then the range includes all rows whose
7097 # first `len(start_closed)` key columns exactly match `start_closed`.
7098 &quot;&quot;,
7099 ],
7100 },
7101 ],
Bu Sun Kim65020912020-05-20 12:08:20 -07007102 &quot;keys&quot;: [ # A list of specific keys. Entries in `keys` should have exactly as
7103 # many elements as there are columns in the primary or index key
7104 # with which this `KeySet` is used. Individual key values are
7105 # encoded as described here.
7106 [
7107 &quot;&quot;,
7108 ],
7109 ],
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07007110 &quot;all&quot;: True or False, # For convenience `all` can be set to `true` to indicate that this
7111 # `KeySet` matches all keys in the table or index. Note that any keys
7112 # specified in `keys` or `ranges` are only yielded once.
Bu Sun Kim65020912020-05-20 12:08:20 -07007113 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07007114 &quot;index&quot;: &quot;A String&quot;, # If non-empty, the name of an index on table. This index is
7115 # used instead of the table primary key when interpreting key_set
7116 # and sorting result rows. See key_set for further information.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04007117 }
7118
7119 x__xgafv: string, V1 error format.
7120 Allowed values
7121 1 - v1 error format
7122 2 - v2 error format
7123
7124Returns:
7125 An object of the form:
7126
7127 { # Results from Read or
7128 # ExecuteSql.
Bu Sun Kim65020912020-05-20 12:08:20 -07007129 &quot;metadata&quot;: { # Metadata about a ResultSet or PartialResultSet. # Metadata about the result set, such as row type information.
7130 &quot;rowType&quot;: { # `StructType` defines the fields of a STRUCT type. # Indicates the field names and types for the rows in the result
7131 # set. For example, a SQL query like `&quot;SELECT UserId, UserName FROM
7132 # Users&quot;` could return a `row_type` value like:
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04007133 #
Bu Sun Kim65020912020-05-20 12:08:20 -07007134 # &quot;fields&quot;: [
7135 # { &quot;name&quot;: &quot;UserId&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;INT64&quot; } },
7136 # { &quot;name&quot;: &quot;UserName&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;STRING&quot; } },
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04007137 # ]
Bu Sun Kim65020912020-05-20 12:08:20 -07007138 &quot;fields&quot;: [ # The list of fields that make up this struct. Order is
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04007139 # significant, because values of this struct type are represented as
7140 # lists, where the order of field values matches the order of
7141 # fields in the StructType. In turn, the order of fields
7142 # matches the order of columns in a read request, or the order of
7143 # fields in the `SELECT` clause of a query.
7144 { # Message representing a single field of a struct.
Bu Sun Kim65020912020-05-20 12:08:20 -07007145 &quot;name&quot;: &quot;A String&quot;, # The name of the field. For reads, this is the column name. For
7146 # SQL queries, it is the column alias (e.g., `&quot;Word&quot;` in the
7147 # query `&quot;SELECT &#x27;hello&#x27; AS Word&quot;`), or the column name (e.g.,
7148 # `&quot;ColName&quot;` in the query `&quot;SELECT ColName FROM Table&quot;`). Some
7149 # columns might have an empty name (e.g., `&quot;SELECT
7150 # UPPER(ColName)&quot;`). Note that a query result can contain
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04007151 # multiple fields with the same name.
Bu Sun Kim65020912020-05-20 12:08:20 -07007152 &quot;type&quot;: # Object with schema name: Type # The type of the field.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04007153 },
7154 ],
7155 },
Bu Sun Kim65020912020-05-20 12:08:20 -07007156 &quot;transaction&quot;: { # A transaction. # If the read or SQL query began a transaction as a side-effect, the
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04007157 # information about the new transaction is yielded here.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07007158 &quot;readTimestamp&quot;: &quot;A String&quot;, # For snapshot read-only transactions, the read timestamp chosen
7159 # for the transaction. Not returned by default: see
7160 # TransactionOptions.ReadOnly.return_read_timestamp.
7161 #
7162 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
7163 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
Bu Sun Kim65020912020-05-20 12:08:20 -07007164 &quot;id&quot;: &quot;A String&quot;, # `id` may be used to identify the transaction in subsequent
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04007165 # Read,
7166 # ExecuteSql,
7167 # Commit, or
7168 # Rollback calls.
7169 #
7170 # Single-use read-only transactions do not have IDs, because
7171 # single-use transactions do not support multiple requests.
7172 },
7173 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07007174 &quot;stats&quot;: { # Additional statistics about a ResultSet or PartialResultSet. # Query plan and execution statistics for the SQL statement that
7175 # produced this result set. These can be requested by setting
7176 # ExecuteSqlRequest.query_mode.
7177 # DML statements always produce stats containing the number of rows
7178 # modified, unless executed using the
7179 # ExecuteSqlRequest.QueryMode.PLAN ExecuteSqlRequest.query_mode.
7180 # Other fields may or may not be populated, based on the
7181 # ExecuteSqlRequest.query_mode.
7182 &quot;rowCountLowerBound&quot;: &quot;A String&quot;, # Partitioned DML does not offer exactly-once semantics, so it
7183 # returns a lower bound of the rows modified.
7184 &quot;queryPlan&quot;: { # Contains an ordered list of nodes appearing in the query plan. # QueryPlan for the query associated with this result.
7185 &quot;planNodes&quot;: [ # The nodes in the query plan. Plan nodes are returned in pre-order starting
7186 # with the plan root. Each PlanNode&#x27;s `id` corresponds to its index in
7187 # `plan_nodes`.
7188 { # Node information for nodes appearing in a QueryPlan.plan_nodes.
7189 &quot;displayName&quot;: &quot;A String&quot;, # The display name for the node.
7190 &quot;executionStats&quot;: { # The execution statistics associated with the node, contained in a group of
7191 # key-value pairs. Only present if the plan was returned as a result of a
7192 # profile query. For example, number of executions, number of rows/time per
7193 # execution etc.
7194 &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
7195 },
7196 &quot;shortRepresentation&quot;: { # Condensed representation of a node and its subtree. Only present for # Condensed representation for SCALAR nodes.
7197 # `SCALAR` PlanNode(s).
7198 &quot;subqueries&quot;: { # A mapping of (subquery variable name) -&gt; (subquery node id) for cases
7199 # where the `description` string of this node references a `SCALAR`
7200 # subquery contained in the expression subtree rooted at this node. The
7201 # referenced `SCALAR` subquery may not necessarily be a direct child of
7202 # this node.
7203 &quot;a_key&quot;: 42,
7204 },
7205 &quot;description&quot;: &quot;A String&quot;, # A string representation of the expression subtree rooted at this node.
7206 },
7207 &quot;metadata&quot;: { # Attributes relevant to the node contained in a group of key-value pairs.
7208 # For example, a Parameter Reference node could have the following
7209 # information in its metadata:
7210 #
7211 # {
7212 # &quot;parameter_reference&quot;: &quot;param1&quot;,
7213 # &quot;parameter_type&quot;: &quot;array&quot;
7214 # }
7215 &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
7216 },
7217 &quot;childLinks&quot;: [ # List of child node `index`es and their relationship to this parent.
7218 { # Metadata associated with a parent-child relationship appearing in a
7219 # PlanNode.
7220 &quot;type&quot;: &quot;A String&quot;, # The type of the link. For example, in Hash Joins this could be used to
7221 # distinguish between the build child and the probe child, or in the case
7222 # of the child being an output variable, to represent the tag associated
7223 # with the output variable.
7224 &quot;variable&quot;: &quot;A String&quot;, # Only present if the child node is SCALAR and corresponds
7225 # to an output variable of the parent node. The field carries the name of
7226 # the output variable.
7227 # For example, a `TableScan` operator that reads rows from a table will
7228 # have child links to the `SCALAR` nodes representing the output variables
7229 # created for each column that is read by the operator. The corresponding
7230 # `variable` fields will be set to the variable names assigned to the
7231 # columns.
7232 &quot;childIndex&quot;: 42, # The node to which the link points.
7233 },
7234 ],
7235 &quot;index&quot;: 42, # The `PlanNode`&#x27;s index in node list.
7236 &quot;kind&quot;: &quot;A String&quot;, # Used to determine the type of node. May be needed for visualizing
7237 # different kinds of nodes differently. For example, If the node is a
7238 # SCALAR node, it will have a condensed representation
7239 # which can be used to directly embed a description of the node in its
7240 # parent.
7241 },
7242 ],
7243 },
7244 &quot;rowCountExact&quot;: &quot;A String&quot;, # Standard DML returns an exact count of rows that were modified.
7245 &quot;queryStats&quot;: { # Aggregated statistics from the execution of the query. Only present when
7246 # the query is profiled. For example, a query could return the statistics as
7247 # follows:
7248 #
7249 # {
7250 # &quot;rows_returned&quot;: &quot;3&quot;,
7251 # &quot;elapsed_time&quot;: &quot;1.22 secs&quot;,
7252 # &quot;cpu_time&quot;: &quot;1.19 secs&quot;
7253 # }
7254 &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
7255 },
7256 },
7257 &quot;rows&quot;: [ # Each element in `rows` is a row whose format is defined by
7258 # metadata.row_type. The ith element
7259 # in each row matches the ith field in
7260 # metadata.row_type. Elements are
7261 # encoded based on type as described
7262 # here.
7263 [
7264 &quot;&quot;,
7265 ],
7266 ],
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04007267 }</pre>
7268</div>
7269
7270<div class="method">
Dan O'Mearadd494642020-05-01 07:42:23 -07007271 <code class="details" id="rollback">rollback(session, body=None, x__xgafv=None)</code>
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04007272 <pre>Rolls back a transaction, releasing any locks it holds. It is a good
7273idea to call this for any transaction that includes one or more
7274Read or ExecuteSql requests and
7275ultimately decides not to commit.
7276
7277`Rollback` returns `OK` if it successfully aborts the transaction, the
7278transaction was already aborted, or the transaction is not
7279found. `Rollback` never returns `ABORTED`.
7280
7281Args:
7282 session: string, Required. The session in which the transaction to roll back is running. (required)
Dan O'Mearadd494642020-05-01 07:42:23 -07007283 body: object, The request body.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04007284 The object takes the form of:
7285
7286{ # The request for Rollback.
Bu Sun Kim65020912020-05-20 12:08:20 -07007287 &quot;transactionId&quot;: &quot;A String&quot;, # Required. The transaction to roll back.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04007288 }
7289
7290 x__xgafv: string, V1 error format.
7291 Allowed values
7292 1 - v1 error format
7293 2 - v2 error format
7294
7295Returns:
7296 An object of the form:
7297
7298 { # A generic empty message that you can re-use to avoid defining duplicated
7299 # empty messages in your APIs. A typical example is to use it as the request
7300 # or the response type of an API method. For instance:
7301 #
7302 # service Foo {
7303 # rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
7304 # }
7305 #
7306 # The JSON representation for `Empty` is empty JSON object `{}`.
7307 }</pre>
7308</div>
7309
7310<div class="method">
Dan O'Mearadd494642020-05-01 07:42:23 -07007311 <code class="details" id="streamingRead">streamingRead(session, body=None, x__xgafv=None)</code>
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04007312 <pre>Like Read, except returns the result set as a
7313stream. Unlike Read, there is no limit on the
7314size of the returned result set. However, no individual row in
7315the result set can exceed 100 MiB, and no column value can exceed
731610 MiB.
7317
7318Args:
7319 session: string, Required. The session in which the read should be performed. (required)
Dan O'Mearadd494642020-05-01 07:42:23 -07007320 body: object, The request body.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04007321 The object takes the form of:
7322
7323{ # The request for Read and
7324 # StreamingRead.
Bu Sun Kim65020912020-05-20 12:08:20 -07007325 &quot;transaction&quot;: { # This message is used to select the transaction in which a # The transaction to use. If none is provided, the default is a
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04007326 # temporary read-only transaction with strong concurrency.
7327 # Read or
7328 # ExecuteSql call runs.
7329 #
7330 # See TransactionOptions for more information about transactions.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07007331 &quot;id&quot;: &quot;A String&quot;, # Execute the read or SQL query in a previously-started transaction.
Bu Sun Kim65020912020-05-20 12:08:20 -07007332 &quot;begin&quot;: { # # Transactions # Begin a new transaction and execute this read or SQL query in
7333 # it. The transaction ID of the new transaction is returned in
7334 # ResultSetMetadata.transaction, which is a Transaction.
7335 #
7336 #
7337 # Each session can have at most one active transaction at a time. After the
7338 # active transaction is completed, the session can immediately be
7339 # re-used for the next transaction. It is not necessary to create a
7340 # new session for each transaction.
7341 #
7342 # # Transaction Modes
7343 #
7344 # Cloud Spanner supports three transaction modes:
7345 #
7346 # 1. Locking read-write. This type of transaction is the only way
7347 # to write data into Cloud Spanner. These transactions rely on
7348 # pessimistic locking and, if necessary, two-phase commit.
7349 # Locking read-write transactions may abort, requiring the
7350 # application to retry.
7351 #
7352 # 2. Snapshot read-only. This transaction type provides guaranteed
7353 # consistency across several reads, but does not allow
7354 # writes. Snapshot read-only transactions can be configured to
7355 # read at timestamps in the past. Snapshot read-only
7356 # transactions do not need to be committed.
7357 #
7358 # 3. Partitioned DML. This type of transaction is used to execute
7359 # a single Partitioned DML statement. Partitioned DML partitions
7360 # the key space and runs the DML statement over each partition
7361 # in parallel using separate, internal transactions that commit
7362 # independently. Partitioned DML transactions do not need to be
7363 # committed.
7364 #
7365 # For transactions that only read, snapshot read-only transactions
7366 # provide simpler semantics and are almost always faster. In
7367 # particular, read-only transactions do not take locks, so they do
7368 # not conflict with read-write transactions. As a consequence of not
7369 # taking locks, they also do not abort, so retry loops are not needed.
7370 #
7371 # Transactions may only read/write data in a single database. They
7372 # may, however, read/write data in different tables within that
7373 # database.
7374 #
7375 # ## Locking Read-Write Transactions
7376 #
7377 # Locking transactions may be used to atomically read-modify-write
7378 # data anywhere in a database. This type of transaction is externally
7379 # consistent.
7380 #
7381 # Clients should attempt to minimize the amount of time a transaction
7382 # is active. Faster transactions commit with higher probability
7383 # and cause less contention. Cloud Spanner attempts to keep read locks
7384 # active as long as the transaction continues to do reads, and the
7385 # transaction has not been terminated by
7386 # Commit or
7387 # Rollback. Long periods of
7388 # inactivity at the client may cause Cloud Spanner to release a
7389 # transaction&#x27;s locks and abort it.
7390 #
7391 # Conceptually, a read-write transaction consists of zero or more
7392 # reads or SQL statements followed by
7393 # Commit. At any time before
7394 # Commit, the client can send a
7395 # Rollback request to abort the
7396 # transaction.
7397 #
7398 # ### Semantics
7399 #
7400 # Cloud Spanner can commit the transaction if all read locks it acquired
7401 # are still valid at commit time, and it is able to acquire write
7402 # locks for all writes. Cloud Spanner can abort the transaction for any
7403 # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
7404 # that the transaction has not modified any user data in Cloud Spanner.
7405 #
7406 # Unless the transaction commits, Cloud Spanner makes no guarantees about
7407 # how long the transaction&#x27;s locks were held for. It is an error to
7408 # use Cloud Spanner locks for any sort of mutual exclusion other than
7409 # between Cloud Spanner transactions themselves.
7410 #
7411 # ### Retrying Aborted Transactions
7412 #
7413 # When a transaction aborts, the application can choose to retry the
7414 # whole transaction again. To maximize the chances of successfully
7415 # committing the retry, the client should execute the retry in the
7416 # same session as the original attempt. The original session&#x27;s lock
7417 # priority increases with each consecutive abort, meaning that each
7418 # attempt has a slightly better chance of success than the previous.
7419 #
7420 # Under some circumstances (e.g., many transactions attempting to
7421 # modify the same row(s)), a transaction can abort many times in a
7422 # short period before successfully committing. Thus, it is not a good
7423 # idea to cap the number of retries a transaction can attempt;
7424 # instead, it is better to limit the total amount of wall time spent
7425 # retrying.
7426 #
7427 # ### Idle Transactions
7428 #
7429 # A transaction is considered idle if it has no outstanding reads or
7430 # SQL queries and has not started a read or SQL query within the last 10
7431 # seconds. Idle transactions can be aborted by Cloud Spanner so that they
7432 # don&#x27;t hold on to locks indefinitely. In that case, the commit will
7433 # fail with error `ABORTED`.
7434 #
7435 # If this behavior is undesirable, periodically executing a simple
7436 # SQL query in the transaction (e.g., `SELECT 1`) prevents the
7437 # transaction from becoming idle.
7438 #
7439 # ## Snapshot Read-Only Transactions
7440 #
7441 # Snapshot read-only transactions provides a simpler method than
7442 # locking read-write transactions for doing several consistent
7443 # reads. However, this type of transaction does not support writes.
7444 #
7445 # Snapshot transactions do not take locks. Instead, they work by
7446 # choosing a Cloud Spanner timestamp, then executing all reads at that
7447 # timestamp. Since they do not acquire locks, they do not block
7448 # concurrent read-write transactions.
7449 #
7450 # Unlike locking read-write transactions, snapshot read-only
7451 # transactions never abort. They can fail if the chosen read
7452 # timestamp is garbage collected; however, the default garbage
7453 # collection policy is generous enough that most applications do not
7454 # need to worry about this in practice.
7455 #
7456 # Snapshot read-only transactions do not need to call
7457 # Commit or
7458 # Rollback (and in fact are not
7459 # permitted to do so).
7460 #
7461 # To execute a snapshot transaction, the client specifies a timestamp
7462 # bound, which tells Cloud Spanner how to choose a read timestamp.
7463 #
7464 # The types of timestamp bound are:
7465 #
7466 # - Strong (the default).
7467 # - Bounded staleness.
7468 # - Exact staleness.
7469 #
7470 # If the Cloud Spanner database to be read is geographically distributed,
7471 # stale read-only transactions can execute more quickly than strong
7472 # or read-write transaction, because they are able to execute far
7473 # from the leader replica.
7474 #
7475 # Each type of timestamp bound is discussed in detail below.
7476 #
7477 # ### Strong
7478 #
7479 # Strong reads are guaranteed to see the effects of all transactions
7480 # that have committed before the start of the read. Furthermore, all
7481 # rows yielded by a single read are consistent with each other -- if
7482 # any part of the read observes a transaction, all parts of the read
7483 # see the transaction.
7484 #
7485 # Strong reads are not repeatable: two consecutive strong read-only
7486 # transactions might return inconsistent results if there are
7487 # concurrent writes. If consistency across reads is required, the
7488 # reads should be executed within a transaction or at an exact read
7489 # timestamp.
7490 #
7491 # See TransactionOptions.ReadOnly.strong.
7492 #
7493 # ### Exact Staleness
7494 #
7495 # These timestamp bounds execute reads at a user-specified
7496 # timestamp. Reads at a timestamp are guaranteed to see a consistent
7497 # prefix of the global transaction history: they observe
7498 # modifications done by all transactions with a commit timestamp &lt;=
7499 # the read timestamp, and observe none of the modifications done by
7500 # transactions with a larger commit timestamp. They will block until
7501 # all conflicting transactions that may be assigned commit timestamps
7502 # &lt;= the read timestamp have finished.
7503 #
7504 # The timestamp can either be expressed as an absolute Cloud Spanner commit
7505 # timestamp or a staleness relative to the current time.
7506 #
7507 # These modes do not require a &quot;negotiation phase&quot; to pick a
7508 # timestamp. As a result, they execute slightly faster than the
7509 # equivalent boundedly stale concurrency modes. On the other hand,
7510 # boundedly stale reads usually return fresher results.
7511 #
7512 # See TransactionOptions.ReadOnly.read_timestamp and
7513 # TransactionOptions.ReadOnly.exact_staleness.
7514 #
7515 # ### Bounded Staleness
7516 #
7517 # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
7518 # subject to a user-provided staleness bound. Cloud Spanner chooses the
7519 # newest timestamp within the staleness bound that allows execution
7520 # of the reads at the closest available replica without blocking.
7521 #
7522 # All rows yielded are consistent with each other -- if any part of
7523 # the read observes a transaction, all parts of the read see the
7524 # transaction. Boundedly stale reads are not repeatable: two stale
7525 # reads, even if they use the same staleness bound, can execute at
7526 # different timestamps and thus return inconsistent results.
7527 #
7528 # Boundedly stale reads execute in two phases: the first phase
7529 # negotiates a timestamp among all replicas needed to serve the
7530 # read. In the second phase, reads are executed at the negotiated
7531 # timestamp.
7532 #
7533 # As a result of the two phase execution, bounded staleness reads are
7534 # usually a little slower than comparable exact staleness
7535 # reads. However, they are typically able to return fresher
7536 # results, and are more likely to execute at the closest replica.
7537 #
7538 # Because the timestamp negotiation requires up-front knowledge of
7539 # which rows will be read, it can only be used with single-use
7540 # read-only transactions.
7541 #
7542 # See TransactionOptions.ReadOnly.max_staleness and
7543 # TransactionOptions.ReadOnly.min_read_timestamp.
7544 #
7545 # ### Old Read Timestamps and Garbage Collection
7546 #
7547 # Cloud Spanner continuously garbage collects deleted and overwritten data
7548 # in the background to reclaim storage space. This process is known
7549 # as &quot;version GC&quot;. By default, version GC reclaims versions after they
7550 # are one hour old. Because of this, Cloud Spanner cannot perform reads
7551 # at read timestamps more than one hour in the past. This
7552 # restriction also applies to in-progress reads and/or SQL queries whose
7553 # timestamp become too old while executing. Reads and SQL queries with
7554 # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
7555 #
7556 # ## Partitioned DML Transactions
7557 #
7558 # Partitioned DML transactions are used to execute DML statements with a
7559 # different execution strategy that provides different, and often better,
7560 # scalability properties for large, table-wide operations than DML in a
7561 # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
7562 # should prefer using ReadWrite transactions.
7563 #
7564 # Partitioned DML partitions the keyspace and runs the DML statement on each
7565 # partition in separate, internal transactions. These transactions commit
7566 # automatically when complete, and run independently from one another.
7567 #
7568 # To reduce lock contention, this execution strategy only acquires read locks
7569 # on rows that match the WHERE clause of the statement. Additionally, the
7570 # smaller per-partition transactions hold locks for less time.
7571 #
7572 # That said, Partitioned DML is not a drop-in replacement for standard DML used
7573 # in ReadWrite transactions.
7574 #
7575 # - The DML statement must be fully-partitionable. Specifically, the statement
7576 # must be expressible as the union of many statements which each access only
7577 # a single row of the table.
7578 #
7579 # - The statement is not applied atomically to all rows of the table. Rather,
7580 # the statement is applied atomically to partitions of the table, in
7581 # independent transactions. Secondary index rows are updated atomically
7582 # with the base table rows.
7583 #
7584 # - Partitioned DML does not guarantee exactly-once execution semantics
7585 # against a partition. The statement will be applied at least once to each
7586 # partition. It is strongly recommended that the DML statement should be
7587 # idempotent to avoid unexpected results. For instance, it is potentially
7588 # dangerous to run a statement such as
7589 # `UPDATE table SET column = column + 1` as it could be run multiple times
7590 # against some rows.
7591 #
7592 # - The partitions are committed automatically - there is no support for
7593 # Commit or Rollback. If the call returns an error, or if the client issuing
7594 # the ExecuteSql call dies, it is possible that some rows had the statement
7595 # executed on them successfully. It is also possible that statement was
7596 # never executed against other rows.
7597 #
7598 # - Partitioned DML transactions may only contain the execution of a single
7599 # DML statement via ExecuteSql or ExecuteStreamingSql.
7600 #
7601 # - If any error is encountered during the execution of the partitioned DML
7602 # operation (for instance, a UNIQUE INDEX violation, division by zero, or a
7603 # value that cannot be stored due to schema constraints), then the
7604 # operation is stopped at that point and an error is returned. It is
7605 # possible that at this point, some partitions have been committed (or even
7606 # committed multiple times), and other partitions have not been run at all.
7607 #
7608 # Given the above, Partitioned DML is good fit for large, database-wide,
7609 # operations that are idempotent, such as deleting old rows from a very large
7610 # table.
7611 &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
Bu Sun Kim715bd7f2019-06-14 16:50:42 -07007612 #
7613 # Authorization to begin a Partitioned DML transaction requires
7614 # `spanner.databases.beginPartitionedDmlTransaction` permission
7615 # on the `session` resource.
7616 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07007617 &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
7618 #
7619 # Authorization to begin a read-write transaction requires
7620 # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
7621 # on the `session` resource.
7622 # transaction type has no options.
7623 },
Bu Sun Kim65020912020-05-20 12:08:20 -07007624 &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
7625 #
7626 # Authorization to begin a read-only transaction requires
7627 # `spanner.databases.beginReadOnlyTransaction` permission
7628 # on the `session` resource.
Bu Sun Kim65020912020-05-20 12:08:20 -07007629 &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
7630 # seconds. Guarantees that all writes that have committed more
7631 # than the specified number of seconds ago are visible. Because
7632 # Cloud Spanner chooses the exact timestamp, this mode works even if
7633 # the client&#x27;s local clock is substantially skewed from Cloud Spanner
7634 # commit timestamps.
7635 #
7636 # Useful for reading the freshest data available at a nearby
7637 # replica, while bounding the possible staleness if the local
7638 # replica has fallen behind.
7639 #
7640 # Note that this option can only be used in single-use
7641 # transactions.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07007642 &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
7643 #
7644 # This is useful for requesting fresher data than some previous
7645 # read, or data that is fresh enough to observe the effects of some
7646 # previously committed transaction whose timestamp is known.
7647 #
7648 # Note that this option can only be used in single-use transactions.
7649 #
7650 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
7651 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
7652 &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
7653 # are visible.
Bu Sun Kim65020912020-05-20 12:08:20 -07007654 &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
7655 # the Transaction message that describes the transaction.
7656 &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
7657 # old. The timestamp is chosen soon after the read is started.
7658 #
7659 # Guarantees that all writes that have committed more than the
7660 # specified number of seconds ago are visible. Because Cloud Spanner
7661 # chooses the exact timestamp, this mode works even if the client&#x27;s
7662 # local clock is substantially skewed from Cloud Spanner commit
7663 # timestamps.
7664 #
7665 # Useful for reading at nearby replicas without the distributed
7666 # timestamp negotiation overhead of `max_staleness`.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07007667 &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
7668 # reads at a specific timestamp are repeatable; the same read at
7669 # the same timestamp always returns the same data. If the
7670 # timestamp is in the future, the read will block until the
7671 # specified timestamp, modulo the read&#x27;s deadline.
7672 #
7673 # Useful for large scale consistent reads such as mapreduces, or
7674 # for coordinating many reads against a consistent snapshot of the
7675 # data.
7676 #
7677 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
7678 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
7679 },
7680 },
7681 &quot;singleUse&quot;: { # # Transactions # Execute the read or SQL query in a temporary transaction.
7682 # This is the most efficient way to execute a transaction that
7683 # consists of a single SQL query.
7684 #
7685 #
7686 # Each session can have at most one active transaction at a time. After the
7687 # active transaction is completed, the session can immediately be
7688 # re-used for the next transaction. It is not necessary to create a
7689 # new session for each transaction.
7690 #
7691 # # Transaction Modes
7692 #
7693 # Cloud Spanner supports three transaction modes:
7694 #
7695 # 1. Locking read-write. This type of transaction is the only way
7696 # to write data into Cloud Spanner. These transactions rely on
7697 # pessimistic locking and, if necessary, two-phase commit.
7698 # Locking read-write transactions may abort, requiring the
7699 # application to retry.
7700 #
7701 # 2. Snapshot read-only. This transaction type provides guaranteed
7702 # consistency across several reads, but does not allow
7703 # writes. Snapshot read-only transactions can be configured to
7704 # read at timestamps in the past. Snapshot read-only
7705 # transactions do not need to be committed.
7706 #
7707 # 3. Partitioned DML. This type of transaction is used to execute
7708 # a single Partitioned DML statement. Partitioned DML partitions
7709 # the key space and runs the DML statement over each partition
7710 # in parallel using separate, internal transactions that commit
7711 # independently. Partitioned DML transactions do not need to be
7712 # committed.
7713 #
7714 # For transactions that only read, snapshot read-only transactions
7715 # provide simpler semantics and are almost always faster. In
7716 # particular, read-only transactions do not take locks, so they do
7717 # not conflict with read-write transactions. As a consequence of not
7718 # taking locks, they also do not abort, so retry loops are not needed.
7719 #
7720 # Transactions may only read/write data in a single database. They
7721 # may, however, read/write data in different tables within that
7722 # database.
7723 #
7724 # ## Locking Read-Write Transactions
7725 #
7726 # Locking transactions may be used to atomically read-modify-write
7727 # data anywhere in a database. This type of transaction is externally
7728 # consistent.
7729 #
7730 # Clients should attempt to minimize the amount of time a transaction
7731 # is active. Faster transactions commit with higher probability
7732 # and cause less contention. Cloud Spanner attempts to keep read locks
7733 # active as long as the transaction continues to do reads, and the
7734 # transaction has not been terminated by
7735 # Commit or
7736 # Rollback. Long periods of
7737 # inactivity at the client may cause Cloud Spanner to release a
7738 # transaction&#x27;s locks and abort it.
7739 #
7740 # Conceptually, a read-write transaction consists of zero or more
7741 # reads or SQL statements followed by
7742 # Commit. At any time before
7743 # Commit, the client can send a
7744 # Rollback request to abort the
7745 # transaction.
7746 #
7747 # ### Semantics
7748 #
7749 # Cloud Spanner can commit the transaction if all read locks it acquired
7750 # are still valid at commit time, and it is able to acquire write
7751 # locks for all writes. Cloud Spanner can abort the transaction for any
7752 # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
7753 # that the transaction has not modified any user data in Cloud Spanner.
7754 #
7755 # Unless the transaction commits, Cloud Spanner makes no guarantees about
7756 # how long the transaction&#x27;s locks were held for. It is an error to
7757 # use Cloud Spanner locks for any sort of mutual exclusion other than
7758 # between Cloud Spanner transactions themselves.
7759 #
7760 # ### Retrying Aborted Transactions
7761 #
7762 # When a transaction aborts, the application can choose to retry the
7763 # whole transaction again. To maximize the chances of successfully
7764 # committing the retry, the client should execute the retry in the
7765 # same session as the original attempt. The original session&#x27;s lock
7766 # priority increases with each consecutive abort, meaning that each
7767 # attempt has a slightly better chance of success than the previous.
7768 #
7769 # Under some circumstances (e.g., many transactions attempting to
7770 # modify the same row(s)), a transaction can abort many times in a
7771 # short period before successfully committing. Thus, it is not a good
7772 # idea to cap the number of retries a transaction can attempt;
7773 # instead, it is better to limit the total amount of wall time spent
7774 # retrying.
7775 #
7776 # ### Idle Transactions
7777 #
7778 # A transaction is considered idle if it has no outstanding reads or
7779 # SQL queries and has not started a read or SQL query within the last 10
7780 # seconds. Idle transactions can be aborted by Cloud Spanner so that they
7781 # don&#x27;t hold on to locks indefinitely. In that case, the commit will
7782 # fail with error `ABORTED`.
7783 #
7784 # If this behavior is undesirable, periodically executing a simple
7785 # SQL query in the transaction (e.g., `SELECT 1`) prevents the
7786 # transaction from becoming idle.
7787 #
7788 # ## Snapshot Read-Only Transactions
7789 #
7790 # Snapshot read-only transactions provides a simpler method than
7791 # locking read-write transactions for doing several consistent
7792 # reads. However, this type of transaction does not support writes.
7793 #
7794 # Snapshot transactions do not take locks. Instead, they work by
7795 # choosing a Cloud Spanner timestamp, then executing all reads at that
7796 # timestamp. Since they do not acquire locks, they do not block
7797 # concurrent read-write transactions.
7798 #
7799 # Unlike locking read-write transactions, snapshot read-only
7800 # transactions never abort. They can fail if the chosen read
7801 # timestamp is garbage collected; however, the default garbage
7802 # collection policy is generous enough that most applications do not
7803 # need to worry about this in practice.
7804 #
7805 # Snapshot read-only transactions do not need to call
7806 # Commit or
7807 # Rollback (and in fact are not
7808 # permitted to do so).
7809 #
7810 # To execute a snapshot transaction, the client specifies a timestamp
7811 # bound, which tells Cloud Spanner how to choose a read timestamp.
7812 #
7813 # The types of timestamp bound are:
7814 #
7815 # - Strong (the default).
7816 # - Bounded staleness.
7817 # - Exact staleness.
7818 #
7819 # If the Cloud Spanner database to be read is geographically distributed,
7820 # stale read-only transactions can execute more quickly than strong
7821 # or read-write transaction, because they are able to execute far
7822 # from the leader replica.
7823 #
7824 # Each type of timestamp bound is discussed in detail below.
7825 #
7826 # ### Strong
7827 #
7828 # Strong reads are guaranteed to see the effects of all transactions
7829 # that have committed before the start of the read. Furthermore, all
7830 # rows yielded by a single read are consistent with each other -- if
7831 # any part of the read observes a transaction, all parts of the read
7832 # see the transaction.
7833 #
7834 # Strong reads are not repeatable: two consecutive strong read-only
7835 # transactions might return inconsistent results if there are
7836 # concurrent writes. If consistency across reads is required, the
7837 # reads should be executed within a transaction or at an exact read
7838 # timestamp.
7839 #
7840 # See TransactionOptions.ReadOnly.strong.
7841 #
7842 # ### Exact Staleness
7843 #
7844 # These timestamp bounds execute reads at a user-specified
7845 # timestamp. Reads at a timestamp are guaranteed to see a consistent
7846 # prefix of the global transaction history: they observe
7847 # modifications done by all transactions with a commit timestamp &lt;=
7848 # the read timestamp, and observe none of the modifications done by
7849 # transactions with a larger commit timestamp. They will block until
7850 # all conflicting transactions that may be assigned commit timestamps
7851 # &lt;= the read timestamp have finished.
7852 #
7853 # The timestamp can either be expressed as an absolute Cloud Spanner commit
7854 # timestamp or a staleness relative to the current time.
7855 #
7856 # These modes do not require a &quot;negotiation phase&quot; to pick a
7857 # timestamp. As a result, they execute slightly faster than the
7858 # equivalent boundedly stale concurrency modes. On the other hand,
7859 # boundedly stale reads usually return fresher results.
7860 #
7861 # See TransactionOptions.ReadOnly.read_timestamp and
7862 # TransactionOptions.ReadOnly.exact_staleness.
7863 #
7864 # ### Bounded Staleness
7865 #
7866 # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
7867 # subject to a user-provided staleness bound. Cloud Spanner chooses the
7868 # newest timestamp within the staleness bound that allows execution
7869 # of the reads at the closest available replica without blocking.
7870 #
7871 # All rows yielded are consistent with each other -- if any part of
7872 # the read observes a transaction, all parts of the read see the
7873 # transaction. Boundedly stale reads are not repeatable: two stale
7874 # reads, even if they use the same staleness bound, can execute at
7875 # different timestamps and thus return inconsistent results.
7876 #
7877 # Boundedly stale reads execute in two phases: the first phase
7878 # negotiates a timestamp among all replicas needed to serve the
7879 # read. In the second phase, reads are executed at the negotiated
7880 # timestamp.
7881 #
7882 # As a result of the two phase execution, bounded staleness reads are
7883 # usually a little slower than comparable exact staleness
7884 # reads. However, they are typically able to return fresher
7885 # results, and are more likely to execute at the closest replica.
7886 #
7887 # Because the timestamp negotiation requires up-front knowledge of
7888 # which rows will be read, it can only be used with single-use
7889 # read-only transactions.
7890 #
7891 # See TransactionOptions.ReadOnly.max_staleness and
7892 # TransactionOptions.ReadOnly.min_read_timestamp.
7893 #
7894 # ### Old Read Timestamps and Garbage Collection
7895 #
7896 # Cloud Spanner continuously garbage collects deleted and overwritten data
7897 # in the background to reclaim storage space. This process is known
7898 # as &quot;version GC&quot;. By default, version GC reclaims versions after they
7899 # are one hour old. Because of this, Cloud Spanner cannot perform reads
7900 # at read timestamps more than one hour in the past. This
7901 # restriction also applies to in-progress reads and/or SQL queries whose
7902 # timestamp become too old while executing. Reads and SQL queries with
7903 # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
7904 #
7905 # ## Partitioned DML Transactions
7906 #
7907 # Partitioned DML transactions are used to execute DML statements with a
7908 # different execution strategy that provides different, and often better,
7909 # scalability properties for large, table-wide operations than DML in a
7910 # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
7911 # should prefer using ReadWrite transactions.
7912 #
7913 # Partitioned DML partitions the keyspace and runs the DML statement on each
7914 # partition in separate, internal transactions. These transactions commit
7915 # automatically when complete, and run independently from one another.
7916 #
7917 # To reduce lock contention, this execution strategy only acquires read locks
7918 # on rows that match the WHERE clause of the statement. Additionally, the
7919 # smaller per-partition transactions hold locks for less time.
7920 #
7921 # That said, Partitioned DML is not a drop-in replacement for standard DML used
7922 # in ReadWrite transactions.
7923 #
7924 # - The DML statement must be fully-partitionable. Specifically, the statement
7925 # must be expressible as the union of many statements which each access only
7926 # a single row of the table.
7927 #
7928 # - The statement is not applied atomically to all rows of the table. Rather,
7929 # the statement is applied atomically to partitions of the table, in
7930 # independent transactions. Secondary index rows are updated atomically
7931 # with the base table rows.
7932 #
7933 # - Partitioned DML does not guarantee exactly-once execution semantics
7934 # against a partition. The statement will be applied at least once to each
7935 # partition. It is strongly recommended that the DML statement should be
7936 # idempotent to avoid unexpected results. For instance, it is potentially
7937 # dangerous to run a statement such as
7938 # `UPDATE table SET column = column + 1` as it could be run multiple times
7939 # against some rows.
7940 #
7941 # - The partitions are committed automatically - there is no support for
7942 # Commit or Rollback. If the call returns an error, or if the client issuing
7943 # the ExecuteSql call dies, it is possible that some rows had the statement
7944 # executed on them successfully. It is also possible that statement was
7945 # never executed against other rows.
7946 #
7947 # - Partitioned DML transactions may only contain the execution of a single
7948 # DML statement via ExecuteSql or ExecuteStreamingSql.
7949 #
7950 # - If any error is encountered during the execution of the partitioned DML
7951 # operation (for instance, a UNIQUE INDEX violation, division by zero, or a
7952 # value that cannot be stored due to schema constraints), then the
7953 # operation is stopped at that point and an error is returned. It is
7954 # possible that at this point, some partitions have been committed (or even
7955 # committed multiple times), and other partitions have not been run at all.
7956 #
7957 # Given the above, Partitioned DML is good fit for large, database-wide,
7958 # operations that are idempotent, such as deleting old rows from a very large
7959 # table.
7960 &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
7961 #
7962 # Authorization to begin a Partitioned DML transaction requires
7963 # `spanner.databases.beginPartitionedDmlTransaction` permission
7964 # on the `session` resource.
Bu Sun Kim65020912020-05-20 12:08:20 -07007965 },
7966 &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
7967 #
7968 # Authorization to begin a read-write transaction requires
7969 # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
7970 # on the `session` resource.
7971 # transaction type has no options.
7972 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07007973 &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
7974 #
7975 # Authorization to begin a read-only transaction requires
7976 # `spanner.databases.beginReadOnlyTransaction` permission
7977 # on the `session` resource.
7978 &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
7979 # seconds. Guarantees that all writes that have committed more
7980 # than the specified number of seconds ago are visible. Because
7981 # Cloud Spanner chooses the exact timestamp, this mode works even if
7982 # the client&#x27;s local clock is substantially skewed from Cloud Spanner
7983 # commit timestamps.
7984 #
7985 # Useful for reading the freshest data available at a nearby
7986 # replica, while bounding the possible staleness if the local
7987 # replica has fallen behind.
7988 #
7989 # Note that this option can only be used in single-use
7990 # transactions.
7991 &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
7992 #
7993 # This is useful for requesting fresher data than some previous
7994 # read, or data that is fresh enough to observe the effects of some
7995 # previously committed transaction whose timestamp is known.
7996 #
7997 # Note that this option can only be used in single-use transactions.
7998 #
7999 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
8000 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
8001 &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
8002 # are visible.
8003 &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
8004 # the Transaction message that describes the transaction.
8005 &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
8006 # old. The timestamp is chosen soon after the read is started.
8007 #
8008 # Guarantees that all writes that have committed more than the
8009 # specified number of seconds ago are visible. Because Cloud Spanner
8010 # chooses the exact timestamp, this mode works even if the client&#x27;s
8011 # local clock is substantially skewed from Cloud Spanner commit
8012 # timestamps.
8013 #
8014 # Useful for reading at nearby replicas without the distributed
8015 # timestamp negotiation overhead of `max_staleness`.
8016 &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
8017 # reads at a specific timestamp are repeatable; the same read at
8018 # the same timestamp always returns the same data. If the
8019 # timestamp is in the future, the read will block until the
8020 # specified timestamp, modulo the read&#x27;s deadline.
8021 #
8022 # Useful for large scale consistent reads such as mapreduces, or
8023 # for coordinating many reads against a consistent snapshot of the
8024 # data.
8025 #
8026 # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
8027 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
8028 },
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04008029 },
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04008030 },
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07008031 &quot;partitionToken&quot;: &quot;A String&quot;, # If present, results will be restricted to the specified partition
8032 # previously created using PartitionRead(). There must be an exact
8033 # match for the values of fields common to this message and the
8034 # PartitionReadRequest message used to create this partition_token.
8035 &quot;columns&quot;: [ # Required. The columns of table to be returned for each row matching
8036 # this request.
8037 &quot;A String&quot;,
8038 ],
8039 &quot;limit&quot;: &quot;A String&quot;, # If greater than zero, only the first `limit` rows are yielded. If `limit`
8040 # is zero, the default is no limit. A limit cannot be specified if
8041 # `partition_token` is set.
8042 &quot;table&quot;: &quot;A String&quot;, # Required. The name of the table in the database to be read.
Bu Sun Kim65020912020-05-20 12:08:20 -07008043 &quot;resumeToken&quot;: &quot;A String&quot;, # If this request is resuming a previously interrupted read,
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04008044 # `resume_token` should be copied from the last
8045 # PartialResultSet yielded before the interruption. Doing this
8046 # enables the new read to resume where the last read left off. The
8047 # rest of the request parameters must exactly match the request
8048 # that yielded this token.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07008049 &quot;keySet&quot;: { # `KeySet` defines a collection of Cloud Spanner keys and/or key ranges. All # Required. `key_set` identifies the rows to be yielded. `key_set` names the
8050 # primary keys of the rows in table to be yielded, unless index
8051 # is present. If index is present, then key_set instead names
8052 # index keys in index.
8053 #
8054 # If the partition_token field is empty, rows are yielded
8055 # in table primary key order (if index is empty) or index key order
8056 # (if index is non-empty). If the partition_token field is not
8057 # empty, rows will be yielded in an unspecified order.
8058 #
8059 # It is not an error for the `key_set` to name rows that do not
8060 # exist in the database. Read yields nothing for nonexistent rows.
8061 # the keys are expected to be in the same table or index. The keys need
8062 # not be sorted in any particular way.
8063 #
8064 # If the same key is specified multiple times in the set (for example
8065 # if two ranges, two keys, or a key and a range overlap), Cloud Spanner
8066 # behaves as if the key were only specified once.
8067 &quot;ranges&quot;: [ # A list of key ranges. See KeyRange for more information about
8068 # key range specifications.
8069 { # KeyRange represents a range of rows in a table or index.
8070 #
8071 # A range has a start key and an end key. These keys can be open or
8072 # closed, indicating if the range includes rows with that key.
8073 #
8074 # Keys are represented by lists, where the ith value in the list
8075 # corresponds to the ith component of the table or index primary key.
8076 # Individual values are encoded as described
8077 # here.
8078 #
8079 # For example, consider the following table definition:
8080 #
8081 # CREATE TABLE UserEvents (
8082 # UserName STRING(MAX),
8083 # EventDate STRING(10)
8084 # ) PRIMARY KEY(UserName, EventDate);
8085 #
8086 # The following keys name rows in this table:
8087 #
8088 # &quot;Bob&quot;, &quot;2014-09-23&quot;
8089 #
8090 # Since the `UserEvents` table&#x27;s `PRIMARY KEY` clause names two
8091 # columns, each `UserEvents` key has two elements; the first is the
8092 # `UserName`, and the second is the `EventDate`.
8093 #
8094 # Key ranges with multiple components are interpreted
8095 # lexicographically by component using the table or index key&#x27;s declared
8096 # sort order. For example, the following range returns all events for
8097 # user `&quot;Bob&quot;` that occurred in the year 2015:
8098 #
8099 # &quot;start_closed&quot;: [&quot;Bob&quot;, &quot;2015-01-01&quot;]
8100 # &quot;end_closed&quot;: [&quot;Bob&quot;, &quot;2015-12-31&quot;]
8101 #
8102 # Start and end keys can omit trailing key components. This affects the
8103 # inclusion and exclusion of rows that exactly match the provided key
8104 # components: if the key is closed, then rows that exactly match the
8105 # provided components are included; if the key is open, then rows
8106 # that exactly match are not included.
8107 #
8108 # For example, the following range includes all events for `&quot;Bob&quot;` that
8109 # occurred during and after the year 2000:
8110 #
8111 # &quot;start_closed&quot;: [&quot;Bob&quot;, &quot;2000-01-01&quot;]
8112 # &quot;end_closed&quot;: [&quot;Bob&quot;]
8113 #
8114 # The next example retrieves all events for `&quot;Bob&quot;`:
8115 #
8116 # &quot;start_closed&quot;: [&quot;Bob&quot;]
8117 # &quot;end_closed&quot;: [&quot;Bob&quot;]
8118 #
8119 # To retrieve events before the year 2000:
8120 #
8121 # &quot;start_closed&quot;: [&quot;Bob&quot;]
8122 # &quot;end_open&quot;: [&quot;Bob&quot;, &quot;2000-01-01&quot;]
8123 #
8124 # The following range includes all rows in the table:
8125 #
8126 # &quot;start_closed&quot;: []
8127 # &quot;end_closed&quot;: []
8128 #
8129 # This range returns all users whose `UserName` begins with any
8130 # character from A to C:
8131 #
8132 # &quot;start_closed&quot;: [&quot;A&quot;]
8133 # &quot;end_open&quot;: [&quot;D&quot;]
8134 #
8135 # This range returns all users whose `UserName` begins with B:
8136 #
8137 # &quot;start_closed&quot;: [&quot;B&quot;]
8138 # &quot;end_open&quot;: [&quot;C&quot;]
8139 #
8140 # Key ranges honor column sort order. For example, suppose a table is
8141 # defined as follows:
8142 #
8143 # CREATE TABLE DescendingSortedTable {
8144 # Key INT64,
8145 # ...
8146 # ) PRIMARY KEY(Key DESC);
8147 #
8148 # The following range retrieves all rows with key values between 1
8149 # and 100 inclusive:
8150 #
8151 # &quot;start_closed&quot;: [&quot;100&quot;]
8152 # &quot;end_closed&quot;: [&quot;1&quot;]
8153 #
8154 # Note that 100 is passed as the start, and 1 is passed as the end,
8155 # because `Key` is a descending column in the schema.
8156 &quot;startOpen&quot;: [ # If the start is open, then the range excludes rows whose first
8157 # `len(start_open)` key columns exactly match `start_open`.
8158 &quot;&quot;,
8159 ],
8160 &quot;endClosed&quot;: [ # If the end is closed, then the range includes all rows whose
8161 # first `len(end_closed)` key columns exactly match `end_closed`.
8162 &quot;&quot;,
8163 ],
8164 &quot;endOpen&quot;: [ # If the end is open, then the range excludes rows whose first
8165 # `len(end_open)` key columns exactly match `end_open`.
8166 &quot;&quot;,
8167 ],
8168 &quot;startClosed&quot;: [ # If the start is closed, then the range includes all rows whose
8169 # first `len(start_closed)` key columns exactly match `start_closed`.
8170 &quot;&quot;,
8171 ],
8172 },
8173 ],
8174 &quot;keys&quot;: [ # A list of specific keys. Entries in `keys` should have exactly as
8175 # many elements as there are columns in the primary or index key
8176 # with which this `KeySet` is used. Individual key values are
8177 # encoded as described here.
8178 [
8179 &quot;&quot;,
8180 ],
8181 ],
8182 &quot;all&quot;: True or False, # For convenience `all` can be set to `true` to indicate that this
8183 # `KeySet` matches all keys in the table or index. Note that any keys
8184 # specified in `keys` or `ranges` are only yielded once.
8185 },
8186 &quot;index&quot;: &quot;A String&quot;, # If non-empty, the name of an index on table. This index is
8187 # used instead of the table primary key when interpreting key_set
8188 # and sorting result rows. See key_set for further information.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04008189 }
8190
8191 x__xgafv: string, V1 error format.
8192 Allowed values
8193 1 - v1 error format
8194 2 - v2 error format
8195
8196Returns:
8197 An object of the form:
8198
8199 { # Partial results from a streaming read or SQL query. Streaming reads and
8200 # SQL queries better tolerate large result sets, large rows, and large
8201 # values, but are a little trickier to consume.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07008202 &quot;stats&quot;: { # Additional statistics about a ResultSet or PartialResultSet. # Query plan and execution statistics for the statement that produced this
8203 # streaming result set. These can be requested by setting
8204 # ExecuteSqlRequest.query_mode and are sent
8205 # only once with the last response in the stream.
8206 # This field will also be present in the last response for DML
8207 # statements.
8208 &quot;rowCountLowerBound&quot;: &quot;A String&quot;, # Partitioned DML does not offer exactly-once semantics, so it
8209 # returns a lower bound of the rows modified.
8210 &quot;queryPlan&quot;: { # Contains an ordered list of nodes appearing in the query plan. # QueryPlan for the query associated with this result.
8211 &quot;planNodes&quot;: [ # The nodes in the query plan. Plan nodes are returned in pre-order starting
8212 # with the plan root. Each PlanNode&#x27;s `id` corresponds to its index in
8213 # `plan_nodes`.
8214 { # Node information for nodes appearing in a QueryPlan.plan_nodes.
8215 &quot;displayName&quot;: &quot;A String&quot;, # The display name for the node.
8216 &quot;executionStats&quot;: { # The execution statistics associated with the node, contained in a group of
8217 # key-value pairs. Only present if the plan was returned as a result of a
8218 # profile query. For example, number of executions, number of rows/time per
8219 # execution etc.
8220 &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
8221 },
8222 &quot;shortRepresentation&quot;: { # Condensed representation of a node and its subtree. Only present for # Condensed representation for SCALAR nodes.
8223 # `SCALAR` PlanNode(s).
8224 &quot;subqueries&quot;: { # A mapping of (subquery variable name) -&gt; (subquery node id) for cases
8225 # where the `description` string of this node references a `SCALAR`
8226 # subquery contained in the expression subtree rooted at this node. The
8227 # referenced `SCALAR` subquery may not necessarily be a direct child of
8228 # this node.
8229 &quot;a_key&quot;: 42,
8230 },
8231 &quot;description&quot;: &quot;A String&quot;, # A string representation of the expression subtree rooted at this node.
8232 },
8233 &quot;metadata&quot;: { # Attributes relevant to the node contained in a group of key-value pairs.
8234 # For example, a Parameter Reference node could have the following
8235 # information in its metadata:
8236 #
8237 # {
8238 # &quot;parameter_reference&quot;: &quot;param1&quot;,
8239 # &quot;parameter_type&quot;: &quot;array&quot;
8240 # }
8241 &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
8242 },
8243 &quot;childLinks&quot;: [ # List of child node `index`es and their relationship to this parent.
8244 { # Metadata associated with a parent-child relationship appearing in a
8245 # PlanNode.
8246 &quot;type&quot;: &quot;A String&quot;, # The type of the link. For example, in Hash Joins this could be used to
8247 # distinguish between the build child and the probe child, or in the case
8248 # of the child being an output variable, to represent the tag associated
8249 # with the output variable.
8250 &quot;variable&quot;: &quot;A String&quot;, # Only present if the child node is SCALAR and corresponds
8251 # to an output variable of the parent node. The field carries the name of
8252 # the output variable.
8253 # For example, a `TableScan` operator that reads rows from a table will
8254 # have child links to the `SCALAR` nodes representing the output variables
8255 # created for each column that is read by the operator. The corresponding
8256 # `variable` fields will be set to the variable names assigned to the
8257 # columns.
8258 &quot;childIndex&quot;: 42, # The node to which the link points.
8259 },
8260 ],
8261 &quot;index&quot;: 42, # The `PlanNode`&#x27;s index in node list.
8262 &quot;kind&quot;: &quot;A String&quot;, # Used to determine the type of node. May be needed for visualizing
8263 # different kinds of nodes differently. For example, If the node is a
8264 # SCALAR node, it will have a condensed representation
8265 # which can be used to directly embed a description of the node in its
8266 # parent.
8267 },
8268 ],
8269 },
8270 &quot;rowCountExact&quot;: &quot;A String&quot;, # Standard DML returns an exact count of rows that were modified.
8271 &quot;queryStats&quot;: { # Aggregated statistics from the execution of the query. Only present when
8272 # the query is profiled. For example, a query could return the statistics as
8273 # follows:
8274 #
8275 # {
8276 # &quot;rows_returned&quot;: &quot;3&quot;,
8277 # &quot;elapsed_time&quot;: &quot;1.22 secs&quot;,
8278 # &quot;cpu_time&quot;: &quot;1.19 secs&quot;
8279 # }
8280 &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
8281 },
8282 },
8283 &quot;chunkedValue&quot;: True or False, # If true, then the final value in values is chunked, and must
8284 # be combined with more values from subsequent `PartialResultSet`s
8285 # to obtain a complete field value.
Bu Sun Kim65020912020-05-20 12:08:20 -07008286 &quot;values&quot;: [ # A streamed result set consists of a stream of values, which might
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04008287 # be split into many `PartialResultSet` messages to accommodate
8288 # large rows and/or large values. Every N complete values defines a
8289 # row, where N is equal to the number of entries in
8290 # metadata.row_type.fields.
8291 #
8292 # Most values are encoded based on type as described
8293 # here.
8294 #
Bu Sun Kim65020912020-05-20 12:08:20 -07008295 # It is possible that the last value in values is &quot;chunked&quot;,
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04008296 # meaning that the rest of the value is sent in subsequent
8297 # `PartialResultSet`(s). This is denoted by the chunked_value
8298 # field. Two or more chunked values can be merged to form a
8299 # complete value as follows:
8300 #
8301 # * `bool/number/null`: cannot be chunked
8302 # * `string`: concatenate the strings
8303 # * `list`: concatenate the lists. If the last element in a list is a
8304 # `string`, `list`, or `object`, merge it with the first element in
8305 # the next list by applying these rules recursively.
8306 # * `object`: concatenate the (field name, field value) pairs. If a
8307 # field name is duplicated, then apply these rules recursively
8308 # to merge the field values.
8309 #
8310 # Some examples of merging:
8311 #
8312 # # Strings are concatenated.
Bu Sun Kim65020912020-05-20 12:08:20 -07008313 # &quot;foo&quot;, &quot;bar&quot; =&gt; &quot;foobar&quot;
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04008314 #
8315 # # Lists of non-strings are concatenated.
Dan O'Mearadd494642020-05-01 07:42:23 -07008316 # [2, 3], [4] =&gt; [2, 3, 4]
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04008317 #
8318 # # Lists are concatenated, but the last and first elements are merged
8319 # # because they are strings.
Bu Sun Kim65020912020-05-20 12:08:20 -07008320 # [&quot;a&quot;, &quot;b&quot;], [&quot;c&quot;, &quot;d&quot;] =&gt; [&quot;a&quot;, &quot;bc&quot;, &quot;d&quot;]
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04008321 #
8322 # # Lists are concatenated, but the last and first elements are merged
8323 # # because they are lists. Recursively, the last and first elements
8324 # # of the inner lists are merged because they are strings.
Bu Sun Kim65020912020-05-20 12:08:20 -07008325 # [&quot;a&quot;, [&quot;b&quot;, &quot;c&quot;]], [[&quot;d&quot;], &quot;e&quot;] =&gt; [&quot;a&quot;, [&quot;b&quot;, &quot;cd&quot;], &quot;e&quot;]
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04008326 #
8327 # # Non-overlapping object fields are combined.
Bu Sun Kim65020912020-05-20 12:08:20 -07008328 # {&quot;a&quot;: &quot;1&quot;}, {&quot;b&quot;: &quot;2&quot;} =&gt; {&quot;a&quot;: &quot;1&quot;, &quot;b&quot;: &quot;2&quot;}
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04008329 #
8330 # # Overlapping object fields are merged.
Bu Sun Kim65020912020-05-20 12:08:20 -07008331 # {&quot;a&quot;: &quot;1&quot;}, {&quot;a&quot;: &quot;2&quot;} =&gt; {&quot;a&quot;: &quot;12&quot;}
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04008332 #
8333 # # Examples of merging objects containing lists of strings.
Bu Sun Kim65020912020-05-20 12:08:20 -07008334 # {&quot;a&quot;: [&quot;1&quot;]}, {&quot;a&quot;: [&quot;2&quot;]} =&gt; {&quot;a&quot;: [&quot;12&quot;]}
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04008335 #
8336 # For a more complete example, suppose a streaming SQL query is
8337 # yielding a result set whose rows contain a single string
8338 # field. The following `PartialResultSet`s might be yielded:
8339 #
8340 # {
Bu Sun Kim65020912020-05-20 12:08:20 -07008341 # &quot;metadata&quot;: { ... }
8342 # &quot;values&quot;: [&quot;Hello&quot;, &quot;W&quot;]
8343 # &quot;chunked_value&quot;: true
8344 # &quot;resume_token&quot;: &quot;Af65...&quot;
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04008345 # }
8346 # {
Bu Sun Kim65020912020-05-20 12:08:20 -07008347 # &quot;values&quot;: [&quot;orl&quot;]
8348 # &quot;chunked_value&quot;: true
8349 # &quot;resume_token&quot;: &quot;Bqp2...&quot;
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04008350 # }
8351 # {
Bu Sun Kim65020912020-05-20 12:08:20 -07008352 # &quot;values&quot;: [&quot;d&quot;]
8353 # &quot;resume_token&quot;: &quot;Zx1B...&quot;
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04008354 # }
8355 #
8356 # This sequence of `PartialResultSet`s encodes two rows, one
Bu Sun Kim65020912020-05-20 12:08:20 -07008357 # containing the field value `&quot;Hello&quot;`, and a second containing the
8358 # field value `&quot;World&quot; = &quot;W&quot; + &quot;orl&quot; + &quot;d&quot;`.
8359 &quot;&quot;,
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04008360 ],
Bu Sun Kim65020912020-05-20 12:08:20 -07008361 &quot;metadata&quot;: { # Metadata about a ResultSet or PartialResultSet. # Metadata about the result set, such as row type information.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04008362 # Only present in the first response.
Bu Sun Kim65020912020-05-20 12:08:20 -07008363 &quot;rowType&quot;: { # `StructType` defines the fields of a STRUCT type. # Indicates the field names and types for the rows in the result
8364 # set. For example, a SQL query like `&quot;SELECT UserId, UserName FROM
8365 # Users&quot;` could return a `row_type` value like:
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04008366 #
Bu Sun Kim65020912020-05-20 12:08:20 -07008367 # &quot;fields&quot;: [
8368 # { &quot;name&quot;: &quot;UserId&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;INT64&quot; } },
8369 # { &quot;name&quot;: &quot;UserName&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;STRING&quot; } },
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04008370 # ]
Bu Sun Kim65020912020-05-20 12:08:20 -07008371 &quot;fields&quot;: [ # The list of fields that make up this struct. Order is
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04008372 # significant, because values of this struct type are represented as
8373 # lists, where the order of field values matches the order of
8374 # fields in the StructType. In turn, the order of fields
8375 # matches the order of columns in a read request, or the order of
8376 # fields in the `SELECT` clause of a query.
8377 { # Message representing a single field of a struct.
Bu Sun Kim65020912020-05-20 12:08:20 -07008378 &quot;name&quot;: &quot;A String&quot;, # The name of the field. For reads, this is the column name. For
8379 # SQL queries, it is the column alias (e.g., `&quot;Word&quot;` in the
8380 # query `&quot;SELECT &#x27;hello&#x27; AS Word&quot;`), or the column name (e.g.,
8381 # `&quot;ColName&quot;` in the query `&quot;SELECT ColName FROM Table&quot;`). Some
8382 # columns might have an empty name (e.g., `&quot;SELECT
8383 # UPPER(ColName)&quot;`). Note that a query result can contain
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04008384 # multiple fields with the same name.
Bu Sun Kim65020912020-05-20 12:08:20 -07008385 &quot;type&quot;: # Object with schema name: Type # The type of the field.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04008386 },
8387 ],
8388 },
Bu Sun Kim65020912020-05-20 12:08:20 -07008389 &quot;transaction&quot;: { # A transaction. # If the read or SQL query began a transaction as a side-effect, the
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04008390 # information about the new transaction is yielded here.
Bu Sun Kim4ed7d3f2020-05-27 12:20:54 -07008391 &quot;readTimestamp&quot;: &quot;A String&quot;, # For snapshot read-only transactions, the read timestamp chosen
8392 # for the transaction. Not returned by default: see
8393 # TransactionOptions.ReadOnly.return_read_timestamp.
8394 #
8395 # A timestamp in RFC3339 UTC &quot;Zulu&quot; format, accurate to nanoseconds.
8396 # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
Bu Sun Kim65020912020-05-20 12:08:20 -07008397 &quot;id&quot;: &quot;A String&quot;, # `id` may be used to identify the transaction in subsequent
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04008398 # Read,
8399 # ExecuteSql,
8400 # Commit, or
8401 # Rollback calls.
8402 #
8403 # Single-use read-only transactions do not have IDs, because
8404 # single-use transactions do not support multiple requests.
8405 },
8406 },
Bu Sun Kim65020912020-05-20 12:08:20 -07008407 &quot;resumeToken&quot;: &quot;A String&quot;, # Streaming calls might be interrupted for a variety of reasons, such
8408 # as TCP connection loss. If this occurs, the stream of results can
8409 # be resumed by re-sending the original request and including
8410 # `resume_token`. Note that executing any other transaction in the
8411 # same session invalidates the token.
Sai Cheemalapatic30d2b52017-03-13 12:12:03 -04008412 }</pre>
8413</div>
8414
8415</body></html>