docs: update generated docs (#981)

diff --git a/docs/dyn/spanner_v1.projects.instances.databases.sessions.html b/docs/dyn/spanner_v1.projects.instances.databases.sessions.html
index 064238a..e5b5372 100644
--- a/docs/dyn/spanner_v1.projects.instances.databases.sessions.html
+++ b/docs/dyn/spanner_v1.projects.instances.databases.sessions.html
@@ -102,7 +102,7 @@
   <code><a href="#get">get(name, x__xgafv=None)</a></code></p>
 <p class="firstline">Gets a session. Returns `NOT_FOUND` if the session does not exist.</p>
 <p class="toc_element">
-  <code><a href="#list">list(database, filter=None, pageSize=None, pageToken=None, x__xgafv=None)</a></code></p>
+  <code><a href="#list">list(database, filter=None, pageToken=None, pageSize=None, x__xgafv=None)</a></code></p>
 <p class="firstline">Lists all sessions in a given database.</p>
 <p class="toc_element">
   <code><a href="#list_next">list_next(previous_request, previous_response)</a></code></p>
@@ -138,8 +138,9 @@
 { # The request for BatchCreateSessions.
     &quot;sessionTemplate&quot;: { # A session in the Cloud Spanner API. # Parameters to be applied to each created session.
       &quot;createTime&quot;: &quot;A String&quot;, # Output only. The timestamp when the session is created.
-      &quot;name&quot;: &quot;A String&quot;, # The name of the session. This is always system-assigned; values provided
-          # when creating a session are ignored.
+      &quot;name&quot;: &quot;A String&quot;, # Output only. The name of the session. This is always system-assigned.
+      &quot;approximateLastUseTime&quot;: &quot;A String&quot;, # Output only. The approximate timestamp when the session is last used. It is
+          # typically earlier than the actual last use time.
       &quot;labels&quot;: { # The labels for the session.
           #
           #  * Label keys must be between 1 and 63 characters long and must conform to
@@ -151,8 +152,6 @@
           # See https://goo.gl/xmQnxf for more information on and examples of labels.
         &quot;a_key&quot;: &quot;A String&quot;,
       },
-      &quot;approximateLastUseTime&quot;: &quot;A String&quot;, # Output only. The approximate timestamp when the session is last used. It is
-          # typically earlier than the actual last use time.
     },
     &quot;sessionCount&quot;: 42, # Required. The number of sessions to be created in this batch call.
         # The API may return fewer than the requested number of sessions. If a
@@ -173,8 +172,9 @@
     &quot;session&quot;: [ # The freshly created sessions.
       { # A session in the Cloud Spanner API.
         &quot;createTime&quot;: &quot;A String&quot;, # Output only. The timestamp when the session is created.
-        &quot;name&quot;: &quot;A String&quot;, # The name of the session. This is always system-assigned; values provided
-            # when creating a session are ignored.
+        &quot;name&quot;: &quot;A String&quot;, # Output only. The name of the session. This is always system-assigned.
+        &quot;approximateLastUseTime&quot;: &quot;A String&quot;, # Output only. The approximate timestamp when the session is last used. It is
+            # typically earlier than the actual last use time.
         &quot;labels&quot;: { # The labels for the session.
             #
             #  * Label keys must be between 1 and 63 characters long and must conform to
@@ -186,8 +186,6 @@
             # See https://goo.gl/xmQnxf for more information on and examples of labels.
           &quot;a_key&quot;: &quot;A String&quot;,
         },
-        &quot;approximateLastUseTime&quot;: &quot;A String&quot;, # Output only. The approximate timestamp when the session is last used. It is
-            # typically earlier than the actual last use time.
       },
     ],
   }</pre>
@@ -209,10 +207,11 @@
     &quot;options&quot;: { # # Transactions # Required. Options for the new transaction.
         #
         #
-        # Each session can have at most one active transaction at a time. After the
-        # active transaction is completed, the session can immediately be
-        # re-used for the next transaction. It is not necessary to create a
-        # new session for each transaction.
+        # Each session can have at most one active transaction at a time (note that
+        # standalone reads and queries use a transaction internally and do count
+        # towards the one transaction limit). After the active transaction is
+        # completed, the session can immediately be re-used for the next transaction.
+        # It is not necessary to create a new session for each transaction.
         #
         # # Transaction Modes
         #
@@ -483,12 +482,6 @@
         # Given the above, Partitioned DML is good fit for large, database-wide,
         # operations that are idempotent, such as deleting old rows from a very large
         # table.
-      &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
-          #
-          # Authorization to begin a Partitioned DML transaction requires
-          # `spanner.databases.beginPartitionedDmlTransaction` permission
-          # on the `session` resource.
-      },
       &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
           #
           # Authorization to begin a read-write transaction requires
@@ -501,6 +494,39 @@
           # Authorization to begin a read-only transaction requires
           # `spanner.databases.beginReadOnlyTransaction` permission
           # on the `session` resource.
+        &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
+            # reads at a specific timestamp are repeatable; the same read at
+            # the same timestamp always returns the same data. If the
+            # timestamp is in the future, the read will block until the
+            # specified timestamp, modulo the read&#x27;s deadline.
+            #
+            # Useful for large scale consistent reads such as mapreduces, or
+            # for coordinating many reads against a consistent snapshot of the
+            # data.
+            #
+            # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+            # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+        &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
+            #
+            # This is useful for requesting fresher data than some previous
+            # read, or data that is fresh enough to observe the effects of some
+            # previously committed transaction whose timestamp is known.
+            #
+            # Note that this option can only be used in single-use transactions.
+            #
+            # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+            # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+        &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
+            # old. The timestamp is chosen soon after the read is started.
+            #
+            # Guarantees that all writes that have committed more than the
+            # specified number of seconds ago are visible. Because Cloud Spanner
+            # chooses the exact timestamp, this mode works even if the client&#x27;s
+            # local clock is substantially skewed from Cloud Spanner commit
+            # timestamps.
+            #
+            # Useful for reading at nearby replicas without the distributed
+            # timestamp negotiation overhead of `max_staleness`.
         &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
             # seconds. Guarantees that all writes that have committed more
             # than the specified number of seconds ago are visible. Because
@@ -514,43 +540,16 @@
             #
             # Note that this option can only be used in single-use
             # transactions.
-        &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
-            #
-            # This is useful for requesting fresher data than some previous
-            # read, or data that is fresh enough to observe the effects of some
-            # previously committed transaction whose timestamp is known.
-            #
-            # Note that this option can only be used in single-use transactions.
-            #
-            # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-            # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-        &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
-            # are visible.
         &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
             # the Transaction message that describes the transaction.
-        &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
-            # old. The timestamp is chosen soon after the read is started.
-            #
-            # Guarantees that all writes that have committed more than the
-            # specified number of seconds ago are visible. Because Cloud Spanner
-            # chooses the exact timestamp, this mode works even if the client&#x27;s
-            # local clock is substantially skewed from Cloud Spanner commit
-            # timestamps.
-            #
-            # Useful for reading at nearby replicas without the distributed
-            # timestamp negotiation overhead of `max_staleness`.
-        &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
-            # reads at a specific timestamp are repeatable; the same read at
-            # the same timestamp always returns the same data. If the
-            # timestamp is in the future, the read will block until the
-            # specified timestamp, modulo the read&#x27;s deadline.
-            #
-            # Useful for large scale consistent reads such as mapreduces, or
-            # for coordinating many reads against a consistent snapshot of the
-            # data.
-            #
-            # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-            # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+        &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
+            # are visible.
+      },
+      &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
+          #
+          # Authorization to begin a Partitioned DML transaction requires
+          # `spanner.databases.beginPartitionedDmlTransaction` permission
+          # on the `session` resource.
       },
     },
   }
@@ -592,13 +591,18 @@
 reasons. If `Commit` returns `ABORTED`, the caller should re-attempt
 the transaction from the beginning, re-using the same session.
 
+On very rare occasions, `Commit` might return `UNKNOWN`. This can happen,
+for example, if the client job experiences a 1+ hour networking failure.
+At that point, Cloud Spanner has lost track of the transaction outcome and
+we recommend that you perform another read from the database to see the
+state of things as they are now.
+
 Args:
   session: string, Required. The session in which the transaction to be committed is running. (required)
   body: object, The request body.
     The object takes the form of:
 
 { # The request for Commit.
-    &quot;transactionId&quot;: &quot;A String&quot;, # Commit a previously-started transaction.
     &quot;singleUseTransaction&quot;: { # # Transactions # Execute mutations in a temporary transaction. Note that unlike
         # commit of a previously-started transaction, commit with a
         # temporary transaction is non-idempotent. That is, if the
@@ -610,10 +614,11 @@
         # Commit instead.
         #
         #
-        # Each session can have at most one active transaction at a time. After the
-        # active transaction is completed, the session can immediately be
-        # re-used for the next transaction. It is not necessary to create a
-        # new session for each transaction.
+        # Each session can have at most one active transaction at a time (note that
+        # standalone reads and queries use a transaction internally and do count
+        # towards the one transaction limit). After the active transaction is
+        # completed, the session can immediately be re-used for the next transaction.
+        # It is not necessary to create a new session for each transaction.
         #
         # # Transaction Modes
         #
@@ -884,12 +889,6 @@
         # Given the above, Partitioned DML is good fit for large, database-wide,
         # operations that are idempotent, such as deleting old rows from a very large
         # table.
-      &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
-          #
-          # Authorization to begin a Partitioned DML transaction requires
-          # `spanner.databases.beginPartitionedDmlTransaction` permission
-          # on the `session` resource.
-      },
       &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
           #
           # Authorization to begin a read-write transaction requires
@@ -902,6 +901,39 @@
           # Authorization to begin a read-only transaction requires
           # `spanner.databases.beginReadOnlyTransaction` permission
           # on the `session` resource.
+        &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
+            # reads at a specific timestamp are repeatable; the same read at
+            # the same timestamp always returns the same data. If the
+            # timestamp is in the future, the read will block until the
+            # specified timestamp, modulo the read&#x27;s deadline.
+            #
+            # Useful for large scale consistent reads such as mapreduces, or
+            # for coordinating many reads against a consistent snapshot of the
+            # data.
+            #
+            # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+            # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+        &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
+            #
+            # This is useful for requesting fresher data than some previous
+            # read, or data that is fresh enough to observe the effects of some
+            # previously committed transaction whose timestamp is known.
+            #
+            # Note that this option can only be used in single-use transactions.
+            #
+            # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+            # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+        &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
+            # old. The timestamp is chosen soon after the read is started.
+            #
+            # Guarantees that all writes that have committed more than the
+            # specified number of seconds ago are visible. Because Cloud Spanner
+            # chooses the exact timestamp, this mode works even if the client&#x27;s
+            # local clock is substantially skewed from Cloud Spanner commit
+            # timestamps.
+            #
+            # Useful for reading at nearby replicas without the distributed
+            # timestamp negotiation overhead of `max_staleness`.
         &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
             # seconds. Guarantees that all writes that have committed more
             # than the specified number of seconds ago are visible. Because
@@ -915,78 +947,27 @@
             #
             # Note that this option can only be used in single-use
             # transactions.
-        &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
-            #
-            # This is useful for requesting fresher data than some previous
-            # read, or data that is fresh enough to observe the effects of some
-            # previously committed transaction whose timestamp is known.
-            #
-            # Note that this option can only be used in single-use transactions.
-            #
-            # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-            # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-        &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
-            # are visible.
         &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
             # the Transaction message that describes the transaction.
-        &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
-            # old. The timestamp is chosen soon after the read is started.
-            #
-            # Guarantees that all writes that have committed more than the
-            # specified number of seconds ago are visible. Because Cloud Spanner
-            # chooses the exact timestamp, this mode works even if the client&#x27;s
-            # local clock is substantially skewed from Cloud Spanner commit
-            # timestamps.
-            #
-            # Useful for reading at nearby replicas without the distributed
-            # timestamp negotiation overhead of `max_staleness`.
-        &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
-            # reads at a specific timestamp are repeatable; the same read at
-            # the same timestamp always returns the same data. If the
-            # timestamp is in the future, the read will block until the
-            # specified timestamp, modulo the read&#x27;s deadline.
-            #
-            # Useful for large scale consistent reads such as mapreduces, or
-            # for coordinating many reads against a consistent snapshot of the
-            # data.
-            #
-            # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-            # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+        &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
+            # are visible.
+      },
+      &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
+          #
+          # Authorization to begin a Partitioned DML transaction requires
+          # `spanner.databases.beginPartitionedDmlTransaction` permission
+          # on the `session` resource.
       },
     },
+    &quot;transactionId&quot;: &quot;A String&quot;, # Commit a previously-started transaction.
     &quot;mutations&quot;: [ # The mutations to be executed when this transaction commits. All
         # mutations are applied atomically, in the order they appear in
         # this list.
       { # A modification to one or more Cloud Spanner rows.  Mutations can be
           # applied to a Cloud Spanner database by sending them in a
           # Commit call.
-        &quot;insert&quot;: { # Arguments to insert, update, insert_or_update, and # Insert new rows in a table. If any of the rows already exist,
-            # the write or transaction fails with error `ALREADY_EXISTS`.
-            # replace operations.
-          &quot;table&quot;: &quot;A String&quot;, # Required. The table whose rows will be written.
-          &quot;values&quot;: [ # The values to be written. `values` can contain more than one
-              # list of values. If it does, then multiple rows are written, one
-              # for each entry in `values`. Each list in `values` must have
-              # exactly as many entries as there are entries in columns
-              # above. Sending multiple lists is equivalent to sending multiple
-              # `Mutation`s, each containing one `values` entry and repeating
-              # table and columns. Individual values in each list are
-              # encoded as described here.
-            [
-              &quot;&quot;,
-            ],
-          ],
-          &quot;columns&quot;: [ # The names of the columns in table to be written.
-              #
-              # The list of columns must contain enough columns to allow
-              # Cloud Spanner to derive values for all primary key columns in the
-              # row(s) to be modified.
-            &quot;A String&quot;,
-          ],
-        },
         &quot;delete&quot;: { # Arguments to delete operations. # Delete rows from a table. Succeeds whether or not the named
             # rows were present.
-          &quot;table&quot;: &quot;A String&quot;, # Required. The table whose rows will be deleted.
           &quot;keySet&quot;: { # `KeySet` defines a collection of Cloud Spanner keys and/or key ranges. All # Required. The primary keys of the rows within table to delete.  The
               # primary keys must be specified in the order in which they appear in the
               # `PRIMARY KEY()` clause of the table&#x27;s equivalent DDL statement (the DDL
@@ -1088,22 +1069,22 @@
                   #
                   # Note that 100 is passed as the start, and 1 is passed as the end,
                   # because `Key` is a descending column in the schema.
-                &quot;startOpen&quot;: [ # If the start is open, then the range excludes rows whose first
-                    # `len(start_open)` key columns exactly match `start_open`.
-                  &quot;&quot;,
-                ],
                 &quot;endClosed&quot;: [ # If the end is closed, then the range includes all rows whose
                     # first `len(end_closed)` key columns exactly match `end_closed`.
                   &quot;&quot;,
                 ],
-                &quot;endOpen&quot;: [ # If the end is open, then the range excludes rows whose first
-                    # `len(end_open)` key columns exactly match `end_open`.
-                  &quot;&quot;,
-                ],
                 &quot;startClosed&quot;: [ # If the start is closed, then the range includes all rows whose
                     # first `len(start_closed)` key columns exactly match `start_closed`.
                   &quot;&quot;,
                 ],
+                &quot;startOpen&quot;: [ # If the start is open, then the range excludes rows whose first
+                    # `len(start_open)` key columns exactly match `start_open`.
+                  &quot;&quot;,
+                ],
+                &quot;endOpen&quot;: [ # If the end is open, then the range excludes rows whose first
+                    # `len(end_open)` key columns exactly match `end_open`.
+                  &quot;&quot;,
+                ],
               },
             ],
             &quot;keys&quot;: [ # A list of specific keys. Entries in `keys` should have exactly as
@@ -1118,6 +1099,7 @@
                 # `KeySet` matches all keys in the table or index. Note that any keys
                 # specified in `keys` or `ranges` are only yielded once.
           },
+          &quot;table&quot;: &quot;A String&quot;, # Required. The table whose rows will be deleted.
         },
         &quot;replace&quot;: { # Arguments to insert, update, insert_or_update, and # Like insert, except that if the row already exists, it is
             # deleted, and the column values provided are inserted
@@ -1130,6 +1112,13 @@
             # child rows before you replace the parent row.
             # replace operations.
           &quot;table&quot;: &quot;A String&quot;, # Required. The table whose rows will be written.
+          &quot;columns&quot;: [ # The names of the columns in table to be written.
+              #
+              # The list of columns must contain enough columns to allow
+              # Cloud Spanner to derive values for all primary key columns in the
+              # row(s) to be modified.
+            &quot;A String&quot;,
+          ],
           &quot;values&quot;: [ # The values to be written. `values` can contain more than one
               # list of values. If it does, then multiple rows are written, one
               # for each entry in `values`. Each list in `values` must have
@@ -1142,6 +1131,11 @@
               &quot;&quot;,
             ],
           ],
+        },
+        &quot;insert&quot;: { # Arguments to insert, update, insert_or_update, and # Insert new rows in a table. If any of the rows already exist,
+            # the write or transaction fails with error `ALREADY_EXISTS`.
+            # replace operations.
+          &quot;table&quot;: &quot;A String&quot;, # Required. The table whose rows will be written.
           &quot;columns&quot;: [ # The names of the columns in table to be written.
               #
               # The list of columns must contain enough columns to allow
@@ -1149,6 +1143,42 @@
               # row(s) to be modified.
             &quot;A String&quot;,
           ],
+          &quot;values&quot;: [ # The values to be written. `values` can contain more than one
+              # list of values. If it does, then multiple rows are written, one
+              # for each entry in `values`. Each list in `values` must have
+              # exactly as many entries as there are entries in columns
+              # above. Sending multiple lists is equivalent to sending multiple
+              # `Mutation`s, each containing one `values` entry and repeating
+              # table and columns. Individual values in each list are
+              # encoded as described here.
+            [
+              &quot;&quot;,
+            ],
+          ],
+        },
+        &quot;update&quot;: { # Arguments to insert, update, insert_or_update, and # Update existing rows in a table. If any of the rows does not
+            # already exist, the transaction fails with error `NOT_FOUND`.
+            # replace operations.
+          &quot;table&quot;: &quot;A String&quot;, # Required. The table whose rows will be written.
+          &quot;columns&quot;: [ # The names of the columns in table to be written.
+              #
+              # The list of columns must contain enough columns to allow
+              # Cloud Spanner to derive values for all primary key columns in the
+              # row(s) to be modified.
+            &quot;A String&quot;,
+          ],
+          &quot;values&quot;: [ # The values to be written. `values` can contain more than one
+              # list of values. If it does, then multiple rows are written, one
+              # for each entry in `values`. Each list in `values` must have
+              # exactly as many entries as there are entries in columns
+              # above. Sending multiple lists is equivalent to sending multiple
+              # `Mutation`s, each containing one `values` entry and repeating
+              # table and columns. Individual values in each list are
+              # encoded as described here.
+            [
+              &quot;&quot;,
+            ],
+          ],
         },
         &quot;insertOrUpdate&quot;: { # Arguments to insert, update, insert_or_update, and # Like insert, except that if the row already exists, then
             # its column values are overwritten with the ones provided. Any
@@ -1159,6 +1189,13 @@
             # even when the row already exists and will therefore actually be updated.
             # replace operations.
           &quot;table&quot;: &quot;A String&quot;, # Required. The table whose rows will be written.
+          &quot;columns&quot;: [ # The names of the columns in table to be written.
+              #
+              # The list of columns must contain enough columns to allow
+              # Cloud Spanner to derive values for all primary key columns in the
+              # row(s) to be modified.
+            &quot;A String&quot;,
+          ],
           &quot;values&quot;: [ # The values to be written. `values` can contain more than one
               # list of values. If it does, then multiple rows are written, one
               # for each entry in `values`. Each list in `values` must have
@@ -1171,37 +1208,6 @@
               &quot;&quot;,
             ],
           ],
-          &quot;columns&quot;: [ # The names of the columns in table to be written.
-              #
-              # The list of columns must contain enough columns to allow
-              # Cloud Spanner to derive values for all primary key columns in the
-              # row(s) to be modified.
-            &quot;A String&quot;,
-          ],
-        },
-        &quot;update&quot;: { # Arguments to insert, update, insert_or_update, and # Update existing rows in a table. If any of the rows does not
-            # already exist, the transaction fails with error `NOT_FOUND`.
-            # replace operations.
-          &quot;table&quot;: &quot;A String&quot;, # Required. The table whose rows will be written.
-          &quot;values&quot;: [ # The values to be written. `values` can contain more than one
-              # list of values. If it does, then multiple rows are written, one
-              # for each entry in `values`. Each list in `values` must have
-              # exactly as many entries as there are entries in columns
-              # above. Sending multiple lists is equivalent to sending multiple
-              # `Mutation`s, each containing one `values` entry and repeating
-              # table and columns. Individual values in each list are
-              # encoded as described here.
-            [
-              &quot;&quot;,
-            ],
-          ],
-          &quot;columns&quot;: [ # The names of the columns in table to be written.
-              #
-              # The list of columns must contain enough columns to allow
-              # Cloud Spanner to derive values for all primary key columns in the
-              # row(s) to be modified.
-            &quot;A String&quot;,
-          ],
         },
       },
     ],
@@ -1248,10 +1254,11 @@
     The object takes the form of:
 
 { # The request for CreateSession.
-    &quot;session&quot;: { # A session in the Cloud Spanner API. # The session to create.
+    &quot;session&quot;: { # A session in the Cloud Spanner API. # Required. The session to create.
       &quot;createTime&quot;: &quot;A String&quot;, # Output only. The timestamp when the session is created.
-      &quot;name&quot;: &quot;A String&quot;, # The name of the session. This is always system-assigned; values provided
-          # when creating a session are ignored.
+      &quot;name&quot;: &quot;A String&quot;, # Output only. The name of the session. This is always system-assigned.
+      &quot;approximateLastUseTime&quot;: &quot;A String&quot;, # Output only. The approximate timestamp when the session is last used. It is
+          # typically earlier than the actual last use time.
       &quot;labels&quot;: { # The labels for the session.
           #
           #  * Label keys must be between 1 and 63 characters long and must conform to
@@ -1263,8 +1270,6 @@
           # See https://goo.gl/xmQnxf for more information on and examples of labels.
         &quot;a_key&quot;: &quot;A String&quot;,
       },
-      &quot;approximateLastUseTime&quot;: &quot;A String&quot;, # Output only. The approximate timestamp when the session is last used. It is
-          # typically earlier than the actual last use time.
     },
   }
 
@@ -1278,8 +1283,9 @@
 
     { # A session in the Cloud Spanner API.
     &quot;createTime&quot;: &quot;A String&quot;, # Output only. The timestamp when the session is created.
-    &quot;name&quot;: &quot;A String&quot;, # The name of the session. This is always system-assigned; values provided
-        # when creating a session are ignored.
+    &quot;name&quot;: &quot;A String&quot;, # Output only. The name of the session. This is always system-assigned.
+    &quot;approximateLastUseTime&quot;: &quot;A String&quot;, # Output only. The approximate timestamp when the session is last used. It is
+        # typically earlier than the actual last use time.
     &quot;labels&quot;: { # The labels for the session.
         #
         #  * Label keys must be between 1 and 63 characters long and must conform to
@@ -1291,8 +1297,6 @@
         # See https://goo.gl/xmQnxf for more information on and examples of labels.
       &quot;a_key&quot;: &quot;A String&quot;,
     },
-    &quot;approximateLastUseTime&quot;: &quot;A String&quot;, # Output only. The approximate timestamp when the session is last used. It is
-        # typically earlier than the actual last use time.
   }</pre>
 </div>
 
@@ -1351,6 +1355,7 @@
         # 
         # Callers must provide at least one statement.
       { # A single DML statement.
+        &quot;sql&quot;: &quot;A String&quot;, # Required. The DML string.
         &quot;params&quot;: { # Parameter names and values that bind to placeholders in the DML string.
             #
             # A parameter placeholder consists of the `@` character followed by the
@@ -1365,7 +1370,6 @@
             # It is an error to execute a SQL statement with unbound parameters.
           &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
         },
-        &quot;sql&quot;: &quot;A String&quot;, # Required. The DML string.
         &quot;paramTypes&quot;: { # It is not always possible for Cloud Spanner to infer the right SQL type
             # from a JSON value.  For example, values of type `BYTES` and values
             # of type `STRING` both appear in params as JSON strings.
@@ -1376,9 +1380,9 @@
             # about SQL types.
           &quot;a_key&quot;: { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a
               # table cell or returned from an SQL query.
+            &quot;code&quot;: &quot;A String&quot;, # Required. The TypeCode for this type.
             &quot;arrayElementType&quot;: # Object with schema name: Type # If code == ARRAY, then `array_element_type`
                 # is the type of the array elements.
-            &quot;code&quot;: &quot;A String&quot;, # Required. The TypeCode for this type.
             &quot;structType&quot;: { # `StructType` defines the fields of a STRUCT type. # If code == STRUCT, then `struct_type`
                 # provides type information for the struct&#x27;s fields.
               &quot;fields&quot;: [ # The list of fields that make up this struct. Order is
@@ -1388,6 +1392,7 @@
                   # matches the order of columns in a read request, or the order of
                   # fields in the `SELECT` clause of a query.
                 { # Message representing a single field of a struct.
+                  &quot;type&quot;: # Object with schema name: Type # The type of the field.
                   &quot;name&quot;: &quot;A String&quot;, # The name of the field. For reads, this is the column name. For
                       # SQL queries, it is the column alias (e.g., `&quot;Word&quot;` in the
                       # query `&quot;SELECT &#x27;hello&#x27; AS Word&quot;`), or the column name (e.g.,
@@ -1395,7 +1400,6 @@
                       # columns might have an empty name (e.g., !&quot;SELECT
                       # UPPER(ColName)&quot;`). Note that a query result can contain
                       # multiple fields with the same name.
-                  &quot;type&quot;: # Object with schema name: Type # The type of the field.
                 },
               ],
             },
@@ -1420,365 +1424,16 @@
         # ExecuteSql call runs.
         #
         # See TransactionOptions for more information about transactions.
-      &quot;id&quot;: &quot;A String&quot;, # Execute the read or SQL query in a previously-started transaction.
-      &quot;begin&quot;: { # # Transactions # Begin a new transaction and execute this read or SQL query in
-          # it. The transaction ID of the new transaction is returned in
-          # ResultSetMetadata.transaction, which is a Transaction.
-          #
-          #
-          # Each session can have at most one active transaction at a time. After the
-          # active transaction is completed, the session can immediately be
-          # re-used for the next transaction. It is not necessary to create a
-          # new session for each transaction.
-          #
-          # # Transaction Modes
-          #
-          # Cloud Spanner supports three transaction modes:
-          #
-          #   1. Locking read-write. This type of transaction is the only way
-          #      to write data into Cloud Spanner. These transactions rely on
-          #      pessimistic locking and, if necessary, two-phase commit.
-          #      Locking read-write transactions may abort, requiring the
-          #      application to retry.
-          #
-          #   2. Snapshot read-only. This transaction type provides guaranteed
-          #      consistency across several reads, but does not allow
-          #      writes. Snapshot read-only transactions can be configured to
-          #      read at timestamps in the past. Snapshot read-only
-          #      transactions do not need to be committed.
-          #
-          #   3. Partitioned DML. This type of transaction is used to execute
-          #      a single Partitioned DML statement. Partitioned DML partitions
-          #      the key space and runs the DML statement over each partition
-          #      in parallel using separate, internal transactions that commit
-          #      independently. Partitioned DML transactions do not need to be
-          #      committed.
-          #
-          # For transactions that only read, snapshot read-only transactions
-          # provide simpler semantics and are almost always faster. In
-          # particular, read-only transactions do not take locks, so they do
-          # not conflict with read-write transactions. As a consequence of not
-          # taking locks, they also do not abort, so retry loops are not needed.
-          #
-          # Transactions may only read/write data in a single database. They
-          # may, however, read/write data in different tables within that
-          # database.
-          #
-          # ## Locking Read-Write Transactions
-          #
-          # Locking transactions may be used to atomically read-modify-write
-          # data anywhere in a database. This type of transaction is externally
-          # consistent.
-          #
-          # Clients should attempt to minimize the amount of time a transaction
-          # is active. Faster transactions commit with higher probability
-          # and cause less contention. Cloud Spanner attempts to keep read locks
-          # active as long as the transaction continues to do reads, and the
-          # transaction has not been terminated by
-          # Commit or
-          # Rollback.  Long periods of
-          # inactivity at the client may cause Cloud Spanner to release a
-          # transaction&#x27;s locks and abort it.
-          #
-          # Conceptually, a read-write transaction consists of zero or more
-          # reads or SQL statements followed by
-          # Commit. At any time before
-          # Commit, the client can send a
-          # Rollback request to abort the
-          # transaction.
-          #
-          # ### Semantics
-          #
-          # Cloud Spanner can commit the transaction if all read locks it acquired
-          # are still valid at commit time, and it is able to acquire write
-          # locks for all writes. Cloud Spanner can abort the transaction for any
-          # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
-          # that the transaction has not modified any user data in Cloud Spanner.
-          #
-          # Unless the transaction commits, Cloud Spanner makes no guarantees about
-          # how long the transaction&#x27;s locks were held for. It is an error to
-          # use Cloud Spanner locks for any sort of mutual exclusion other than
-          # between Cloud Spanner transactions themselves.
-          #
-          # ### Retrying Aborted Transactions
-          #
-          # When a transaction aborts, the application can choose to retry the
-          # whole transaction again. To maximize the chances of successfully
-          # committing the retry, the client should execute the retry in the
-          # same session as the original attempt. The original session&#x27;s lock
-          # priority increases with each consecutive abort, meaning that each
-          # attempt has a slightly better chance of success than the previous.
-          #
-          # Under some circumstances (e.g., many transactions attempting to
-          # modify the same row(s)), a transaction can abort many times in a
-          # short period before successfully committing. Thus, it is not a good
-          # idea to cap the number of retries a transaction can attempt;
-          # instead, it is better to limit the total amount of wall time spent
-          # retrying.
-          #
-          # ### Idle Transactions
-          #
-          # A transaction is considered idle if it has no outstanding reads or
-          # SQL queries and has not started a read or SQL query within the last 10
-          # seconds. Idle transactions can be aborted by Cloud Spanner so that they
-          # don&#x27;t hold on to locks indefinitely. In that case, the commit will
-          # fail with error `ABORTED`.
-          #
-          # If this behavior is undesirable, periodically executing a simple
-          # SQL query in the transaction (e.g., `SELECT 1`) prevents the
-          # transaction from becoming idle.
-          #
-          # ## Snapshot Read-Only Transactions
-          #
-          # Snapshot read-only transactions provides a simpler method than
-          # locking read-write transactions for doing several consistent
-          # reads. However, this type of transaction does not support writes.
-          #
-          # Snapshot transactions do not take locks. Instead, they work by
-          # choosing a Cloud Spanner timestamp, then executing all reads at that
-          # timestamp. Since they do not acquire locks, they do not block
-          # concurrent read-write transactions.
-          #
-          # Unlike locking read-write transactions, snapshot read-only
-          # transactions never abort. They can fail if the chosen read
-          # timestamp is garbage collected; however, the default garbage
-          # collection policy is generous enough that most applications do not
-          # need to worry about this in practice.
-          #
-          # Snapshot read-only transactions do not need to call
-          # Commit or
-          # Rollback (and in fact are not
-          # permitted to do so).
-          #
-          # To execute a snapshot transaction, the client specifies a timestamp
-          # bound, which tells Cloud Spanner how to choose a read timestamp.
-          #
-          # The types of timestamp bound are:
-          #
-          #   - Strong (the default).
-          #   - Bounded staleness.
-          #   - Exact staleness.
-          #
-          # If the Cloud Spanner database to be read is geographically distributed,
-          # stale read-only transactions can execute more quickly than strong
-          # or read-write transaction, because they are able to execute far
-          # from the leader replica.
-          #
-          # Each type of timestamp bound is discussed in detail below.
-          #
-          # ### Strong
-          #
-          # Strong reads are guaranteed to see the effects of all transactions
-          # that have committed before the start of the read. Furthermore, all
-          # rows yielded by a single read are consistent with each other -- if
-          # any part of the read observes a transaction, all parts of the read
-          # see the transaction.
-          #
-          # Strong reads are not repeatable: two consecutive strong read-only
-          # transactions might return inconsistent results if there are
-          # concurrent writes. If consistency across reads is required, the
-          # reads should be executed within a transaction or at an exact read
-          # timestamp.
-          #
-          # See TransactionOptions.ReadOnly.strong.
-          #
-          # ### Exact Staleness
-          #
-          # These timestamp bounds execute reads at a user-specified
-          # timestamp. Reads at a timestamp are guaranteed to see a consistent
-          # prefix of the global transaction history: they observe
-          # modifications done by all transactions with a commit timestamp &lt;=
-          # the read timestamp, and observe none of the modifications done by
-          # transactions with a larger commit timestamp. They will block until
-          # all conflicting transactions that may be assigned commit timestamps
-          # &lt;= the read timestamp have finished.
-          #
-          # The timestamp can either be expressed as an absolute Cloud Spanner commit
-          # timestamp or a staleness relative to the current time.
-          #
-          # These modes do not require a &quot;negotiation phase&quot; to pick a
-          # timestamp. As a result, they execute slightly faster than the
-          # equivalent boundedly stale concurrency modes. On the other hand,
-          # boundedly stale reads usually return fresher results.
-          #
-          # See TransactionOptions.ReadOnly.read_timestamp and
-          # TransactionOptions.ReadOnly.exact_staleness.
-          #
-          # ### Bounded Staleness
-          #
-          # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
-          # subject to a user-provided staleness bound. Cloud Spanner chooses the
-          # newest timestamp within the staleness bound that allows execution
-          # of the reads at the closest available replica without blocking.
-          #
-          # All rows yielded are consistent with each other -- if any part of
-          # the read observes a transaction, all parts of the read see the
-          # transaction. Boundedly stale reads are not repeatable: two stale
-          # reads, even if they use the same staleness bound, can execute at
-          # different timestamps and thus return inconsistent results.
-          #
-          # Boundedly stale reads execute in two phases: the first phase
-          # negotiates a timestamp among all replicas needed to serve the
-          # read. In the second phase, reads are executed at the negotiated
-          # timestamp.
-          #
-          # As a result of the two phase execution, bounded staleness reads are
-          # usually a little slower than comparable exact staleness
-          # reads. However, they are typically able to return fresher
-          # results, and are more likely to execute at the closest replica.
-          #
-          # Because the timestamp negotiation requires up-front knowledge of
-          # which rows will be read, it can only be used with single-use
-          # read-only transactions.
-          #
-          # See TransactionOptions.ReadOnly.max_staleness and
-          # TransactionOptions.ReadOnly.min_read_timestamp.
-          #
-          # ### Old Read Timestamps and Garbage Collection
-          #
-          # Cloud Spanner continuously garbage collects deleted and overwritten data
-          # in the background to reclaim storage space. This process is known
-          # as &quot;version GC&quot;. By default, version GC reclaims versions after they
-          # are one hour old. Because of this, Cloud Spanner cannot perform reads
-          # at read timestamps more than one hour in the past. This
-          # restriction also applies to in-progress reads and/or SQL queries whose
-          # timestamp become too old while executing. Reads and SQL queries with
-          # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
-          #
-          # ## Partitioned DML Transactions
-          #
-          # Partitioned DML transactions are used to execute DML statements with a
-          # different execution strategy that provides different, and often better,
-          # scalability properties for large, table-wide operations than DML in a
-          # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
-          # should prefer using ReadWrite transactions.
-          #
-          # Partitioned DML partitions the keyspace and runs the DML statement on each
-          # partition in separate, internal transactions. These transactions commit
-          # automatically when complete, and run independently from one another.
-          #
-          # To reduce lock contention, this execution strategy only acquires read locks
-          # on rows that match the WHERE clause of the statement. Additionally, the
-          # smaller per-partition transactions hold locks for less time.
-          #
-          # That said, Partitioned DML is not a drop-in replacement for standard DML used
-          # in ReadWrite transactions.
-          #
-          #  - The DML statement must be fully-partitionable. Specifically, the statement
-          #    must be expressible as the union of many statements which each access only
-          #    a single row of the table.
-          #
-          #  - The statement is not applied atomically to all rows of the table. Rather,
-          #    the statement is applied atomically to partitions of the table, in
-          #    independent transactions. Secondary index rows are updated atomically
-          #    with the base table rows.
-          #
-          #  - Partitioned DML does not guarantee exactly-once execution semantics
-          #    against a partition. The statement will be applied at least once to each
-          #    partition. It is strongly recommended that the DML statement should be
-          #    idempotent to avoid unexpected results. For instance, it is potentially
-          #    dangerous to run a statement such as
-          #    `UPDATE table SET column = column + 1` as it could be run multiple times
-          #    against some rows.
-          #
-          #  - The partitions are committed automatically - there is no support for
-          #    Commit or Rollback. If the call returns an error, or if the client issuing
-          #    the ExecuteSql call dies, it is possible that some rows had the statement
-          #    executed on them successfully. It is also possible that statement was
-          #    never executed against other rows.
-          #
-          #  - Partitioned DML transactions may only contain the execution of a single
-          #    DML statement via ExecuteSql or ExecuteStreamingSql.
-          #
-          #  - If any error is encountered during the execution of the partitioned DML
-          #    operation (for instance, a UNIQUE INDEX violation, division by zero, or a
-          #    value that cannot be stored due to schema constraints), then the
-          #    operation is stopped at that point and an error is returned. It is
-          #    possible that at this point, some partitions have been committed (or even
-          #    committed multiple times), and other partitions have not been run at all.
-          #
-          # Given the above, Partitioned DML is good fit for large, database-wide,
-          # operations that are idempotent, such as deleting old rows from a very large
-          # table.
-        &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
-            #
-            # Authorization to begin a Partitioned DML transaction requires
-            # `spanner.databases.beginPartitionedDmlTransaction` permission
-            # on the `session` resource.
-        },
-        &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
-            #
-            # Authorization to begin a read-write transaction requires
-            # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
-            # on the `session` resource.
-            # transaction type has no options.
-        },
-        &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
-            #
-            # Authorization to begin a read-only transaction requires
-            # `spanner.databases.beginReadOnlyTransaction` permission
-            # on the `session` resource.
-          &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
-              # seconds. Guarantees that all writes that have committed more
-              # than the specified number of seconds ago are visible. Because
-              # Cloud Spanner chooses the exact timestamp, this mode works even if
-              # the client&#x27;s local clock is substantially skewed from Cloud Spanner
-              # commit timestamps.
-              #
-              # Useful for reading the freshest data available at a nearby
-              # replica, while bounding the possible staleness if the local
-              # replica has fallen behind.
-              #
-              # Note that this option can only be used in single-use
-              # transactions.
-          &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
-              #
-              # This is useful for requesting fresher data than some previous
-              # read, or data that is fresh enough to observe the effects of some
-              # previously committed transaction whose timestamp is known.
-              #
-              # Note that this option can only be used in single-use transactions.
-              #
-              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-          &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
-              # are visible.
-          &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
-              # the Transaction message that describes the transaction.
-          &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
-              # old. The timestamp is chosen soon after the read is started.
-              #
-              # Guarantees that all writes that have committed more than the
-              # specified number of seconds ago are visible. Because Cloud Spanner
-              # chooses the exact timestamp, this mode works even if the client&#x27;s
-              # local clock is substantially skewed from Cloud Spanner commit
-              # timestamps.
-              #
-              # Useful for reading at nearby replicas without the distributed
-              # timestamp negotiation overhead of `max_staleness`.
-          &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
-              # reads at a specific timestamp are repeatable; the same read at
-              # the same timestamp always returns the same data. If the
-              # timestamp is in the future, the read will block until the
-              # specified timestamp, modulo the read&#x27;s deadline.
-              #
-              # Useful for large scale consistent reads such as mapreduces, or
-              # for coordinating many reads against a consistent snapshot of the
-              # data.
-              #
-              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-        },
-      },
       &quot;singleUse&quot;: { # # Transactions # Execute the read or SQL query in a temporary transaction.
           # This is the most efficient way to execute a transaction that
           # consists of a single SQL query.
           #
           #
-          # Each session can have at most one active transaction at a time. After the
-          # active transaction is completed, the session can immediately be
-          # re-used for the next transaction. It is not necessary to create a
-          # new session for each transaction.
+          # Each session can have at most one active transaction at a time (note that
+          # standalone reads and queries use a transaction internally and do count
+          # towards the one transaction limit). After the active transaction is
+          # completed, the session can immediately be re-used for the next transaction.
+          # It is not necessary to create a new session for each transaction.
           #
           # # Transaction Modes
           #
@@ -2049,12 +1704,6 @@
           # Given the above, Partitioned DML is good fit for large, database-wide,
           # operations that are idempotent, such as deleting old rows from a very large
           # table.
-        &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
-            #
-            # Authorization to begin a Partitioned DML transaction requires
-            # `spanner.databases.beginPartitionedDmlTransaction` permission
-            # on the `session` resource.
-        },
         &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
             #
             # Authorization to begin a read-write transaction requires
@@ -2067,6 +1716,39 @@
             # Authorization to begin a read-only transaction requires
             # `spanner.databases.beginReadOnlyTransaction` permission
             # on the `session` resource.
+          &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
+              # reads at a specific timestamp are repeatable; the same read at
+              # the same timestamp always returns the same data. If the
+              # timestamp is in the future, the read will block until the
+              # specified timestamp, modulo the read&#x27;s deadline.
+              #
+              # Useful for large scale consistent reads such as mapreduces, or
+              # for coordinating many reads against a consistent snapshot of the
+              # data.
+              #
+              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+          &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
+              #
+              # This is useful for requesting fresher data than some previous
+              # read, or data that is fresh enough to observe the effects of some
+              # previously committed transaction whose timestamp is known.
+              #
+              # Note that this option can only be used in single-use transactions.
+              #
+              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+          &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
+              # old. The timestamp is chosen soon after the read is started.
+              #
+              # Guarantees that all writes that have committed more than the
+              # specified number of seconds ago are visible. Because Cloud Spanner
+              # chooses the exact timestamp, this mode works even if the client&#x27;s
+              # local clock is substantially skewed from Cloud Spanner commit
+              # timestamps.
+              #
+              # Useful for reading at nearby replicas without the distributed
+              # timestamp negotiation overhead of `max_staleness`.
           &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
               # seconds. Guarantees that all writes that have committed more
               # than the specified number of seconds ago are visible. Because
@@ -2080,31 +1762,310 @@
               #
               # Note that this option can only be used in single-use
               # transactions.
-          &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
-              #
-              # This is useful for requesting fresher data than some previous
-              # read, or data that is fresh enough to observe the effects of some
-              # previously committed transaction whose timestamp is known.
-              #
-              # Note that this option can only be used in single-use transactions.
-              #
-              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-          &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
-              # are visible.
           &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
               # the Transaction message that describes the transaction.
-          &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
-              # old. The timestamp is chosen soon after the read is started.
-              #
-              # Guarantees that all writes that have committed more than the
-              # specified number of seconds ago are visible. Because Cloud Spanner
-              # chooses the exact timestamp, this mode works even if the client&#x27;s
-              # local clock is substantially skewed from Cloud Spanner commit
-              # timestamps.
-              #
-              # Useful for reading at nearby replicas without the distributed
-              # timestamp negotiation overhead of `max_staleness`.
+          &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
+              # are visible.
+        },
+        &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
+            #
+            # Authorization to begin a Partitioned DML transaction requires
+            # `spanner.databases.beginPartitionedDmlTransaction` permission
+            # on the `session` resource.
+        },
+      },
+      &quot;begin&quot;: { # # Transactions # Begin a new transaction and execute this read or SQL query in
+          # it. The transaction ID of the new transaction is returned in
+          # ResultSetMetadata.transaction, which is a Transaction.
+          #
+          #
+          # Each session can have at most one active transaction at a time (note that
+          # standalone reads and queries use a transaction internally and do count
+          # towards the one transaction limit). After the active transaction is
+          # completed, the session can immediately be re-used for the next transaction.
+          # It is not necessary to create a new session for each transaction.
+          #
+          # # Transaction Modes
+          #
+          # Cloud Spanner supports three transaction modes:
+          #
+          #   1. Locking read-write. This type of transaction is the only way
+          #      to write data into Cloud Spanner. These transactions rely on
+          #      pessimistic locking and, if necessary, two-phase commit.
+          #      Locking read-write transactions may abort, requiring the
+          #      application to retry.
+          #
+          #   2. Snapshot read-only. This transaction type provides guaranteed
+          #      consistency across several reads, but does not allow
+          #      writes. Snapshot read-only transactions can be configured to
+          #      read at timestamps in the past. Snapshot read-only
+          #      transactions do not need to be committed.
+          #
+          #   3. Partitioned DML. This type of transaction is used to execute
+          #      a single Partitioned DML statement. Partitioned DML partitions
+          #      the key space and runs the DML statement over each partition
+          #      in parallel using separate, internal transactions that commit
+          #      independently. Partitioned DML transactions do not need to be
+          #      committed.
+          #
+          # For transactions that only read, snapshot read-only transactions
+          # provide simpler semantics and are almost always faster. In
+          # particular, read-only transactions do not take locks, so they do
+          # not conflict with read-write transactions. As a consequence of not
+          # taking locks, they also do not abort, so retry loops are not needed.
+          #
+          # Transactions may only read/write data in a single database. They
+          # may, however, read/write data in different tables within that
+          # database.
+          #
+          # ## Locking Read-Write Transactions
+          #
+          # Locking transactions may be used to atomically read-modify-write
+          # data anywhere in a database. This type of transaction is externally
+          # consistent.
+          #
+          # Clients should attempt to minimize the amount of time a transaction
+          # is active. Faster transactions commit with higher probability
+          # and cause less contention. Cloud Spanner attempts to keep read locks
+          # active as long as the transaction continues to do reads, and the
+          # transaction has not been terminated by
+          # Commit or
+          # Rollback.  Long periods of
+          # inactivity at the client may cause Cloud Spanner to release a
+          # transaction&#x27;s locks and abort it.
+          #
+          # Conceptually, a read-write transaction consists of zero or more
+          # reads or SQL statements followed by
+          # Commit. At any time before
+          # Commit, the client can send a
+          # Rollback request to abort the
+          # transaction.
+          #
+          # ### Semantics
+          #
+          # Cloud Spanner can commit the transaction if all read locks it acquired
+          # are still valid at commit time, and it is able to acquire write
+          # locks for all writes. Cloud Spanner can abort the transaction for any
+          # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
+          # that the transaction has not modified any user data in Cloud Spanner.
+          #
+          # Unless the transaction commits, Cloud Spanner makes no guarantees about
+          # how long the transaction&#x27;s locks were held for. It is an error to
+          # use Cloud Spanner locks for any sort of mutual exclusion other than
+          # between Cloud Spanner transactions themselves.
+          #
+          # ### Retrying Aborted Transactions
+          #
+          # When a transaction aborts, the application can choose to retry the
+          # whole transaction again. To maximize the chances of successfully
+          # committing the retry, the client should execute the retry in the
+          # same session as the original attempt. The original session&#x27;s lock
+          # priority increases with each consecutive abort, meaning that each
+          # attempt has a slightly better chance of success than the previous.
+          #
+          # Under some circumstances (e.g., many transactions attempting to
+          # modify the same row(s)), a transaction can abort many times in a
+          # short period before successfully committing. Thus, it is not a good
+          # idea to cap the number of retries a transaction can attempt;
+          # instead, it is better to limit the total amount of wall time spent
+          # retrying.
+          #
+          # ### Idle Transactions
+          #
+          # A transaction is considered idle if it has no outstanding reads or
+          # SQL queries and has not started a read or SQL query within the last 10
+          # seconds. Idle transactions can be aborted by Cloud Spanner so that they
+          # don&#x27;t hold on to locks indefinitely. In that case, the commit will
+          # fail with error `ABORTED`.
+          #
+          # If this behavior is undesirable, periodically executing a simple
+          # SQL query in the transaction (e.g., `SELECT 1`) prevents the
+          # transaction from becoming idle.
+          #
+          # ## Snapshot Read-Only Transactions
+          #
+          # Snapshot read-only transactions provides a simpler method than
+          # locking read-write transactions for doing several consistent
+          # reads. However, this type of transaction does not support writes.
+          #
+          # Snapshot transactions do not take locks. Instead, they work by
+          # choosing a Cloud Spanner timestamp, then executing all reads at that
+          # timestamp. Since they do not acquire locks, they do not block
+          # concurrent read-write transactions.
+          #
+          # Unlike locking read-write transactions, snapshot read-only
+          # transactions never abort. They can fail if the chosen read
+          # timestamp is garbage collected; however, the default garbage
+          # collection policy is generous enough that most applications do not
+          # need to worry about this in practice.
+          #
+          # Snapshot read-only transactions do not need to call
+          # Commit or
+          # Rollback (and in fact are not
+          # permitted to do so).
+          #
+          # To execute a snapshot transaction, the client specifies a timestamp
+          # bound, which tells Cloud Spanner how to choose a read timestamp.
+          #
+          # The types of timestamp bound are:
+          #
+          #   - Strong (the default).
+          #   - Bounded staleness.
+          #   - Exact staleness.
+          #
+          # If the Cloud Spanner database to be read is geographically distributed,
+          # stale read-only transactions can execute more quickly than strong
+          # or read-write transaction, because they are able to execute far
+          # from the leader replica.
+          #
+          # Each type of timestamp bound is discussed in detail below.
+          #
+          # ### Strong
+          #
+          # Strong reads are guaranteed to see the effects of all transactions
+          # that have committed before the start of the read. Furthermore, all
+          # rows yielded by a single read are consistent with each other -- if
+          # any part of the read observes a transaction, all parts of the read
+          # see the transaction.
+          #
+          # Strong reads are not repeatable: two consecutive strong read-only
+          # transactions might return inconsistent results if there are
+          # concurrent writes. If consistency across reads is required, the
+          # reads should be executed within a transaction or at an exact read
+          # timestamp.
+          #
+          # See TransactionOptions.ReadOnly.strong.
+          #
+          # ### Exact Staleness
+          #
+          # These timestamp bounds execute reads at a user-specified
+          # timestamp. Reads at a timestamp are guaranteed to see a consistent
+          # prefix of the global transaction history: they observe
+          # modifications done by all transactions with a commit timestamp &lt;=
+          # the read timestamp, and observe none of the modifications done by
+          # transactions with a larger commit timestamp. They will block until
+          # all conflicting transactions that may be assigned commit timestamps
+          # &lt;= the read timestamp have finished.
+          #
+          # The timestamp can either be expressed as an absolute Cloud Spanner commit
+          # timestamp or a staleness relative to the current time.
+          #
+          # These modes do not require a &quot;negotiation phase&quot; to pick a
+          # timestamp. As a result, they execute slightly faster than the
+          # equivalent boundedly stale concurrency modes. On the other hand,
+          # boundedly stale reads usually return fresher results.
+          #
+          # See TransactionOptions.ReadOnly.read_timestamp and
+          # TransactionOptions.ReadOnly.exact_staleness.
+          #
+          # ### Bounded Staleness
+          #
+          # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
+          # subject to a user-provided staleness bound. Cloud Spanner chooses the
+          # newest timestamp within the staleness bound that allows execution
+          # of the reads at the closest available replica without blocking.
+          #
+          # All rows yielded are consistent with each other -- if any part of
+          # the read observes a transaction, all parts of the read see the
+          # transaction. Boundedly stale reads are not repeatable: two stale
+          # reads, even if they use the same staleness bound, can execute at
+          # different timestamps and thus return inconsistent results.
+          #
+          # Boundedly stale reads execute in two phases: the first phase
+          # negotiates a timestamp among all replicas needed to serve the
+          # read. In the second phase, reads are executed at the negotiated
+          # timestamp.
+          #
+          # As a result of the two phase execution, bounded staleness reads are
+          # usually a little slower than comparable exact staleness
+          # reads. However, they are typically able to return fresher
+          # results, and are more likely to execute at the closest replica.
+          #
+          # Because the timestamp negotiation requires up-front knowledge of
+          # which rows will be read, it can only be used with single-use
+          # read-only transactions.
+          #
+          # See TransactionOptions.ReadOnly.max_staleness and
+          # TransactionOptions.ReadOnly.min_read_timestamp.
+          #
+          # ### Old Read Timestamps and Garbage Collection
+          #
+          # Cloud Spanner continuously garbage collects deleted and overwritten data
+          # in the background to reclaim storage space. This process is known
+          # as &quot;version GC&quot;. By default, version GC reclaims versions after they
+          # are one hour old. Because of this, Cloud Spanner cannot perform reads
+          # at read timestamps more than one hour in the past. This
+          # restriction also applies to in-progress reads and/or SQL queries whose
+          # timestamp become too old while executing. Reads and SQL queries with
+          # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
+          #
+          # ## Partitioned DML Transactions
+          #
+          # Partitioned DML transactions are used to execute DML statements with a
+          # different execution strategy that provides different, and often better,
+          # scalability properties for large, table-wide operations than DML in a
+          # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
+          # should prefer using ReadWrite transactions.
+          #
+          # Partitioned DML partitions the keyspace and runs the DML statement on each
+          # partition in separate, internal transactions. These transactions commit
+          # automatically when complete, and run independently from one another.
+          #
+          # To reduce lock contention, this execution strategy only acquires read locks
+          # on rows that match the WHERE clause of the statement. Additionally, the
+          # smaller per-partition transactions hold locks for less time.
+          #
+          # That said, Partitioned DML is not a drop-in replacement for standard DML used
+          # in ReadWrite transactions.
+          #
+          #  - The DML statement must be fully-partitionable. Specifically, the statement
+          #    must be expressible as the union of many statements which each access only
+          #    a single row of the table.
+          #
+          #  - The statement is not applied atomically to all rows of the table. Rather,
+          #    the statement is applied atomically to partitions of the table, in
+          #    independent transactions. Secondary index rows are updated atomically
+          #    with the base table rows.
+          #
+          #  - Partitioned DML does not guarantee exactly-once execution semantics
+          #    against a partition. The statement will be applied at least once to each
+          #    partition. It is strongly recommended that the DML statement should be
+          #    idempotent to avoid unexpected results. For instance, it is potentially
+          #    dangerous to run a statement such as
+          #    `UPDATE table SET column = column + 1` as it could be run multiple times
+          #    against some rows.
+          #
+          #  - The partitions are committed automatically - there is no support for
+          #    Commit or Rollback. If the call returns an error, or if the client issuing
+          #    the ExecuteSql call dies, it is possible that some rows had the statement
+          #    executed on them successfully. It is also possible that statement was
+          #    never executed against other rows.
+          #
+          #  - Partitioned DML transactions may only contain the execution of a single
+          #    DML statement via ExecuteSql or ExecuteStreamingSql.
+          #
+          #  - If any error is encountered during the execution of the partitioned DML
+          #    operation (for instance, a UNIQUE INDEX violation, division by zero, or a
+          #    value that cannot be stored due to schema constraints), then the
+          #    operation is stopped at that point and an error is returned. It is
+          #    possible that at this point, some partitions have been committed (or even
+          #    committed multiple times), and other partitions have not been run at all.
+          #
+          # Given the above, Partitioned DML is good fit for large, database-wide,
+          # operations that are idempotent, such as deleting old rows from a very large
+          # table.
+        &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
+            #
+            # Authorization to begin a read-write transaction requires
+            # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
+            # on the `session` resource.
+            # transaction type has no options.
+        },
+        &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
+            #
+            # Authorization to begin a read-only transaction requires
+            # `spanner.databases.beginReadOnlyTransaction` permission
+            # on the `session` resource.
           &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
               # reads at a specific timestamp are repeatable; the same read at
               # the same timestamp always returns the same data. If the
@@ -2117,8 +2078,53 @@
               #
               # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
               # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+          &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
+              #
+              # This is useful for requesting fresher data than some previous
+              # read, or data that is fresh enough to observe the effects of some
+              # previously committed transaction whose timestamp is known.
+              #
+              # Note that this option can only be used in single-use transactions.
+              #
+              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+          &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
+              # old. The timestamp is chosen soon after the read is started.
+              #
+              # Guarantees that all writes that have committed more than the
+              # specified number of seconds ago are visible. Because Cloud Spanner
+              # chooses the exact timestamp, this mode works even if the client&#x27;s
+              # local clock is substantially skewed from Cloud Spanner commit
+              # timestamps.
+              #
+              # Useful for reading at nearby replicas without the distributed
+              # timestamp negotiation overhead of `max_staleness`.
+          &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
+              # seconds. Guarantees that all writes that have committed more
+              # than the specified number of seconds ago are visible. Because
+              # Cloud Spanner chooses the exact timestamp, this mode works even if
+              # the client&#x27;s local clock is substantially skewed from Cloud Spanner
+              # commit timestamps.
+              #
+              # Useful for reading the freshest data available at a nearby
+              # replica, while bounding the possible staleness if the local
+              # replica has fallen behind.
+              #
+              # Note that this option can only be used in single-use
+              # transactions.
+          &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
+              # the Transaction message that describes the transaction.
+          &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
+              # are visible.
+        },
+        &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
+            #
+            # Authorization to begin a Partitioned DML transaction requires
+            # `spanner.databases.beginPartitionedDmlTransaction` permission
+            # on the `session` resource.
         },
       },
+      &quot;id&quot;: &quot;A String&quot;, # Execute the read or SQL query in a previously-started transaction.
     },
   }
 
@@ -2154,25 +2160,6 @@
       # * Response: 2 ResultSet messages, and a syntax error (`INVALID_ARGUMENT`)
       #   status. The number of ResultSet messages indicates that the third
       #   statement failed, and the fourth and fifth statements were not executed.
-    &quot;status&quot;: { # The `Status` type defines a logical error model that is suitable for # If all DML statements are executed successfully, the status is `OK`.
-        # Otherwise, the error status of the first failed statement.
-        # different programming environments, including REST APIs and RPC APIs. It is
-        # used by [gRPC](https://github.com/grpc). Each `Status` message contains
-        # three pieces of data: error code, error message, and error details.
-        #
-        # You can find out more about this error model and how to work with it in the
-        # [API Design Guide](https://cloud.google.com/apis/design/errors).
-      &quot;details&quot;: [ # A list of messages that carry the error details.  There is a common set of
-          # message types for APIs to use.
-        {
-          &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
-        },
-      ],
-      &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any
-          # user-facing error message should be localized and sent in the
-          # google.rpc.Status.details field, or localized by the client.
-      &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
-    },
     &quot;resultSets&quot;: [ # One ResultSet for each statement in the request that ran successfully,
         # in the same order as the statements in the request. Each ResultSet does
         # not contain any rows. The ResultSetStats in each ResultSet contain
@@ -2182,33 +2169,100 @@
         # ResultSetMetadata.
       { # Results from Read or
           # ExecuteSql.
-        &quot;metadata&quot;: { # Metadata about a ResultSet or PartialResultSet. # Metadata about the result set, such as row type information.
-          &quot;rowType&quot;: { # `StructType` defines the fields of a STRUCT type. # Indicates the field names and types for the rows in the result
-              # set.  For example, a SQL query like `&quot;SELECT UserId, UserName FROM
-              # Users&quot;` could return a `row_type` value like:
+        &quot;stats&quot;: { # Additional statistics about a ResultSet or PartialResultSet. # Query plan and execution statistics for the SQL statement that
+            # produced this result set. These can be requested by setting
+            # ExecuteSqlRequest.query_mode.
+            # DML statements always produce stats containing the number of rows
+            # modified, unless executed using the
+            # ExecuteSqlRequest.QueryMode.PLAN ExecuteSqlRequest.query_mode.
+            # Other fields may or may not be populated, based on the
+            # ExecuteSqlRequest.query_mode.
+          &quot;queryStats&quot;: { # Aggregated statistics from the execution of the query. Only present when
+              # the query is profiled. For example, a query could return the statistics as
+              # follows:
               #
-              #     &quot;fields&quot;: [
-              #       { &quot;name&quot;: &quot;UserId&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;INT64&quot; } },
-              #       { &quot;name&quot;: &quot;UserName&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;STRING&quot; } },
-              #     ]
-            &quot;fields&quot;: [ # The list of fields that make up this struct. Order is
-                # significant, because values of this struct type are represented as
-                # lists, where the order of field values matches the order of
-                # fields in the StructType. In turn, the order of fields
-                # matches the order of columns in a read request, or the order of
-                # fields in the `SELECT` clause of a query.
-              { # Message representing a single field of a struct.
-                &quot;name&quot;: &quot;A String&quot;, # The name of the field. For reads, this is the column name. For
-                    # SQL queries, it is the column alias (e.g., `&quot;Word&quot;` in the
-                    # query `&quot;SELECT &#x27;hello&#x27; AS Word&quot;`), or the column name (e.g.,
-                    # `&quot;ColName&quot;` in the query `&quot;SELECT ColName FROM Table&quot;`). Some
-                    # columns might have an empty name (e.g., !&quot;SELECT
-                    # UPPER(ColName)&quot;`). Note that a query result can contain
-                    # multiple fields with the same name.
-                &quot;type&quot;: # Object with schema name: Type # The type of the field.
+              #     {
+              #       &quot;rows_returned&quot;: &quot;3&quot;,
+              #       &quot;elapsed_time&quot;: &quot;1.22 secs&quot;,
+              #       &quot;cpu_time&quot;: &quot;1.19 secs&quot;
+              #     }
+            &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
+          },
+          &quot;rowCountExact&quot;: &quot;A String&quot;, # Standard DML returns an exact count of rows that were modified.
+          &quot;rowCountLowerBound&quot;: &quot;A String&quot;, # Partitioned DML does not offer exactly-once semantics, so it
+              # returns a lower bound of the rows modified.
+          &quot;queryPlan&quot;: { # Contains an ordered list of nodes appearing in the query plan. # QueryPlan for the query associated with this result.
+            &quot;planNodes&quot;: [ # The nodes in the query plan. Plan nodes are returned in pre-order starting
+                # with the plan root. Each PlanNode&#x27;s `id` corresponds to its index in
+                # `plan_nodes`.
+              { # Node information for nodes appearing in a QueryPlan.plan_nodes.
+                &quot;childLinks&quot;: [ # List of child node `index`es and their relationship to this parent.
+                  { # Metadata associated with a parent-child relationship appearing in a
+                      # PlanNode.
+                    &quot;childIndex&quot;: 42, # The node to which the link points.
+                    &quot;type&quot;: &quot;A String&quot;, # The type of the link. For example, in Hash Joins this could be used to
+                        # distinguish between the build child and the probe child, or in the case
+                        # of the child being an output variable, to represent the tag associated
+                        # with the output variable.
+                    &quot;variable&quot;: &quot;A String&quot;, # Only present if the child node is SCALAR and corresponds
+                        # to an output variable of the parent node. The field carries the name of
+                        # the output variable.
+                        # For example, a `TableScan` operator that reads rows from a table will
+                        # have child links to the `SCALAR` nodes representing the output variables
+                        # created for each column that is read by the operator. The corresponding
+                        # `variable` fields will be set to the variable names assigned to the
+                        # columns.
+                  },
+                ],
+                &quot;metadata&quot;: { # Attributes relevant to the node contained in a group of key-value pairs.
+                    # For example, a Parameter Reference node could have the following
+                    # information in its metadata:
+                    #
+                    #     {
+                    #       &quot;parameter_reference&quot;: &quot;param1&quot;,
+                    #       &quot;parameter_type&quot;: &quot;array&quot;
+                    #     }
+                  &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
+                },
+                &quot;kind&quot;: &quot;A String&quot;, # Used to determine the type of node. May be needed for visualizing
+                    # different kinds of nodes differently. For example, If the node is a
+                    # SCALAR node, it will have a condensed representation
+                    # which can be used to directly embed a description of the node in its
+                    # parent.
+                &quot;shortRepresentation&quot;: { # Condensed representation of a node and its subtree. Only present for # Condensed representation for SCALAR nodes.
+                    # `SCALAR` PlanNode(s).
+                  &quot;subqueries&quot;: { # A mapping of (subquery variable name) -&gt; (subquery node id) for cases
+                      # where the `description` string of this node references a `SCALAR`
+                      # subquery contained in the expression subtree rooted at this node. The
+                      # referenced `SCALAR` subquery may not necessarily be a direct child of
+                      # this node.
+                    &quot;a_key&quot;: 42,
+                  },
+                  &quot;description&quot;: &quot;A String&quot;, # A string representation of the expression subtree rooted at this node.
+                },
+                &quot;displayName&quot;: &quot;A String&quot;, # The display name for the node.
+                &quot;index&quot;: 42, # The `PlanNode`&#x27;s index in node list.
+                &quot;executionStats&quot;: { # The execution statistics associated with the node, contained in a group of
+                    # key-value pairs. Only present if the plan was returned as a result of a
+                    # profile query. For example, number of executions, number of rows/time per
+                    # execution etc.
+                  &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
+                },
               },
             ],
           },
+        },
+        &quot;rows&quot;: [ # Each element in `rows` is a row whose format is defined by
+            # metadata.row_type. The ith element
+            # in each row matches the ith field in
+            # metadata.row_type. Elements are
+            # encoded based on type as described
+            # here.
+          [
+            &quot;&quot;,
+          ],
+        ],
+        &quot;metadata&quot;: { # Metadata about a ResultSet or PartialResultSet. # Metadata about the result set, such as row type information.
           &quot;transaction&quot;: { # A transaction. # If the read or SQL query began a transaction as a side-effect, the
               # information about the new transaction is yielded here.
             &quot;readTimestamp&quot;: &quot;A String&quot;, # For snapshot read-only transactions, the read timestamp chosen
@@ -2226,102 +2280,54 @@
                 # Single-use read-only transactions do not have IDs, because
                 # single-use transactions do not support multiple requests.
           },
-        },
-        &quot;stats&quot;: { # Additional statistics about a ResultSet or PartialResultSet. # Query plan and execution statistics for the SQL statement that
-            # produced this result set. These can be requested by setting
-            # ExecuteSqlRequest.query_mode.
-            # DML statements always produce stats containing the number of rows
-            # modified, unless executed using the
-            # ExecuteSqlRequest.QueryMode.PLAN ExecuteSqlRequest.query_mode.
-            # Other fields may or may not be populated, based on the
-            # ExecuteSqlRequest.query_mode.
-          &quot;rowCountLowerBound&quot;: &quot;A String&quot;, # Partitioned DML does not offer exactly-once semantics, so it
-              # returns a lower bound of the rows modified.
-          &quot;queryPlan&quot;: { # Contains an ordered list of nodes appearing in the query plan. # QueryPlan for the query associated with this result.
-            &quot;planNodes&quot;: [ # The nodes in the query plan. Plan nodes are returned in pre-order starting
-                # with the plan root. Each PlanNode&#x27;s `id` corresponds to its index in
-                # `plan_nodes`.
-              { # Node information for nodes appearing in a QueryPlan.plan_nodes.
-                &quot;displayName&quot;: &quot;A String&quot;, # The display name for the node.
-                &quot;executionStats&quot;: { # The execution statistics associated with the node, contained in a group of
-                    # key-value pairs. Only present if the plan was returned as a result of a
-                    # profile query. For example, number of executions, number of rows/time per
-                    # execution etc.
-                  &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
-                },
-                &quot;shortRepresentation&quot;: { # Condensed representation of a node and its subtree. Only present for # Condensed representation for SCALAR nodes.
-                    # `SCALAR` PlanNode(s).
-                  &quot;subqueries&quot;: { # A mapping of (subquery variable name) -&gt; (subquery node id) for cases
-                      # where the `description` string of this node references a `SCALAR`
-                      # subquery contained in the expression subtree rooted at this node. The
-                      # referenced `SCALAR` subquery may not necessarily be a direct child of
-                      # this node.
-                    &quot;a_key&quot;: 42,
-                  },
-                  &quot;description&quot;: &quot;A String&quot;, # A string representation of the expression subtree rooted at this node.
-                },
-                &quot;metadata&quot;: { # Attributes relevant to the node contained in a group of key-value pairs.
-                    # For example, a Parameter Reference node could have the following
-                    # information in its metadata:
-                    #
-                    #     {
-                    #       &quot;parameter_reference&quot;: &quot;param1&quot;,
-                    #       &quot;parameter_type&quot;: &quot;array&quot;
-                    #     }
-                  &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
-                },
-                &quot;childLinks&quot;: [ # List of child node `index`es and their relationship to this parent.
-                  { # Metadata associated with a parent-child relationship appearing in a
-                      # PlanNode.
-                    &quot;type&quot;: &quot;A String&quot;, # The type of the link. For example, in Hash Joins this could be used to
-                        # distinguish between the build child and the probe child, or in the case
-                        # of the child being an output variable, to represent the tag associated
-                        # with the output variable.
-                    &quot;variable&quot;: &quot;A String&quot;, # Only present if the child node is SCALAR and corresponds
-                        # to an output variable of the parent node. The field carries the name of
-                        # the output variable.
-                        # For example, a `TableScan` operator that reads rows from a table will
-                        # have child links to the `SCALAR` nodes representing the output variables
-                        # created for each column that is read by the operator. The corresponding
-                        # `variable` fields will be set to the variable names assigned to the
-                        # columns.
-                    &quot;childIndex&quot;: 42, # The node to which the link points.
-                  },
-                ],
-                &quot;index&quot;: 42, # The `PlanNode`&#x27;s index in node list.
-                &quot;kind&quot;: &quot;A String&quot;, # Used to determine the type of node. May be needed for visualizing
-                    # different kinds of nodes differently. For example, If the node is a
-                    # SCALAR node, it will have a condensed representation
-                    # which can be used to directly embed a description of the node in its
-                    # parent.
+          &quot;rowType&quot;: { # `StructType` defines the fields of a STRUCT type. # Indicates the field names and types for the rows in the result
+              # set.  For example, a SQL query like `&quot;SELECT UserId, UserName FROM
+              # Users&quot;` could return a `row_type` value like:
+              #
+              #     &quot;fields&quot;: [
+              #       { &quot;name&quot;: &quot;UserId&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;INT64&quot; } },
+              #       { &quot;name&quot;: &quot;UserName&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;STRING&quot; } },
+              #     ]
+            &quot;fields&quot;: [ # The list of fields that make up this struct. Order is
+                # significant, because values of this struct type are represented as
+                # lists, where the order of field values matches the order of
+                # fields in the StructType. In turn, the order of fields
+                # matches the order of columns in a read request, or the order of
+                # fields in the `SELECT` clause of a query.
+              { # Message representing a single field of a struct.
+                &quot;type&quot;: # Object with schema name: Type # The type of the field.
+                &quot;name&quot;: &quot;A String&quot;, # The name of the field. For reads, this is the column name. For
+                    # SQL queries, it is the column alias (e.g., `&quot;Word&quot;` in the
+                    # query `&quot;SELECT &#x27;hello&#x27; AS Word&quot;`), or the column name (e.g.,
+                    # `&quot;ColName&quot;` in the query `&quot;SELECT ColName FROM Table&quot;`). Some
+                    # columns might have an empty name (e.g., !&quot;SELECT
+                    # UPPER(ColName)&quot;`). Note that a query result can contain
+                    # multiple fields with the same name.
               },
             ],
           },
-          &quot;rowCountExact&quot;: &quot;A String&quot;, # Standard DML returns an exact count of rows that were modified.
-          &quot;queryStats&quot;: { # Aggregated statistics from the execution of the query. Only present when
-              # the query is profiled. For example, a query could return the statistics as
-              # follows:
-              #
-              #     {
-              #       &quot;rows_returned&quot;: &quot;3&quot;,
-              #       &quot;elapsed_time&quot;: &quot;1.22 secs&quot;,
-              #       &quot;cpu_time&quot;: &quot;1.19 secs&quot;
-              #     }
-            &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
-          },
         },
-        &quot;rows&quot;: [ # Each element in `rows` is a row whose format is defined by
-            # metadata.row_type. The ith element
-            # in each row matches the ith field in
-            # metadata.row_type. Elements are
-            # encoded based on type as described
-            # here.
-          [
-            &quot;&quot;,
-          ],
-        ],
       },
     ],
+    &quot;status&quot;: { # The `Status` type defines a logical error model that is suitable for # If all DML statements are executed successfully, the status is `OK`.
+        # Otherwise, the error status of the first failed statement.
+        # different programming environments, including REST APIs and RPC APIs. It is
+        # used by [gRPC](https://github.com/grpc). Each `Status` message contains
+        # three pieces of data: error code, error message, and error details.
+        #
+        # You can find out more about this error model and how to work with it in the
+        # [API Design Guide](https://cloud.google.com/apis/design/errors).
+      &quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any
+          # user-facing error message should be localized and sent in the
+          # google.rpc.Status.details field, or localized by the client.
+      &quot;details&quot;: [ # A list of messages that carry the error details.  There is a common set of
+          # message types for APIs to use.
+        {
+          &quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
+        },
+      ],
+      &quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
+    },
   }</pre>
 </div>
 
@@ -2346,737 +2352,6 @@
 
 { # The request for ExecuteSql and
       # ExecuteStreamingSql.
-    &quot;seqno&quot;: &quot;A String&quot;, # A per-transaction sequence number used to identify this request. This field
-        # makes each request idempotent such that if the request is received multiple
-        # times, at most one will succeed.
-        # 
-        # The sequence number must be monotonically increasing within the
-        # transaction. If a request arrives for the first time with an out-of-order
-        # sequence number, the transaction may be aborted. Replays of previously
-        # handled requests will yield the same response as the first execution.
-        # 
-        # Required for DML statements. Ignored for queries.
-    &quot;transaction&quot;: { # This message is used to select the transaction in which a # The transaction to use.
-        # 
-        # For queries, if none is provided, the default is a temporary read-only
-        # transaction with strong concurrency.
-        # 
-        # Standard DML statements require a read-write transaction. To protect
-        # against replays, single-use transactions are not supported.  The caller
-        # must either supply an existing transaction ID or begin a new transaction.
-        # 
-        # Partitioned DML requires an existing Partitioned DML transaction ID.
-        # Read or
-        # ExecuteSql call runs.
-        #
-        # See TransactionOptions for more information about transactions.
-      &quot;id&quot;: &quot;A String&quot;, # Execute the read or SQL query in a previously-started transaction.
-      &quot;begin&quot;: { # # Transactions # Begin a new transaction and execute this read or SQL query in
-          # it. The transaction ID of the new transaction is returned in
-          # ResultSetMetadata.transaction, which is a Transaction.
-          #
-          #
-          # Each session can have at most one active transaction at a time. After the
-          # active transaction is completed, the session can immediately be
-          # re-used for the next transaction. It is not necessary to create a
-          # new session for each transaction.
-          #
-          # # Transaction Modes
-          #
-          # Cloud Spanner supports three transaction modes:
-          #
-          #   1. Locking read-write. This type of transaction is the only way
-          #      to write data into Cloud Spanner. These transactions rely on
-          #      pessimistic locking and, if necessary, two-phase commit.
-          #      Locking read-write transactions may abort, requiring the
-          #      application to retry.
-          #
-          #   2. Snapshot read-only. This transaction type provides guaranteed
-          #      consistency across several reads, but does not allow
-          #      writes. Snapshot read-only transactions can be configured to
-          #      read at timestamps in the past. Snapshot read-only
-          #      transactions do not need to be committed.
-          #
-          #   3. Partitioned DML. This type of transaction is used to execute
-          #      a single Partitioned DML statement. Partitioned DML partitions
-          #      the key space and runs the DML statement over each partition
-          #      in parallel using separate, internal transactions that commit
-          #      independently. Partitioned DML transactions do not need to be
-          #      committed.
-          #
-          # For transactions that only read, snapshot read-only transactions
-          # provide simpler semantics and are almost always faster. In
-          # particular, read-only transactions do not take locks, so they do
-          # not conflict with read-write transactions. As a consequence of not
-          # taking locks, they also do not abort, so retry loops are not needed.
-          #
-          # Transactions may only read/write data in a single database. They
-          # may, however, read/write data in different tables within that
-          # database.
-          #
-          # ## Locking Read-Write Transactions
-          #
-          # Locking transactions may be used to atomically read-modify-write
-          # data anywhere in a database. This type of transaction is externally
-          # consistent.
-          #
-          # Clients should attempt to minimize the amount of time a transaction
-          # is active. Faster transactions commit with higher probability
-          # and cause less contention. Cloud Spanner attempts to keep read locks
-          # active as long as the transaction continues to do reads, and the
-          # transaction has not been terminated by
-          # Commit or
-          # Rollback.  Long periods of
-          # inactivity at the client may cause Cloud Spanner to release a
-          # transaction&#x27;s locks and abort it.
-          #
-          # Conceptually, a read-write transaction consists of zero or more
-          # reads or SQL statements followed by
-          # Commit. At any time before
-          # Commit, the client can send a
-          # Rollback request to abort the
-          # transaction.
-          #
-          # ### Semantics
-          #
-          # Cloud Spanner can commit the transaction if all read locks it acquired
-          # are still valid at commit time, and it is able to acquire write
-          # locks for all writes. Cloud Spanner can abort the transaction for any
-          # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
-          # that the transaction has not modified any user data in Cloud Spanner.
-          #
-          # Unless the transaction commits, Cloud Spanner makes no guarantees about
-          # how long the transaction&#x27;s locks were held for. It is an error to
-          # use Cloud Spanner locks for any sort of mutual exclusion other than
-          # between Cloud Spanner transactions themselves.
-          #
-          # ### Retrying Aborted Transactions
-          #
-          # When a transaction aborts, the application can choose to retry the
-          # whole transaction again. To maximize the chances of successfully
-          # committing the retry, the client should execute the retry in the
-          # same session as the original attempt. The original session&#x27;s lock
-          # priority increases with each consecutive abort, meaning that each
-          # attempt has a slightly better chance of success than the previous.
-          #
-          # Under some circumstances (e.g., many transactions attempting to
-          # modify the same row(s)), a transaction can abort many times in a
-          # short period before successfully committing. Thus, it is not a good
-          # idea to cap the number of retries a transaction can attempt;
-          # instead, it is better to limit the total amount of wall time spent
-          # retrying.
-          #
-          # ### Idle Transactions
-          #
-          # A transaction is considered idle if it has no outstanding reads or
-          # SQL queries and has not started a read or SQL query within the last 10
-          # seconds. Idle transactions can be aborted by Cloud Spanner so that they
-          # don&#x27;t hold on to locks indefinitely. In that case, the commit will
-          # fail with error `ABORTED`.
-          #
-          # If this behavior is undesirable, periodically executing a simple
-          # SQL query in the transaction (e.g., `SELECT 1`) prevents the
-          # transaction from becoming idle.
-          #
-          # ## Snapshot Read-Only Transactions
-          #
-          # Snapshot read-only transactions provides a simpler method than
-          # locking read-write transactions for doing several consistent
-          # reads. However, this type of transaction does not support writes.
-          #
-          # Snapshot transactions do not take locks. Instead, they work by
-          # choosing a Cloud Spanner timestamp, then executing all reads at that
-          # timestamp. Since they do not acquire locks, they do not block
-          # concurrent read-write transactions.
-          #
-          # Unlike locking read-write transactions, snapshot read-only
-          # transactions never abort. They can fail if the chosen read
-          # timestamp is garbage collected; however, the default garbage
-          # collection policy is generous enough that most applications do not
-          # need to worry about this in practice.
-          #
-          # Snapshot read-only transactions do not need to call
-          # Commit or
-          # Rollback (and in fact are not
-          # permitted to do so).
-          #
-          # To execute a snapshot transaction, the client specifies a timestamp
-          # bound, which tells Cloud Spanner how to choose a read timestamp.
-          #
-          # The types of timestamp bound are:
-          #
-          #   - Strong (the default).
-          #   - Bounded staleness.
-          #   - Exact staleness.
-          #
-          # If the Cloud Spanner database to be read is geographically distributed,
-          # stale read-only transactions can execute more quickly than strong
-          # or read-write transaction, because they are able to execute far
-          # from the leader replica.
-          #
-          # Each type of timestamp bound is discussed in detail below.
-          #
-          # ### Strong
-          #
-          # Strong reads are guaranteed to see the effects of all transactions
-          # that have committed before the start of the read. Furthermore, all
-          # rows yielded by a single read are consistent with each other -- if
-          # any part of the read observes a transaction, all parts of the read
-          # see the transaction.
-          #
-          # Strong reads are not repeatable: two consecutive strong read-only
-          # transactions might return inconsistent results if there are
-          # concurrent writes. If consistency across reads is required, the
-          # reads should be executed within a transaction or at an exact read
-          # timestamp.
-          #
-          # See TransactionOptions.ReadOnly.strong.
-          #
-          # ### Exact Staleness
-          #
-          # These timestamp bounds execute reads at a user-specified
-          # timestamp. Reads at a timestamp are guaranteed to see a consistent
-          # prefix of the global transaction history: they observe
-          # modifications done by all transactions with a commit timestamp &lt;=
-          # the read timestamp, and observe none of the modifications done by
-          # transactions with a larger commit timestamp. They will block until
-          # all conflicting transactions that may be assigned commit timestamps
-          # &lt;= the read timestamp have finished.
-          #
-          # The timestamp can either be expressed as an absolute Cloud Spanner commit
-          # timestamp or a staleness relative to the current time.
-          #
-          # These modes do not require a &quot;negotiation phase&quot; to pick a
-          # timestamp. As a result, they execute slightly faster than the
-          # equivalent boundedly stale concurrency modes. On the other hand,
-          # boundedly stale reads usually return fresher results.
-          #
-          # See TransactionOptions.ReadOnly.read_timestamp and
-          # TransactionOptions.ReadOnly.exact_staleness.
-          #
-          # ### Bounded Staleness
-          #
-          # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
-          # subject to a user-provided staleness bound. Cloud Spanner chooses the
-          # newest timestamp within the staleness bound that allows execution
-          # of the reads at the closest available replica without blocking.
-          #
-          # All rows yielded are consistent with each other -- if any part of
-          # the read observes a transaction, all parts of the read see the
-          # transaction. Boundedly stale reads are not repeatable: two stale
-          # reads, even if they use the same staleness bound, can execute at
-          # different timestamps and thus return inconsistent results.
-          #
-          # Boundedly stale reads execute in two phases: the first phase
-          # negotiates a timestamp among all replicas needed to serve the
-          # read. In the second phase, reads are executed at the negotiated
-          # timestamp.
-          #
-          # As a result of the two phase execution, bounded staleness reads are
-          # usually a little slower than comparable exact staleness
-          # reads. However, they are typically able to return fresher
-          # results, and are more likely to execute at the closest replica.
-          #
-          # Because the timestamp negotiation requires up-front knowledge of
-          # which rows will be read, it can only be used with single-use
-          # read-only transactions.
-          #
-          # See TransactionOptions.ReadOnly.max_staleness and
-          # TransactionOptions.ReadOnly.min_read_timestamp.
-          #
-          # ### Old Read Timestamps and Garbage Collection
-          #
-          # Cloud Spanner continuously garbage collects deleted and overwritten data
-          # in the background to reclaim storage space. This process is known
-          # as &quot;version GC&quot;. By default, version GC reclaims versions after they
-          # are one hour old. Because of this, Cloud Spanner cannot perform reads
-          # at read timestamps more than one hour in the past. This
-          # restriction also applies to in-progress reads and/or SQL queries whose
-          # timestamp become too old while executing. Reads and SQL queries with
-          # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
-          #
-          # ## Partitioned DML Transactions
-          #
-          # Partitioned DML transactions are used to execute DML statements with a
-          # different execution strategy that provides different, and often better,
-          # scalability properties for large, table-wide operations than DML in a
-          # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
-          # should prefer using ReadWrite transactions.
-          #
-          # Partitioned DML partitions the keyspace and runs the DML statement on each
-          # partition in separate, internal transactions. These transactions commit
-          # automatically when complete, and run independently from one another.
-          #
-          # To reduce lock contention, this execution strategy only acquires read locks
-          # on rows that match the WHERE clause of the statement. Additionally, the
-          # smaller per-partition transactions hold locks for less time.
-          #
-          # That said, Partitioned DML is not a drop-in replacement for standard DML used
-          # in ReadWrite transactions.
-          #
-          #  - The DML statement must be fully-partitionable. Specifically, the statement
-          #    must be expressible as the union of many statements which each access only
-          #    a single row of the table.
-          #
-          #  - The statement is not applied atomically to all rows of the table. Rather,
-          #    the statement is applied atomically to partitions of the table, in
-          #    independent transactions. Secondary index rows are updated atomically
-          #    with the base table rows.
-          #
-          #  - Partitioned DML does not guarantee exactly-once execution semantics
-          #    against a partition. The statement will be applied at least once to each
-          #    partition. It is strongly recommended that the DML statement should be
-          #    idempotent to avoid unexpected results. For instance, it is potentially
-          #    dangerous to run a statement such as
-          #    `UPDATE table SET column = column + 1` as it could be run multiple times
-          #    against some rows.
-          #
-          #  - The partitions are committed automatically - there is no support for
-          #    Commit or Rollback. If the call returns an error, or if the client issuing
-          #    the ExecuteSql call dies, it is possible that some rows had the statement
-          #    executed on them successfully. It is also possible that statement was
-          #    never executed against other rows.
-          #
-          #  - Partitioned DML transactions may only contain the execution of a single
-          #    DML statement via ExecuteSql or ExecuteStreamingSql.
-          #
-          #  - If any error is encountered during the execution of the partitioned DML
-          #    operation (for instance, a UNIQUE INDEX violation, division by zero, or a
-          #    value that cannot be stored due to schema constraints), then the
-          #    operation is stopped at that point and an error is returned. It is
-          #    possible that at this point, some partitions have been committed (or even
-          #    committed multiple times), and other partitions have not been run at all.
-          #
-          # Given the above, Partitioned DML is good fit for large, database-wide,
-          # operations that are idempotent, such as deleting old rows from a very large
-          # table.
-        &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
-            #
-            # Authorization to begin a Partitioned DML transaction requires
-            # `spanner.databases.beginPartitionedDmlTransaction` permission
-            # on the `session` resource.
-        },
-        &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
-            #
-            # Authorization to begin a read-write transaction requires
-            # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
-            # on the `session` resource.
-            # transaction type has no options.
-        },
-        &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
-            #
-            # Authorization to begin a read-only transaction requires
-            # `spanner.databases.beginReadOnlyTransaction` permission
-            # on the `session` resource.
-          &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
-              # seconds. Guarantees that all writes that have committed more
-              # than the specified number of seconds ago are visible. Because
-              # Cloud Spanner chooses the exact timestamp, this mode works even if
-              # the client&#x27;s local clock is substantially skewed from Cloud Spanner
-              # commit timestamps.
-              #
-              # Useful for reading the freshest data available at a nearby
-              # replica, while bounding the possible staleness if the local
-              # replica has fallen behind.
-              #
-              # Note that this option can only be used in single-use
-              # transactions.
-          &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
-              #
-              # This is useful for requesting fresher data than some previous
-              # read, or data that is fresh enough to observe the effects of some
-              # previously committed transaction whose timestamp is known.
-              #
-              # Note that this option can only be used in single-use transactions.
-              #
-              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-          &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
-              # are visible.
-          &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
-              # the Transaction message that describes the transaction.
-          &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
-              # old. The timestamp is chosen soon after the read is started.
-              #
-              # Guarantees that all writes that have committed more than the
-              # specified number of seconds ago are visible. Because Cloud Spanner
-              # chooses the exact timestamp, this mode works even if the client&#x27;s
-              # local clock is substantially skewed from Cloud Spanner commit
-              # timestamps.
-              #
-              # Useful for reading at nearby replicas without the distributed
-              # timestamp negotiation overhead of `max_staleness`.
-          &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
-              # reads at a specific timestamp are repeatable; the same read at
-              # the same timestamp always returns the same data. If the
-              # timestamp is in the future, the read will block until the
-              # specified timestamp, modulo the read&#x27;s deadline.
-              #
-              # Useful for large scale consistent reads such as mapreduces, or
-              # for coordinating many reads against a consistent snapshot of the
-              # data.
-              #
-              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-        },
-      },
-      &quot;singleUse&quot;: { # # Transactions # Execute the read or SQL query in a temporary transaction.
-          # This is the most efficient way to execute a transaction that
-          # consists of a single SQL query.
-          #
-          #
-          # Each session can have at most one active transaction at a time. After the
-          # active transaction is completed, the session can immediately be
-          # re-used for the next transaction. It is not necessary to create a
-          # new session for each transaction.
-          #
-          # # Transaction Modes
-          #
-          # Cloud Spanner supports three transaction modes:
-          #
-          #   1. Locking read-write. This type of transaction is the only way
-          #      to write data into Cloud Spanner. These transactions rely on
-          #      pessimistic locking and, if necessary, two-phase commit.
-          #      Locking read-write transactions may abort, requiring the
-          #      application to retry.
-          #
-          #   2. Snapshot read-only. This transaction type provides guaranteed
-          #      consistency across several reads, but does not allow
-          #      writes. Snapshot read-only transactions can be configured to
-          #      read at timestamps in the past. Snapshot read-only
-          #      transactions do not need to be committed.
-          #
-          #   3. Partitioned DML. This type of transaction is used to execute
-          #      a single Partitioned DML statement. Partitioned DML partitions
-          #      the key space and runs the DML statement over each partition
-          #      in parallel using separate, internal transactions that commit
-          #      independently. Partitioned DML transactions do not need to be
-          #      committed.
-          #
-          # For transactions that only read, snapshot read-only transactions
-          # provide simpler semantics and are almost always faster. In
-          # particular, read-only transactions do not take locks, so they do
-          # not conflict with read-write transactions. As a consequence of not
-          # taking locks, they also do not abort, so retry loops are not needed.
-          #
-          # Transactions may only read/write data in a single database. They
-          # may, however, read/write data in different tables within that
-          # database.
-          #
-          # ## Locking Read-Write Transactions
-          #
-          # Locking transactions may be used to atomically read-modify-write
-          # data anywhere in a database. This type of transaction is externally
-          # consistent.
-          #
-          # Clients should attempt to minimize the amount of time a transaction
-          # is active. Faster transactions commit with higher probability
-          # and cause less contention. Cloud Spanner attempts to keep read locks
-          # active as long as the transaction continues to do reads, and the
-          # transaction has not been terminated by
-          # Commit or
-          # Rollback.  Long periods of
-          # inactivity at the client may cause Cloud Spanner to release a
-          # transaction&#x27;s locks and abort it.
-          #
-          # Conceptually, a read-write transaction consists of zero or more
-          # reads or SQL statements followed by
-          # Commit. At any time before
-          # Commit, the client can send a
-          # Rollback request to abort the
-          # transaction.
-          #
-          # ### Semantics
-          #
-          # Cloud Spanner can commit the transaction if all read locks it acquired
-          # are still valid at commit time, and it is able to acquire write
-          # locks for all writes. Cloud Spanner can abort the transaction for any
-          # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
-          # that the transaction has not modified any user data in Cloud Spanner.
-          #
-          # Unless the transaction commits, Cloud Spanner makes no guarantees about
-          # how long the transaction&#x27;s locks were held for. It is an error to
-          # use Cloud Spanner locks for any sort of mutual exclusion other than
-          # between Cloud Spanner transactions themselves.
-          #
-          # ### Retrying Aborted Transactions
-          #
-          # When a transaction aborts, the application can choose to retry the
-          # whole transaction again. To maximize the chances of successfully
-          # committing the retry, the client should execute the retry in the
-          # same session as the original attempt. The original session&#x27;s lock
-          # priority increases with each consecutive abort, meaning that each
-          # attempt has a slightly better chance of success than the previous.
-          #
-          # Under some circumstances (e.g., many transactions attempting to
-          # modify the same row(s)), a transaction can abort many times in a
-          # short period before successfully committing. Thus, it is not a good
-          # idea to cap the number of retries a transaction can attempt;
-          # instead, it is better to limit the total amount of wall time spent
-          # retrying.
-          #
-          # ### Idle Transactions
-          #
-          # A transaction is considered idle if it has no outstanding reads or
-          # SQL queries and has not started a read or SQL query within the last 10
-          # seconds. Idle transactions can be aborted by Cloud Spanner so that they
-          # don&#x27;t hold on to locks indefinitely. In that case, the commit will
-          # fail with error `ABORTED`.
-          #
-          # If this behavior is undesirable, periodically executing a simple
-          # SQL query in the transaction (e.g., `SELECT 1`) prevents the
-          # transaction from becoming idle.
-          #
-          # ## Snapshot Read-Only Transactions
-          #
-          # Snapshot read-only transactions provides a simpler method than
-          # locking read-write transactions for doing several consistent
-          # reads. However, this type of transaction does not support writes.
-          #
-          # Snapshot transactions do not take locks. Instead, they work by
-          # choosing a Cloud Spanner timestamp, then executing all reads at that
-          # timestamp. Since they do not acquire locks, they do not block
-          # concurrent read-write transactions.
-          #
-          # Unlike locking read-write transactions, snapshot read-only
-          # transactions never abort. They can fail if the chosen read
-          # timestamp is garbage collected; however, the default garbage
-          # collection policy is generous enough that most applications do not
-          # need to worry about this in practice.
-          #
-          # Snapshot read-only transactions do not need to call
-          # Commit or
-          # Rollback (and in fact are not
-          # permitted to do so).
-          #
-          # To execute a snapshot transaction, the client specifies a timestamp
-          # bound, which tells Cloud Spanner how to choose a read timestamp.
-          #
-          # The types of timestamp bound are:
-          #
-          #   - Strong (the default).
-          #   - Bounded staleness.
-          #   - Exact staleness.
-          #
-          # If the Cloud Spanner database to be read is geographically distributed,
-          # stale read-only transactions can execute more quickly than strong
-          # or read-write transaction, because they are able to execute far
-          # from the leader replica.
-          #
-          # Each type of timestamp bound is discussed in detail below.
-          #
-          # ### Strong
-          #
-          # Strong reads are guaranteed to see the effects of all transactions
-          # that have committed before the start of the read. Furthermore, all
-          # rows yielded by a single read are consistent with each other -- if
-          # any part of the read observes a transaction, all parts of the read
-          # see the transaction.
-          #
-          # Strong reads are not repeatable: two consecutive strong read-only
-          # transactions might return inconsistent results if there are
-          # concurrent writes. If consistency across reads is required, the
-          # reads should be executed within a transaction or at an exact read
-          # timestamp.
-          #
-          # See TransactionOptions.ReadOnly.strong.
-          #
-          # ### Exact Staleness
-          #
-          # These timestamp bounds execute reads at a user-specified
-          # timestamp. Reads at a timestamp are guaranteed to see a consistent
-          # prefix of the global transaction history: they observe
-          # modifications done by all transactions with a commit timestamp &lt;=
-          # the read timestamp, and observe none of the modifications done by
-          # transactions with a larger commit timestamp. They will block until
-          # all conflicting transactions that may be assigned commit timestamps
-          # &lt;= the read timestamp have finished.
-          #
-          # The timestamp can either be expressed as an absolute Cloud Spanner commit
-          # timestamp or a staleness relative to the current time.
-          #
-          # These modes do not require a &quot;negotiation phase&quot; to pick a
-          # timestamp. As a result, they execute slightly faster than the
-          # equivalent boundedly stale concurrency modes. On the other hand,
-          # boundedly stale reads usually return fresher results.
-          #
-          # See TransactionOptions.ReadOnly.read_timestamp and
-          # TransactionOptions.ReadOnly.exact_staleness.
-          #
-          # ### Bounded Staleness
-          #
-          # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
-          # subject to a user-provided staleness bound. Cloud Spanner chooses the
-          # newest timestamp within the staleness bound that allows execution
-          # of the reads at the closest available replica without blocking.
-          #
-          # All rows yielded are consistent with each other -- if any part of
-          # the read observes a transaction, all parts of the read see the
-          # transaction. Boundedly stale reads are not repeatable: two stale
-          # reads, even if they use the same staleness bound, can execute at
-          # different timestamps and thus return inconsistent results.
-          #
-          # Boundedly stale reads execute in two phases: the first phase
-          # negotiates a timestamp among all replicas needed to serve the
-          # read. In the second phase, reads are executed at the negotiated
-          # timestamp.
-          #
-          # As a result of the two phase execution, bounded staleness reads are
-          # usually a little slower than comparable exact staleness
-          # reads. However, they are typically able to return fresher
-          # results, and are more likely to execute at the closest replica.
-          #
-          # Because the timestamp negotiation requires up-front knowledge of
-          # which rows will be read, it can only be used with single-use
-          # read-only transactions.
-          #
-          # See TransactionOptions.ReadOnly.max_staleness and
-          # TransactionOptions.ReadOnly.min_read_timestamp.
-          #
-          # ### Old Read Timestamps and Garbage Collection
-          #
-          # Cloud Spanner continuously garbage collects deleted and overwritten data
-          # in the background to reclaim storage space. This process is known
-          # as &quot;version GC&quot;. By default, version GC reclaims versions after they
-          # are one hour old. Because of this, Cloud Spanner cannot perform reads
-          # at read timestamps more than one hour in the past. This
-          # restriction also applies to in-progress reads and/or SQL queries whose
-          # timestamp become too old while executing. Reads and SQL queries with
-          # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
-          #
-          # ## Partitioned DML Transactions
-          #
-          # Partitioned DML transactions are used to execute DML statements with a
-          # different execution strategy that provides different, and often better,
-          # scalability properties for large, table-wide operations than DML in a
-          # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
-          # should prefer using ReadWrite transactions.
-          #
-          # Partitioned DML partitions the keyspace and runs the DML statement on each
-          # partition in separate, internal transactions. These transactions commit
-          # automatically when complete, and run independently from one another.
-          #
-          # To reduce lock contention, this execution strategy only acquires read locks
-          # on rows that match the WHERE clause of the statement. Additionally, the
-          # smaller per-partition transactions hold locks for less time.
-          #
-          # That said, Partitioned DML is not a drop-in replacement for standard DML used
-          # in ReadWrite transactions.
-          #
-          #  - The DML statement must be fully-partitionable. Specifically, the statement
-          #    must be expressible as the union of many statements which each access only
-          #    a single row of the table.
-          #
-          #  - The statement is not applied atomically to all rows of the table. Rather,
-          #    the statement is applied atomically to partitions of the table, in
-          #    independent transactions. Secondary index rows are updated atomically
-          #    with the base table rows.
-          #
-          #  - Partitioned DML does not guarantee exactly-once execution semantics
-          #    against a partition. The statement will be applied at least once to each
-          #    partition. It is strongly recommended that the DML statement should be
-          #    idempotent to avoid unexpected results. For instance, it is potentially
-          #    dangerous to run a statement such as
-          #    `UPDATE table SET column = column + 1` as it could be run multiple times
-          #    against some rows.
-          #
-          #  - The partitions are committed automatically - there is no support for
-          #    Commit or Rollback. If the call returns an error, or if the client issuing
-          #    the ExecuteSql call dies, it is possible that some rows had the statement
-          #    executed on them successfully. It is also possible that statement was
-          #    never executed against other rows.
-          #
-          #  - Partitioned DML transactions may only contain the execution of a single
-          #    DML statement via ExecuteSql or ExecuteStreamingSql.
-          #
-          #  - If any error is encountered during the execution of the partitioned DML
-          #    operation (for instance, a UNIQUE INDEX violation, division by zero, or a
-          #    value that cannot be stored due to schema constraints), then the
-          #    operation is stopped at that point and an error is returned. It is
-          #    possible that at this point, some partitions have been committed (or even
-          #    committed multiple times), and other partitions have not been run at all.
-          #
-          # Given the above, Partitioned DML is good fit for large, database-wide,
-          # operations that are idempotent, such as deleting old rows from a very large
-          # table.
-        &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
-            #
-            # Authorization to begin a Partitioned DML transaction requires
-            # `spanner.databases.beginPartitionedDmlTransaction` permission
-            # on the `session` resource.
-        },
-        &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
-            #
-            # Authorization to begin a read-write transaction requires
-            # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
-            # on the `session` resource.
-            # transaction type has no options.
-        },
-        &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
-            #
-            # Authorization to begin a read-only transaction requires
-            # `spanner.databases.beginReadOnlyTransaction` permission
-            # on the `session` resource.
-          &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
-              # seconds. Guarantees that all writes that have committed more
-              # than the specified number of seconds ago are visible. Because
-              # Cloud Spanner chooses the exact timestamp, this mode works even if
-              # the client&#x27;s local clock is substantially skewed from Cloud Spanner
-              # commit timestamps.
-              #
-              # Useful for reading the freshest data available at a nearby
-              # replica, while bounding the possible staleness if the local
-              # replica has fallen behind.
-              #
-              # Note that this option can only be used in single-use
-              # transactions.
-          &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
-              #
-              # This is useful for requesting fresher data than some previous
-              # read, or data that is fresh enough to observe the effects of some
-              # previously committed transaction whose timestamp is known.
-              #
-              # Note that this option can only be used in single-use transactions.
-              #
-              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-          &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
-              # are visible.
-          &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
-              # the Transaction message that describes the transaction.
-          &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
-              # old. The timestamp is chosen soon after the read is started.
-              #
-              # Guarantees that all writes that have committed more than the
-              # specified number of seconds ago are visible. Because Cloud Spanner
-              # chooses the exact timestamp, this mode works even if the client&#x27;s
-              # local clock is substantially skewed from Cloud Spanner commit
-              # timestamps.
-              #
-              # Useful for reading at nearby replicas without the distributed
-              # timestamp negotiation overhead of `max_staleness`.
-          &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
-              # reads at a specific timestamp are repeatable; the same read at
-              # the same timestamp always returns the same data. If the
-              # timestamp is in the future, the read will block until the
-              # specified timestamp, modulo the read&#x27;s deadline.
-              #
-              # Useful for large scale consistent reads such as mapreduces, or
-              # for coordinating many reads against a consistent snapshot of the
-              # data.
-              #
-              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-        },
-      },
-    },
-    &quot;queryMode&quot;: &quot;A String&quot;, # Used to control the amount of debugging information returned in
-        # ResultSetStats. If partition_token is set, query_mode can only
-        # be set to QueryMode.NORMAL.
-    &quot;partitionToken&quot;: &quot;A String&quot;, # If present, results will be restricted to the specified partition
-        # previously created using PartitionQuery().  There must be an exact
-        # match for the values of fields common to this message and the
-        # PartitionQueryRequest message used to create this partition_token.
     &quot;resumeToken&quot;: &quot;A String&quot;, # If this request is resuming a previously interrupted SQL statement
         # execution, `resume_token` should be copied from the last
         # PartialResultSet yielded before the interruption. Doing this
@@ -3104,6 +2379,774 @@
           #
           # The `optimizer_version` statement hint has precedence over this setting.
     },
+    &quot;partitionToken&quot;: &quot;A String&quot;, # If present, results will be restricted to the specified partition
+        # previously created using PartitionQuery().  There must be an exact
+        # match for the values of fields common to this message and the
+        # PartitionQueryRequest message used to create this partition_token.
+    &quot;queryMode&quot;: &quot;A String&quot;, # Used to control the amount of debugging information returned in
+        # ResultSetStats. If partition_token is set, query_mode can only
+        # be set to QueryMode.NORMAL.
+    &quot;transaction&quot;: { # This message is used to select the transaction in which a # The transaction to use.
+        # 
+        # For queries, if none is provided, the default is a temporary read-only
+        # transaction with strong concurrency.
+        # 
+        # Standard DML statements require a read-write transaction. To protect
+        # against replays, single-use transactions are not supported.  The caller
+        # must either supply an existing transaction ID or begin a new transaction.
+        # 
+        # Partitioned DML requires an existing Partitioned DML transaction ID.
+        # Read or
+        # ExecuteSql call runs.
+        #
+        # See TransactionOptions for more information about transactions.
+      &quot;singleUse&quot;: { # # Transactions # Execute the read or SQL query in a temporary transaction.
+          # This is the most efficient way to execute a transaction that
+          # consists of a single SQL query.
+          #
+          #
+          # Each session can have at most one active transaction at a time (note that
+          # standalone reads and queries use a transaction internally and do count
+          # towards the one transaction limit). After the active transaction is
+          # completed, the session can immediately be re-used for the next transaction.
+          # It is not necessary to create a new session for each transaction.
+          #
+          # # Transaction Modes
+          #
+          # Cloud Spanner supports three transaction modes:
+          #
+          #   1. Locking read-write. This type of transaction is the only way
+          #      to write data into Cloud Spanner. These transactions rely on
+          #      pessimistic locking and, if necessary, two-phase commit.
+          #      Locking read-write transactions may abort, requiring the
+          #      application to retry.
+          #
+          #   2. Snapshot read-only. This transaction type provides guaranteed
+          #      consistency across several reads, but does not allow
+          #      writes. Snapshot read-only transactions can be configured to
+          #      read at timestamps in the past. Snapshot read-only
+          #      transactions do not need to be committed.
+          #
+          #   3. Partitioned DML. This type of transaction is used to execute
+          #      a single Partitioned DML statement. Partitioned DML partitions
+          #      the key space and runs the DML statement over each partition
+          #      in parallel using separate, internal transactions that commit
+          #      independently. Partitioned DML transactions do not need to be
+          #      committed.
+          #
+          # For transactions that only read, snapshot read-only transactions
+          # provide simpler semantics and are almost always faster. In
+          # particular, read-only transactions do not take locks, so they do
+          # not conflict with read-write transactions. As a consequence of not
+          # taking locks, they also do not abort, so retry loops are not needed.
+          #
+          # Transactions may only read/write data in a single database. They
+          # may, however, read/write data in different tables within that
+          # database.
+          #
+          # ## Locking Read-Write Transactions
+          #
+          # Locking transactions may be used to atomically read-modify-write
+          # data anywhere in a database. This type of transaction is externally
+          # consistent.
+          #
+          # Clients should attempt to minimize the amount of time a transaction
+          # is active. Faster transactions commit with higher probability
+          # and cause less contention. Cloud Spanner attempts to keep read locks
+          # active as long as the transaction continues to do reads, and the
+          # transaction has not been terminated by
+          # Commit or
+          # Rollback.  Long periods of
+          # inactivity at the client may cause Cloud Spanner to release a
+          # transaction&#x27;s locks and abort it.
+          #
+          # Conceptually, a read-write transaction consists of zero or more
+          # reads or SQL statements followed by
+          # Commit. At any time before
+          # Commit, the client can send a
+          # Rollback request to abort the
+          # transaction.
+          #
+          # ### Semantics
+          #
+          # Cloud Spanner can commit the transaction if all read locks it acquired
+          # are still valid at commit time, and it is able to acquire write
+          # locks for all writes. Cloud Spanner can abort the transaction for any
+          # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
+          # that the transaction has not modified any user data in Cloud Spanner.
+          #
+          # Unless the transaction commits, Cloud Spanner makes no guarantees about
+          # how long the transaction&#x27;s locks were held for. It is an error to
+          # use Cloud Spanner locks for any sort of mutual exclusion other than
+          # between Cloud Spanner transactions themselves.
+          #
+          # ### Retrying Aborted Transactions
+          #
+          # When a transaction aborts, the application can choose to retry the
+          # whole transaction again. To maximize the chances of successfully
+          # committing the retry, the client should execute the retry in the
+          # same session as the original attempt. The original session&#x27;s lock
+          # priority increases with each consecutive abort, meaning that each
+          # attempt has a slightly better chance of success than the previous.
+          #
+          # Under some circumstances (e.g., many transactions attempting to
+          # modify the same row(s)), a transaction can abort many times in a
+          # short period before successfully committing. Thus, it is not a good
+          # idea to cap the number of retries a transaction can attempt;
+          # instead, it is better to limit the total amount of wall time spent
+          # retrying.
+          #
+          # ### Idle Transactions
+          #
+          # A transaction is considered idle if it has no outstanding reads or
+          # SQL queries and has not started a read or SQL query within the last 10
+          # seconds. Idle transactions can be aborted by Cloud Spanner so that they
+          # don&#x27;t hold on to locks indefinitely. In that case, the commit will
+          # fail with error `ABORTED`.
+          #
+          # If this behavior is undesirable, periodically executing a simple
+          # SQL query in the transaction (e.g., `SELECT 1`) prevents the
+          # transaction from becoming idle.
+          #
+          # ## Snapshot Read-Only Transactions
+          #
+          # Snapshot read-only transactions provides a simpler method than
+          # locking read-write transactions for doing several consistent
+          # reads. However, this type of transaction does not support writes.
+          #
+          # Snapshot transactions do not take locks. Instead, they work by
+          # choosing a Cloud Spanner timestamp, then executing all reads at that
+          # timestamp. Since they do not acquire locks, they do not block
+          # concurrent read-write transactions.
+          #
+          # Unlike locking read-write transactions, snapshot read-only
+          # transactions never abort. They can fail if the chosen read
+          # timestamp is garbage collected; however, the default garbage
+          # collection policy is generous enough that most applications do not
+          # need to worry about this in practice.
+          #
+          # Snapshot read-only transactions do not need to call
+          # Commit or
+          # Rollback (and in fact are not
+          # permitted to do so).
+          #
+          # To execute a snapshot transaction, the client specifies a timestamp
+          # bound, which tells Cloud Spanner how to choose a read timestamp.
+          #
+          # The types of timestamp bound are:
+          #
+          #   - Strong (the default).
+          #   - Bounded staleness.
+          #   - Exact staleness.
+          #
+          # If the Cloud Spanner database to be read is geographically distributed,
+          # stale read-only transactions can execute more quickly than strong
+          # or read-write transaction, because they are able to execute far
+          # from the leader replica.
+          #
+          # Each type of timestamp bound is discussed in detail below.
+          #
+          # ### Strong
+          #
+          # Strong reads are guaranteed to see the effects of all transactions
+          # that have committed before the start of the read. Furthermore, all
+          # rows yielded by a single read are consistent with each other -- if
+          # any part of the read observes a transaction, all parts of the read
+          # see the transaction.
+          #
+          # Strong reads are not repeatable: two consecutive strong read-only
+          # transactions might return inconsistent results if there are
+          # concurrent writes. If consistency across reads is required, the
+          # reads should be executed within a transaction or at an exact read
+          # timestamp.
+          #
+          # See TransactionOptions.ReadOnly.strong.
+          #
+          # ### Exact Staleness
+          #
+          # These timestamp bounds execute reads at a user-specified
+          # timestamp. Reads at a timestamp are guaranteed to see a consistent
+          # prefix of the global transaction history: they observe
+          # modifications done by all transactions with a commit timestamp &lt;=
+          # the read timestamp, and observe none of the modifications done by
+          # transactions with a larger commit timestamp. They will block until
+          # all conflicting transactions that may be assigned commit timestamps
+          # &lt;= the read timestamp have finished.
+          #
+          # The timestamp can either be expressed as an absolute Cloud Spanner commit
+          # timestamp or a staleness relative to the current time.
+          #
+          # These modes do not require a &quot;negotiation phase&quot; to pick a
+          # timestamp. As a result, they execute slightly faster than the
+          # equivalent boundedly stale concurrency modes. On the other hand,
+          # boundedly stale reads usually return fresher results.
+          #
+          # See TransactionOptions.ReadOnly.read_timestamp and
+          # TransactionOptions.ReadOnly.exact_staleness.
+          #
+          # ### Bounded Staleness
+          #
+          # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
+          # subject to a user-provided staleness bound. Cloud Spanner chooses the
+          # newest timestamp within the staleness bound that allows execution
+          # of the reads at the closest available replica without blocking.
+          #
+          # All rows yielded are consistent with each other -- if any part of
+          # the read observes a transaction, all parts of the read see the
+          # transaction. Boundedly stale reads are not repeatable: two stale
+          # reads, even if they use the same staleness bound, can execute at
+          # different timestamps and thus return inconsistent results.
+          #
+          # Boundedly stale reads execute in two phases: the first phase
+          # negotiates a timestamp among all replicas needed to serve the
+          # read. In the second phase, reads are executed at the negotiated
+          # timestamp.
+          #
+          # As a result of the two phase execution, bounded staleness reads are
+          # usually a little slower than comparable exact staleness
+          # reads. However, they are typically able to return fresher
+          # results, and are more likely to execute at the closest replica.
+          #
+          # Because the timestamp negotiation requires up-front knowledge of
+          # which rows will be read, it can only be used with single-use
+          # read-only transactions.
+          #
+          # See TransactionOptions.ReadOnly.max_staleness and
+          # TransactionOptions.ReadOnly.min_read_timestamp.
+          #
+          # ### Old Read Timestamps and Garbage Collection
+          #
+          # Cloud Spanner continuously garbage collects deleted and overwritten data
+          # in the background to reclaim storage space. This process is known
+          # as &quot;version GC&quot;. By default, version GC reclaims versions after they
+          # are one hour old. Because of this, Cloud Spanner cannot perform reads
+          # at read timestamps more than one hour in the past. This
+          # restriction also applies to in-progress reads and/or SQL queries whose
+          # timestamp becomes too old while executing. Reads and SQL queries with
+          # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
+          #
+          # ## Partitioned DML Transactions
+          #
+          # Partitioned DML transactions are used to execute DML statements with a
+          # different execution strategy that provides different, and often better,
+          # scalability properties for large, table-wide operations than DML in a
+          # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
+          # should prefer using ReadWrite transactions.
+          #
+          # Partitioned DML partitions the keyspace and runs the DML statement on each
+          # partition in separate, internal transactions. These transactions commit
+          # automatically when complete, and run independently from one another.
+          #
+          # To reduce lock contention, this execution strategy only acquires read locks
+          # on rows that match the WHERE clause of the statement. Additionally, the
+          # smaller per-partition transactions hold locks for less time.
+          #
+          # That said, Partitioned DML is not a drop-in replacement for standard DML used
+          # in ReadWrite transactions.
+          #
+          #  - The DML statement must be fully-partitionable. Specifically, the statement
+          #    must be expressible as the union of many statements which each access only
+          #    a single row of the table.
+          #
+          #  - The statement is not applied atomically to all rows of the table. Rather,
+          #    the statement is applied atomically to partitions of the table, in
+          #    independent transactions. Secondary index rows are updated atomically
+          #    with the base table rows.
+          #
+          #  - Partitioned DML does not guarantee exactly-once execution semantics
+          #    against a partition. The statement will be applied at least once to each
+          #    partition. It is strongly recommended that the DML statement should be
+          #    idempotent to avoid unexpected results. For instance, it is potentially
+          #    dangerous to run a statement such as
+          #    `UPDATE table SET column = column + 1` as it could be run multiple times
+          #    against some rows.
+          #
+          #  - The partitions are committed automatically - there is no support for
+          #    Commit or Rollback. If the call returns an error, or if the client issuing
+          #    the ExecuteSql call dies, it is possible that some rows had the statement
+          #    executed on them successfully. It is also possible that statement was
+          #    never executed against other rows.
+          #
+          #  - Partitioned DML transactions may only contain the execution of a single
+          #    DML statement via ExecuteSql or ExecuteStreamingSql.
+          #
+          #  - If any error is encountered during the execution of the partitioned DML
+          #    operation (for instance, a UNIQUE INDEX violation, division by zero, or a
+          #    value that cannot be stored due to schema constraints), then the
+          #    operation is stopped at that point and an error is returned. It is
+          #    possible that at this point, some partitions have been committed (or even
+          #    committed multiple times), and other partitions have not been run at all.
+          #
+          # Given the above, Partitioned DML is a good fit for large, database-wide
+          # operations that are idempotent, such as deleting old rows from a very large
+          # table.
+        &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
+            #
+            # Authorization to begin a read-write transaction requires
+            # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
+            # on the `session` resource.
+            # transaction type has no options.
+        },
+        &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
+            #
+            # Authorization to begin a read-only transaction requires
+            # `spanner.databases.beginReadOnlyTransaction` permission
+            # on the `session` resource.
+          &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
+              # reads at a specific timestamp are repeatable; the same read at
+              # the same timestamp always returns the same data. If the
+              # timestamp is in the future, the read will block until the
+              # specified timestamp, modulo the read&#x27;s deadline.
+              #
+              # Useful for large scale consistent reads such as mapreduces, or
+              # for coordinating many reads against a consistent snapshot of the
+              # data.
+              #
+              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+          &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
+              #
+              # This is useful for requesting fresher data than some previous
+              # read, or data that is fresh enough to observe the effects of some
+              # previously committed transaction whose timestamp is known.
+              #
+              # Note that this option can only be used in single-use transactions.
+              #
+              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+          &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
+              # old. The timestamp is chosen soon after the read is started.
+              #
+              # Guarantees that all writes that have committed more than the
+              # specified number of seconds ago are visible. Because Cloud Spanner
+              # chooses the exact timestamp, this mode works even if the client&#x27;s
+              # local clock is substantially skewed from Cloud Spanner commit
+              # timestamps.
+              #
+              # Useful for reading at nearby replicas without the distributed
+              # timestamp negotiation overhead of `max_staleness`.
+          &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
+              # seconds. Guarantees that all writes that have committed more
+              # than the specified number of seconds ago are visible. Because
+              # Cloud Spanner chooses the exact timestamp, this mode works even if
+              # the client&#x27;s local clock is substantially skewed from Cloud Spanner
+              # commit timestamps.
+              #
+              # Useful for reading the freshest data available at a nearby
+              # replica, while bounding the possible staleness if the local
+              # replica has fallen behind.
+              #
+              # Note that this option can only be used in single-use
+              # transactions.
+          &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
+              # the Transaction message that describes the transaction.
+          &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
+              # are visible.
+        },
+        &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
+            #
+            # Authorization to begin a Partitioned DML transaction requires
+            # `spanner.databases.beginPartitionedDmlTransaction` permission
+            # on the `session` resource.
+        },
+      },
+      &quot;begin&quot;: { # # Transactions # Begin a new transaction and execute this read or SQL query in
+          # it. The transaction ID of the new transaction is returned in
+          # ResultSetMetadata.transaction, which is a Transaction.
+          #
+          #
+          # Each session can have at most one active transaction at a time (note that
+          # standalone reads and queries use a transaction internally and do count
+          # towards the one transaction limit). After the active transaction is
+          # completed, the session can immediately be re-used for the next transaction.
+          # It is not necessary to create a new session for each transaction.
+          #
+          # # Transaction Modes
+          #
+          # Cloud Spanner supports three transaction modes:
+          #
+          #   1. Locking read-write. This type of transaction is the only way
+          #      to write data into Cloud Spanner. These transactions rely on
+          #      pessimistic locking and, if necessary, two-phase commit.
+          #      Locking read-write transactions may abort, requiring the
+          #      application to retry.
+          #
+          #   2. Snapshot read-only. This transaction type provides guaranteed
+          #      consistency across several reads, but does not allow
+          #      writes. Snapshot read-only transactions can be configured to
+          #      read at timestamps in the past. Snapshot read-only
+          #      transactions do not need to be committed.
+          #
+          #   3. Partitioned DML. This type of transaction is used to execute
+          #      a single Partitioned DML statement. Partitioned DML partitions
+          #      the key space and runs the DML statement over each partition
+          #      in parallel using separate, internal transactions that commit
+          #      independently. Partitioned DML transactions do not need to be
+          #      committed.
+          #
+          # For transactions that only read, snapshot read-only transactions
+          # provide simpler semantics and are almost always faster. In
+          # particular, read-only transactions do not take locks, so they do
+          # not conflict with read-write transactions. As a consequence of not
+          # taking locks, they also do not abort, so retry loops are not needed.
+          #
+          # Transactions may only read/write data in a single database. They
+          # may, however, read/write data in different tables within that
+          # database.
+          #
+          # ## Locking Read-Write Transactions
+          #
+          # Locking transactions may be used to atomically read-modify-write
+          # data anywhere in a database. This type of transaction is externally
+          # consistent.
+          #
+          # Clients should attempt to minimize the amount of time a transaction
+          # is active. Faster transactions commit with higher probability
+          # and cause less contention. Cloud Spanner attempts to keep read locks
+          # active as long as the transaction continues to do reads, and the
+          # transaction has not been terminated by
+          # Commit or
+          # Rollback.  Long periods of
+          # inactivity at the client may cause Cloud Spanner to release a
+          # transaction&#x27;s locks and abort it.
+          #
+          # Conceptually, a read-write transaction consists of zero or more
+          # reads or SQL statements followed by
+          # Commit. At any time before
+          # Commit, the client can send a
+          # Rollback request to abort the
+          # transaction.
+          #
+          # ### Semantics
+          #
+          # Cloud Spanner can commit the transaction if all read locks it acquired
+          # are still valid at commit time, and it is able to acquire write
+          # locks for all writes. Cloud Spanner can abort the transaction for any
+          # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
+          # that the transaction has not modified any user data in Cloud Spanner.
+          #
+          # Unless the transaction commits, Cloud Spanner makes no guarantees about
+          # how long the transaction&#x27;s locks were held for. It is an error to
+          # use Cloud Spanner locks for any sort of mutual exclusion other than
+          # between Cloud Spanner transactions themselves.
+          #
+          # ### Retrying Aborted Transactions
+          #
+          # When a transaction aborts, the application can choose to retry the
+          # whole transaction again. To maximize the chances of successfully
+          # committing the retry, the client should execute the retry in the
+          # same session as the original attempt. The original session&#x27;s lock
+          # priority increases with each consecutive abort, meaning that each
+          # attempt has a slightly better chance of success than the previous.
+          #
+          # Under some circumstances (e.g., many transactions attempting to
+          # modify the same row(s)), a transaction can abort many times in a
+          # short period before successfully committing. Thus, it is not a good
+          # idea to cap the number of retries a transaction can attempt;
+          # instead, it is better to limit the total amount of wall time spent
+          # retrying.
+          #
+          # ### Idle Transactions
+          #
+          # A transaction is considered idle if it has no outstanding reads or
+          # SQL queries and has not started a read or SQL query within the last 10
+          # seconds. Idle transactions can be aborted by Cloud Spanner so that they
+          # don&#x27;t hold on to locks indefinitely. In that case, the commit will
+          # fail with error `ABORTED`.
+          #
+          # If this behavior is undesirable, periodically executing a simple
+          # SQL query in the transaction (e.g., `SELECT 1`) prevents the
+          # transaction from becoming idle.
+          #
+          # ## Snapshot Read-Only Transactions
+          #
+          # Snapshot read-only transactions provide a simpler method than
+          # locking read-write transactions for doing several consistent
+          # reads. However, this type of transaction does not support writes.
+          #
+          # Snapshot transactions do not take locks. Instead, they work by
+          # choosing a Cloud Spanner timestamp, then executing all reads at that
+          # timestamp. Since they do not acquire locks, they do not block
+          # concurrent read-write transactions.
+          #
+          # Unlike locking read-write transactions, snapshot read-only
+          # transactions never abort. They can fail if the chosen read
+          # timestamp is garbage collected; however, the default garbage
+          # collection policy is generous enough that most applications do not
+          # need to worry about this in practice.
+          #
+          # Snapshot read-only transactions do not need to call
+          # Commit or
+          # Rollback (and in fact are not
+          # permitted to do so).
+          #
+          # To execute a snapshot transaction, the client specifies a timestamp
+          # bound, which tells Cloud Spanner how to choose a read timestamp.
+          #
+          # The types of timestamp bound are:
+          #
+          #   - Strong (the default).
+          #   - Bounded staleness.
+          #   - Exact staleness.
+          #
+          # If the Cloud Spanner database to be read is geographically distributed,
+          # stale read-only transactions can execute more quickly than strong
+          # or read-write transactions, because they are able to execute far
+          # from the leader replica.
+          #
+          # Each type of timestamp bound is discussed in detail below.
+          #
+          # ### Strong
+          #
+          # Strong reads are guaranteed to see the effects of all transactions
+          # that have committed before the start of the read. Furthermore, all
+          # rows yielded by a single read are consistent with each other -- if
+          # any part of the read observes a transaction, all parts of the read
+          # see the transaction.
+          #
+          # Strong reads are not repeatable: two consecutive strong read-only
+          # transactions might return inconsistent results if there are
+          # concurrent writes. If consistency across reads is required, the
+          # reads should be executed within a transaction or at an exact read
+          # timestamp.
+          #
+          # See TransactionOptions.ReadOnly.strong.
+          #
+          # ### Exact Staleness
+          #
+          # These timestamp bounds execute reads at a user-specified
+          # timestamp. Reads at a timestamp are guaranteed to see a consistent
+          # prefix of the global transaction history: they observe
+          # modifications done by all transactions with a commit timestamp &lt;=
+          # the read timestamp, and observe none of the modifications done by
+          # transactions with a larger commit timestamp. They will block until
+          # all conflicting transactions that may be assigned commit timestamps
+          # &lt;= the read timestamp have finished.
+          #
+          # The timestamp can either be expressed as an absolute Cloud Spanner commit
+          # timestamp or a staleness relative to the current time.
+          #
+          # These modes do not require a &quot;negotiation phase&quot; to pick a
+          # timestamp. As a result, they execute slightly faster than the
+          # equivalent boundedly stale concurrency modes. On the other hand,
+          # boundedly stale reads usually return fresher results.
+          #
+          # See TransactionOptions.ReadOnly.read_timestamp and
+          # TransactionOptions.ReadOnly.exact_staleness.
+          #
+          # ### Bounded Staleness
+          #
+          # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
+          # subject to a user-provided staleness bound. Cloud Spanner chooses the
+          # newest timestamp within the staleness bound that allows execution
+          # of the reads at the closest available replica without blocking.
+          #
+          # All rows yielded are consistent with each other -- if any part of
+          # the read observes a transaction, all parts of the read see the
+          # transaction. Boundedly stale reads are not repeatable: two stale
+          # reads, even if they use the same staleness bound, can execute at
+          # different timestamps and thus return inconsistent results.
+          #
+          # Boundedly stale reads execute in two phases: the first phase
+          # negotiates a timestamp among all replicas needed to serve the
+          # read. In the second phase, reads are executed at the negotiated
+          # timestamp.
+          #
+          # As a result of the two phase execution, bounded staleness reads are
+          # usually a little slower than comparable exact staleness
+          # reads. However, they are typically able to return fresher
+          # results, and are more likely to execute at the closest replica.
+          #
+          # Because the timestamp negotiation requires up-front knowledge of
+          # which rows will be read, it can only be used with single-use
+          # read-only transactions.
+          #
+          # See TransactionOptions.ReadOnly.max_staleness and
+          # TransactionOptions.ReadOnly.min_read_timestamp.
+          #
+          # ### Old Read Timestamps and Garbage Collection
+          #
+          # Cloud Spanner continuously garbage collects deleted and overwritten data
+          # in the background to reclaim storage space. This process is known
+          # as &quot;version GC&quot;. By default, version GC reclaims versions after they
+          # are one hour old. Because of this, Cloud Spanner cannot perform reads
+          # at read timestamps more than one hour in the past. This
+          # restriction also applies to in-progress reads and/or SQL queries whose
+          # timestamp becomes too old while executing. Reads and SQL queries with
+          # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
+          #
+          # ## Partitioned DML Transactions
+          #
+          # Partitioned DML transactions are used to execute DML statements with a
+          # different execution strategy that provides different, and often better,
+          # scalability properties for large, table-wide operations than DML in a
+          # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
+          # should prefer using ReadWrite transactions.
+          #
+          # Partitioned DML partitions the keyspace and runs the DML statement on each
+          # partition in separate, internal transactions. These transactions commit
+          # automatically when complete, and run independently from one another.
+          #
+          # To reduce lock contention, this execution strategy only acquires read locks
+          # on rows that match the WHERE clause of the statement. Additionally, the
+          # smaller per-partition transactions hold locks for less time.
+          #
+          # That said, Partitioned DML is not a drop-in replacement for standard DML used
+          # in ReadWrite transactions.
+          #
+          #  - The DML statement must be fully-partitionable. Specifically, the statement
+          #    must be expressible as the union of many statements which each access only
+          #    a single row of the table.
+          #
+          #  - The statement is not applied atomically to all rows of the table. Rather,
+          #    the statement is applied atomically to partitions of the table, in
+          #    independent transactions. Secondary index rows are updated atomically
+          #    with the base table rows.
+          #
+          #  - Partitioned DML does not guarantee exactly-once execution semantics
+          #    against a partition. The statement will be applied at least once to each
+          #    partition. It is strongly recommended that the DML statement should be
+          #    idempotent to avoid unexpected results. For instance, it is potentially
+          #    dangerous to run a statement such as
+          #    `UPDATE table SET column = column + 1` as it could be run multiple times
+          #    against some rows.
+          #
+          #  - The partitions are committed automatically - there is no support for
+          #    Commit or Rollback. If the call returns an error, or if the client issuing
+          #    the ExecuteSql call dies, it is possible that some rows had the statement
+          #    executed on them successfully. It is also possible that statement was
+          #    never executed against other rows.
+          #
+          #  - Partitioned DML transactions may only contain the execution of a single
+          #    DML statement via ExecuteSql or ExecuteStreamingSql.
+          #
+          #  - If any error is encountered during the execution of the partitioned DML
+          #    operation (for instance, a UNIQUE INDEX violation, division by zero, or a
+          #    value that cannot be stored due to schema constraints), then the
+          #    operation is stopped at that point and an error is returned. It is
+          #    possible that at this point, some partitions have been committed (or even
+          #    committed multiple times), and other partitions have not been run at all.
+          #
+          # Given the above, Partitioned DML is a good fit for large, database-wide
+          # operations that are idempotent, such as deleting old rows from a very large
+          # table.
+        &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
+            #
+            # Authorization to begin a read-write transaction requires
+            # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
+            # on the `session` resource.
+            # transaction type has no options.
+        },
+        &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
+            #
+            # Authorization to begin a read-only transaction requires
+            # `spanner.databases.beginReadOnlyTransaction` permission
+            # on the `session` resource.
+          &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
+              # reads at a specific timestamp are repeatable; the same read at
+              # the same timestamp always returns the same data. If the
+              # timestamp is in the future, the read will block until the
+              # specified timestamp, modulo the read&#x27;s deadline.
+              #
+              # Useful for large scale consistent reads such as mapreduces, or
+              # for coordinating many reads against a consistent snapshot of the
+              # data.
+              #
+              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+          &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
+              #
+              # This is useful for requesting fresher data than some previous
+              # read, or data that is fresh enough to observe the effects of some
+              # previously committed transaction whose timestamp is known.
+              #
+              # Note that this option can only be used in single-use transactions.
+              #
+              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+          &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
+              # old. The timestamp is chosen soon after the read is started.
+              #
+              # Guarantees that all writes that have committed more than the
+              # specified number of seconds ago are visible. Because Cloud Spanner
+              # chooses the exact timestamp, this mode works even if the client&#x27;s
+              # local clock is substantially skewed from Cloud Spanner commit
+              # timestamps.
+              #
+              # Useful for reading at nearby replicas without the distributed
+              # timestamp negotiation overhead of `max_staleness`.
+          &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
+              # seconds. Guarantees that all writes that have committed more
+              # than the specified number of seconds ago are visible. Because
+              # Cloud Spanner chooses the exact timestamp, this mode works even if
+              # the client&#x27;s local clock is substantially skewed from Cloud Spanner
+              # commit timestamps.
+              #
+              # Useful for reading the freshest data available at a nearby
+              # replica, while bounding the possible staleness if the local
+              # replica has fallen behind.
+              #
+              # Note that this option can only be used in single-use
+              # transactions.
+          &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
+              # the Transaction message that describes the transaction.
+          &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
+              # are visible.
+        },
+        &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
+            #
+            # Authorization to begin a Partitioned DML transaction requires
+            # `spanner.databases.beginPartitionedDmlTransaction` permission
+            # on the `session` resource.
+        },
+      },
+      &quot;id&quot;: &quot;A String&quot;, # Execute the read or SQL query in a previously-started transaction.
+    },
+    &quot;seqno&quot;: &quot;A String&quot;, # A per-transaction sequence number used to identify this request. This field
+        # makes each request idempotent such that if the request is received multiple
+        # times, at most one will succeed.
+        # 
+        # The sequence number must be monotonically increasing within the
+        # transaction. If a request arrives for the first time with an out-of-order
+        # sequence number, the transaction may be aborted. Replays of previously
+        # handled requests will yield the same response as the first execution.
+        # 
+        # Required for DML statements. Ignored for queries.
+    &quot;paramTypes&quot;: { # It is not always possible for Cloud Spanner to infer the right SQL type
+        # from a JSON value.  For example, values of type `BYTES` and values
+        # of type `STRING` both appear in params as JSON strings.
+        # 
+        # In these cases, `param_types` can be used to specify the exact
+        # SQL type for some or all of the SQL statement parameters. See the
+        # definition of Type for more information
+        # about SQL types.
+      &quot;a_key&quot;: { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a
+          # table cell or returned from an SQL query.
+        &quot;code&quot;: &quot;A String&quot;, # Required. The TypeCode for this type.
+        &quot;arrayElementType&quot;: # Object with schema name: Type # If code == ARRAY, then `array_element_type`
+            # is the type of the array elements.
+        &quot;structType&quot;: { # `StructType` defines the fields of a STRUCT type. # If code == STRUCT, then `struct_type`
+            # provides type information for the struct&#x27;s fields.
+          &quot;fields&quot;: [ # The list of fields that make up this struct. Order is
+              # significant, because values of this struct type are represented as
+              # lists, where the order of field values matches the order of
+              # fields in the StructType. In turn, the order of fields
+              # matches the order of columns in a read request, or the order of
+              # fields in the `SELECT` clause of a query.
+            { # Message representing a single field of a struct.
+              &quot;type&quot;: # Object with schema name: Type # The type of the field.
+              &quot;name&quot;: &quot;A String&quot;, # The name of the field. For reads, this is the column name. For
+                  # SQL queries, it is the column alias (e.g., `&quot;Word&quot;` in the
+                  # query `&quot;SELECT &#x27;hello&#x27; AS Word&quot;`), or the column name (e.g.,
+                  # `&quot;ColName&quot;` in the query `&quot;SELECT ColName FROM Table&quot;`). Some
+                  # columns might have an empty name (e.g., `&quot;SELECT
+                  # UPPER(ColName)&quot;`). Note that a query result can contain
+                  # multiple fields with the same name.
+            },
+          ],
+        },
+      },
+    },
     &quot;params&quot;: { # Parameter names and values that bind to placeholders in the SQL string.
         # 
         # A parameter placeholder consists of the `@` character followed by the
@@ -3119,41 +3162,6 @@
       &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
     },
     &quot;sql&quot;: &quot;A String&quot;, # Required. The SQL string.
-    &quot;paramTypes&quot;: { # It is not always possible for Cloud Spanner to infer the right SQL type
-        # from a JSON value.  For example, values of type `BYTES` and values
-        # of type `STRING` both appear in params as JSON strings.
-        # 
-        # In these cases, `param_types` can be used to specify the exact
-        # SQL type for some or all of the SQL statement parameters. See the
-        # definition of Type for more information
-        # about SQL types.
-      &quot;a_key&quot;: { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a
-          # table cell or returned from an SQL query.
-        &quot;arrayElementType&quot;: # Object with schema name: Type # If code == ARRAY, then `array_element_type`
-            # is the type of the array elements.
-        &quot;code&quot;: &quot;A String&quot;, # Required. The TypeCode for this type.
-        &quot;structType&quot;: { # `StructType` defines the fields of a STRUCT type. # If code == STRUCT, then `struct_type`
-            # provides type information for the struct&#x27;s fields.
-          &quot;fields&quot;: [ # The list of fields that make up this struct. Order is
-              # significant, because values of this struct type are represented as
-              # lists, where the order of field values matches the order of
-              # fields in the StructType. In turn, the order of fields
-              # matches the order of columns in a read request, or the order of
-              # fields in the `SELECT` clause of a query.
-            { # Message representing a single field of a struct.
-              &quot;name&quot;: &quot;A String&quot;, # The name of the field. For reads, this is the column name. For
-                  # SQL queries, it is the column alias (e.g., `&quot;Word&quot;` in the
-                  # query `&quot;SELECT &#x27;hello&#x27; AS Word&quot;`), or the column name (e.g.,
-                  # `&quot;ColName&quot;` in the query `&quot;SELECT ColName FROM Table&quot;`). Some
-                  # columns might have an empty name (e.g., !&quot;SELECT
-                  # UPPER(ColName)&quot;`). Note that a query result can contain
-                  # multiple fields with the same name.
-              &quot;type&quot;: # Object with schema name: Type # The type of the field.
-            },
-          ],
-        },
-      },
-    },
   }
 
   x__xgafv: string, V1 error format.
@@ -3166,33 +3174,100 @@
 
     { # Results from Read or
       # ExecuteSql.
-    &quot;metadata&quot;: { # Metadata about a ResultSet or PartialResultSet. # Metadata about the result set, such as row type information.
-      &quot;rowType&quot;: { # `StructType` defines the fields of a STRUCT type. # Indicates the field names and types for the rows in the result
-          # set.  For example, a SQL query like `&quot;SELECT UserId, UserName FROM
-          # Users&quot;` could return a `row_type` value like:
+    &quot;stats&quot;: { # Additional statistics about a ResultSet or PartialResultSet. # Query plan and execution statistics for the SQL statement that
+        # produced this result set. These can be requested by setting
+        # ExecuteSqlRequest.query_mode.
+        # DML statements always produce stats containing the number of rows
+        # modified, unless executed using the
+        # ExecuteSqlRequest.QueryMode.PLAN ExecuteSqlRequest.query_mode.
+        # Other fields may or may not be populated, based on the
+        # ExecuteSqlRequest.query_mode.
+      &quot;queryStats&quot;: { # Aggregated statistics from the execution of the query. Only present when
+          # the query is profiled. For example, a query could return the statistics as
+          # follows:
           #
-          #     &quot;fields&quot;: [
-          #       { &quot;name&quot;: &quot;UserId&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;INT64&quot; } },
-          #       { &quot;name&quot;: &quot;UserName&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;STRING&quot; } },
-          #     ]
-        &quot;fields&quot;: [ # The list of fields that make up this struct. Order is
-            # significant, because values of this struct type are represented as
-            # lists, where the order of field values matches the order of
-            # fields in the StructType. In turn, the order of fields
-            # matches the order of columns in a read request, or the order of
-            # fields in the `SELECT` clause of a query.
-          { # Message representing a single field of a struct.
-            &quot;name&quot;: &quot;A String&quot;, # The name of the field. For reads, this is the column name. For
-                # SQL queries, it is the column alias (e.g., `&quot;Word&quot;` in the
-                # query `&quot;SELECT &#x27;hello&#x27; AS Word&quot;`), or the column name (e.g.,
-                # `&quot;ColName&quot;` in the query `&quot;SELECT ColName FROM Table&quot;`). Some
-                # columns might have an empty name (e.g., !&quot;SELECT
-                # UPPER(ColName)&quot;`). Note that a query result can contain
-                # multiple fields with the same name.
-            &quot;type&quot;: # Object with schema name: Type # The type of the field.
+          #     {
+          #       &quot;rows_returned&quot;: &quot;3&quot;,
+          #       &quot;elapsed_time&quot;: &quot;1.22 secs&quot;,
+          #       &quot;cpu_time&quot;: &quot;1.19 secs&quot;
+          #     }
+        &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
+      },
+      &quot;rowCountExact&quot;: &quot;A String&quot;, # Standard DML returns an exact count of rows that were modified.
+      &quot;rowCountLowerBound&quot;: &quot;A String&quot;, # Partitioned DML does not offer exactly-once semantics, so it
+          # returns a lower bound of the rows modified.
+      &quot;queryPlan&quot;: { # Contains an ordered list of nodes appearing in the query plan. # QueryPlan for the query associated with this result.
+        &quot;planNodes&quot;: [ # The nodes in the query plan. Plan nodes are returned in pre-order starting
+            # with the plan root. Each PlanNode&#x27;s `id` corresponds to its index in
+            # `plan_nodes`.
+          { # Node information for nodes appearing in a QueryPlan.plan_nodes.
+            &quot;childLinks&quot;: [ # List of child node `index`es and their relationship to this parent.
+              { # Metadata associated with a parent-child relationship appearing in a
+                  # PlanNode.
+                &quot;childIndex&quot;: 42, # The node to which the link points.
+                &quot;type&quot;: &quot;A String&quot;, # The type of the link. For example, in Hash Joins this could be used to
+                    # distinguish between the build child and the probe child, or in the case
+                    # of the child being an output variable, to represent the tag associated
+                    # with the output variable.
+                &quot;variable&quot;: &quot;A String&quot;, # Only present if the child node is SCALAR and corresponds
+                    # to an output variable of the parent node. The field carries the name of
+                    # the output variable.
+                    # For example, a `TableScan` operator that reads rows from a table will
+                    # have child links to the `SCALAR` nodes representing the output variables
+                    # created for each column that is read by the operator. The corresponding
+                    # `variable` fields will be set to the variable names assigned to the
+                    # columns.
+              },
+            ],
+            &quot;metadata&quot;: { # Attributes relevant to the node contained in a group of key-value pairs.
+                # For example, a Parameter Reference node could have the following
+                # information in its metadata:
+                #
+                #     {
+                #       &quot;parameter_reference&quot;: &quot;param1&quot;,
+                #       &quot;parameter_type&quot;: &quot;array&quot;
+                #     }
+              &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
+            },
+            &quot;kind&quot;: &quot;A String&quot;, # Used to determine the type of node. May be needed for visualizing
+                # different kinds of nodes differently. For example, If the node is a
+                # SCALAR node, it will have a condensed representation
+                # which can be used to directly embed a description of the node in its
+                # parent.
+            &quot;shortRepresentation&quot;: { # Condensed representation of a node and its subtree. Only present for # Condensed representation for SCALAR nodes.
+                # `SCALAR` PlanNode(s).
+              &quot;subqueries&quot;: { # A mapping of (subquery variable name) -&gt; (subquery node id) for cases
+                  # where the `description` string of this node references a `SCALAR`
+                  # subquery contained in the expression subtree rooted at this node. The
+                  # referenced `SCALAR` subquery may not necessarily be a direct child of
+                  # this node.
+                &quot;a_key&quot;: 42,
+              },
+              &quot;description&quot;: &quot;A String&quot;, # A string representation of the expression subtree rooted at this node.
+            },
+            &quot;displayName&quot;: &quot;A String&quot;, # The display name for the node.
+            &quot;index&quot;: 42, # The `PlanNode`&#x27;s index in node list.
+            &quot;executionStats&quot;: { # The execution statistics associated with the node, contained in a group of
+                # key-value pairs. Only present if the plan was returned as a result of a
+                # profile query. For example, number of executions, number of rows/time per
+                # execution etc.
+              &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
+            },
           },
         ],
       },
+    },
+    &quot;rows&quot;: [ # Each element in `rows` is a row whose format is defined by
+        # metadata.row_type. The ith element
+        # in each row matches the ith field in
+        # metadata.row_type. Elements are
+        # encoded based on type as described
+        # here.
+      [
+        &quot;&quot;,
+      ],
+    ],
+    &quot;metadata&quot;: { # Metadata about a ResultSet or PartialResultSet. # Metadata about the result set, such as row type information.
       &quot;transaction&quot;: { # A transaction. # If the read or SQL query began a transaction as a side-effect, the
           # information about the new transaction is yielded here.
         &quot;readTimestamp&quot;: &quot;A String&quot;, # For snapshot read-only transactions, the read timestamp chosen
@@ -3210,100 +3285,33 @@
             # Single-use read-only transactions do not have IDs, because
             # single-use transactions do not support multiple requests.
       },
-    },
-    &quot;stats&quot;: { # Additional statistics about a ResultSet or PartialResultSet. # Query plan and execution statistics for the SQL statement that
-        # produced this result set. These can be requested by setting
-        # ExecuteSqlRequest.query_mode.
-        # DML statements always produce stats containing the number of rows
-        # modified, unless executed using the
-        # ExecuteSqlRequest.QueryMode.PLAN ExecuteSqlRequest.query_mode.
-        # Other fields may or may not be populated, based on the
-        # ExecuteSqlRequest.query_mode.
-      &quot;rowCountLowerBound&quot;: &quot;A String&quot;, # Partitioned DML does not offer exactly-once semantics, so it
-          # returns a lower bound of the rows modified.
-      &quot;queryPlan&quot;: { # Contains an ordered list of nodes appearing in the query plan. # QueryPlan for the query associated with this result.
-        &quot;planNodes&quot;: [ # The nodes in the query plan. Plan nodes are returned in pre-order starting
-            # with the plan root. Each PlanNode&#x27;s `id` corresponds to its index in
-            # `plan_nodes`.
-          { # Node information for nodes appearing in a QueryPlan.plan_nodes.
-            &quot;displayName&quot;: &quot;A String&quot;, # The display name for the node.
-            &quot;executionStats&quot;: { # The execution statistics associated with the node, contained in a group of
-                # key-value pairs. Only present if the plan was returned as a result of a
-                # profile query. For example, number of executions, number of rows/time per
-                # execution etc.
-              &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
-            },
-            &quot;shortRepresentation&quot;: { # Condensed representation of a node and its subtree. Only present for # Condensed representation for SCALAR nodes.
-                # `SCALAR` PlanNode(s).
-              &quot;subqueries&quot;: { # A mapping of (subquery variable name) -&gt; (subquery node id) for cases
-                  # where the `description` string of this node references a `SCALAR`
-                  # subquery contained in the expression subtree rooted at this node. The
-                  # referenced `SCALAR` subquery may not necessarily be a direct child of
-                  # this node.
-                &quot;a_key&quot;: 42,
-              },
-              &quot;description&quot;: &quot;A String&quot;, # A string representation of the expression subtree rooted at this node.
-            },
-            &quot;metadata&quot;: { # Attributes relevant to the node contained in a group of key-value pairs.
-                # For example, a Parameter Reference node could have the following
-                # information in its metadata:
-                #
-                #     {
-                #       &quot;parameter_reference&quot;: &quot;param1&quot;,
-                #       &quot;parameter_type&quot;: &quot;array&quot;
-                #     }
-              &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
-            },
-            &quot;childLinks&quot;: [ # List of child node `index`es and their relationship to this parent.
-              { # Metadata associated with a parent-child relationship appearing in a
-                  # PlanNode.
-                &quot;type&quot;: &quot;A String&quot;, # The type of the link. For example, in Hash Joins this could be used to
-                    # distinguish between the build child and the probe child, or in the case
-                    # of the child being an output variable, to represent the tag associated
-                    # with the output variable.
-                &quot;variable&quot;: &quot;A String&quot;, # Only present if the child node is SCALAR and corresponds
-                    # to an output variable of the parent node. The field carries the name of
-                    # the output variable.
-                    # For example, a `TableScan` operator that reads rows from a table will
-                    # have child links to the `SCALAR` nodes representing the output variables
-                    # created for each column that is read by the operator. The corresponding
-                    # `variable` fields will be set to the variable names assigned to the
-                    # columns.
-                &quot;childIndex&quot;: 42, # The node to which the link points.
-              },
-            ],
-            &quot;index&quot;: 42, # The `PlanNode`&#x27;s index in node list.
-            &quot;kind&quot;: &quot;A String&quot;, # Used to determine the type of node. May be needed for visualizing
-                # different kinds of nodes differently. For example, If the node is a
-                # SCALAR node, it will have a condensed representation
-                # which can be used to directly embed a description of the node in its
-                # parent.
+      &quot;rowType&quot;: { # `StructType` defines the fields of a STRUCT type. # Indicates the field names and types for the rows in the result
+          # set.  For example, a SQL query like `&quot;SELECT UserId, UserName FROM
+          # Users&quot;` could return a `row_type` value like:
+          #
+          #     &quot;fields&quot;: [
+          #       { &quot;name&quot;: &quot;UserId&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;INT64&quot; } },
+          #       { &quot;name&quot;: &quot;UserName&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;STRING&quot; } },
+          #     ]
+        &quot;fields&quot;: [ # The list of fields that make up this struct. Order is
+            # significant, because values of this struct type are represented as
+            # lists, where the order of field values matches the order of
+            # fields in the StructType. In turn, the order of fields
+            # matches the order of columns in a read request, or the order of
+            # fields in the `SELECT` clause of a query.
+          { # Message representing a single field of a struct.
+            &quot;type&quot;: # Object with schema name: Type # The type of the field.
+            &quot;name&quot;: &quot;A String&quot;, # The name of the field. For reads, this is the column name. For
+                # SQL queries, it is the column alias (e.g., `&quot;Word&quot;` in the
+                # query `&quot;SELECT &#x27;hello&#x27; AS Word&quot;`), or the column name (e.g.,
+                # `&quot;ColName&quot;` in the query `&quot;SELECT ColName FROM Table&quot;`). Some
+                # columns might have an empty name (e.g., !&quot;SELECT
+                # UPPER(ColName)&quot;`). Note that a query result can contain
+                # multiple fields with the same name.
           },
         ],
       },
-      &quot;rowCountExact&quot;: &quot;A String&quot;, # Standard DML returns an exact count of rows that were modified.
-      &quot;queryStats&quot;: { # Aggregated statistics from the execution of the query. Only present when
-          # the query is profiled. For example, a query could return the statistics as
-          # follows:
-          #
-          #     {
-          #       &quot;rows_returned&quot;: &quot;3&quot;,
-          #       &quot;elapsed_time&quot;: &quot;1.22 secs&quot;,
-          #       &quot;cpu_time&quot;: &quot;1.19 secs&quot;
-          #     }
-        &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
-      },
     },
-    &quot;rows&quot;: [ # Each element in `rows` is a row whose format is defined by
-        # metadata.row_type. The ith element
-        # in each row matches the ith field in
-        # metadata.row_type. Elements are
-        # encoded based on type as described
-        # here.
-      [
-        &quot;&quot;,
-      ],
-    ],
   }</pre>
 </div>
 
@@ -3322,737 +3330,6 @@
 
 { # The request for ExecuteSql and
       # ExecuteStreamingSql.
-    &quot;seqno&quot;: &quot;A String&quot;, # A per-transaction sequence number used to identify this request. This field
-        # makes each request idempotent such that if the request is received multiple
-        # times, at most one will succeed.
-        # 
-        # The sequence number must be monotonically increasing within the
-        # transaction. If a request arrives for the first time with an out-of-order
-        # sequence number, the transaction may be aborted. Replays of previously
-        # handled requests will yield the same response as the first execution.
-        # 
-        # Required for DML statements. Ignored for queries.
-    &quot;transaction&quot;: { # This message is used to select the transaction in which a # The transaction to use.
-        # 
-        # For queries, if none is provided, the default is a temporary read-only
-        # transaction with strong concurrency.
-        # 
-        # Standard DML statements require a read-write transaction. To protect
-        # against replays, single-use transactions are not supported.  The caller
-        # must either supply an existing transaction ID or begin a new transaction.
-        # 
-        # Partitioned DML requires an existing Partitioned DML transaction ID.
-        # Read or
-        # ExecuteSql call runs.
-        #
-        # See TransactionOptions for more information about transactions.
-      &quot;id&quot;: &quot;A String&quot;, # Execute the read or SQL query in a previously-started transaction.
-      &quot;begin&quot;: { # # Transactions # Begin a new transaction and execute this read or SQL query in
-          # it. The transaction ID of the new transaction is returned in
-          # ResultSetMetadata.transaction, which is a Transaction.
-          #
-          #
-          # Each session can have at most one active transaction at a time. After the
-          # active transaction is completed, the session can immediately be
-          # re-used for the next transaction. It is not necessary to create a
-          # new session for each transaction.
-          #
-          # # Transaction Modes
-          #
-          # Cloud Spanner supports three transaction modes:
-          #
-          #   1. Locking read-write. This type of transaction is the only way
-          #      to write data into Cloud Spanner. These transactions rely on
-          #      pessimistic locking and, if necessary, two-phase commit.
-          #      Locking read-write transactions may abort, requiring the
-          #      application to retry.
-          #
-          #   2. Snapshot read-only. This transaction type provides guaranteed
-          #      consistency across several reads, but does not allow
-          #      writes. Snapshot read-only transactions can be configured to
-          #      read at timestamps in the past. Snapshot read-only
-          #      transactions do not need to be committed.
-          #
-          #   3. Partitioned DML. This type of transaction is used to execute
-          #      a single Partitioned DML statement. Partitioned DML partitions
-          #      the key space and runs the DML statement over each partition
-          #      in parallel using separate, internal transactions that commit
-          #      independently. Partitioned DML transactions do not need to be
-          #      committed.
-          #
-          # For transactions that only read, snapshot read-only transactions
-          # provide simpler semantics and are almost always faster. In
-          # particular, read-only transactions do not take locks, so they do
-          # not conflict with read-write transactions. As a consequence of not
-          # taking locks, they also do not abort, so retry loops are not needed.
-          #
-          # Transactions may only read/write data in a single database. They
-          # may, however, read/write data in different tables within that
-          # database.
-          #
-          # ## Locking Read-Write Transactions
-          #
-          # Locking transactions may be used to atomically read-modify-write
-          # data anywhere in a database. This type of transaction is externally
-          # consistent.
-          #
-          # Clients should attempt to minimize the amount of time a transaction
-          # is active. Faster transactions commit with higher probability
-          # and cause less contention. Cloud Spanner attempts to keep read locks
-          # active as long as the transaction continues to do reads, and the
-          # transaction has not been terminated by
-          # Commit or
-          # Rollback.  Long periods of
-          # inactivity at the client may cause Cloud Spanner to release a
-          # transaction&#x27;s locks and abort it.
-          #
-          # Conceptually, a read-write transaction consists of zero or more
-          # reads or SQL statements followed by
-          # Commit. At any time before
-          # Commit, the client can send a
-          # Rollback request to abort the
-          # transaction.
-          #
-          # ### Semantics
-          #
-          # Cloud Spanner can commit the transaction if all read locks it acquired
-          # are still valid at commit time, and it is able to acquire write
-          # locks for all writes. Cloud Spanner can abort the transaction for any
-          # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
-          # that the transaction has not modified any user data in Cloud Spanner.
-          #
-          # Unless the transaction commits, Cloud Spanner makes no guarantees about
-          # how long the transaction&#x27;s locks were held for. It is an error to
-          # use Cloud Spanner locks for any sort of mutual exclusion other than
-          # between Cloud Spanner transactions themselves.
-          #
-          # ### Retrying Aborted Transactions
-          #
-          # When a transaction aborts, the application can choose to retry the
-          # whole transaction again. To maximize the chances of successfully
-          # committing the retry, the client should execute the retry in the
-          # same session as the original attempt. The original session&#x27;s lock
-          # priority increases with each consecutive abort, meaning that each
-          # attempt has a slightly better chance of success than the previous.
-          #
-          # Under some circumstances (e.g., many transactions attempting to
-          # modify the same row(s)), a transaction can abort many times in a
-          # short period before successfully committing. Thus, it is not a good
-          # idea to cap the number of retries a transaction can attempt;
-          # instead, it is better to limit the total amount of wall time spent
-          # retrying.
-          #
-          # ### Idle Transactions
-          #
-          # A transaction is considered idle if it has no outstanding reads or
-          # SQL queries and has not started a read or SQL query within the last 10
-          # seconds. Idle transactions can be aborted by Cloud Spanner so that they
-          # don&#x27;t hold on to locks indefinitely. In that case, the commit will
-          # fail with error `ABORTED`.
-          #
-          # If this behavior is undesirable, periodically executing a simple
-          # SQL query in the transaction (e.g., `SELECT 1`) prevents the
-          # transaction from becoming idle.
-          #
-          # ## Snapshot Read-Only Transactions
-          #
-          # Snapshot read-only transactions provides a simpler method than
-          # locking read-write transactions for doing several consistent
-          # reads. However, this type of transaction does not support writes.
-          #
-          # Snapshot transactions do not take locks. Instead, they work by
-          # choosing a Cloud Spanner timestamp, then executing all reads at that
-          # timestamp. Since they do not acquire locks, they do not block
-          # concurrent read-write transactions.
-          #
-          # Unlike locking read-write transactions, snapshot read-only
-          # transactions never abort. They can fail if the chosen read
-          # timestamp is garbage collected; however, the default garbage
-          # collection policy is generous enough that most applications do not
-          # need to worry about this in practice.
-          #
-          # Snapshot read-only transactions do not need to call
-          # Commit or
-          # Rollback (and in fact are not
-          # permitted to do so).
-          #
-          # To execute a snapshot transaction, the client specifies a timestamp
-          # bound, which tells Cloud Spanner how to choose a read timestamp.
-          #
-          # The types of timestamp bound are:
-          #
-          #   - Strong (the default).
-          #   - Bounded staleness.
-          #   - Exact staleness.
-          #
-          # If the Cloud Spanner database to be read is geographically distributed,
-          # stale read-only transactions can execute more quickly than strong
-          # or read-write transaction, because they are able to execute far
-          # from the leader replica.
-          #
-          # Each type of timestamp bound is discussed in detail below.
-          #
-          # ### Strong
-          #
-          # Strong reads are guaranteed to see the effects of all transactions
-          # that have committed before the start of the read. Furthermore, all
-          # rows yielded by a single read are consistent with each other -- if
-          # any part of the read observes a transaction, all parts of the read
-          # see the transaction.
-          #
-          # Strong reads are not repeatable: two consecutive strong read-only
-          # transactions might return inconsistent results if there are
-          # concurrent writes. If consistency across reads is required, the
-          # reads should be executed within a transaction or at an exact read
-          # timestamp.
-          #
-          # See TransactionOptions.ReadOnly.strong.
-          #
-          # ### Exact Staleness
-          #
-          # These timestamp bounds execute reads at a user-specified
-          # timestamp. Reads at a timestamp are guaranteed to see a consistent
-          # prefix of the global transaction history: they observe
-          # modifications done by all transactions with a commit timestamp &lt;=
-          # the read timestamp, and observe none of the modifications done by
-          # transactions with a larger commit timestamp. They will block until
-          # all conflicting transactions that may be assigned commit timestamps
-          # &lt;= the read timestamp have finished.
-          #
-          # The timestamp can either be expressed as an absolute Cloud Spanner commit
-          # timestamp or a staleness relative to the current time.
-          #
-          # These modes do not require a &quot;negotiation phase&quot; to pick a
-          # timestamp. As a result, they execute slightly faster than the
-          # equivalent boundedly stale concurrency modes. On the other hand,
-          # boundedly stale reads usually return fresher results.
-          #
-          # See TransactionOptions.ReadOnly.read_timestamp and
-          # TransactionOptions.ReadOnly.exact_staleness.
-          #
-          # ### Bounded Staleness
-          #
-          # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
-          # subject to a user-provided staleness bound. Cloud Spanner chooses the
-          # newest timestamp within the staleness bound that allows execution
-          # of the reads at the closest available replica without blocking.
-          #
-          # All rows yielded are consistent with each other -- if any part of
-          # the read observes a transaction, all parts of the read see the
-          # transaction. Boundedly stale reads are not repeatable: two stale
-          # reads, even if they use the same staleness bound, can execute at
-          # different timestamps and thus return inconsistent results.
-          #
-          # Boundedly stale reads execute in two phases: the first phase
-          # negotiates a timestamp among all replicas needed to serve the
-          # read. In the second phase, reads are executed at the negotiated
-          # timestamp.
-          #
-          # As a result of the two phase execution, bounded staleness reads are
-          # usually a little slower than comparable exact staleness
-          # reads. However, they are typically able to return fresher
-          # results, and are more likely to execute at the closest replica.
-          #
-          # Because the timestamp negotiation requires up-front knowledge of
-          # which rows will be read, it can only be used with single-use
-          # read-only transactions.
-          #
-          # See TransactionOptions.ReadOnly.max_staleness and
-          # TransactionOptions.ReadOnly.min_read_timestamp.
-          #
-          # ### Old Read Timestamps and Garbage Collection
-          #
-          # Cloud Spanner continuously garbage collects deleted and overwritten data
-          # in the background to reclaim storage space. This process is known
-          # as &quot;version GC&quot;. By default, version GC reclaims versions after they
-          # are one hour old. Because of this, Cloud Spanner cannot perform reads
-          # at read timestamps more than one hour in the past. This
-          # restriction also applies to in-progress reads and/or SQL queries whose
-          # timestamp become too old while executing. Reads and SQL queries with
-          # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
-          #
-          # ## Partitioned DML Transactions
-          #
-          # Partitioned DML transactions are used to execute DML statements with a
-          # different execution strategy that provides different, and often better,
-          # scalability properties for large, table-wide operations than DML in a
-          # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
-          # should prefer using ReadWrite transactions.
-          #
-          # Partitioned DML partitions the keyspace and runs the DML statement on each
-          # partition in separate, internal transactions. These transactions commit
-          # automatically when complete, and run independently from one another.
-          #
-          # To reduce lock contention, this execution strategy only acquires read locks
-          # on rows that match the WHERE clause of the statement. Additionally, the
-          # smaller per-partition transactions hold locks for less time.
-          #
-          # That said, Partitioned DML is not a drop-in replacement for standard DML used
-          # in ReadWrite transactions.
-          #
-          #  - The DML statement must be fully-partitionable. Specifically, the statement
-          #    must be expressible as the union of many statements which each access only
-          #    a single row of the table.
-          #
-          #  - The statement is not applied atomically to all rows of the table. Rather,
-          #    the statement is applied atomically to partitions of the table, in
-          #    independent transactions. Secondary index rows are updated atomically
-          #    with the base table rows.
-          #
-          #  - Partitioned DML does not guarantee exactly-once execution semantics
-          #    against a partition. The statement will be applied at least once to each
-          #    partition. It is strongly recommended that the DML statement should be
-          #    idempotent to avoid unexpected results. For instance, it is potentially
-          #    dangerous to run a statement such as
-          #    `UPDATE table SET column = column + 1` as it could be run multiple times
-          #    against some rows.
-          #
-          #  - The partitions are committed automatically - there is no support for
-          #    Commit or Rollback. If the call returns an error, or if the client issuing
-          #    the ExecuteSql call dies, it is possible that some rows had the statement
-          #    executed on them successfully. It is also possible that statement was
-          #    never executed against other rows.
-          #
-          #  - Partitioned DML transactions may only contain the execution of a single
-          #    DML statement via ExecuteSql or ExecuteStreamingSql.
-          #
-          #  - If any error is encountered during the execution of the partitioned DML
-          #    operation (for instance, a UNIQUE INDEX violation, division by zero, or a
-          #    value that cannot be stored due to schema constraints), then the
-          #    operation is stopped at that point and an error is returned. It is
-          #    possible that at this point, some partitions have been committed (or even
-          #    committed multiple times), and other partitions have not been run at all.
-          #
-          # Given the above, Partitioned DML is good fit for large, database-wide,
-          # operations that are idempotent, such as deleting old rows from a very large
-          # table.
-        &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
-            #
-            # Authorization to begin a Partitioned DML transaction requires
-            # `spanner.databases.beginPartitionedDmlTransaction` permission
-            # on the `session` resource.
-        },
-        &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
-            #
-            # Authorization to begin a read-write transaction requires
-            # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
-            # on the `session` resource.
-            # transaction type has no options.
-        },
-        &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
-            #
-            # Authorization to begin a read-only transaction requires
-            # `spanner.databases.beginReadOnlyTransaction` permission
-            # on the `session` resource.
-          &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
-              # seconds. Guarantees that all writes that have committed more
-              # than the specified number of seconds ago are visible. Because
-              # Cloud Spanner chooses the exact timestamp, this mode works even if
-              # the client&#x27;s local clock is substantially skewed from Cloud Spanner
-              # commit timestamps.
-              #
-              # Useful for reading the freshest data available at a nearby
-              # replica, while bounding the possible staleness if the local
-              # replica has fallen behind.
-              #
-              # Note that this option can only be used in single-use
-              # transactions.
-          &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
-              #
-              # This is useful for requesting fresher data than some previous
-              # read, or data that is fresh enough to observe the effects of some
-              # previously committed transaction whose timestamp is known.
-              #
-              # Note that this option can only be used in single-use transactions.
-              #
-              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-          &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
-              # are visible.
-          &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
-              # the Transaction message that describes the transaction.
-          &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
-              # old. The timestamp is chosen soon after the read is started.
-              #
-              # Guarantees that all writes that have committed more than the
-              # specified number of seconds ago are visible. Because Cloud Spanner
-              # chooses the exact timestamp, this mode works even if the client&#x27;s
-              # local clock is substantially skewed from Cloud Spanner commit
-              # timestamps.
-              #
-              # Useful for reading at nearby replicas without the distributed
-              # timestamp negotiation overhead of `max_staleness`.
-          &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
-              # reads at a specific timestamp are repeatable; the same read at
-              # the same timestamp always returns the same data. If the
-              # timestamp is in the future, the read will block until the
-              # specified timestamp, modulo the read&#x27;s deadline.
-              #
-              # Useful for large scale consistent reads such as mapreduces, or
-              # for coordinating many reads against a consistent snapshot of the
-              # data.
-              #
-              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-        },
-      },
-      &quot;singleUse&quot;: { # # Transactions # Execute the read or SQL query in a temporary transaction.
-          # This is the most efficient way to execute a transaction that
-          # consists of a single SQL query.
-          #
-          #
-          # Each session can have at most one active transaction at a time. After the
-          # active transaction is completed, the session can immediately be
-          # re-used for the next transaction. It is not necessary to create a
-          # new session for each transaction.
-          #
-          # # Transaction Modes
-          #
-          # Cloud Spanner supports three transaction modes:
-          #
-          #   1. Locking read-write. This type of transaction is the only way
-          #      to write data into Cloud Spanner. These transactions rely on
-          #      pessimistic locking and, if necessary, two-phase commit.
-          #      Locking read-write transactions may abort, requiring the
-          #      application to retry.
-          #
-          #   2. Snapshot read-only. This transaction type provides guaranteed
-          #      consistency across several reads, but does not allow
-          #      writes. Snapshot read-only transactions can be configured to
-          #      read at timestamps in the past. Snapshot read-only
-          #      transactions do not need to be committed.
-          #
-          #   3. Partitioned DML. This type of transaction is used to execute
-          #      a single Partitioned DML statement. Partitioned DML partitions
-          #      the key space and runs the DML statement over each partition
-          #      in parallel using separate, internal transactions that commit
-          #      independently. Partitioned DML transactions do not need to be
-          #      committed.
-          #
-          # For transactions that only read, snapshot read-only transactions
-          # provide simpler semantics and are almost always faster. In
-          # particular, read-only transactions do not take locks, so they do
-          # not conflict with read-write transactions. As a consequence of not
-          # taking locks, they also do not abort, so retry loops are not needed.
-          #
-          # Transactions may only read/write data in a single database. They
-          # may, however, read/write data in different tables within that
-          # database.
-          #
-          # ## Locking Read-Write Transactions
-          #
-          # Locking transactions may be used to atomically read-modify-write
-          # data anywhere in a database. This type of transaction is externally
-          # consistent.
-          #
-          # Clients should attempt to minimize the amount of time a transaction
-          # is active. Faster transactions commit with higher probability
-          # and cause less contention. Cloud Spanner attempts to keep read locks
-          # active as long as the transaction continues to do reads, and the
-          # transaction has not been terminated by
-          # Commit or
-          # Rollback.  Long periods of
-          # inactivity at the client may cause Cloud Spanner to release a
-          # transaction&#x27;s locks and abort it.
-          #
-          # Conceptually, a read-write transaction consists of zero or more
-          # reads or SQL statements followed by
-          # Commit. At any time before
-          # Commit, the client can send a
-          # Rollback request to abort the
-          # transaction.
-          #
-          # ### Semantics
-          #
-          # Cloud Spanner can commit the transaction if all read locks it acquired
-          # are still valid at commit time, and it is able to acquire write
-          # locks for all writes. Cloud Spanner can abort the transaction for any
-          # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
-          # that the transaction has not modified any user data in Cloud Spanner.
-          #
-          # Unless the transaction commits, Cloud Spanner makes no guarantees about
-          # how long the transaction&#x27;s locks were held for. It is an error to
-          # use Cloud Spanner locks for any sort of mutual exclusion other than
-          # between Cloud Spanner transactions themselves.
-          #
-          # ### Retrying Aborted Transactions
-          #
-          # When a transaction aborts, the application can choose to retry the
-          # whole transaction again. To maximize the chances of successfully
-          # committing the retry, the client should execute the retry in the
-          # same session as the original attempt. The original session&#x27;s lock
-          # priority increases with each consecutive abort, meaning that each
-          # attempt has a slightly better chance of success than the previous.
-          #
-          # Under some circumstances (e.g., many transactions attempting to
-          # modify the same row(s)), a transaction can abort many times in a
-          # short period before successfully committing. Thus, it is not a good
-          # idea to cap the number of retries a transaction can attempt;
-          # instead, it is better to limit the total amount of wall time spent
-          # retrying.
-          #
-          # ### Idle Transactions
-          #
-          # A transaction is considered idle if it has no outstanding reads or
-          # SQL queries and has not started a read or SQL query within the last 10
-          # seconds. Idle transactions can be aborted by Cloud Spanner so that they
-          # don&#x27;t hold on to locks indefinitely. In that case, the commit will
-          # fail with error `ABORTED`.
-          #
-          # If this behavior is undesirable, periodically executing a simple
-          # SQL query in the transaction (e.g., `SELECT 1`) prevents the
-          # transaction from becoming idle.
-          #
-          # ## Snapshot Read-Only Transactions
-          #
-          # Snapshot read-only transactions provides a simpler method than
-          # locking read-write transactions for doing several consistent
-          # reads. However, this type of transaction does not support writes.
-          #
-          # Snapshot transactions do not take locks. Instead, they work by
-          # choosing a Cloud Spanner timestamp, then executing all reads at that
-          # timestamp. Since they do not acquire locks, they do not block
-          # concurrent read-write transactions.
-          #
-          # Unlike locking read-write transactions, snapshot read-only
-          # transactions never abort. They can fail if the chosen read
-          # timestamp is garbage collected; however, the default garbage
-          # collection policy is generous enough that most applications do not
-          # need to worry about this in practice.
-          #
-          # Snapshot read-only transactions do not need to call
-          # Commit or
-          # Rollback (and in fact are not
-          # permitted to do so).
-          #
-          # To execute a snapshot transaction, the client specifies a timestamp
-          # bound, which tells Cloud Spanner how to choose a read timestamp.
-          #
-          # The types of timestamp bound are:
-          #
-          #   - Strong (the default).
-          #   - Bounded staleness.
-          #   - Exact staleness.
-          #
-          # If the Cloud Spanner database to be read is geographically distributed,
-          # stale read-only transactions can execute more quickly than strong
-          # or read-write transaction, because they are able to execute far
-          # from the leader replica.
-          #
-          # Each type of timestamp bound is discussed in detail below.
-          #
-          # ### Strong
-          #
-          # Strong reads are guaranteed to see the effects of all transactions
-          # that have committed before the start of the read. Furthermore, all
-          # rows yielded by a single read are consistent with each other -- if
-          # any part of the read observes a transaction, all parts of the read
-          # see the transaction.
-          #
-          # Strong reads are not repeatable: two consecutive strong read-only
-          # transactions might return inconsistent results if there are
-          # concurrent writes. If consistency across reads is required, the
-          # reads should be executed within a transaction or at an exact read
-          # timestamp.
-          #
-          # See TransactionOptions.ReadOnly.strong.
-          #
-          # ### Exact Staleness
-          #
-          # These timestamp bounds execute reads at a user-specified
-          # timestamp. Reads at a timestamp are guaranteed to see a consistent
-          # prefix of the global transaction history: they observe
-          # modifications done by all transactions with a commit timestamp &lt;=
-          # the read timestamp, and observe none of the modifications done by
-          # transactions with a larger commit timestamp. They will block until
-          # all conflicting transactions that may be assigned commit timestamps
-          # &lt;= the read timestamp have finished.
-          #
-          # The timestamp can either be expressed as an absolute Cloud Spanner commit
-          # timestamp or a staleness relative to the current time.
-          #
-          # These modes do not require a &quot;negotiation phase&quot; to pick a
-          # timestamp. As a result, they execute slightly faster than the
-          # equivalent boundedly stale concurrency modes. On the other hand,
-          # boundedly stale reads usually return fresher results.
-          #
-          # See TransactionOptions.ReadOnly.read_timestamp and
-          # TransactionOptions.ReadOnly.exact_staleness.
-          #
-          # ### Bounded Staleness
-          #
-          # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
-          # subject to a user-provided staleness bound. Cloud Spanner chooses the
-          # newest timestamp within the staleness bound that allows execution
-          # of the reads at the closest available replica without blocking.
-          #
-          # All rows yielded are consistent with each other -- if any part of
-          # the read observes a transaction, all parts of the read see the
-          # transaction. Boundedly stale reads are not repeatable: two stale
-          # reads, even if they use the same staleness bound, can execute at
-          # different timestamps and thus return inconsistent results.
-          #
-          # Boundedly stale reads execute in two phases: the first phase
-          # negotiates a timestamp among all replicas needed to serve the
-          # read. In the second phase, reads are executed at the negotiated
-          # timestamp.
-          #
-          # As a result of the two phase execution, bounded staleness reads are
-          # usually a little slower than comparable exact staleness
-          # reads. However, they are typically able to return fresher
-          # results, and are more likely to execute at the closest replica.
-          #
-          # Because the timestamp negotiation requires up-front knowledge of
-          # which rows will be read, it can only be used with single-use
-          # read-only transactions.
-          #
-          # See TransactionOptions.ReadOnly.max_staleness and
-          # TransactionOptions.ReadOnly.min_read_timestamp.
-          #
-          # ### Old Read Timestamps and Garbage Collection
-          #
-          # Cloud Spanner continuously garbage collects deleted and overwritten data
-          # in the background to reclaim storage space. This process is known
-          # as &quot;version GC&quot;. By default, version GC reclaims versions after they
-          # are one hour old. Because of this, Cloud Spanner cannot perform reads
-          # at read timestamps more than one hour in the past. This
-          # restriction also applies to in-progress reads and/or SQL queries whose
-          # timestamp become too old while executing. Reads and SQL queries with
-          # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
-          #
-          # ## Partitioned DML Transactions
-          #
-          # Partitioned DML transactions are used to execute DML statements with a
-          # different execution strategy that provides different, and often better,
-          # scalability properties for large, table-wide operations than DML in a
-          # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
-          # should prefer using ReadWrite transactions.
-          #
-          # Partitioned DML partitions the keyspace and runs the DML statement on each
-          # partition in separate, internal transactions. These transactions commit
-          # automatically when complete, and run independently from one another.
-          #
-          # To reduce lock contention, this execution strategy only acquires read locks
-          # on rows that match the WHERE clause of the statement. Additionally, the
-          # smaller per-partition transactions hold locks for less time.
-          #
-          # That said, Partitioned DML is not a drop-in replacement for standard DML used
-          # in ReadWrite transactions.
-          #
-          #  - The DML statement must be fully-partitionable. Specifically, the statement
-          #    must be expressible as the union of many statements which each access only
-          #    a single row of the table.
-          #
-          #  - The statement is not applied atomically to all rows of the table. Rather,
-          #    the statement is applied atomically to partitions of the table, in
-          #    independent transactions. Secondary index rows are updated atomically
-          #    with the base table rows.
-          #
-          #  - Partitioned DML does not guarantee exactly-once execution semantics
-          #    against a partition. The statement will be applied at least once to each
-          #    partition. It is strongly recommended that the DML statement should be
-          #    idempotent to avoid unexpected results. For instance, it is potentially
-          #    dangerous to run a statement such as
-          #    `UPDATE table SET column = column + 1` as it could be run multiple times
-          #    against some rows.
-          #
-          #  - The partitions are committed automatically - there is no support for
-          #    Commit or Rollback. If the call returns an error, or if the client issuing
-          #    the ExecuteSql call dies, it is possible that some rows had the statement
-          #    executed on them successfully. It is also possible that statement was
-          #    never executed against other rows.
-          #
-          #  - Partitioned DML transactions may only contain the execution of a single
-          #    DML statement via ExecuteSql or ExecuteStreamingSql.
-          #
-          #  - If any error is encountered during the execution of the partitioned DML
-          #    operation (for instance, a UNIQUE INDEX violation, division by zero, or a
-          #    value that cannot be stored due to schema constraints), then the
-          #    operation is stopped at that point and an error is returned. It is
-          #    possible that at this point, some partitions have been committed (or even
-          #    committed multiple times), and other partitions have not been run at all.
-          #
-          # Given the above, Partitioned DML is good fit for large, database-wide,
-          # operations that are idempotent, such as deleting old rows from a very large
-          # table.
-        &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
-            #
-            # Authorization to begin a Partitioned DML transaction requires
-            # `spanner.databases.beginPartitionedDmlTransaction` permission
-            # on the `session` resource.
-        },
-        &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
-            #
-            # Authorization to begin a read-write transaction requires
-            # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
-            # on the `session` resource.
-            # transaction type has no options.
-        },
-        &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
-            #
-            # Authorization to begin a read-only transaction requires
-            # `spanner.databases.beginReadOnlyTransaction` permission
-            # on the `session` resource.
-          &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
-              # seconds. Guarantees that all writes that have committed more
-              # than the specified number of seconds ago are visible. Because
-              # Cloud Spanner chooses the exact timestamp, this mode works even if
-              # the client&#x27;s local clock is substantially skewed from Cloud Spanner
-              # commit timestamps.
-              #
-              # Useful for reading the freshest data available at a nearby
-              # replica, while bounding the possible staleness if the local
-              # replica has fallen behind.
-              #
-              # Note that this option can only be used in single-use
-              # transactions.
-          &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
-              #
-              # This is useful for requesting fresher data than some previous
-              # read, or data that is fresh enough to observe the effects of some
-              # previously committed transaction whose timestamp is known.
-              #
-              # Note that this option can only be used in single-use transactions.
-              #
-              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-          &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
-              # are visible.
-          &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
-              # the Transaction message that describes the transaction.
-          &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
-              # old. The timestamp is chosen soon after the read is started.
-              #
-              # Guarantees that all writes that have committed more than the
-              # specified number of seconds ago are visible. Because Cloud Spanner
-              # chooses the exact timestamp, this mode works even if the client&#x27;s
-              # local clock is substantially skewed from Cloud Spanner commit
-              # timestamps.
-              #
-              # Useful for reading at nearby replicas without the distributed
-              # timestamp negotiation overhead of `max_staleness`.
-          &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
-              # reads at a specific timestamp are repeatable; the same read at
-              # the same timestamp always returns the same data. If the
-              # timestamp is in the future, the read will block until the
-              # specified timestamp, modulo the read&#x27;s deadline.
-              #
-              # Useful for large scale consistent reads such as mapreduces, or
-              # for coordinating many reads against a consistent snapshot of the
-              # data.
-              #
-              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-        },
-      },
-    },
-    &quot;queryMode&quot;: &quot;A String&quot;, # Used to control the amount of debugging information returned in
-        # ResultSetStats. If partition_token is set, query_mode can only
-        # be set to QueryMode.NORMAL.
-    &quot;partitionToken&quot;: &quot;A String&quot;, # If present, results will be restricted to the specified partition
-        # previously created using PartitionQuery().  There must be an exact
-        # match for the values of fields common to this message and the
-        # PartitionQueryRequest message used to create this partition_token.
     &quot;resumeToken&quot;: &quot;A String&quot;, # If this request is resuming a previously interrupted SQL statement
         # execution, `resume_token` should be copied from the last
         # PartialResultSet yielded before the interruption. Doing this
@@ -4080,6 +3357,774 @@
           #
           # The `optimizer_version` statement hint has precedence over this setting.
     },
+    &quot;partitionToken&quot;: &quot;A String&quot;, # If present, results will be restricted to the specified partition
+        # previously created using PartitionQuery().  There must be an exact
+        # match for the values of fields common to this message and the
+        # PartitionQueryRequest message used to create this partition_token.
+    &quot;queryMode&quot;: &quot;A String&quot;, # Used to control the amount of debugging information returned in
+        # ResultSetStats. If partition_token is set, query_mode can only
+        # be set to QueryMode.NORMAL.
+    &quot;transaction&quot;: { # This message is used to select the transaction in which a # The transaction to use.
+        # 
+        # For queries, if none is provided, the default is a temporary read-only
+        # transaction with strong concurrency.
+        # 
+        # Standard DML statements require a read-write transaction. To protect
+        # against replays, single-use transactions are not supported.  The caller
+        # must either supply an existing transaction ID or begin a new transaction.
+        # 
+        # Partitioned DML requires an existing Partitioned DML transaction ID.
+        # Read or
+        # ExecuteSql call runs.
+        #
+        # See TransactionOptions for more information about transactions.
+      &quot;singleUse&quot;: { # # Transactions # Execute the read or SQL query in a temporary transaction.
+          # This is the most efficient way to execute a transaction that
+          # consists of a single SQL query.
+          #
+          #
+          # Each session can have at most one active transaction at a time (note that
+          # standalone reads and queries use a transaction internally and do count
+          # towards the one transaction limit). After the active transaction is
+          # completed, the session can immediately be re-used for the next transaction.
+          # It is not necessary to create a new session for each transaction.
+          #
+          # # Transaction Modes
+          #
+          # Cloud Spanner supports three transaction modes:
+          #
+          #   1. Locking read-write. This type of transaction is the only way
+          #      to write data into Cloud Spanner. These transactions rely on
+          #      pessimistic locking and, if necessary, two-phase commit.
+          #      Locking read-write transactions may abort, requiring the
+          #      application to retry.
+          #
+          #   2. Snapshot read-only. This transaction type provides guaranteed
+          #      consistency across several reads, but does not allow
+          #      writes. Snapshot read-only transactions can be configured to
+          #      read at timestamps in the past. Snapshot read-only
+          #      transactions do not need to be committed.
+          #
+          #   3. Partitioned DML. This type of transaction is used to execute
+          #      a single Partitioned DML statement. Partitioned DML partitions
+          #      the key space and runs the DML statement over each partition
+          #      in parallel using separate, internal transactions that commit
+          #      independently. Partitioned DML transactions do not need to be
+          #      committed.
+          #
+          # For transactions that only read, snapshot read-only transactions
+          # provide simpler semantics and are almost always faster. In
+          # particular, read-only transactions do not take locks, so they do
+          # not conflict with read-write transactions. As a consequence of not
+          # taking locks, they also do not abort, so retry loops are not needed.
+          #
+          # Transactions may only read/write data in a single database. They
+          # may, however, read/write data in different tables within that
+          # database.
+          #
+          # ## Locking Read-Write Transactions
+          #
+          # Locking transactions may be used to atomically read-modify-write
+          # data anywhere in a database. This type of transaction is externally
+          # consistent.
+          #
+          # Clients should attempt to minimize the amount of time a transaction
+          # is active. Faster transactions commit with higher probability
+          # and cause less contention. Cloud Spanner attempts to keep read locks
+          # active as long as the transaction continues to do reads, and the
+          # transaction has not been terminated by
+          # Commit or
+          # Rollback.  Long periods of
+          # inactivity at the client may cause Cloud Spanner to release a
+          # transaction&#x27;s locks and abort it.
+          #
+          # Conceptually, a read-write transaction consists of zero or more
+          # reads or SQL statements followed by
+          # Commit. At any time before
+          # Commit, the client can send a
+          # Rollback request to abort the
+          # transaction.
+          #
+          # ### Semantics
+          #
+          # Cloud Spanner can commit the transaction if all read locks it acquired
+          # are still valid at commit time, and it is able to acquire write
+          # locks for all writes. Cloud Spanner can abort the transaction for any
+          # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
+          # that the transaction has not modified any user data in Cloud Spanner.
+          #
+          # Unless the transaction commits, Cloud Spanner makes no guarantees about
+          # how long the transaction&#x27;s locks were held for. It is an error to
+          # use Cloud Spanner locks for any sort of mutual exclusion other than
+          # between Cloud Spanner transactions themselves.
+          #
+          # ### Retrying Aborted Transactions
+          #
+          # When a transaction aborts, the application can choose to retry the
+          # whole transaction again. To maximize the chances of successfully
+          # committing the retry, the client should execute the retry in the
+          # same session as the original attempt. The original session&#x27;s lock
+          # priority increases with each consecutive abort, meaning that each
+          # attempt has a slightly better chance of success than the previous.
+          #
+          # Under some circumstances (e.g., many transactions attempting to
+          # modify the same row(s)), a transaction can abort many times in a
+          # short period before successfully committing. Thus, it is not a good
+          # idea to cap the number of retries a transaction can attempt;
+          # instead, it is better to limit the total amount of wall time spent
+          # retrying.
+          #
+          # ### Idle Transactions
+          #
+          # A transaction is considered idle if it has no outstanding reads or
+          # SQL queries and has not started a read or SQL query within the last 10
+          # seconds. Idle transactions can be aborted by Cloud Spanner so that they
+          # don&#x27;t hold on to locks indefinitely. In that case, the commit will
+          # fail with error `ABORTED`.
+          #
+          # If this behavior is undesirable, periodically executing a simple
+          # SQL query in the transaction (e.g., `SELECT 1`) prevents the
+          # transaction from becoming idle.
+          #
+          # ## Snapshot Read-Only Transactions
+          #
+          # Snapshot read-only transactions provides a simpler method than
+          # locking read-write transactions for doing several consistent
+          # reads. However, this type of transaction does not support writes.
+          #
+          # Snapshot transactions do not take locks. Instead, they work by
+          # choosing a Cloud Spanner timestamp, then executing all reads at that
+          # timestamp. Since they do not acquire locks, they do not block
+          # concurrent read-write transactions.
+          #
+          # Unlike locking read-write transactions, snapshot read-only
+          # transactions never abort. They can fail if the chosen read
+          # timestamp is garbage collected; however, the default garbage
+          # collection policy is generous enough that most applications do not
+          # need to worry about this in practice.
+          #
+          # Snapshot read-only transactions do not need to call
+          # Commit or
+          # Rollback (and in fact are not
+          # permitted to do so).
+          #
+          # To execute a snapshot transaction, the client specifies a timestamp
+          # bound, which tells Cloud Spanner how to choose a read timestamp.
+          #
+          # The types of timestamp bound are:
+          #
+          #   - Strong (the default).
+          #   - Bounded staleness.
+          #   - Exact staleness.
+          #
+          # If the Cloud Spanner database to be read is geographically distributed,
+          # stale read-only transactions can execute more quickly than strong
+          # or read-write transaction, because they are able to execute far
+          # from the leader replica.
+          #
+          # Each type of timestamp bound is discussed in detail below.
+          #
+          # ### Strong
+          #
+          # Strong reads are guaranteed to see the effects of all transactions
+          # that have committed before the start of the read. Furthermore, all
+          # rows yielded by a single read are consistent with each other -- if
+          # any part of the read observes a transaction, all parts of the read
+          # see the transaction.
+          #
+          # Strong reads are not repeatable: two consecutive strong read-only
+          # transactions might return inconsistent results if there are
+          # concurrent writes. If consistency across reads is required, the
+          # reads should be executed within a transaction or at an exact read
+          # timestamp.
+          #
+          # See TransactionOptions.ReadOnly.strong.
+          #
+          # ### Exact Staleness
+          #
+          # These timestamp bounds execute reads at a user-specified
+          # timestamp. Reads at a timestamp are guaranteed to see a consistent
+          # prefix of the global transaction history: they observe
+          # modifications done by all transactions with a commit timestamp &lt;=
+          # the read timestamp, and observe none of the modifications done by
+          # transactions with a larger commit timestamp. They will block until
+          # all conflicting transactions that may be assigned commit timestamps
+          # &lt;= the read timestamp have finished.
+          #
+          # The timestamp can either be expressed as an absolute Cloud Spanner commit
+          # timestamp or a staleness relative to the current time.
+          #
+          # These modes do not require a &quot;negotiation phase&quot; to pick a
+          # timestamp. As a result, they execute slightly faster than the
+          # equivalent boundedly stale concurrency modes. On the other hand,
+          # boundedly stale reads usually return fresher results.
+          #
+          # See TransactionOptions.ReadOnly.read_timestamp and
+          # TransactionOptions.ReadOnly.exact_staleness.
+          #
+          # ### Bounded Staleness
+          #
+          # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
+          # subject to a user-provided staleness bound. Cloud Spanner chooses the
+          # newest timestamp within the staleness bound that allows execution
+          # of the reads at the closest available replica without blocking.
+          #
+          # All rows yielded are consistent with each other -- if any part of
+          # the read observes a transaction, all parts of the read see the
+          # transaction. Boundedly stale reads are not repeatable: two stale
+          # reads, even if they use the same staleness bound, can execute at
+          # different timestamps and thus return inconsistent results.
+          #
+          # Boundedly stale reads execute in two phases: the first phase
+          # negotiates a timestamp among all replicas needed to serve the
+          # read. In the second phase, reads are executed at the negotiated
+          # timestamp.
+          #
+          # As a result of the two phase execution, bounded staleness reads are
+          # usually a little slower than comparable exact staleness
+          # reads. However, they are typically able to return fresher
+          # results, and are more likely to execute at the closest replica.
+          #
+          # Because the timestamp negotiation requires up-front knowledge of
+          # which rows will be read, it can only be used with single-use
+          # read-only transactions.
+          #
+          # See TransactionOptions.ReadOnly.max_staleness and
+          # TransactionOptions.ReadOnly.min_read_timestamp.
+          #
+          # ### Old Read Timestamps and Garbage Collection
+          #
+          # Cloud Spanner continuously garbage collects deleted and overwritten data
+          # in the background to reclaim storage space. This process is known
+          # as &quot;version GC&quot;. By default, version GC reclaims versions after they
+          # are one hour old. Because of this, Cloud Spanner cannot perform reads
+          # at read timestamps more than one hour in the past. This
+          # restriction also applies to in-progress reads and/or SQL queries whose
+          # timestamp become too old while executing. Reads and SQL queries with
+          # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
+          #
+          # ## Partitioned DML Transactions
+          #
+          # Partitioned DML transactions are used to execute DML statements with a
+          # different execution strategy that provides different, and often better,
+          # scalability properties for large, table-wide operations than DML in a
+          # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
+          # should prefer using ReadWrite transactions.
+          #
+          # Partitioned DML partitions the keyspace and runs the DML statement on each
+          # partition in separate, internal transactions. These transactions commit
+          # automatically when complete, and run independently from one another.
+          #
+          # To reduce lock contention, this execution strategy only acquires read locks
+          # on rows that match the WHERE clause of the statement. Additionally, the
+          # smaller per-partition transactions hold locks for less time.
+          #
+          # That said, Partitioned DML is not a drop-in replacement for standard DML used
+          # in ReadWrite transactions.
+          #
+          #  - The DML statement must be fully-partitionable. Specifically, the statement
+          #    must be expressible as the union of many statements which each access only
+          #    a single row of the table.
+          #
+          #  - The statement is not applied atomically to all rows of the table. Rather,
+          #    the statement is applied atomically to partitions of the table, in
+          #    independent transactions. Secondary index rows are updated atomically
+          #    with the base table rows.
+          #
+          #  - Partitioned DML does not guarantee exactly-once execution semantics
+          #    against a partition. The statement will be applied at least once to each
+          #    partition. It is strongly recommended that the DML statement should be
+          #    idempotent to avoid unexpected results. For instance, it is potentially
+          #    dangerous to run a statement such as
+          #    `UPDATE table SET column = column + 1` as it could be run multiple times
+          #    against some rows.
+          #
+          #  - The partitions are committed automatically - there is no support for
+          #    Commit or Rollback. If the call returns an error, or if the client issuing
+          #    the ExecuteSql call dies, it is possible that some rows had the statement
+          #    executed on them successfully. It is also possible that statement was
+          #    never executed against other rows.
+          #
+          #  - Partitioned DML transactions may only contain the execution of a single
+          #    DML statement via ExecuteSql or ExecuteStreamingSql.
+          #
+          #  - If any error is encountered during the execution of the partitioned DML
+          #    operation (for instance, a UNIQUE INDEX violation, division by zero, or a
+          #    value that cannot be stored due to schema constraints), then the
+          #    operation is stopped at that point and an error is returned. It is
+          #    possible that at this point, some partitions have been committed (or even
+          #    committed multiple times), and other partitions have not been run at all.
+          #
+          # Given the above, Partitioned DML is good fit for large, database-wide,
+          # operations that are idempotent, such as deleting old rows from a very large
+          # table.
+        &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
+            #
+            # Authorization to begin a read-write transaction requires
+            # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
+            # on the `session` resource.
+            # transaction type has no options.
+        },
+        &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
+            #
+            # Authorization to begin a read-only transaction requires
+            # `spanner.databases.beginReadOnlyTransaction` permission
+            # on the `session` resource.
+          &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
+              # reads at a specific timestamp are repeatable; the same read at
+              # the same timestamp always returns the same data. If the
+              # timestamp is in the future, the read will block until the
+              # specified timestamp, modulo the read&#x27;s deadline.
+              #
+              # Useful for large scale consistent reads such as mapreduces, or
+              # for coordinating many reads against a consistent snapshot of the
+              # data.
+              #
+              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+          &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
+              #
+              # This is useful for requesting fresher data than some previous
+              # read, or data that is fresh enough to observe the effects of some
+              # previously committed transaction whose timestamp is known.
+              #
+              # Note that this option can only be used in single-use transactions.
+              #
+              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+          &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
+              # old. The timestamp is chosen soon after the read is started.
+              #
+              # Guarantees that all writes that have committed more than the
+              # specified number of seconds ago are visible. Because Cloud Spanner
+              # chooses the exact timestamp, this mode works even if the client&#x27;s
+              # local clock is substantially skewed from Cloud Spanner commit
+              # timestamps.
+              #
+              # Useful for reading at nearby replicas without the distributed
+              # timestamp negotiation overhead of `max_staleness`.
+          &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
+              # seconds. Guarantees that all writes that have committed more
+              # than the specified number of seconds ago are visible. Because
+              # Cloud Spanner chooses the exact timestamp, this mode works even if
+              # the client&#x27;s local clock is substantially skewed from Cloud Spanner
+              # commit timestamps.
+              #
+              # Useful for reading the freshest data available at a nearby
+              # replica, while bounding the possible staleness if the local
+              # replica has fallen behind.
+              #
+              # Note that this option can only be used in single-use
+              # transactions.
+          &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
+              # the Transaction message that describes the transaction.
+          &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
+              # are visible.
+        },
+        &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
+            #
+            # Authorization to begin a Partitioned DML transaction requires
+            # `spanner.databases.beginPartitionedDmlTransaction` permission
+            # on the `session` resource.
+        },
+      },
+      &quot;begin&quot;: { # # Transactions # Begin a new transaction and execute this read or SQL query in
+          # it. The transaction ID of the new transaction is returned in
+          # ResultSetMetadata.transaction, which is a Transaction.
+          #
+          #
+          # Each session can have at most one active transaction at a time (note that
+          # standalone reads and queries use a transaction internally and do count
+          # towards the one transaction limit). After the active transaction is
+          # completed, the session can immediately be re-used for the next transaction.
+          # It is not necessary to create a new session for each transaction.
+          #
+          # # Transaction Modes
+          #
+          # Cloud Spanner supports three transaction modes:
+          #
+          #   1. Locking read-write. This type of transaction is the only way
+          #      to write data into Cloud Spanner. These transactions rely on
+          #      pessimistic locking and, if necessary, two-phase commit.
+          #      Locking read-write transactions may abort, requiring the
+          #      application to retry.
+          #
+          #   2. Snapshot read-only. This transaction type provides guaranteed
+          #      consistency across several reads, but does not allow
+          #      writes. Snapshot read-only transactions can be configured to
+          #      read at timestamps in the past. Snapshot read-only
+          #      transactions do not need to be committed.
+          #
+          #   3. Partitioned DML. This type of transaction is used to execute
+          #      a single Partitioned DML statement. Partitioned DML partitions
+          #      the key space and runs the DML statement over each partition
+          #      in parallel using separate, internal transactions that commit
+          #      independently. Partitioned DML transactions do not need to be
+          #      committed.
+          #
+          # For transactions that only read, snapshot read-only transactions
+          # provide simpler semantics and are almost always faster. In
+          # particular, read-only transactions do not take locks, so they do
+          # not conflict with read-write transactions. As a consequence of not
+          # taking locks, they also do not abort, so retry loops are not needed.
+          #
+          # Transactions may only read/write data in a single database. They
+          # may, however, read/write data in different tables within that
+          # database.
+          #
+          # ## Locking Read-Write Transactions
+          #
+          # Locking transactions may be used to atomically read-modify-write
+          # data anywhere in a database. This type of transaction is externally
+          # consistent.
+          #
+          # Clients should attempt to minimize the amount of time a transaction
+          # is active. Faster transactions commit with higher probability
+          # and cause less contention. Cloud Spanner attempts to keep read locks
+          # active as long as the transaction continues to do reads, and the
+          # transaction has not been terminated by
+          # Commit or
+          # Rollback.  Long periods of
+          # inactivity at the client may cause Cloud Spanner to release a
+          # transaction&#x27;s locks and abort it.
+          #
+          # Conceptually, a read-write transaction consists of zero or more
+          # reads or SQL statements followed by
+          # Commit. At any time before
+          # Commit, the client can send a
+          # Rollback request to abort the
+          # transaction.
+          #
+          # ### Semantics
+          #
+          # Cloud Spanner can commit the transaction if all read locks it acquired
+          # are still valid at commit time, and it is able to acquire write
+          # locks for all writes. Cloud Spanner can abort the transaction for any
+          # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
+          # that the transaction has not modified any user data in Cloud Spanner.
+          #
+          # Unless the transaction commits, Cloud Spanner makes no guarantees about
+          # how long the transaction&#x27;s locks were held for. It is an error to
+          # use Cloud Spanner locks for any sort of mutual exclusion other than
+          # between Cloud Spanner transactions themselves.
+          #
+          # ### Retrying Aborted Transactions
+          #
+          # When a transaction aborts, the application can choose to retry the
+          # whole transaction again. To maximize the chances of successfully
+          # committing the retry, the client should execute the retry in the
+          # same session as the original attempt. The original session&#x27;s lock
+          # priority increases with each consecutive abort, meaning that each
+          # attempt has a slightly better chance of success than the previous.
+          #
+          # Under some circumstances (e.g., many transactions attempting to
+          # modify the same row(s)), a transaction can abort many times in a
+          # short period before successfully committing. Thus, it is not a good
+          # idea to cap the number of retries a transaction can attempt;
+          # instead, it is better to limit the total amount of wall time spent
+          # retrying.
+          #
+          # ### Idle Transactions
+          #
+          # A transaction is considered idle if it has no outstanding reads or
+          # SQL queries and has not started a read or SQL query within the last 10
+          # seconds. Idle transactions can be aborted by Cloud Spanner so that they
+          # don&#x27;t hold on to locks indefinitely. In that case, the commit will
+          # fail with error `ABORTED`.
+          #
+          # If this behavior is undesirable, periodically executing a simple
+          # SQL query in the transaction (e.g., `SELECT 1`) prevents the
+          # transaction from becoming idle.
+          #
+          # ## Snapshot Read-Only Transactions
+          #
+          # Snapshot read-only transactions provides a simpler method than
+          # locking read-write transactions for doing several consistent
+          # reads. However, this type of transaction does not support writes.
+          #
+          # Snapshot transactions do not take locks. Instead, they work by
+          # choosing a Cloud Spanner timestamp, then executing all reads at that
+          # timestamp. Since they do not acquire locks, they do not block
+          # concurrent read-write transactions.
+          #
+          # Unlike locking read-write transactions, snapshot read-only
+          # transactions never abort. They can fail if the chosen read
+          # timestamp is garbage collected; however, the default garbage
+          # collection policy is generous enough that most applications do not
+          # need to worry about this in practice.
+          #
+          # Snapshot read-only transactions do not need to call
+          # Commit or
+          # Rollback (and in fact are not
+          # permitted to do so).
+          #
+          # To execute a snapshot transaction, the client specifies a timestamp
+          # bound, which tells Cloud Spanner how to choose a read timestamp.
+          #
+          # The types of timestamp bound are:
+          #
+          #   - Strong (the default).
+          #   - Bounded staleness.
+          #   - Exact staleness.
+          #
+          # If the Cloud Spanner database to be read is geographically distributed,
+          # stale read-only transactions can execute more quickly than strong
+          # or read-write transaction, because they are able to execute far
+          # from the leader replica.
+          #
+          # Each type of timestamp bound is discussed in detail below.
+          #
+          # ### Strong
+          #
+          # Strong reads are guaranteed to see the effects of all transactions
+          # that have committed before the start of the read. Furthermore, all
+          # rows yielded by a single read are consistent with each other -- if
+          # any part of the read observes a transaction, all parts of the read
+          # see the transaction.
+          #
+          # Strong reads are not repeatable: two consecutive strong read-only
+          # transactions might return inconsistent results if there are
+          # concurrent writes. If consistency across reads is required, the
+          # reads should be executed within a transaction or at an exact read
+          # timestamp.
+          #
+          # See TransactionOptions.ReadOnly.strong.
+          #
+          # ### Exact Staleness
+          #
+          # These timestamp bounds execute reads at a user-specified
+          # timestamp. Reads at a timestamp are guaranteed to see a consistent
+          # prefix of the global transaction history: they observe
+          # modifications done by all transactions with a commit timestamp &lt;=
+          # the read timestamp, and observe none of the modifications done by
+          # transactions with a larger commit timestamp. They will block until
+          # all conflicting transactions that may be assigned commit timestamps
+          # &lt;= the read timestamp have finished.
+          #
+          # The timestamp can either be expressed as an absolute Cloud Spanner commit
+          # timestamp or a staleness relative to the current time.
+          #
+          # These modes do not require a &quot;negotiation phase&quot; to pick a
+          # timestamp. As a result, they execute slightly faster than the
+          # equivalent boundedly stale concurrency modes. On the other hand,
+          # boundedly stale reads usually return fresher results.
+          #
+          # See TransactionOptions.ReadOnly.read_timestamp and
+          # TransactionOptions.ReadOnly.exact_staleness.
+          #
+          # ### Bounded Staleness
+          #
+          # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
+          # subject to a user-provided staleness bound. Cloud Spanner chooses the
+          # newest timestamp within the staleness bound that allows execution
+          # of the reads at the closest available replica without blocking.
+          #
+          # All rows yielded are consistent with each other -- if any part of
+          # the read observes a transaction, all parts of the read see the
+          # transaction. Boundedly stale reads are not repeatable: two stale
+          # reads, even if they use the same staleness bound, can execute at
+          # different timestamps and thus return inconsistent results.
+          #
+          # Boundedly stale reads execute in two phases: the first phase
+          # negotiates a timestamp among all replicas needed to serve the
+          # read. In the second phase, reads are executed at the negotiated
+          # timestamp.
+          #
+          # As a result of the two phase execution, bounded staleness reads are
+          # usually a little slower than comparable exact staleness
+          # reads. However, they are typically able to return fresher
+          # results, and are more likely to execute at the closest replica.
+          #
+          # Because the timestamp negotiation requires up-front knowledge of
+          # which rows will be read, it can only be used with single-use
+          # read-only transactions.
+          #
+          # See TransactionOptions.ReadOnly.max_staleness and
+          # TransactionOptions.ReadOnly.min_read_timestamp.
+          #
+          # ### Old Read Timestamps and Garbage Collection
+          #
+          # Cloud Spanner continuously garbage collects deleted and overwritten data
+          # in the background to reclaim storage space. This process is known
+          # as &quot;version GC&quot;. By default, version GC reclaims versions after they
+          # are one hour old. Because of this, Cloud Spanner cannot perform reads
+          # at read timestamps more than one hour in the past. This
+          # restriction also applies to in-progress reads and/or SQL queries whose
+          # timestamp become too old while executing. Reads and SQL queries with
+          # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
+          #
+          # ## Partitioned DML Transactions
+          #
+          # Partitioned DML transactions are used to execute DML statements with a
+          # different execution strategy that provides different, and often better,
+          # scalability properties for large, table-wide operations than DML in a
+          # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
+          # should prefer using ReadWrite transactions.
+          #
+          # Partitioned DML partitions the keyspace and runs the DML statement on each
+          # partition in separate, internal transactions. These transactions commit
+          # automatically when complete, and run independently from one another.
+          #
+          # To reduce lock contention, this execution strategy only acquires read locks
+          # on rows that match the WHERE clause of the statement. Additionally, the
+          # smaller per-partition transactions hold locks for less time.
+          #
+          # That said, Partitioned DML is not a drop-in replacement for standard DML used
+          # in ReadWrite transactions.
+          #
+          #  - The DML statement must be fully-partitionable. Specifically, the statement
+          #    must be expressible as the union of many statements which each access only
+          #    a single row of the table.
+          #
+          #  - The statement is not applied atomically to all rows of the table. Rather,
+          #    the statement is applied atomically to partitions of the table, in
+          #    independent transactions. Secondary index rows are updated atomically
+          #    with the base table rows.
+          #
+          #  - Partitioned DML does not guarantee exactly-once execution semantics
+          #    against a partition. The statement will be applied at least once to each
+          #    partition. It is strongly recommended that the DML statement should be
+          #    idempotent to avoid unexpected results. For instance, it is potentially
+          #    dangerous to run a statement such as
+          #    `UPDATE table SET column = column + 1` as it could be run multiple times
+          #    against some rows.
+          #
+          #  - The partitions are committed automatically - there is no support for
+          #    Commit or Rollback. If the call returns an error, or if the client issuing
+          #    the ExecuteSql call dies, it is possible that some rows had the statement
+          #    executed on them successfully. It is also possible that statement was
+          #    never executed against other rows.
+          #
+          #  - Partitioned DML transactions may only contain the execution of a single
+          #    DML statement via ExecuteSql or ExecuteStreamingSql.
+          #
+          #  - If any error is encountered during the execution of the partitioned DML
+          #    operation (for instance, a UNIQUE INDEX violation, division by zero, or a
+          #    value that cannot be stored due to schema constraints), then the
+          #    operation is stopped at that point and an error is returned. It is
+          #    possible that at this point, some partitions have been committed (or even
+          #    committed multiple times), and other partitions have not been run at all.
+          #
+          # Given the above, Partitioned DML is good fit for large, database-wide,
+          # operations that are idempotent, such as deleting old rows from a very large
+          # table.
+        &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
+            #
+            # Authorization to begin a read-write transaction requires
+            # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
+            # on the `session` resource.
+            # transaction type has no options.
+        },
+        &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
+            #
+            # Authorization to begin a read-only transaction requires
+            # `spanner.databases.beginReadOnlyTransaction` permission
+            # on the `session` resource.
+          &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
+              # reads at a specific timestamp are repeatable; the same read at
+              # the same timestamp always returns the same data. If the
+              # timestamp is in the future, the read will block until the
+              # specified timestamp, modulo the read&#x27;s deadline.
+              #
+              # Useful for large scale consistent reads such as mapreduces, or
+              # for coordinating many reads against a consistent snapshot of the
+              # data.
+              #
+              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+          &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
+              #
+              # This is useful for requesting fresher data than some previous
+              # read, or data that is fresh enough to observe the effects of some
+              # previously committed transaction whose timestamp is known.
+              #
+              # Note that this option can only be used in single-use transactions.
+              #
+              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+          &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
+              # old. The timestamp is chosen soon after the read is started.
+              #
+              # Guarantees that all writes that have committed more than the
+              # specified number of seconds ago are visible. Because Cloud Spanner
+              # chooses the exact timestamp, this mode works even if the client&#x27;s
+              # local clock is substantially skewed from Cloud Spanner commit
+              # timestamps.
+              #
+              # Useful for reading at nearby replicas without the distributed
+              # timestamp negotiation overhead of `max_staleness`.
+          &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
+              # seconds. Guarantees that all writes that have committed more
+              # than the specified number of seconds ago are visible. Because
+              # Cloud Spanner chooses the exact timestamp, this mode works even if
+              # the client&#x27;s local clock is substantially skewed from Cloud Spanner
+              # commit timestamps.
+              #
+              # Useful for reading the freshest data available at a nearby
+              # replica, while bounding the possible staleness if the local
+              # replica has fallen behind.
+              #
+              # Note that this option can only be used in single-use
+              # transactions.
+          &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
+              # the Transaction message that describes the transaction.
+          &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
+              # are visible.
+        },
+        &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
+            #
+            # Authorization to begin a Partitioned DML transaction requires
+            # `spanner.databases.beginPartitionedDmlTransaction` permission
+            # on the `session` resource.
+        },
+      },
+      &quot;id&quot;: &quot;A String&quot;, # Execute the read or SQL query in a previously-started transaction.
+    },
+    &quot;seqno&quot;: &quot;A String&quot;, # A per-transaction sequence number used to identify this request. This field
+        # makes each request idempotent such that if the request is received multiple
+        # times, at most one will succeed.
+        # 
+        # The sequence number must be monotonically increasing within the
+        # transaction. If a request arrives for the first time with an out-of-order
+        # sequence number, the transaction may be aborted. Replays of previously
+        # handled requests will yield the same response as the first execution.
+        # 
+        # Required for DML statements. Ignored for queries.
+    &quot;paramTypes&quot;: { # It is not always possible for Cloud Spanner to infer the right SQL type
+        # from a JSON value.  For example, values of type `BYTES` and values
+        # of type `STRING` both appear in params as JSON strings.
+        # 
+        # In these cases, `param_types` can be used to specify the exact
+        # SQL type for some or all of the SQL statement parameters. See the
+        # definition of Type for more information
+        # about SQL types.
+      &quot;a_key&quot;: { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a
+          # table cell or returned from an SQL query.
+        &quot;code&quot;: &quot;A String&quot;, # Required. The TypeCode for this type.
+        &quot;arrayElementType&quot;: # Object with schema name: Type # If code == ARRAY, then `array_element_type`
+            # is the type of the array elements.
+        &quot;structType&quot;: { # `StructType` defines the fields of a STRUCT type. # If code == STRUCT, then `struct_type`
+            # provides type information for the struct&#x27;s fields.
+          &quot;fields&quot;: [ # The list of fields that make up this struct. Order is
+              # significant, because values of this struct type are represented as
+              # lists, where the order of field values matches the order of
+              # fields in the StructType. In turn, the order of fields
+              # matches the order of columns in a read request, or the order of
+              # fields in the `SELECT` clause of a query.
+            { # Message representing a single field of a struct.
+              &quot;type&quot;: # Object with schema name: Type # The type of the field.
+              &quot;name&quot;: &quot;A String&quot;, # The name of the field. For reads, this is the column name. For
+                  # SQL queries, it is the column alias (e.g., `&quot;Word&quot;` in the
+                  # query `&quot;SELECT &#x27;hello&#x27; AS Word&quot;`), or the column name (e.g.,
+                  # `&quot;ColName&quot;` in the query `&quot;SELECT ColName FROM Table&quot;`). Some
+                  # columns might have an empty name (e.g., !&quot;SELECT
+                  # UPPER(ColName)&quot;`). Note that a query result can contain
+                  # multiple fields with the same name.
+            },
+          ],
+        },
+      },
+    },
     &quot;params&quot;: { # Parameter names and values that bind to placeholders in the SQL string.
         # 
         # A parameter placeholder consists of the `@` character followed by the
@@ -4095,41 +4140,6 @@
       &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
     },
     &quot;sql&quot;: &quot;A String&quot;, # Required. The SQL string.
-    &quot;paramTypes&quot;: { # It is not always possible for Cloud Spanner to infer the right SQL type
-        # from a JSON value.  For example, values of type `BYTES` and values
-        # of type `STRING` both appear in params as JSON strings.
-        # 
-        # In these cases, `param_types` can be used to specify the exact
-        # SQL type for some or all of the SQL statement parameters. See the
-        # definition of Type for more information
-        # about SQL types.
-      &quot;a_key&quot;: { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a
-          # table cell or returned from an SQL query.
-        &quot;arrayElementType&quot;: # Object with schema name: Type # If code == ARRAY, then `array_element_type`
-            # is the type of the array elements.
-        &quot;code&quot;: &quot;A String&quot;, # Required. The TypeCode for this type.
-        &quot;structType&quot;: { # `StructType` defines the fields of a STRUCT type. # If code == STRUCT, then `struct_type`
-            # provides type information for the struct&#x27;s fields.
-          &quot;fields&quot;: [ # The list of fields that make up this struct. Order is
-              # significant, because values of this struct type are represented as
-              # lists, where the order of field values matches the order of
-              # fields in the StructType. In turn, the order of fields
-              # matches the order of columns in a read request, or the order of
-              # fields in the `SELECT` clause of a query.
-            { # Message representing a single field of a struct.
-              &quot;name&quot;: &quot;A String&quot;, # The name of the field. For reads, this is the column name. For
-                  # SQL queries, it is the column alias (e.g., `&quot;Word&quot;` in the
-                  # query `&quot;SELECT &#x27;hello&#x27; AS Word&quot;`), or the column name (e.g.,
-                  # `&quot;ColName&quot;` in the query `&quot;SELECT ColName FROM Table&quot;`). Some
-                  # columns might have an empty name (e.g., !&quot;SELECT
-                  # UPPER(ColName)&quot;`). Note that a query result can contain
-                  # multiple fields with the same name.
-              &quot;type&quot;: # Object with schema name: Type # The type of the field.
-            },
-          ],
-        },
-      },
-    },
   }
 
   x__xgafv: string, V1 error format.
@@ -4149,6 +4159,18 @@
         # only once with the last response in the stream.
         # This field will also be present in the last response for DML
         # statements.
+      &quot;queryStats&quot;: { # Aggregated statistics from the execution of the query. Only present when
+          # the query is profiled. For example, a query could return the statistics as
+          # follows:
+          #
+          #     {
+          #       &quot;rows_returned&quot;: &quot;3&quot;,
+          #       &quot;elapsed_time&quot;: &quot;1.22 secs&quot;,
+          #       &quot;cpu_time&quot;: &quot;1.19 secs&quot;
+          #     }
+        &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
+      },
+      &quot;rowCountExact&quot;: &quot;A String&quot;, # Standard DML returns an exact count of rows that were modified.
       &quot;rowCountLowerBound&quot;: &quot;A String&quot;, # Partitioned DML does not offer exactly-once semantics, so it
           # returns a lower bound of the rows modified.
       &quot;queryPlan&quot;: { # Contains an ordered list of nodes appearing in the query plan. # QueryPlan for the query associated with this result.
@@ -4156,37 +4178,10 @@
             # with the plan root. Each PlanNode&#x27;s `id` corresponds to its index in
             # `plan_nodes`.
           { # Node information for nodes appearing in a QueryPlan.plan_nodes.
-            &quot;displayName&quot;: &quot;A String&quot;, # The display name for the node.
-            &quot;executionStats&quot;: { # The execution statistics associated with the node, contained in a group of
-                # key-value pairs. Only present if the plan was returned as a result of a
-                # profile query. For example, number of executions, number of rows/time per
-                # execution etc.
-              &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
-            },
-            &quot;shortRepresentation&quot;: { # Condensed representation of a node and its subtree. Only present for # Condensed representation for SCALAR nodes.
-                # `SCALAR` PlanNode(s).
-              &quot;subqueries&quot;: { # A mapping of (subquery variable name) -&gt; (subquery node id) for cases
-                  # where the `description` string of this node references a `SCALAR`
-                  # subquery contained in the expression subtree rooted at this node. The
-                  # referenced `SCALAR` subquery may not necessarily be a direct child of
-                  # this node.
-                &quot;a_key&quot;: 42,
-              },
-              &quot;description&quot;: &quot;A String&quot;, # A string representation of the expression subtree rooted at this node.
-            },
-            &quot;metadata&quot;: { # Attributes relevant to the node contained in a group of key-value pairs.
-                # For example, a Parameter Reference node could have the following
-                # information in its metadata:
-                #
-                #     {
-                #       &quot;parameter_reference&quot;: &quot;param1&quot;,
-                #       &quot;parameter_type&quot;: &quot;array&quot;
-                #     }
-              &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
-            },
             &quot;childLinks&quot;: [ # List of child node `index`es and their relationship to this parent.
               { # Metadata associated with a parent-child relationship appearing in a
                   # PlanNode.
+                &quot;childIndex&quot;: 42, # The node to which the link points.
                 &quot;type&quot;: &quot;A String&quot;, # The type of the link. For example, in Hash Joins this could be used to
                     # distinguish between the build child and the probe child, or in the case
                     # of the child being an output variable, to represent the tag associated
@@ -4199,34 +4194,97 @@
                     # created for each column that is read by the operator. The corresponding
                     # `variable` fields will be set to the variable names assigned to the
                     # columns.
-                &quot;childIndex&quot;: 42, # The node to which the link points.
               },
             ],
-            &quot;index&quot;: 42, # The `PlanNode`&#x27;s index in node list.
+            &quot;metadata&quot;: { # Attributes relevant to the node contained in a group of key-value pairs.
+                # For example, a Parameter Reference node could have the following
+                # information in its metadata:
+                #
+                #     {
+                #       &quot;parameter_reference&quot;: &quot;param1&quot;,
+                #       &quot;parameter_type&quot;: &quot;array&quot;
+                #     }
+              &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
+            },
             &quot;kind&quot;: &quot;A String&quot;, # Used to determine the type of node. May be needed for visualizing
                 # different kinds of nodes differently. For example, If the node is a
                 # SCALAR node, it will have a condensed representation
                 # which can be used to directly embed a description of the node in its
                 # parent.
+            &quot;shortRepresentation&quot;: { # Condensed representation of a node and its subtree. Only present for # Condensed representation for SCALAR nodes.
+                # `SCALAR` PlanNode(s).
+              &quot;subqueries&quot;: { # A mapping of (subquery variable name) -&gt; (subquery node id) for cases
+                  # where the `description` string of this node references a `SCALAR`
+                  # subquery contained in the expression subtree rooted at this node. The
+                  # referenced `SCALAR` subquery may not necessarily be a direct child of
+                  # this node.
+                &quot;a_key&quot;: 42,
+              },
+              &quot;description&quot;: &quot;A String&quot;, # A string representation of the expression subtree rooted at this node.
+            },
+            &quot;displayName&quot;: &quot;A String&quot;, # The display name for the node.
+            &quot;index&quot;: 42, # The `PlanNode`&#x27;s index in node list.
+            &quot;executionStats&quot;: { # The execution statistics associated with the node, contained in a group of
+                # key-value pairs. Only present if the plan was returned as a result of a
+                # profile query. For example, number of executions, number of rows/time per
+                # execution etc.
+              &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
+            },
           },
         ],
       },
-      &quot;rowCountExact&quot;: &quot;A String&quot;, # Standard DML returns an exact count of rows that were modified.
-      &quot;queryStats&quot;: { # Aggregated statistics from the execution of the query. Only present when
-          # the query is profiled. For example, a query could return the statistics as
-          # follows:
+    },
+    &quot;resumeToken&quot;: &quot;A String&quot;, # Streaming calls might be interrupted for a variety of reasons, such
+        # as TCP connection loss. If this occurs, the stream of results can
+        # be resumed by re-sending the original request and including
+        # `resume_token`. Note that executing any other transaction in the
+        # same session invalidates the token.
+    &quot;metadata&quot;: { # Metadata about a ResultSet or PartialResultSet. # Metadata about the result set, such as row type information.
+        # Only present in the first response.
+      &quot;transaction&quot;: { # A transaction. # If the read or SQL query began a transaction as a side-effect, the
+          # information about the new transaction is yielded here.
+        &quot;readTimestamp&quot;: &quot;A String&quot;, # For snapshot read-only transactions, the read timestamp chosen
+            # for the transaction. Not returned by default: see
+            # TransactionOptions.ReadOnly.return_read_timestamp.
+            #
+            # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+            # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+        &quot;id&quot;: &quot;A String&quot;, # `id` may be used to identify the transaction in subsequent
+            # Read,
+            # ExecuteSql,
+            # Commit, or
+            # Rollback calls.
+            #
+            # Single-use read-only transactions do not have IDs, because
+            # single-use transactions do not support multiple requests.
+      },
+      &quot;rowType&quot;: { # `StructType` defines the fields of a STRUCT type. # Indicates the field names and types for the rows in the result
+          # set.  For example, a SQL query like `&quot;SELECT UserId, UserName FROM
+          # Users&quot;` could return a `row_type` value like:
           #
-          #     {
-          #       &quot;rows_returned&quot;: &quot;3&quot;,
-          #       &quot;elapsed_time&quot;: &quot;1.22 secs&quot;,
-          #       &quot;cpu_time&quot;: &quot;1.19 secs&quot;
-          #     }
-        &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
+          #     &quot;fields&quot;: [
+          #       { &quot;name&quot;: &quot;UserId&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;INT64&quot; } },
+          #       { &quot;name&quot;: &quot;UserName&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;STRING&quot; } },
+          #     ]
+        &quot;fields&quot;: [ # The list of fields that make up this struct. Order is
+            # significant, because values of this struct type are represented as
+            # lists, where the order of field values matches the order of
+            # fields in the StructType. In turn, the order of fields
+            # matches the order of columns in a read request, or the order of
+            # fields in the `SELECT` clause of a query.
+          { # Message representing a single field of a struct.
+            &quot;type&quot;: # Object with schema name: Type # The type of the field.
+            &quot;name&quot;: &quot;A String&quot;, # The name of the field. For reads, this is the column name. For
+                # SQL queries, it is the column alias (e.g., `&quot;Word&quot;` in the
+                # query `&quot;SELECT &#x27;hello&#x27; AS Word&quot;`), or the column name (e.g.,
+                # `&quot;ColName&quot;` in the query `&quot;SELECT ColName FROM Table&quot;`). Some
+                # columns might have an empty name (e.g., !&quot;SELECT
+                # UPPER(ColName)&quot;`). Note that a query result can contain
+                # multiple fields with the same name.
+          },
+        ],
       },
     },
-    &quot;chunkedValue&quot;: True or False, # If true, then the final value in values is chunked, and must
-        # be combined with more values from subsequent `PartialResultSet`s
-        # to obtain a complete field value.
     &quot;values&quot;: [ # A streamed result set consists of a stream of values, which might
         # be split into many `PartialResultSet` messages to accommodate
         # large rows and/or large values. Every N complete values defines a
@@ -4302,57 +4360,9 @@
         # field value `&quot;World&quot; = &quot;W&quot; + &quot;orl&quot; + &quot;d&quot;`.
       &quot;&quot;,
     ],
-    &quot;metadata&quot;: { # Metadata about a ResultSet or PartialResultSet. # Metadata about the result set, such as row type information.
-        # Only present in the first response.
-      &quot;rowType&quot;: { # `StructType` defines the fields of a STRUCT type. # Indicates the field names and types for the rows in the result
-          # set.  For example, a SQL query like `&quot;SELECT UserId, UserName FROM
-          # Users&quot;` could return a `row_type` value like:
-          #
-          #     &quot;fields&quot;: [
-          #       { &quot;name&quot;: &quot;UserId&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;INT64&quot; } },
-          #       { &quot;name&quot;: &quot;UserName&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;STRING&quot; } },
-          #     ]
-        &quot;fields&quot;: [ # The list of fields that make up this struct. Order is
-            # significant, because values of this struct type are represented as
-            # lists, where the order of field values matches the order of
-            # fields in the StructType. In turn, the order of fields
-            # matches the order of columns in a read request, or the order of
-            # fields in the `SELECT` clause of a query.
-          { # Message representing a single field of a struct.
-            &quot;name&quot;: &quot;A String&quot;, # The name of the field. For reads, this is the column name. For
-                # SQL queries, it is the column alias (e.g., `&quot;Word&quot;` in the
-                # query `&quot;SELECT &#x27;hello&#x27; AS Word&quot;`), or the column name (e.g.,
-                # `&quot;ColName&quot;` in the query `&quot;SELECT ColName FROM Table&quot;`). Some
-                # columns might have an empty name (e.g., !&quot;SELECT
-                # UPPER(ColName)&quot;`). Note that a query result can contain
-                # multiple fields with the same name.
-            &quot;type&quot;: # Object with schema name: Type # The type of the field.
-          },
-        ],
-      },
-      &quot;transaction&quot;: { # A transaction. # If the read or SQL query began a transaction as a side-effect, the
-          # information about the new transaction is yielded here.
-        &quot;readTimestamp&quot;: &quot;A String&quot;, # For snapshot read-only transactions, the read timestamp chosen
-            # for the transaction. Not returned by default: see
-            # TransactionOptions.ReadOnly.return_read_timestamp.
-            #
-            # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-            # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-        &quot;id&quot;: &quot;A String&quot;, # `id` may be used to identify the transaction in subsequent
-            # Read,
-            # ExecuteSql,
-            # Commit, or
-            # Rollback calls.
-            #
-            # Single-use read-only transactions do not have IDs, because
-            # single-use transactions do not support multiple requests.
-      },
-    },
-    &quot;resumeToken&quot;: &quot;A String&quot;, # Streaming calls might be interrupted for a variety of reasons, such
-        # as TCP connection loss. If this occurs, the stream of results can
-        # be resumed by re-sending the original request and including
-        # `resume_token`. Note that executing any other transaction in the
-        # same session invalidates the token.
+    &quot;chunkedValue&quot;: True or False, # If true, then the final value in values is chunked, and must
+        # be combined with more values from subsequent `PartialResultSet`s
+        # to obtain a complete field value.
   }</pre>
 </div>
 
@@ -4374,8 +4384,9 @@
 
     { # A session in the Cloud Spanner API.
     &quot;createTime&quot;: &quot;A String&quot;, # Output only. The timestamp when the session is created.
-    &quot;name&quot;: &quot;A String&quot;, # The name of the session. This is always system-assigned; values provided
-        # when creating a session are ignored.
+    &quot;name&quot;: &quot;A String&quot;, # Output only. The name of the session. This is always system-assigned.
+    &quot;approximateLastUseTime&quot;: &quot;A String&quot;, # Output only. The approximate timestamp when the session is last used. It is
+        # typically earlier than the actual last use time.
     &quot;labels&quot;: { # The labels for the session.
         #
         #  * Label keys must be between 1 and 63 characters long and must conform to
@@ -4387,13 +4398,11 @@
         # See https://goo.gl/xmQnxf for more information on and examples of labels.
       &quot;a_key&quot;: &quot;A String&quot;,
     },
-    &quot;approximateLastUseTime&quot;: &quot;A String&quot;, # Output only. The approximate timestamp when the session is last used. It is
-        # typically earlier than the actual last use time.
   }</pre>
 </div>
 
 <div class="method">
-    <code class="details" id="list">list(database, filter=None, pageSize=None, pageToken=None, x__xgafv=None)</code>
+    <code class="details" id="list">list(database, filter=None, pageToken=None, pageSize=None, x__xgafv=None)</code>
   <pre>Lists all sessions in a given database.
 
 Args:
@@ -4408,11 +4417,11 @@
   * `labels.env:*` --&gt; The session has the label &quot;env&quot;.
   * `labels.env:dev` --&gt; The session has the label &quot;env&quot; and the value of
                        the label contains the string &quot;dev&quot;.
-  pageSize: integer, Number of sessions to be returned in the response. If 0 or less, defaults
-to the server&#x27;s maximum allowed page size.
   pageToken: string, If non-empty, `page_token` should contain a
 next_page_token from a previous
 ListSessionsResponse.
+  pageSize: integer, Number of sessions to be returned in the response. If 0 or less, defaults
+to the server&#x27;s maximum allowed page size.
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -4422,11 +4431,15 @@
   An object of the form:
 
     { # The response for ListSessions.
+    &quot;nextPageToken&quot;: &quot;A String&quot;, # `next_page_token` can be sent in a subsequent
+        # ListSessions call to fetch more of the matching
+        # sessions.
     &quot;sessions&quot;: [ # The list of requested sessions.
       { # A session in the Cloud Spanner API.
         &quot;createTime&quot;: &quot;A String&quot;, # Output only. The timestamp when the session is created.
-        &quot;name&quot;: &quot;A String&quot;, # The name of the session. This is always system-assigned; values provided
-            # when creating a session are ignored.
+        &quot;name&quot;: &quot;A String&quot;, # Output only. The name of the session. This is always system-assigned.
+        &quot;approximateLastUseTime&quot;: &quot;A String&quot;, # Output only. The approximate timestamp when the session is last used. It is
+            # typically earlier than the actual last use time.
         &quot;labels&quot;: { # The labels for the session.
             #
             #  * Label keys must be between 1 and 63 characters long and must conform to
@@ -4438,13 +4451,8 @@
             # See https://goo.gl/xmQnxf for more information on and examples of labels.
           &quot;a_key&quot;: &quot;A String&quot;,
         },
-        &quot;approximateLastUseTime&quot;: &quot;A String&quot;, # Output only. The approximate timestamp when the session is last used. It is
-            # typically earlier than the actual last use time.
       },
     ],
-    &quot;nextPageToken&quot;: &quot;A String&quot;, # `next_page_token` can be sent in a subsequent
-        # ListSessions call to fetch more of the matching
-        # sessions.
   }</pre>
 </div>
 
@@ -4496,6 +4504,759 @@
         # It is an error to execute a SQL statement with unbound parameters.
       &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
     },
+    &quot;sql&quot;: &quot;A String&quot;, # Required. The query request to generate partitions for. The request will fail if
+        # the query is not root partitionable. The query plan of a root
+        # partitionable query has a single distributed union operator. A distributed
+        # union operator conceptually divides one or more tables into multiple
+        # splits, remotely evaluates a subquery independently on each split, and
+        # then unions all results.
+        # 
+        # This must not contain DML commands, such as INSERT, UPDATE, or
+        # DELETE. Use ExecuteStreamingSql with a
+        # PartitionedDml transaction for large, partition-friendly DML operations.
+    &quot;paramTypes&quot;: { # It is not always possible for Cloud Spanner to infer the right SQL type
+        # from a JSON value.  For example, values of type `BYTES` and values
+        # of type `STRING` both appear in params as JSON strings.
+        # 
+        # In these cases, `param_types` can be used to specify the exact
+        # SQL type for some or all of the SQL query parameters. See the
+        # definition of Type for more information
+        # about SQL types.
+      &quot;a_key&quot;: { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a
+          # table cell or returned from an SQL query.
+        &quot;code&quot;: &quot;A String&quot;, # Required. The TypeCode for this type.
+        &quot;arrayElementType&quot;: # Object with schema name: Type # If code == ARRAY, then `array_element_type`
+            # is the type of the array elements.
+        &quot;structType&quot;: { # `StructType` defines the fields of a STRUCT type. # If code == STRUCT, then `struct_type`
+            # provides type information for the struct&#x27;s fields.
+          &quot;fields&quot;: [ # The list of fields that make up this struct. Order is
+              # significant, because values of this struct type are represented as
+              # lists, where the order of field values matches the order of
+              # fields in the StructType. In turn, the order of fields
+              # matches the order of columns in a read request, or the order of
+              # fields in the `SELECT` clause of a query.
+            { # Message representing a single field of a struct.
+              &quot;type&quot;: # Object with schema name: Type # The type of the field.
+              &quot;name&quot;: &quot;A String&quot;, # The name of the field. For reads, this is the column name. For
+                  # SQL queries, it is the column alias (e.g., `&quot;Word&quot;` in the
+                  # query `&quot;SELECT &#x27;hello&#x27; AS Word&quot;`), or the column name (e.g.,
+                  # `&quot;ColName&quot;` in the query `&quot;SELECT ColName FROM Table&quot;`). Some
+                  # columns might have an empty name (e.g., `&quot;SELECT
+                  # UPPER(ColName)&quot;`). Note that a query result can contain
+                  # multiple fields with the same name.
+            },
+          ],
+        },
+      },
+    },
+    &quot;transaction&quot;: { # This message is used to select the transaction in which a # Read only snapshot transactions are supported, read/write and single use
+        # transactions are not.
+        # Read or
+        # ExecuteSql call runs.
+        #
+        # See TransactionOptions for more information about transactions.
+      &quot;singleUse&quot;: { # # Transactions # Execute the read or SQL query in a temporary transaction.
+          # This is the most efficient way to execute a transaction that
+          # consists of a single SQL query.
+          #
+          #
+          # Each session can have at most one active transaction at a time (note that
+          # standalone reads and queries use a transaction internally and do count
+          # towards the one transaction limit). After the active transaction is
+          # completed, the session can immediately be re-used for the next transaction.
+          # It is not necessary to create a new session for each transaction.
+          #
+          # # Transaction Modes
+          #
+          # Cloud Spanner supports three transaction modes:
+          #
+          #   1. Locking read-write. This type of transaction is the only way
+          #      to write data into Cloud Spanner. These transactions rely on
+          #      pessimistic locking and, if necessary, two-phase commit.
+          #      Locking read-write transactions may abort, requiring the
+          #      application to retry.
+          #
+          #   2. Snapshot read-only. This transaction type provides guaranteed
+          #      consistency across several reads, but does not allow
+          #      writes. Snapshot read-only transactions can be configured to
+          #      read at timestamps in the past. Snapshot read-only
+          #      transactions do not need to be committed.
+          #
+          #   3. Partitioned DML. This type of transaction is used to execute
+          #      a single Partitioned DML statement. Partitioned DML partitions
+          #      the key space and runs the DML statement over each partition
+          #      in parallel using separate, internal transactions that commit
+          #      independently. Partitioned DML transactions do not need to be
+          #      committed.
+          #
+          # For transactions that only read, snapshot read-only transactions
+          # provide simpler semantics and are almost always faster. In
+          # particular, read-only transactions do not take locks, so they do
+          # not conflict with read-write transactions. As a consequence of not
+          # taking locks, they also do not abort, so retry loops are not needed.
+          #
+          # Transactions may only read/write data in a single database. They
+          # may, however, read/write data in different tables within that
+          # database.
+          #
+          # ## Locking Read-Write Transactions
+          #
+          # Locking transactions may be used to atomically read-modify-write
+          # data anywhere in a database. This type of transaction is externally
+          # consistent.
+          #
+          # Clients should attempt to minimize the amount of time a transaction
+          # is active. Faster transactions commit with higher probability
+          # and cause less contention. Cloud Spanner attempts to keep read locks
+          # active as long as the transaction continues to do reads, and the
+          # transaction has not been terminated by
+          # Commit or
+          # Rollback.  Long periods of
+          # inactivity at the client may cause Cloud Spanner to release a
+          # transaction&#x27;s locks and abort it.
+          #
+          # Conceptually, a read-write transaction consists of zero or more
+          # reads or SQL statements followed by
+          # Commit. At any time before
+          # Commit, the client can send a
+          # Rollback request to abort the
+          # transaction.
+          #
+          # ### Semantics
+          #
+          # Cloud Spanner can commit the transaction if all read locks it acquired
+          # are still valid at commit time, and it is able to acquire write
+          # locks for all writes. Cloud Spanner can abort the transaction for any
+          # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
+          # that the transaction has not modified any user data in Cloud Spanner.
+          #
+          # Unless the transaction commits, Cloud Spanner makes no guarantees about
+          # how long the transaction&#x27;s locks were held for. It is an error to
+          # use Cloud Spanner locks for any sort of mutual exclusion other than
+          # between Cloud Spanner transactions themselves.
+          #
+          # ### Retrying Aborted Transactions
+          #
+          # When a transaction aborts, the application can choose to retry the
+          # whole transaction again. To maximize the chances of successfully
+          # committing the retry, the client should execute the retry in the
+          # same session as the original attempt. The original session&#x27;s lock
+          # priority increases with each consecutive abort, meaning that each
+          # attempt has a slightly better chance of success than the previous.
+          #
+          # Under some circumstances (e.g., many transactions attempting to
+          # modify the same row(s)), a transaction can abort many times in a
+          # short period before successfully committing. Thus, it is not a good
+          # idea to cap the number of retries a transaction can attempt;
+          # instead, it is better to limit the total amount of wall time spent
+          # retrying.
+          #
+          # ### Idle Transactions
+          #
+          # A transaction is considered idle if it has no outstanding reads or
+          # SQL queries and has not started a read or SQL query within the last 10
+          # seconds. Idle transactions can be aborted by Cloud Spanner so that they
+          # don&#x27;t hold on to locks indefinitely. In that case, the commit will
+          # fail with error `ABORTED`.
+          #
+          # If this behavior is undesirable, periodically executing a simple
+          # SQL query in the transaction (e.g., `SELECT 1`) prevents the
+          # transaction from becoming idle.
+          #
+          # ## Snapshot Read-Only Transactions
+          #
+          # Snapshot read-only transactions provides a simpler method than
+          # locking read-write transactions for doing several consistent
+          # reads. However, this type of transaction does not support writes.
+          #
+          # Snapshot transactions do not take locks. Instead, they work by
+          # choosing a Cloud Spanner timestamp, then executing all reads at that
+          # timestamp. Since they do not acquire locks, they do not block
+          # concurrent read-write transactions.
+          #
+          # Unlike locking read-write transactions, snapshot read-only
+          # transactions never abort. They can fail if the chosen read
+          # timestamp is garbage collected; however, the default garbage
+          # collection policy is generous enough that most applications do not
+          # need to worry about this in practice.
+          #
+          # Snapshot read-only transactions do not need to call
+          # Commit or
+          # Rollback (and in fact are not
+          # permitted to do so).
+          #
+          # To execute a snapshot transaction, the client specifies a timestamp
+          # bound, which tells Cloud Spanner how to choose a read timestamp.
+          #
+          # The types of timestamp bound are:
+          #
+          #   - Strong (the default).
+          #   - Bounded staleness.
+          #   - Exact staleness.
+          #
+          # If the Cloud Spanner database to be read is geographically distributed,
+          # stale read-only transactions can execute more quickly than strong
+          # or read-write transaction, because they are able to execute far
+          # from the leader replica.
+          #
+          # Each type of timestamp bound is discussed in detail below.
+          #
+          # ### Strong
+          #
+          # Strong reads are guaranteed to see the effects of all transactions
+          # that have committed before the start of the read. Furthermore, all
+          # rows yielded by a single read are consistent with each other -- if
+          # any part of the read observes a transaction, all parts of the read
+          # see the transaction.
+          #
+          # Strong reads are not repeatable: two consecutive strong read-only
+          # transactions might return inconsistent results if there are
+          # concurrent writes. If consistency across reads is required, the
+          # reads should be executed within a transaction or at an exact read
+          # timestamp.
+          #
+          # See TransactionOptions.ReadOnly.strong.
+          #
+          # ### Exact Staleness
+          #
+          # These timestamp bounds execute reads at a user-specified
+          # timestamp. Reads at a timestamp are guaranteed to see a consistent
+          # prefix of the global transaction history: they observe
+          # modifications done by all transactions with a commit timestamp &lt;=
+          # the read timestamp, and observe none of the modifications done by
+          # transactions with a larger commit timestamp. They will block until
+          # all conflicting transactions that may be assigned commit timestamps
+          # &lt;= the read timestamp have finished.
+          #
+          # The timestamp can either be expressed as an absolute Cloud Spanner commit
+          # timestamp or a staleness relative to the current time.
+          #
+          # These modes do not require a &quot;negotiation phase&quot; to pick a
+          # timestamp. As a result, they execute slightly faster than the
+          # equivalent boundedly stale concurrency modes. On the other hand,
+          # boundedly stale reads usually return fresher results.
+          #
+          # See TransactionOptions.ReadOnly.read_timestamp and
+          # TransactionOptions.ReadOnly.exact_staleness.
+          #
+          # ### Bounded Staleness
+          #
+          # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
+          # subject to a user-provided staleness bound. Cloud Spanner chooses the
+          # newest timestamp within the staleness bound that allows execution
+          # of the reads at the closest available replica without blocking.
+          #
+          # All rows yielded are consistent with each other -- if any part of
+          # the read observes a transaction, all parts of the read see the
+          # transaction. Boundedly stale reads are not repeatable: two stale
+          # reads, even if they use the same staleness bound, can execute at
+          # different timestamps and thus return inconsistent results.
+          #
+          # Boundedly stale reads execute in two phases: the first phase
+          # negotiates a timestamp among all replicas needed to serve the
+          # read. In the second phase, reads are executed at the negotiated
+          # timestamp.
+          #
+          # As a result of the two phase execution, bounded staleness reads are
+          # usually a little slower than comparable exact staleness
+          # reads. However, they are typically able to return fresher
+          # results, and are more likely to execute at the closest replica.
+          #
+          # Because the timestamp negotiation requires up-front knowledge of
+          # which rows will be read, it can only be used with single-use
+          # read-only transactions.
+          #
+          # See TransactionOptions.ReadOnly.max_staleness and
+          # TransactionOptions.ReadOnly.min_read_timestamp.
+          #
+          # ### Old Read Timestamps and Garbage Collection
+          #
+          # Cloud Spanner continuously garbage collects deleted and overwritten data
+          # in the background to reclaim storage space. This process is known
+          # as &quot;version GC&quot;. By default, version GC reclaims versions after they
+          # are one hour old. Because of this, Cloud Spanner cannot perform reads
+          # at read timestamps more than one hour in the past. This
+          # restriction also applies to in-progress reads and/or SQL queries whose
+          # timestamp become too old while executing. Reads and SQL queries with
+          # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
+          #
+          # ## Partitioned DML Transactions
+          #
+          # Partitioned DML transactions are used to execute DML statements with a
+          # different execution strategy that provides different, and often better,
+          # scalability properties for large, table-wide operations than DML in a
+          # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
+          # should prefer using ReadWrite transactions.
+          #
+          # Partitioned DML partitions the keyspace and runs the DML statement on each
+          # partition in separate, internal transactions. These transactions commit
+          # automatically when complete, and run independently from one another.
+          #
+          # To reduce lock contention, this execution strategy only acquires read locks
+          # on rows that match the WHERE clause of the statement. Additionally, the
+          # smaller per-partition transactions hold locks for less time.
+          #
+          # That said, Partitioned DML is not a drop-in replacement for standard DML used
+          # in ReadWrite transactions.
+          #
+          #  - The DML statement must be fully-partitionable. Specifically, the statement
+          #    must be expressible as the union of many statements which each access only
+          #    a single row of the table.
+          #
+          #  - The statement is not applied atomically to all rows of the table. Rather,
+          #    the statement is applied atomically to partitions of the table, in
+          #    independent transactions. Secondary index rows are updated atomically
+          #    with the base table rows.
+          #
+          #  - Partitioned DML does not guarantee exactly-once execution semantics
+          #    against a partition. The statement will be applied at least once to each
+          #    partition. It is strongly recommended that the DML statement should be
+          #    idempotent to avoid unexpected results. For instance, it is potentially
+          #    dangerous to run a statement such as
+          #    `UPDATE table SET column = column + 1` as it could be run multiple times
+          #    against some rows.
+          #
+          #  - The partitions are committed automatically - there is no support for
+          #    Commit or Rollback. If the call returns an error, or if the client issuing
+          #    the ExecuteSql call dies, it is possible that some rows had the statement
+          #    executed on them successfully. It is also possible that statement was
+          #    never executed against other rows.
+          #
+          #  - Partitioned DML transactions may only contain the execution of a single
+          #    DML statement via ExecuteSql or ExecuteStreamingSql.
+          #
+          #  - If any error is encountered during the execution of the partitioned DML
+          #    operation (for instance, a UNIQUE INDEX violation, division by zero, or a
+          #    value that cannot be stored due to schema constraints), then the
+          #    operation is stopped at that point and an error is returned. It is
+          #    possible that at this point, some partitions have been committed (or even
+          #    committed multiple times), and other partitions have not been run at all.
+          #
+          # Given the above, Partitioned DML is good fit for large, database-wide,
+          # operations that are idempotent, such as deleting old rows from a very large
+          # table.
+        &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
+            #
+            # Authorization to begin a read-write transaction requires
+            # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
+            # on the `session` resource.
+            # transaction type has no options.
+        },
+        &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
+            #
+            # Authorization to begin a read-only transaction requires
+            # `spanner.databases.beginReadOnlyTransaction` permission
+            # on the `session` resource.
+          &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
+              # reads at a specific timestamp are repeatable; the same read at
+              # the same timestamp always returns the same data. If the
+              # timestamp is in the future, the read will block until the
+              # specified timestamp, modulo the read&#x27;s deadline.
+              #
+              # Useful for large scale consistent reads such as mapreduces, or
+              # for coordinating many reads against a consistent snapshot of the
+              # data.
+              #
+              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+          &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
+              #
+              # This is useful for requesting fresher data than some previous
+              # read, or data that is fresh enough to observe the effects of some
+              # previously committed transaction whose timestamp is known.
+              #
+              # Note that this option can only be used in single-use transactions.
+              #
+              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+          &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
+              # old. The timestamp is chosen soon after the read is started.
+              #
+              # Guarantees that all writes that have committed more than the
+              # specified number of seconds ago are visible. Because Cloud Spanner
+              # chooses the exact timestamp, this mode works even if the client&#x27;s
+              # local clock is substantially skewed from Cloud Spanner commit
+              # timestamps.
+              #
+              # Useful for reading at nearby replicas without the distributed
+              # timestamp negotiation overhead of `max_staleness`.
+          &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
+              # seconds. Guarantees that all writes that have committed more
+              # than the specified number of seconds ago are visible. Because
+              # Cloud Spanner chooses the exact timestamp, this mode works even if
+              # the client&#x27;s local clock is substantially skewed from Cloud Spanner
+              # commit timestamps.
+              #
+              # Useful for reading the freshest data available at a nearby
+              # replica, while bounding the possible staleness if the local
+              # replica has fallen behind.
+              #
+              # Note that this option can only be used in single-use
+              # transactions.
+          &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
+              # the Transaction message that describes the transaction.
+          &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
+              # are visible.
+        },
+        &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
+            #
+            # Authorization to begin a Partitioned DML transaction requires
+            # `spanner.databases.beginPartitionedDmlTransaction` permission
+            # on the `session` resource.
+        },
+      },
+      &quot;begin&quot;: { # # Transactions # Begin a new transaction and execute this read or SQL query in
+          # it. The transaction ID of the new transaction is returned in
+          # ResultSetMetadata.transaction, which is a Transaction.
+          #
+          #
+          # Each session can have at most one active transaction at a time (note that
+          # standalone reads and queries use a transaction internally and do count
+          # towards the one transaction limit). After the active transaction is
+          # completed, the session can immediately be re-used for the next transaction.
+          # It is not necessary to create a new session for each transaction.
+          #
+          # # Transaction Modes
+          #
+          # Cloud Spanner supports three transaction modes:
+          #
+          #   1. Locking read-write. This type of transaction is the only way
+          #      to write data into Cloud Spanner. These transactions rely on
+          #      pessimistic locking and, if necessary, two-phase commit.
+          #      Locking read-write transactions may abort, requiring the
+          #      application to retry.
+          #
+          #   2. Snapshot read-only. This transaction type provides guaranteed
+          #      consistency across several reads, but does not allow
+          #      writes. Snapshot read-only transactions can be configured to
+          #      read at timestamps in the past. Snapshot read-only
+          #      transactions do not need to be committed.
+          #
+          #   3. Partitioned DML. This type of transaction is used to execute
+          #      a single Partitioned DML statement. Partitioned DML partitions
+          #      the key space and runs the DML statement over each partition
+          #      in parallel using separate, internal transactions that commit
+          #      independently. Partitioned DML transactions do not need to be
+          #      committed.
+          #
+          # For transactions that only read, snapshot read-only transactions
+          # provide simpler semantics and are almost always faster. In
+          # particular, read-only transactions do not take locks, so they do
+          # not conflict with read-write transactions. As a consequence of not
+          # taking locks, they also do not abort, so retry loops are not needed.
+          #
+          # Transactions may only read/write data in a single database. They
+          # may, however, read/write data in different tables within that
+          # database.
+          #
+          # ## Locking Read-Write Transactions
+          #
+          # Locking transactions may be used to atomically read-modify-write
+          # data anywhere in a database. This type of transaction is externally
+          # consistent.
+          #
+          # Clients should attempt to minimize the amount of time a transaction
+          # is active. Faster transactions commit with higher probability
+          # and cause less contention. Cloud Spanner attempts to keep read locks
+          # active as long as the transaction continues to do reads, and the
+          # transaction has not been terminated by
+          # Commit or
+          # Rollback.  Long periods of
+          # inactivity at the client may cause Cloud Spanner to release a
+          # transaction&#x27;s locks and abort it.
+          #
+          # Conceptually, a read-write transaction consists of zero or more
+          # reads or SQL statements followed by
+          # Commit. At any time before
+          # Commit, the client can send a
+          # Rollback request to abort the
+          # transaction.
+          #
+          # ### Semantics
+          #
+          # Cloud Spanner can commit the transaction if all read locks it acquired
+          # are still valid at commit time, and it is able to acquire write
+          # locks for all writes. Cloud Spanner can abort the transaction for any
+          # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
+          # that the transaction has not modified any user data in Cloud Spanner.
+          #
+          # Unless the transaction commits, Cloud Spanner makes no guarantees about
+          # how long the transaction&#x27;s locks were held for. It is an error to
+          # use Cloud Spanner locks for any sort of mutual exclusion other than
+          # between Cloud Spanner transactions themselves.
+          #
+          # ### Retrying Aborted Transactions
+          #
+          # When a transaction aborts, the application can choose to retry the
+          # whole transaction again. To maximize the chances of successfully
+          # committing the retry, the client should execute the retry in the
+          # same session as the original attempt. The original session&#x27;s lock
+          # priority increases with each consecutive abort, meaning that each
+          # attempt has a slightly better chance of success than the previous.
+          #
+          # Under some circumstances (e.g., many transactions attempting to
+          # modify the same row(s)), a transaction can abort many times in a
+          # short period before successfully committing. Thus, it is not a good
+          # idea to cap the number of retries a transaction can attempt;
+          # instead, it is better to limit the total amount of wall time spent
+          # retrying.
+          #
+          # ### Idle Transactions
+          #
+          # A transaction is considered idle if it has no outstanding reads or
+          # SQL queries and has not started a read or SQL query within the last 10
+          # seconds. Idle transactions can be aborted by Cloud Spanner so that they
+          # don&#x27;t hold on to locks indefinitely. In that case, the commit will
+          # fail with error `ABORTED`.
+          #
+          # If this behavior is undesirable, periodically executing a simple
+          # SQL query in the transaction (e.g., `SELECT 1`) prevents the
+          # transaction from becoming idle.
+          #
+          # ## Snapshot Read-Only Transactions
+          #
+          # Snapshot read-only transactions provide a simpler method than
+          # locking read-write transactions for doing several consistent
+          # reads. However, this type of transaction does not support writes.
+          #
+          # Snapshot transactions do not take locks. Instead, they work by
+          # choosing a Cloud Spanner timestamp, then executing all reads at that
+          # timestamp. Since they do not acquire locks, they do not block
+          # concurrent read-write transactions.
+          #
+          # Unlike locking read-write transactions, snapshot read-only
+          # transactions never abort. They can fail if the chosen read
+          # timestamp is garbage collected; however, the default garbage
+          # collection policy is generous enough that most applications do not
+          # need to worry about this in practice.
+          #
+          # Snapshot read-only transactions do not need to call
+          # Commit or
+          # Rollback (and in fact are not
+          # permitted to do so).
+          #
+          # To execute a snapshot transaction, the client specifies a timestamp
+          # bound, which tells Cloud Spanner how to choose a read timestamp.
+          #
+          # The types of timestamp bound are:
+          #
+          #   - Strong (the default).
+          #   - Bounded staleness.
+          #   - Exact staleness.
+          #
+          # If the Cloud Spanner database to be read is geographically distributed,
+          # stale read-only transactions can execute more quickly than strong
+          # or read-write transactions, because they are able to execute far
+          # from the leader replica.
+          #
+          # Each type of timestamp bound is discussed in detail below.
+          #
+          # ### Strong
+          #
+          # Strong reads are guaranteed to see the effects of all transactions
+          # that have committed before the start of the read. Furthermore, all
+          # rows yielded by a single read are consistent with each other -- if
+          # any part of the read observes a transaction, all parts of the read
+          # see the transaction.
+          #
+          # Strong reads are not repeatable: two consecutive strong read-only
+          # transactions might return inconsistent results if there are
+          # concurrent writes. If consistency across reads is required, the
+          # reads should be executed within a transaction or at an exact read
+          # timestamp.
+          #
+          # See TransactionOptions.ReadOnly.strong.
+          #
+          # ### Exact Staleness
+          #
+          # These timestamp bounds execute reads at a user-specified
+          # timestamp. Reads at a timestamp are guaranteed to see a consistent
+          # prefix of the global transaction history: they observe
+          # modifications done by all transactions with a commit timestamp &lt;=
+          # the read timestamp, and observe none of the modifications done by
+          # transactions with a larger commit timestamp. They will block until
+          # all conflicting transactions that may be assigned commit timestamps
+          # &lt;= the read timestamp have finished.
+          #
+          # The timestamp can either be expressed as an absolute Cloud Spanner commit
+          # timestamp or a staleness relative to the current time.
+          #
+          # These modes do not require a &quot;negotiation phase&quot; to pick a
+          # timestamp. As a result, they execute slightly faster than the
+          # equivalent boundedly stale concurrency modes. On the other hand,
+          # boundedly stale reads usually return fresher results.
+          #
+          # See TransactionOptions.ReadOnly.read_timestamp and
+          # TransactionOptions.ReadOnly.exact_staleness.
+          #
+          # ### Bounded Staleness
+          #
+          # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
+          # subject to a user-provided staleness bound. Cloud Spanner chooses the
+          # newest timestamp within the staleness bound that allows execution
+          # of the reads at the closest available replica without blocking.
+          #
+          # All rows yielded are consistent with each other -- if any part of
+          # the read observes a transaction, all parts of the read see the
+          # transaction. Boundedly stale reads are not repeatable: two stale
+          # reads, even if they use the same staleness bound, can execute at
+          # different timestamps and thus return inconsistent results.
+          #
+          # Boundedly stale reads execute in two phases: the first phase
+          # negotiates a timestamp among all replicas needed to serve the
+          # read. In the second phase, reads are executed at the negotiated
+          # timestamp.
+          #
+          # As a result of the two phase execution, bounded staleness reads are
+          # usually a little slower than comparable exact staleness
+          # reads. However, they are typically able to return fresher
+          # results, and are more likely to execute at the closest replica.
+          #
+          # Because the timestamp negotiation requires up-front knowledge of
+          # which rows will be read, it can only be used with single-use
+          # read-only transactions.
+          #
+          # See TransactionOptions.ReadOnly.max_staleness and
+          # TransactionOptions.ReadOnly.min_read_timestamp.
+          #
+          # ### Old Read Timestamps and Garbage Collection
+          #
+          # Cloud Spanner continuously garbage collects deleted and overwritten data
+          # in the background to reclaim storage space. This process is known
+          # as &quot;version GC&quot;. By default, version GC reclaims versions after they
+          # are one hour old. Because of this, Cloud Spanner cannot perform reads
+          # at read timestamps more than one hour in the past. This
+          # restriction also applies to in-progress reads and/or SQL queries whose
+          # timestamp become too old while executing. Reads and SQL queries with
+          # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
+          #
+          # ## Partitioned DML Transactions
+          #
+          # Partitioned DML transactions are used to execute DML statements with a
+          # different execution strategy that provides different, and often better,
+          # scalability properties for large, table-wide operations than DML in a
+          # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
+          # should prefer using ReadWrite transactions.
+          #
+          # Partitioned DML partitions the keyspace and runs the DML statement on each
+          # partition in separate, internal transactions. These transactions commit
+          # automatically when complete, and run independently from one another.
+          #
+          # To reduce lock contention, this execution strategy only acquires read locks
+          # on rows that match the WHERE clause of the statement. Additionally, the
+          # smaller per-partition transactions hold locks for less time.
+          #
+          # That said, Partitioned DML is not a drop-in replacement for standard DML used
+          # in ReadWrite transactions.
+          #
+          #  - The DML statement must be fully-partitionable. Specifically, the statement
+          #    must be expressible as the union of many statements which each access only
+          #    a single row of the table.
+          #
+          #  - The statement is not applied atomically to all rows of the table. Rather,
+          #    the statement is applied atomically to partitions of the table, in
+          #    independent transactions. Secondary index rows are updated atomically
+          #    with the base table rows.
+          #
+          #  - Partitioned DML does not guarantee exactly-once execution semantics
+          #    against a partition. The statement will be applied at least once to each
+          #    partition. It is strongly recommended that the DML statement should be
+          #    idempotent to avoid unexpected results. For instance, it is potentially
+          #    dangerous to run a statement such as
+          #    `UPDATE table SET column = column + 1` as it could be run multiple times
+          #    against some rows.
+          #
+          #  - The partitions are committed automatically - there is no support for
+          #    Commit or Rollback. If the call returns an error, or if the client issuing
+          #    the ExecuteSql call dies, it is possible that some rows had the statement
+          #    executed on them successfully. It is also possible that the statement was
+          #    never executed against other rows.
+          #
+          #  - Partitioned DML transactions may only contain the execution of a single
+          #    DML statement via ExecuteSql or ExecuteStreamingSql.
+          #
+          #  - If any error is encountered during the execution of the partitioned DML
+          #    operation (for instance, a UNIQUE INDEX violation, division by zero, or a
+          #    value that cannot be stored due to schema constraints), then the
+          #    operation is stopped at that point and an error is returned. It is
+          #    possible that at this point, some partitions have been committed (or even
+          #    committed multiple times), and other partitions have not been run at all.
+          #
+          # Given the above, Partitioned DML is a good fit for large, database-wide
+          # operations that are idempotent, such as deleting old rows from a very large
+          # table.
+        &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
+            #
+            # Authorization to begin a read-write transaction requires
+            # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
+            # on the `session` resource.
+            # transaction type has no options.
+        },
+        &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
+            #
+            # Authorization to begin a read-only transaction requires
+            # `spanner.databases.beginReadOnlyTransaction` permission
+            # on the `session` resource.
+          &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
+              # reads at a specific timestamp are repeatable; the same read at
+              # the same timestamp always returns the same data. If the
+              # timestamp is in the future, the read will block until the
+              # specified timestamp, modulo the read&#x27;s deadline.
+              #
+              # Useful for large scale consistent reads such as mapreduces, or
+              # for coordinating many reads against a consistent snapshot of the
+              # data.
+              #
+              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+          &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
+              #
+              # This is useful for requesting fresher data than some previous
+              # read, or data that is fresh enough to observe the effects of some
+              # previously committed transaction whose timestamp is known.
+              #
+              # Note that this option can only be used in single-use transactions.
+              #
+              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+          &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
+              # old. The timestamp is chosen soon after the read is started.
+              #
+              # Guarantees that all writes that have committed more than the
+              # specified number of seconds ago are visible. Because Cloud Spanner
+              # chooses the exact timestamp, this mode works even if the client&#x27;s
+              # local clock is substantially skewed from Cloud Spanner commit
+              # timestamps.
+              #
+              # Useful for reading at nearby replicas without the distributed
+              # timestamp negotiation overhead of `max_staleness`.
+          &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
+              # seconds. Guarantees that all writes that have committed more
+              # than the specified number of seconds ago are visible. Because
+              # Cloud Spanner chooses the exact timestamp, this mode works even if
+              # the client&#x27;s local clock is substantially skewed from Cloud Spanner
+              # commit timestamps.
+              #
+              # Useful for reading the freshest data available at a nearby
+              # replica, while bounding the possible staleness if the local
+              # replica has fallen behind.
+              #
+              # Note that this option can only be used in single-use
+              # transactions.
+          &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
+              # the Transaction message that describes the transaction.
+          &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
+              # are visible.
+        },
+        &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
+            #
+            # Authorization to begin a Partitioned DML transaction requires
+            # `spanner.databases.beginPartitionedDmlTransaction` permission
+            # on the `session` resource.
+        },
+      },
+      &quot;id&quot;: &quot;A String&quot;, # Execute the read or SQL query in a previously-started transaction.
+    },
     &quot;partitionOptions&quot;: { # Options for a PartitionQueryRequest and # Additional options that affect how many partitions are created.
         # PartitionReadRequest.
       &quot;maxPartitions&quot;: &quot;A String&quot;, # **Note:** This hint is currently ignored by PartitionQuery and
@@ -4513,757 +5274,6 @@
           # option is currently 1 GiB.  This is only a hint. The actual size of each
           # partition may be smaller or larger than this size request.
     },
-    &quot;sql&quot;: &quot;A String&quot;, # Required. The query request to generate partitions for. The request will fail if
-        # the query is not root partitionable. The query plan of a root
-        # partitionable query has a single distributed union operator. A distributed
-        # union operator conceptually divides one or more tables into multiple
-        # splits, remotely evaluates a subquery independently on each split, and
-        # then unions all results.
-        # 
-        # This must not contain DML commands, such as INSERT, UPDATE, or
-        # DELETE. Use ExecuteStreamingSql with a
-        # PartitionedDml transaction for large, partition-friendly DML operations.
-    &quot;transaction&quot;: { # This message is used to select the transaction in which a # Read only snapshot transactions are supported, read/write and single use
-        # transactions are not.
-        # Read or
-        # ExecuteSql call runs.
-        #
-        # See TransactionOptions for more information about transactions.
-      &quot;id&quot;: &quot;A String&quot;, # Execute the read or SQL query in a previously-started transaction.
-      &quot;begin&quot;: { # # Transactions # Begin a new transaction and execute this read or SQL query in
-          # it. The transaction ID of the new transaction is returned in
-          # ResultSetMetadata.transaction, which is a Transaction.
-          #
-          #
-          # Each session can have at most one active transaction at a time. After the
-          # active transaction is completed, the session can immediately be
-          # re-used for the next transaction. It is not necessary to create a
-          # new session for each transaction.
-          #
-          # # Transaction Modes
-          #
-          # Cloud Spanner supports three transaction modes:
-          #
-          #   1. Locking read-write. This type of transaction is the only way
-          #      to write data into Cloud Spanner. These transactions rely on
-          #      pessimistic locking and, if necessary, two-phase commit.
-          #      Locking read-write transactions may abort, requiring the
-          #      application to retry.
-          #
-          #   2. Snapshot read-only. This transaction type provides guaranteed
-          #      consistency across several reads, but does not allow
-          #      writes. Snapshot read-only transactions can be configured to
-          #      read at timestamps in the past. Snapshot read-only
-          #      transactions do not need to be committed.
-          #
-          #   3. Partitioned DML. This type of transaction is used to execute
-          #      a single Partitioned DML statement. Partitioned DML partitions
-          #      the key space and runs the DML statement over each partition
-          #      in parallel using separate, internal transactions that commit
-          #      independently. Partitioned DML transactions do not need to be
-          #      committed.
-          #
-          # For transactions that only read, snapshot read-only transactions
-          # provide simpler semantics and are almost always faster. In
-          # particular, read-only transactions do not take locks, so they do
-          # not conflict with read-write transactions. As a consequence of not
-          # taking locks, they also do not abort, so retry loops are not needed.
-          #
-          # Transactions may only read/write data in a single database. They
-          # may, however, read/write data in different tables within that
-          # database.
-          #
-          # ## Locking Read-Write Transactions
-          #
-          # Locking transactions may be used to atomically read-modify-write
-          # data anywhere in a database. This type of transaction is externally
-          # consistent.
-          #
-          # Clients should attempt to minimize the amount of time a transaction
-          # is active. Faster transactions commit with higher probability
-          # and cause less contention. Cloud Spanner attempts to keep read locks
-          # active as long as the transaction continues to do reads, and the
-          # transaction has not been terminated by
-          # Commit or
-          # Rollback.  Long periods of
-          # inactivity at the client may cause Cloud Spanner to release a
-          # transaction&#x27;s locks and abort it.
-          #
-          # Conceptually, a read-write transaction consists of zero or more
-          # reads or SQL statements followed by
-          # Commit. At any time before
-          # Commit, the client can send a
-          # Rollback request to abort the
-          # transaction.
-          #
-          # ### Semantics
-          #
-          # Cloud Spanner can commit the transaction if all read locks it acquired
-          # are still valid at commit time, and it is able to acquire write
-          # locks for all writes. Cloud Spanner can abort the transaction for any
-          # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
-          # that the transaction has not modified any user data in Cloud Spanner.
-          #
-          # Unless the transaction commits, Cloud Spanner makes no guarantees about
-          # how long the transaction&#x27;s locks were held for. It is an error to
-          # use Cloud Spanner locks for any sort of mutual exclusion other than
-          # between Cloud Spanner transactions themselves.
-          #
-          # ### Retrying Aborted Transactions
-          #
-          # When a transaction aborts, the application can choose to retry the
-          # whole transaction again. To maximize the chances of successfully
-          # committing the retry, the client should execute the retry in the
-          # same session as the original attempt. The original session&#x27;s lock
-          # priority increases with each consecutive abort, meaning that each
-          # attempt has a slightly better chance of success than the previous.
-          #
-          # Under some circumstances (e.g., many transactions attempting to
-          # modify the same row(s)), a transaction can abort many times in a
-          # short period before successfully committing. Thus, it is not a good
-          # idea to cap the number of retries a transaction can attempt;
-          # instead, it is better to limit the total amount of wall time spent
-          # retrying.
-          #
-          # ### Idle Transactions
-          #
-          # A transaction is considered idle if it has no outstanding reads or
-          # SQL queries and has not started a read or SQL query within the last 10
-          # seconds. Idle transactions can be aborted by Cloud Spanner so that they
-          # don&#x27;t hold on to locks indefinitely. In that case, the commit will
-          # fail with error `ABORTED`.
-          #
-          # If this behavior is undesirable, periodically executing a simple
-          # SQL query in the transaction (e.g., `SELECT 1`) prevents the
-          # transaction from becoming idle.
-          #
-          # ## Snapshot Read-Only Transactions
-          #
-          # Snapshot read-only transactions provides a simpler method than
-          # locking read-write transactions for doing several consistent
-          # reads. However, this type of transaction does not support writes.
-          #
-          # Snapshot transactions do not take locks. Instead, they work by
-          # choosing a Cloud Spanner timestamp, then executing all reads at that
-          # timestamp. Since they do not acquire locks, they do not block
-          # concurrent read-write transactions.
-          #
-          # Unlike locking read-write transactions, snapshot read-only
-          # transactions never abort. They can fail if the chosen read
-          # timestamp is garbage collected; however, the default garbage
-          # collection policy is generous enough that most applications do not
-          # need to worry about this in practice.
-          #
-          # Snapshot read-only transactions do not need to call
-          # Commit or
-          # Rollback (and in fact are not
-          # permitted to do so).
-          #
-          # To execute a snapshot transaction, the client specifies a timestamp
-          # bound, which tells Cloud Spanner how to choose a read timestamp.
-          #
-          # The types of timestamp bound are:
-          #
-          #   - Strong (the default).
-          #   - Bounded staleness.
-          #   - Exact staleness.
-          #
-          # If the Cloud Spanner database to be read is geographically distributed,
-          # stale read-only transactions can execute more quickly than strong
-          # or read-write transaction, because they are able to execute far
-          # from the leader replica.
-          #
-          # Each type of timestamp bound is discussed in detail below.
-          #
-          # ### Strong
-          #
-          # Strong reads are guaranteed to see the effects of all transactions
-          # that have committed before the start of the read. Furthermore, all
-          # rows yielded by a single read are consistent with each other -- if
-          # any part of the read observes a transaction, all parts of the read
-          # see the transaction.
-          #
-          # Strong reads are not repeatable: two consecutive strong read-only
-          # transactions might return inconsistent results if there are
-          # concurrent writes. If consistency across reads is required, the
-          # reads should be executed within a transaction or at an exact read
-          # timestamp.
-          #
-          # See TransactionOptions.ReadOnly.strong.
-          #
-          # ### Exact Staleness
-          #
-          # These timestamp bounds execute reads at a user-specified
-          # timestamp. Reads at a timestamp are guaranteed to see a consistent
-          # prefix of the global transaction history: they observe
-          # modifications done by all transactions with a commit timestamp &lt;=
-          # the read timestamp, and observe none of the modifications done by
-          # transactions with a larger commit timestamp. They will block until
-          # all conflicting transactions that may be assigned commit timestamps
-          # &lt;= the read timestamp have finished.
-          #
-          # The timestamp can either be expressed as an absolute Cloud Spanner commit
-          # timestamp or a staleness relative to the current time.
-          #
-          # These modes do not require a &quot;negotiation phase&quot; to pick a
-          # timestamp. As a result, they execute slightly faster than the
-          # equivalent boundedly stale concurrency modes. On the other hand,
-          # boundedly stale reads usually return fresher results.
-          #
-          # See TransactionOptions.ReadOnly.read_timestamp and
-          # TransactionOptions.ReadOnly.exact_staleness.
-          #
-          # ### Bounded Staleness
-          #
-          # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
-          # subject to a user-provided staleness bound. Cloud Spanner chooses the
-          # newest timestamp within the staleness bound that allows execution
-          # of the reads at the closest available replica without blocking.
-          #
-          # All rows yielded are consistent with each other -- if any part of
-          # the read observes a transaction, all parts of the read see the
-          # transaction. Boundedly stale reads are not repeatable: two stale
-          # reads, even if they use the same staleness bound, can execute at
-          # different timestamps and thus return inconsistent results.
-          #
-          # Boundedly stale reads execute in two phases: the first phase
-          # negotiates a timestamp among all replicas needed to serve the
-          # read. In the second phase, reads are executed at the negotiated
-          # timestamp.
-          #
-          # As a result of the two phase execution, bounded staleness reads are
-          # usually a little slower than comparable exact staleness
-          # reads. However, they are typically able to return fresher
-          # results, and are more likely to execute at the closest replica.
-          #
-          # Because the timestamp negotiation requires up-front knowledge of
-          # which rows will be read, it can only be used with single-use
-          # read-only transactions.
-          #
-          # See TransactionOptions.ReadOnly.max_staleness and
-          # TransactionOptions.ReadOnly.min_read_timestamp.
-          #
-          # ### Old Read Timestamps and Garbage Collection
-          #
-          # Cloud Spanner continuously garbage collects deleted and overwritten data
-          # in the background to reclaim storage space. This process is known
-          # as &quot;version GC&quot;. By default, version GC reclaims versions after they
-          # are one hour old. Because of this, Cloud Spanner cannot perform reads
-          # at read timestamps more than one hour in the past. This
-          # restriction also applies to in-progress reads and/or SQL queries whose
-          # timestamp become too old while executing. Reads and SQL queries with
-          # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
-          #
-          # ## Partitioned DML Transactions
-          #
-          # Partitioned DML transactions are used to execute DML statements with a
-          # different execution strategy that provides different, and often better,
-          # scalability properties for large, table-wide operations than DML in a
-          # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
-          # should prefer using ReadWrite transactions.
-          #
-          # Partitioned DML partitions the keyspace and runs the DML statement on each
-          # partition in separate, internal transactions. These transactions commit
-          # automatically when complete, and run independently from one another.
-          #
-          # To reduce lock contention, this execution strategy only acquires read locks
-          # on rows that match the WHERE clause of the statement. Additionally, the
-          # smaller per-partition transactions hold locks for less time.
-          #
-          # That said, Partitioned DML is not a drop-in replacement for standard DML used
-          # in ReadWrite transactions.
-          #
-          #  - The DML statement must be fully-partitionable. Specifically, the statement
-          #    must be expressible as the union of many statements which each access only
-          #    a single row of the table.
-          #
-          #  - The statement is not applied atomically to all rows of the table. Rather,
-          #    the statement is applied atomically to partitions of the table, in
-          #    independent transactions. Secondary index rows are updated atomically
-          #    with the base table rows.
-          #
-          #  - Partitioned DML does not guarantee exactly-once execution semantics
-          #    against a partition. The statement will be applied at least once to each
-          #    partition. It is strongly recommended that the DML statement should be
-          #    idempotent to avoid unexpected results. For instance, it is potentially
-          #    dangerous to run a statement such as
-          #    `UPDATE table SET column = column + 1` as it could be run multiple times
-          #    against some rows.
-          #
-          #  - The partitions are committed automatically - there is no support for
-          #    Commit or Rollback. If the call returns an error, or if the client issuing
-          #    the ExecuteSql call dies, it is possible that some rows had the statement
-          #    executed on them successfully. It is also possible that statement was
-          #    never executed against other rows.
-          #
-          #  - Partitioned DML transactions may only contain the execution of a single
-          #    DML statement via ExecuteSql or ExecuteStreamingSql.
-          #
-          #  - If any error is encountered during the execution of the partitioned DML
-          #    operation (for instance, a UNIQUE INDEX violation, division by zero, or a
-          #    value that cannot be stored due to schema constraints), then the
-          #    operation is stopped at that point and an error is returned. It is
-          #    possible that at this point, some partitions have been committed (or even
-          #    committed multiple times), and other partitions have not been run at all.
-          #
-          # Given the above, Partitioned DML is good fit for large, database-wide,
-          # operations that are idempotent, such as deleting old rows from a very large
-          # table.
-        &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
-            #
-            # Authorization to begin a Partitioned DML transaction requires
-            # `spanner.databases.beginPartitionedDmlTransaction` permission
-            # on the `session` resource.
-        },
-        &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
-            #
-            # Authorization to begin a read-write transaction requires
-            # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
-            # on the `session` resource.
-            # transaction type has no options.
-        },
-        &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
-            #
-            # Authorization to begin a read-only transaction requires
-            # `spanner.databases.beginReadOnlyTransaction` permission
-            # on the `session` resource.
-          &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
-              # seconds. Guarantees that all writes that have committed more
-              # than the specified number of seconds ago are visible. Because
-              # Cloud Spanner chooses the exact timestamp, this mode works even if
-              # the client&#x27;s local clock is substantially skewed from Cloud Spanner
-              # commit timestamps.
-              #
-              # Useful for reading the freshest data available at a nearby
-              # replica, while bounding the possible staleness if the local
-              # replica has fallen behind.
-              #
-              # Note that this option can only be used in single-use
-              # transactions.
-          &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
-              #
-              # This is useful for requesting fresher data than some previous
-              # read, or data that is fresh enough to observe the effects of some
-              # previously committed transaction whose timestamp is known.
-              #
-              # Note that this option can only be used in single-use transactions.
-              #
-              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-          &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
-              # are visible.
-          &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
-              # the Transaction message that describes the transaction.
-          &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
-              # old. The timestamp is chosen soon after the read is started.
-              #
-              # Guarantees that all writes that have committed more than the
-              # specified number of seconds ago are visible. Because Cloud Spanner
-              # chooses the exact timestamp, this mode works even if the client&#x27;s
-              # local clock is substantially skewed from Cloud Spanner commit
-              # timestamps.
-              #
-              # Useful for reading at nearby replicas without the distributed
-              # timestamp negotiation overhead of `max_staleness`.
-          &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
-              # reads at a specific timestamp are repeatable; the same read at
-              # the same timestamp always returns the same data. If the
-              # timestamp is in the future, the read will block until the
-              # specified timestamp, modulo the read&#x27;s deadline.
-              #
-              # Useful for large scale consistent reads such as mapreduces, or
-              # for coordinating many reads against a consistent snapshot of the
-              # data.
-              #
-              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-        },
-      },
-      &quot;singleUse&quot;: { # # Transactions # Execute the read or SQL query in a temporary transaction.
-          # This is the most efficient way to execute a transaction that
-          # consists of a single SQL query.
-          #
-          #
-          # Each session can have at most one active transaction at a time. After the
-          # active transaction is completed, the session can immediately be
-          # re-used for the next transaction. It is not necessary to create a
-          # new session for each transaction.
-          #
-          # # Transaction Modes
-          #
-          # Cloud Spanner supports three transaction modes:
-          #
-          #   1. Locking read-write. This type of transaction is the only way
-          #      to write data into Cloud Spanner. These transactions rely on
-          #      pessimistic locking and, if necessary, two-phase commit.
-          #      Locking read-write transactions may abort, requiring the
-          #      application to retry.
-          #
-          #   2. Snapshot read-only. This transaction type provides guaranteed
-          #      consistency across several reads, but does not allow
-          #      writes. Snapshot read-only transactions can be configured to
-          #      read at timestamps in the past. Snapshot read-only
-          #      transactions do not need to be committed.
-          #
-          #   3. Partitioned DML. This type of transaction is used to execute
-          #      a single Partitioned DML statement. Partitioned DML partitions
-          #      the key space and runs the DML statement over each partition
-          #      in parallel using separate, internal transactions that commit
-          #      independently. Partitioned DML transactions do not need to be
-          #      committed.
-          #
-          # For transactions that only read, snapshot read-only transactions
-          # provide simpler semantics and are almost always faster. In
-          # particular, read-only transactions do not take locks, so they do
-          # not conflict with read-write transactions. As a consequence of not
-          # taking locks, they also do not abort, so retry loops are not needed.
-          #
-          # Transactions may only read/write data in a single database. They
-          # may, however, read/write data in different tables within that
-          # database.
-          #
-          # ## Locking Read-Write Transactions
-          #
-          # Locking transactions may be used to atomically read-modify-write
-          # data anywhere in a database. This type of transaction is externally
-          # consistent.
-          #
-          # Clients should attempt to minimize the amount of time a transaction
-          # is active. Faster transactions commit with higher probability
-          # and cause less contention. Cloud Spanner attempts to keep read locks
-          # active as long as the transaction continues to do reads, and the
-          # transaction has not been terminated by
-          # Commit or
-          # Rollback.  Long periods of
-          # inactivity at the client may cause Cloud Spanner to release a
-          # transaction&#x27;s locks and abort it.
-          #
-          # Conceptually, a read-write transaction consists of zero or more
-          # reads or SQL statements followed by
-          # Commit. At any time before
-          # Commit, the client can send a
-          # Rollback request to abort the
-          # transaction.
-          #
-          # ### Semantics
-          #
-          # Cloud Spanner can commit the transaction if all read locks it acquired
-          # are still valid at commit time, and it is able to acquire write
-          # locks for all writes. Cloud Spanner can abort the transaction for any
-          # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
-          # that the transaction has not modified any user data in Cloud Spanner.
-          #
-          # Unless the transaction commits, Cloud Spanner makes no guarantees about
-          # how long the transaction&#x27;s locks were held for. It is an error to
-          # use Cloud Spanner locks for any sort of mutual exclusion other than
-          # between Cloud Spanner transactions themselves.
-          #
-          # ### Retrying Aborted Transactions
-          #
-          # When a transaction aborts, the application can choose to retry the
-          # whole transaction again. To maximize the chances of successfully
-          # committing the retry, the client should execute the retry in the
-          # same session as the original attempt. The original session&#x27;s lock
-          # priority increases with each consecutive abort, meaning that each
-          # attempt has a slightly better chance of success than the previous.
-          #
-          # Under some circumstances (e.g., many transactions attempting to
-          # modify the same row(s)), a transaction can abort many times in a
-          # short period before successfully committing. Thus, it is not a good
-          # idea to cap the number of retries a transaction can attempt;
-          # instead, it is better to limit the total amount of wall time spent
-          # retrying.
-          #
-          # ### Idle Transactions
-          #
-          # A transaction is considered idle if it has no outstanding reads or
-          # SQL queries and has not started a read or SQL query within the last 10
-          # seconds. Idle transactions can be aborted by Cloud Spanner so that they
-          # don&#x27;t hold on to locks indefinitely. In that case, the commit will
-          # fail with error `ABORTED`.
-          #
-          # If this behavior is undesirable, periodically executing a simple
-          # SQL query in the transaction (e.g., `SELECT 1`) prevents the
-          # transaction from becoming idle.
-          #
-          # ## Snapshot Read-Only Transactions
-          #
-          # Snapshot read-only transactions provides a simpler method than
-          # locking read-write transactions for doing several consistent
-          # reads. However, this type of transaction does not support writes.
-          #
-          # Snapshot transactions do not take locks. Instead, they work by
-          # choosing a Cloud Spanner timestamp, then executing all reads at that
-          # timestamp. Since they do not acquire locks, they do not block
-          # concurrent read-write transactions.
-          #
-          # Unlike locking read-write transactions, snapshot read-only
-          # transactions never abort. They can fail if the chosen read
-          # timestamp is garbage collected; however, the default garbage
-          # collection policy is generous enough that most applications do not
-          # need to worry about this in practice.
-          #
-          # Snapshot read-only transactions do not need to call
-          # Commit or
-          # Rollback (and in fact are not
-          # permitted to do so).
-          #
-          # To execute a snapshot transaction, the client specifies a timestamp
-          # bound, which tells Cloud Spanner how to choose a read timestamp.
-          #
-          # The types of timestamp bound are:
-          #
-          #   - Strong (the default).
-          #   - Bounded staleness.
-          #   - Exact staleness.
-          #
-          # If the Cloud Spanner database to be read is geographically distributed,
-          # stale read-only transactions can execute more quickly than strong
-          # or read-write transaction, because they are able to execute far
-          # from the leader replica.
-          #
-          # Each type of timestamp bound is discussed in detail below.
-          #
-          # ### Strong
-          #
-          # Strong reads are guaranteed to see the effects of all transactions
-          # that have committed before the start of the read. Furthermore, all
-          # rows yielded by a single read are consistent with each other -- if
-          # any part of the read observes a transaction, all parts of the read
-          # see the transaction.
-          #
-          # Strong reads are not repeatable: two consecutive strong read-only
-          # transactions might return inconsistent results if there are
-          # concurrent writes. If consistency across reads is required, the
-          # reads should be executed within a transaction or at an exact read
-          # timestamp.
-          #
-          # See TransactionOptions.ReadOnly.strong.
-          #
-          # ### Exact Staleness
-          #
-          # These timestamp bounds execute reads at a user-specified
-          # timestamp. Reads at a timestamp are guaranteed to see a consistent
-          # prefix of the global transaction history: they observe
-          # modifications done by all transactions with a commit timestamp &lt;=
-          # the read timestamp, and observe none of the modifications done by
-          # transactions with a larger commit timestamp. They will block until
-          # all conflicting transactions that may be assigned commit timestamps
-          # &lt;= the read timestamp have finished.
-          #
-          # The timestamp can either be expressed as an absolute Cloud Spanner commit
-          # timestamp or a staleness relative to the current time.
-          #
-          # These modes do not require a &quot;negotiation phase&quot; to pick a
-          # timestamp. As a result, they execute slightly faster than the
-          # equivalent boundedly stale concurrency modes. On the other hand,
-          # boundedly stale reads usually return fresher results.
-          #
-          # See TransactionOptions.ReadOnly.read_timestamp and
-          # TransactionOptions.ReadOnly.exact_staleness.
-          #
-          # ### Bounded Staleness
-          #
-          # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
-          # subject to a user-provided staleness bound. Cloud Spanner chooses the
-          # newest timestamp within the staleness bound that allows execution
-          # of the reads at the closest available replica without blocking.
-          #
-          # All rows yielded are consistent with each other -- if any part of
-          # the read observes a transaction, all parts of the read see the
-          # transaction. Boundedly stale reads are not repeatable: two stale
-          # reads, even if they use the same staleness bound, can execute at
-          # different timestamps and thus return inconsistent results.
-          #
-          # Boundedly stale reads execute in two phases: the first phase
-          # negotiates a timestamp among all replicas needed to serve the
-          # read. In the second phase, reads are executed at the negotiated
-          # timestamp.
-          #
-          # As a result of the two phase execution, bounded staleness reads are
-          # usually a little slower than comparable exact staleness
-          # reads. However, they are typically able to return fresher
-          # results, and are more likely to execute at the closest replica.
-          #
-          # Because the timestamp negotiation requires up-front knowledge of
-          # which rows will be read, it can only be used with single-use
-          # read-only transactions.
-          #
-          # See TransactionOptions.ReadOnly.max_staleness and
-          # TransactionOptions.ReadOnly.min_read_timestamp.
-          #
-          # ### Old Read Timestamps and Garbage Collection
-          #
-          # Cloud Spanner continuously garbage collects deleted and overwritten data
-          # in the background to reclaim storage space. This process is known
-          # as &quot;version GC&quot;. By default, version GC reclaims versions after they
-          # are one hour old. Because of this, Cloud Spanner cannot perform reads
-          # at read timestamps more than one hour in the past. This
-          # restriction also applies to in-progress reads and/or SQL queries whose
-          # timestamp become too old while executing. Reads and SQL queries with
-          # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
-          #
-          # ## Partitioned DML Transactions
-          #
-          # Partitioned DML transactions are used to execute DML statements with a
-          # different execution strategy that provides different, and often better,
-          # scalability properties for large, table-wide operations than DML in a
-          # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
-          # should prefer using ReadWrite transactions.
-          #
-          # Partitioned DML partitions the keyspace and runs the DML statement on each
-          # partition in separate, internal transactions. These transactions commit
-          # automatically when complete, and run independently from one another.
-          #
-          # To reduce lock contention, this execution strategy only acquires read locks
-          # on rows that match the WHERE clause of the statement. Additionally, the
-          # smaller per-partition transactions hold locks for less time.
-          #
-          # That said, Partitioned DML is not a drop-in replacement for standard DML used
-          # in ReadWrite transactions.
-          #
-          #  - The DML statement must be fully-partitionable. Specifically, the statement
-          #    must be expressible as the union of many statements which each access only
-          #    a single row of the table.
-          #
-          #  - The statement is not applied atomically to all rows of the table. Rather,
-          #    the statement is applied atomically to partitions of the table, in
-          #    independent transactions. Secondary index rows are updated atomically
-          #    with the base table rows.
-          #
-          #  - Partitioned DML does not guarantee exactly-once execution semantics
-          #    against a partition. The statement will be applied at least once to each
-          #    partition. It is strongly recommended that the DML statement should be
-          #    idempotent to avoid unexpected results. For instance, it is potentially
-          #    dangerous to run a statement such as
-          #    `UPDATE table SET column = column + 1` as it could be run multiple times
-          #    against some rows.
-          #
-          #  - The partitions are committed automatically - there is no support for
-          #    Commit or Rollback. If the call returns an error, or if the client issuing
-          #    the ExecuteSql call dies, it is possible that some rows had the statement
-          #    executed on them successfully. It is also possible that statement was
-          #    never executed against other rows.
-          #
-          #  - Partitioned DML transactions may only contain the execution of a single
-          #    DML statement via ExecuteSql or ExecuteStreamingSql.
-          #
-          #  - If any error is encountered during the execution of the partitioned DML
-          #    operation (for instance, a UNIQUE INDEX violation, division by zero, or a
-          #    value that cannot be stored due to schema constraints), then the
-          #    operation is stopped at that point and an error is returned. It is
-          #    possible that at this point, some partitions have been committed (or even
-          #    committed multiple times), and other partitions have not been run at all.
-          #
-          # Given the above, Partitioned DML is good fit for large, database-wide,
-          # operations that are idempotent, such as deleting old rows from a very large
-          # table.
-        &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
-            #
-            # Authorization to begin a Partitioned DML transaction requires
-            # `spanner.databases.beginPartitionedDmlTransaction` permission
-            # on the `session` resource.
-        },
-        &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
-            #
-            # Authorization to begin a read-write transaction requires
-            # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
-            # on the `session` resource.
-            # transaction type has no options.
-        },
-        &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
-            #
-            # Authorization to begin a read-only transaction requires
-            # `spanner.databases.beginReadOnlyTransaction` permission
-            # on the `session` resource.
-          &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
-              # seconds. Guarantees that all writes that have committed more
-              # than the specified number of seconds ago are visible. Because
-              # Cloud Spanner chooses the exact timestamp, this mode works even if
-              # the client&#x27;s local clock is substantially skewed from Cloud Spanner
-              # commit timestamps.
-              #
-              # Useful for reading the freshest data available at a nearby
-              # replica, while bounding the possible staleness if the local
-              # replica has fallen behind.
-              #
-              # Note that this option can only be used in single-use
-              # transactions.
-          &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
-              #
-              # This is useful for requesting fresher data than some previous
-              # read, or data that is fresh enough to observe the effects of some
-              # previously committed transaction whose timestamp is known.
-              #
-              # Note that this option can only be used in single-use transactions.
-              #
-              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-          &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
-              # are visible.
-          &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
-              # the Transaction message that describes the transaction.
-          &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
-              # old. The timestamp is chosen soon after the read is started.
-              #
-              # Guarantees that all writes that have committed more than the
-              # specified number of seconds ago are visible. Because Cloud Spanner
-              # chooses the exact timestamp, this mode works even if the client&#x27;s
-              # local clock is substantially skewed from Cloud Spanner commit
-              # timestamps.
-              #
-              # Useful for reading at nearby replicas without the distributed
-              # timestamp negotiation overhead of `max_staleness`.
-          &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
-              # reads at a specific timestamp are repeatable; the same read at
-              # the same timestamp always returns the same data. If the
-              # timestamp is in the future, the read will block until the
-              # specified timestamp, modulo the read&#x27;s deadline.
-              #
-              # Useful for large scale consistent reads such as mapreduces, or
-              # for coordinating many reads against a consistent snapshot of the
-              # data.
-              #
-              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-        },
-      },
-    },
-    &quot;paramTypes&quot;: { # It is not always possible for Cloud Spanner to infer the right SQL type
-        # from a JSON value.  For example, values of type `BYTES` and values
-        # of type `STRING` both appear in params as JSON strings.
-        # 
-        # In these cases, `param_types` can be used to specify the exact
-        # SQL type for some or all of the SQL query parameters. See the
-        # definition of Type for more information
-        # about SQL types.
-      &quot;a_key&quot;: { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a
-          # table cell or returned from an SQL query.
-        &quot;arrayElementType&quot;: # Object with schema name: Type # If code == ARRAY, then `array_element_type`
-            # is the type of the array elements.
-        &quot;code&quot;: &quot;A String&quot;, # Required. The TypeCode for this type.
-        &quot;structType&quot;: { # `StructType` defines the fields of a STRUCT type. # If code == STRUCT, then `struct_type`
-            # provides type information for the struct&#x27;s fields.
-          &quot;fields&quot;: [ # The list of fields that make up this struct. Order is
-              # significant, because values of this struct type are represented as
-              # lists, where the order of field values matches the order of
-              # fields in the StructType. In turn, the order of fields
-              # matches the order of columns in a read request, or the order of
-              # fields in the `SELECT` clause of a query.
-            { # Message representing a single field of a struct.
-              &quot;name&quot;: &quot;A String&quot;, # The name of the field. For reads, this is the column name. For
-                  # SQL queries, it is the column alias (e.g., `&quot;Word&quot;` in the
-                  # query `&quot;SELECT &#x27;hello&#x27; AS Word&quot;`), or the column name (e.g.,
-                  # `&quot;ColName&quot;` in the query `&quot;SELECT ColName FROM Table&quot;`). Some
-                  # columns might have an empty name (e.g., !&quot;SELECT
-                  # UPPER(ColName)&quot;`). Note that a query result can contain
-                  # multiple fields with the same name.
-              &quot;type&quot;: # Object with schema name: Type # The type of the field.
-            },
-          ],
-        },
-      },
-    },
   }
 
   x__xgafv: string, V1 error format.
@@ -5328,6 +5338,7 @@
     &quot;index&quot;: &quot;A String&quot;, # If non-empty, the name of an index on table. This index is
         # used instead of the table primary key when interpreting key_set
         # and sorting result rows. See key_set for further information.
+    &quot;table&quot;: &quot;A String&quot;, # Required. The name of the table in the database to be read.
     &quot;keySet&quot;: { # `KeySet` defines a collection of Cloud Spanner keys and/or key ranges. All # Required. `key_set` identifies the rows to be yielded. `key_set` names the
         # primary keys of the rows in table to be yielded, unless index
         # is present. If index is present, then key_set instead names
@@ -5430,22 +5441,22 @@
             #
             # Note that 100 is passed as the start, and 1 is passed as the end,
             # because `Key` is a descending column in the schema.
-          &quot;startOpen&quot;: [ # If the start is open, then the range excludes rows whose first
-              # `len(start_open)` key columns exactly match `start_open`.
-            &quot;&quot;,
-          ],
           &quot;endClosed&quot;: [ # If the end is closed, then the range includes all rows whose
               # first `len(end_closed)` key columns exactly match `end_closed`.
             &quot;&quot;,
           ],
-          &quot;endOpen&quot;: [ # If the end is open, then the range excludes rows whose first
-              # `len(end_open)` key columns exactly match `end_open`.
-            &quot;&quot;,
-          ],
           &quot;startClosed&quot;: [ # If the start is closed, then the range includes all rows whose
               # first `len(start_closed)` key columns exactly match `start_closed`.
             &quot;&quot;,
           ],
+          &quot;startOpen&quot;: [ # If the start is open, then the range excludes rows whose first
+              # `len(start_open)` key columns exactly match `start_open`.
+            &quot;&quot;,
+          ],
+          &quot;endOpen&quot;: [ # If the end is open, then the range excludes rows whose first
+              # `len(end_open)` key columns exactly match `end_open`.
+            &quot;&quot;,
+          ],
         },
       ],
       &quot;keys&quot;: [ # A list of specific keys. Entries in `keys` should have exactly as
@@ -5477,371 +5488,26 @@
           # option is currently 1 GiB.  This is only a hint. The actual size of each
           # partition may be smaller or larger than this size request.
     },
+    &quot;columns&quot;: [ # The columns of table to be returned for each row matching
+        # this request.
+      &quot;A String&quot;,
+    ],
     &quot;transaction&quot;: { # This message is used to select the transaction in which a # Read only snapshot transactions are supported, read/write and single use
         # transactions are not.
         # Read or
         # ExecuteSql call runs.
         #
         # See TransactionOptions for more information about transactions.
-      &quot;id&quot;: &quot;A String&quot;, # Execute the read or SQL query in a previously-started transaction.
-      &quot;begin&quot;: { # # Transactions # Begin a new transaction and execute this read or SQL query in
-          # it. The transaction ID of the new transaction is returned in
-          # ResultSetMetadata.transaction, which is a Transaction.
-          #
-          #
-          # Each session can have at most one active transaction at a time. After the
-          # active transaction is completed, the session can immediately be
-          # re-used for the next transaction. It is not necessary to create a
-          # new session for each transaction.
-          #
-          # # Transaction Modes
-          #
-          # Cloud Spanner supports three transaction modes:
-          #
-          #   1. Locking read-write. This type of transaction is the only way
-          #      to write data into Cloud Spanner. These transactions rely on
-          #      pessimistic locking and, if necessary, two-phase commit.
-          #      Locking read-write transactions may abort, requiring the
-          #      application to retry.
-          #
-          #   2. Snapshot read-only. This transaction type provides guaranteed
-          #      consistency across several reads, but does not allow
-          #      writes. Snapshot read-only transactions can be configured to
-          #      read at timestamps in the past. Snapshot read-only
-          #      transactions do not need to be committed.
-          #
-          #   3. Partitioned DML. This type of transaction is used to execute
-          #      a single Partitioned DML statement. Partitioned DML partitions
-          #      the key space and runs the DML statement over each partition
-          #      in parallel using separate, internal transactions that commit
-          #      independently. Partitioned DML transactions do not need to be
-          #      committed.
-          #
-          # For transactions that only read, snapshot read-only transactions
-          # provide simpler semantics and are almost always faster. In
-          # particular, read-only transactions do not take locks, so they do
-          # not conflict with read-write transactions. As a consequence of not
-          # taking locks, they also do not abort, so retry loops are not needed.
-          #
-          # Transactions may only read/write data in a single database. They
-          # may, however, read/write data in different tables within that
-          # database.
-          #
-          # ## Locking Read-Write Transactions
-          #
-          # Locking transactions may be used to atomically read-modify-write
-          # data anywhere in a database. This type of transaction is externally
-          # consistent.
-          #
-          # Clients should attempt to minimize the amount of time a transaction
-          # is active. Faster transactions commit with higher probability
-          # and cause less contention. Cloud Spanner attempts to keep read locks
-          # active as long as the transaction continues to do reads, and the
-          # transaction has not been terminated by
-          # Commit or
-          # Rollback.  Long periods of
-          # inactivity at the client may cause Cloud Spanner to release a
-          # transaction&#x27;s locks and abort it.
-          #
-          # Conceptually, a read-write transaction consists of zero or more
-          # reads or SQL statements followed by
-          # Commit. At any time before
-          # Commit, the client can send a
-          # Rollback request to abort the
-          # transaction.
-          #
-          # ### Semantics
-          #
-          # Cloud Spanner can commit the transaction if all read locks it acquired
-          # are still valid at commit time, and it is able to acquire write
-          # locks for all writes. Cloud Spanner can abort the transaction for any
-          # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
-          # that the transaction has not modified any user data in Cloud Spanner.
-          #
-          # Unless the transaction commits, Cloud Spanner makes no guarantees about
-          # how long the transaction&#x27;s locks were held for. It is an error to
-          # use Cloud Spanner locks for any sort of mutual exclusion other than
-          # between Cloud Spanner transactions themselves.
-          #
-          # ### Retrying Aborted Transactions
-          #
-          # When a transaction aborts, the application can choose to retry the
-          # whole transaction again. To maximize the chances of successfully
-          # committing the retry, the client should execute the retry in the
-          # same session as the original attempt. The original session&#x27;s lock
-          # priority increases with each consecutive abort, meaning that each
-          # attempt has a slightly better chance of success than the previous.
-          #
-          # Under some circumstances (e.g., many transactions attempting to
-          # modify the same row(s)), a transaction can abort many times in a
-          # short period before successfully committing. Thus, it is not a good
-          # idea to cap the number of retries a transaction can attempt;
-          # instead, it is better to limit the total amount of wall time spent
-          # retrying.
-          #
-          # ### Idle Transactions
-          #
-          # A transaction is considered idle if it has no outstanding reads or
-          # SQL queries and has not started a read or SQL query within the last 10
-          # seconds. Idle transactions can be aborted by Cloud Spanner so that they
-          # don&#x27;t hold on to locks indefinitely. In that case, the commit will
-          # fail with error `ABORTED`.
-          #
-          # If this behavior is undesirable, periodically executing a simple
-          # SQL query in the transaction (e.g., `SELECT 1`) prevents the
-          # transaction from becoming idle.
-          #
-          # ## Snapshot Read-Only Transactions
-          #
-          # Snapshot read-only transactions provides a simpler method than
-          # locking read-write transactions for doing several consistent
-          # reads. However, this type of transaction does not support writes.
-          #
-          # Snapshot transactions do not take locks. Instead, they work by
-          # choosing a Cloud Spanner timestamp, then executing all reads at that
-          # timestamp. Since they do not acquire locks, they do not block
-          # concurrent read-write transactions.
-          #
-          # Unlike locking read-write transactions, snapshot read-only
-          # transactions never abort. They can fail if the chosen read
-          # timestamp is garbage collected; however, the default garbage
-          # collection policy is generous enough that most applications do not
-          # need to worry about this in practice.
-          #
-          # Snapshot read-only transactions do not need to call
-          # Commit or
-          # Rollback (and in fact are not
-          # permitted to do so).
-          #
-          # To execute a snapshot transaction, the client specifies a timestamp
-          # bound, which tells Cloud Spanner how to choose a read timestamp.
-          #
-          # The types of timestamp bound are:
-          #
-          #   - Strong (the default).
-          #   - Bounded staleness.
-          #   - Exact staleness.
-          #
-          # If the Cloud Spanner database to be read is geographically distributed,
-          # stale read-only transactions can execute more quickly than strong
-          # or read-write transaction, because they are able to execute far
-          # from the leader replica.
-          #
-          # Each type of timestamp bound is discussed in detail below.
-          #
-          # ### Strong
-          #
-          # Strong reads are guaranteed to see the effects of all transactions
-          # that have committed before the start of the read. Furthermore, all
-          # rows yielded by a single read are consistent with each other -- if
-          # any part of the read observes a transaction, all parts of the read
-          # see the transaction.
-          #
-          # Strong reads are not repeatable: two consecutive strong read-only
-          # transactions might return inconsistent results if there are
-          # concurrent writes. If consistency across reads is required, the
-          # reads should be executed within a transaction or at an exact read
-          # timestamp.
-          #
-          # See TransactionOptions.ReadOnly.strong.
-          #
-          # ### Exact Staleness
-          #
-          # These timestamp bounds execute reads at a user-specified
-          # timestamp. Reads at a timestamp are guaranteed to see a consistent
-          # prefix of the global transaction history: they observe
-          # modifications done by all transactions with a commit timestamp &lt;=
-          # the read timestamp, and observe none of the modifications done by
-          # transactions with a larger commit timestamp. They will block until
-          # all conflicting transactions that may be assigned commit timestamps
-          # &lt;= the read timestamp have finished.
-          #
-          # The timestamp can either be expressed as an absolute Cloud Spanner commit
-          # timestamp or a staleness relative to the current time.
-          #
-          # These modes do not require a &quot;negotiation phase&quot; to pick a
-          # timestamp. As a result, they execute slightly faster than the
-          # equivalent boundedly stale concurrency modes. On the other hand,
-          # boundedly stale reads usually return fresher results.
-          #
-          # See TransactionOptions.ReadOnly.read_timestamp and
-          # TransactionOptions.ReadOnly.exact_staleness.
-          #
-          # ### Bounded Staleness
-          #
-          # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
-          # subject to a user-provided staleness bound. Cloud Spanner chooses the
-          # newest timestamp within the staleness bound that allows execution
-          # of the reads at the closest available replica without blocking.
-          #
-          # All rows yielded are consistent with each other -- if any part of
-          # the read observes a transaction, all parts of the read see the
-          # transaction. Boundedly stale reads are not repeatable: two stale
-          # reads, even if they use the same staleness bound, can execute at
-          # different timestamps and thus return inconsistent results.
-          #
-          # Boundedly stale reads execute in two phases: the first phase
-          # negotiates a timestamp among all replicas needed to serve the
-          # read. In the second phase, reads are executed at the negotiated
-          # timestamp.
-          #
-          # As a result of the two phase execution, bounded staleness reads are
-          # usually a little slower than comparable exact staleness
-          # reads. However, they are typically able to return fresher
-          # results, and are more likely to execute at the closest replica.
-          #
-          # Because the timestamp negotiation requires up-front knowledge of
-          # which rows will be read, it can only be used with single-use
-          # read-only transactions.
-          #
-          # See TransactionOptions.ReadOnly.max_staleness and
-          # TransactionOptions.ReadOnly.min_read_timestamp.
-          #
-          # ### Old Read Timestamps and Garbage Collection
-          #
-          # Cloud Spanner continuously garbage collects deleted and overwritten data
-          # in the background to reclaim storage space. This process is known
-          # as &quot;version GC&quot;. By default, version GC reclaims versions after they
-          # are one hour old. Because of this, Cloud Spanner cannot perform reads
-          # at read timestamps more than one hour in the past. This
-          # restriction also applies to in-progress reads and/or SQL queries whose
-          # timestamp become too old while executing. Reads and SQL queries with
-          # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
-          #
-          # ## Partitioned DML Transactions
-          #
-          # Partitioned DML transactions are used to execute DML statements with a
-          # different execution strategy that provides different, and often better,
-          # scalability properties for large, table-wide operations than DML in a
-          # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
-          # should prefer using ReadWrite transactions.
-          #
-          # Partitioned DML partitions the keyspace and runs the DML statement on each
-          # partition in separate, internal transactions. These transactions commit
-          # automatically when complete, and run independently from one another.
-          #
-          # To reduce lock contention, this execution strategy only acquires read locks
-          # on rows that match the WHERE clause of the statement. Additionally, the
-          # smaller per-partition transactions hold locks for less time.
-          #
-          # That said, Partitioned DML is not a drop-in replacement for standard DML used
-          # in ReadWrite transactions.
-          #
-          #  - The DML statement must be fully-partitionable. Specifically, the statement
-          #    must be expressible as the union of many statements which each access only
-          #    a single row of the table.
-          #
-          #  - The statement is not applied atomically to all rows of the table. Rather,
-          #    the statement is applied atomically to partitions of the table, in
-          #    independent transactions. Secondary index rows are updated atomically
-          #    with the base table rows.
-          #
-          #  - Partitioned DML does not guarantee exactly-once execution semantics
-          #    against a partition. The statement will be applied at least once to each
-          #    partition. It is strongly recommended that the DML statement should be
-          #    idempotent to avoid unexpected results. For instance, it is potentially
-          #    dangerous to run a statement such as
-          #    `UPDATE table SET column = column + 1` as it could be run multiple times
-          #    against some rows.
-          #
-          #  - The partitions are committed automatically - there is no support for
-          #    Commit or Rollback. If the call returns an error, or if the client issuing
-          #    the ExecuteSql call dies, it is possible that some rows had the statement
-          #    executed on them successfully. It is also possible that statement was
-          #    never executed against other rows.
-          #
-          #  - Partitioned DML transactions may only contain the execution of a single
-          #    DML statement via ExecuteSql or ExecuteStreamingSql.
-          #
-          #  - If any error is encountered during the execution of the partitioned DML
-          #    operation (for instance, a UNIQUE INDEX violation, division by zero, or a
-          #    value that cannot be stored due to schema constraints), then the
-          #    operation is stopped at that point and an error is returned. It is
-          #    possible that at this point, some partitions have been committed (or even
-          #    committed multiple times), and other partitions have not been run at all.
-          #
-          # Given the above, Partitioned DML is good fit for large, database-wide,
-          # operations that are idempotent, such as deleting old rows from a very large
-          # table.
-        &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
-            #
-            # Authorization to begin a Partitioned DML transaction requires
-            # `spanner.databases.beginPartitionedDmlTransaction` permission
-            # on the `session` resource.
-        },
-        &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
-            #
-            # Authorization to begin a read-write transaction requires
-            # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
-            # on the `session` resource.
-            # transaction type has no options.
-        },
-        &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
-            #
-            # Authorization to begin a read-only transaction requires
-            # `spanner.databases.beginReadOnlyTransaction` permission
-            # on the `session` resource.
-          &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
-              # seconds. Guarantees that all writes that have committed more
-              # than the specified number of seconds ago are visible. Because
-              # Cloud Spanner chooses the exact timestamp, this mode works even if
-              # the client&#x27;s local clock is substantially skewed from Cloud Spanner
-              # commit timestamps.
-              #
-              # Useful for reading the freshest data available at a nearby
-              # replica, while bounding the possible staleness if the local
-              # replica has fallen behind.
-              #
-              # Note that this option can only be used in single-use
-              # transactions.
-          &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
-              #
-              # This is useful for requesting fresher data than some previous
-              # read, or data that is fresh enough to observe the effects of some
-              # previously committed transaction whose timestamp is known.
-              #
-              # Note that this option can only be used in single-use transactions.
-              #
-              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-          &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
-              # are visible.
-          &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
-              # the Transaction message that describes the transaction.
-          &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
-              # old. The timestamp is chosen soon after the read is started.
-              #
-              # Guarantees that all writes that have committed more than the
-              # specified number of seconds ago are visible. Because Cloud Spanner
-              # chooses the exact timestamp, this mode works even if the client&#x27;s
-              # local clock is substantially skewed from Cloud Spanner commit
-              # timestamps.
-              #
-              # Useful for reading at nearby replicas without the distributed
-              # timestamp negotiation overhead of `max_staleness`.
-          &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
-              # reads at a specific timestamp are repeatable; the same read at
-              # the same timestamp always returns the same data. If the
-              # timestamp is in the future, the read will block until the
-              # specified timestamp, modulo the read&#x27;s deadline.
-              #
-              # Useful for large scale consistent reads such as mapreduces, or
-              # for coordinating many reads against a consistent snapshot of the
-              # data.
-              #
-              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-        },
-      },
       &quot;singleUse&quot;: { # # Transactions # Execute the read or SQL query in a temporary transaction.
           # This is the most efficient way to execute a transaction that
           # consists of a single SQL query.
           #
           #
-          # Each session can have at most one active transaction at a time. After the
-          # active transaction is completed, the session can immediately be
-          # re-used for the next transaction. It is not necessary to create a
-          # new session for each transaction.
+          # Each session can have at most one active transaction at a time (note that
+          # standalone reads and queries use a transaction internally and do count
+          # towards the one transaction limit). After the active transaction is
+          # completed, the session can immediately be re-used for the next transaction.
+          # It is not necessary to create a new session for each transaction.
           #
           # # Transaction Modes
           #
@@ -6112,12 +5778,6 @@
           # Given the above, Partitioned DML is good fit for large, database-wide,
           # operations that are idempotent, such as deleting old rows from a very large
           # table.
-        &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
-            #
-            # Authorization to begin a Partitioned DML transaction requires
-            # `spanner.databases.beginPartitionedDmlTransaction` permission
-            # on the `session` resource.
-        },
         &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
             #
             # Authorization to begin a read-write transaction requires
@@ -6130,6 +5790,39 @@
             # Authorization to begin a read-only transaction requires
             # `spanner.databases.beginReadOnlyTransaction` permission
             # on the `session` resource.
+          &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
+              # reads at a specific timestamp are repeatable; the same read at
+              # the same timestamp always returns the same data. If the
+              # timestamp is in the future, the read will block until the
+              # specified timestamp, modulo the read&#x27;s deadline.
+              #
+              # Useful for large scale consistent reads such as mapreduces, or
+              # for coordinating many reads against a consistent snapshot of the
+              # data.
+              #
+              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+          &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
+              #
+              # This is useful for requesting fresher data than some previous
+              # read, or data that is fresh enough to observe the effects of some
+              # previously committed transaction whose timestamp is known.
+              #
+              # Note that this option can only be used in single-use transactions.
+              #
+              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+          &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
+              # old. The timestamp is chosen soon after the read is started.
+              #
+              # Guarantees that all writes that have committed more than the
+              # specified number of seconds ago are visible. Because Cloud Spanner
+              # chooses the exact timestamp, this mode works even if the client&#x27;s
+              # local clock is substantially skewed from Cloud Spanner commit
+              # timestamps.
+              #
+              # Useful for reading at nearby replicas without the distributed
+              # timestamp negotiation overhead of `max_staleness`.
           &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
               # seconds. Guarantees that all writes that have committed more
               # than the specified number of seconds ago are visible. Because
@@ -6143,31 +5836,310 @@
               #
               # Note that this option can only be used in single-use
               # transactions.
-          &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
-              #
-              # This is useful for requesting fresher data than some previous
-              # read, or data that is fresh enough to observe the effects of some
-              # previously committed transaction whose timestamp is known.
-              #
-              # Note that this option can only be used in single-use transactions.
-              #
-              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-          &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
-              # are visible.
           &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
               # the Transaction message that describes the transaction.
-          &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
-              # old. The timestamp is chosen soon after the read is started.
-              #
-              # Guarantees that all writes that have committed more than the
-              # specified number of seconds ago are visible. Because Cloud Spanner
-              # chooses the exact timestamp, this mode works even if the client&#x27;s
-              # local clock is substantially skewed from Cloud Spanner commit
-              # timestamps.
-              #
-              # Useful for reading at nearby replicas without the distributed
-              # timestamp negotiation overhead of `max_staleness`.
+          &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
+              # are visible.
+        },
+        &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
+            #
+            # Authorization to begin a Partitioned DML transaction requires
+            # `spanner.databases.beginPartitionedDmlTransaction` permission
+            # on the `session` resource.
+        },
+      },
+      &quot;begin&quot;: { # # Transactions # Begin a new transaction and execute this read or SQL query in
+          # it. The transaction ID of the new transaction is returned in
+          # ResultSetMetadata.transaction, which is a Transaction.
+          #
+          #
+          # Each session can have at most one active transaction at a time (note that
+          # standalone reads and queries use a transaction internally and do count
+          # towards the one transaction limit). After the active transaction is
+          # completed, the session can immediately be re-used for the next transaction.
+          # It is not necessary to create a new session for each transaction.
+          #
+          # # Transaction Modes
+          #
+          # Cloud Spanner supports three transaction modes:
+          #
+          #   1. Locking read-write. This type of transaction is the only way
+          #      to write data into Cloud Spanner. These transactions rely on
+          #      pessimistic locking and, if necessary, two-phase commit.
+          #      Locking read-write transactions may abort, requiring the
+          #      application to retry.
+          #
+          #   2. Snapshot read-only. This transaction type provides guaranteed
+          #      consistency across several reads, but does not allow
+          #      writes. Snapshot read-only transactions can be configured to
+          #      read at timestamps in the past. Snapshot read-only
+          #      transactions do not need to be committed.
+          #
+          #   3. Partitioned DML. This type of transaction is used to execute
+          #      a single Partitioned DML statement. Partitioned DML partitions
+          #      the key space and runs the DML statement over each partition
+          #      in parallel using separate, internal transactions that commit
+          #      independently. Partitioned DML transactions do not need to be
+          #      committed.
+          #
+          # For transactions that only read, snapshot read-only transactions
+          # provide simpler semantics and are almost always faster. In
+          # particular, read-only transactions do not take locks, so they do
+          # not conflict with read-write transactions. As a consequence of not
+          # taking locks, they also do not abort, so retry loops are not needed.
+          #
+          # Transactions may only read/write data in a single database. They
+          # may, however, read/write data in different tables within that
+          # database.
+          #
+          # ## Locking Read-Write Transactions
+          #
+          # Locking transactions may be used to atomically read-modify-write
+          # data anywhere in a database. This type of transaction is externally
+          # consistent.
+          #
+          # Clients should attempt to minimize the amount of time a transaction
+          # is active. Faster transactions commit with higher probability
+          # and cause less contention. Cloud Spanner attempts to keep read locks
+          # active as long as the transaction continues to do reads, and the
+          # transaction has not been terminated by
+          # Commit or
+          # Rollback.  Long periods of
+          # inactivity at the client may cause Cloud Spanner to release a
+          # transaction&#x27;s locks and abort it.
+          #
+          # Conceptually, a read-write transaction consists of zero or more
+          # reads or SQL statements followed by
+          # Commit. At any time before
+          # Commit, the client can send a
+          # Rollback request to abort the
+          # transaction.
+          #
+          # ### Semantics
+          #
+          # Cloud Spanner can commit the transaction if all read locks it acquired
+          # are still valid at commit time, and it is able to acquire write
+          # locks for all writes. Cloud Spanner can abort the transaction for any
+          # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
+          # that the transaction has not modified any user data in Cloud Spanner.
+          #
+          # Unless the transaction commits, Cloud Spanner makes no guarantees about
+          # how long the transaction&#x27;s locks were held for. It is an error to
+          # use Cloud Spanner locks for any sort of mutual exclusion other than
+          # between Cloud Spanner transactions themselves.
+          #
+          # ### Retrying Aborted Transactions
+          #
+          # When a transaction aborts, the application can choose to retry the
+          # whole transaction again. To maximize the chances of successfully
+          # committing the retry, the client should execute the retry in the
+          # same session as the original attempt. The original session&#x27;s lock
+          # priority increases with each consecutive abort, meaning that each
+          # attempt has a slightly better chance of success than the previous.
+          #
+          # Under some circumstances (e.g., many transactions attempting to
+          # modify the same row(s)), a transaction can abort many times in a
+          # short period before successfully committing. Thus, it is not a good
+          # idea to cap the number of retries a transaction can attempt;
+          # instead, it is better to limit the total amount of wall time spent
+          # retrying.
+          #
+          # ### Idle Transactions
+          #
+          # A transaction is considered idle if it has no outstanding reads or
+          # SQL queries and has not started a read or SQL query within the last 10
+          # seconds. Idle transactions can be aborted by Cloud Spanner so that they
+          # don&#x27;t hold on to locks indefinitely. In that case, the commit will
+          # fail with error `ABORTED`.
+          #
+          # If this behavior is undesirable, periodically executing a simple
+          # SQL query in the transaction (e.g., `SELECT 1`) prevents the
+          # transaction from becoming idle.
+          #
+          # ## Snapshot Read-Only Transactions
+          #
+          # Snapshot read-only transactions provides a simpler method than
+          # locking read-write transactions for doing several consistent
+          # reads. However, this type of transaction does not support writes.
+          #
+          # Snapshot transactions do not take locks. Instead, they work by
+          # choosing a Cloud Spanner timestamp, then executing all reads at that
+          # timestamp. Since they do not acquire locks, they do not block
+          # concurrent read-write transactions.
+          #
+          # Unlike locking read-write transactions, snapshot read-only
+          # transactions never abort. They can fail if the chosen read
+          # timestamp is garbage collected; however, the default garbage
+          # collection policy is generous enough that most applications do not
+          # need to worry about this in practice.
+          #
+          # Snapshot read-only transactions do not need to call
+          # Commit or
+          # Rollback (and in fact are not
+          # permitted to do so).
+          #
+          # To execute a snapshot transaction, the client specifies a timestamp
+          # bound, which tells Cloud Spanner how to choose a read timestamp.
+          #
+          # The types of timestamp bound are:
+          #
+          #   - Strong (the default).
+          #   - Bounded staleness.
+          #   - Exact staleness.
+          #
+          # If the Cloud Spanner database to be read is geographically distributed,
+          # stale read-only transactions can execute more quickly than strong
+          # or read-write transaction, because they are able to execute far
+          # from the leader replica.
+          #
+          # Each type of timestamp bound is discussed in detail below.
+          #
+          # ### Strong
+          #
+          # Strong reads are guaranteed to see the effects of all transactions
+          # that have committed before the start of the read. Furthermore, all
+          # rows yielded by a single read are consistent with each other -- if
+          # any part of the read observes a transaction, all parts of the read
+          # see the transaction.
+          #
+          # Strong reads are not repeatable: two consecutive strong read-only
+          # transactions might return inconsistent results if there are
+          # concurrent writes. If consistency across reads is required, the
+          # reads should be executed within a transaction or at an exact read
+          # timestamp.
+          #
+          # See TransactionOptions.ReadOnly.strong.
+          #
+          # ### Exact Staleness
+          #
+          # These timestamp bounds execute reads at a user-specified
+          # timestamp. Reads at a timestamp are guaranteed to see a consistent
+          # prefix of the global transaction history: they observe
+          # modifications done by all transactions with a commit timestamp &lt;=
+          # the read timestamp, and observe none of the modifications done by
+          # transactions with a larger commit timestamp. They will block until
+          # all conflicting transactions that may be assigned commit timestamps
+          # &lt;= the read timestamp have finished.
+          #
+          # The timestamp can either be expressed as an absolute Cloud Spanner commit
+          # timestamp or a staleness relative to the current time.
+          #
+          # These modes do not require a &quot;negotiation phase&quot; to pick a
+          # timestamp. As a result, they execute slightly faster than the
+          # equivalent boundedly stale concurrency modes. On the other hand,
+          # boundedly stale reads usually return fresher results.
+          #
+          # See TransactionOptions.ReadOnly.read_timestamp and
+          # TransactionOptions.ReadOnly.exact_staleness.
+          #
+          # ### Bounded Staleness
+          #
+          # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
+          # subject to a user-provided staleness bound. Cloud Spanner chooses the
+          # newest timestamp within the staleness bound that allows execution
+          # of the reads at the closest available replica without blocking.
+          #
+          # All rows yielded are consistent with each other -- if any part of
+          # the read observes a transaction, all parts of the read see the
+          # transaction. Boundedly stale reads are not repeatable: two stale
+          # reads, even if they use the same staleness bound, can execute at
+          # different timestamps and thus return inconsistent results.
+          #
+          # Boundedly stale reads execute in two phases: the first phase
+          # negotiates a timestamp among all replicas needed to serve the
+          # read. In the second phase, reads are executed at the negotiated
+          # timestamp.
+          #
+          # As a result of the two phase execution, bounded staleness reads are
+          # usually a little slower than comparable exact staleness
+          # reads. However, they are typically able to return fresher
+          # results, and are more likely to execute at the closest replica.
+          #
+          # Because the timestamp negotiation requires up-front knowledge of
+          # which rows will be read, it can only be used with single-use
+          # read-only transactions.
+          #
+          # See TransactionOptions.ReadOnly.max_staleness and
+          # TransactionOptions.ReadOnly.min_read_timestamp.
+          #
+          # ### Old Read Timestamps and Garbage Collection
+          #
+          # Cloud Spanner continuously garbage collects deleted and overwritten data
+          # in the background to reclaim storage space. This process is known
+          # as &quot;version GC&quot;. By default, version GC reclaims versions after they
+          # are one hour old. Because of this, Cloud Spanner cannot perform reads
+          # at read timestamps more than one hour in the past. This
+          # restriction also applies to in-progress reads and/or SQL queries whose
+          # timestamp become too old while executing. Reads and SQL queries with
+          # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
+          #
+          # ## Partitioned DML Transactions
+          #
+          # Partitioned DML transactions are used to execute DML statements with a
+          # different execution strategy that provides different, and often better,
+          # scalability properties for large, table-wide operations than DML in a
+          # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
+          # should prefer using ReadWrite transactions.
+          #
+          # Partitioned DML partitions the keyspace and runs the DML statement on each
+          # partition in separate, internal transactions. These transactions commit
+          # automatically when complete, and run independently from one another.
+          #
+          # To reduce lock contention, this execution strategy only acquires read locks
+          # on rows that match the WHERE clause of the statement. Additionally, the
+          # smaller per-partition transactions hold locks for less time.
+          #
+          # That said, Partitioned DML is not a drop-in replacement for standard DML used
+          # in ReadWrite transactions.
+          #
+          #  - The DML statement must be fully-partitionable. Specifically, the statement
+          #    must be expressible as the union of many statements which each access only
+          #    a single row of the table.
+          #
+          #  - The statement is not applied atomically to all rows of the table. Rather,
+          #    the statement is applied atomically to partitions of the table, in
+          #    independent transactions. Secondary index rows are updated atomically
+          #    with the base table rows.
+          #
+          #  - Partitioned DML does not guarantee exactly-once execution semantics
+          #    against a partition. The statement will be applied at least once to each
+          #    partition. It is strongly recommended that the DML statement should be
+          #    idempotent to avoid unexpected results. For instance, it is potentially
+          #    dangerous to run a statement such as
+          #    `UPDATE table SET column = column + 1` as it could be run multiple times
+          #    against some rows.
+          #
+          #  - The partitions are committed automatically - there is no support for
+          #    Commit or Rollback. If the call returns an error, or if the client issuing
+          #    the ExecuteSql call dies, it is possible that some rows had the statement
+          #    executed on them successfully. It is also possible that statement was
+          #    never executed against other rows.
+          #
+          #  - Partitioned DML transactions may only contain the execution of a single
+          #    DML statement via ExecuteSql or ExecuteStreamingSql.
+          #
+          #  - If any error is encountered during the execution of the partitioned DML
+          #    operation (for instance, a UNIQUE INDEX violation, division by zero, or a
+          #    value that cannot be stored due to schema constraints), then the
+          #    operation is stopped at that point and an error is returned. It is
+          #    possible that at this point, some partitions have been committed (or even
+          #    committed multiple times), and other partitions have not been run at all.
+          #
+          # Given the above, Partitioned DML is good fit for large, database-wide,
+          # operations that are idempotent, such as deleting old rows from a very large
+          # table.
+        &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
+            #
+            # Authorization to begin a read-write transaction requires
+            # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
+            # on the `session` resource.
+            # transaction type has no options.
+        },
+        &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
+            #
+            # Authorization to begin a read-only transaction requires
+            # `spanner.databases.beginReadOnlyTransaction` permission
+            # on the `session` resource.
           &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
               # reads at a specific timestamp are repeatable; the same read at
               # the same timestamp always returns the same data. If the
@@ -6180,14 +6152,54 @@
               #
               # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
               # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+          &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
+              #
+              # This is useful for requesting fresher data than some previous
+              # read, or data that is fresh enough to observe the effects of some
+              # previously committed transaction whose timestamp is known.
+              #
+              # Note that this option can only be used in single-use transactions.
+              #
+              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+          &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
+              # old. The timestamp is chosen soon after the read is started.
+              #
+              # Guarantees that all writes that have committed more than the
+              # specified number of seconds ago are visible. Because Cloud Spanner
+              # chooses the exact timestamp, this mode works even if the client&#x27;s
+              # local clock is substantially skewed from Cloud Spanner commit
+              # timestamps.
+              #
+              # Useful for reading at nearby replicas without the distributed
+              # timestamp negotiation overhead of `max_staleness`.
+          &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
+              # seconds. Guarantees that all writes that have committed more
+              # than the specified number of seconds ago are visible. Because
+              # Cloud Spanner chooses the exact timestamp, this mode works even if
+              # the client&#x27;s local clock is substantially skewed from Cloud Spanner
+              # commit timestamps.
+              #
+              # Useful for reading the freshest data available at a nearby
+              # replica, while bounding the possible staleness if the local
+              # replica has fallen behind.
+              #
+              # Note that this option can only be used in single-use
+              # transactions.
+          &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
+              # the Transaction message that describes the transaction.
+          &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
+              # are visible.
+        },
+        &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
+            #
+            # Authorization to begin a Partitioned DML transaction requires
+            # `spanner.databases.beginPartitionedDmlTransaction` permission
+            # on the `session` resource.
         },
       },
+      &quot;id&quot;: &quot;A String&quot;, # Execute the read or SQL query in a previously-started transaction.
     },
-    &quot;columns&quot;: [ # The columns of table to be returned for each row matching
-        # this request.
-      &quot;A String&quot;,
-    ],
-    &quot;table&quot;: &quot;A String&quot;, # Required. The name of the table in the database to be read.
   }
 
   x__xgafv: string, V1 error format.
@@ -6250,716 +6262,12 @@
 
 { # The request for Read and
       # StreamingRead.
-    &quot;transaction&quot;: { # This message is used to select the transaction in which a # The transaction to use. If none is provided, the default is a
-        # temporary read-only transaction with strong concurrency.
-        # Read or
-        # ExecuteSql call runs.
-        #
-        # See TransactionOptions for more information about transactions.
-      &quot;id&quot;: &quot;A String&quot;, # Execute the read or SQL query in a previously-started transaction.
-      &quot;begin&quot;: { # # Transactions # Begin a new transaction and execute this read or SQL query in
-          # it. The transaction ID of the new transaction is returned in
-          # ResultSetMetadata.transaction, which is a Transaction.
-          #
-          #
-          # Each session can have at most one active transaction at a time. After the
-          # active transaction is completed, the session can immediately be
-          # re-used for the next transaction. It is not necessary to create a
-          # new session for each transaction.
-          #
-          # # Transaction Modes
-          #
-          # Cloud Spanner supports three transaction modes:
-          #
-          #   1. Locking read-write. This type of transaction is the only way
-          #      to write data into Cloud Spanner. These transactions rely on
-          #      pessimistic locking and, if necessary, two-phase commit.
-          #      Locking read-write transactions may abort, requiring the
-          #      application to retry.
-          #
-          #   2. Snapshot read-only. This transaction type provides guaranteed
-          #      consistency across several reads, but does not allow
-          #      writes. Snapshot read-only transactions can be configured to
-          #      read at timestamps in the past. Snapshot read-only
-          #      transactions do not need to be committed.
-          #
-          #   3. Partitioned DML. This type of transaction is used to execute
-          #      a single Partitioned DML statement. Partitioned DML partitions
-          #      the key space and runs the DML statement over each partition
-          #      in parallel using separate, internal transactions that commit
-          #      independently. Partitioned DML transactions do not need to be
-          #      committed.
-          #
-          # For transactions that only read, snapshot read-only transactions
-          # provide simpler semantics and are almost always faster. In
-          # particular, read-only transactions do not take locks, so they do
-          # not conflict with read-write transactions. As a consequence of not
-          # taking locks, they also do not abort, so retry loops are not needed.
-          #
-          # Transactions may only read/write data in a single database. They
-          # may, however, read/write data in different tables within that
-          # database.
-          #
-          # ## Locking Read-Write Transactions
-          #
-          # Locking transactions may be used to atomically read-modify-write
-          # data anywhere in a database. This type of transaction is externally
-          # consistent.
-          #
-          # Clients should attempt to minimize the amount of time a transaction
-          # is active. Faster transactions commit with higher probability
-          # and cause less contention. Cloud Spanner attempts to keep read locks
-          # active as long as the transaction continues to do reads, and the
-          # transaction has not been terminated by
-          # Commit or
-          # Rollback.  Long periods of
-          # inactivity at the client may cause Cloud Spanner to release a
-          # transaction&#x27;s locks and abort it.
-          #
-          # Conceptually, a read-write transaction consists of zero or more
-          # reads or SQL statements followed by
-          # Commit. At any time before
-          # Commit, the client can send a
-          # Rollback request to abort the
-          # transaction.
-          #
-          # ### Semantics
-          #
-          # Cloud Spanner can commit the transaction if all read locks it acquired
-          # are still valid at commit time, and it is able to acquire write
-          # locks for all writes. Cloud Spanner can abort the transaction for any
-          # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
-          # that the transaction has not modified any user data in Cloud Spanner.
-          #
-          # Unless the transaction commits, Cloud Spanner makes no guarantees about
-          # how long the transaction&#x27;s locks were held for. It is an error to
-          # use Cloud Spanner locks for any sort of mutual exclusion other than
-          # between Cloud Spanner transactions themselves.
-          #
-          # ### Retrying Aborted Transactions
-          #
-          # When a transaction aborts, the application can choose to retry the
-          # whole transaction again. To maximize the chances of successfully
-          # committing the retry, the client should execute the retry in the
-          # same session as the original attempt. The original session&#x27;s lock
-          # priority increases with each consecutive abort, meaning that each
-          # attempt has a slightly better chance of success than the previous.
-          #
-          # Under some circumstances (e.g., many transactions attempting to
-          # modify the same row(s)), a transaction can abort many times in a
-          # short period before successfully committing. Thus, it is not a good
-          # idea to cap the number of retries a transaction can attempt;
-          # instead, it is better to limit the total amount of wall time spent
-          # retrying.
-          #
-          # ### Idle Transactions
-          #
-          # A transaction is considered idle if it has no outstanding reads or
-          # SQL queries and has not started a read or SQL query within the last 10
-          # seconds. Idle transactions can be aborted by Cloud Spanner so that they
-          # don&#x27;t hold on to locks indefinitely. In that case, the commit will
-          # fail with error `ABORTED`.
-          #
-          # If this behavior is undesirable, periodically executing a simple
-          # SQL query in the transaction (e.g., `SELECT 1`) prevents the
-          # transaction from becoming idle.
-          #
-          # ## Snapshot Read-Only Transactions
-          #
-          # Snapshot read-only transactions provides a simpler method than
-          # locking read-write transactions for doing several consistent
-          # reads. However, this type of transaction does not support writes.
-          #
-          # Snapshot transactions do not take locks. Instead, they work by
-          # choosing a Cloud Spanner timestamp, then executing all reads at that
-          # timestamp. Since they do not acquire locks, they do not block
-          # concurrent read-write transactions.
-          #
-          # Unlike locking read-write transactions, snapshot read-only
-          # transactions never abort. They can fail if the chosen read
-          # timestamp is garbage collected; however, the default garbage
-          # collection policy is generous enough that most applications do not
-          # need to worry about this in practice.
-          #
-          # Snapshot read-only transactions do not need to call
-          # Commit or
-          # Rollback (and in fact are not
-          # permitted to do so).
-          #
-          # To execute a snapshot transaction, the client specifies a timestamp
-          # bound, which tells Cloud Spanner how to choose a read timestamp.
-          #
-          # The types of timestamp bound are:
-          #
-          #   - Strong (the default).
-          #   - Bounded staleness.
-          #   - Exact staleness.
-          #
-          # If the Cloud Spanner database to be read is geographically distributed,
-          # stale read-only transactions can execute more quickly than strong
-          # or read-write transaction, because they are able to execute far
-          # from the leader replica.
-          #
-          # Each type of timestamp bound is discussed in detail below.
-          #
-          # ### Strong
-          #
-          # Strong reads are guaranteed to see the effects of all transactions
-          # that have committed before the start of the read. Furthermore, all
-          # rows yielded by a single read are consistent with each other -- if
-          # any part of the read observes a transaction, all parts of the read
-          # see the transaction.
-          #
-          # Strong reads are not repeatable: two consecutive strong read-only
-          # transactions might return inconsistent results if there are
-          # concurrent writes. If consistency across reads is required, the
-          # reads should be executed within a transaction or at an exact read
-          # timestamp.
-          #
-          # See TransactionOptions.ReadOnly.strong.
-          #
-          # ### Exact Staleness
-          #
-          # These timestamp bounds execute reads at a user-specified
-          # timestamp. Reads at a timestamp are guaranteed to see a consistent
-          # prefix of the global transaction history: they observe
-          # modifications done by all transactions with a commit timestamp &lt;=
-          # the read timestamp, and observe none of the modifications done by
-          # transactions with a larger commit timestamp. They will block until
-          # all conflicting transactions that may be assigned commit timestamps
-          # &lt;= the read timestamp have finished.
-          #
-          # The timestamp can either be expressed as an absolute Cloud Spanner commit
-          # timestamp or a staleness relative to the current time.
-          #
-          # These modes do not require a &quot;negotiation phase&quot; to pick a
-          # timestamp. As a result, they execute slightly faster than the
-          # equivalent boundedly stale concurrency modes. On the other hand,
-          # boundedly stale reads usually return fresher results.
-          #
-          # See TransactionOptions.ReadOnly.read_timestamp and
-          # TransactionOptions.ReadOnly.exact_staleness.
-          #
-          # ### Bounded Staleness
-          #
-          # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
-          # subject to a user-provided staleness bound. Cloud Spanner chooses the
-          # newest timestamp within the staleness bound that allows execution
-          # of the reads at the closest available replica without blocking.
-          #
-          # All rows yielded are consistent with each other -- if any part of
-          # the read observes a transaction, all parts of the read see the
-          # transaction. Boundedly stale reads are not repeatable: two stale
-          # reads, even if they use the same staleness bound, can execute at
-          # different timestamps and thus return inconsistent results.
-          #
-          # Boundedly stale reads execute in two phases: the first phase
-          # negotiates a timestamp among all replicas needed to serve the
-          # read. In the second phase, reads are executed at the negotiated
-          # timestamp.
-          #
-          # As a result of the two phase execution, bounded staleness reads are
-          # usually a little slower than comparable exact staleness
-          # reads. However, they are typically able to return fresher
-          # results, and are more likely to execute at the closest replica.
-          #
-          # Because the timestamp negotiation requires up-front knowledge of
-          # which rows will be read, it can only be used with single-use
-          # read-only transactions.
-          #
-          # See TransactionOptions.ReadOnly.max_staleness and
-          # TransactionOptions.ReadOnly.min_read_timestamp.
-          #
-          # ### Old Read Timestamps and Garbage Collection
-          #
-          # Cloud Spanner continuously garbage collects deleted and overwritten data
-          # in the background to reclaim storage space. This process is known
-          # as &quot;version GC&quot;. By default, version GC reclaims versions after they
-          # are one hour old. Because of this, Cloud Spanner cannot perform reads
-          # at read timestamps more than one hour in the past. This
-          # restriction also applies to in-progress reads and/or SQL queries whose
-          # timestamp become too old while executing. Reads and SQL queries with
-          # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
-          #
-          # ## Partitioned DML Transactions
-          #
-          # Partitioned DML transactions are used to execute DML statements with a
-          # different execution strategy that provides different, and often better,
-          # scalability properties for large, table-wide operations than DML in a
-          # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
-          # should prefer using ReadWrite transactions.
-          #
-          # Partitioned DML partitions the keyspace and runs the DML statement on each
-          # partition in separate, internal transactions. These transactions commit
-          # automatically when complete, and run independently from one another.
-          #
-          # To reduce lock contention, this execution strategy only acquires read locks
-          # on rows that match the WHERE clause of the statement. Additionally, the
-          # smaller per-partition transactions hold locks for less time.
-          #
-          # That said, Partitioned DML is not a drop-in replacement for standard DML used
-          # in ReadWrite transactions.
-          #
-          #  - The DML statement must be fully-partitionable. Specifically, the statement
-          #    must be expressible as the union of many statements which each access only
-          #    a single row of the table.
-          #
-          #  - The statement is not applied atomically to all rows of the table. Rather,
-          #    the statement is applied atomically to partitions of the table, in
-          #    independent transactions. Secondary index rows are updated atomically
-          #    with the base table rows.
-          #
-          #  - Partitioned DML does not guarantee exactly-once execution semantics
-          #    against a partition. The statement will be applied at least once to each
-          #    partition. It is strongly recommended that the DML statement should be
-          #    idempotent to avoid unexpected results. For instance, it is potentially
-          #    dangerous to run a statement such as
-          #    `UPDATE table SET column = column + 1` as it could be run multiple times
-          #    against some rows.
-          #
-          #  - The partitions are committed automatically - there is no support for
-          #    Commit or Rollback. If the call returns an error, or if the client issuing
-          #    the ExecuteSql call dies, it is possible that some rows had the statement
-          #    executed on them successfully. It is also possible that statement was
-          #    never executed against other rows.
-          #
-          #  - Partitioned DML transactions may only contain the execution of a single
-          #    DML statement via ExecuteSql or ExecuteStreamingSql.
-          #
-          #  - If any error is encountered during the execution of the partitioned DML
-          #    operation (for instance, a UNIQUE INDEX violation, division by zero, or a
-          #    value that cannot be stored due to schema constraints), then the
-          #    operation is stopped at that point and an error is returned. It is
-          #    possible that at this point, some partitions have been committed (or even
-          #    committed multiple times), and other partitions have not been run at all.
-          #
-          # Given the above, Partitioned DML is good fit for large, database-wide,
-          # operations that are idempotent, such as deleting old rows from a very large
-          # table.
-        &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
-            #
-            # Authorization to begin a Partitioned DML transaction requires
-            # `spanner.databases.beginPartitionedDmlTransaction` permission
-            # on the `session` resource.
-        },
-        &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
-            #
-            # Authorization to begin a read-write transaction requires
-            # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
-            # on the `session` resource.
-            # transaction type has no options.
-        },
-        &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
-            #
-            # Authorization to begin a read-only transaction requires
-            # `spanner.databases.beginReadOnlyTransaction` permission
-            # on the `session` resource.
-          &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
-              # seconds. Guarantees that all writes that have committed more
-              # than the specified number of seconds ago are visible. Because
-              # Cloud Spanner chooses the exact timestamp, this mode works even if
-              # the client&#x27;s local clock is substantially skewed from Cloud Spanner
-              # commit timestamps.
-              #
-              # Useful for reading the freshest data available at a nearby
-              # replica, while bounding the possible staleness if the local
-              # replica has fallen behind.
-              #
-              # Note that this option can only be used in single-use
-              # transactions.
-          &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
-              #
-              # This is useful for requesting fresher data than some previous
-              # read, or data that is fresh enough to observe the effects of some
-              # previously committed transaction whose timestamp is known.
-              #
-              # Note that this option can only be used in single-use transactions.
-              #
-              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-          &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
-              # are visible.
-          &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
-              # the Transaction message that describes the transaction.
-          &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
-              # old. The timestamp is chosen soon after the read is started.
-              #
-              # Guarantees that all writes that have committed more than the
-              # specified number of seconds ago are visible. Because Cloud Spanner
-              # chooses the exact timestamp, this mode works even if the client&#x27;s
-              # local clock is substantially skewed from Cloud Spanner commit
-              # timestamps.
-              #
-              # Useful for reading at nearby replicas without the distributed
-              # timestamp negotiation overhead of `max_staleness`.
-          &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
-              # reads at a specific timestamp are repeatable; the same read at
-              # the same timestamp always returns the same data. If the
-              # timestamp is in the future, the read will block until the
-              # specified timestamp, modulo the read&#x27;s deadline.
-              #
-              # Useful for large scale consistent reads such as mapreduces, or
-              # for coordinating many reads against a consistent snapshot of the
-              # data.
-              #
-              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-        },
-      },
-      &quot;singleUse&quot;: { # # Transactions # Execute the read or SQL query in a temporary transaction.
-          # This is the most efficient way to execute a transaction that
-          # consists of a single SQL query.
-          #
-          #
-          # Each session can have at most one active transaction at a time. After the
-          # active transaction is completed, the session can immediately be
-          # re-used for the next transaction. It is not necessary to create a
-          # new session for each transaction.
-          #
-          # # Transaction Modes
-          #
-          # Cloud Spanner supports three transaction modes:
-          #
-          #   1. Locking read-write. This type of transaction is the only way
-          #      to write data into Cloud Spanner. These transactions rely on
-          #      pessimistic locking and, if necessary, two-phase commit.
-          #      Locking read-write transactions may abort, requiring the
-          #      application to retry.
-          #
-          #   2. Snapshot read-only. This transaction type provides guaranteed
-          #      consistency across several reads, but does not allow
-          #      writes. Snapshot read-only transactions can be configured to
-          #      read at timestamps in the past. Snapshot read-only
-          #      transactions do not need to be committed.
-          #
-          #   3. Partitioned DML. This type of transaction is used to execute
-          #      a single Partitioned DML statement. Partitioned DML partitions
-          #      the key space and runs the DML statement over each partition
-          #      in parallel using separate, internal transactions that commit
-          #      independently. Partitioned DML transactions do not need to be
-          #      committed.
-          #
-          # For transactions that only read, snapshot read-only transactions
-          # provide simpler semantics and are almost always faster. In
-          # particular, read-only transactions do not take locks, so they do
-          # not conflict with read-write transactions. As a consequence of not
-          # taking locks, they also do not abort, so retry loops are not needed.
-          #
-          # Transactions may only read/write data in a single database. They
-          # may, however, read/write data in different tables within that
-          # database.
-          #
-          # ## Locking Read-Write Transactions
-          #
-          # Locking transactions may be used to atomically read-modify-write
-          # data anywhere in a database. This type of transaction is externally
-          # consistent.
-          #
-          # Clients should attempt to minimize the amount of time a transaction
-          # is active. Faster transactions commit with higher probability
-          # and cause less contention. Cloud Spanner attempts to keep read locks
-          # active as long as the transaction continues to do reads, and the
-          # transaction has not been terminated by
-          # Commit or
-          # Rollback.  Long periods of
-          # inactivity at the client may cause Cloud Spanner to release a
-          # transaction&#x27;s locks and abort it.
-          #
-          # Conceptually, a read-write transaction consists of zero or more
-          # reads or SQL statements followed by
-          # Commit. At any time before
-          # Commit, the client can send a
-          # Rollback request to abort the
-          # transaction.
-          #
-          # ### Semantics
-          #
-          # Cloud Spanner can commit the transaction if all read locks it acquired
-          # are still valid at commit time, and it is able to acquire write
-          # locks for all writes. Cloud Spanner can abort the transaction for any
-          # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
-          # that the transaction has not modified any user data in Cloud Spanner.
-          #
-          # Unless the transaction commits, Cloud Spanner makes no guarantees about
-          # how long the transaction&#x27;s locks were held for. It is an error to
-          # use Cloud Spanner locks for any sort of mutual exclusion other than
-          # between Cloud Spanner transactions themselves.
-          #
-          # ### Retrying Aborted Transactions
-          #
-          # When a transaction aborts, the application can choose to retry the
-          # whole transaction again. To maximize the chances of successfully
-          # committing the retry, the client should execute the retry in the
-          # same session as the original attempt. The original session&#x27;s lock
-          # priority increases with each consecutive abort, meaning that each
-          # attempt has a slightly better chance of success than the previous.
-          #
-          # Under some circumstances (e.g., many transactions attempting to
-          # modify the same row(s)), a transaction can abort many times in a
-          # short period before successfully committing. Thus, it is not a good
-          # idea to cap the number of retries a transaction can attempt;
-          # instead, it is better to limit the total amount of wall time spent
-          # retrying.
-          #
-          # ### Idle Transactions
-          #
-          # A transaction is considered idle if it has no outstanding reads or
-          # SQL queries and has not started a read or SQL query within the last 10
-          # seconds. Idle transactions can be aborted by Cloud Spanner so that they
-          # don&#x27;t hold on to locks indefinitely. In that case, the commit will
-          # fail with error `ABORTED`.
-          #
-          # If this behavior is undesirable, periodically executing a simple
-          # SQL query in the transaction (e.g., `SELECT 1`) prevents the
-          # transaction from becoming idle.
-          #
-          # ## Snapshot Read-Only Transactions
-          #
-          # Snapshot read-only transactions provides a simpler method than
-          # locking read-write transactions for doing several consistent
-          # reads. However, this type of transaction does not support writes.
-          #
-          # Snapshot transactions do not take locks. Instead, they work by
-          # choosing a Cloud Spanner timestamp, then executing all reads at that
-          # timestamp. Since they do not acquire locks, they do not block
-          # concurrent read-write transactions.
-          #
-          # Unlike locking read-write transactions, snapshot read-only
-          # transactions never abort. They can fail if the chosen read
-          # timestamp is garbage collected; however, the default garbage
-          # collection policy is generous enough that most applications do not
-          # need to worry about this in practice.
-          #
-          # Snapshot read-only transactions do not need to call
-          # Commit or
-          # Rollback (and in fact are not
-          # permitted to do so).
-          #
-          # To execute a snapshot transaction, the client specifies a timestamp
-          # bound, which tells Cloud Spanner how to choose a read timestamp.
-          #
-          # The types of timestamp bound are:
-          #
-          #   - Strong (the default).
-          #   - Bounded staleness.
-          #   - Exact staleness.
-          #
-          # If the Cloud Spanner database to be read is geographically distributed,
-          # stale read-only transactions can execute more quickly than strong
-          # or read-write transaction, because they are able to execute far
-          # from the leader replica.
-          #
-          # Each type of timestamp bound is discussed in detail below.
-          #
-          # ### Strong
-          #
-          # Strong reads are guaranteed to see the effects of all transactions
-          # that have committed before the start of the read. Furthermore, all
-          # rows yielded by a single read are consistent with each other -- if
-          # any part of the read observes a transaction, all parts of the read
-          # see the transaction.
-          #
-          # Strong reads are not repeatable: two consecutive strong read-only
-          # transactions might return inconsistent results if there are
-          # concurrent writes. If consistency across reads is required, the
-          # reads should be executed within a transaction or at an exact read
-          # timestamp.
-          #
-          # See TransactionOptions.ReadOnly.strong.
-          #
-          # ### Exact Staleness
-          #
-          # These timestamp bounds execute reads at a user-specified
-          # timestamp. Reads at a timestamp are guaranteed to see a consistent
-          # prefix of the global transaction history: they observe
-          # modifications done by all transactions with a commit timestamp &lt;=
-          # the read timestamp, and observe none of the modifications done by
-          # transactions with a larger commit timestamp. They will block until
-          # all conflicting transactions that may be assigned commit timestamps
-          # &lt;= the read timestamp have finished.
-          #
-          # The timestamp can either be expressed as an absolute Cloud Spanner commit
-          # timestamp or a staleness relative to the current time.
-          #
-          # These modes do not require a &quot;negotiation phase&quot; to pick a
-          # timestamp. As a result, they execute slightly faster than the
-          # equivalent boundedly stale concurrency modes. On the other hand,
-          # boundedly stale reads usually return fresher results.
-          #
-          # See TransactionOptions.ReadOnly.read_timestamp and
-          # TransactionOptions.ReadOnly.exact_staleness.
-          #
-          # ### Bounded Staleness
-          #
-          # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
-          # subject to a user-provided staleness bound. Cloud Spanner chooses the
-          # newest timestamp within the staleness bound that allows execution
-          # of the reads at the closest available replica without blocking.
-          #
-          # All rows yielded are consistent with each other -- if any part of
-          # the read observes a transaction, all parts of the read see the
-          # transaction. Boundedly stale reads are not repeatable: two stale
-          # reads, even if they use the same staleness bound, can execute at
-          # different timestamps and thus return inconsistent results.
-          #
-          # Boundedly stale reads execute in two phases: the first phase
-          # negotiates a timestamp among all replicas needed to serve the
-          # read. In the second phase, reads are executed at the negotiated
-          # timestamp.
-          #
-          # As a result of the two phase execution, bounded staleness reads are
-          # usually a little slower than comparable exact staleness
-          # reads. However, they are typically able to return fresher
-          # results, and are more likely to execute at the closest replica.
-          #
-          # Because the timestamp negotiation requires up-front knowledge of
-          # which rows will be read, it can only be used with single-use
-          # read-only transactions.
-          #
-          # See TransactionOptions.ReadOnly.max_staleness and
-          # TransactionOptions.ReadOnly.min_read_timestamp.
-          #
-          # ### Old Read Timestamps and Garbage Collection
-          #
-          # Cloud Spanner continuously garbage collects deleted and overwritten data
-          # in the background to reclaim storage space. This process is known
-          # as &quot;version GC&quot;. By default, version GC reclaims versions after they
-          # are one hour old. Because of this, Cloud Spanner cannot perform reads
-          # at read timestamps more than one hour in the past. This
-          # restriction also applies to in-progress reads and/or SQL queries whose
-          # timestamp become too old while executing. Reads and SQL queries with
-          # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
-          #
-          # ## Partitioned DML Transactions
-          #
-          # Partitioned DML transactions are used to execute DML statements with a
-          # different execution strategy that provides different, and often better,
-          # scalability properties for large, table-wide operations than DML in a
-          # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
-          # should prefer using ReadWrite transactions.
-          #
-          # Partitioned DML partitions the keyspace and runs the DML statement on each
-          # partition in separate, internal transactions. These transactions commit
-          # automatically when complete, and run independently from one another.
-          #
-          # To reduce lock contention, this execution strategy only acquires read locks
-          # on rows that match the WHERE clause of the statement. Additionally, the
-          # smaller per-partition transactions hold locks for less time.
-          #
-          # That said, Partitioned DML is not a drop-in replacement for standard DML used
-          # in ReadWrite transactions.
-          #
-          #  - The DML statement must be fully-partitionable. Specifically, the statement
-          #    must be expressible as the union of many statements which each access only
-          #    a single row of the table.
-          #
-          #  - The statement is not applied atomically to all rows of the table. Rather,
-          #    the statement is applied atomically to partitions of the table, in
-          #    independent transactions. Secondary index rows are updated atomically
-          #    with the base table rows.
-          #
-          #  - Partitioned DML does not guarantee exactly-once execution semantics
-          #    against a partition. The statement will be applied at least once to each
-          #    partition. It is strongly recommended that the DML statement should be
-          #    idempotent to avoid unexpected results. For instance, it is potentially
-          #    dangerous to run a statement such as
-          #    `UPDATE table SET column = column + 1` as it could be run multiple times
-          #    against some rows.
-          #
-          #  - The partitions are committed automatically - there is no support for
-          #    Commit or Rollback. If the call returns an error, or if the client issuing
-          #    the ExecuteSql call dies, it is possible that some rows had the statement
-          #    executed on them successfully. It is also possible that statement was
-          #    never executed against other rows.
-          #
-          #  - Partitioned DML transactions may only contain the execution of a single
-          #    DML statement via ExecuteSql or ExecuteStreamingSql.
-          #
-          #  - If any error is encountered during the execution of the partitioned DML
-          #    operation (for instance, a UNIQUE INDEX violation, division by zero, or a
-          #    value that cannot be stored due to schema constraints), then the
-          #    operation is stopped at that point and an error is returned. It is
-          #    possible that at this point, some partitions have been committed (or even
-          #    committed multiple times), and other partitions have not been run at all.
-          #
-          # Given the above, Partitioned DML is good fit for large, database-wide,
-          # operations that are idempotent, such as deleting old rows from a very large
-          # table.
-        &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
-            #
-            # Authorization to begin a Partitioned DML transaction requires
-            # `spanner.databases.beginPartitionedDmlTransaction` permission
-            # on the `session` resource.
-        },
-        &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
-            #
-            # Authorization to begin a read-write transaction requires
-            # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
-            # on the `session` resource.
-            # transaction type has no options.
-        },
-        &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
-            #
-            # Authorization to begin a read-only transaction requires
-            # `spanner.databases.beginReadOnlyTransaction` permission
-            # on the `session` resource.
-          &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
-              # seconds. Guarantees that all writes that have committed more
-              # than the specified number of seconds ago are visible. Because
-              # Cloud Spanner chooses the exact timestamp, this mode works even if
-              # the client&#x27;s local clock is substantially skewed from Cloud Spanner
-              # commit timestamps.
-              #
-              # Useful for reading the freshest data available at a nearby
-              # replica, while bounding the possible staleness if the local
-              # replica has fallen behind.
-              #
-              # Note that this option can only be used in single-use
-              # transactions.
-          &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
-              #
-              # This is useful for requesting fresher data than some previous
-              # read, or data that is fresh enough to observe the effects of some
-              # previously committed transaction whose timestamp is known.
-              #
-              # Note that this option can only be used in single-use transactions.
-              #
-              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-          &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
-              # are visible.
-          &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
-              # the Transaction message that describes the transaction.
-          &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
-              # old. The timestamp is chosen soon after the read is started.
-              #
-              # Guarantees that all writes that have committed more than the
-              # specified number of seconds ago are visible. Because Cloud Spanner
-              # chooses the exact timestamp, this mode works even if the client&#x27;s
-              # local clock is substantially skewed from Cloud Spanner commit
-              # timestamps.
-              #
-              # Useful for reading at nearby replicas without the distributed
-              # timestamp negotiation overhead of `max_staleness`.
-          &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
-              # reads at a specific timestamp are repeatable; the same read at
-              # the same timestamp always returns the same data. If the
-              # timestamp is in the future, the read will block until the
-              # specified timestamp, modulo the read&#x27;s deadline.
-              #
-              # Useful for large scale consistent reads such as mapreduces, or
-              # for coordinating many reads against a consistent snapshot of the
-              # data.
-              #
-              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-        },
-      },
-    },
-    &quot;partitionToken&quot;: &quot;A String&quot;, # If present, results will be restricted to the specified partition
-        # previously created using PartitionRead().    There must be an exact
-        # match for the values of fields common to this message and the
-        # PartitionReadRequest message used to create this partition_token.
+    &quot;resumeToken&quot;: &quot;A String&quot;, # If this request is resuming a previously interrupted read,
+        # `resume_token` should be copied from the last
+        # PartialResultSet yielded before the interruption. Doing this
+        # enables the new read to resume where the last read left off. The
+        # rest of the request parameters must exactly match the request
+        # that yielded this token.
     &quot;columns&quot;: [ # Required. The columns of table to be returned for each row matching
         # this request.
       &quot;A String&quot;,
@@ -6967,13 +6275,722 @@
     &quot;limit&quot;: &quot;A String&quot;, # If greater than zero, only the first `limit` rows are yielded. If `limit`
         # is zero, the default is no limit. A limit cannot be specified if
         # `partition_token` is set.
+    &quot;index&quot;: &quot;A String&quot;, # If non-empty, the name of an index on table. This index is
+        # used instead of the table primary key when interpreting key_set
+        # and sorting result rows. See key_set for further information.
     &quot;table&quot;: &quot;A String&quot;, # Required. The name of the table in the database to be read.
-    &quot;resumeToken&quot;: &quot;A String&quot;, # If this request is resuming a previously interrupted read,
-        # `resume_token` should be copied from the last
-        # PartialResultSet yielded before the interruption. Doing this
-        # enables the new read to resume where the last read left off. The
-        # rest of the request parameters must exactly match the request
-        # that yielded this token.
+    &quot;transaction&quot;: { # This message is used to select the transaction in which a # The transaction to use. If none is provided, the default is a
+        # temporary read-only transaction with strong concurrency.
+        # Read or
+        # ExecuteSql call runs.
+        #
+        # See TransactionOptions for more information about transactions.
+      &quot;singleUse&quot;: { # # Transactions # Execute the read or SQL query in a temporary transaction.
+          # This is the most efficient way to execute a transaction that
+          # consists of a single SQL query.
+          #
+          #
+          # Each session can have at most one active transaction at a time (note that
+          # standalone reads and queries use a transaction internally and do count
+          # towards the one transaction limit). After the active transaction is
+          # completed, the session can immediately be re-used for the next transaction.
+          # It is not necessary to create a new session for each transaction.
+          #
+          # # Transaction Modes
+          #
+          # Cloud Spanner supports three transaction modes:
+          #
+          #   1. Locking read-write. This type of transaction is the only way
+          #      to write data into Cloud Spanner. These transactions rely on
+          #      pessimistic locking and, if necessary, two-phase commit.
+          #      Locking read-write transactions may abort, requiring the
+          #      application to retry.
+          #
+          #   2. Snapshot read-only. This transaction type provides guaranteed
+          #      consistency across several reads, but does not allow
+          #      writes. Snapshot read-only transactions can be configured to
+          #      read at timestamps in the past. Snapshot read-only
+          #      transactions do not need to be committed.
+          #
+          #   3. Partitioned DML. This type of transaction is used to execute
+          #      a single Partitioned DML statement. Partitioned DML partitions
+          #      the key space and runs the DML statement over each partition
+          #      in parallel using separate, internal transactions that commit
+          #      independently. Partitioned DML transactions do not need to be
+          #      committed.
+          #
+          # For transactions that only read, snapshot read-only transactions
+          # provide simpler semantics and are almost always faster. In
+          # particular, read-only transactions do not take locks, so they do
+          # not conflict with read-write transactions. As a consequence of not
+          # taking locks, they also do not abort, so retry loops are not needed.
+          #
+          # Transactions may only read/write data in a single database. They
+          # may, however, read/write data in different tables within that
+          # database.
+          #
+          # ## Locking Read-Write Transactions
+          #
+          # Locking transactions may be used to atomically read-modify-write
+          # data anywhere in a database. This type of transaction is externally
+          # consistent.
+          #
+          # Clients should attempt to minimize the amount of time a transaction
+          # is active. Faster transactions commit with higher probability
+          # and cause less contention. Cloud Spanner attempts to keep read locks
+          # active as long as the transaction continues to do reads, and the
+          # transaction has not been terminated by
+          # Commit or
+          # Rollback.  Long periods of
+          # inactivity at the client may cause Cloud Spanner to release a
+          # transaction&#x27;s locks and abort it.
+          #
+          # Conceptually, a read-write transaction consists of zero or more
+          # reads or SQL statements followed by
+          # Commit. At any time before
+          # Commit, the client can send a
+          # Rollback request to abort the
+          # transaction.
+          #
+          # ### Semantics
+          #
+          # Cloud Spanner can commit the transaction if all read locks it acquired
+          # are still valid at commit time, and it is able to acquire write
+          # locks for all writes. Cloud Spanner can abort the transaction for any
+          # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
+          # that the transaction has not modified any user data in Cloud Spanner.
+          #
+          # Unless the transaction commits, Cloud Spanner makes no guarantees about
+          # how long the transaction&#x27;s locks were held for. It is an error to
+          # use Cloud Spanner locks for any sort of mutual exclusion other than
+          # between Cloud Spanner transactions themselves.
+          #
+          # ### Retrying Aborted Transactions
+          #
+          # When a transaction aborts, the application can choose to retry the
+          # whole transaction again. To maximize the chances of successfully
+          # committing the retry, the client should execute the retry in the
+          # same session as the original attempt. The original session&#x27;s lock
+          # priority increases with each consecutive abort, meaning that each
+          # attempt has a slightly better chance of success than the previous.
+          #
+          # Under some circumstances (e.g., many transactions attempting to
+          # modify the same row(s)), a transaction can abort many times in a
+          # short period before successfully committing. Thus, it is not a good
+          # idea to cap the number of retries a transaction can attempt;
+          # instead, it is better to limit the total amount of wall time spent
+          # retrying.
+          #
+          # ### Idle Transactions
+          #
+          # A transaction is considered idle if it has no outstanding reads or
+          # SQL queries and has not started a read or SQL query within the last 10
+          # seconds. Idle transactions can be aborted by Cloud Spanner so that they
+          # don&#x27;t hold on to locks indefinitely. In that case, the commit will
+          # fail with error `ABORTED`.
+          #
+          # If this behavior is undesirable, periodically executing a simple
+          # SQL query in the transaction (e.g., `SELECT 1`) prevents the
+          # transaction from becoming idle.
+          #
+          # ## Snapshot Read-Only Transactions
+          #
+          # Snapshot read-only transactions provides a simpler method than
+          # locking read-write transactions for doing several consistent
+          # reads. However, this type of transaction does not support writes.
+          #
+          # Snapshot transactions do not take locks. Instead, they work by
+          # choosing a Cloud Spanner timestamp, then executing all reads at that
+          # timestamp. Since they do not acquire locks, they do not block
+          # concurrent read-write transactions.
+          #
+          # Unlike locking read-write transactions, snapshot read-only
+          # transactions never abort. They can fail if the chosen read
+          # timestamp is garbage collected; however, the default garbage
+          # collection policy is generous enough that most applications do not
+          # need to worry about this in practice.
+          #
+          # Snapshot read-only transactions do not need to call
+          # Commit or
+          # Rollback (and in fact are not
+          # permitted to do so).
+          #
+          # To execute a snapshot transaction, the client specifies a timestamp
+          # bound, which tells Cloud Spanner how to choose a read timestamp.
+          #
+          # The types of timestamp bound are:
+          #
+          #   - Strong (the default).
+          #   - Bounded staleness.
+          #   - Exact staleness.
+          #
+          # If the Cloud Spanner database to be read is geographically distributed,
+          # stale read-only transactions can execute more quickly than strong
+          # or read-write transaction, because they are able to execute far
+          # from the leader replica.
+          #
+          # Each type of timestamp bound is discussed in detail below.
+          #
+          # ### Strong
+          #
+          # Strong reads are guaranteed to see the effects of all transactions
+          # that have committed before the start of the read. Furthermore, all
+          # rows yielded by a single read are consistent with each other -- if
+          # any part of the read observes a transaction, all parts of the read
+          # see the transaction.
+          #
+          # Strong reads are not repeatable: two consecutive strong read-only
+          # transactions might return inconsistent results if there are
+          # concurrent writes. If consistency across reads is required, the
+          # reads should be executed within a transaction or at an exact read
+          # timestamp.
+          #
+          # See TransactionOptions.ReadOnly.strong.
+          #
+          # ### Exact Staleness
+          #
+          # These timestamp bounds execute reads at a user-specified
+          # timestamp. Reads at a timestamp are guaranteed to see a consistent
+          # prefix of the global transaction history: they observe
+          # modifications done by all transactions with a commit timestamp &lt;=
+          # the read timestamp, and observe none of the modifications done by
+          # transactions with a larger commit timestamp. They will block until
+          # all conflicting transactions that may be assigned commit timestamps
+          # &lt;= the read timestamp have finished.
+          #
+          # The timestamp can either be expressed as an absolute Cloud Spanner commit
+          # timestamp or a staleness relative to the current time.
+          #
+          # These modes do not require a &quot;negotiation phase&quot; to pick a
+          # timestamp. As a result, they execute slightly faster than the
+          # equivalent boundedly stale concurrency modes. On the other hand,
+          # boundedly stale reads usually return fresher results.
+          #
+          # See TransactionOptions.ReadOnly.read_timestamp and
+          # TransactionOptions.ReadOnly.exact_staleness.
+          #
+          # ### Bounded Staleness
+          #
+          # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
+          # subject to a user-provided staleness bound. Cloud Spanner chooses the
+          # newest timestamp within the staleness bound that allows execution
+          # of the reads at the closest available replica without blocking.
+          #
+          # All rows yielded are consistent with each other -- if any part of
+          # the read observes a transaction, all parts of the read see the
+          # transaction. Boundedly stale reads are not repeatable: two stale
+          # reads, even if they use the same staleness bound, can execute at
+          # different timestamps and thus return inconsistent results.
+          #
+          # Boundedly stale reads execute in two phases: the first phase
+          # negotiates a timestamp among all replicas needed to serve the
+          # read. In the second phase, reads are executed at the negotiated
+          # timestamp.
+          #
+          # As a result of the two phase execution, bounded staleness reads are
+          # usually a little slower than comparable exact staleness
+          # reads. However, they are typically able to return fresher
+          # results, and are more likely to execute at the closest replica.
+          #
+          # Because the timestamp negotiation requires up-front knowledge of
+          # which rows will be read, it can only be used with single-use
+          # read-only transactions.
+          #
+          # See TransactionOptions.ReadOnly.max_staleness and
+          # TransactionOptions.ReadOnly.min_read_timestamp.
+          #
+          # ### Old Read Timestamps and Garbage Collection
+          #
+          # Cloud Spanner continuously garbage collects deleted and overwritten data
+          # in the background to reclaim storage space. This process is known
+          # as &quot;version GC&quot;. By default, version GC reclaims versions after they
+          # are one hour old. Because of this, Cloud Spanner cannot perform reads
+          # at read timestamps more than one hour in the past. This
+          # restriction also applies to in-progress reads and/or SQL queries whose
+          # timestamp become too old while executing. Reads and SQL queries with
+          # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
+          #
+          # ## Partitioned DML Transactions
+          #
+          # Partitioned DML transactions are used to execute DML statements with a
+          # different execution strategy that provides different, and often better,
+          # scalability properties for large, table-wide operations than DML in a
+          # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
+          # should prefer using ReadWrite transactions.
+          #
+          # Partitioned DML partitions the keyspace and runs the DML statement on each
+          # partition in separate, internal transactions. These transactions commit
+          # automatically when complete, and run independently from one another.
+          #
+          # To reduce lock contention, this execution strategy only acquires read locks
+          # on rows that match the WHERE clause of the statement. Additionally, the
+          # smaller per-partition transactions hold locks for less time.
+          #
+          # That said, Partitioned DML is not a drop-in replacement for standard DML used
+          # in ReadWrite transactions.
+          #
+          #  - The DML statement must be fully-partitionable. Specifically, the statement
+          #    must be expressible as the union of many statements which each access only
+          #    a single row of the table.
+          #
+          #  - The statement is not applied atomically to all rows of the table. Rather,
+          #    the statement is applied atomically to partitions of the table, in
+          #    independent transactions. Secondary index rows are updated atomically
+          #    with the base table rows.
+          #
+          #  - Partitioned DML does not guarantee exactly-once execution semantics
+          #    against a partition. The statement will be applied at least once to each
+          #    partition. It is strongly recommended that the DML statement should be
+          #    idempotent to avoid unexpected results. For instance, it is potentially
+          #    dangerous to run a statement such as
+          #    `UPDATE table SET column = column + 1` as it could be run multiple times
+          #    against some rows.
+          #
+          #  - The partitions are committed automatically - there is no support for
+          #    Commit or Rollback. If the call returns an error, or if the client issuing
+          #    the ExecuteSql call dies, it is possible that some rows had the statement
+          #    executed on them successfully. It is also possible that statement was
+          #    never executed against other rows.
+          #
+          #  - Partitioned DML transactions may only contain the execution of a single
+          #    DML statement via ExecuteSql or ExecuteStreamingSql.
+          #
+          #  - If any error is encountered during the execution of the partitioned DML
+          #    operation (for instance, a UNIQUE INDEX violation, division by zero, or a
+          #    value that cannot be stored due to schema constraints), then the
+          #    operation is stopped at that point and an error is returned. It is
+          #    possible that at this point, some partitions have been committed (or even
+          #    committed multiple times), and other partitions have not been run at all.
+          #
+          # Given the above, Partitioned DML is good fit for large, database-wide,
+          # operations that are idempotent, such as deleting old rows from a very large
+          # table.
+        &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
+            #
+            # Authorization to begin a read-write transaction requires
+            # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
+            # on the `session` resource.
+            # transaction type has no options.
+        },
+        &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
+            #
+            # Authorization to begin a read-only transaction requires
+            # `spanner.databases.beginReadOnlyTransaction` permission
+            # on the `session` resource.
+          &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
+              # reads at a specific timestamp are repeatable; the same read at
+              # the same timestamp always returns the same data. If the
+              # timestamp is in the future, the read will block until the
+              # specified timestamp, modulo the read&#x27;s deadline.
+              #
+              # Useful for large scale consistent reads such as mapreduces, or
+              # for coordinating many reads against a consistent snapshot of the
+              # data.
+              #
+              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+          &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
+              #
+              # This is useful for requesting fresher data than some previous
+              # read, or data that is fresh enough to observe the effects of some
+              # previously committed transaction whose timestamp is known.
+              #
+              # Note that this option can only be used in single-use transactions.
+              #
+              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+          &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
+              # old. The timestamp is chosen soon after the read is started.
+              #
+              # Guarantees that all writes that have committed more than the
+              # specified number of seconds ago are visible. Because Cloud Spanner
+              # chooses the exact timestamp, this mode works even if the client&#x27;s
+              # local clock is substantially skewed from Cloud Spanner commit
+              # timestamps.
+              #
+              # Useful for reading at nearby replicas without the distributed
+              # timestamp negotiation overhead of `max_staleness`.
+          &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
+              # seconds. Guarantees that all writes that have committed more
+              # than the specified number of seconds ago are visible. Because
+              # Cloud Spanner chooses the exact timestamp, this mode works even if
+              # the client&#x27;s local clock is substantially skewed from Cloud Spanner
+              # commit timestamps.
+              #
+              # Useful for reading the freshest data available at a nearby
+              # replica, while bounding the possible staleness if the local
+              # replica has fallen behind.
+              #
+              # Note that this option can only be used in single-use
+              # transactions.
+          &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
+              # the Transaction message that describes the transaction.
+          &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
+              # are visible.
+        },
+        &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
+            #
+            # Authorization to begin a Partitioned DML transaction requires
+            # `spanner.databases.beginPartitionedDmlTransaction` permission
+            # on the `session` resource.
+        },
+      },
+      &quot;begin&quot;: { # # Transactions # Begin a new transaction and execute this read or SQL query in
+          # it. The transaction ID of the new transaction is returned in
+          # ResultSetMetadata.transaction, which is a Transaction.
+          #
+          #
+          # Each session can have at most one active transaction at a time (note that
+          # standalone reads and queries use a transaction internally and do count
+          # towards the one transaction limit). After the active transaction is
+          # completed, the session can immediately be re-used for the next transaction.
+          # It is not necessary to create a new session for each transaction.
+          #
+          # # Transaction Modes
+          #
+          # Cloud Spanner supports three transaction modes:
+          #
+          #   1. Locking read-write. This type of transaction is the only way
+          #      to write data into Cloud Spanner. These transactions rely on
+          #      pessimistic locking and, if necessary, two-phase commit.
+          #      Locking read-write transactions may abort, requiring the
+          #      application to retry.
+          #
+          #   2. Snapshot read-only. This transaction type provides guaranteed
+          #      consistency across several reads, but does not allow
+          #      writes. Snapshot read-only transactions can be configured to
+          #      read at timestamps in the past. Snapshot read-only
+          #      transactions do not need to be committed.
+          #
+          #   3. Partitioned DML. This type of transaction is used to execute
+          #      a single Partitioned DML statement. Partitioned DML partitions
+          #      the key space and runs the DML statement over each partition
+          #      in parallel using separate, internal transactions that commit
+          #      independently. Partitioned DML transactions do not need to be
+          #      committed.
+          #
+          # For transactions that only read, snapshot read-only transactions
+          # provide simpler semantics and are almost always faster. In
+          # particular, read-only transactions do not take locks, so they do
+          # not conflict with read-write transactions. As a consequence of not
+          # taking locks, they also do not abort, so retry loops are not needed.
+          #
+          # Transactions may only read/write data in a single database. They
+          # may, however, read/write data in different tables within that
+          # database.
+          #
+          # ## Locking Read-Write Transactions
+          #
+          # Locking transactions may be used to atomically read-modify-write
+          # data anywhere in a database. This type of transaction is externally
+          # consistent.
+          #
+          # Clients should attempt to minimize the amount of time a transaction
+          # is active. Faster transactions commit with higher probability
+          # and cause less contention. Cloud Spanner attempts to keep read locks
+          # active as long as the transaction continues to do reads, and the
+          # transaction has not been terminated by
+          # Commit or
+          # Rollback.  Long periods of
+          # inactivity at the client may cause Cloud Spanner to release a
+          # transaction&#x27;s locks and abort it.
+          #
+          # Conceptually, a read-write transaction consists of zero or more
+          # reads or SQL statements followed by
+          # Commit. At any time before
+          # Commit, the client can send a
+          # Rollback request to abort the
+          # transaction.
+          #
+          # ### Semantics
+          #
+          # Cloud Spanner can commit the transaction if all read locks it acquired
+          # are still valid at commit time, and it is able to acquire write
+          # locks for all writes. Cloud Spanner can abort the transaction for any
+          # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
+          # that the transaction has not modified any user data in Cloud Spanner.
+          #
+          # Unless the transaction commits, Cloud Spanner makes no guarantees about
+          # how long the transaction&#x27;s locks were held for. It is an error to
+          # use Cloud Spanner locks for any sort of mutual exclusion other than
+          # between Cloud Spanner transactions themselves.
+          #
+          # ### Retrying Aborted Transactions
+          #
+          # When a transaction aborts, the application can choose to retry the
+          # whole transaction again. To maximize the chances of successfully
+          # committing the retry, the client should execute the retry in the
+          # same session as the original attempt. The original session&#x27;s lock
+          # priority increases with each consecutive abort, meaning that each
+          # attempt has a slightly better chance of success than the previous.
+          #
+          # Under some circumstances (e.g., many transactions attempting to
+          # modify the same row(s)), a transaction can abort many times in a
+          # short period before successfully committing. Thus, it is not a good
+          # idea to cap the number of retries a transaction can attempt;
+          # instead, it is better to limit the total amount of wall time spent
+          # retrying.
+          #
+          # ### Idle Transactions
+          #
+          # A transaction is considered idle if it has no outstanding reads or
+          # SQL queries and has not started a read or SQL query within the last 10
+          # seconds. Idle transactions can be aborted by Cloud Spanner so that they
+          # don&#x27;t hold on to locks indefinitely. In that case, the commit will
+          # fail with error `ABORTED`.
+          #
+          # If this behavior is undesirable, periodically executing a simple
+          # SQL query in the transaction (e.g., `SELECT 1`) prevents the
+          # transaction from becoming idle.
+          #
+          # ## Snapshot Read-Only Transactions
+          #
+          # Snapshot read-only transactions provides a simpler method than
+          # locking read-write transactions for doing several consistent
+          # reads. However, this type of transaction does not support writes.
+          #
+          # Snapshot transactions do not take locks. Instead, they work by
+          # choosing a Cloud Spanner timestamp, then executing all reads at that
+          # timestamp. Since they do not acquire locks, they do not block
+          # concurrent read-write transactions.
+          #
+          # Unlike locking read-write transactions, snapshot read-only
+          # transactions never abort. They can fail if the chosen read
+          # timestamp is garbage collected; however, the default garbage
+          # collection policy is generous enough that most applications do not
+          # need to worry about this in practice.
+          #
+          # Snapshot read-only transactions do not need to call
+          # Commit or
+          # Rollback (and in fact are not
+          # permitted to do so).
+          #
+          # To execute a snapshot transaction, the client specifies a timestamp
+          # bound, which tells Cloud Spanner how to choose a read timestamp.
+          #
+          # The types of timestamp bound are:
+          #
+          #   - Strong (the default).
+          #   - Bounded staleness.
+          #   - Exact staleness.
+          #
+          # If the Cloud Spanner database to be read is geographically distributed,
+          # stale read-only transactions can execute more quickly than strong
+          # or read-write transaction, because they are able to execute far
+          # from the leader replica.
+          #
+          # Each type of timestamp bound is discussed in detail below.
+          #
+          # ### Strong
+          #
+          # Strong reads are guaranteed to see the effects of all transactions
+          # that have committed before the start of the read. Furthermore, all
+          # rows yielded by a single read are consistent with each other -- if
+          # any part of the read observes a transaction, all parts of the read
+          # see the transaction.
+          #
+          # Strong reads are not repeatable: two consecutive strong read-only
+          # transactions might return inconsistent results if there are
+          # concurrent writes. If consistency across reads is required, the
+          # reads should be executed within a transaction or at an exact read
+          # timestamp.
+          #
+          # See TransactionOptions.ReadOnly.strong.
+          #
+          # ### Exact Staleness
+          #
+          # These timestamp bounds execute reads at a user-specified
+          # timestamp. Reads at a timestamp are guaranteed to see a consistent
+          # prefix of the global transaction history: they observe
+          # modifications done by all transactions with a commit timestamp &lt;=
+          # the read timestamp, and observe none of the modifications done by
+          # transactions with a larger commit timestamp. They will block until
+          # all conflicting transactions that may be assigned commit timestamps
+          # &lt;= the read timestamp have finished.
+          #
+          # The timestamp can either be expressed as an absolute Cloud Spanner commit
+          # timestamp or a staleness relative to the current time.
+          #
+          # These modes do not require a &quot;negotiation phase&quot; to pick a
+          # timestamp. As a result, they execute slightly faster than the
+          # equivalent boundedly stale concurrency modes. On the other hand,
+          # boundedly stale reads usually return fresher results.
+          #
+          # See TransactionOptions.ReadOnly.read_timestamp and
+          # TransactionOptions.ReadOnly.exact_staleness.
+          #
+          # ### Bounded Staleness
+          #
+          # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
+          # subject to a user-provided staleness bound. Cloud Spanner chooses the
+          # newest timestamp within the staleness bound that allows execution
+          # of the reads at the closest available replica without blocking.
+          #
+          # All rows yielded are consistent with each other -- if any part of
+          # the read observes a transaction, all parts of the read see the
+          # transaction. Boundedly stale reads are not repeatable: two stale
+          # reads, even if they use the same staleness bound, can execute at
+          # different timestamps and thus return inconsistent results.
+          #
+          # Boundedly stale reads execute in two phases: the first phase
+          # negotiates a timestamp among all replicas needed to serve the
+          # read. In the second phase, reads are executed at the negotiated
+          # timestamp.
+          #
+          # As a result of the two phase execution, bounded staleness reads are
+          # usually a little slower than comparable exact staleness
+          # reads. However, they are typically able to return fresher
+          # results, and are more likely to execute at the closest replica.
+          #
+          # Because the timestamp negotiation requires up-front knowledge of
+          # which rows will be read, it can only be used with single-use
+          # read-only transactions.
+          #
+          # See TransactionOptions.ReadOnly.max_staleness and
+          # TransactionOptions.ReadOnly.min_read_timestamp.
+          #
+          # ### Old Read Timestamps and Garbage Collection
+          #
+          # Cloud Spanner continuously garbage collects deleted and overwritten data
+          # in the background to reclaim storage space. This process is known
+          # as &quot;version GC&quot;. By default, version GC reclaims versions after they
+          # are one hour old. Because of this, Cloud Spanner cannot perform reads
+          # at read timestamps more than one hour in the past. This
+          # restriction also applies to in-progress reads and/or SQL queries whose
+          # timestamp become too old while executing. Reads and SQL queries with
+          # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
+          #
+          # ## Partitioned DML Transactions
+          #
+          # Partitioned DML transactions are used to execute DML statements with a
+          # different execution strategy that provides different, and often better,
+          # scalability properties for large, table-wide operations than DML in a
+          # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
+          # should prefer using ReadWrite transactions.
+          #
+          # Partitioned DML partitions the keyspace and runs the DML statement on each
+          # partition in separate, internal transactions. These transactions commit
+          # automatically when complete, and run independently from one another.
+          #
+          # To reduce lock contention, this execution strategy only acquires read locks
+          # on rows that match the WHERE clause of the statement. Additionally, the
+          # smaller per-partition transactions hold locks for less time.
+          #
+          # That said, Partitioned DML is not a drop-in replacement for standard DML used
+          # in ReadWrite transactions.
+          #
+          #  - The DML statement must be fully-partitionable. Specifically, the statement
+          #    must be expressible as the union of many statements which each access only
+          #    a single row of the table.
+          #
+          #  - The statement is not applied atomically to all rows of the table. Rather,
+          #    the statement is applied atomically to partitions of the table, in
+          #    independent transactions. Secondary index rows are updated atomically
+          #    with the base table rows.
+          #
+          #  - Partitioned DML does not guarantee exactly-once execution semantics
+          #    against a partition. The statement will be applied at least once to each
+          #    partition. It is strongly recommended that the DML statement should be
+          #    idempotent to avoid unexpected results. For instance, it is potentially
+          #    dangerous to run a statement such as
+          #    `UPDATE table SET column = column + 1` as it could be run multiple times
+          #    against some rows.
+          #
+          #  - The partitions are committed automatically - there is no support for
+          #    Commit or Rollback. If the call returns an error, or if the client issuing
+          #    the ExecuteSql call dies, it is possible that some rows had the statement
+          #    executed on them successfully. It is also possible that statement was
+          #    never executed against other rows.
+          #
+          #  - Partitioned DML transactions may only contain the execution of a single
+          #    DML statement via ExecuteSql or ExecuteStreamingSql.
+          #
+          #  - If any error is encountered during the execution of the partitioned DML
+          #    operation (for instance, a UNIQUE INDEX violation, division by zero, or a
+          #    value that cannot be stored due to schema constraints), then the
+          #    operation is stopped at that point and an error is returned. It is
+          #    possible that at this point, some partitions have been committed (or even
+          #    committed multiple times), and other partitions have not been run at all.
+          #
+          # Given the above, Partitioned DML is good fit for large, database-wide,
+          # operations that are idempotent, such as deleting old rows from a very large
+          # table.
+        &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
+            #
+            # Authorization to begin a read-write transaction requires
+            # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
+            # on the `session` resource.
+            # transaction type has no options.
+        },
+        &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
+            #
+            # Authorization to begin a read-only transaction requires
+            # `spanner.databases.beginReadOnlyTransaction` permission
+            # on the `session` resource.
+          &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
+              # reads at a specific timestamp are repeatable; the same read at
+              # the same timestamp always returns the same data. If the
+              # timestamp is in the future, the read will block until the
+              # specified timestamp, modulo the read&#x27;s deadline.
+              #
+              # Useful for large scale consistent reads such as mapreduces, or
+              # for coordinating many reads against a consistent snapshot of the
+              # data.
+              #
+              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+          &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
+              #
+              # This is useful for requesting fresher data than some previous
+              # read, or data that is fresh enough to observe the effects of some
+              # previously committed transaction whose timestamp is known.
+              #
+              # Note that this option can only be used in single-use transactions.
+              #
+              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+          &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
+              # old. The timestamp is chosen soon after the read is started.
+              #
+              # Guarantees that all writes that have committed more than the
+              # specified number of seconds ago are visible. Because Cloud Spanner
+              # chooses the exact timestamp, this mode works even if the client&#x27;s
+              # local clock is substantially skewed from Cloud Spanner commit
+              # timestamps.
+              #
+              # Useful for reading at nearby replicas without the distributed
+              # timestamp negotiation overhead of `max_staleness`.
+          &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
+              # seconds. Guarantees that all writes that have committed more
+              # than the specified number of seconds ago are visible. Because
+              # Cloud Spanner chooses the exact timestamp, this mode works even if
+              # the client&#x27;s local clock is substantially skewed from Cloud Spanner
+              # commit timestamps.
+              #
+              # Useful for reading the freshest data available at a nearby
+              # replica, while bounding the possible staleness if the local
+              # replica has fallen behind.
+              #
+              # Note that this option can only be used in single-use
+              # transactions.
+          &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
+              # the Transaction message that describes the transaction.
+          &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
+              # are visible.
+        },
+        &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
+            #
+            # Authorization to begin a Partitioned DML transaction requires
+            # `spanner.databases.beginPartitionedDmlTransaction` permission
+            # on the `session` resource.
+        },
+      },
+      &quot;id&quot;: &quot;A String&quot;, # Execute the read or SQL query in a previously-started transaction.
+    },
+    &quot;partitionToken&quot;: &quot;A String&quot;, # If present, results will be restricted to the specified partition
+        # previously created using PartitionRead().    There must be an exact
+        # match for the values of fields common to this message and the
+        # PartitionReadRequest message used to create this partition_token.
     &quot;keySet&quot;: { # `KeySet` defines a collection of Cloud Spanner keys and/or key ranges. All # Required. `key_set` identifies the rows to be yielded. `key_set` names the
         # primary keys of the rows in table to be yielded, unless index
         # is present. If index is present, then key_set instead names
@@ -7081,22 +7098,22 @@
             #
             # Note that 100 is passed as the start, and 1 is passed as the end,
             # because `Key` is a descending column in the schema.
-          &quot;startOpen&quot;: [ # If the start is open, then the range excludes rows whose first
-              # `len(start_open)` key columns exactly match `start_open`.
-            &quot;&quot;,
-          ],
           &quot;endClosed&quot;: [ # If the end is closed, then the range includes all rows whose
               # first `len(end_closed)` key columns exactly match `end_closed`.
             &quot;&quot;,
           ],
-          &quot;endOpen&quot;: [ # If the end is open, then the range excludes rows whose first
-              # `len(end_open)` key columns exactly match `end_open`.
-            &quot;&quot;,
-          ],
           &quot;startClosed&quot;: [ # If the start is closed, then the range includes all rows whose
               # first `len(start_closed)` key columns exactly match `start_closed`.
             &quot;&quot;,
           ],
+          &quot;startOpen&quot;: [ # If the start is open, then the range excludes rows whose first
+              # `len(start_open)` key columns exactly match `start_open`.
+            &quot;&quot;,
+          ],
+          &quot;endOpen&quot;: [ # If the end is open, then the range excludes rows whose first
+              # `len(end_open)` key columns exactly match `end_open`.
+            &quot;&quot;,
+          ],
         },
       ],
       &quot;keys&quot;: [ # A list of specific keys. Entries in `keys` should have exactly as
@@ -7111,9 +7128,6 @@
           # `KeySet` matches all keys in the table or index. Note that any keys
           # specified in `keys` or `ranges` are only yielded once.
     },
-    &quot;index&quot;: &quot;A String&quot;, # If non-empty, the name of an index on table. This index is
-        # used instead of the table primary key when interpreting key_set
-        # and sorting result rows. See key_set for further information.
   }
 
   x__xgafv: string, V1 error format.
@@ -7126,33 +7140,100 @@
 
     { # Results from Read or
       # ExecuteSql.
-    &quot;metadata&quot;: { # Metadata about a ResultSet or PartialResultSet. # Metadata about the result set, such as row type information.
-      &quot;rowType&quot;: { # `StructType` defines the fields of a STRUCT type. # Indicates the field names and types for the rows in the result
-          # set.  For example, a SQL query like `&quot;SELECT UserId, UserName FROM
-          # Users&quot;` could return a `row_type` value like:
+    &quot;stats&quot;: { # Additional statistics about a ResultSet or PartialResultSet. # Query plan and execution statistics for the SQL statement that
+        # produced this result set. These can be requested by setting
+        # ExecuteSqlRequest.query_mode.
+        # DML statements always produce stats containing the number of rows
+        # modified, unless executed using the
+        # ExecuteSqlRequest.QueryMode.PLAN ExecuteSqlRequest.query_mode.
+        # Other fields may or may not be populated, based on the
+        # ExecuteSqlRequest.query_mode.
+      &quot;queryStats&quot;: { # Aggregated statistics from the execution of the query. Only present when
+          # the query is profiled. For example, a query could return the statistics as
+          # follows:
           #
-          #     &quot;fields&quot;: [
-          #       { &quot;name&quot;: &quot;UserId&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;INT64&quot; } },
-          #       { &quot;name&quot;: &quot;UserName&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;STRING&quot; } },
-          #     ]
-        &quot;fields&quot;: [ # The list of fields that make up this struct. Order is
-            # significant, because values of this struct type are represented as
-            # lists, where the order of field values matches the order of
-            # fields in the StructType. In turn, the order of fields
-            # matches the order of columns in a read request, or the order of
-            # fields in the `SELECT` clause of a query.
-          { # Message representing a single field of a struct.
-            &quot;name&quot;: &quot;A String&quot;, # The name of the field. For reads, this is the column name. For
-                # SQL queries, it is the column alias (e.g., `&quot;Word&quot;` in the
-                # query `&quot;SELECT &#x27;hello&#x27; AS Word&quot;`), or the column name (e.g.,
-                # `&quot;ColName&quot;` in the query `&quot;SELECT ColName FROM Table&quot;`). Some
-                # columns might have an empty name (e.g., !&quot;SELECT
-                # UPPER(ColName)&quot;`). Note that a query result can contain
-                # multiple fields with the same name.
-            &quot;type&quot;: # Object with schema name: Type # The type of the field.
+          #     {
+          #       &quot;rows_returned&quot;: &quot;3&quot;,
+          #       &quot;elapsed_time&quot;: &quot;1.22 secs&quot;,
+          #       &quot;cpu_time&quot;: &quot;1.19 secs&quot;
+          #     }
+        &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
+      },
+      &quot;rowCountExact&quot;: &quot;A String&quot;, # Standard DML returns an exact count of rows that were modified.
+      &quot;rowCountLowerBound&quot;: &quot;A String&quot;, # Partitioned DML does not offer exactly-once semantics, so it
+          # returns a lower bound of the rows modified.
+      &quot;queryPlan&quot;: { # Contains an ordered list of nodes appearing in the query plan. # QueryPlan for the query associated with this result.
+        &quot;planNodes&quot;: [ # The nodes in the query plan. Plan nodes are returned in pre-order starting
+            # with the plan root. Each PlanNode&#x27;s `id` corresponds to its index in
+            # `plan_nodes`.
+          { # Node information for nodes appearing in a QueryPlan.plan_nodes.
+            &quot;childLinks&quot;: [ # List of child node `index`es and their relationship to this parent.
+              { # Metadata associated with a parent-child relationship appearing in a
+                  # PlanNode.
+                &quot;childIndex&quot;: 42, # The node to which the link points.
+                &quot;type&quot;: &quot;A String&quot;, # The type of the link. For example, in Hash Joins this could be used to
+                    # distinguish between the build child and the probe child, or in the case
+                    # of the child being an output variable, to represent the tag associated
+                    # with the output variable.
+                &quot;variable&quot;: &quot;A String&quot;, # Only present if the child node is SCALAR and corresponds
+                    # to an output variable of the parent node. The field carries the name of
+                    # the output variable.
+                    # For example, a `TableScan` operator that reads rows from a table will
+                    # have child links to the `SCALAR` nodes representing the output variables
+                    # created for each column that is read by the operator. The corresponding
+                    # `variable` fields will be set to the variable names assigned to the
+                    # columns.
+              },
+            ],
+            &quot;metadata&quot;: { # Attributes relevant to the node contained in a group of key-value pairs.
+                # For example, a Parameter Reference node could have the following
+                # information in its metadata:
+                #
+                #     {
+                #       &quot;parameter_reference&quot;: &quot;param1&quot;,
+                #       &quot;parameter_type&quot;: &quot;array&quot;
+                #     }
+              &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
+            },
+            &quot;kind&quot;: &quot;A String&quot;, # Used to determine the type of node. May be needed for visualizing
+                # different kinds of nodes differently. For example, If the node is a
+                # SCALAR node, it will have a condensed representation
+                # which can be used to directly embed a description of the node in its
+                # parent.
+            &quot;shortRepresentation&quot;: { # Condensed representation of a node and its subtree. Only present for # Condensed representation for SCALAR nodes.
+                # `SCALAR` PlanNode(s).
+              &quot;subqueries&quot;: { # A mapping of (subquery variable name) -&gt; (subquery node id) for cases
+                  # where the `description` string of this node references a `SCALAR`
+                  # subquery contained in the expression subtree rooted at this node. The
+                  # referenced `SCALAR` subquery may not necessarily be a direct child of
+                  # this node.
+                &quot;a_key&quot;: 42,
+              },
+              &quot;description&quot;: &quot;A String&quot;, # A string representation of the expression subtree rooted at this node.
+            },
+            &quot;displayName&quot;: &quot;A String&quot;, # The display name for the node.
+            &quot;index&quot;: 42, # The `PlanNode`&#x27;s index in node list.
+            &quot;executionStats&quot;: { # The execution statistics associated with the node, contained in a group of
+                # key-value pairs. Only present if the plan was returned as a result of a
+                # profile query. For example, number of executions, number of rows/time per
+                # execution etc.
+              &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
+            },
           },
         ],
       },
+    },
+    &quot;rows&quot;: [ # Each element in `rows` is a row whose format is defined by
+        # metadata.row_type. The ith element
+        # in each row matches the ith field in
+        # metadata.row_type. Elements are
+        # encoded based on type as described
+        # here.
+      [
+        &quot;&quot;,
+      ],
+    ],
+    &quot;metadata&quot;: { # Metadata about a ResultSet or PartialResultSet. # Metadata about the result set, such as row type information.
       &quot;transaction&quot;: { # A transaction. # If the read or SQL query began a transaction as a side-effect, the
           # information about the new transaction is yielded here.
         &quot;readTimestamp&quot;: &quot;A String&quot;, # For snapshot read-only transactions, the read timestamp chosen
@@ -7170,100 +7251,33 @@
             # Single-use read-only transactions do not have IDs, because
             # single-use transactions do not support multiple requests.
       },
-    },
-    &quot;stats&quot;: { # Additional statistics about a ResultSet or PartialResultSet. # Query plan and execution statistics for the SQL statement that
-        # produced this result set. These can be requested by setting
-        # ExecuteSqlRequest.query_mode.
-        # DML statements always produce stats containing the number of rows
-        # modified, unless executed using the
-        # ExecuteSqlRequest.QueryMode.PLAN ExecuteSqlRequest.query_mode.
-        # Other fields may or may not be populated, based on the
-        # ExecuteSqlRequest.query_mode.
-      &quot;rowCountLowerBound&quot;: &quot;A String&quot;, # Partitioned DML does not offer exactly-once semantics, so it
-          # returns a lower bound of the rows modified.
-      &quot;queryPlan&quot;: { # Contains an ordered list of nodes appearing in the query plan. # QueryPlan for the query associated with this result.
-        &quot;planNodes&quot;: [ # The nodes in the query plan. Plan nodes are returned in pre-order starting
-            # with the plan root. Each PlanNode&#x27;s `id` corresponds to its index in
-            # `plan_nodes`.
-          { # Node information for nodes appearing in a QueryPlan.plan_nodes.
-            &quot;displayName&quot;: &quot;A String&quot;, # The display name for the node.
-            &quot;executionStats&quot;: { # The execution statistics associated with the node, contained in a group of
-                # key-value pairs. Only present if the plan was returned as a result of a
-                # profile query. For example, number of executions, number of rows/time per
-                # execution etc.
-              &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
-            },
-            &quot;shortRepresentation&quot;: { # Condensed representation of a node and its subtree. Only present for # Condensed representation for SCALAR nodes.
-                # `SCALAR` PlanNode(s).
-              &quot;subqueries&quot;: { # A mapping of (subquery variable name) -&gt; (subquery node id) for cases
-                  # where the `description` string of this node references a `SCALAR`
-                  # subquery contained in the expression subtree rooted at this node. The
-                  # referenced `SCALAR` subquery may not necessarily be a direct child of
-                  # this node.
-                &quot;a_key&quot;: 42,
-              },
-              &quot;description&quot;: &quot;A String&quot;, # A string representation of the expression subtree rooted at this node.
-            },
-            &quot;metadata&quot;: { # Attributes relevant to the node contained in a group of key-value pairs.
-                # For example, a Parameter Reference node could have the following
-                # information in its metadata:
-                #
-                #     {
-                #       &quot;parameter_reference&quot;: &quot;param1&quot;,
-                #       &quot;parameter_type&quot;: &quot;array&quot;
-                #     }
-              &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
-            },
-            &quot;childLinks&quot;: [ # List of child node `index`es and their relationship to this parent.
-              { # Metadata associated with a parent-child relationship appearing in a
-                  # PlanNode.
-                &quot;type&quot;: &quot;A String&quot;, # The type of the link. For example, in Hash Joins this could be used to
-                    # distinguish between the build child and the probe child, or in the case
-                    # of the child being an output variable, to represent the tag associated
-                    # with the output variable.
-                &quot;variable&quot;: &quot;A String&quot;, # Only present if the child node is SCALAR and corresponds
-                    # to an output variable of the parent node. The field carries the name of
-                    # the output variable.
-                    # For example, a `TableScan` operator that reads rows from a table will
-                    # have child links to the `SCALAR` nodes representing the output variables
-                    # created for each column that is read by the operator. The corresponding
-                    # `variable` fields will be set to the variable names assigned to the
-                    # columns.
-                &quot;childIndex&quot;: 42, # The node to which the link points.
-              },
-            ],
-            &quot;index&quot;: 42, # The `PlanNode`&#x27;s index in node list.
-            &quot;kind&quot;: &quot;A String&quot;, # Used to determine the type of node. May be needed for visualizing
-                # different kinds of nodes differently. For example, If the node is a
-                # SCALAR node, it will have a condensed representation
-                # which can be used to directly embed a description of the node in its
-                # parent.
+      &quot;rowType&quot;: { # `StructType` defines the fields of a STRUCT type. # Indicates the field names and types for the rows in the result
+          # set.  For example, a SQL query like `&quot;SELECT UserId, UserName FROM
+          # Users&quot;` could return a `row_type` value like:
+          #
+          #     &quot;fields&quot;: [
+          #       { &quot;name&quot;: &quot;UserId&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;INT64&quot; } },
+          #       { &quot;name&quot;: &quot;UserName&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;STRING&quot; } },
+          #     ]
+        &quot;fields&quot;: [ # The list of fields that make up this struct. Order is
+            # significant, because values of this struct type are represented as
+            # lists, where the order of field values matches the order of
+            # fields in the StructType. In turn, the order of fields
+            # matches the order of columns in a read request, or the order of
+            # fields in the `SELECT` clause of a query.
+          { # Message representing a single field of a struct.
+            &quot;type&quot;: # Object with schema name: Type # The type of the field.
+            &quot;name&quot;: &quot;A String&quot;, # The name of the field. For reads, this is the column name. For
+                # SQL queries, it is the column alias (e.g., `&quot;Word&quot;` in the
+                # query `&quot;SELECT &#x27;hello&#x27; AS Word&quot;`), or the column name (e.g.,
+                # `&quot;ColName&quot;` in the query `&quot;SELECT ColName FROM Table&quot;`). Some
+                # columns might have an empty name (e.g., !&quot;SELECT
+                # UPPER(ColName)&quot;`). Note that a query result can contain
+                # multiple fields with the same name.
           },
         ],
       },
-      &quot;rowCountExact&quot;: &quot;A String&quot;, # Standard DML returns an exact count of rows that were modified.
-      &quot;queryStats&quot;: { # Aggregated statistics from the execution of the query. Only present when
-          # the query is profiled. For example, a query could return the statistics as
-          # follows:
-          #
-          #     {
-          #       &quot;rows_returned&quot;: &quot;3&quot;,
-          #       &quot;elapsed_time&quot;: &quot;1.22 secs&quot;,
-          #       &quot;cpu_time&quot;: &quot;1.19 secs&quot;
-          #     }
-        &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
-      },
     },
-    &quot;rows&quot;: [ # Each element in `rows` is a row whose format is defined by
-        # metadata.row_type. The ith element
-        # in each row matches the ith field in
-        # metadata.row_type. Elements are
-        # encoded based on type as described
-        # here.
-      [
-        &quot;&quot;,
-      ],
-    ],
   }</pre>
 </div>
 
@@ -7322,716 +7336,12 @@
 
 { # The request for Read and
       # StreamingRead.
-    &quot;transaction&quot;: { # This message is used to select the transaction in which a # The transaction to use. If none is provided, the default is a
-        # temporary read-only transaction with strong concurrency.
-        # Read or
-        # ExecuteSql call runs.
-        #
-        # See TransactionOptions for more information about transactions.
-      &quot;id&quot;: &quot;A String&quot;, # Execute the read or SQL query in a previously-started transaction.
-      &quot;begin&quot;: { # # Transactions # Begin a new transaction and execute this read or SQL query in
-          # it. The transaction ID of the new transaction is returned in
-          # ResultSetMetadata.transaction, which is a Transaction.
-          #
-          #
-          # Each session can have at most one active transaction at a time. After the
-          # active transaction is completed, the session can immediately be
-          # re-used for the next transaction. It is not necessary to create a
-          # new session for each transaction.
-          #
-          # # Transaction Modes
-          #
-          # Cloud Spanner supports three transaction modes:
-          #
-          #   1. Locking read-write. This type of transaction is the only way
-          #      to write data into Cloud Spanner. These transactions rely on
-          #      pessimistic locking and, if necessary, two-phase commit.
-          #      Locking read-write transactions may abort, requiring the
-          #      application to retry.
-          #
-          #   2. Snapshot read-only. This transaction type provides guaranteed
-          #      consistency across several reads, but does not allow
-          #      writes. Snapshot read-only transactions can be configured to
-          #      read at timestamps in the past. Snapshot read-only
-          #      transactions do not need to be committed.
-          #
-          #   3. Partitioned DML. This type of transaction is used to execute
-          #      a single Partitioned DML statement. Partitioned DML partitions
-          #      the key space and runs the DML statement over each partition
-          #      in parallel using separate, internal transactions that commit
-          #      independently. Partitioned DML transactions do not need to be
-          #      committed.
-          #
-          # For transactions that only read, snapshot read-only transactions
-          # provide simpler semantics and are almost always faster. In
-          # particular, read-only transactions do not take locks, so they do
-          # not conflict with read-write transactions. As a consequence of not
-          # taking locks, they also do not abort, so retry loops are not needed.
-          #
-          # Transactions may only read/write data in a single database. They
-          # may, however, read/write data in different tables within that
-          # database.
-          #
-          # ## Locking Read-Write Transactions
-          #
-          # Locking transactions may be used to atomically read-modify-write
-          # data anywhere in a database. This type of transaction is externally
-          # consistent.
-          #
-          # Clients should attempt to minimize the amount of time a transaction
-          # is active. Faster transactions commit with higher probability
-          # and cause less contention. Cloud Spanner attempts to keep read locks
-          # active as long as the transaction continues to do reads, and the
-          # transaction has not been terminated by
-          # Commit or
-          # Rollback.  Long periods of
-          # inactivity at the client may cause Cloud Spanner to release a
-          # transaction&#x27;s locks and abort it.
-          #
-          # Conceptually, a read-write transaction consists of zero or more
-          # reads or SQL statements followed by
-          # Commit. At any time before
-          # Commit, the client can send a
-          # Rollback request to abort the
-          # transaction.
-          #
-          # ### Semantics
-          #
-          # Cloud Spanner can commit the transaction if all read locks it acquired
-          # are still valid at commit time, and it is able to acquire write
-          # locks for all writes. Cloud Spanner can abort the transaction for any
-          # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
-          # that the transaction has not modified any user data in Cloud Spanner.
-          #
-          # Unless the transaction commits, Cloud Spanner makes no guarantees about
-          # how long the transaction&#x27;s locks were held for. It is an error to
-          # use Cloud Spanner locks for any sort of mutual exclusion other than
-          # between Cloud Spanner transactions themselves.
-          #
-          # ### Retrying Aborted Transactions
-          #
-          # When a transaction aborts, the application can choose to retry the
-          # whole transaction again. To maximize the chances of successfully
-          # committing the retry, the client should execute the retry in the
-          # same session as the original attempt. The original session&#x27;s lock
-          # priority increases with each consecutive abort, meaning that each
-          # attempt has a slightly better chance of success than the previous.
-          #
-          # Under some circumstances (e.g., many transactions attempting to
-          # modify the same row(s)), a transaction can abort many times in a
-          # short period before successfully committing. Thus, it is not a good
-          # idea to cap the number of retries a transaction can attempt;
-          # instead, it is better to limit the total amount of wall time spent
-          # retrying.
-          #
-          # ### Idle Transactions
-          #
-          # A transaction is considered idle if it has no outstanding reads or
-          # SQL queries and has not started a read or SQL query within the last 10
-          # seconds. Idle transactions can be aborted by Cloud Spanner so that they
-          # don&#x27;t hold on to locks indefinitely. In that case, the commit will
-          # fail with error `ABORTED`.
-          #
-          # If this behavior is undesirable, periodically executing a simple
-          # SQL query in the transaction (e.g., `SELECT 1`) prevents the
-          # transaction from becoming idle.
-          #
-          # ## Snapshot Read-Only Transactions
-          #
-          # Snapshot read-only transactions provides a simpler method than
-          # locking read-write transactions for doing several consistent
-          # reads. However, this type of transaction does not support writes.
-          #
-          # Snapshot transactions do not take locks. Instead, they work by
-          # choosing a Cloud Spanner timestamp, then executing all reads at that
-          # timestamp. Since they do not acquire locks, they do not block
-          # concurrent read-write transactions.
-          #
-          # Unlike locking read-write transactions, snapshot read-only
-          # transactions never abort. They can fail if the chosen read
-          # timestamp is garbage collected; however, the default garbage
-          # collection policy is generous enough that most applications do not
-          # need to worry about this in practice.
-          #
-          # Snapshot read-only transactions do not need to call
-          # Commit or
-          # Rollback (and in fact are not
-          # permitted to do so).
-          #
-          # To execute a snapshot transaction, the client specifies a timestamp
-          # bound, which tells Cloud Spanner how to choose a read timestamp.
-          #
-          # The types of timestamp bound are:
-          #
-          #   - Strong (the default).
-          #   - Bounded staleness.
-          #   - Exact staleness.
-          #
-          # If the Cloud Spanner database to be read is geographically distributed,
-          # stale read-only transactions can execute more quickly than strong
-          # or read-write transaction, because they are able to execute far
-          # from the leader replica.
-          #
-          # Each type of timestamp bound is discussed in detail below.
-          #
-          # ### Strong
-          #
-          # Strong reads are guaranteed to see the effects of all transactions
-          # that have committed before the start of the read. Furthermore, all
-          # rows yielded by a single read are consistent with each other -- if
-          # any part of the read observes a transaction, all parts of the read
-          # see the transaction.
-          #
-          # Strong reads are not repeatable: two consecutive strong read-only
-          # transactions might return inconsistent results if there are
-          # concurrent writes. If consistency across reads is required, the
-          # reads should be executed within a transaction or at an exact read
-          # timestamp.
-          #
-          # See TransactionOptions.ReadOnly.strong.
-          #
-          # ### Exact Staleness
-          #
-          # These timestamp bounds execute reads at a user-specified
-          # timestamp. Reads at a timestamp are guaranteed to see a consistent
-          # prefix of the global transaction history: they observe
-          # modifications done by all transactions with a commit timestamp &lt;=
-          # the read timestamp, and observe none of the modifications done by
-          # transactions with a larger commit timestamp. They will block until
-          # all conflicting transactions that may be assigned commit timestamps
-          # &lt;= the read timestamp have finished.
-          #
-          # The timestamp can either be expressed as an absolute Cloud Spanner commit
-          # timestamp or a staleness relative to the current time.
-          #
-          # These modes do not require a &quot;negotiation phase&quot; to pick a
-          # timestamp. As a result, they execute slightly faster than the
-          # equivalent boundedly stale concurrency modes. On the other hand,
-          # boundedly stale reads usually return fresher results.
-          #
-          # See TransactionOptions.ReadOnly.read_timestamp and
-          # TransactionOptions.ReadOnly.exact_staleness.
-          #
-          # ### Bounded Staleness
-          #
-          # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
-          # subject to a user-provided staleness bound. Cloud Spanner chooses the
-          # newest timestamp within the staleness bound that allows execution
-          # of the reads at the closest available replica without blocking.
-          #
-          # All rows yielded are consistent with each other -- if any part of
-          # the read observes a transaction, all parts of the read see the
-          # transaction. Boundedly stale reads are not repeatable: two stale
-          # reads, even if they use the same staleness bound, can execute at
-          # different timestamps and thus return inconsistent results.
-          #
-          # Boundedly stale reads execute in two phases: the first phase
-          # negotiates a timestamp among all replicas needed to serve the
-          # read. In the second phase, reads are executed at the negotiated
-          # timestamp.
-          #
-          # As a result of the two phase execution, bounded staleness reads are
-          # usually a little slower than comparable exact staleness
-          # reads. However, they are typically able to return fresher
-          # results, and are more likely to execute at the closest replica.
-          #
-          # Because the timestamp negotiation requires up-front knowledge of
-          # which rows will be read, it can only be used with single-use
-          # read-only transactions.
-          #
-          # See TransactionOptions.ReadOnly.max_staleness and
-          # TransactionOptions.ReadOnly.min_read_timestamp.
-          #
-          # ### Old Read Timestamps and Garbage Collection
-          #
-          # Cloud Spanner continuously garbage collects deleted and overwritten data
-          # in the background to reclaim storage space. This process is known
-          # as &quot;version GC&quot;. By default, version GC reclaims versions after they
-          # are one hour old. Because of this, Cloud Spanner cannot perform reads
-          # at read timestamps more than one hour in the past. This
-          # restriction also applies to in-progress reads and/or SQL queries whose
-          # timestamp become too old while executing. Reads and SQL queries with
-          # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
-          #
-          # ## Partitioned DML Transactions
-          #
-          # Partitioned DML transactions are used to execute DML statements with a
-          # different execution strategy that provides different, and often better,
-          # scalability properties for large, table-wide operations than DML in a
-          # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
-          # should prefer using ReadWrite transactions.
-          #
-          # Partitioned DML partitions the keyspace and runs the DML statement on each
-          # partition in separate, internal transactions. These transactions commit
-          # automatically when complete, and run independently from one another.
-          #
-          # To reduce lock contention, this execution strategy only acquires read locks
-          # on rows that match the WHERE clause of the statement. Additionally, the
-          # smaller per-partition transactions hold locks for less time.
-          #
-          # That said, Partitioned DML is not a drop-in replacement for standard DML used
-          # in ReadWrite transactions.
-          #
-          #  - The DML statement must be fully-partitionable. Specifically, the statement
-          #    must be expressible as the union of many statements which each access only
-          #    a single row of the table.
-          #
-          #  - The statement is not applied atomically to all rows of the table. Rather,
-          #    the statement is applied atomically to partitions of the table, in
-          #    independent transactions. Secondary index rows are updated atomically
-          #    with the base table rows.
-          #
-          #  - Partitioned DML does not guarantee exactly-once execution semantics
-          #    against a partition. The statement will be applied at least once to each
-          #    partition. It is strongly recommended that the DML statement should be
-          #    idempotent to avoid unexpected results. For instance, it is potentially
-          #    dangerous to run a statement such as
-          #    `UPDATE table SET column = column + 1` as it could be run multiple times
-          #    against some rows.
-          #
-          #  - The partitions are committed automatically - there is no support for
-          #    Commit or Rollback. If the call returns an error, or if the client issuing
-          #    the ExecuteSql call dies, it is possible that some rows had the statement
-          #    executed on them successfully. It is also possible that statement was
-          #    never executed against other rows.
-          #
-          #  - Partitioned DML transactions may only contain the execution of a single
-          #    DML statement via ExecuteSql or ExecuteStreamingSql.
-          #
-          #  - If any error is encountered during the execution of the partitioned DML
-          #    operation (for instance, a UNIQUE INDEX violation, division by zero, or a
-          #    value that cannot be stored due to schema constraints), then the
-          #    operation is stopped at that point and an error is returned. It is
-          #    possible that at this point, some partitions have been committed (or even
-          #    committed multiple times), and other partitions have not been run at all.
-          #
-          # Given the above, Partitioned DML is good fit for large, database-wide,
-          # operations that are idempotent, such as deleting old rows from a very large
-          # table.
-        &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
-            #
-            # Authorization to begin a Partitioned DML transaction requires
-            # `spanner.databases.beginPartitionedDmlTransaction` permission
-            # on the `session` resource.
-        },
-        &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
-            #
-            # Authorization to begin a read-write transaction requires
-            # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
-            # on the `session` resource.
-            # transaction type has no options.
-        },
-        &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
-            #
-            # Authorization to begin a read-only transaction requires
-            # `spanner.databases.beginReadOnlyTransaction` permission
-            # on the `session` resource.
-          &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
-              # seconds. Guarantees that all writes that have committed more
-              # than the specified number of seconds ago are visible. Because
-              # Cloud Spanner chooses the exact timestamp, this mode works even if
-              # the client&#x27;s local clock is substantially skewed from Cloud Spanner
-              # commit timestamps.
-              #
-              # Useful for reading the freshest data available at a nearby
-              # replica, while bounding the possible staleness if the local
-              # replica has fallen behind.
-              #
-              # Note that this option can only be used in single-use
-              # transactions.
-          &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
-              #
-              # This is useful for requesting fresher data than some previous
-              # read, or data that is fresh enough to observe the effects of some
-              # previously committed transaction whose timestamp is known.
-              #
-              # Note that this option can only be used in single-use transactions.
-              #
-              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-          &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
-              # are visible.
-          &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
-              # the Transaction message that describes the transaction.
-          &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
-              # old. The timestamp is chosen soon after the read is started.
-              #
-              # Guarantees that all writes that have committed more than the
-              # specified number of seconds ago are visible. Because Cloud Spanner
-              # chooses the exact timestamp, this mode works even if the client&#x27;s
-              # local clock is substantially skewed from Cloud Spanner commit
-              # timestamps.
-              #
-              # Useful for reading at nearby replicas without the distributed
-              # timestamp negotiation overhead of `max_staleness`.
-          &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
-              # reads at a specific timestamp are repeatable; the same read at
-              # the same timestamp always returns the same data. If the
-              # timestamp is in the future, the read will block until the
-              # specified timestamp, modulo the read&#x27;s deadline.
-              #
-              # Useful for large scale consistent reads such as mapreduces, or
-              # for coordinating many reads against a consistent snapshot of the
-              # data.
-              #
-              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-        },
-      },
-      &quot;singleUse&quot;: { # # Transactions # Execute the read or SQL query in a temporary transaction.
-          # This is the most efficient way to execute a transaction that
-          # consists of a single SQL query.
-          #
-          #
-          # Each session can have at most one active transaction at a time. After the
-          # active transaction is completed, the session can immediately be
-          # re-used for the next transaction. It is not necessary to create a
-          # new session for each transaction.
-          #
-          # # Transaction Modes
-          #
-          # Cloud Spanner supports three transaction modes:
-          #
-          #   1. Locking read-write. This type of transaction is the only way
-          #      to write data into Cloud Spanner. These transactions rely on
-          #      pessimistic locking and, if necessary, two-phase commit.
-          #      Locking read-write transactions may abort, requiring the
-          #      application to retry.
-          #
-          #   2. Snapshot read-only. This transaction type provides guaranteed
-          #      consistency across several reads, but does not allow
-          #      writes. Snapshot read-only transactions can be configured to
-          #      read at timestamps in the past. Snapshot read-only
-          #      transactions do not need to be committed.
-          #
-          #   3. Partitioned DML. This type of transaction is used to execute
-          #      a single Partitioned DML statement. Partitioned DML partitions
-          #      the key space and runs the DML statement over each partition
-          #      in parallel using separate, internal transactions that commit
-          #      independently. Partitioned DML transactions do not need to be
-          #      committed.
-          #
-          # For transactions that only read, snapshot read-only transactions
-          # provide simpler semantics and are almost always faster. In
-          # particular, read-only transactions do not take locks, so they do
-          # not conflict with read-write transactions. As a consequence of not
-          # taking locks, they also do not abort, so retry loops are not needed.
-          #
-          # Transactions may only read/write data in a single database. They
-          # may, however, read/write data in different tables within that
-          # database.
-          #
-          # ## Locking Read-Write Transactions
-          #
-          # Locking transactions may be used to atomically read-modify-write
-          # data anywhere in a database. This type of transaction is externally
-          # consistent.
-          #
-          # Clients should attempt to minimize the amount of time a transaction
-          # is active. Faster transactions commit with higher probability
-          # and cause less contention. Cloud Spanner attempts to keep read locks
-          # active as long as the transaction continues to do reads, and the
-          # transaction has not been terminated by
-          # Commit or
-          # Rollback.  Long periods of
-          # inactivity at the client may cause Cloud Spanner to release a
-          # transaction&#x27;s locks and abort it.
-          #
-          # Conceptually, a read-write transaction consists of zero or more
-          # reads or SQL statements followed by
-          # Commit. At any time before
-          # Commit, the client can send a
-          # Rollback request to abort the
-          # transaction.
-          #
-          # ### Semantics
-          #
-          # Cloud Spanner can commit the transaction if all read locks it acquired
-          # are still valid at commit time, and it is able to acquire write
-          # locks for all writes. Cloud Spanner can abort the transaction for any
-          # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
-          # that the transaction has not modified any user data in Cloud Spanner.
-          #
-          # Unless the transaction commits, Cloud Spanner makes no guarantees about
-          # how long the transaction&#x27;s locks were held for. It is an error to
-          # use Cloud Spanner locks for any sort of mutual exclusion other than
-          # between Cloud Spanner transactions themselves.
-          #
-          # ### Retrying Aborted Transactions
-          #
-          # When a transaction aborts, the application can choose to retry the
-          # whole transaction again. To maximize the chances of successfully
-          # committing the retry, the client should execute the retry in the
-          # same session as the original attempt. The original session&#x27;s lock
-          # priority increases with each consecutive abort, meaning that each
-          # attempt has a slightly better chance of success than the previous.
-          #
-          # Under some circumstances (e.g., many transactions attempting to
-          # modify the same row(s)), a transaction can abort many times in a
-          # short period before successfully committing. Thus, it is not a good
-          # idea to cap the number of retries a transaction can attempt;
-          # instead, it is better to limit the total amount of wall time spent
-          # retrying.
-          #
-          # ### Idle Transactions
-          #
-          # A transaction is considered idle if it has no outstanding reads or
-          # SQL queries and has not started a read or SQL query within the last 10
-          # seconds. Idle transactions can be aborted by Cloud Spanner so that they
-          # don&#x27;t hold on to locks indefinitely. In that case, the commit will
-          # fail with error `ABORTED`.
-          #
-          # If this behavior is undesirable, periodically executing a simple
-          # SQL query in the transaction (e.g., `SELECT 1`) prevents the
-          # transaction from becoming idle.
-          #
-          # ## Snapshot Read-Only Transactions
-          #
-          # Snapshot read-only transactions provides a simpler method than
-          # locking read-write transactions for doing several consistent
-          # reads. However, this type of transaction does not support writes.
-          #
-          # Snapshot transactions do not take locks. Instead, they work by
-          # choosing a Cloud Spanner timestamp, then executing all reads at that
-          # timestamp. Since they do not acquire locks, they do not block
-          # concurrent read-write transactions.
-          #
-          # Unlike locking read-write transactions, snapshot read-only
-          # transactions never abort. They can fail if the chosen read
-          # timestamp is garbage collected; however, the default garbage
-          # collection policy is generous enough that most applications do not
-          # need to worry about this in practice.
-          #
-          # Snapshot read-only transactions do not need to call
-          # Commit or
-          # Rollback (and in fact are not
-          # permitted to do so).
-          #
-          # To execute a snapshot transaction, the client specifies a timestamp
-          # bound, which tells Cloud Spanner how to choose a read timestamp.
-          #
-          # The types of timestamp bound are:
-          #
-          #   - Strong (the default).
-          #   - Bounded staleness.
-          #   - Exact staleness.
-          #
-          # If the Cloud Spanner database to be read is geographically distributed,
-          # stale read-only transactions can execute more quickly than strong
-          # or read-write transaction, because they are able to execute far
-          # from the leader replica.
-          #
-          # Each type of timestamp bound is discussed in detail below.
-          #
-          # ### Strong
-          #
-          # Strong reads are guaranteed to see the effects of all transactions
-          # that have committed before the start of the read. Furthermore, all
-          # rows yielded by a single read are consistent with each other -- if
-          # any part of the read observes a transaction, all parts of the read
-          # see the transaction.
-          #
-          # Strong reads are not repeatable: two consecutive strong read-only
-          # transactions might return inconsistent results if there are
-          # concurrent writes. If consistency across reads is required, the
-          # reads should be executed within a transaction or at an exact read
-          # timestamp.
-          #
-          # See TransactionOptions.ReadOnly.strong.
-          #
-          # ### Exact Staleness
-          #
-          # These timestamp bounds execute reads at a user-specified
-          # timestamp. Reads at a timestamp are guaranteed to see a consistent
-          # prefix of the global transaction history: they observe
-          # modifications done by all transactions with a commit timestamp &lt;=
-          # the read timestamp, and observe none of the modifications done by
-          # transactions with a larger commit timestamp. They will block until
-          # all conflicting transactions that may be assigned commit timestamps
-          # &lt;= the read timestamp have finished.
-          #
-          # The timestamp can either be expressed as an absolute Cloud Spanner commit
-          # timestamp or a staleness relative to the current time.
-          #
-          # These modes do not require a &quot;negotiation phase&quot; to pick a
-          # timestamp. As a result, they execute slightly faster than the
-          # equivalent boundedly stale concurrency modes. On the other hand,
-          # boundedly stale reads usually return fresher results.
-          #
-          # See TransactionOptions.ReadOnly.read_timestamp and
-          # TransactionOptions.ReadOnly.exact_staleness.
-          #
-          # ### Bounded Staleness
-          #
-          # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
-          # subject to a user-provided staleness bound. Cloud Spanner chooses the
-          # newest timestamp within the staleness bound that allows execution
-          # of the reads at the closest available replica without blocking.
-          #
-          # All rows yielded are consistent with each other -- if any part of
-          # the read observes a transaction, all parts of the read see the
-          # transaction. Boundedly stale reads are not repeatable: two stale
-          # reads, even if they use the same staleness bound, can execute at
-          # different timestamps and thus return inconsistent results.
-          #
-          # Boundedly stale reads execute in two phases: the first phase
-          # negotiates a timestamp among all replicas needed to serve the
-          # read. In the second phase, reads are executed at the negotiated
-          # timestamp.
-          #
-          # As a result of the two phase execution, bounded staleness reads are
-          # usually a little slower than comparable exact staleness
-          # reads. However, they are typically able to return fresher
-          # results, and are more likely to execute at the closest replica.
-          #
-          # Because the timestamp negotiation requires up-front knowledge of
-          # which rows will be read, it can only be used with single-use
-          # read-only transactions.
-          #
-          # See TransactionOptions.ReadOnly.max_staleness and
-          # TransactionOptions.ReadOnly.min_read_timestamp.
-          #
-          # ### Old Read Timestamps and Garbage Collection
-          #
-          # Cloud Spanner continuously garbage collects deleted and overwritten data
-          # in the background to reclaim storage space. This process is known
-          # as &quot;version GC&quot;. By default, version GC reclaims versions after they
-          # are one hour old. Because of this, Cloud Spanner cannot perform reads
-          # at read timestamps more than one hour in the past. This
-          # restriction also applies to in-progress reads and/or SQL queries whose
-          # timestamp become too old while executing. Reads and SQL queries with
-          # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
-          #
-          # ## Partitioned DML Transactions
-          #
-          # Partitioned DML transactions are used to execute DML statements with a
-          # different execution strategy that provides different, and often better,
-          # scalability properties for large, table-wide operations than DML in a
-          # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
-          # should prefer using ReadWrite transactions.
-          #
-          # Partitioned DML partitions the keyspace and runs the DML statement on each
-          # partition in separate, internal transactions. These transactions commit
-          # automatically when complete, and run independently from one another.
-          #
-          # To reduce lock contention, this execution strategy only acquires read locks
-          # on rows that match the WHERE clause of the statement. Additionally, the
-          # smaller per-partition transactions hold locks for less time.
-          #
-          # That said, Partitioned DML is not a drop-in replacement for standard DML used
-          # in ReadWrite transactions.
-          #
-          #  - The DML statement must be fully-partitionable. Specifically, the statement
-          #    must be expressible as the union of many statements which each access only
-          #    a single row of the table.
-          #
-          #  - The statement is not applied atomically to all rows of the table. Rather,
-          #    the statement is applied atomically to partitions of the table, in
-          #    independent transactions. Secondary index rows are updated atomically
-          #    with the base table rows.
-          #
-          #  - Partitioned DML does not guarantee exactly-once execution semantics
-          #    against a partition. The statement will be applied at least once to each
-          #    partition. It is strongly recommended that the DML statement should be
-          #    idempotent to avoid unexpected results. For instance, it is potentially
-          #    dangerous to run a statement such as
-          #    `UPDATE table SET column = column + 1` as it could be run multiple times
-          #    against some rows.
-          #
-          #  - The partitions are committed automatically - there is no support for
-          #    Commit or Rollback. If the call returns an error, or if the client issuing
-          #    the ExecuteSql call dies, it is possible that some rows had the statement
-          #    executed on them successfully. It is also possible that statement was
-          #    never executed against other rows.
-          #
-          #  - Partitioned DML transactions may only contain the execution of a single
-          #    DML statement via ExecuteSql or ExecuteStreamingSql.
-          #
-          #  - If any error is encountered during the execution of the partitioned DML
-          #    operation (for instance, a UNIQUE INDEX violation, division by zero, or a
-          #    value that cannot be stored due to schema constraints), then the
-          #    operation is stopped at that point and an error is returned. It is
-          #    possible that at this point, some partitions have been committed (or even
-          #    committed multiple times), and other partitions have not been run at all.
-          #
-          # Given the above, Partitioned DML is good fit for large, database-wide,
-          # operations that are idempotent, such as deleting old rows from a very large
-          # table.
-        &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
-            #
-            # Authorization to begin a Partitioned DML transaction requires
-            # `spanner.databases.beginPartitionedDmlTransaction` permission
-            # on the `session` resource.
-        },
-        &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
-            #
-            # Authorization to begin a read-write transaction requires
-            # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
-            # on the `session` resource.
-            # transaction type has no options.
-        },
-        &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
-            #
-            # Authorization to begin a read-only transaction requires
-            # `spanner.databases.beginReadOnlyTransaction` permission
-            # on the `session` resource.
-          &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
-              # seconds. Guarantees that all writes that have committed more
-              # than the specified number of seconds ago are visible. Because
-              # Cloud Spanner chooses the exact timestamp, this mode works even if
-              # the client&#x27;s local clock is substantially skewed from Cloud Spanner
-              # commit timestamps.
-              #
-              # Useful for reading the freshest data available at a nearby
-              # replica, while bounding the possible staleness if the local
-              # replica has fallen behind.
-              #
-              # Note that this option can only be used in single-use
-              # transactions.
-          &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
-              #
-              # This is useful for requesting fresher data than some previous
-              # read, or data that is fresh enough to observe the effects of some
-              # previously committed transaction whose timestamp is known.
-              #
-              # Note that this option can only be used in single-use transactions.
-              #
-              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-          &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
-              # are visible.
-          &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
-              # the Transaction message that describes the transaction.
-          &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
-              # old. The timestamp is chosen soon after the read is started.
-              #
-              # Guarantees that all writes that have committed more than the
-              # specified number of seconds ago are visible. Because Cloud Spanner
-              # chooses the exact timestamp, this mode works even if the client&#x27;s
-              # local clock is substantially skewed from Cloud Spanner commit
-              # timestamps.
-              #
-              # Useful for reading at nearby replicas without the distributed
-              # timestamp negotiation overhead of `max_staleness`.
-          &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
-              # reads at a specific timestamp are repeatable; the same read at
-              # the same timestamp always returns the same data. If the
-              # timestamp is in the future, the read will block until the
-              # specified timestamp, modulo the read&#x27;s deadline.
-              #
-              # Useful for large scale consistent reads such as mapreduces, or
-              # for coordinating many reads against a consistent snapshot of the
-              # data.
-              #
-              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-        },
-      },
-    },
-    &quot;partitionToken&quot;: &quot;A String&quot;, # If present, results will be restricted to the specified partition
-        # previously created using PartitionRead().    There must be an exact
-        # match for the values of fields common to this message and the
-        # PartitionReadRequest message used to create this partition_token.
+    &quot;resumeToken&quot;: &quot;A String&quot;, # If this request is resuming a previously interrupted read,
+        # `resume_token` should be copied from the last
+        # PartialResultSet yielded before the interruption. Doing this
+        # enables the new read to resume where the last read left off. The
+        # rest of the request parameters must exactly match the request
+        # that yielded this token.
     &quot;columns&quot;: [ # Required. The columns of table to be returned for each row matching
         # this request.
       &quot;A String&quot;,
@@ -8039,13 +7349,722 @@
     &quot;limit&quot;: &quot;A String&quot;, # If greater than zero, only the first `limit` rows are yielded. If `limit`
         # is zero, the default is no limit. A limit cannot be specified if
         # `partition_token` is set.
+    &quot;index&quot;: &quot;A String&quot;, # If non-empty, the name of an index on table. This index is
+        # used instead of the table primary key when interpreting key_set
+        # and sorting result rows. See key_set for further information.
     &quot;table&quot;: &quot;A String&quot;, # Required. The name of the table in the database to be read.
-    &quot;resumeToken&quot;: &quot;A String&quot;, # If this request is resuming a previously interrupted read,
-        # `resume_token` should be copied from the last
-        # PartialResultSet yielded before the interruption. Doing this
-        # enables the new read to resume where the last read left off. The
-        # rest of the request parameters must exactly match the request
-        # that yielded this token.
+    &quot;transaction&quot;: { # This message is used to select the transaction in which a # The transaction to use. If none is provided, the default is a
+        # temporary read-only transaction with strong concurrency.
+        # Read or
+        # ExecuteSql call runs.
+        #
+        # See TransactionOptions for more information about transactions.
+      &quot;singleUse&quot;: { # # Transactions # Execute the read or SQL query in a temporary transaction.
+          # This is the most efficient way to execute a transaction that
+          # consists of a single SQL query.
+          #
+          #
+          # Each session can have at most one active transaction at a time (note that
+          # standalone reads and queries use a transaction internally and do count
+          # towards the one transaction limit). After the active transaction is
+          # completed, the session can immediately be re-used for the next transaction.
+          # It is not necessary to create a new session for each transaction.
+          #
+          # # Transaction Modes
+          #
+          # Cloud Spanner supports three transaction modes:
+          #
+          #   1. Locking read-write. This type of transaction is the only way
+          #      to write data into Cloud Spanner. These transactions rely on
+          #      pessimistic locking and, if necessary, two-phase commit.
+          #      Locking read-write transactions may abort, requiring the
+          #      application to retry.
+          #
+          #   2. Snapshot read-only. This transaction type provides guaranteed
+          #      consistency across several reads, but does not allow
+          #      writes. Snapshot read-only transactions can be configured to
+          #      read at timestamps in the past. Snapshot read-only
+          #      transactions do not need to be committed.
+          #
+          #   3. Partitioned DML. This type of transaction is used to execute
+          #      a single Partitioned DML statement. Partitioned DML partitions
+          #      the key space and runs the DML statement over each partition
+          #      in parallel using separate, internal transactions that commit
+          #      independently. Partitioned DML transactions do not need to be
+          #      committed.
+          #
+          # For transactions that only read, snapshot read-only transactions
+          # provide simpler semantics and are almost always faster. In
+          # particular, read-only transactions do not take locks, so they do
+          # not conflict with read-write transactions. As a consequence of not
+          # taking locks, they also do not abort, so retry loops are not needed.
+          #
+          # Transactions may only read/write data in a single database. They
+          # may, however, read/write data in different tables within that
+          # database.
+          #
+          # ## Locking Read-Write Transactions
+          #
+          # Locking transactions may be used to atomically read-modify-write
+          # data anywhere in a database. This type of transaction is externally
+          # consistent.
+          #
+          # Clients should attempt to minimize the amount of time a transaction
+          # is active. Faster transactions commit with higher probability
+          # and cause less contention. Cloud Spanner attempts to keep read locks
+          # active as long as the transaction continues to do reads, and the
+          # transaction has not been terminated by
+          # Commit or
+          # Rollback.  Long periods of
+          # inactivity at the client may cause Cloud Spanner to release a
+          # transaction&#x27;s locks and abort it.
+          #
+          # Conceptually, a read-write transaction consists of zero or more
+          # reads or SQL statements followed by
+          # Commit. At any time before
+          # Commit, the client can send a
+          # Rollback request to abort the
+          # transaction.
+          #
+          # ### Semantics
+          #
+          # Cloud Spanner can commit the transaction if all read locks it acquired
+          # are still valid at commit time, and it is able to acquire write
+          # locks for all writes. Cloud Spanner can abort the transaction for any
+          # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
+          # that the transaction has not modified any user data in Cloud Spanner.
+          #
+          # Unless the transaction commits, Cloud Spanner makes no guarantees about
+          # how long the transaction&#x27;s locks were held for. It is an error to
+          # use Cloud Spanner locks for any sort of mutual exclusion other than
+          # between Cloud Spanner transactions themselves.
+          #
+          # ### Retrying Aborted Transactions
+          #
+          # When a transaction aborts, the application can choose to retry the
+          # whole transaction again. To maximize the chances of successfully
+          # committing the retry, the client should execute the retry in the
+          # same session as the original attempt. The original session&#x27;s lock
+          # priority increases with each consecutive abort, meaning that each
+          # attempt has a slightly better chance of success than the previous.
+          #
+          # Under some circumstances (e.g., many transactions attempting to
+          # modify the same row(s)), a transaction can abort many times in a
+          # short period before successfully committing. Thus, it is not a good
+          # idea to cap the number of retries a transaction can attempt;
+          # instead, it is better to limit the total amount of wall time spent
+          # retrying.
+          #
+          # ### Idle Transactions
+          #
+          # A transaction is considered idle if it has no outstanding reads or
+          # SQL queries and has not started a read or SQL query within the last 10
+          # seconds. Idle transactions can be aborted by Cloud Spanner so that they
+          # don&#x27;t hold on to locks indefinitely. In that case, the commit will
+          # fail with error `ABORTED`.
+          #
+          # If this behavior is undesirable, periodically executing a simple
+          # SQL query in the transaction (e.g., `SELECT 1`) prevents the
+          # transaction from becoming idle.
+          #
+          # ## Snapshot Read-Only Transactions
+          #
+          # Snapshot read-only transactions provides a simpler method than
+          # locking read-write transactions for doing several consistent
+          # reads. However, this type of transaction does not support writes.
+          #
+          # Snapshot transactions do not take locks. Instead, they work by
+          # choosing a Cloud Spanner timestamp, then executing all reads at that
+          # timestamp. Since they do not acquire locks, they do not block
+          # concurrent read-write transactions.
+          #
+          # Unlike locking read-write transactions, snapshot read-only
+          # transactions never abort. They can fail if the chosen read
+          # timestamp is garbage collected; however, the default garbage
+          # collection policy is generous enough that most applications do not
+          # need to worry about this in practice.
+          #
+          # Snapshot read-only transactions do not need to call
+          # Commit or
+          # Rollback (and in fact are not
+          # permitted to do so).
+          #
+          # To execute a snapshot transaction, the client specifies a timestamp
+          # bound, which tells Cloud Spanner how to choose a read timestamp.
+          #
+          # The types of timestamp bound are:
+          #
+          #   - Strong (the default).
+          #   - Bounded staleness.
+          #   - Exact staleness.
+          #
+          # If the Cloud Spanner database to be read is geographically distributed,
+          # stale read-only transactions can execute more quickly than strong
+          # or read-write transaction, because they are able to execute far
+          # from the leader replica.
+          #
+          # Each type of timestamp bound is discussed in detail below.
+          #
+          # ### Strong
+          #
+          # Strong reads are guaranteed to see the effects of all transactions
+          # that have committed before the start of the read. Furthermore, all
+          # rows yielded by a single read are consistent with each other -- if
+          # any part of the read observes a transaction, all parts of the read
+          # see the transaction.
+          #
+          # Strong reads are not repeatable: two consecutive strong read-only
+          # transactions might return inconsistent results if there are
+          # concurrent writes. If consistency across reads is required, the
+          # reads should be executed within a transaction or at an exact read
+          # timestamp.
+          #
+          # See TransactionOptions.ReadOnly.strong.
+          #
+          # ### Exact Staleness
+          #
+          # These timestamp bounds execute reads at a user-specified
+          # timestamp. Reads at a timestamp are guaranteed to see a consistent
+          # prefix of the global transaction history: they observe
+          # modifications done by all transactions with a commit timestamp &lt;=
+          # the read timestamp, and observe none of the modifications done by
+          # transactions with a larger commit timestamp. They will block until
+          # all conflicting transactions that may be assigned commit timestamps
+          # &lt;= the read timestamp have finished.
+          #
+          # The timestamp can either be expressed as an absolute Cloud Spanner commit
+          # timestamp or a staleness relative to the current time.
+          #
+          # These modes do not require a &quot;negotiation phase&quot; to pick a
+          # timestamp. As a result, they execute slightly faster than the
+          # equivalent boundedly stale concurrency modes. On the other hand,
+          # boundedly stale reads usually return fresher results.
+          #
+          # See TransactionOptions.ReadOnly.read_timestamp and
+          # TransactionOptions.ReadOnly.exact_staleness.
+          #
+          # ### Bounded Staleness
+          #
+          # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
+          # subject to a user-provided staleness bound. Cloud Spanner chooses the
+          # newest timestamp within the staleness bound that allows execution
+          # of the reads at the closest available replica without blocking.
+          #
+          # All rows yielded are consistent with each other -- if any part of
+          # the read observes a transaction, all parts of the read see the
+          # transaction. Boundedly stale reads are not repeatable: two stale
+          # reads, even if they use the same staleness bound, can execute at
+          # different timestamps and thus return inconsistent results.
+          #
+          # Boundedly stale reads execute in two phases: the first phase
+          # negotiates a timestamp among all replicas needed to serve the
+          # read. In the second phase, reads are executed at the negotiated
+          # timestamp.
+          #
+          # As a result of the two phase execution, bounded staleness reads are
+          # usually a little slower than comparable exact staleness
+          # reads. However, they are typically able to return fresher
+          # results, and are more likely to execute at the closest replica.
+          #
+          # Because the timestamp negotiation requires up-front knowledge of
+          # which rows will be read, it can only be used with single-use
+          # read-only transactions.
+          #
+          # See TransactionOptions.ReadOnly.max_staleness and
+          # TransactionOptions.ReadOnly.min_read_timestamp.
+          #
+          # ### Old Read Timestamps and Garbage Collection
+          #
+          # Cloud Spanner continuously garbage collects deleted and overwritten data
+          # in the background to reclaim storage space. This process is known
+          # as &quot;version GC&quot;. By default, version GC reclaims versions after they
+          # are one hour old. Because of this, Cloud Spanner cannot perform reads
+          # at read timestamps more than one hour in the past. This
+          # restriction also applies to in-progress reads and/or SQL queries whose
+          # timestamp become too old while executing. Reads and SQL queries with
+          # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
+          #
+          # ## Partitioned DML Transactions
+          #
+          # Partitioned DML transactions are used to execute DML statements with a
+          # different execution strategy that provides different, and often better,
+          # scalability properties for large, table-wide operations than DML in a
+          # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
+          # should prefer using ReadWrite transactions.
+          #
+          # Partitioned DML partitions the keyspace and runs the DML statement on each
+          # partition in separate, internal transactions. These transactions commit
+          # automatically when complete, and run independently from one another.
+          #
+          # To reduce lock contention, this execution strategy only acquires read locks
+          # on rows that match the WHERE clause of the statement. Additionally, the
+          # smaller per-partition transactions hold locks for less time.
+          #
+          # That said, Partitioned DML is not a drop-in replacement for standard DML used
+          # in ReadWrite transactions.
+          #
+          #  - The DML statement must be fully-partitionable. Specifically, the statement
+          #    must be expressible as the union of many statements which each access only
+          #    a single row of the table.
+          #
+          #  - The statement is not applied atomically to all rows of the table. Rather,
+          #    the statement is applied atomically to partitions of the table, in
+          #    independent transactions. Secondary index rows are updated atomically
+          #    with the base table rows.
+          #
+          #  - Partitioned DML does not guarantee exactly-once execution semantics
+          #    against a partition. The statement will be applied at least once to each
+          #    partition. It is strongly recommended that the DML statement should be
+          #    idempotent to avoid unexpected results. For instance, it is potentially
+          #    dangerous to run a statement such as
+          #    `UPDATE table SET column = column + 1` as it could be run multiple times
+          #    against some rows.
+          #
+          #  - The partitions are committed automatically - there is no support for
+          #    Commit or Rollback. If the call returns an error, or if the client issuing
+          #    the ExecuteSql call dies, it is possible that some rows had the statement
+          #    executed on them successfully. It is also possible that statement was
+          #    never executed against other rows.
+          #
+          #  - Partitioned DML transactions may only contain the execution of a single
+          #    DML statement via ExecuteSql or ExecuteStreamingSql.
+          #
+          #  - If any error is encountered during the execution of the partitioned DML
+          #    operation (for instance, a UNIQUE INDEX violation, division by zero, or a
+          #    value that cannot be stored due to schema constraints), then the
+          #    operation is stopped at that point and an error is returned. It is
+          #    possible that at this point, some partitions have been committed (or even
+          #    committed multiple times), and other partitions have not been run at all.
+          #
+          # Given the above, Partitioned DML is good fit for large, database-wide,
+          # operations that are idempotent, such as deleting old rows from a very large
+          # table.
+        &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
+            #
+            # Authorization to begin a read-write transaction requires
+            # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
+            # on the `session` resource.
+            # transaction type has no options.
+        },
+        &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
+            #
+            # Authorization to begin a read-only transaction requires
+            # `spanner.databases.beginReadOnlyTransaction` permission
+            # on the `session` resource.
+          &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
+              # reads at a specific timestamp are repeatable; the same read at
+              # the same timestamp always returns the same data. If the
+              # timestamp is in the future, the read will block until the
+              # specified timestamp, modulo the read&#x27;s deadline.
+              #
+              # Useful for large scale consistent reads such as mapreduces, or
+              # for coordinating many reads against a consistent snapshot of the
+              # data.
+              #
+              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+          &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
+              #
+              # This is useful for requesting fresher data than some previous
+              # read, or data that is fresh enough to observe the effects of some
+              # previously committed transaction whose timestamp is known.
+              #
+              # Note that this option can only be used in single-use transactions.
+              #
+              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+          &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
+              # old. The timestamp is chosen soon after the read is started.
+              #
+              # Guarantees that all writes that have committed more than the
+              # specified number of seconds ago are visible. Because Cloud Spanner
+              # chooses the exact timestamp, this mode works even if the client&#x27;s
+              # local clock is substantially skewed from Cloud Spanner commit
+              # timestamps.
+              #
+              # Useful for reading at nearby replicas without the distributed
+              # timestamp negotiation overhead of `max_staleness`.
+          &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
+              # seconds. Guarantees that all writes that have committed more
+              # than the specified number of seconds ago are visible. Because
+              # Cloud Spanner chooses the exact timestamp, this mode works even if
+              # the client&#x27;s local clock is substantially skewed from Cloud Spanner
+              # commit timestamps.
+              #
+              # Useful for reading the freshest data available at a nearby
+              # replica, while bounding the possible staleness if the local
+              # replica has fallen behind.
+              #
+              # Note that this option can only be used in single-use
+              # transactions.
+          &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
+              # the Transaction message that describes the transaction.
+          &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
+              # are visible.
+        },
+        &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
+            #
+            # Authorization to begin a Partitioned DML transaction requires
+            # `spanner.databases.beginPartitionedDmlTransaction` permission
+            # on the `session` resource.
+        },
+      },
+      &quot;begin&quot;: { # # Transactions # Begin a new transaction and execute this read or SQL query in
+          # it. The transaction ID of the new transaction is returned in
+          # ResultSetMetadata.transaction, which is a Transaction.
+          #
+          #
+          # Each session can have at most one active transaction at a time (note that
+          # standalone reads and queries use a transaction internally and do count
+          # towards the one transaction limit). After the active transaction is
+          # completed, the session can immediately be re-used for the next transaction.
+          # It is not necessary to create a new session for each transaction.
+          #
+          # # Transaction Modes
+          #
+          # Cloud Spanner supports three transaction modes:
+          #
+          #   1. Locking read-write. This type of transaction is the only way
+          #      to write data into Cloud Spanner. These transactions rely on
+          #      pessimistic locking and, if necessary, two-phase commit.
+          #      Locking read-write transactions may abort, requiring the
+          #      application to retry.
+          #
+          #   2. Snapshot read-only. This transaction type provides guaranteed
+          #      consistency across several reads, but does not allow
+          #      writes. Snapshot read-only transactions can be configured to
+          #      read at timestamps in the past. Snapshot read-only
+          #      transactions do not need to be committed.
+          #
+          #   3. Partitioned DML. This type of transaction is used to execute
+          #      a single Partitioned DML statement. Partitioned DML partitions
+          #      the key space and runs the DML statement over each partition
+          #      in parallel using separate, internal transactions that commit
+          #      independently. Partitioned DML transactions do not need to be
+          #      committed.
+          #
+          # For transactions that only read, snapshot read-only transactions
+          # provide simpler semantics and are almost always faster. In
+          # particular, read-only transactions do not take locks, so they do
+          # not conflict with read-write transactions. As a consequence of not
+          # taking locks, they also do not abort, so retry loops are not needed.
+          #
+          # Transactions may only read/write data in a single database. They
+          # may, however, read/write data in different tables within that
+          # database.
+          #
+          # ## Locking Read-Write Transactions
+          #
+          # Locking transactions may be used to atomically read-modify-write
+          # data anywhere in a database. This type of transaction is externally
+          # consistent.
+          #
+          # Clients should attempt to minimize the amount of time a transaction
+          # is active. Faster transactions commit with higher probability
+          # and cause less contention. Cloud Spanner attempts to keep read locks
+          # active as long as the transaction continues to do reads, and the
+          # transaction has not been terminated by
+          # Commit or
+          # Rollback.  Long periods of
+          # inactivity at the client may cause Cloud Spanner to release a
+          # transaction&#x27;s locks and abort it.
+          #
+          # Conceptually, a read-write transaction consists of zero or more
+          # reads or SQL statements followed by
+          # Commit. At any time before
+          # Commit, the client can send a
+          # Rollback request to abort the
+          # transaction.
+          #
+          # ### Semantics
+          #
+          # Cloud Spanner can commit the transaction if all read locks it acquired
+          # are still valid at commit time, and it is able to acquire write
+          # locks for all writes. Cloud Spanner can abort the transaction for any
+          # reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
+          # that the transaction has not modified any user data in Cloud Spanner.
+          #
+          # Unless the transaction commits, Cloud Spanner makes no guarantees about
+          # how long the transaction&#x27;s locks were held for. It is an error to
+          # use Cloud Spanner locks for any sort of mutual exclusion other than
+          # between Cloud Spanner transactions themselves.
+          #
+          # ### Retrying Aborted Transactions
+          #
+          # When a transaction aborts, the application can choose to retry the
+          # whole transaction again. To maximize the chances of successfully
+          # committing the retry, the client should execute the retry in the
+          # same session as the original attempt. The original session&#x27;s lock
+          # priority increases with each consecutive abort, meaning that each
+          # attempt has a slightly better chance of success than the previous.
+          #
+          # Under some circumstances (e.g., many transactions attempting to
+          # modify the same row(s)), a transaction can abort many times in a
+          # short period before successfully committing. Thus, it is not a good
+          # idea to cap the number of retries a transaction can attempt;
+          # instead, it is better to limit the total amount of wall time spent
+          # retrying.
+          #
+          # ### Idle Transactions
+          #
+          # A transaction is considered idle if it has no outstanding reads or
+          # SQL queries and has not started a read or SQL query within the last 10
+          # seconds. Idle transactions can be aborted by Cloud Spanner so that they
+          # don&#x27;t hold on to locks indefinitely. In that case, the commit will
+          # fail with error `ABORTED`.
+          #
+          # If this behavior is undesirable, periodically executing a simple
+          # SQL query in the transaction (e.g., `SELECT 1`) prevents the
+          # transaction from becoming idle.
+          #
+          # ## Snapshot Read-Only Transactions
+          #
+          # Snapshot read-only transactions provides a simpler method than
+          # locking read-write transactions for doing several consistent
+          # reads. However, this type of transaction does not support writes.
+          #
+          # Snapshot transactions do not take locks. Instead, they work by
+          # choosing a Cloud Spanner timestamp, then executing all reads at that
+          # timestamp. Since they do not acquire locks, they do not block
+          # concurrent read-write transactions.
+          #
+          # Unlike locking read-write transactions, snapshot read-only
+          # transactions never abort. They can fail if the chosen read
+          # timestamp is garbage collected; however, the default garbage
+          # collection policy is generous enough that most applications do not
+          # need to worry about this in practice.
+          #
+          # Snapshot read-only transactions do not need to call
+          # Commit or
+          # Rollback (and in fact are not
+          # permitted to do so).
+          #
+          # To execute a snapshot transaction, the client specifies a timestamp
+          # bound, which tells Cloud Spanner how to choose a read timestamp.
+          #
+          # The types of timestamp bound are:
+          #
+          #   - Strong (the default).
+          #   - Bounded staleness.
+          #   - Exact staleness.
+          #
+          # If the Cloud Spanner database to be read is geographically distributed,
+          # stale read-only transactions can execute more quickly than strong
+          # or read-write transaction, because they are able to execute far
+          # from the leader replica.
+          #
+          # Each type of timestamp bound is discussed in detail below.
+          #
+          # ### Strong
+          #
+          # Strong reads are guaranteed to see the effects of all transactions
+          # that have committed before the start of the read. Furthermore, all
+          # rows yielded by a single read are consistent with each other -- if
+          # any part of the read observes a transaction, all parts of the read
+          # see the transaction.
+          #
+          # Strong reads are not repeatable: two consecutive strong read-only
+          # transactions might return inconsistent results if there are
+          # concurrent writes. If consistency across reads is required, the
+          # reads should be executed within a transaction or at an exact read
+          # timestamp.
+          #
+          # See TransactionOptions.ReadOnly.strong.
+          #
+          # ### Exact Staleness
+          #
+          # These timestamp bounds execute reads at a user-specified
+          # timestamp. Reads at a timestamp are guaranteed to see a consistent
+          # prefix of the global transaction history: they observe
+          # modifications done by all transactions with a commit timestamp &lt;=
+          # the read timestamp, and observe none of the modifications done by
+          # transactions with a larger commit timestamp. They will block until
+          # all conflicting transactions that may be assigned commit timestamps
+          # &lt;= the read timestamp have finished.
+          #
+          # The timestamp can either be expressed as an absolute Cloud Spanner commit
+          # timestamp or a staleness relative to the current time.
+          #
+          # These modes do not require a &quot;negotiation phase&quot; to pick a
+          # timestamp. As a result, they execute slightly faster than the
+          # equivalent boundedly stale concurrency modes. On the other hand,
+          # boundedly stale reads usually return fresher results.
+          #
+          # See TransactionOptions.ReadOnly.read_timestamp and
+          # TransactionOptions.ReadOnly.exact_staleness.
+          #
+          # ### Bounded Staleness
+          #
+          # Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
+          # subject to a user-provided staleness bound. Cloud Spanner chooses the
+          # newest timestamp within the staleness bound that allows execution
+          # of the reads at the closest available replica without blocking.
+          #
+          # All rows yielded are consistent with each other -- if any part of
+          # the read observes a transaction, all parts of the read see the
+          # transaction. Boundedly stale reads are not repeatable: two stale
+          # reads, even if they use the same staleness bound, can execute at
+          # different timestamps and thus return inconsistent results.
+          #
+          # Boundedly stale reads execute in two phases: the first phase
+          # negotiates a timestamp among all replicas needed to serve the
+          # read. In the second phase, reads are executed at the negotiated
+          # timestamp.
+          #
+          # As a result of the two phase execution, bounded staleness reads are
+          # usually a little slower than comparable exact staleness
+          # reads. However, they are typically able to return fresher
+          # results, and are more likely to execute at the closest replica.
+          #
+          # Because the timestamp negotiation requires up-front knowledge of
+          # which rows will be read, it can only be used with single-use
+          # read-only transactions.
+          #
+          # See TransactionOptions.ReadOnly.max_staleness and
+          # TransactionOptions.ReadOnly.min_read_timestamp.
+          #
+          # ### Old Read Timestamps and Garbage Collection
+          #
+          # Cloud Spanner continuously garbage collects deleted and overwritten data
+          # in the background to reclaim storage space. This process is known
+          # as &quot;version GC&quot;. By default, version GC reclaims versions after they
+          # are one hour old. Because of this, Cloud Spanner cannot perform reads
+          # at read timestamps more than one hour in the past. This
+          # restriction also applies to in-progress reads and/or SQL queries whose
+          # timestamp become too old while executing. Reads and SQL queries with
+          # too-old read timestamps fail with the error `FAILED_PRECONDITION`.
+          #
+          # ## Partitioned DML Transactions
+          #
+          # Partitioned DML transactions are used to execute DML statements with a
+          # different execution strategy that provides different, and often better,
+          # scalability properties for large, table-wide operations than DML in a
+          # ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
+          # should prefer using ReadWrite transactions.
+          #
+          # Partitioned DML partitions the keyspace and runs the DML statement on each
+          # partition in separate, internal transactions. These transactions commit
+          # automatically when complete, and run independently from one another.
+          #
+          # To reduce lock contention, this execution strategy only acquires read locks
+          # on rows that match the WHERE clause of the statement. Additionally, the
+          # smaller per-partition transactions hold locks for less time.
+          #
+          # That said, Partitioned DML is not a drop-in replacement for standard DML used
+          # in ReadWrite transactions.
+          #
+          #  - The DML statement must be fully-partitionable. Specifically, the statement
+          #    must be expressible as the union of many statements which each access only
+          #    a single row of the table.
+          #
+          #  - The statement is not applied atomically to all rows of the table. Rather,
+          #    the statement is applied atomically to partitions of the table, in
+          #    independent transactions. Secondary index rows are updated atomically
+          #    with the base table rows.
+          #
+          #  - Partitioned DML does not guarantee exactly-once execution semantics
+          #    against a partition. The statement will be applied at least once to each
+          #    partition. It is strongly recommended that the DML statement should be
+          #    idempotent to avoid unexpected results. For instance, it is potentially
+          #    dangerous to run a statement such as
+          #    `UPDATE table SET column = column + 1` as it could be run multiple times
+          #    against some rows.
+          #
+          #  - The partitions are committed automatically - there is no support for
+          #    Commit or Rollback. If the call returns an error, or if the client issuing
+          #    the ExecuteSql call dies, it is possible that some rows had the statement
+          #    executed on them successfully. It is also possible that statement was
+          #    never executed against other rows.
+          #
+          #  - Partitioned DML transactions may only contain the execution of a single
+          #    DML statement via ExecuteSql or ExecuteStreamingSql.
+          #
+          #  - If any error is encountered during the execution of the partitioned DML
+          #    operation (for instance, a UNIQUE INDEX violation, division by zero, or a
+          #    value that cannot be stored due to schema constraints), then the
+          #    operation is stopped at that point and an error is returned. It is
+          #    possible that at this point, some partitions have been committed (or even
+          #    committed multiple times), and other partitions have not been run at all.
+          #
+          # Given the above, Partitioned DML is good fit for large, database-wide,
+          # operations that are idempotent, such as deleting old rows from a very large
+          # table.
+        &quot;readWrite&quot;: { # Message type to initiate a read-write transaction. Currently this # Transaction may write.
+            #
+            # Authorization to begin a read-write transaction requires
+            # `spanner.databases.beginOrRollbackReadWriteTransaction` permission
+            # on the `session` resource.
+            # transaction type has no options.
+        },
+        &quot;readOnly&quot;: { # Message type to initiate a read-only transaction. # Transaction will not write.
+            #
+            # Authorization to begin a read-only transaction requires
+            # `spanner.databases.beginReadOnlyTransaction` permission
+            # on the `session` resource.
+          &quot;readTimestamp&quot;: &quot;A String&quot;, # Executes all reads at the given timestamp. Unlike other modes,
+              # reads at a specific timestamp are repeatable; the same read at
+              # the same timestamp always returns the same data. If the
+              # timestamp is in the future, the read will block until the
+              # specified timestamp, modulo the read&#x27;s deadline.
+              #
+              # Useful for large scale consistent reads such as mapreduces, or
+              # for coordinating many reads against a consistent snapshot of the
+              # data.
+              #
+              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+          &quot;minReadTimestamp&quot;: &quot;A String&quot;, # Executes all reads at a timestamp &gt;= `min_read_timestamp`.
+              #
+              # This is useful for requesting fresher data than some previous
+              # read, or data that is fresh enough to observe the effects of some
+              # previously committed transaction whose timestamp is known.
+              #
+              # Note that this option can only be used in single-use transactions.
+              #
+              # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+              # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+          &quot;exactStaleness&quot;: &quot;A String&quot;, # Executes all reads at a timestamp that is `exact_staleness`
+              # old. The timestamp is chosen soon after the read is started.
+              #
+              # Guarantees that all writes that have committed more than the
+              # specified number of seconds ago are visible. Because Cloud Spanner
+              # chooses the exact timestamp, this mode works even if the client&#x27;s
+              # local clock is substantially skewed from Cloud Spanner commit
+              # timestamps.
+              #
+              # Useful for reading at nearby replicas without the distributed
+              # timestamp negotiation overhead of `max_staleness`.
+          &quot;maxStaleness&quot;: &quot;A String&quot;, # Read data at a timestamp &gt;= `NOW - max_staleness`
+              # seconds. Guarantees that all writes that have committed more
+              # than the specified number of seconds ago are visible. Because
+              # Cloud Spanner chooses the exact timestamp, this mode works even if
+              # the client&#x27;s local clock is substantially skewed from Cloud Spanner
+              # commit timestamps.
+              #
+              # Useful for reading the freshest data available at a nearby
+              # replica, while bounding the possible staleness if the local
+              # replica has fallen behind.
+              #
+              # Note that this option can only be used in single-use
+              # transactions.
+          &quot;returnReadTimestamp&quot;: True or False, # If true, the Cloud Spanner-selected read timestamp is included in
+              # the Transaction message that describes the transaction.
+          &quot;strong&quot;: True or False, # Read at a timestamp where all previously committed transactions
+              # are visible.
+        },
+        &quot;partitionedDml&quot;: { # Message type to initiate a Partitioned DML transaction. # Partitioned DML transaction.
+            #
+            # Authorization to begin a Partitioned DML transaction requires
+            # `spanner.databases.beginPartitionedDmlTransaction` permission
+            # on the `session` resource.
+        },
+      },
+      &quot;id&quot;: &quot;A String&quot;, # Execute the read or SQL query in a previously-started transaction.
+    },
+    &quot;partitionToken&quot;: &quot;A String&quot;, # If present, results will be restricted to the specified partition
+        # previously created using PartitionRead().    There must be an exact
+        # match for the values of fields common to this message and the
+        # PartitionReadRequest message used to create this partition_token.
     &quot;keySet&quot;: { # `KeySet` defines a collection of Cloud Spanner keys and/or key ranges. All # Required. `key_set` identifies the rows to be yielded. `key_set` names the
         # primary keys of the rows in table to be yielded, unless index
         # is present. If index is present, then key_set instead names
@@ -8153,22 +8172,22 @@
             #
             # Note that 100 is passed as the start, and 1 is passed as the end,
             # because `Key` is a descending column in the schema.
-          &quot;startOpen&quot;: [ # If the start is open, then the range excludes rows whose first
-              # `len(start_open)` key columns exactly match `start_open`.
-            &quot;&quot;,
-          ],
           &quot;endClosed&quot;: [ # If the end is closed, then the range includes all rows whose
               # first `len(end_closed)` key columns exactly match `end_closed`.
             &quot;&quot;,
           ],
-          &quot;endOpen&quot;: [ # If the end is open, then the range excludes rows whose first
-              # `len(end_open)` key columns exactly match `end_open`.
-            &quot;&quot;,
-          ],
           &quot;startClosed&quot;: [ # If the start is closed, then the range includes all rows whose
               # first `len(start_closed)` key columns exactly match `start_closed`.
             &quot;&quot;,
           ],
+          &quot;startOpen&quot;: [ # If the start is open, then the range excludes rows whose first
+              # `len(start_open)` key columns exactly match `start_open`.
+            &quot;&quot;,
+          ],
+          &quot;endOpen&quot;: [ # If the end is open, then the range excludes rows whose first
+              # `len(end_open)` key columns exactly match `end_open`.
+            &quot;&quot;,
+          ],
         },
       ],
       &quot;keys&quot;: [ # A list of specific keys. Entries in `keys` should have exactly as
@@ -8183,9 +8202,6 @@
           # `KeySet` matches all keys in the table or index. Note that any keys
           # specified in `keys` or `ranges` are only yielded once.
     },
-    &quot;index&quot;: &quot;A String&quot;, # If non-empty, the name of an index on table. This index is
-        # used instead of the table primary key when interpreting key_set
-        # and sorting result rows. See key_set for further information.
   }
 
   x__xgafv: string, V1 error format.
@@ -8205,6 +8221,18 @@
         # only once with the last response in the stream.
         # This field will also be present in the last response for DML
         # statements.
+      &quot;queryStats&quot;: { # Aggregated statistics from the execution of the query. Only present when
+          # the query is profiled. For example, a query could return the statistics as
+          # follows:
+          #
+          #     {
+          #       &quot;rows_returned&quot;: &quot;3&quot;,
+          #       &quot;elapsed_time&quot;: &quot;1.22 secs&quot;,
+          #       &quot;cpu_time&quot;: &quot;1.19 secs&quot;
+          #     }
+        &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
+      },
+      &quot;rowCountExact&quot;: &quot;A String&quot;, # Standard DML returns an exact count of rows that were modified.
       &quot;rowCountLowerBound&quot;: &quot;A String&quot;, # Partitioned DML does not offer exactly-once semantics, so it
           # returns a lower bound of the rows modified.
       &quot;queryPlan&quot;: { # Contains an ordered list of nodes appearing in the query plan. # QueryPlan for the query associated with this result.
@@ -8212,37 +8240,10 @@
             # with the plan root. Each PlanNode&#x27;s `id` corresponds to its index in
             # `plan_nodes`.
           { # Node information for nodes appearing in a QueryPlan.plan_nodes.
-            &quot;displayName&quot;: &quot;A String&quot;, # The display name for the node.
-            &quot;executionStats&quot;: { # The execution statistics associated with the node, contained in a group of
-                # key-value pairs. Only present if the plan was returned as a result of a
-                # profile query. For example, number of executions, number of rows/time per
-                # execution etc.
-              &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
-            },
-            &quot;shortRepresentation&quot;: { # Condensed representation of a node and its subtree. Only present for # Condensed representation for SCALAR nodes.
-                # `SCALAR` PlanNode(s).
-              &quot;subqueries&quot;: { # A mapping of (subquery variable name) -&gt; (subquery node id) for cases
-                  # where the `description` string of this node references a `SCALAR`
-                  # subquery contained in the expression subtree rooted at this node. The
-                  # referenced `SCALAR` subquery may not necessarily be a direct child of
-                  # this node.
-                &quot;a_key&quot;: 42,
-              },
-              &quot;description&quot;: &quot;A String&quot;, # A string representation of the expression subtree rooted at this node.
-            },
-            &quot;metadata&quot;: { # Attributes relevant to the node contained in a group of key-value pairs.
-                # For example, a Parameter Reference node could have the following
-                # information in its metadata:
-                #
-                #     {
-                #       &quot;parameter_reference&quot;: &quot;param1&quot;,
-                #       &quot;parameter_type&quot;: &quot;array&quot;
-                #     }
-              &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
-            },
             &quot;childLinks&quot;: [ # List of child node `index`es and their relationship to this parent.
               { # Metadata associated with a parent-child relationship appearing in a
                   # PlanNode.
+                &quot;childIndex&quot;: 42, # The node to which the link points.
                 &quot;type&quot;: &quot;A String&quot;, # The type of the link. For example, in Hash Joins this could be used to
                     # distinguish between the build child and the probe child, or in the case
                     # of the child being an output variable, to represent the tag associated
@@ -8255,34 +8256,97 @@
                     # created for each column that is read by the operator. The corresponding
                     # `variable` fields will be set to the variable names assigned to the
                     # columns.
-                &quot;childIndex&quot;: 42, # The node to which the link points.
               },
             ],
-            &quot;index&quot;: 42, # The `PlanNode`&#x27;s index in node list.
+            &quot;metadata&quot;: { # Attributes relevant to the node contained in a group of key-value pairs.
+                # For example, a Parameter Reference node could have the following
+                # information in its metadata:
+                #
+                #     {
+                #       &quot;parameter_reference&quot;: &quot;param1&quot;,
+                #       &quot;parameter_type&quot;: &quot;array&quot;
+                #     }
+              &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
+            },
             &quot;kind&quot;: &quot;A String&quot;, # Used to determine the type of node. May be needed for visualizing
                 # different kinds of nodes differently. For example, If the node is a
                 # SCALAR node, it will have a condensed representation
                 # which can be used to directly embed a description of the node in its
                 # parent.
+            &quot;shortRepresentation&quot;: { # Condensed representation of a node and its subtree. Only present for # Condensed representation for SCALAR nodes.
+                # `SCALAR` PlanNode(s).
+              &quot;subqueries&quot;: { # A mapping of (subquery variable name) -&gt; (subquery node id) for cases
+                  # where the `description` string of this node references a `SCALAR`
+                  # subquery contained in the expression subtree rooted at this node. The
+                  # referenced `SCALAR` subquery may not necessarily be a direct child of
+                  # this node.
+                &quot;a_key&quot;: 42,
+              },
+              &quot;description&quot;: &quot;A String&quot;, # A string representation of the expression subtree rooted at this node.
+            },
+            &quot;displayName&quot;: &quot;A String&quot;, # The display name for the node.
+            &quot;index&quot;: 42, # The `PlanNode`&#x27;s index in node list.
+            &quot;executionStats&quot;: { # The execution statistics associated with the node, contained in a group of
+                # key-value pairs. Only present if the plan was returned as a result of a
+                # profile query. For example, number of executions, number of rows/time per
+                # execution etc.
+              &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
+            },
           },
         ],
       },
-      &quot;rowCountExact&quot;: &quot;A String&quot;, # Standard DML returns an exact count of rows that were modified.
-      &quot;queryStats&quot;: { # Aggregated statistics from the execution of the query. Only present when
-          # the query is profiled. For example, a query could return the statistics as
-          # follows:
+    },
+    &quot;resumeToken&quot;: &quot;A String&quot;, # Streaming calls might be interrupted for a variety of reasons, such
+        # as TCP connection loss. If this occurs, the stream of results can
+        # be resumed by re-sending the original request and including
+        # `resume_token`. Note that executing any other transaction in the
+        # same session invalidates the token.
+    &quot;metadata&quot;: { # Metadata about a ResultSet or PartialResultSet. # Metadata about the result set, such as row type information.
+        # Only present in the first response.
+      &quot;transaction&quot;: { # A transaction. # If the read or SQL query began a transaction as a side-effect, the
+          # information about the new transaction is yielded here.
+        &quot;readTimestamp&quot;: &quot;A String&quot;, # For snapshot read-only transactions, the read timestamp chosen
+            # for the transaction. Not returned by default: see
+            # TransactionOptions.ReadOnly.return_read_timestamp.
+            #
+            # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
+            # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
+        &quot;id&quot;: &quot;A String&quot;, # `id` may be used to identify the transaction in subsequent
+            # Read,
+            # ExecuteSql,
+            # Commit, or
+            # Rollback calls.
+            #
+            # Single-use read-only transactions do not have IDs, because
+            # single-use transactions do not support multiple requests.
+      },
+      &quot;rowType&quot;: { # `StructType` defines the fields of a STRUCT type. # Indicates the field names and types for the rows in the result
+          # set.  For example, a SQL query like `&quot;SELECT UserId, UserName FROM
+          # Users&quot;` could return a `row_type` value like:
           #
-          #     {
-          #       &quot;rows_returned&quot;: &quot;3&quot;,
-          #       &quot;elapsed_time&quot;: &quot;1.22 secs&quot;,
-          #       &quot;cpu_time&quot;: &quot;1.19 secs&quot;
-          #     }
-        &quot;a_key&quot;: &quot;&quot;, # Properties of the object.
+          #     &quot;fields&quot;: [
+          #       { &quot;name&quot;: &quot;UserId&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;INT64&quot; } },
+          #       { &quot;name&quot;: &quot;UserName&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;STRING&quot; } },
+          #     ]
+        &quot;fields&quot;: [ # The list of fields that make up this struct. Order is
+            # significant, because values of this struct type are represented as
+            # lists, where the order of field values matches the order of
+            # fields in the StructType. In turn, the order of fields
+            # matches the order of columns in a read request, or the order of
+            # fields in the `SELECT` clause of a query.
+          { # Message representing a single field of a struct.
+            &quot;type&quot;: # Object with schema name: Type # The type of the field.
+            &quot;name&quot;: &quot;A String&quot;, # The name of the field. For reads, this is the column name. For
+                # SQL queries, it is the column alias (e.g., `&quot;Word&quot;` in the
+                # query `&quot;SELECT &#x27;hello&#x27; AS Word&quot;`), or the column name (e.g.,
+                # `&quot;ColName&quot;` in the query `&quot;SELECT ColName FROM Table&quot;`). Some
+                # columns might have an empty name (e.g., !&quot;SELECT
+                # UPPER(ColName)&quot;`). Note that a query result can contain
+                # multiple fields with the same name.
+          },
+        ],
       },
     },
-    &quot;chunkedValue&quot;: True or False, # If true, then the final value in values is chunked, and must
-        # be combined with more values from subsequent `PartialResultSet`s
-        # to obtain a complete field value.
     &quot;values&quot;: [ # A streamed result set consists of a stream of values, which might
         # be split into many `PartialResultSet` messages to accommodate
         # large rows and/or large values. Every N complete values defines a
@@ -8358,57 +8422,9 @@
         # field value `&quot;World&quot; = &quot;W&quot; + &quot;orl&quot; + &quot;d&quot;`.
       &quot;&quot;,
     ],
-    &quot;metadata&quot;: { # Metadata about a ResultSet or PartialResultSet. # Metadata about the result set, such as row type information.
-        # Only present in the first response.
-      &quot;rowType&quot;: { # `StructType` defines the fields of a STRUCT type. # Indicates the field names and types for the rows in the result
-          # set.  For example, a SQL query like `&quot;SELECT UserId, UserName FROM
-          # Users&quot;` could return a `row_type` value like:
-          #
-          #     &quot;fields&quot;: [
-          #       { &quot;name&quot;: &quot;UserId&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;INT64&quot; } },
-          #       { &quot;name&quot;: &quot;UserName&quot;, &quot;type&quot;: { &quot;code&quot;: &quot;STRING&quot; } },
-          #     ]
-        &quot;fields&quot;: [ # The list of fields that make up this struct. Order is
-            # significant, because values of this struct type are represented as
-            # lists, where the order of field values matches the order of
-            # fields in the StructType. In turn, the order of fields
-            # matches the order of columns in a read request, or the order of
-            # fields in the `SELECT` clause of a query.
-          { # Message representing a single field of a struct.
-            &quot;name&quot;: &quot;A String&quot;, # The name of the field. For reads, this is the column name. For
-                # SQL queries, it is the column alias (e.g., `&quot;Word&quot;` in the
-                # query `&quot;SELECT &#x27;hello&#x27; AS Word&quot;`), or the column name (e.g.,
-                # `&quot;ColName&quot;` in the query `&quot;SELECT ColName FROM Table&quot;`). Some
-                # columns might have an empty name (e.g., !&quot;SELECT
-                # UPPER(ColName)&quot;`). Note that a query result can contain
-                # multiple fields with the same name.
-            &quot;type&quot;: # Object with schema name: Type # The type of the field.
-          },
-        ],
-      },
-      &quot;transaction&quot;: { # A transaction. # If the read or SQL query began a transaction as a side-effect, the
-          # information about the new transaction is yielded here.
-        &quot;readTimestamp&quot;: &quot;A String&quot;, # For snapshot read-only transactions, the read timestamp chosen
-            # for the transaction. Not returned by default: see
-            # TransactionOptions.ReadOnly.return_read_timestamp.
-            #
-            # A timestamp in RFC3339 UTC \&quot;Zulu\&quot; format, accurate to nanoseconds.
-            # Example: `&quot;2014-10-02T15:01:23.045123456Z&quot;`.
-        &quot;id&quot;: &quot;A String&quot;, # `id` may be used to identify the transaction in subsequent
-            # Read,
-            # ExecuteSql,
-            # Commit, or
-            # Rollback calls.
-            #
-            # Single-use read-only transactions do not have IDs, because
-            # single-use transactions do not support multiple requests.
-      },
-    },
-    &quot;resumeToken&quot;: &quot;A String&quot;, # Streaming calls might be interrupted for a variety of reasons, such
-        # as TCP connection loss. If this occurs, the stream of results can
-        # be resumed by re-sending the original request and including
-        # `resume_token`. Note that executing any other transaction in the
-        # same session invalidates the token.
+    &quot;chunkedValue&quot;: True or False, # If true, then the final value in values is chunked, and must
+        # be combined with more values from subsequent `PartialResultSet`s
+        # to obtain a complete field value.
   }</pre>
 </div>