chore: regens API reference docs (#889)

diff --git a/docs/dyn/speech_v1.speech.html b/docs/dyn/speech_v1.speech.html
index 18a6564..e743764 100644
--- a/docs/dyn/speech_v1.speech.html
+++ b/docs/dyn/speech_v1.speech.html
@@ -75,14 +75,14 @@
 <h1><a href="speech_v1.html">Cloud Speech-to-Text API</a> . <a href="speech_v1.speech.html">speech</a></h1>
 <h2>Instance Methods</h2>
 <p class="toc_element">
-  <code><a href="#longrunningrecognize">longrunningrecognize(body, x__xgafv=None)</a></code></p>
+  <code><a href="#longrunningrecognize">longrunningrecognize(body=None, x__xgafv=None)</a></code></p>
 <p class="firstline">Performs asynchronous speech recognition: receive results via the</p>
 <p class="toc_element">
-  <code><a href="#recognize">recognize(body, x__xgafv=None)</a></code></p>
+  <code><a href="#recognize">recognize(body=None, x__xgafv=None)</a></code></p>
 <p class="firstline">Performs synchronous speech recognition: receive results after all audio</p>
 <h3>Method Details</h3>
 <div class="method">
-    <code class="details" id="longrunningrecognize">longrunningrecognize(body, x__xgafv=None)</code>
+    <code class="details" id="longrunningrecognize">longrunningrecognize(body=None, x__xgafv=None)</code>
   <pre>Performs asynchronous speech recognition: receive results via the
 google.longrunning.Operations interface. Returns either an
 `Operation.error` or an `Operation.response` which contains
@@ -91,15 +91,15 @@
 [how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).
 
 Args:
-  body: object, The request body. (required)
+  body: object, The request body.
     The object takes the form of:
 
 { # The top-level message sent by the client for the `LongRunningRecognize`
       # method.
-    "audio": { # Contains audio data in the encoding specified in the `RecognitionConfig`. # *Required* The audio data to be recognized.
+    "audio": { # Contains audio data in the encoding specified in the `RecognitionConfig`. # Required. The audio data to be recognized.
         # Either `content` or `uri` must be supplied. Supplying both or neither
         # returns google.rpc.Code.INVALID_ARGUMENT. See
-        # [content limits](/speech-to-text/quotas#content).
+        # [content limits](https://cloud.google.com/speech-to-text/quotas#content).
       "content": "A String", # The audio data bytes encoded as specified in
           # `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
           # pure binary representation, whereas JSON representations use base64.
@@ -111,15 +111,16 @@
           # google.rpc.Code.INVALID_ARGUMENT). For more information, see
           # [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
     },
-    "config": { # Provides information to the recognizer that specifies how to process the # *Required* Provides information to the recognizer that specifies how to
+    "config": { # Provides information to the recognizer that specifies how to process the # Required. Provides information to the recognizer that specifies how to
         # process the request.
         # request.
-      "languageCode": "A String", # *Required* The language of the supplied audio as a
+      "languageCode": "A String", # Required. The language of the supplied audio as a
           # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
           # Example: "en-US".
-          # See [Language Support](/speech-to-text/docs/languages)
-          # for a list of the currently supported language codes.
-      "audioChannelCount": 42, # *Optional* The number of channels in the input audio data.
+          # See [Language
+          # Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+          # of the currently supported language codes.
+      "audioChannelCount": 42, # The number of channels in the input audio data.
           # ONLY set this for MULTI-CHANNEL recognition.
           # Valid values for LINEAR16 and FLAC are `1`-`8`.
           # Valid values for OGG_OPUS are '1'-'254'.
@@ -131,44 +132,34 @@
       "encoding": "A String", # Encoding of audio data sent in all `RecognitionAudio` messages.
           # This field is optional for `FLAC` and `WAV` audio files and required
           # for all other audio formats. For details, see AudioEncoding.
-      "enableAutomaticPunctuation": True or False, # *Optional* If 'true', adds punctuation to recognition result hypotheses.
+      "enableAutomaticPunctuation": True or False, # If 'true', adds punctuation to recognition result hypotheses.
           # This feature is only available in select languages. Setting this for
           # requests in other languages has no effect at all.
           # The default 'false' value does not add punctuation to result hypotheses.
-          # Note: This is currently offered as an experimental service, complimentary
-          # to all users. In the future this may be exclusively available as a
-          # premium feature.
-      "enableSeparateRecognitionPerChannel": True or False, # This needs to be set to `true` explicitly and `audio_channel_count` > 1
+      "enableSeparateRecognitionPerChannel": True or False, # This needs to be set to `true` explicitly and `audio_channel_count` &gt; 1
           # to get each channel recognized separately. The recognition result will
           # contain a `channel_tag` field to state which channel that result belongs
           # to. If this is not true, we will only recognize the first channel. The
           # request is billed cumulatively for all channels recognized:
           # `audio_channel_count` multiplied by the length of the audio.
-      "enableWordTimeOffsets": True or False, # *Optional* If `true`, the top result includes a list of words and
+      "enableWordTimeOffsets": True or False, # If `true`, the top result includes a list of words and
           # the start and end time offsets (timestamps) for those words. If
           # `false`, no word-level time offset information is returned. The default is
           # `false`.
-      "maxAlternatives": 42, # *Optional* Maximum number of recognition hypotheses to be returned.
+      "maxAlternatives": 42, # Maximum number of recognition hypotheses to be returned.
           # Specifically, the maximum number of `SpeechRecognitionAlternative` messages
           # within each `SpeechRecognitionResult`.
           # The server may return fewer than `max_alternatives`.
           # Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
           # one. If omitted, will return a maximum of one.
-      "useEnhanced": True or False, # *Optional* Set to true to use an enhanced model for speech recognition.
+      "useEnhanced": True or False, # Set to true to use an enhanced model for speech recognition.
           # If `use_enhanced` is set to true and the `model` field is not set, then
-          # an appropriate enhanced model is chosen if:
-          # 1. project is eligible for requesting enhanced models
-          # 2. an enhanced model exists for the audio
+          # an appropriate enhanced model is chosen if an enhanced model exists for
+          # the audio.
           #
           # If `use_enhanced` is true and an enhanced version of the specified model
           # does not exist, then the speech is recognized using the standard version
           # of the specified model.
-          #
-          # Enhanced speech models require that you opt-in to data logging using
-          # instructions in the
-          # [documentation](/speech-to-text/docs/enable-data-logging). If you set
-          # `use_enhanced` to true and you have not enabled audio logging, then you
-          # will receive an error.
       "sampleRateHertz": 42, # Sample rate in Hertz of the audio data sent in all
           # `RecognitionAudio` messages. Valid values are: 8000-48000.
           # 16000 is optimal. For best results, set the sampling rate of the audio
@@ -176,53 +167,74 @@
           # the audio source (instead of re-sampling).
           # This field is optional for FLAC and WAV audio files, but is
           # required for all other audio formats. For details, see AudioEncoding.
-      "profanityFilter": True or False, # *Optional* If set to `true`, the server will attempt to filter out
+      "profanityFilter": True or False, # If set to `true`, the server will attempt to filter out
           # profanities, replacing all but the initial character in each filtered word
           # with asterisks, e.g. "f***". If set to `false` or omitted, profanities
           # won't be filtered out.
-      "model": "A String", # *Optional* Which model to select for the given request. Select the model
+      "model": "A String", # Which model to select for the given request. Select the model
           # best suited to your domain to get best results. If a model is not
           # explicitly specified, then we auto-select a model based on the parameters
           # in the RecognitionConfig.
-          # <table>
-          #   <tr>
-          #     <td><b>Model</b></td>
-          #     <td><b>Description</b></td>
-          #   </tr>
-          #   <tr>
-          #     <td><code>command_and_search</code></td>
-          #     <td>Best for short queries such as voice commands or voice search.</td>
-          #   </tr>
-          #   <tr>
-          #     <td><code>phone_call</code></td>
-          #     <td>Best for audio that originated from a phone call (typically
-          #     recorded at an 8khz sampling rate).</td>
-          #   </tr>
-          #   <tr>
-          #     <td><code>video</code></td>
-          #     <td>Best for audio that originated from from video or includes multiple
+          # &lt;table&gt;
+          #   &lt;tr&gt;
+          #     &lt;td&gt;&lt;b&gt;Model&lt;/b&gt;&lt;/td&gt;
+          #     &lt;td&gt;&lt;b&gt;Description&lt;/b&gt;&lt;/td&gt;
+          #   &lt;/tr&gt;
+          #   &lt;tr&gt;
+          #     &lt;td&gt;&lt;code&gt;command_and_search&lt;/code&gt;&lt;/td&gt;
+          #     &lt;td&gt;Best for short queries such as voice commands or voice search.&lt;/td&gt;
+          #   &lt;/tr&gt;
+          #   &lt;tr&gt;
+          #     &lt;td&gt;&lt;code&gt;phone_call&lt;/code&gt;&lt;/td&gt;
+          #     &lt;td&gt;Best for audio that originated from a phone call (typically
+          #     recorded at an 8khz sampling rate).&lt;/td&gt;
+          #   &lt;/tr&gt;
+          #   &lt;tr&gt;
+          #     &lt;td&gt;&lt;code&gt;video&lt;/code&gt;&lt;/td&gt;
+          #     &lt;td&gt;Best for audio that originated from video or includes multiple
           #         speakers. Ideally the audio is recorded at a 16khz or greater
           #         sampling rate. This is a premium model that costs more than the
-          #         standard rate.</td>
-          #   </tr>
-          #   <tr>
-          #     <td><code>default</code></td>
-          #     <td>Best for audio that is not one of the specific audio models.
+          #         standard rate.&lt;/td&gt;
+          #   &lt;/tr&gt;
+          #   &lt;tr&gt;
+          #     &lt;td&gt;&lt;code&gt;default&lt;/code&gt;&lt;/td&gt;
+          #     &lt;td&gt;Best for audio that is not one of the specific audio models.
           #         For example, long-form audio. Ideally the audio is high-fidelity,
-          #         recorded at a 16khz or greater sampling rate.</td>
-          #   </tr>
-          # </table>
-      "speechContexts": [ # *Optional* array of SpeechContext.
+          #         recorded at a 16khz or greater sampling rate.&lt;/td&gt;
+          #   &lt;/tr&gt;
+          # &lt;/table&gt;
+      "diarizationConfig": { # Config to enable speaker diarization. # Config to enable speaker diarization and set additional
+          # parameters to make diarization better suited for your application.
+          # Note: When this is enabled, we send all the words from the beginning of the
+          # audio for the top alternative in every consecutive STREAMING response.
+          # This is done in order to improve our speaker tags as our models learn to
+          # identify the speakers in the conversation over time.
+          # For non-streaming requests, the diarization results will be provided only
+          # in the top alternative of the FINAL SpeechRecognitionResult.
+        "minSpeakerCount": 42, # Minimum number of speakers in the conversation. This range gives you more
+            # flexibility by allowing the system to automatically determine the correct
+            # number of speakers. If not set, the default value is 2.
+        "enableSpeakerDiarization": True or False, # If 'true', enables speaker detection for each recognized word in
+            # the top alternative of the recognition result using a speaker_tag provided
+            # in the WordInfo.
+        "maxSpeakerCount": 42, # Maximum number of speakers in the conversation. This range gives you more
+            # flexibility by allowing the system to automatically determine the correct
+            # number of speakers. If not set, the default value is 6.
+        "speakerTag": 42, # Output only. Unused.
+      },
+      "speechContexts": [ # Array of SpeechContext.
           # A means to provide context to assist the speech recognition. For more
-          # information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+          # information, see
+          # [speech
+          # adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
         { # Provides "hints" to the speech recognizer to favor specific words and phrases
             # in the results.
-          "phrases": [ # *Optional* A list of strings containing words and phrases "hints" so that
+          "phrases": [ # A list of strings containing words and phrases "hints" so that
               # the speech recognition is more likely to recognize them. This can be used
               # to improve the accuracy for specific words and phrases, for example, if
               # specific commands are typically spoken by the user. This can also be used
               # to add additional words to the vocabulary of the recognizer. See
-              # [usage limits](/speech-to-text/quotas#content).
+              # [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
               #
               # List items can also be set to classes for groups of words that represent
               # common concepts that occur in natural language. For example, rather than
@@ -233,12 +245,10 @@
           ],
         },
       ],
-      "metadata": { # Description of audio data to be recognized. # *Optional* Metadata regarding this request.
+      "metadata": { # Description of audio data to be recognized. # Metadata regarding this request.
         "recordingDeviceType": "A String", # The type of device the speech was recorded with.
         "originalMediaType": "A String", # The original media the speech was recorded on.
         "microphoneDistance": "A String", # The audio type that most closely describes the audio being recognized.
-        "obfuscatedId": "A String", # Obfuscated (privacy-protected) ID of the user, to identify number of
-            # unique users using the service.
         "originalMimeType": "A String", # Mime type of the original audio file.  For example `audio/m4a`,
             # `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`.
             # A list of possible audio mime types is maintained at
@@ -267,6 +277,12 @@
 
     { # This resource represents a long-running operation that is the result of a
       # network API call.
+    "metadata": { # Service-specific metadata associated with the operation.  It typically
+        # contains progress information and common metadata such as create time.
+        # Some services might not provide such metadata.  Any method that returns a
+        # long-running operation should document the metadata type, if any.
+      "a_key": "", # Properties of the object. Contains field @type with type URL.
+    },
     "error": { # The `Status` type defines a logical error model that is suitable for # The error result of the operation in case of failure or cancellation.
         # different programming environments, including REST APIs and RPC APIs. It is
         # used by [gRPC](https://github.com/grpc). Each `Status` message contains
@@ -301,29 +317,23 @@
     "name": "A String", # The server-assigned name, which is only unique within the same service that
         # originally returns it. If you use the default HTTP mapping, the
         # `name` should be a resource name ending with `operations/{unique_id}`.
-    "metadata": { # Service-specific metadata associated with the operation.  It typically
-        # contains progress information and common metadata such as create time.
-        # Some services might not provide such metadata.  Any method that returns a
-        # long-running operation should document the metadata type, if any.
-      "a_key": "", # Properties of the object. Contains field @type with type URL.
-    },
   }</pre>
 </div>
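
For reference, a minimal sketch of driving the regenerated longrunningrecognize
surface from Python. It assumes Application Default Credentials are configured;
the gs:// URI, sleep interval, and config values are illustrative, and polling
uses the v1 operations resource.

    import time

    from googleapiclient.discovery import build

    service = build('speech', 'v1')

    request_body = {
        'config': {
            'languageCode': 'en-US',     # Required. BCP-47 language tag.
            'encoding': 'LINEAR16',      # Optional for FLAC/WAV, required otherwise.
            'sampleRateHertz': 16000,
            'diarizationConfig': {       # Added in this regeneration.
                'enableSpeakerDiarization': True,
                'minSpeakerCount': 2,    # Defaults to 2 if unset.
                'maxSpeakerCount': 6,    # Defaults to 6 if unset.
            },
        },
        'audio': {
            # Exactly one of `content` or `uri` may be set; both or neither
            # returns google.rpc.Code.INVALID_ARGUMENT.
            'uri': 'gs://my-bucket/my-audio.wav',  # hypothetical object
        },
    }

    operation = service.speech().longrunningrecognize(body=request_body).execute()

    # Poll the long-running operation until `done`; results arrive in
    # `Operation.response`, or `Operation.error` on failure.
    while not operation.get('done'):
        time.sleep(10)
        operation = service.operations().get(name=operation['name']).execute()

    print(operation.get('response', operation.get('error')))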
 
 <div class="method">
-    <code class="details" id="recognize">recognize(body, x__xgafv=None)</code>
+    <code class="details" id="recognize">recognize(body=None, x__xgafv=None)</code>
   <pre>Performs synchronous speech recognition: receive results after all audio
 has been sent and processed.
 
 Args:
-  body: object, The request body. (required)
+  body: object, The request body.
     The object takes the form of:
 
 { # The top-level message sent by the client for the `Recognize` method.
-    "audio": { # Contains audio data in the encoding specified in the `RecognitionConfig`. # *Required* The audio data to be recognized.
+    "audio": { # Contains audio data in the encoding specified in the `RecognitionConfig`. # Required. The audio data to be recognized.
         # Either `content` or `uri` must be supplied. Supplying both or neither
         # returns google.rpc.Code.INVALID_ARGUMENT. See
-        # [content limits](/speech-to-text/quotas#content).
+        # [content limits](https://cloud.google.com/speech-to-text/quotas#content).
       "content": "A String", # The audio data bytes encoded as specified in
           # `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
           # pure binary representation, whereas JSON representations use base64.
@@ -335,15 +345,16 @@
           # google.rpc.Code.INVALID_ARGUMENT). For more information, see
           # [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
     },
-    "config": { # Provides information to the recognizer that specifies how to process the # *Required* Provides information to the recognizer that specifies how to
+    "config": { # Provides information to the recognizer that specifies how to process the # Required. Provides information to the recognizer that specifies how to
         # process the request.
         # request.
-      "languageCode": "A String", # *Required* The language of the supplied audio as a
+      "languageCode": "A String", # Required. The language of the supplied audio as a
           # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
           # Example: "en-US".
-          # See [Language Support](/speech-to-text/docs/languages)
-          # for a list of the currently supported language codes.
-      "audioChannelCount": 42, # *Optional* The number of channels in the input audio data.
+          # See [Language
+          # Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+          # of the currently supported language codes.
+      "audioChannelCount": 42, # The number of channels in the input audio data.
           # ONLY set this for MULTI-CHANNEL recognition.
           # Valid values for LINEAR16 and FLAC are `1`-`8`.
           # Valid values for OGG_OPUS are '1'-'254'.
@@ -355,44 +366,34 @@
       "encoding": "A String", # Encoding of audio data sent in all `RecognitionAudio` messages.
           # This field is optional for `FLAC` and `WAV` audio files and required
           # for all other audio formats. For details, see AudioEncoding.
-      "enableAutomaticPunctuation": True or False, # *Optional* If 'true', adds punctuation to recognition result hypotheses.
+      "enableAutomaticPunctuation": True or False, # If 'true', adds punctuation to recognition result hypotheses.
           # This feature is only available in select languages. Setting this for
           # requests in other languages has no effect at all.
           # The default 'false' value does not add punctuation to result hypotheses.
-          # Note: This is currently offered as an experimental service, complimentary
-          # to all users. In the future this may be exclusively available as a
-          # premium feature.
-      "enableSeparateRecognitionPerChannel": True or False, # This needs to be set to `true` explicitly and `audio_channel_count` > 1
+      "enableSeparateRecognitionPerChannel": True or False, # This needs to be set to `true` explicitly and `audio_channel_count` &gt; 1
           # to get each channel recognized separately. The recognition result will
           # contain a `channel_tag` field to state which channel that result belongs
           # to. If this is not true, we will only recognize the first channel. The
           # request is billed cumulatively for all channels recognized:
           # `audio_channel_count` multiplied by the length of the audio.
-      "enableWordTimeOffsets": True or False, # *Optional* If `true`, the top result includes a list of words and
+      "enableWordTimeOffsets": True or False, # If `true`, the top result includes a list of words and
           # the start and end time offsets (timestamps) for those words. If
           # `false`, no word-level time offset information is returned. The default is
           # `false`.
-      "maxAlternatives": 42, # *Optional* Maximum number of recognition hypotheses to be returned.
+      "maxAlternatives": 42, # Maximum number of recognition hypotheses to be returned.
           # Specifically, the maximum number of `SpeechRecognitionAlternative` messages
           # within each `SpeechRecognitionResult`.
           # The server may return fewer than `max_alternatives`.
           # Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
           # one. If omitted, will return a maximum of one.
-      "useEnhanced": True or False, # *Optional* Set to true to use an enhanced model for speech recognition.
+      "useEnhanced": True or False, # Set to true to use an enhanced model for speech recognition.
           # If `use_enhanced` is set to true and the `model` field is not set, then
-          # an appropriate enhanced model is chosen if:
-          # 1. project is eligible for requesting enhanced models
-          # 2. an enhanced model exists for the audio
+          # an appropriate enhanced model is chosen if an enhanced model exists for
+          # the audio.
           #
           # If `use_enhanced` is true and an enhanced version of the specified model
           # does not exist, then the speech is recognized using the standard version
           # of the specified model.
-          #
-          # Enhanced speech models require that you opt-in to data logging using
-          # instructions in the
-          # [documentation](/speech-to-text/docs/enable-data-logging). If you set
-          # `use_enhanced` to true and you have not enabled audio logging, then you
-          # will receive an error.
       "sampleRateHertz": 42, # Sample rate in Hertz of the audio data sent in all
           # `RecognitionAudio` messages. Valid values are: 8000-48000.
           # 16000 is optimal. For best results, set the sampling rate of the audio
@@ -400,53 +401,74 @@
           # the audio source (instead of re-sampling).
           # This field is optional for FLAC and WAV audio files, but is
           # required for all other audio formats. For details, see AudioEncoding.
-      "profanityFilter": True or False, # *Optional* If set to `true`, the server will attempt to filter out
+      "profanityFilter": True or False, # If set to `true`, the server will attempt to filter out
           # profanities, replacing all but the initial character in each filtered word
           # with asterisks, e.g. "f***". If set to `false` or omitted, profanities
           # won't be filtered out.
-      "model": "A String", # *Optional* Which model to select for the given request. Select the model
+      "model": "A String", # Which model to select for the given request. Select the model
           # best suited to your domain to get best results. If a model is not
           # explicitly specified, then we auto-select a model based on the parameters
           # in the RecognitionConfig.
-          # <table>
-          #   <tr>
-          #     <td><b>Model</b></td>
-          #     <td><b>Description</b></td>
-          #   </tr>
-          #   <tr>
-          #     <td><code>command_and_search</code></td>
-          #     <td>Best for short queries such as voice commands or voice search.</td>
-          #   </tr>
-          #   <tr>
-          #     <td><code>phone_call</code></td>
-          #     <td>Best for audio that originated from a phone call (typically
-          #     recorded at an 8khz sampling rate).</td>
-          #   </tr>
-          #   <tr>
-          #     <td><code>video</code></td>
-          #     <td>Best for audio that originated from from video or includes multiple
+          # &lt;table&gt;
+          #   &lt;tr&gt;
+          #     &lt;td&gt;&lt;b&gt;Model&lt;/b&gt;&lt;/td&gt;
+          #     &lt;td&gt;&lt;b&gt;Description&lt;/b&gt;&lt;/td&gt;
+          #   &lt;/tr&gt;
+          #   &lt;tr&gt;
+          #     &lt;td&gt;&lt;code&gt;command_and_search&lt;/code&gt;&lt;/td&gt;
+          #     &lt;td&gt;Best for short queries such as voice commands or voice search.&lt;/td&gt;
+          #   &lt;/tr&gt;
+          #   &lt;tr&gt;
+          #     &lt;td&gt;&lt;code&gt;phone_call&lt;/code&gt;&lt;/td&gt;
+          #     &lt;td&gt;Best for audio that originated from a phone call (typically
+          #     recorded at an 8khz sampling rate).&lt;/td&gt;
+          #   &lt;/tr&gt;
+          #   &lt;tr&gt;
+          #     &lt;td&gt;&lt;code&gt;video&lt;/code&gt;&lt;/td&gt;
+          #     &lt;td&gt;Best for audio that originated from video or includes multiple
           #         speakers. Ideally the audio is recorded at a 16khz or greater
           #         sampling rate. This is a premium model that costs more than the
-          #         standard rate.</td>
-          #   </tr>
-          #   <tr>
-          #     <td><code>default</code></td>
-          #     <td>Best for audio that is not one of the specific audio models.
+          #         standard rate.&lt;/td&gt;
+          #   &lt;/tr&gt;
+          #   &lt;tr&gt;
+          #     &lt;td&gt;&lt;code&gt;default&lt;/code&gt;&lt;/td&gt;
+          #     &lt;td&gt;Best for audio that is not one of the specific audio models.
           #         For example, long-form audio. Ideally the audio is high-fidelity,
-          #         recorded at a 16khz or greater sampling rate.</td>
-          #   </tr>
-          # </table>
-      "speechContexts": [ # *Optional* array of SpeechContext.
+          #         recorded at a 16khz or greater sampling rate.&lt;/td&gt;
+          #   &lt;/tr&gt;
+          # &lt;/table&gt;
+      "diarizationConfig": { # Config to enable speaker diarization. # Config to enable speaker diarization and set additional
+          # parameters to make diarization better suited for your application.
+          # Note: When this is enabled, we send all the words from the beginning of the
+          # audio for the top alternative in every consecutive STREAMING response.
+          # This is done in order to improve our speaker tags as our models learn to
+          # identify the speakers in the conversation over time.
+          # For non-streaming requests, the diarization results will be provided only
+          # in the top alternative of the FINAL SpeechRecognitionResult.
+        "minSpeakerCount": 42, # Minimum number of speakers in the conversation. This range gives you more
+            # flexibility by allowing the system to automatically determine the correct
+            # number of speakers. If not set, the default value is 2.
+        "enableSpeakerDiarization": True or False, # If 'true', enables speaker detection for each recognized word in
+            # the top alternative of the recognition result using a speaker_tag provided
+            # in the WordInfo.
+        "maxSpeakerCount": 42, # Maximum number of speakers in the conversation. This range gives you more
+            # flexibility by allowing the system to automatically determine the correct
+            # number of speakers. If not set, the default value is 6.
+        "speakerTag": 42, # Output only. Unused.
+      },
+      "speechContexts": [ # Array of SpeechContext.
           # A means to provide context to assist the speech recognition. For more
-          # information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
+          # information, see
+          # [speech
+          # adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
         { # Provides "hints" to the speech recognizer to favor specific words and phrases
             # in the results.
-          "phrases": [ # *Optional* A list of strings containing words and phrases "hints" so that
+          "phrases": [ # A list of strings containing words and phrases "hints" so that
               # the speech recognition is more likely to recognize them. This can be used
               # to improve the accuracy for specific words and phrases, for example, if
               # specific commands are typically spoken by the user. This can also be used
               # to add additional words to the vocabulary of the recognizer. See
-              # [usage limits](/speech-to-text/quotas#content).
+              # [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
               #
               # List items can also be set to classes for groups of words that represent
               # common concepts that occur in natural language. For example, rather than
@@ -457,12 +479,10 @@
           ],
         },
       ],
-      "metadata": { # Description of audio data to be recognized. # *Optional* Metadata regarding this request.
+      "metadata": { # Description of audio data to be recognized. # Metadata regarding this request.
         "recordingDeviceType": "A String", # The type of device the speech was recorded with.
         "originalMediaType": "A String", # The original media the speech was recorded on.
         "microphoneDistance": "A String", # The audio type that most closely describes the audio being recognized.
-        "obfuscatedId": "A String", # Obfuscated (privacy-protected) ID of the user, to identify number of
-            # unique users using the service.
         "originalMimeType": "A String", # Mime type of the original audio file.  For example `audio/m4a`,
             # `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`.
             # A list of possible audio mime types is maintained at
@@ -492,42 +512,47 @@
     { # The only message returned to the client by the `Recognize` method. It
       # contains the result as zero or more sequential `SpeechRecognitionResult`
       # messages.
-    "results": [ # Output only. Sequential list of transcription results corresponding to
+    "results": [ # Sequential list of transcription results corresponding to
         # sequential portions of audio.
       { # A speech recognition result corresponding to a portion of the audio.
         "channelTag": 42, # For multi-channel audio, this is the channel number corresponding to the
             # recognized result for the audio from that channel.
             # For audio_channel_count = N, its output values can range from '1' to 'N'.
-        "alternatives": [ # Output only. May contain one or more recognition hypotheses (up to the
+        "alternatives": [ # May contain one or more recognition hypotheses (up to the
             # maximum specified in `max_alternatives`).
             # These alternatives are ordered in terms of accuracy, with the top (first)
             # alternative being the most probable, as ranked by the recognizer.
           { # Alternative hypotheses (a.k.a. n-best list).
-            "confidence": 3.14, # Output only. The confidence estimate between 0.0 and 1.0. A higher number
+            "confidence": 3.14, # The confidence estimate between 0.0 and 1.0. A higher number
                 # indicates an estimated greater likelihood that the recognized words are
                 # correct. This field is set only for the top alternative of a non-streaming
                 # result or, of a streaming result where `is_final=true`.
                 # This field is not guaranteed to be accurate and users should not rely on it
                 # to be always provided.
                 # The default of 0.0 is a sentinel value indicating `confidence` was not set.
-            "transcript": "A String", # Output only. Transcript text representing the words that the user spoke.
-            "words": [ # Output only. A list of word-specific information for each recognized word.
+            "transcript": "A String", # Transcript text representing the words that the user spoke.
+            "words": [ # A list of word-specific information for each recognized word.
                 # Note: When `enable_speaker_diarization` is true, you will see all the words
                 # from the beginning of the audio.
               { # Word-specific information for recognized words.
-                "endTime": "A String", # Output only. Time offset relative to the beginning of the audio,
+                "endTime": "A String", # Time offset relative to the beginning of the audio,
                     # and corresponding to the end of the spoken word.
                     # This field is only set if `enable_word_time_offsets=true` and only
                     # in the top hypothesis.
                     # This is an experimental feature and the accuracy of the time offset can
                     # vary.
-                "word": "A String", # Output only. The word corresponding to this set of information.
-                "startTime": "A String", # Output only. Time offset relative to the beginning of the audio,
+                "word": "A String", # The word corresponding to this set of information.
+                "startTime": "A String", # Time offset relative to the beginning of the audio,
                     # and corresponding to the start of the spoken word.
                     # This field is only set if `enable_word_time_offsets=true` and only
                     # in the top hypothesis.
                     # This is an experimental feature and the accuracy of the time offset can
                     # vary.
+                "speakerTag": 42, # Output only. A distinct integer value is assigned for every speaker within
+                    # the audio. This field specifies which one of those speakers was detected to
+                    # have spoken this word. Value ranges from '1' to diarization_speaker_count.
+                    # speaker_tag is set if enable_speaker_diarization = 'true' and only in the
+                    # top alternative.
               },
             ],
           },
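
And a matching sketch for the synchronous recognize method, showing the
speechContexts phrase hints documented above. The local filename and phrases
are hypothetical; the base64 step reflects the JSON encoding of bytes fields
noted under `content`.

    import base64

    from googleapiclient.discovery import build

    service = build('speech', 'v1')

    # Short audio suits the synchronous method; longer recordings should go
    # through longrunningrecognize() instead.
    with open('command.wav', 'rb') as f:       # hypothetical local file
        audio_content = base64.b64encode(f.read()).decode('utf-8')

    response = service.speech().recognize(body={
        'config': {
            'languageCode': 'en-US',           # Required. BCP-47 language tag.
            'model': 'command_and_search',     # See the model table above.
            'speechContexts': [
                {'phrases': ['turn on the lights', 'set a timer']},
            ],
        },
        'audio': {'content': audio_content},
    }).execute()

    for result in response.get('results', []):
        # Alternatives are ordered most-probable first.
        print(result['alternatives'][0]['transcript'])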