docs: update generated docs (#981)

diff --git a/docs/dyn/speech_v1.speech.html b/docs/dyn/speech_v1.speech.html
index fd71ac9..fe57cce 100644
--- a/docs/dyn/speech_v1.speech.html
+++ b/docs/dyn/speech_v1.speech.html
@@ -96,92 +96,24 @@
 
 { # The top-level message sent by the client for the `LongRunningRecognize`
       # method.
+    "audio": { # Contains audio data in the encoding specified in the `RecognitionConfig`. # Required. The audio data to be recognized.
+        # Either `content` or `uri` must be supplied. Supplying both or neither
+        # returns google.rpc.Code.INVALID_ARGUMENT. See
+        # [content limits](https://cloud.google.com/speech-to-text/quotas#content).
+      "uri": "A String", # URI that points to a file that contains audio data bytes as specified in
+          # `RecognitionConfig`. The file must not be compressed (for example, gzip).
+          # Currently, only Google Cloud Storage URIs are
+          # supported, which must be specified in the following format:
+          # `gs://bucket_name/object_name` (other URI formats return
+          # google.rpc.Code.INVALID_ARGUMENT). For more information, see
+          # [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
+      "content": "A String", # The audio data bytes encoded as specified in
+          # `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
+          # pure binary representation, whereas JSON representations use base64.
+    },
     "config": { # Provides information to the recognizer that specifies how to process the # Required. Provides information to the recognizer that specifies how to
         # process the request.
         # request.
-      "enableWordTimeOffsets": True or False, # If `true`, the top result includes a list of words and
-          # the start and end time offsets (timestamps) for those words. If
-          # `false`, no word-level time offset information is returned. The default is
-          # `false`.
-      "diarizationConfig": { # Config to enable speaker diarization. # Config to enable speaker diarization and set additional
-          # parameters to make diarization better suited for your application.
-          # Note: When this is enabled, we send all the words from the beginning of the
-          # audio for the top alternative in every consecutive STREAMING responses.
-          # This is done in order to improve our speaker tags as our models learn to
-          # identify the speakers in the conversation over time.
-          # For non-streaming requests, the diarization results will be provided only
-          # in the top alternative of the FINAL SpeechRecognitionResult.
-        "minSpeakerCount": 42, # Minimum number of speakers in the conversation. This range gives you more
-            # flexibility by allowing the system to automatically determine the correct
-            # number of speakers. If not set, the default value is 2.
-        "maxSpeakerCount": 42, # Maximum number of speakers in the conversation. This range gives you more
-            # flexibility by allowing the system to automatically determine the correct
-            # number of speakers. If not set, the default value is 6.
-        "speakerTag": 42, # Output only. Unused.
-        "enableSpeakerDiarization": True or False, # If 'true', enables speaker detection for each recognized word in
-            # the top alternative of the recognition result using a speaker_tag provided
-            # in the WordInfo.
-      },
-      "languageCode": "A String", # Required. The language of the supplied audio as a
-          # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
-          # Example: "en-US".
-          # See [Language
-          # Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
-          # of the currently supported language codes.
-      "profanityFilter": True or False, # If set to `true`, the server will attempt to filter out
-          # profanities, replacing all but the initial character in each filtered word
-          # with asterisks, e.g. "f***". If set to `false` or omitted, profanities
-          # won't be filtered out.
-      "useEnhanced": True or False, # Set to true to use an enhanced model for speech recognition.
-          # If `use_enhanced` is set to true and the `model` field is not set, then
-          # an appropriate enhanced model is chosen if an enhanced model exists for
-          # the audio.
-          #
-          # If `use_enhanced` is true and an enhanced version of the specified model
-          # does not exist, then the speech is recognized using the standard version
-          # of the specified model.
-      "metadata": { # Description of audio data to be recognized. # Metadata regarding this request.
-        "originalMediaType": "A String", # The original media the speech was recorded on.
-        "recordingDeviceType": "A String", # The type of device the speech was recorded with.
-        "interactionType": "A String", # The use case most closely describing the audio content to be recognized.
-        "audioTopic": "A String", # Description of the content. Eg. "Recordings of federal supreme court
-            # hearings from 2012".
-        "originalMimeType": "A String", # Mime type of the original audio file.  For example `audio/m4a`,
-            # `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`.
-            # A list of possible audio mime types is maintained at
-            # http://www.iana.org/assignments/media-types/media-types.xhtml#audio
-        "recordingDeviceName": "A String", # The device used to make the recording.  Examples 'Nexus 5X' or
-            # 'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or
-            # 'Cardioid Microphone'.
-        "industryNaicsCodeOfAudio": 42, # The industry vertical to which this speech recognition request most
-            # closely applies. This is most indicative of the topics contained
-            # in the audio.  Use the 6-digit NAICS code to identify the industry
-            # vertical - see https://www.naics.com/search/.
-        "microphoneDistance": "A String", # The audio type that most closely describes the audio being recognized.
-      },
-      "sampleRateHertz": 42, # Sample rate in Hertz of the audio data sent in all
-          # `RecognitionAudio` messages. Valid values are: 8000-48000.
-          # 16000 is optimal. For best results, set the sampling rate of the audio
-          # source to 16000 Hz. If that's not possible, use the native sample rate of
-          # the audio source (instead of re-sampling).
-          # This field is optional for FLAC and WAV audio files, but is
-          # required for all other audio formats. For details, see AudioEncoding.
-      "enableSeparateRecognitionPerChannel": True or False, # This needs to be set to `true` explicitly and `audio_channel_count` > 1
-          # to get each channel recognized separately. The recognition result will
-          # contain a `channel_tag` field to state which channel that result belongs
-          # to. If this is not true, we will only recognize the first channel. The
-          # request is billed cumulatively for all channels recognized:
-          # `audio_channel_count` multiplied by the length of the audio.
-      "enableAutomaticPunctuation": True or False, # If 'true', adds punctuation to recognition result hypotheses.
-          # This feature is only available in select languages. Setting this for
-          # requests in other languages has no effect at all.
-          # The default 'false' value does not add punctuation to result hypotheses.
-      "maxAlternatives": 42, # Maximum number of recognition hypotheses to be returned.
-          # Specifically, the maximum number of `SpeechRecognitionAlternative` messages
-          # within each `SpeechRecognitionResult`.
-          # The server may return fewer than `max_alternatives`.
-          # Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
-          # one. If omitted, will return a maximum of one.
       "speechContexts": [ # Array of SpeechContext.
           # A means to provide context to assist the speech recognition. For more
           # information, see
@@ -205,6 +137,98 @@
           ],
         },
       ],
+      "languageCode": "A String", # Required. The language of the supplied audio as a
+          # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
+          # Example: "en-US".
+          # See [Language
+          # Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+          # of the currently supported language codes.
+      "useEnhanced": True or False, # Set to true to use an enhanced model for speech recognition.
+          # If `use_enhanced` is set to true and the `model` field is not set, then
+          # an appropriate enhanced model is chosen if an enhanced model exists for
+          # the audio.
+          #
+          # If `use_enhanced` is true and an enhanced version of the specified model
+          # does not exist, then the speech is recognized using the standard version
+          # of the specified model.
+      "enableWordTimeOffsets": True or False, # If `true`, the top result includes a list of words and
+          # the start and end time offsets (timestamps) for those words. If
+          # `false`, no word-level time offset information is returned. The default is
+          # `false`.
+      "diarizationConfig": { # Config to enable speaker diarization. # Config to enable speaker diarization and set additional
+          # parameters to make diarization better suited for your application.
+          # Note: When this is enabled, we send all the words from the beginning of the
+          # audio for the top alternative in every consecutive STREAMING response.
+          # This is done in order to improve our speaker tags as our models learn to
+          # identify the speakers in the conversation over time.
+          # For non-streaming requests, the diarization results will be provided only
+          # in the top alternative of the FINAL SpeechRecognitionResult.
+        "speakerTag": 42, # Output only. Unused.
+        "minSpeakerCount": 42, # Minimum number of speakers in the conversation. This range gives you more
+            # flexibility by allowing the system to automatically determine the correct
+            # number of speakers. If not set, the default value is 2.
+        "enableSpeakerDiarization": True or False, # If 'true', enables speaker detection for each recognized word in
+            # the top alternative of the recognition result using a speaker_tag provided
+            # in the WordInfo.
+        "maxSpeakerCount": 42, # Maximum number of speakers in the conversation. This range gives you more
+            # flexibility by allowing the system to automatically determine the correct
+            # number of speakers. If not set, the default value is 6.
+      },
+      "profanityFilter": True or False, # If set to `true`, the server will attempt to filter out
+          # profanities, replacing all but the initial character in each filtered word
+          # with asterisks, e.g. "f***". If set to `false` or omitted, profanities
+          # won't be filtered out.
+      "enableAutomaticPunctuation": True or False, # If 'true', adds punctuation to recognition result hypotheses.
+          # This feature is only available in select languages. Setting this for
+          # requests in other languages has no effect at all.
+          # The default 'false' value does not add punctuation to result hypotheses.
+      "enableSeparateRecognitionPerChannel": True or False, # This needs to be set to `true` explicitly and `audio_channel_count` > 1
+          # to get each channel recognized separately. The recognition result will
+          # contain a `channel_tag` field to state which channel that result belongs
+          # to. If this is not true, we will only recognize the first channel. The
+          # request is billed cumulatively for all channels recognized:
+          # `audio_channel_count` multiplied by the length of the audio.
+      "sampleRateHertz": 42, # Sample rate in Hertz of the audio data sent in all
+          # `RecognitionAudio` messages. Valid values are: 8000-48000.
+          # 16000 is optimal. For best results, set the sampling rate of the audio
+          # source to 16000 Hz. If that's not possible, use the native sample rate of
+          # the audio source (instead of re-sampling).
+          # This field is optional for FLAC and WAV audio files, but is
+          # required for all other audio formats. For details, see AudioEncoding.
+      "metadata": { # Description of audio data to be recognized. # Metadata regarding this request.
+        "originalMediaType": "A String", # The original media the speech was recorded on.
+        "recordingDeviceName": "A String", # The device used to make the recording.  Examples 'Nexus 5X' or
+            # 'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or
+            # 'Cardioid Microphone'.
+        "industryNaicsCodeOfAudio": 42, # The industry vertical to which this speech recognition request most
+            # closely applies. This is most indicative of the topics contained
+            # in the audio.  Use the 6-digit NAICS code to identify the industry
+            # vertical - see https://www.naics.com/search/.
+        "originalMimeType": "A String", # Mime type of the original audio file.  For example `audio/m4a`,
+            # `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`.
+            # A list of possible audio mime types is maintained at
+            # http://www.iana.org/assignments/media-types/media-types.xhtml#audio
+        "audioTopic": "A String", # Description of the content. Eg. "Recordings of federal supreme court
+            # hearings from 2012".
+        "recordingDeviceType": "A String", # The type of device the speech was recorded with.
+        "microphoneDistance": "A String", # The audio type that most closely describes the audio being recognized.
+        "interactionType": "A String", # The use case most closely describing the audio content to be recognized.
+      },
+      "maxAlternatives": 42, # Maximum number of recognition hypotheses to be returned.
+          # Specifically, the maximum number of `SpeechRecognitionAlternative` messages
+          # within each `SpeechRecognitionResult`.
+          # The server may return fewer than `max_alternatives`.
+          # Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
+          # one. If omitted, will return a maximum of one.
+      "audioChannelCount": 42, # The number of channels in the input audio data.
+          # ONLY set this for MULTI-CHANNEL recognition.
+          # Valid values for LINEAR16 and FLAC are `1`-`8`.
+          # Valid values for OGG_OPUS are '1'-'254'.
+          # Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
+          # If `0` or omitted, defaults to one channel (mono).
+          # Note: We only recognize the first channel by default.
+          # To perform independent recognition on each channel set
+          # `enable_separate_recognition_per_channel` to 'true'.
       "encoding": "A String", # Encoding of audio data sent in all `RecognitionAudio` messages.
           # This field is optional for `FLAC` and `WAV` audio files and required
           # for all other audio formats. For details, see AudioEncoding.
@@ -240,30 +264,6 @@
           #         recorded at a 16khz or greater sampling rate.</td>
           #   </tr>
           # </table>
-      "audioChannelCount": 42, # The number of channels in the input audio data.
-          # ONLY set this for MULTI-CHANNEL recognition.
-          # Valid values for LINEAR16 and FLAC are `1`-`8`.
-          # Valid values for OGG_OPUS are '1'-'254'.
-          # Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
-          # If `0` or omitted, defaults to one channel (mono).
-          # Note: We only recognize the first channel by default.
-          # To perform independent recognition on each channel set
-          # `enable_separate_recognition_per_channel` to 'true'.
-    },
-    "audio": { # Contains audio data in the encoding specified in the `RecognitionConfig`. # Required. The audio data to be recognized.
-        # Either `content` or `uri` must be supplied. Supplying both or neither
-        # returns google.rpc.Code.INVALID_ARGUMENT. See
-        # [content limits](https://cloud.google.com/speech-to-text/quotas#content).
-      "content": "A String", # The audio data bytes encoded as specified in
-          # `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
-          # pure binary representation, whereas JSON representations use base64.
-      "uri": "A String", # URI that points to a file that contains audio data bytes as specified in
-          # `RecognitionConfig`. The file must not be compressed (for example, gzip).
-          # Currently, only Google Cloud Storage URIs are
-          # supported, which must be specified in the following format:
-          # `gs://bucket_name/object_name` (other URI formats return
-          # google.rpc.Code.INVALID_ARGUMENT). For more information, see
-          # [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
     },
   }
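
For orientation, here is a minimal sketch of how the request body above maps onto the generated Python client. This is illustrative only: the service is assumed to be built with `googleapiclient.discovery.build` using application-default credentials, and the bucket/object names are placeholders.

```python
from googleapiclient.discovery import build

# Build the Speech-to-Text v1 client; assumes application-default
# credentials are available in the environment.
service = build("speech", "v1")

# Request body mirroring the structure documented above. Exactly one of
# `content` or `uri` may be set inside `audio`.
body = {
    "config": {
        "encoding": "FLAC",
        "languageCode": "en-US",
        "sampleRateHertz": 16000,
        "enableWordTimeOffsets": True,
    },
    "audio": {
        "uri": "gs://my-bucket/my-audio.flac",  # placeholder Cloud Storage URI
    },
}

# Starts a long-running operation; the returned dict is the Operation
# resource described in the next section.
operation = service.speech().longrunningrecognize(body=body).execute()
print(operation["name"])
```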
 
@@ -287,9 +287,18 @@
         # `TakeSnapshotResponse`.
       "a_key": "", # Properties of the object. Contains field @type with type URL.
     },
+    "metadata": { # Service-specific metadata associated with the operation.  It typically
+        # contains progress information and common metadata such as create time.
+        # Some services might not provide such metadata.  Any method that returns a
+        # long-running operation should document the metadata type, if any.
+      "a_key": "", # Properties of the object. Contains field @type with type URL.
+    },
     "name": "A String", # The server-assigned name, which is only unique within the same service that
         # originally returns it. If you use the default HTTP mapping, the
         # `name` should be a resource name ending with `operations/{unique_id}`.
+    "done": True or False, # If the value is `false`, it means the operation is still in progress.
+        # If `true`, the operation is completed, and either `error` or `response` is
+        # available.
     "error": { # The `Status` type defines a logical error model that is suitable for # The error result of the operation in case of failure or cancellation.
         # different programming environments, including REST APIs and RPC APIs. It is
         # used by [gRPC](https://github.com/grpc). Each `Status` message contains
@@ -297,26 +306,17 @@
         #
         # You can find out more about this error model and how to work with it in the
         # [API Design Guide](https://cloud.google.com/apis/design/errors).
+      "message": "A String", # A developer-facing error message, which should be in English. Any
+          # user-facing error message should be localized and sent in the
+          # google.rpc.Status.details field, or localized by the client.
+      "code": 42, # The status code, which should be an enum value of google.rpc.Code.
       "details": [ # A list of messages that carry the error details.  There is a common set of
           # message types for APIs to use.
         {
           "a_key": "", # Properties of the object. Contains field @type with type URL.
         },
       ],
-      "code": 42, # The status code, which should be an enum value of google.rpc.Code.
-      "message": "A String", # A developer-facing error message, which should be in English. Any
-          # user-facing error message should be localized and sent in the
-          # google.rpc.Status.details field, or localized by the client.
     },
-    "metadata": { # Service-specific metadata associated with the operation.  It typically
-        # contains progress information and common metadata such as create time.
-        # Some services might not provide such metadata.  Any method that returns a
-        # long-running operation should document the metadata type, if any.
-      "a_key": "", # Properties of the object. Contains field @type with type URL.
-    },
-    "done": True or False, # If the value is `false`, it means the operation is still in progress.
-        # If `true`, the operation is completed, and either `error` or `response` is
-        # available.
   }</pre>
 </div>
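
To make the returned Operation above concrete, a hedged sketch of polling it, continuing from the `longrunningrecognize` sketch earlier. The `operations()` resource and its `get(name=...)` method are assumed from the v1 surface, and the sleep interval is arbitrary.

```python
import time

# Poll the long-running operation until `done` is true; then either
# `response` or `error` is populated, as documented above.
op_name = operation["name"]
while True:
    op = service.operations().get(name=op_name).execute()
    if op.get("done"):
        break
    time.sleep(5)

if "error" in op:
    # `error` follows the google.rpc.Status shape documented above.
    print("Recognition failed:", op["error"].get("message"))
else:
    # For this method the response payload is a LongRunningRecognizeResponse.
    print(op["response"])
```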
 
@@ -333,89 +333,6 @@
     &quot;config&quot;: { # Provides information to the recognizer that specifies how to process the # Required. Provides information to the recognizer that specifies how to
         # process the request.
         # request.
-      &quot;enableWordTimeOffsets&quot;: True or False, # If `true`, the top result includes a list of words and
-          # the start and end time offsets (timestamps) for those words. If
-          # `false`, no word-level time offset information is returned. The default is
-          # `false`.
-      &quot;diarizationConfig&quot;: { # Config to enable speaker diarization. # Config to enable speaker diarization and set additional
-          # parameters to make diarization better suited for your application.
-          # Note: When this is enabled, we send all the words from the beginning of the
-          # audio for the top alternative in every consecutive STREAMING responses.
-          # This is done in order to improve our speaker tags as our models learn to
-          # identify the speakers in the conversation over time.
-          # For non-streaming requests, the diarization results will be provided only
-          # in the top alternative of the FINAL SpeechRecognitionResult.
-        &quot;minSpeakerCount&quot;: 42, # Minimum number of speakers in the conversation. This range gives you more
-            # flexibility by allowing the system to automatically determine the correct
-            # number of speakers. If not set, the default value is 2.
-        &quot;maxSpeakerCount&quot;: 42, # Maximum number of speakers in the conversation. This range gives you more
-            # flexibility by allowing the system to automatically determine the correct
-            # number of speakers. If not set, the default value is 6.
-        &quot;speakerTag&quot;: 42, # Output only. Unused.
-        &quot;enableSpeakerDiarization&quot;: True or False, # If &#x27;true&#x27;, enables speaker detection for each recognized word in
-            # the top alternative of the recognition result using a speaker_tag provided
-            # in the WordInfo.
-      },
-      &quot;languageCode&quot;: &quot;A String&quot;, # Required. The language of the supplied audio as a
-          # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
-          # Example: &quot;en-US&quot;.
-          # See [Language
-          # Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
-          # of the currently supported language codes.
-      &quot;profanityFilter&quot;: True or False, # If set to `true`, the server will attempt to filter out
-          # profanities, replacing all but the initial character in each filtered word
-          # with asterisks, e.g. &quot;f***&quot;. If set to `false` or omitted, profanities
-          # won&#x27;t be filtered out.
-      &quot;useEnhanced&quot;: True or False, # Set to true to use an enhanced model for speech recognition.
-          # If `use_enhanced` is set to true and the `model` field is not set, then
-          # an appropriate enhanced model is chosen if an enhanced model exists for
-          # the audio.
-          #
-          # If `use_enhanced` is true and an enhanced version of the specified model
-          # does not exist, then the speech is recognized using the standard version
-          # of the specified model.
-      &quot;metadata&quot;: { # Description of audio data to be recognized. # Metadata regarding this request.
-        &quot;originalMediaType&quot;: &quot;A String&quot;, # The original media the speech was recorded on.
-        &quot;recordingDeviceType&quot;: &quot;A String&quot;, # The type of device the speech was recorded with.
-        &quot;interactionType&quot;: &quot;A String&quot;, # The use case most closely describing the audio content to be recognized.
-        &quot;audioTopic&quot;: &quot;A String&quot;, # Description of the content. Eg. &quot;Recordings of federal supreme court
-            # hearings from 2012&quot;.
-        &quot;originalMimeType&quot;: &quot;A String&quot;, # Mime type of the original audio file.  For example `audio/m4a`,
-            # `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`.
-            # A list of possible audio mime types is maintained at
-            # http://www.iana.org/assignments/media-types/media-types.xhtml#audio
-        &quot;recordingDeviceName&quot;: &quot;A String&quot;, # The device used to make the recording.  Examples &#x27;Nexus 5X&#x27; or
-            # &#x27;Polycom SoundStation IP 6000&#x27; or &#x27;POTS&#x27; or &#x27;VoIP&#x27; or
-            # &#x27;Cardioid Microphone&#x27;.
-        &quot;industryNaicsCodeOfAudio&quot;: 42, # The industry vertical to which this speech recognition request most
-            # closely applies. This is most indicative of the topics contained
-            # in the audio.  Use the 6-digit NAICS code to identify the industry
-            # vertical - see https://www.naics.com/search/.
-        &quot;microphoneDistance&quot;: &quot;A String&quot;, # The audio type that most closely describes the audio being recognized.
-      },
-      &quot;sampleRateHertz&quot;: 42, # Sample rate in Hertz of the audio data sent in all
-          # `RecognitionAudio` messages. Valid values are: 8000-48000.
-          # 16000 is optimal. For best results, set the sampling rate of the audio
-          # source to 16000 Hz. If that&#x27;s not possible, use the native sample rate of
-          # the audio source (instead of re-sampling).
-          # This field is optional for FLAC and WAV audio files, but is
-          # required for all other audio formats. For details, see AudioEncoding.
-      &quot;enableSeparateRecognitionPerChannel&quot;: True or False, # This needs to be set to `true` explicitly and `audio_channel_count` &gt; 1
-          # to get each channel recognized separately. The recognition result will
-          # contain a `channel_tag` field to state which channel that result belongs
-          # to. If this is not true, we will only recognize the first channel. The
-          # request is billed cumulatively for all channels recognized:
-          # `audio_channel_count` multiplied by the length of the audio.
-      &quot;enableAutomaticPunctuation&quot;: True or False, # If &#x27;true&#x27;, adds punctuation to recognition result hypotheses.
-          # This feature is only available in select languages. Setting this for
-          # requests in other languages has no effect at all.
-          # The default &#x27;false&#x27; value does not add punctuation to result hypotheses.
-      &quot;maxAlternatives&quot;: 42, # Maximum number of recognition hypotheses to be returned.
-          # Specifically, the maximum number of `SpeechRecognitionAlternative` messages
-          # within each `SpeechRecognitionResult`.
-          # The server may return fewer than `max_alternatives`.
-          # Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
-          # one. If omitted, will return a maximum of one.
       &quot;speechContexts&quot;: [ # Array of SpeechContext.
           # A means to provide context to assist the speech recognition. For more
           # information, see
@@ -439,6 +356,98 @@
           ],
         },
       ],
+      &quot;languageCode&quot;: &quot;A String&quot;, # Required. The language of the supplied audio as a
+          # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
+          # Example: &quot;en-US&quot;.
+          # See [Language
+          # Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+          # of the currently supported language codes.
+      &quot;useEnhanced&quot;: True or False, # Set to true to use an enhanced model for speech recognition.
+          # If `use_enhanced` is set to true and the `model` field is not set, then
+          # an appropriate enhanced model is chosen if an enhanced model exists for
+          # the audio.
+          #
+          # If `use_enhanced` is true and an enhanced version of the specified model
+          # does not exist, then the speech is recognized using the standard version
+          # of the specified model.
+      &quot;enableWordTimeOffsets&quot;: True or False, # If `true`, the top result includes a list of words and
+          # the start and end time offsets (timestamps) for those words. If
+          # `false`, no word-level time offset information is returned. The default is
+          # `false`.
+      &quot;diarizationConfig&quot;: { # Config to enable speaker diarization. # Config to enable speaker diarization and set additional
+          # parameters to make diarization better suited for your application.
+          # Note: When this is enabled, we send all the words from the beginning of the
+          # audio for the top alternative in every consecutive STREAMING response.
+          # This is done in order to improve our speaker tags as our models learn to
+          # identify the speakers in the conversation over time.
+          # For non-streaming requests, the diarization results will be provided only
+          # in the top alternative of the FINAL SpeechRecognitionResult.
+        &quot;speakerTag&quot;: 42, # Output only. Unused.
+        &quot;minSpeakerCount&quot;: 42, # Minimum number of speakers in the conversation. This range gives you more
+            # flexibility by allowing the system to automatically determine the correct
+            # number of speakers. If not set, the default value is 2.
+        &quot;enableSpeakerDiarization&quot;: True or False, # If &#x27;true&#x27;, enables speaker detection for each recognized word in
+            # the top alternative of the recognition result using a speaker_tag provided
+            # in the WordInfo.
+        &quot;maxSpeakerCount&quot;: 42, # Maximum number of speakers in the conversation. This range gives you more
+            # flexibility by allowing the system to automatically determine the correct
+            # number of speakers. If not set, the default value is 6.
+      },
+      &quot;profanityFilter&quot;: True or False, # If set to `true`, the server will attempt to filter out
+          # profanities, replacing all but the initial character in each filtered word
+          # with asterisks, e.g. &quot;f***&quot;. If set to `false` or omitted, profanities
+          # won&#x27;t be filtered out.
+      &quot;enableAutomaticPunctuation&quot;: True or False, # If &#x27;true&#x27;, adds punctuation to recognition result hypotheses.
+          # This feature is only available in select languages. Setting this for
+          # requests in other languages has no effect at all.
+          # The default &#x27;false&#x27; value does not add punctuation to result hypotheses.
+      &quot;enableSeparateRecognitionPerChannel&quot;: True or False, # This needs to be set to `true` explicitly and `audio_channel_count` &gt; 1
+          # to get each channel recognized separately. The recognition result will
+          # contain a `channel_tag` field to state which channel that result belongs
+          # to. If this is not true, we will only recognize the first channel. The
+          # request is billed cumulatively for all channels recognized:
+          # `audio_channel_count` multiplied by the length of the audio.
+      &quot;sampleRateHertz&quot;: 42, # Sample rate in Hertz of the audio data sent in all
+          # `RecognitionAudio` messages. Valid values are: 8000-48000.
+          # 16000 is optimal. For best results, set the sampling rate of the audio
+          # source to 16000 Hz. If that&#x27;s not possible, use the native sample rate of
+          # the audio source (instead of re-sampling).
+          # This field is optional for FLAC and WAV audio files, but is
+          # required for all other audio formats. For details, see AudioEncoding.
+      &quot;metadata&quot;: { # Description of audio data to be recognized. # Metadata regarding this request.
+        &quot;originalMediaType&quot;: &quot;A String&quot;, # The original media the speech was recorded on.
+        &quot;recordingDeviceName&quot;: &quot;A String&quot;, # The device used to make the recording.  Examples &#x27;Nexus 5X&#x27; or
+            # &#x27;Polycom SoundStation IP 6000&#x27; or &#x27;POTS&#x27; or &#x27;VoIP&#x27; or
+            # &#x27;Cardioid Microphone&#x27;.
+        &quot;industryNaicsCodeOfAudio&quot;: 42, # The industry vertical to which this speech recognition request most
+            # closely applies. This is most indicative of the topics contained
+            # in the audio.  Use the 6-digit NAICS code to identify the industry
+            # vertical - see https://www.naics.com/search/.
+        &quot;originalMimeType&quot;: &quot;A String&quot;, # Mime type of the original audio file.  For example `audio/m4a`,
+            # `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`.
+            # A list of possible audio mime types is maintained at
+            # http://www.iana.org/assignments/media-types/media-types.xhtml#audio
+        &quot;audioTopic&quot;: &quot;A String&quot;, # Description of the content. Eg. &quot;Recordings of federal supreme court
+            # hearings from 2012&quot;.
+        &quot;recordingDeviceType&quot;: &quot;A String&quot;, # The type of device the speech was recorded with.
+        &quot;microphoneDistance&quot;: &quot;A String&quot;, # The audio type that most closely describes the audio being recognized.
+        &quot;interactionType&quot;: &quot;A String&quot;, # The use case most closely describing the audio content to be recognized.
+      },
+      &quot;maxAlternatives&quot;: 42, # Maximum number of recognition hypotheses to be returned.
+          # Specifically, the maximum number of `SpeechRecognitionAlternative` messages
+          # within each `SpeechRecognitionResult`.
+          # The server may return fewer than `max_alternatives`.
+          # Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
+          # one. If omitted, will return a maximum of one.
+      &quot;audioChannelCount&quot;: 42, # The number of channels in the input audio data.
+          # ONLY set this for MULTI-CHANNEL recognition.
+          # Valid values for LINEAR16 and FLAC are `1`-`8`.
+          # Valid values for OGG_OPUS are &#x27;1&#x27;-&#x27;254&#x27;.
+          # Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
+          # If `0` or omitted, defaults to one channel (mono).
+          # Note: We only recognize the first channel by default.
+          # To perform independent recognition on each channel set
+          # `enable_separate_recognition_per_channel` to &#x27;true&#x27;.
       &quot;encoding&quot;: &quot;A String&quot;, # Encoding of audio data sent in all `RecognitionAudio` messages.
           # This field is optional for `FLAC` and `WAV` audio files and required
           # for all other audio formats. For details, see AudioEncoding.
@@ -474,23 +483,11 @@
           #         recorded at a 16khz or greater sampling rate.&lt;/td&gt;
           #   &lt;/tr&gt;
           # &lt;/table&gt;
-      &quot;audioChannelCount&quot;: 42, # The number of channels in the input audio data.
-          # ONLY set this for MULTI-CHANNEL recognition.
-          # Valid values for LINEAR16 and FLAC are `1`-`8`.
-          # Valid values for OGG_OPUS are &#x27;1&#x27;-&#x27;254&#x27;.
-          # Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
-          # If `0` or omitted, defaults to one channel (mono).
-          # Note: We only recognize the first channel by default.
-          # To perform independent recognition on each channel set
-          # `enable_separate_recognition_per_channel` to &#x27;true&#x27;.
     },
     &quot;audio&quot;: { # Contains audio data in the encoding specified in the `RecognitionConfig`. # Required. The audio data to be recognized.
         # Either `content` or `uri` must be supplied. Supplying both or neither
         # returns google.rpc.Code.INVALID_ARGUMENT. See
         # [content limits](https://cloud.google.com/speech-to-text/quotas#content).
-      &quot;content&quot;: &quot;A String&quot;, # The audio data bytes encoded as specified in
-          # `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
-          # pure binary representation, whereas JSON representations use base64.
       &quot;uri&quot;: &quot;A String&quot;, # URI that points to a file that contains audio data bytes as specified in
           # `RecognitionConfig`. The file must not be compressed (for example, gzip).
           # Currently, only Google Cloud Storage URIs are
@@ -498,6 +495,9 @@
           # `gs://bucket_name/object_name` (other URI formats return
           # google.rpc.Code.INVALID_ARGUMENT). For more information, see
           # [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
+      &quot;content&quot;: &quot;A String&quot;, # The audio data bytes encoded as specified in
+          # `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
+          # pure binary representation, whereas JSON representations use base64.
     },
   }
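
The diarization fields documented above are easiest to read as a concrete config. Below is a minimal sketch of a synchronous request with speaker diarization enabled; the speaker counts echo the documented defaults, and the audio URI is a placeholder.

```python
from googleapiclient.discovery import build

service = build("speech", "v1")  # assumes application-default credentials

# Synchronous recognition with speaker diarization, mirroring the
# `diarizationConfig` fields documented above.
body = {
    "config": {
        "encoding": "LINEAR16",
        "languageCode": "en-US",
        "sampleRateHertz": 16000,
        "diarizationConfig": {
            "enableSpeakerDiarization": True,
            "minSpeakerCount": 2,  # documented default
            "maxSpeakerCount": 6,  # documented default
        },
    },
    "audio": {"uri": "gs://my-bucket/interview.wav"},  # placeholder URI
}

response = service.speech().recognize(body=body).execute()
```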
 
@@ -523,6 +523,7 @@
             # These alternatives are ordered in terms of accuracy, with the top (first)
             # alternative being the most probable, as ranked by the recognizer.
           { # Alternative hypotheses (a.k.a. n-best list).
+            &quot;transcript&quot;: &quot;A String&quot;, # Transcript text representing the words that the user spoke.
             &quot;confidence&quot;: 3.14, # The confidence estimate between 0.0 and 1.0. A higher number
                 # indicates an estimated greater likelihood that the recognized words are
                 # correct. This field is set only for the top alternative of a non-streaming
@@ -530,11 +531,15 @@
                 # This field is not guaranteed to be accurate and users should not rely on it
                 # to be always provided.
                 # The default of 0.0 is a sentinel value indicating `confidence` was not set.
-            &quot;transcript&quot;: &quot;A String&quot;, # Transcript text representing the words that the user spoke.
             &quot;words&quot;: [ # A list of word-specific information for each recognized word.
                 # Note: When `enable_speaker_diarization` is true, you will see all the words
                 # from the beginning of the audio.
               { # Word-specific information for recognized words.
+                &quot;speakerTag&quot;: 42, # Output only. A distinct integer value is assigned for every speaker within
+                    # the audio. This field specifies which one of those speakers was detected to
+                    # have spoken this word. Value ranges from &#x27;1&#x27; to diarization_speaker_count.
+                    # speaker_tag is set if enable_speaker_diarization = &#x27;true&#x27; and only in the
+                    # top alternative.
                 &quot;endTime&quot;: &quot;A String&quot;, # Time offset relative to the beginning of the audio,
                     # and corresponding to the end of the spoken word.
                     # This field is only set if `enable_word_time_offsets=true` and only
@@ -548,11 +553,6 @@
                     # This is an experimental feature and the accuracy of the time offset can
                     # vary.
                 &quot;word&quot;: &quot;A String&quot;, # The word corresponding to this set of information.
-                &quot;speakerTag&quot;: 42, # Output only. A distinct integer value is assigned for every speaker within
-                    # the audio. This field specifies which one of those speakers was detected to
-                    # have spoken this word. Value ranges from &#x27;1&#x27; to diarization_speaker_count.
-                    # speaker_tag is set if enable_speaker_diarization = &#x27;true&#x27; and only in the
-                    # top alternative.
               },
             ],
           },
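
Finally, a short sketch of walking the response fields above — transcripts, confidences, and per-word speaker tags. `response` is the dict returned by the recognize sketch earlier; keys that are only present when a feature is enabled are read with `.get()`.

```python
# `speakerTag` is only set on words of the top alternative when speaker
# diarization is enabled, and `confidence` only on the top alternative.
for result in response.get("results", []):
    top = result["alternatives"][0]
    print(top["transcript"], top.get("confidence", 0.0))
    for word in top.get("words", []):
        print(word["word"], word.get("startTime"), word.get("endTime"),
              "speaker", word.get("speakerTag"))
```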