Regen all docs. (#700)

* Stop recursing if discovery == {}

* Generate docs with 'make docs'.
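
For reference, a minimal sketch of how the regenerated speech_v1.speech page is typically exercised with google-api-python-client (assumes the library is installed; the API key below is a placeholder, and Application Default Credentials work as well):

    from googleapiclient import discovery

    # Build the Cloud Speech-to-Text v1 client from its published discovery
    # document. 'YOUR_API_KEY' is a placeholder credential.
    service = discovery.build('speech', 'v1', developerKey='YOUR_API_KEY')

    # speech_v1.speech.html documents the collection returned here, which
    # exposes recognize() and longrunningrecognize().
    speech = service.speech()
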
diff --git a/docs/dyn/speech_v1.speech.html b/docs/dyn/speech_v1.speech.html
index e07a219..18a6564 100644
--- a/docs/dyn/speech_v1.speech.html
+++ b/docs/dyn/speech_v1.speech.html
@@ -72,7 +72,7 @@
 
 </style>
 
-<h1><a href="speech_v1.html">Google Cloud Speech API</a> . <a href="speech_v1.speech.html">speech</a></h1>
+<h1><a href="speech_v1.html">Cloud Speech-to-Text API</a> . <a href="speech_v1.speech.html">speech</a></h1>
 <h2>Instance Methods</h2>
 <p class="toc_element">
   <code><a href="#longrunningrecognize">longrunningrecognize(body, x__xgafv=None)</a></code></p>
@@ -87,6 +87,8 @@
 google.longrunning.Operations interface. Returns either an
 `Operation.error` or an `Operation.response` which contains
 a `LongRunningRecognizeResponse` message.
+For more information on asynchronous speech recognition, see the
+[how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).
 
 Args:
   body: object, The request body. (required)
@@ -97,12 +99,13 @@
     "audio": { # Contains audio data in the encoding specified in the `RecognitionConfig`. # *Required* The audio data to be recognized.
         # Either `content` or `uri` must be supplied. Supplying both or neither
         # returns google.rpc.Code.INVALID_ARGUMENT. See
-        # [audio limits](https://cloud.google.com/speech/limits#content).
+        # [content limits](/speech-to-text/quotas#content).
       "content": "A String", # The audio data bytes encoded as specified in
-          # `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a
+          # `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
           # pure binary representation, whereas JSON representations use base64.
       "uri": "A String", # URI that points to a file that contains audio data bytes as specified in
-          # `RecognitionConfig`. Currently, only Google Cloud Storage URIs are
+          # `RecognitionConfig`. The file must not be compressed (for example, gzip).
+          # Currently, only Google Cloud Storage URIs are
           # supported, which must be specified in the following format:
           # `gs://bucket_name/object_name` (other URI formats return
           # google.rpc.Code.INVALID_ARGUMENT). For more information, see
@@ -114,25 +117,104 @@
       "languageCode": "A String", # *Required* The language of the supplied audio as a
           # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
           # Example: "en-US".
-          # See [Language Support](https://cloud.google.com/speech/docs/languages)
+          # See [Language Support](/speech-to-text/docs/languages)
           # for a list of the currently supported language codes.
-      "encoding": "A String", # *Required* Encoding of audio data sent in all `RecognitionAudio` messages.
+      "audioChannelCount": 42, # *Optional* The number of channels in the input audio data.
+          # ONLY set this for MULTI-CHANNEL recognition.
+          # Valid values for LINEAR16 and FLAC are `1`-`8`.
+          # Valid values for OGG_OPUS are '1'-'254'.
+          # Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
+          # If `0` or omitted, defaults to one channel (mono).
+          # Note: We only recognize the first channel by default.
+          # To perform independent recognition on each channel set
+          # `enable_separate_recognition_per_channel` to 'true'.
+      "encoding": "A String", # Encoding of audio data sent in all `RecognitionAudio` messages.
+          # This field is optional for `FLAC` and `WAV` audio files and required
+          # for all other audio formats. For details, see AudioEncoding.
+      "enableAutomaticPunctuation": True or False, # *Optional* If 'true', adds punctuation to recognition result hypotheses.
+          # This feature is only available in select languages. Setting this for
+          # requests in other languages has no effect at all.
+          # The default 'false' value does not add punctuation to result hypotheses.
+          # Note: This is currently offered as an experimental service, complimentary
+          # to all users. In the future this may be exclusively available as a
+          # premium feature.
+      "enableSeparateRecognitionPerChannel": True or False, # This needs to be set to `true` explicitly and `audio_channel_count` > 1
+          # to get each channel recognized separately. The recognition result will
+          # contain a `channel_tag` field to state which channel that result belongs
+          # to. If this is not true, we will only recognize the first channel. The
+          # request is billed cumulatively for all channels recognized:
+          # `audio_channel_count` multiplied by the length of the audio.
+      "enableWordTimeOffsets": True or False, # *Optional* If `true`, the top result includes a list of words and
+          # the start and end time offsets (timestamps) for those words. If
+          # `false`, no word-level time offset information is returned. The default is
+          # `false`.
       "maxAlternatives": 42, # *Optional* Maximum number of recognition hypotheses to be returned.
           # Specifically, the maximum number of `SpeechRecognitionAlternative` messages
           # within each `SpeechRecognitionResult`.
           # The server may return fewer than `max_alternatives`.
           # Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
           # one. If omitted, will return a maximum of one.
-      "sampleRateHertz": 42, # *Required* Sample rate in Hertz of the audio data sent in all
+      "useEnhanced": True or False, # *Optional* Set to true to use an enhanced model for speech recognition.
+          # If `use_enhanced` is set to true and the `model` field is not set, then
+          # an appropriate enhanced model is chosen if:
+          # 1. project is eligible for requesting enhanced models
+          # 2. an enhanced model exists for the audio
+          #
+          # If `use_enhanced` is true and an enhanced version of the specified model
+          # does not exist, then the speech is recognized using the standard version
+          # of the specified model.
+          #
+          # Enhanced speech models require that you opt-in to data logging using
+          # instructions in the
+          # [documentation](/speech-to-text/docs/enable-data-logging). If you set
+          # `use_enhanced` to true and you have not enabled audio logging, then you
+          # will receive an error.
+      "sampleRateHertz": 42, # Sample rate in Hertz of the audio data sent in all
           # `RecognitionAudio` messages. Valid values are: 8000-48000.
           # 16000 is optimal. For best results, set the sampling rate of the audio
           # source to 16000 Hz. If that's not possible, use the native sample rate of
           # the audio source (instead of re-sampling).
+          # This field is optional for FLAC and WAV audio files, but is
+          # required for all other audio formats. For details, see AudioEncoding.
       "profanityFilter": True or False, # *Optional* If set to `true`, the server will attempt to filter out
           # profanities, replacing all but the initial character in each filtered word
           # with asterisks, e.g. "f***". If set to `false` or omitted, profanities
           # won't be filtered out.
-      "speechContexts": [ # *Optional* A means to provide context to assist the speech recognition.
+      "model": "A String", # *Optional* Which model to select for the given request. Select the model
+          # best suited to your domain to get best results. If a model is not
+          # explicitly specified, then we auto-select a model based on the parameters
+          # in the RecognitionConfig.
+          # <table>
+          #   <tr>
+          #     <td><b>Model</b></td>
+          #     <td><b>Description</b></td>
+          #   </tr>
+          #   <tr>
+          #     <td><code>command_and_search</code></td>
+          #     <td>Best for short queries such as voice commands or voice search.</td>
+          #   </tr>
+          #   <tr>
+          #     <td><code>phone_call</code></td>
+          #     <td>Best for audio that originated from a phone call (typically
+          #     recorded at an 8khz sampling rate).</td>
+          #   </tr>
+          #   <tr>
+          #     <td><code>video</code></td>
+          #     <td>Best for audio that originated from video or includes multiple
+          #         speakers. Ideally the audio is recorded at a 16khz or greater
+          #         sampling rate. This is a premium model that costs more than the
+          #         standard rate.</td>
+          #   </tr>
+          #   <tr>
+          #     <td><code>default</code></td>
+          #     <td>Best for audio that is not one of the specific audio models.
+          #         For example, long-form audio. Ideally the audio is high-fidelity,
+          #         recorded at a 16khz or greater sampling rate.</td>
+          #   </tr>
+          # </table>
+      "speechContexts": [ # *Optional* array of SpeechContext.
+          # A means to provide context to assist the speech recognition. For more
+          # information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
         { # Provides "hints" to the speech recognizer to favor specific words and phrases
             # in the results.
           "phrases": [ # *Optional* A list of strings containing words and phrases "hints" so that
@@ -140,11 +222,38 @@
               # to improve the accuracy for specific words and phrases, for example, if
               # specific commands are typically spoken by the user. This can also be used
               # to add additional words to the vocabulary of the recognizer. See
-              # [usage limits](https://cloud.google.com/speech/limits#content).
+              # [usage limits](/speech-to-text/quotas#content).
+              #
+              # List items can also be set to classes for groups of words that represent
+              # common concepts that occur in natural language. For example, rather than
+              # providing phrase hints for every month of the year, using the $MONTH class
+              # improves the likelihood of correctly transcribing audio that includes
+              # months.
             "A String",
           ],
         },
       ],
+      "metadata": { # Description of audio data to be recognized. # *Optional* Metadata regarding this request.
+        "recordingDeviceType": "A String", # The type of device the speech was recorded with.
+        "originalMediaType": "A String", # The original media the speech was recorded on.
+        "microphoneDistance": "A String", # The audio type that most closely describes the audio being recognized.
+        "obfuscatedId": "A String", # Obfuscated (privacy-protected) ID of the user, to identify number of
+            # unique users using the service.
+        "originalMimeType": "A String", # Mime type of the original audio file.  For example `audio/m4a`,
+            # `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`.
+            # A list of possible audio mime types is maintained at
+            # http://www.iana.org/assignments/media-types/media-types.xhtml#audio
+        "industryNaicsCodeOfAudio": 42, # The industry vertical to which this speech recognition request most
+            # closely applies. This is most indicative of the topics contained
+            # in the audio.  Use the 6-digit NAICS code to identify the industry
+            # vertical - see https://www.naics.com/search/.
+        "audioTopic": "A String", # Description of the content. Eg. "Recordings of federal supreme court
+            # hearings from 2012".
+        "recordingDeviceName": "A String", # The device used to make the recording.  Examples 'Nexus 5X' or
+            # 'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or
+            # 'Cardioid Microphone'.
+        "interactionType": "A String", # The use case most closely describing the audio content to be recognized.
+      },
     },
   }
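
Taken together, the config fields documented above map onto a request body like the following rough sketch (it reuses the `speech` handle from the earlier snippet; the gs:// URI, model choice, and phrase hints are placeholders):

    # Hypothetical body built from the fields documented above.
    body = {
        'config': {
            'languageCode': 'en-US',
            'encoding': 'FLAC',               # optional for FLAC/WAV files
            'enableWordTimeOffsets': True,    # per-word start/end timestamps
            'enableAutomaticPunctuation': True,
            'model': 'phone_call',            # or command_and_search, video, default
            'speechContexts': [{'phrases': ['Cloud Speech-to-Text']}],
        },
        'audio': {'uri': 'gs://my-bucket/my-audio.flac'},
    }

    # Asynchronous recognition: returns a google.longrunning Operation resource.
    operation = speech.longrunningrecognize(body=body).execute()
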
 
@@ -158,14 +267,26 @@
 
     { # This resource represents a long-running operation that is the result of a
       # network API call.
-    "metadata": { # Service-specific metadata associated with the operation.  It typically
-        # contains progress information and common metadata such as create time.
-        # Some services might not provide such metadata.  Any method that returns a
-        # long-running operation should document the metadata type, if any.
-      "a_key": "", # Properties of the object. Contains field @type with type URL.
+    "error": { # The `Status` type defines a logical error model that is suitable for # The error result of the operation in case of failure or cancellation.
+        # different programming environments, including REST APIs and RPC APIs. It is
+        # used by [gRPC](https://github.com/grpc). Each `Status` message contains
+        # three pieces of data: error code, error message, and error details.
+        #
+        # You can find out more about this error model and how to work with it in the
+        # [API Design Guide](https://cloud.google.com/apis/design/errors).
+      "message": "A String", # A developer-facing error message, which should be in English. Any
+          # user-facing error message should be localized and sent in the
+          # google.rpc.Status.details field, or localized by the client.
+      "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+      "details": [ # A list of messages that carry the error details.  There is a common set of
+          # message types for APIs to use.
+        {
+          "a_key": "", # Properties of the object. Contains field @type with type URL.
+        },
+      ],
     },
     "done": True or False, # If the value is `false`, it means the operation is still in progress.
-        # If true, the operation is completed, and either `error` or `response` is
+        # If `true`, the operation is completed, and either `error` or `response` is
         # available.
     "response": { # The normal response of the operation in case of success.  If the original
         # method returns no data on success, such as `Delete`, the response is
@@ -179,69 +300,12 @@
     },
     "name": "A String", # The server-assigned name, which is only unique within the same service that
         # originally returns it. If you use the default HTTP mapping, the
-        # `name` should have the format of `operations/some/unique/name`.
-    "error": { # The `Status` type defines a logical error model that is suitable for different # The error result of the operation in case of failure or cancellation.
-        # programming environments, including REST APIs and RPC APIs. It is used by
-        # [gRPC](https://github.com/grpc). The error model is designed to be:
-        #
-        # - Simple to use and understand for most users
-        # - Flexible enough to meet unexpected needs
-        #
-        # # Overview
-        #
-        # The `Status` message contains three pieces of data: error code, error message,
-        # and error details. The error code should be an enum value of
-        # google.rpc.Code, but it may accept additional error codes if needed.  The
-        # error message should be a developer-facing English message that helps
-        # developers *understand* and *resolve* the error. If a localized user-facing
-        # error message is needed, put the localized message in the error details or
-        # localize it in the client. The optional error details may contain arbitrary
-        # information about the error. There is a predefined set of error detail types
-        # in the package `google.rpc` that can be used for common error conditions.
-        #
-        # # Language mapping
-        #
-        # The `Status` message is the logical representation of the error model, but it
-        # is not necessarily the actual wire format. When the `Status` message is
-        # exposed in different client libraries and different wire protocols, it can be
-        # mapped differently. For example, it will likely be mapped to some exceptions
-        # in Java, but more likely mapped to some error codes in C.
-        #
-        # # Other uses
-        #
-        # The error model and the `Status` message can be used in a variety of
-        # environments, either with or without APIs, to provide a
-        # consistent developer experience across different environments.
-        #
-        # Example uses of this error model include:
-        #
-        # - Partial errors. If a service needs to return partial errors to the client,
-        #     it may embed the `Status` in the normal response to indicate the partial
-        #     errors.
-        #
-        # - Workflow errors. A typical workflow has multiple steps. Each step may
-        #     have a `Status` message for error reporting.
-        #
-        # - Batch operations. If a client uses batch request and batch response, the
-        #     `Status` message should be used directly inside batch response, one for
-        #     each error sub-response.
-        #
-        # - Asynchronous operations. If an API call embeds asynchronous operation
-        #     results in its response, the status of those operations should be
-        #     represented directly using the `Status` message.
-        #
-        # - Logging. If some API errors are stored in logs, the message `Status` could
-        #     be used directly after any stripping needed for security/privacy reasons.
-      "message": "A String", # A developer-facing error message, which should be in English. Any
-          # user-facing error message should be localized and sent in the
-          # google.rpc.Status.details field, or localized by the client.
-      "code": 42, # The status code, which should be an enum value of google.rpc.Code.
-      "details": [ # A list of messages that carry the error details.  There will be a
-          # common set of message types for APIs to use.
-        {
-          "a_key": "", # Properties of the object. Contains field @type with type URL.
-        },
-      ],
+        # `name` should be a resource name ending with `operations/{unique_id}`.
+    "metadata": { # Service-specific metadata associated with the operation.  It typically
+        # contains progress information and common metadata such as create time.
+        # Some services might not provide such metadata.  Any method that returns a
+        # long-running operation should document the metadata type, if any.
+      "a_key": "", # Properties of the object. Contains field @type with type URL.
     },
   }</pre>
 </div>
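
A sketch of polling the Operation shape documented above until it resolves to either `error` or `response` (assumes the v1 `operations` collection and continues from the request sketch earlier):

    import time

    # Poll until done; the Operation then carries either 'error' or 'response'.
    name = operation['name']
    while True:
        op = service.operations().get(name=name).execute()
        if op.get('done'):
            break
        time.sleep(5)

    if 'error' in op:
        raise RuntimeError(op['error'].get('message', 'recognition failed'))
    long_running_response = op.get('response', {})  # LongRunningRecognizeResponse
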
@@ -259,12 +323,13 @@
     "audio": { # Contains audio data in the encoding specified in the `RecognitionConfig`. # *Required* The audio data to be recognized.
         # Either `content` or `uri` must be supplied. Supplying both or neither
         # returns google.rpc.Code.INVALID_ARGUMENT. See
-        # [audio limits](https://cloud.google.com/speech/limits#content).
+        # [content limits](/speech-to-text/quotas#content).
       "content": "A String", # The audio data bytes encoded as specified in
-          # `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a
+          # `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
           # pure binary representation, whereas JSON representations use base64.
       "uri": "A String", # URI that points to a file that contains audio data bytes as specified in
-          # `RecognitionConfig`. Currently, only Google Cloud Storage URIs are
+          # `RecognitionConfig`. The file must not be compressed (for example, gzip).
+          # Currently, only Google Cloud Storage URIs are
           # supported, which must be specified in the following format:
           # `gs://bucket_name/object_name` (other URI formats return
           # google.rpc.Code.INVALID_ARGUMENT). For more information, see
@@ -276,25 +341,104 @@
       "languageCode": "A String", # *Required* The language of the supplied audio as a
           # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
           # Example: "en-US".
-          # See [Language Support](https://cloud.google.com/speech/docs/languages)
+          # See [Language Support](/speech-to-text/docs/languages)
           # for a list of the currently supported language codes.
-      "encoding": "A String", # *Required* Encoding of audio data sent in all `RecognitionAudio` messages.
+      "audioChannelCount": 42, # *Optional* The number of channels in the input audio data.
+          # ONLY set this for MULTI-CHANNEL recognition.
+          # Valid values for LINEAR16 and FLAC are `1`-`8`.
+          # Valid values for OGG_OPUS are '1'-'254'.
+          # Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
+          # If `0` or omitted, defaults to one channel (mono).
+          # Note: We only recognize the first channel by default.
+          # To perform independent recognition on each channel set
+          # `enable_separate_recognition_per_channel` to 'true'.
+      "encoding": "A String", # Encoding of audio data sent in all `RecognitionAudio` messages.
+          # This field is optional for `FLAC` and `WAV` audio files and required
+          # for all other audio formats. For details, see AudioEncoding.
+      "enableAutomaticPunctuation": True or False, # *Optional* If 'true', adds punctuation to recognition result hypotheses.
+          # This feature is only available in select languages. Setting this for
+          # requests in other languages has no effect at all.
+          # The default 'false' value does not add punctuation to result hypotheses.
+          # Note: This is currently offered as an experimental service, complimentary
+          # to all users. In the future this may be exclusively available as a
+          # premium feature.
+      "enableSeparateRecognitionPerChannel": True or False, # This needs to be set to `true` explicitly and `audio_channel_count` > 1
+          # to get each channel recognized separately. The recognition result will
+          # contain a `channel_tag` field to state which channel that result belongs
+          # to. If this is not true, we will only recognize the first channel. The
+          # request is billed cumulatively for all channels recognized:
+          # `audio_channel_count` multiplied by the length of the audio.
+      "enableWordTimeOffsets": True or False, # *Optional* If `true`, the top result includes a list of words and
+          # the start and end time offsets (timestamps) for those words. If
+          # `false`, no word-level time offset information is returned. The default is
+          # `false`.
       "maxAlternatives": 42, # *Optional* Maximum number of recognition hypotheses to be returned.
           # Specifically, the maximum number of `SpeechRecognitionAlternative` messages
           # within each `SpeechRecognitionResult`.
           # The server may return fewer than `max_alternatives`.
           # Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
           # one. If omitted, will return a maximum of one.
-      "sampleRateHertz": 42, # *Required* Sample rate in Hertz of the audio data sent in all
+      "useEnhanced": True or False, # *Optional* Set to true to use an enhanced model for speech recognition.
+          # If `use_enhanced` is set to true and the `model` field is not set, then
+          # an appropriate enhanced model is chosen if:
+          # 1. project is eligible for requesting enhanced models
+          # 2. an enhanced model exists for the audio
+          #
+          # If `use_enhanced` is true and an enhanced version of the specified model
+          # does not exist, then the speech is recognized using the standard version
+          # of the specified model.
+          #
+          # Enhanced speech models require that you opt-in to data logging using
+          # instructions in the
+          # [documentation](/speech-to-text/docs/enable-data-logging). If you set
+          # `use_enhanced` to true and you have not enabled audio logging, then you
+          # will receive an error.
+      "sampleRateHertz": 42, # Sample rate in Hertz of the audio data sent in all
           # `RecognitionAudio` messages. Valid values are: 8000-48000.
           # 16000 is optimal. For best results, set the sampling rate of the audio
           # source to 16000 Hz. If that's not possible, use the native sample rate of
           # the audio source (instead of re-sampling).
+          # This field is optional for FLAC and WAV audio files, but is
+          # required for all other audio formats. For details, see AudioEncoding.
       "profanityFilter": True or False, # *Optional* If set to `true`, the server will attempt to filter out
           # profanities, replacing all but the initial character in each filtered word
           # with asterisks, e.g. "f***". If set to `false` or omitted, profanities
           # won't be filtered out.
-      "speechContexts": [ # *Optional* A means to provide context to assist the speech recognition.
+      "model": "A String", # *Optional* Which model to select for the given request. Select the model
+          # best suited to your domain to get best results. If a model is not
+          # explicitly specified, then we auto-select a model based on the parameters
+          # in the RecognitionConfig.
+          # <table>
+          #   <tr>
+          #     <td><b>Model</b></td>
+          #     <td><b>Description</b></td>
+          #   </tr>
+          #   <tr>
+          #     <td><code>command_and_search</code></td>
+          #     <td>Best for short queries such as voice commands or voice search.</td>
+          #   </tr>
+          #   <tr>
+          #     <td><code>phone_call</code></td>
+          #     <td>Best for audio that originated from a phone call (typically
+          #     recorded at an 8khz sampling rate).</td>
+          #   </tr>
+          #   <tr>
+          #     <td><code>video</code></td>
+          #     <td>Best for audio that originated from video or includes multiple
+          #         speakers. Ideally the audio is recorded at a 16khz or greater
+          #         sampling rate. This is a premium model that costs more than the
+          #         standard rate.</td>
+          #   </tr>
+          #   <tr>
+          #     <td><code>default</code></td>
+          #     <td>Best for audio that is not one of the specific audio models.
+          #         For example, long-form audio. Ideally the audio is high-fidelity,
+          #         recorded at a 16khz or greater sampling rate.</td>
+          #   </tr>
+          # </table>
+      "speechContexts": [ # *Optional* array of SpeechContext.
+          # A means to provide context to assist the speech recognition. For more
+          # information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
         { # Provides "hints" to the speech recognizer to favor specific words and phrases
             # in the results.
           "phrases": [ # *Optional* A list of strings containing words and phrases "hints" so that
@@ -302,11 +446,38 @@
               # to improve the accuracy for specific words and phrases, for example, if
               # specific commands are typically spoken by the user. This can also be used
               # to add additional words to the vocabulary of the recognizer. See
-              # [usage limits](https://cloud.google.com/speech/limits#content).
+              # [usage limits](/speech-to-text/quotas#content).
+              #
+              # List items can also be set to classes for groups of words that represent
+              # common concepts that occur in natural language. For example, rather than
+              # providing phrase hints for every month of the year, using the $MONTH class
+              # improves the likelihood of correctly transcribing audio that includes
+              # months.
             "A String",
           ],
         },
       ],
+      "metadata": { # Description of audio data to be recognized. # *Optional* Metadata regarding this request.
+        "recordingDeviceType": "A String", # The type of device the speech was recorded with.
+        "originalMediaType": "A String", # The original media the speech was recorded on.
+        "microphoneDistance": "A String", # The audio type that most closely describes the audio being recognized.
+        "obfuscatedId": "A String", # Obfuscated (privacy-protected) ID of the user, to identify number of
+            # unique users using the service.
+        "originalMimeType": "A String", # Mime type of the original audio file.  For example `audio/m4a`,
+            # `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`.
+            # A list of possible audio mime types is maintained at
+            # http://www.iana.org/assignments/media-types/media-types.xhtml#audio
+        "industryNaicsCodeOfAudio": 42, # The industry vertical to which this speech recognition request most
+            # closely applies. This is most indicative of the topics contained
+            # in the audio.  Use the 6-digit NAICS code to identify the industry
+            # vertical - see https://www.naics.com/search/.
+        "audioTopic": "A String", # Description of the content. Eg. "Recordings of federal supreme court
+            # hearings from 2012".
+        "recordingDeviceName": "A String", # The device used to make the recording.  Examples 'Nexus 5X' or
+            # 'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or
+            # 'Cardioid Microphone'.
+        "interactionType": "A String", # The use case most closely describing the audio content to be recognized.
+      },
     },
   }
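
For comparison, the synchronous path sends short audio inline as base64 (a sketch only; the file name is a placeholder, and encoding/sampleRateHertz are omitted because they are optional for WAV per the docs above):

    import base64

    # Read a short clip and send it inline as base64-encoded bytes.
    with open('short-clip.wav', 'rb') as f:
        audio_content = base64.b64encode(f.read()).decode('utf-8')

    response = speech.recognize(body={
        'config': {'languageCode': 'en-US'},
        'audio': {'content': audio_content},
    }).execute()
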
 
@@ -321,22 +492,44 @@
     { # The only message returned to the client by the `Recognize` method. It
       # contains the result as zero or more sequential `SpeechRecognitionResult`
       # messages.
-    "results": [ # *Output-only* Sequential list of transcription results corresponding to
+    "results": [ # Output only. Sequential list of transcription results corresponding to
         # sequential portions of audio.
       { # A speech recognition result corresponding to a portion of the audio.
-        "alternatives": [ # *Output-only* May contain one or more recognition hypotheses (up to the
+        "channelTag": 42, # For multi-channel audio, this is the channel number corresponding to the
+            # recognized result for the audio from that channel.
+            # For audio_channel_count = N, its output values can range from '1' to 'N'.
+        "alternatives": [ # Output only. May contain one or more recognition hypotheses (up to the
             # maximum specified in `max_alternatives`).
-            # These alternatives are ordered in terms of accuracy, with the first/top
+            # These alternatives are ordered in terms of accuracy, with the top (first)
             # alternative being the most probable, as ranked by the recognizer.
           { # Alternative hypotheses (a.k.a. n-best list).
-            "confidence": 3.14, # *Output-only* The confidence estimate between 0.0 and 1.0. A higher number
+            "confidence": 3.14, # Output only. The confidence estimate between 0.0 and 1.0. A higher number
                 # indicates an estimated greater likelihood that the recognized words are
-                # correct. This field is typically provided only for the top hypothesis, and
-                # only for `is_final=true` results. Clients should not rely on the
-                # `confidence` field as it is not guaranteed to be accurate, or even set, in
-                # any of the results.
+                # correct. This field is set only for the top alternative of a non-streaming
+                # result or of a streaming result where `is_final=true`.
+                # This field is not guaranteed to be accurate and users should not rely on it
+                # to be always provided.
                 # The default of 0.0 is a sentinel value indicating `confidence` was not set.
-            "transcript": "A String", # *Output-only* Transcript text representing the words that the user spoke.
+            "transcript": "A String", # Output only. Transcript text representing the words that the user spoke.
+            "words": [ # Output only. A list of word-specific information for each recognized word.
+                # Note: When `enable_speaker_diarization` is true, you will see all the words
+                # from the beginning of the audio.
+              { # Word-specific information for recognized words.
+                "endTime": "A String", # Output only. Time offset relative to the beginning of the audio,
+                    # and corresponding to the end of the spoken word.
+                    # This field is only set if `enable_word_time_offsets=true` and only
+                    # in the top hypothesis.
+                    # This is an experimental feature and the accuracy of the time offset can
+                    # vary.
+                "word": "A String", # Output only. The word corresponding to this set of information.
+                "startTime": "A String", # Output only. Time offset relative to the beginning of the audio,
+                    # and corresponding to the start of the spoken word.
+                    # This field is only set if `enable_word_time_offsets=true` and only
+                    # in the top hypothesis.
+                    # This is an experimental feature and the accuracy of the time offset can
+                    # vary.
+              },
+            ],
           },
         ],
       },
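
Reading the response shape documented above might look like the following sketch (defensive `.get()` calls because several fields are only present when the matching option was enabled):

    # Walk results -> alternatives -> words, as documented above.
    for result in response.get('results', []):
        channel = result.get('channelTag')        # set for multi-channel audio
        for alt in result.get('alternatives', []):
            print(channel, alt.get('confidence'), alt.get('transcript'))
            for word in alt.get('words', []):     # needs enableWordTimeOffsets
                print('  ', word.get('word'), word.get('startTime'), word.get('endTime'))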