docs: docs update (#911)
diff --git a/docs/dyn/speech_v1.speech.html b/docs/dyn/speech_v1.speech.html
index e743764..cfa98c7 100644
--- a/docs/dyn/speech_v1.speech.html
+++ b/docs/dyn/speech_v1.speech.html
@@ -96,82 +96,52 @@
{ # The top-level message sent by the client for the `LongRunningRecognize`
# method.
- "audio": { # Contains audio data in the encoding specified in the `RecognitionConfig`. # Required. The audio data to be recognized.
- # Either `content` or `uri` must be supplied. Supplying both or neither
- # returns google.rpc.Code.INVALID_ARGUMENT. See
- # [content limits](https://cloud.google.com/speech-to-text/quotas#content).
- "content": "A String", # The audio data bytes encoded as specified in
- # `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
- # pure binary representation, whereas JSON representations use base64.
- "uri": "A String", # URI that points to a file that contains audio data bytes as specified in
- # `RecognitionConfig`. The file must not be compressed (for example, gzip).
- # Currently, only Google Cloud Storage URIs are
- # supported, which must be specified in the following format:
- # `gs://bucket_name/object_name` (other URI formats return
- # google.rpc.Code.INVALID_ARGUMENT). For more information, see
- # [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
- },
- "config": { # Provides information to the recognizer that specifies how to process the # Required. Provides information to the recognizer that specifies how to
+ "config": { # Provides information to the recognizer that specifies how to process the # Required. Provides information to the recognizer that specifies how to
# process the request.
# request.
- "languageCode": "A String", # Required. The language of the supplied audio as a
- # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
- # Example: "en-US".
- # See [Language
- # Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
- # of the currently supported language codes.
- "audioChannelCount": 42, # The number of channels in the input audio data.
- # ONLY set this for MULTI-CHANNEL recognition.
- # Valid values for LINEAR16 and FLAC are `1`-`8`.
- # Valid values for OGG_OPUS are '1'-'254'.
- # Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
- # If `0` or omitted, defaults to one channel (mono).
- # Note: We only recognize the first channel by default.
- # To perform independent recognition on each channel set
- # `enable_separate_recognition_per_channel` to 'true'.
- "encoding": "A String", # Encoding of audio data sent in all `RecognitionAudio` messages.
- # This field is optional for `FLAC` and `WAV` audio files and required
- # for all other audio formats. For details, see AudioEncoding.
- "enableAutomaticPunctuation": True or False, # If 'true', adds punctuation to recognition result hypotheses.
- # This feature is only available in select languages. Setting this for
- # requests in other languages has no effect at all.
- # The default 'false' value does not add punctuation to result hypotheses.
- "enableSeparateRecognitionPerChannel": True or False, # This needs to be set to `true` explicitly and `audio_channel_count` > 1
+ "enableSeparateRecognitionPerChannel": True or False, # This needs to be set to `true` explicitly and `audio_channel_count` > 1
# to get each channel recognized separately. The recognition result will
# contain a `channel_tag` field to state which channel that result belongs
# to. If this is not true, we will only recognize the first channel. The
# request is billed cumulatively for all channels recognized:
# `audio_channel_count` multiplied by the length of the audio.
- "enableWordTimeOffsets": True or False, # If `true`, the top result includes a list of words and
- # the start and end time offsets (timestamps) for those words. If
- # `false`, no word-level time offset information is returned. The default is
- # `false`.
- "maxAlternatives": 42, # Maximum number of recognition hypotheses to be returned.
+ "enableAutomaticPunctuation": True or False, # If 'true', adds punctuation to recognition result hypotheses.
+ # This feature is only available in select languages. Setting this for
+ # requests in other languages has no effect at all.
+ # The default 'false' value does not add punctuation to result hypotheses.
+ "maxAlternatives": 42, # Maximum number of recognition hypotheses to be returned.
# Specifically, the maximum number of `SpeechRecognitionAlternative` messages
# within each `SpeechRecognitionResult`.
# The server may return fewer than `max_alternatives`.
# Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
# one. If omitted, will return a maximum of one.
- "useEnhanced": True or False, # Set to true to use an enhanced model for speech recognition.
- # If `use_enhanced` is set to true and the `model` field is not set, then
- # an appropriate enhanced model is chosen if an enhanced model exists for
- # the audio.
- #
- # If `use_enhanced` is true and an enhanced version of the specified model
- # does not exist, then the speech is recognized using the standard version
- # of the specified model.
- "sampleRateHertz": 42, # Sample rate in Hertz of the audio data sent in all
- # `RecognitionAudio` messages. Valid values are: 8000-48000.
- # 16000 is optimal. For best results, set the sampling rate of the audio
- # source to 16000 Hz. If that's not possible, use the native sample rate of
- # the audio source (instead of re-sampling).
- # This field is optional for FLAC and WAV audio files, but is
- # required for all other audio formats. For details, see AudioEncoding.
- "profanityFilter": True or False, # If set to `true`, the server will attempt to filter out
- # profanities, replacing all but the initial character in each filtered word
- # with asterisks, e.g. "f***". If set to `false` or omitted, profanities
- # won't be filtered out.
- "model": "A String", # Which model to select for the given request. Select the model
+ "encoding": "A String", # Encoding of audio data sent in all `RecognitionAudio` messages.
+ # This field is optional for `FLAC` and `WAV` audio files and required
+ # for all other audio formats. For details, see AudioEncoding.
+ "speechContexts": [ # Array of SpeechContext.
+ # A means to provide context to assist the speech recognition. For more
+ # information, see
+ # [speech
+ # adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
+ { # Provides "hints" to the speech recognizer to favor specific words and phrases
+ # in the results.
+ "phrases": [ # A list of strings containing words and phrases "hints" so that
+ # the speech recognition is more likely to recognize them. This can be used
+ # to improve the accuracy for specific words and phrases, for example, if
+ # specific commands are typically spoken by the user. This can also be used
+ # to add additional words to the vocabulary of the recognizer. See
+ # [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
+ #
+ # List items can also be set to classes for groups of words that represent
+ # common concepts that occur in natural language. For example, rather than
+ # providing phrase hints for every month of the year, using the $MONTH class
+ # improves the likelihood of correctly transcribing audio that includes
+ # months.
+ "A String",
+ ],
+ },
+ ],
+ "model": "A String", # Which model to select for the given request. Select the model
# best suited to your domain to get best results. If a model is not
# explicitly specified, then we auto-select a model based on the parameters
# in the RecognitionConfig.
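
As context for the `model` field above, a minimal hedged sketch of pairing it with `useEnhanced`. The model name here is an assumption drawn from the model table that these hunks truncate, not a value from this diff:

```python
# Hypothetical config fragment; "phone_call" is an assumed model name
# (the full model table is truncated in this hunk).
config = {
    "languageCode": "en-US",  # required BCP-47 tag
    "model": "phone_call",    # pick the model best suited to your audio
    "useEnhanced": True,      # falls back to the standard model if no
                              # enhanced variant of the model exists
}
```
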
@@ -203,7 +173,16 @@
# recorded at a 16khz or greater sampling rate.</td>
# </tr>
# </table>
- "diarizationConfig": { # Config to enable speaker diarization. # Config to enable speaker diarization and set additional
+ "audioChannelCount": 42, # The number of channels in the input audio data.
+ # ONLY set this for MULTI-CHANNEL recognition.
+ # Valid values for LINEAR16 and FLAC are `1`-`8`.
+ # Valid values for OGG_OPUS are '1'-'254'.
+ # Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
+ # If `0` or omitted, defaults to one channel (mono).
+ # Note: We only recognize the first channel by default.
+ # To perform independent recognition on each channel set
+ # `enable_separate_recognition_per_channel` to 'true'.
+ "diarizationConfig": { # Config to enable speaker diarization. # Config to enable speaker diarization and set additional
# parameters to make diarization better suited for your application.
# Note: When this is enabled, we send all the words from the beginning of the
# audio for the top alternative in every consecutive STREAMING responses.
@@ -211,59 +190,80 @@
# identify the speakers in the conversation over time.
# For non-streaming requests, the diarization results will be provided only
# in the top alternative of the FINAL SpeechRecognitionResult.
- "minSpeakerCount": 42, # Minimum number of speakers in the conversation. This range gives you more
+ "minSpeakerCount": 42, # Minimum number of speakers in the conversation. This range gives you more
# flexibility by allowing the system to automatically determine the correct
# number of speakers. If not set, the default value is 2.
- "enableSpeakerDiarization": True or False, # If 'true', enables speaker detection for each recognized word in
- # the top alternative of the recognition result using a speaker_tag provided
- # in the WordInfo.
- "maxSpeakerCount": 42, # Maximum number of speakers in the conversation. This range gives you more
+ "maxSpeakerCount": 42, # Maximum number of speakers in the conversation. This range gives you more
# flexibility by allowing the system to automatically determine the correct
# number of speakers. If not set, the default value is 6.
- "speakerTag": 42, # Output only. Unused.
+ "speakerTag": 42, # Output only. Unused.
+ "enableSpeakerDiarization": True or False, # If 'true', enables speaker detection for each recognized word in
+ # the top alternative of the recognition result using a speaker_tag provided
+ # in the WordInfo.
},
- "speechContexts": [ # Array of SpeechContext.
- # A means to provide context to assist the speech recognition. For more
- # information, see
- # [speech
- # adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
- { # Provides "hints" to the speech recognizer to favor specific words and phrases
- # in the results.
- "phrases": [ # A list of strings containing words and phrases "hints" so that
- # the speech recognition is more likely to recognize them. This can be used
- # to improve the accuracy for specific words and phrases, for example, if
- # specific commands are typically spoken by the user. This can also be used
- # to add additional words to the vocabulary of the recognizer. See
- # [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
- #
- # List items can also be set to classes for groups of words that represent
- # common concepts that occur in natural language. For example, rather than
- # providing phrase hints for every month of the year, using the $MONTH class
- # improves the likelihood of correctly transcribing audio that includes
- # months.
- "A String",
- ],
- },
- ],
- "metadata": { # Description of audio data to be recognized. # Metadata regarding this request.
- "recordingDeviceType": "A String", # The type of device the speech was recorded with.
- "originalMediaType": "A String", # The original media the speech was recorded on.
- "microphoneDistance": "A String", # The audio type that most closely describes the audio being recognized.
- "originalMimeType": "A String", # Mime type of the original audio file. For example `audio/m4a`,
+ "enableWordTimeOffsets": True or False, # If `true`, the top result includes a list of words and
+ # the start and end time offsets (timestamps) for those words. If
+ # `false`, no word-level time offset information is returned. The default is
+ # `false`.
+ "languageCode": "A String", # Required. The language of the supplied audio as a
+ # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
+ # Example: "en-US".
+ # See [Language
+ # Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+ # of the currently supported language codes.
+ "profanityFilter": True or False, # If set to `true`, the server will attempt to filter out
+ # profanities, replacing all but the initial character in each filtered word
+ # with asterisks, e.g. "f***". If set to `false` or omitted, profanities
+ # won't be filtered out.
+ "useEnhanced": True or False, # Set to true to use an enhanced model for speech recognition.
+ # If `use_enhanced` is set to true and the `model` field is not set, then
+ # an appropriate enhanced model is chosen if an enhanced model exists for
+ # the audio.
+ #
+ # If `use_enhanced` is true and an enhanced version of the specified model
+ # does not exist, then the speech is recognized using the standard version
+ # of the specified model.
+ "metadata": { # Description of audio data to be recognized. # Metadata regarding this request.
+ "recordingDeviceName": "A String", # The device used to make the recording. Examples 'Nexus 5X' or
+ # 'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or
+ # 'Cardioid Microphone'.
+ "audioTopic": "A String", # Description of the content. Eg. "Recordings of federal supreme court
+ # hearings from 2012".
+ "originalMimeType": "A String", # Mime type of the original audio file. For example `audio/m4a`,
# `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`.
# A list of possible audio mime types is maintained at
# http://www.iana.org/assignments/media-types/media-types.xhtml#audio
- "industryNaicsCodeOfAudio": 42, # The industry vertical to which this speech recognition request most
+ "microphoneDistance": "A String", # The audio type that most closely describes the audio being recognized.
+ "industryNaicsCodeOfAudio": 42, # The industry vertical to which this speech recognition request most
# closely applies. This is most indicative of the topics contained
# in the audio. Use the 6-digit NAICS code to identify the industry
# vertical - see https://www.naics.com/search/.
- "audioTopic": "A String", # Description of the content. Eg. "Recordings of federal supreme court
- # hearings from 2012".
- "recordingDeviceName": "A String", # The device used to make the recording. Examples 'Nexus 5X' or
- # 'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or
- # 'Cardioid Microphone'.
- "interactionType": "A String", # The use case most closely describing the audio content to be recognized.
+ "originalMediaType": "A String", # The original media the speech was recorded on.
+ "recordingDeviceType": "A String", # The type of device the speech was recorded with.
+ "interactionType": "A String", # The use case most closely describing the audio content to be recognized.
},
+ "sampleRateHertz": 42, # Sample rate in Hertz of the audio data sent in all
+ # `RecognitionAudio` messages. Valid values are: 8000-48000.
+ # 16000 is optimal. For best results, set the sampling rate of the audio
+ # source to 16000 Hz. If that's not possible, use the native sample rate of
+ # the audio source (instead of re-sampling).
+ # This field is optional for FLAC and WAV audio files, but is
+ # required for all other audio formats. For details, see AudioEncoding.
+ },
+ "audio": { # Contains audio data in the encoding specified in the `RecognitionConfig`. # Required. The audio data to be recognized.
+ # Either `content` or `uri` must be supplied. Supplying both or neither
+ # returns google.rpc.Code.INVALID_ARGUMENT. See
+ # [content limits](https://cloud.google.com/speech-to-text/quotas#content).
+ "uri": "A String", # URI that points to a file that contains audio data bytes as specified in
+ # `RecognitionConfig`. The file must not be compressed (for example, gzip).
+ # Currently, only Google Cloud Storage URIs are
+ # supported, which must be specified in the following format:
+ # `gs://bucket_name/object_name` (other URI formats return
+ # google.rpc.Code.INVALID_ARGUMENT). For more information, see
+ # [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
+ "content": "A String", # The audio data bytes encoded as specified in
+ # `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
+ # pure binary representation, whereas JSON representations use base64.
},
}
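
Taken together, the `LongRunningRecognize` body documented above maps onto a call like the following minimal sketch. The bucket/object URI and config values are placeholder assumptions, and application-default credentials are assumed:

```python
# Minimal sketch of longrunningrecognize() with the body shape shown above.
from googleapiclient.discovery import build

service = build("speech", "v1")  # assumes application-default credentials

body = {
    "config": {
        "languageCode": "en-US",        # required: BCP-47 language tag
        "encoding": "LINEAR16",
        "sampleRateHertz": 16000,       # 16000 Hz is optimal per the docs
        "enableWordTimeOffsets": True,  # include per-word timestamps
    },
    "audio": {
        # Exactly one of `content` or `uri` may be set.
        "uri": "gs://my-bucket/my-audio.wav",  # placeholder bucket/object
    },
}

operation = service.speech().longrunningrecognize(body=body).execute()
print(operation["name"])  # server-assigned operation name
```
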
@@ -277,34 +277,10 @@
{ # This resource represents a long-running operation that is the result of a
# network API call.
- "metadata": { # Service-specific metadata associated with the operation. It typically
- # contains progress information and common metadata such as create time.
- # Some services might not provide such metadata. Any method that returns a
- # long-running operation should document the metadata type, if any.
- "a_key": "", # Properties of the object. Contains field @type with type URL.
- },
- "error": { # The `Status` type defines a logical error model that is suitable for # The error result of the operation in case of failure or cancellation.
- # different programming environments, including REST APIs and RPC APIs. It is
- # used by [gRPC](https://github.com/grpc). Each `Status` message contains
- # three pieces of data: error code, error message, and error details.
- #
- # You can find out more about this error model and how to work with it in the
- # [API Design Guide](https://cloud.google.com/apis/design/errors).
- "message": "A String", # A developer-facing error message, which should be in English. Any
- # user-facing error message should be localized and sent in the
- # google.rpc.Status.details field, or localized by the client.
- "code": 42, # The status code, which should be an enum value of google.rpc.Code.
- "details": [ # A list of messages that carry the error details. There is a common set of
- # message types for APIs to use.
- {
- "a_key": "", # Properties of the object. Contains field @type with type URL.
- },
- ],
- },
- "done": True or False, # If the value is `false`, it means the operation is still in progress.
+ "done": True or False, # If the value is `false`, it means the operation is still in progress.
# If `true`, the operation is completed, and either `error` or `response` is
# available.
- "response": { # The normal response of the operation in case of success. If the original
+ "response": { # The normal response of the operation in case of success. If the original
# method returns no data on success, such as `Delete`, the response is
# `google.protobuf.Empty`. If the original method is standard
# `Get`/`Create`/`Update`, the response should be the resource. For other
@@ -312,11 +288,35 @@
# is the original method name. For example, if the original method name
# is `TakeSnapshot()`, the inferred response type is
# `TakeSnapshotResponse`.
- "a_key": "", # Properties of the object. Contains field @type with type URL.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
},
- "name": "A String", # The server-assigned name, which is only unique within the same service that
+ "name": "A String", # The server-assigned name, which is only unique within the same service that
# originally returns it. If you use the default HTTP mapping, the
# `name` should be a resource name ending with `operations/{unique_id}`.
+ "error": { # The `Status` type defines a logical error model that is suitable for # The error result of the operation in case of failure or cancellation.
+ # different programming environments, including REST APIs and RPC APIs. It is
+ # used by [gRPC](https://github.com/grpc). Each `Status` message contains
+ # three pieces of data: error code, error message, and error details.
+ #
+ # You can find out more about this error model and how to work with it in the
+ # [API Design Guide](https://cloud.google.com/apis/design/errors).
+ "details": [ # A list of messages that carry the error details. There is a common set of
+ # message types for APIs to use.
+ {
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
+ },
+ ],
+ "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+ "message": "A String", # A developer-facing error message, which should be in English. Any
+ # user-facing error message should be localized and sent in the
+ # google.rpc.Status.details field, or localized by the client.
+ },
+ "metadata": { # Service-specific metadata associated with the operation. It typically
+ # contains progress information and common metadata such as create time.
+ # Some services might not provide such metadata. Any method that returns a
+ # long-running operation should document the metadata type, if any.
+ "a_key": "", # Properties of the object. Contains field @type with type URL.
+ },
}</pre>
</div>
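
A hedged sketch of polling the `Operation` shape documented above via the v1 `operations` resource. The operation name is a placeholder; in practice it comes from the `longrunningrecognize()` response:

```python
# Poll until `done`, then branch on `error` vs. `response` as documented above.
import time

from googleapiclient.discovery import build

service = build("speech", "v1")  # assumes application-default credentials
name = "1234567890"  # placeholder server-assigned operation name

while True:
    op = service.operations().get(name=name).execute()
    if op.get("done"):
        break
    time.sleep(5)  # arbitrary poll interval

if "error" in op:
    # google.rpc.Status: code, message, details
    raise RuntimeError(f"{op['error'].get('code')}: {op['error'].get('message')}")

response = op.get("response", {})  # LongRunningRecognizeResponse on success
```
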
@@ -330,82 +330,52 @@
The object takes the form of:
{ # The top-level message sent by the client for the `Recognize` method.
- "audio": { # Contains audio data in the encoding specified in the `RecognitionConfig`. # Required. The audio data to be recognized.
- # Either `content` or `uri` must be supplied. Supplying both or neither
- # returns google.rpc.Code.INVALID_ARGUMENT. See
- # [content limits](https://cloud.google.com/speech-to-text/quotas#content).
- "content": "A String", # The audio data bytes encoded as specified in
- # `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
- # pure binary representation, whereas JSON representations use base64.
- "uri": "A String", # URI that points to a file that contains audio data bytes as specified in
- # `RecognitionConfig`. The file must not be compressed (for example, gzip).
- # Currently, only Google Cloud Storage URIs are
- # supported, which must be specified in the following format:
- # `gs://bucket_name/object_name` (other URI formats return
- # google.rpc.Code.INVALID_ARGUMENT). For more information, see
- # [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
- },
- "config": { # Provides information to the recognizer that specifies how to process the # Required. Provides information to the recognizer that specifies how to
+ "config": { # Provides information to the recognizer that specifies how to process the # Required. Provides information to the recognizer that specifies how to
# process the request.
# request.
- "languageCode": "A String", # Required. The language of the supplied audio as a
- # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
- # Example: "en-US".
- # See [Language
- # Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
- # of the currently supported language codes.
- "audioChannelCount": 42, # The number of channels in the input audio data.
- # ONLY set this for MULTI-CHANNEL recognition.
- # Valid values for LINEAR16 and FLAC are `1`-`8`.
- # Valid values for OGG_OPUS are '1'-'254'.
- # Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
- # If `0` or omitted, defaults to one channel (mono).
- # Note: We only recognize the first channel by default.
- # To perform independent recognition on each channel set
- # `enable_separate_recognition_per_channel` to 'true'.
- "encoding": "A String", # Encoding of audio data sent in all `RecognitionAudio` messages.
- # This field is optional for `FLAC` and `WAV` audio files and required
- # for all other audio formats. For details, see AudioEncoding.
- "enableAutomaticPunctuation": True or False, # If 'true', adds punctuation to recognition result hypotheses.
- # This feature is only available in select languages. Setting this for
- # requests in other languages has no effect at all.
- # The default 'false' value does not add punctuation to result hypotheses.
- "enableSeparateRecognitionPerChannel": True or False, # This needs to be set to `true` explicitly and `audio_channel_count` > 1
+ "enableSeparateRecognitionPerChannel": True or False, # This needs to be set to `true` explicitly and `audio_channel_count` > 1
# to get each channel recognized separately. The recognition result will
# contain a `channel_tag` field to state which channel that result belongs
# to. If this is not true, we will only recognize the first channel. The
# request is billed cumulatively for all channels recognized:
# `audio_channel_count` multiplied by the length of the audio.
- "enableWordTimeOffsets": True or False, # If `true`, the top result includes a list of words and
- # the start and end time offsets (timestamps) for those words. If
- # `false`, no word-level time offset information is returned. The default is
- # `false`.
- "maxAlternatives": 42, # Maximum number of recognition hypotheses to be returned.
+ "enableAutomaticPunctuation": True or False, # If 'true', adds punctuation to recognition result hypotheses.
+ # This feature is only available in select languages. Setting this for
+ # requests in other languages has no effect at all.
+ # The default 'false' value does not add punctuation to result hypotheses.
+ "maxAlternatives": 42, # Maximum number of recognition hypotheses to be returned.
# Specifically, the maximum number of `SpeechRecognitionAlternative` messages
# within each `SpeechRecognitionResult`.
# The server may return fewer than `max_alternatives`.
# Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
# one. If omitted, will return a maximum of one.
- "useEnhanced": True or False, # Set to true to use an enhanced model for speech recognition.
- # If `use_enhanced` is set to true and the `model` field is not set, then
- # an appropriate enhanced model is chosen if an enhanced model exists for
- # the audio.
- #
- # If `use_enhanced` is true and an enhanced version of the specified model
- # does not exist, then the speech is recognized using the standard version
- # of the specified model.
- "sampleRateHertz": 42, # Sample rate in Hertz of the audio data sent in all
- # `RecognitionAudio` messages. Valid values are: 8000-48000.
- # 16000 is optimal. For best results, set the sampling rate of the audio
- # source to 16000 Hz. If that's not possible, use the native sample rate of
- # the audio source (instead of re-sampling).
- # This field is optional for FLAC and WAV audio files, but is
- # required for all other audio formats. For details, see AudioEncoding.
- "profanityFilter": True or False, # If set to `true`, the server will attempt to filter out
- # profanities, replacing all but the initial character in each filtered word
- # with asterisks, e.g. "f***". If set to `false` or omitted, profanities
- # won't be filtered out.
- "model": "A String", # Which model to select for the given request. Select the model
+ "encoding": "A String", # Encoding of audio data sent in all `RecognitionAudio` messages.
+ # This field is optional for `FLAC` and `WAV` audio files and required
+ # for all other audio formats. For details, see AudioEncoding.
+ "speechContexts": [ # Array of SpeechContext.
+ # A means to provide context to assist the speech recognition. For more
+ # information, see
+ # [speech
+ # adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
+ { # Provides "hints" to the speech recognizer to favor specific words and phrases
+ # in the results.
+ "phrases": [ # A list of strings containing words and phrases "hints" so that
+ # the speech recognition is more likely to recognize them. This can be used
+ # to improve the accuracy for specific words and phrases, for example, if
+ # specific commands are typically spoken by the user. This can also be used
+ # to add additional words to the vocabulary of the recognizer. See
+ # [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
+ #
+ # List items can also be set to classes for groups of words that represent
+ # common concepts that occur in natural language. For example, rather than
+ # providing phrase hints for every month of the year, using the $MONTH class
+ # improves the likelihood of correctly transcribing audio that includes
+ # months.
+ "A String",
+ ],
+ },
+ ],
+ "model": "A String", # Which model to select for the given request. Select the model
# best suited to your domain to get best results. If a model is not
# explicitly specified, then we auto-select a model based on the parameters
# in the RecognitionConfig.
@@ -437,7 +407,16 @@
# recorded at a 16khz or greater sampling rate.</td>
# </tr>
# </table>
- "diarizationConfig": { # Config to enable speaker diarization. # Config to enable speaker diarization and set additional
+ "audioChannelCount": 42, # The number of channels in the input audio data.
+ # ONLY set this for MULTI-CHANNEL recognition.
+ # Valid values for LINEAR16 and FLAC are `1`-`8`.
+ # Valid values for OGG_OPUS are '1'-'254'.
+ # Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
+ # If `0` or omitted, defaults to one channel (mono).
+ # Note: We only recognize the first channel by default.
+ # To perform independent recognition on each channel set
+ # `enable_separate_recognition_per_channel` to 'true'.
+ "diarizationConfig": { # Config to enable speaker diarization. # Config to enable speaker diarization and set additional
# parameters to make diarization better suited for your application.
# Note: When this is enabled, we send all the words from the beginning of the
# audio for the top alternative in every consecutive STREAMING responses.
@@ -445,59 +424,80 @@
# identify the speakers in the conversation over time.
# For non-streaming requests, the diarization results will be provided only
# in the top alternative of the FINAL SpeechRecognitionResult.
- "minSpeakerCount": 42, # Minimum number of speakers in the conversation. This range gives you more
+ "minSpeakerCount": 42, # Minimum number of speakers in the conversation. This range gives you more
# flexibility by allowing the system to automatically determine the correct
# number of speakers. If not set, the default value is 2.
- "enableSpeakerDiarization": True or False, # If 'true', enables speaker detection for each recognized word in
- # the top alternative of the recognition result using a speaker_tag provided
- # in the WordInfo.
- "maxSpeakerCount": 42, # Maximum number of speakers in the conversation. This range gives you more
+ "maxSpeakerCount": 42, # Maximum number of speakers in the conversation. This range gives you more
# flexibility by allowing the system to automatically determine the correct
# number of speakers. If not set, the default value is 6.
- "speakerTag": 42, # Output only. Unused.
+ "speakerTag": 42, # Output only. Unused.
+ "enableSpeakerDiarization": True or False, # If 'true', enables speaker detection for each recognized word in
+ # the top alternative of the recognition result using a speaker_tag provided
+ # in the WordInfo.
},
- "speechContexts": [ # Array of SpeechContext.
- # A means to provide context to assist the speech recognition. For more
- # information, see
- # [speech
- # adaptation](https://cloud.google.com/speech-to-text/docs/context-strength).
- { # Provides "hints" to the speech recognizer to favor specific words and phrases
- # in the results.
- "phrases": [ # A list of strings containing words and phrases "hints" so that
- # the speech recognition is more likely to recognize them. This can be used
- # to improve the accuracy for specific words and phrases, for example, if
- # specific commands are typically spoken by the user. This can also be used
- # to add additional words to the vocabulary of the recognizer. See
- # [usage limits](https://cloud.google.com/speech-to-text/quotas#content).
- #
- # List items can also be set to classes for groups of words that represent
- # common concepts that occur in natural language. For example, rather than
- # providing phrase hints for every month of the year, using the $MONTH class
- # improves the likelihood of correctly transcribing audio that includes
- # months.
- "A String",
- ],
- },
- ],
- "metadata": { # Description of audio data to be recognized. # Metadata regarding this request.
- "recordingDeviceType": "A String", # The type of device the speech was recorded with.
- "originalMediaType": "A String", # The original media the speech was recorded on.
- "microphoneDistance": "A String", # The audio type that most closely describes the audio being recognized.
- "originalMimeType": "A String", # Mime type of the original audio file. For example `audio/m4a`,
+ "enableWordTimeOffsets": True or False, # If `true`, the top result includes a list of words and
+ # the start and end time offsets (timestamps) for those words. If
+ # `false`, no word-level time offset information is returned. The default is
+ # `false`.
+ "languageCode": "A String", # Required. The language of the supplied audio as a
+ # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
+ # Example: "en-US".
+ # See [Language
+ # Support](https://cloud.google.com/speech-to-text/docs/languages) for a list
+ # of the currently supported language codes.
+ "profanityFilter": True or False, # If set to `true`, the server will attempt to filter out
+ # profanities, replacing all but the initial character in each filtered word
+ # with asterisks, e.g. "f***". If set to `false` or omitted, profanities
+ # won't be filtered out.
+ "useEnhanced": True or False, # Set to true to use an enhanced model for speech recognition.
+ # If `use_enhanced` is set to true and the `model` field is not set, then
+ # an appropriate enhanced model is chosen if an enhanced model exists for
+ # the audio.
+ #
+ # If `use_enhanced` is true and an enhanced version of the specified model
+ # does not exist, then the speech is recognized using the standard version
+ # of the specified model.
+ "metadata": { # Description of audio data to be recognized. # Metadata regarding this request.
+ "recordingDeviceName": "A String", # The device used to make the recording. Examples 'Nexus 5X' or
+ # 'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or
+ # 'Cardioid Microphone'.
+ "audioTopic": "A String", # Description of the content. Eg. "Recordings of federal supreme court
+ # hearings from 2012".
+ "originalMimeType": "A String", # Mime type of the original audio file. For example `audio/m4a`,
# `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`.
# A list of possible audio mime types is maintained at
# http://www.iana.org/assignments/media-types/media-types.xhtml#audio
- "industryNaicsCodeOfAudio": 42, # The industry vertical to which this speech recognition request most
+ "microphoneDistance": "A String", # The audio type that most closely describes the audio being recognized.
+ "industryNaicsCodeOfAudio": 42, # The industry vertical to which this speech recognition request most
# closely applies. This is most indicative of the topics contained
# in the audio. Use the 6-digit NAICS code to identify the industry
# vertical - see https://www.naics.com/search/.
- "audioTopic": "A String", # Description of the content. Eg. "Recordings of federal supreme court
- # hearings from 2012".
- "recordingDeviceName": "A String", # The device used to make the recording. Examples 'Nexus 5X' or
- # 'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or
- # 'Cardioid Microphone'.
- "interactionType": "A String", # The use case most closely describing the audio content to be recognized.
+ "originalMediaType": "A String", # The original media the speech was recorded on.
+ "recordingDeviceType": "A String", # The type of device the speech was recorded with.
+ "interactionType": "A String", # The use case most closely describing the audio content to be recognized.
},
+ "sampleRateHertz": 42, # Sample rate in Hertz of the audio data sent in all
+ # `RecognitionAudio` messages. Valid values are: 8000-48000.
+ # 16000 is optimal. For best results, set the sampling rate of the audio
+ # source to 16000 Hz. If that's not possible, use the native sample rate of
+ # the audio source (instead of re-sampling).
+ # This field is optional for FLAC and WAV audio files, but is
+ # required for all other audio formats. For details, see AudioEncoding.
+ },
+ "audio": { # Contains audio data in the encoding specified in the `RecognitionConfig`. # Required. The audio data to be recognized.
+ # Either `content` or `uri` must be supplied. Supplying both or neither
+ # returns google.rpc.Code.INVALID_ARGUMENT. See
+ # [content limits](https://cloud.google.com/speech-to-text/quotas#content).
+ "uri": "A String", # URI that points to a file that contains audio data bytes as specified in
+ # `RecognitionConfig`. The file must not be compressed (for example, gzip).
+ # Currently, only Google Cloud Storage URIs are
+ # supported, which must be specified in the following format:
+ # `gs://bucket_name/object_name` (other URI formats return
+ # google.rpc.Code.INVALID_ARGUMENT). For more information, see
+ # [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
+ "content": "A String", # The audio data bytes encoded as specified in
+ # `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
+ # pure binary representation, whereas JSON representations use base64.
},
}
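
As with the long-running variant, the `Recognize` body above translates into a short synchronous call. A hedged sketch, assuming a local WAV file; note that `content` is a bytes field, so its JSON representation must be base64-encoded by the caller:

```python
# Minimal sketch of a synchronous recognize() call with inline audio content.
import base64

from googleapiclient.discovery import build

service = build("speech", "v1")  # assumes application-default credentials

with open("short-clip.wav", "rb") as f:  # placeholder file path
    audio_bytes = f.read()

body = {
    "config": {
        "languageCode": "en-US",  # required
        "maxAlternatives": 3,     # the server may still return fewer
        # encoding/sampleRateHertz are optional for WAV, per the docs above
    },
    "audio": {
        "content": base64.b64encode(audio_bytes).decode("ascii"),
    },
}

response = service.speech().recognize(body=body).execute()
```
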
@@ -512,51 +512,51 @@
{ # The only message returned to the client by the `Recognize` method. It
# contains the result as zero or more sequential `SpeechRecognitionResult`
# messages.
- "results": [ # Sequential list of transcription results corresponding to
+ "results": [ # Sequential list of transcription results corresponding to
# sequential portions of audio.
{ # A speech recognition result corresponding to a portion of the audio.
- "channelTag": 42, # For multi-channel audio, this is the channel number corresponding to the
- # recognized result for the audio from that channel.
- # For audio_channel_count = N, its output values can range from '1' to 'N'.
- "alternatives": [ # May contain one or more recognition hypotheses (up to the
+ "alternatives": [ # May contain one or more recognition hypotheses (up to the
# maximum specified in `max_alternatives`).
# These alternatives are ordered in terms of accuracy, with the top (first)
# alternative being the most probable, as ranked by the recognizer.
{ # Alternative hypotheses (a.k.a. n-best list).
- "confidence": 3.14, # The confidence estimate between 0.0 and 1.0. A higher number
+ "confidence": 3.14, # The confidence estimate between 0.0 and 1.0. A higher number
# indicates an estimated greater likelihood that the recognized words are
# correct. This field is set only for the top alternative of a non-streaming
# result or, of a streaming result where `is_final=true`.
# This field is not guaranteed to be accurate and users should not rely on it
# to be always provided.
# The default of 0.0 is a sentinel value indicating `confidence` was not set.
- "transcript": "A String", # Transcript text representing the words that the user spoke.
- "words": [ # A list of word-specific information for each recognized word.
+ "transcript": "A String", # Transcript text representing the words that the user spoke.
+ "words": [ # A list of word-specific information for each recognized word.
# Note: When `enable_speaker_diarization` is true, you will see all the words
# from the beginning of the audio.
{ # Word-specific information for recognized words.
- "endTime": "A String", # Time offset relative to the beginning of the audio,
+ "word": "A String", # The word corresponding to this set of information.
+ "speakerTag": 42, # Output only. A distinct integer value is assigned for every speaker within
+ # the audio. This field specifies which one of those speakers was detected to
+ # have spoken this word. Value ranges from '1' to diarization_speaker_count.
+ # speaker_tag is set if enable_speaker_diarization = 'true' and only in the
+ # top alternative.
+ "endTime": "A String", # Time offset relative to the beginning of the audio,
# and corresponding to the end of the spoken word.
# This field is only set if `enable_word_time_offsets=true` and only
# in the top hypothesis.
# This is an experimental feature and the accuracy of the time offset can
# vary.
- "word": "A String", # The word corresponding to this set of information.
- "startTime": "A String", # Time offset relative to the beginning of the audio,
+ "startTime": "A String", # Time offset relative to the beginning of the audio,
# and corresponding to the start of the spoken word.
# This field is only set if `enable_word_time_offsets=true` and only
# in the top hypothesis.
# This is an experimental feature and the accuracy of the time offset can
# vary.
- "speakerTag": 42, # Output only. A distinct integer value is assigned for every speaker within
- # the audio. This field specifies which one of those speakers was detected to
- # have spoken this word. Value ranges from '1' to diarization_speaker_count.
- # speaker_tag is set if enable_speaker_diarization = 'true' and only in the
- # top alternative.
},
],
},
],
+ "channelTag": 42, # For multi-channel audio, this is the channel number corresponding to the
+ # recognized result for the audio from that channel.
+ # For audio_channel_count = N, its output values can range from '1' to 'N'.
},
],
}</pre>
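
Finally, a small sketch of walking the `RecognizeResponse` shape documented above. Each result carries ranked alternatives, and `words` is populated only in the top hypothesis, and only when word time offsets (or speaker diarization) were requested in the config:

```python
def print_transcripts(response: dict) -> None:
    """Print the best transcript per result from a RecognizeResponse dict."""
    for result in response.get("results", []):
        channel = result.get("channelTag")  # set for multi-channel audio
        alternatives = result.get("alternatives", [])  # ranked best-first
        if not alternatives:
            continue
        best = alternatives[0]
        print(channel, best.get("confidence"), best.get("transcript"))
        # Word info appears only in the top hypothesis, and only when
        # enable_word_time_offsets (or diarization) was enabled.
        for word in best.get("words", []):
            print(" ", word.get("word"), word.get("startTime"),
                  word.get("endTime"), word.get("speakerTag"))
```
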