<html><body>
<style>

body, h1, h2, h3, div, span, p, pre, a {
  margin: 0;
  padding: 0;
  border: 0;
  font-weight: inherit;
  font-style: inherit;
  font-size: 100%;
  font-family: inherit;
  vertical-align: baseline;
}

body {
  font-size: 13px;
  padding: 1em;
}

h1 {
  font-size: 26px;
  margin-bottom: 1em;
}

h2 {
  font-size: 24px;
  margin-bottom: 1em;
}

h3 {
  font-size: 20px;
  margin-bottom: 1em;
  margin-top: 1em;
}

pre, code {
  line-height: 1.5;
  font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
}

pre {
  margin-top: 0.5em;
}

h1, h2, h3, p {
  font-family: Arial, sans-serif;
}

h1, h2, h3 {
  border-bottom: solid #CCC 1px;
}

.toc_element {
  margin-top: 0.5em;
}

.firstline {
  margin-left: 2em;
}

.method {
  margin-top: 1em;
  border: solid 1px #CCC;
  padding: 1em;
  background: #EEE;
}

.details {
  font-weight: bold;
  font-size: 14px;
}

</style>

<h1><a href="speech_v1p1beta1.html">Cloud Speech-to-Text API</a> . <a href="speech_v1p1beta1.speech.html">speech</a></h1>
<h2>Instance Methods</h2>
<p class="toc_element">
  <code><a href="#longrunningrecognize">longrunningrecognize(body, x__xgafv=None)</a></code></p>
<p class="firstline">Performs asynchronous speech recognition: receive results via the google.longrunning.Operations interface.</p>
<p class="toc_element">
  <code><a href="#recognize">recognize(body, x__xgafv=None)</a></code></p>
<p class="firstline">Performs synchronous speech recognition: receive results after all audio has been sent and processed.</p>
<h3>Method Details</h3>
<div class="method">
    <code class="details" id="longrunningrecognize">longrunningrecognize(body, x__xgafv=None)</code>
  <pre>Performs asynchronous speech recognition: receive results via the
google.longrunning.Operations interface. Returns either an
`Operation.error` or an `Operation.response` which contains
a `LongRunningRecognizeResponse` message.
For more information on asynchronous speech recognition, see the
[how-to](https://cloud.google.com/speech-to-text/docs/async-recognize).

Args:
  body: object, The request body. (required)
    The object takes the form of:

{ # The top-level message sent by the client for the `LongRunningRecognize`
    # method.
  "audio": { # Contains audio data in the encoding specified in the `RecognitionConfig`. # *Required* The audio data to be recognized.
      # Either `content` or `uri` must be supplied. Supplying both or neither
      # returns google.rpc.Code.INVALID_ARGUMENT. See
      # [content limits](/speech-to-text/quotas#content).
    "content": "A String", # The audio data bytes encoded as specified in
        # `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
        # pure binary representation, whereas JSON representations use base64.
    "uri": "A String", # URI that points to a file that contains audio data bytes as specified in
        # `RecognitionConfig`. The file must not be compressed (for example, gzip).
        # Currently, only Google Cloud Storage URIs are
        # supported, which must be specified in the following format:
        # `gs://bucket_name/object_name` (other URI formats return
        # google.rpc.Code.INVALID_ARGUMENT). For more information, see
        # [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
  },
  "config": { # *Required* Provides information to the recognizer that specifies how to
      # process the request.
    "languageCode": "A String", # *Required* The language of the supplied audio as a
        # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
        # Example: "en-US".
        # See [Language Support](/speech-to-text/docs/languages)
        # for a list of the currently supported language codes.
    "audioChannelCount": 42, # *Optional* The number of channels in the input audio data.
        # ONLY set this for MULTI-CHANNEL recognition.
        # Valid values for LINEAR16 and FLAC are `1`-`8`.
        # Valid values for OGG_OPUS are `1`-`254`.
        # The only valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is `1`.
        # If `0` or omitted, defaults to one channel (mono).
        # Note: We only recognize the first channel by default.
        # To perform independent recognition on each channel, set
        # `enable_separate_recognition_per_channel` to `true`.
    "encoding": "A String", # Encoding of audio data sent in all `RecognitionAudio` messages.
        # This field is optional for `FLAC` and `WAV` audio files and required
        # for all other audio formats. For details, see AudioEncoding.
    "enableAutomaticPunctuation": True or False, # *Optional* If `true`, adds punctuation to recognition result hypotheses.
        # This feature is only available in select languages. Setting this for
        # requests in other languages has no effect.
        # The default `false` value does not add punctuation to result hypotheses.
        # Note: This is currently offered as an experimental service, complimentary
        # to all users. In the future this may be exclusively available as a
        # premium feature.
    "alternativeLanguageCodes": [ # *Optional* A list of up to 3 additional
        # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
        # listing possible alternative languages of the supplied audio.
        # See [Language Support](/speech-to-text/docs/languages)
        # for a list of the currently supported language codes.
        # If alternative languages are listed, the recognition result will contain
        # recognition in the most likely language detected, including the main
        # language_code. The recognition result will include the language tag
        # of the language detected in the audio.
        # Note: This feature is only supported for Voice Command and Voice Search
        # use cases, and performance may vary for other use cases (e.g., phone call
        # transcription).
      "A String",
    ],
    "enableSeparateRecognitionPerChannel": True or False, # This needs to be set to `true` explicitly, with `audio_channel_count` > 1,
        # to get each channel recognized separately. The recognition result will
        # contain a `channel_tag` field to state which channel that result belongs
        # to. If this is not true, we will only recognize the first channel. The
        # request is billed cumulatively for all channels recognized:
        # `audio_channel_count` multiplied by the length of the audio.
    "enableWordTimeOffsets": True or False, # *Optional* If `true`, the top result includes a list of words and
        # the start and end time offsets (timestamps) for those words. If
        # `false`, no word-level time offset information is returned. The default is
        # `false`.
    "enableSpeakerDiarization": True or False, # *Optional* If `true`, enables speaker detection for each recognized word in
        # the top alternative of the recognition result, using a speaker_tag provided
        # in the WordInfo.
        # Note: Use diarization_config instead. This field will be DEPRECATED soon.
    "maxAlternatives": 42, # *Optional* Maximum number of recognition hypotheses to be returned.
        # Specifically, the maximum number of `SpeechRecognitionAlternative` messages
        # within each `SpeechRecognitionResult`.
        # The server may return fewer than `max_alternatives`.
        # Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
        # one. If omitted, will return a maximum of one.
    "profanityFilter": True or False, # *Optional* If set to `true`, the server will attempt to filter out
        # profanities, replacing all but the initial character in each filtered word
        # with asterisks, e.g. "f***". If set to `false` or omitted, profanities
        # won't be filtered out.
    "useEnhanced": True or False, # *Optional* Set to true to use an enhanced model for speech recognition.
        # If `use_enhanced` is set to true and the `model` field is not set, then
        # an appropriate enhanced model is chosen if:
        # 1. the project is eligible for requesting enhanced models, and
        # 2. an enhanced model exists for the audio.
        #
        # If `use_enhanced` is true and an enhanced version of the specified model
        # does not exist, then the speech is recognized using the standard version
        # of the specified model.
        #
        # Enhanced speech models require that you opt in to data logging using
        # instructions in the
        # [documentation](/speech-to-text/docs/enable-data-logging). If you set
        # `use_enhanced` to true and you have not enabled audio logging, then you
        # will receive an error.
    "sampleRateHertz": 42, # Sample rate in Hertz of the audio data sent in all
        # `RecognitionAudio` messages. Valid values are: 8000-48000.
        # 16000 is optimal. For best results, set the sampling rate of the audio
        # source to 16000 Hz. If that's not possible, use the native sample rate of
        # the audio source (instead of re-sampling).
        # This field is optional for FLAC and WAV audio files, but is
        # required for all other audio formats. For details, see AudioEncoding.
    "diarizationSpeakerCount": 42, # *Optional*
        # If set, specifies the estimated number of speakers in the conversation.
        # If not set, defaults to `2`.
        # Ignored unless enable_speaker_diarization is set to true.
        # Note: Use diarization_config instead. This field will be DEPRECATED soon.
    "enableWordConfidence": True or False, # *Optional* If `true`, the top result includes a list of words and the
        # confidence for those words. If `false`, no word-level confidence
        # information is returned. The default is `false`.
    "model": "A String", # *Optional* Which model to select for the given request. Select the model
        # best suited to your domain to get best results. If a model is not
        # explicitly specified, then we auto-select a model based on the parameters
        # in the RecognitionConfig.
        # <table>
        # <tr>
        #   <td><b>Model</b></td>
        #   <td><b>Description</b></td>
        # </tr>
        # <tr>
        #   <td><code>command_and_search</code></td>
        #   <td>Best for short queries such as voice commands or voice search.</td>
        # </tr>
        # <tr>
        #   <td><code>phone_call</code></td>
        #   <td>Best for audio that originated from a phone call (typically
        #       recorded at an 8 kHz sampling rate).</td>
        # </tr>
        # <tr>
        #   <td><code>video</code></td>
        #   <td>Best for audio that originated from video or includes multiple
        #       speakers. Ideally the audio is recorded at a 16 kHz or greater
        #       sampling rate. This is a premium model that costs more than the
        #       standard rate.</td>
        # </tr>
        # <tr>
        #   <td><code>default</code></td>
        #   <td>Best for audio that is not one of the specific audio models.
        #       For example, long-form audio. Ideally the audio is high-fidelity,
        #       recorded at a 16 kHz or greater sampling rate.</td>
        # </tr>
        # </table>
    "diarizationConfig": { # *Optional* Config to enable speaker diarization and set additional
        # parameters to make diarization better suited for your application.
        # Note: When this is enabled, we send all the words from the beginning of the
        # audio for the top alternative in every consecutive STREAMING response.
        # This is done in order to improve our speaker tags as our models learn to
        # identify the speakers in the conversation over time.
        # For non-streaming requests, the diarization results will be provided only
        # in the top alternative of the FINAL SpeechRecognitionResult.
      "minSpeakerCount": 42, # *Optional* Only used if diarization_speaker_count is not set.
          # Minimum number of speakers in the conversation. This range gives you more
          # flexibility by allowing the system to automatically determine the correct
          # number of speakers. If not set, the default value is 2.
      "enableSpeakerDiarization": True or False, # *Optional* If `true`, enables speaker detection for each recognized word in
          # the top alternative of the recognition result, using a speaker_tag provided
          # in the WordInfo.
      "maxSpeakerCount": 42, # *Optional* Only used if diarization_speaker_count is not set.
          # Maximum number of speakers in the conversation. This range gives you more
          # flexibility by allowing the system to automatically determine the correct
          # number of speakers. If not set, the default value is 6.
    },
    "speechContexts": [ # *Optional* Array of SpeechContext.
        # A means to provide context to assist the speech recognition. For more
        # information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
      { # Provides "hints" to the speech recognizer to favor specific words and phrases
          # in the results.
        "phrases": [ # *Optional* A list of strings containing word and phrase "hints" so that
            # the speech recognition is more likely to recognize them. This can be used
            # to improve the accuracy for specific words and phrases, for example, if
            # specific commands are typically spoken by the user. This can also be used
            # to add additional words to the vocabulary of the recognizer. See
            # [usage limits](/speech-to-text/quotas#content).
            #
            # List items can also be set to classes for groups of words that represent
            # common concepts that occur in natural language. For example, rather than
            # providing phrase hints for every month of the year, using the $MONTH class
            # improves the likelihood of correctly transcribing audio that includes
            # months.
          "A String",
        ],
        "boost": 3.14, # Hint Boost. A positive value increases the probability that a specific
            # phrase will be recognized over other similar-sounding phrases. The higher
            # the boost, the higher the chance of false positive recognition as well.
            # Negative boost values would correspond to anti-biasing. Anti-biasing is not
            # enabled, so negative boost values will simply be ignored. Though `boost` can
            # accept a wide range of positive values, most use cases are best served with
            # values between 0 and 20. We recommend using a binary search approach to
            # finding the optimal value for your use case.
      },
    ],
    "metadata": { # Description of audio data to be recognized. # *Optional* Metadata regarding this request.
      "recordingDeviceType": "A String", # The type of device the speech was recorded with.
      "originalMediaType": "A String", # The original media the speech was recorded on.
      "microphoneDistance": "A String", # The audio type that most closely describes the audio being recognized.
      "obfuscatedId": "A String", # Obfuscated (privacy-protected) ID of the user, to identify the number of
          # unique users using the service.
      "recordingDeviceName": "A String", # The device used to make the recording. Examples: 'Nexus 5X',
          # 'Polycom SoundStation IP 6000', 'POTS', 'VoIP', or
          # 'Cardioid Microphone'.
      "industryNaicsCodeOfAudio": 42, # The industry vertical to which this speech recognition request most
          # closely applies. This is most indicative of the topics contained
          # in the audio. Use the 6-digit NAICS code to identify the industry
          # vertical - see https://www.naics.com/search/.
      "audioTopic": "A String", # Description of the content. E.g. "Recordings of federal supreme court
          # hearings from 2012".
      "originalMimeType": "A String", # MIME type of the original audio file. For example `audio/m4a`,
          # `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`.
          # A list of possible audio MIME types is maintained at
          # http://www.iana.org/assignments/media-types/media-types.xhtml#audio
      "interactionType": "A String", # The use case most closely describing the audio content to be recognized.
    },
  },
}

  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
      2 - v2 error format

Returns:
  An object of the form:

    { # This resource represents a long-running operation that is the result of a
        # network API call.
      "metadata": { # Service-specific metadata associated with the operation. It typically
          # contains progress information and common metadata such as create time.
          # Some services might not provide such metadata. Any method that returns a
          # long-running operation should document the metadata type, if any.
        "a_key": "", # Properties of the object. Contains field @type with type URL.
      },
      "error": { # The `Status` type defines a logical error model that is suitable for # The error result of the operation in case of failure or cancellation.
          # different programming environments, including REST APIs and RPC APIs. It is
          # used by [gRPC](https://github.com/grpc). Each `Status` message contains
          # three pieces of data: error code, error message, and error details.
          #
          # You can find out more about this error model and how to work with it in the
          # [API Design Guide](https://cloud.google.com/apis/design/errors).
        "message": "A String", # A developer-facing error message, which should be in English. Any
            # user-facing error message should be localized and sent in the
            # google.rpc.Status.details field, or localized by the client.
        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
        "details": [ # A list of messages that carry the error details. There is a common set of
            # message types for APIs to use.
          {
            "a_key": "", # Properties of the object. Contains field @type with type URL.
          },
        ],
      },
      "done": True or False, # If the value is `false`, it means the operation is still in progress.
          # If `true`, the operation is completed, and either `error` or `response` is
          # available.
      "response": { # The normal response of the operation in case of success. If the original
          # method returns no data on success, such as `Delete`, the response is
          # `google.protobuf.Empty`. If the original method is standard
          # `Get`/`Create`/`Update`, the response should be the resource. For other
          # methods, the response should have the type `XxxResponse`, where `Xxx`
          # is the original method name. For example, if the original method name
          # is `TakeSnapshot()`, the inferred response type is
          # `TakeSnapshotResponse`.
        "a_key": "", # Properties of the object. Contains field @type with type URL.
      },
      "name": "A String", # The server-assigned name, which is only unique within the same service that
          # originally returns it. If you use the default HTTP mapping, the
          # `name` should be a resource name ending with `operations/{unique_id}`.
    }</pre>
</div>
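
<p>A hedged usage sketch for <code>longrunningrecognize</code>, assuming the
<code>service</code> object built above: it submits a Google Cloud Storage URI for
transcription and polls the returned operation until <code>done</code> is reported.
The bucket path and config values are illustrative placeholders, and the polling
goes through the <code>operations()</code> collection documented on its own page of
this reference:</p>

<pre>
import time

body = {
    'config': {
        'languageCode': 'en-US',        # required BCP-47 tag of the audio
        'encoding': 'LINEAR16',         # optional for FLAC/WAV, required otherwise
        'sampleRateHertz': 16000,       # 16000 Hz is the recommended rate
        'enableWordTimeOffsets': True,  # request per-word start/end timestamps
    },
    'audio': {
        # Exactly one of `uri` or `content` may be set; this bucket and object
        # name are hypothetical placeholders.
        'uri': 'gs://my-bucket/my-audio.raw',
    },
}

operation = service.speech().longrunningrecognize(body=body).execute()

# Poll the long-running operation by its server-assigned name until `done`
# is true; `error` or `response` is then populated as described above.
while not operation.get('done'):
    time.sleep(10)
    operation = service.operations().get(name=operation['name']).execute()

if 'error' in operation:
    raise RuntimeError(operation['error'].get('message', 'recognition failed'))

result = operation['response']  # a LongRunningRecognizeResponse message
</pre>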

<div class="method">
    <code class="details" id="recognize">recognize(body, x__xgafv=None)</code>
  <pre>Performs synchronous speech recognition: receive results after all audio
has been sent and processed.

Args:
  body: object, The request body. (required)
    The object takes the form of:

{ # The top-level message sent by the client for the `Recognize` method.
  "audio": { # Contains audio data in the encoding specified in the `RecognitionConfig`. # *Required* The audio data to be recognized.
      # Either `content` or `uri` must be supplied. Supplying both or neither
      # returns google.rpc.Code.INVALID_ARGUMENT. See
      # [content limits](/speech-to-text/quotas#content).
    "content": "A String", # The audio data bytes encoded as specified in
        # `RecognitionConfig`. Note: as with all bytes fields, proto buffers use a
        # pure binary representation, whereas JSON representations use base64.
    "uri": "A String", # URI that points to a file that contains audio data bytes as specified in
        # `RecognitionConfig`. The file must not be compressed (for example, gzip).
        # Currently, only Google Cloud Storage URIs are
        # supported, which must be specified in the following format:
        # `gs://bucket_name/object_name` (other URI formats return
        # google.rpc.Code.INVALID_ARGUMENT). For more information, see
        # [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
  },
  "config": { # *Required* Provides information to the recognizer that specifies how to
      # process the request.
    "languageCode": "A String", # *Required* The language of the supplied audio as a
        # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
        # Example: "en-US".
        # See [Language Support](/speech-to-text/docs/languages)
        # for a list of the currently supported language codes.
    "audioChannelCount": 42, # *Optional* The number of channels in the input audio data.
        # ONLY set this for MULTI-CHANNEL recognition.
        # Valid values for LINEAR16 and FLAC are `1`-`8`.
        # Valid values for OGG_OPUS are `1`-`254`.
        # The only valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is `1`.
        # If `0` or omitted, defaults to one channel (mono).
        # Note: We only recognize the first channel by default.
        # To perform independent recognition on each channel, set
        # `enable_separate_recognition_per_channel` to `true`.
    "encoding": "A String", # Encoding of audio data sent in all `RecognitionAudio` messages.
        # This field is optional for `FLAC` and `WAV` audio files and required
        # for all other audio formats. For details, see AudioEncoding.
    "enableAutomaticPunctuation": True or False, # *Optional* If `true`, adds punctuation to recognition result hypotheses.
        # This feature is only available in select languages. Setting this for
        # requests in other languages has no effect.
        # The default `false` value does not add punctuation to result hypotheses.
        # Note: This is currently offered as an experimental service, complimentary
        # to all users. In the future this may be exclusively available as a
        # premium feature.
    "alternativeLanguageCodes": [ # *Optional* A list of up to 3 additional
        # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
        # listing possible alternative languages of the supplied audio.
        # See [Language Support](/speech-to-text/docs/languages)
        # for a list of the currently supported language codes.
        # If alternative languages are listed, the recognition result will contain
        # recognition in the most likely language detected, including the main
        # language_code. The recognition result will include the language tag
        # of the language detected in the audio.
        # Note: This feature is only supported for Voice Command and Voice Search
        # use cases, and performance may vary for other use cases (e.g., phone call
        # transcription).
      "A String",
    ],
    "enableSeparateRecognitionPerChannel": True or False, # This needs to be set to `true` explicitly, with `audio_channel_count` > 1,
        # to get each channel recognized separately. The recognition result will
        # contain a `channel_tag` field to state which channel that result belongs
        # to. If this is not true, we will only recognize the first channel. The
        # request is billed cumulatively for all channels recognized:
        # `audio_channel_count` multiplied by the length of the audio.
    "enableWordTimeOffsets": True or False, # *Optional* If `true`, the top result includes a list of words and
        # the start and end time offsets (timestamps) for those words. If
        # `false`, no word-level time offset information is returned. The default is
        # `false`.
    "enableSpeakerDiarization": True or False, # *Optional* If `true`, enables speaker detection for each recognized word in
        # the top alternative of the recognition result, using a speaker_tag provided
        # in the WordInfo.
        # Note: Use diarization_config instead. This field will be DEPRECATED soon.
    "maxAlternatives": 42, # *Optional* Maximum number of recognition hypotheses to be returned.
        # Specifically, the maximum number of `SpeechRecognitionAlternative` messages
        # within each `SpeechRecognitionResult`.
        # The server may return fewer than `max_alternatives`.
        # Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
        # one. If omitted, will return a maximum of one.
    "profanityFilter": True or False, # *Optional* If set to `true`, the server will attempt to filter out
        # profanities, replacing all but the initial character in each filtered word
        # with asterisks, e.g. "f***". If set to `false` or omitted, profanities
        # won't be filtered out.
    "useEnhanced": True or False, # *Optional* Set to true to use an enhanced model for speech recognition.
        # If `use_enhanced` is set to true and the `model` field is not set, then
        # an appropriate enhanced model is chosen if:
        # 1. the project is eligible for requesting enhanced models, and
        # 2. an enhanced model exists for the audio.
        #
        # If `use_enhanced` is true and an enhanced version of the specified model
        # does not exist, then the speech is recognized using the standard version
        # of the specified model.
        #
        # Enhanced speech models require that you opt in to data logging using
        # instructions in the
        # [documentation](/speech-to-text/docs/enable-data-logging). If you set
        # `use_enhanced` to true and you have not enabled audio logging, then you
        # will receive an error.
    "sampleRateHertz": 42, # Sample rate in Hertz of the audio data sent in all
        # `RecognitionAudio` messages. Valid values are: 8000-48000.
        # 16000 is optimal. For best results, set the sampling rate of the audio
        # source to 16000 Hz. If that's not possible, use the native sample rate of
        # the audio source (instead of re-sampling).
        # This field is optional for FLAC and WAV audio files, but is
        # required for all other audio formats. For details, see AudioEncoding.
    "diarizationSpeakerCount": 42, # *Optional*
        # If set, specifies the estimated number of speakers in the conversation.
        # If not set, defaults to `2`.
        # Ignored unless enable_speaker_diarization is set to true.
        # Note: Use diarization_config instead. This field will be DEPRECATED soon.
    "enableWordConfidence": True or False, # *Optional* If `true`, the top result includes a list of words and the
        # confidence for those words. If `false`, no word-level confidence
        # information is returned. The default is `false`.
    "model": "A String", # *Optional* Which model to select for the given request. Select the model
        # best suited to your domain to get best results. If a model is not
        # explicitly specified, then we auto-select a model based on the parameters
        # in the RecognitionConfig.
        # <table>
        # <tr>
        #   <td><b>Model</b></td>
        #   <td><b>Description</b></td>
        # </tr>
        # <tr>
        #   <td><code>command_and_search</code></td>
        #   <td>Best for short queries such as voice commands or voice search.</td>
        # </tr>
        # <tr>
        #   <td><code>phone_call</code></td>
        #   <td>Best for audio that originated from a phone call (typically
        #       recorded at an 8 kHz sampling rate).</td>
        # </tr>
        # <tr>
        #   <td><code>video</code></td>
        #   <td>Best for audio that originated from video or includes multiple
        #       speakers. Ideally the audio is recorded at a 16 kHz or greater
        #       sampling rate. This is a premium model that costs more than the
        #       standard rate.</td>
        # </tr>
        # <tr>
        #   <td><code>default</code></td>
        #   <td>Best for audio that is not one of the specific audio models.
        #       For example, long-form audio. Ideally the audio is high-fidelity,
        #       recorded at a 16 kHz or greater sampling rate.</td>
        # </tr>
        # </table>
    "diarizationConfig": { # *Optional* Config to enable speaker diarization and set additional
        # parameters to make diarization better suited for your application.
        # Note: When this is enabled, we send all the words from the beginning of the
        # audio for the top alternative in every consecutive STREAMING response.
        # This is done in order to improve our speaker tags as our models learn to
        # identify the speakers in the conversation over time.
        # For non-streaming requests, the diarization results will be provided only
        # in the top alternative of the FINAL SpeechRecognitionResult.
      "minSpeakerCount": 42, # *Optional* Only used if diarization_speaker_count is not set.
          # Minimum number of speakers in the conversation. This range gives you more
          # flexibility by allowing the system to automatically determine the correct
          # number of speakers. If not set, the default value is 2.
      "enableSpeakerDiarization": True or False, # *Optional* If `true`, enables speaker detection for each recognized word in
          # the top alternative of the recognition result, using a speaker_tag provided
          # in the WordInfo.
      "maxSpeakerCount": 42, # *Optional* Only used if diarization_speaker_count is not set.
          # Maximum number of speakers in the conversation. This range gives you more
          # flexibility by allowing the system to automatically determine the correct
          # number of speakers. If not set, the default value is 6.
    },
    "speechContexts": [ # *Optional* Array of SpeechContext.
        # A means to provide context to assist the speech recognition. For more
        # information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
      { # Provides "hints" to the speech recognizer to favor specific words and phrases
          # in the results.
        "phrases": [ # *Optional* A list of strings containing word and phrase "hints" so that
            # the speech recognition is more likely to recognize them. This can be used
            # to improve the accuracy for specific words and phrases, for example, if
            # specific commands are typically spoken by the user. This can also be used
            # to add additional words to the vocabulary of the recognizer. See
            # [usage limits](/speech-to-text/quotas#content).
            #
            # List items can also be set to classes for groups of words that represent
            # common concepts that occur in natural language. For example, rather than
            # providing phrase hints for every month of the year, using the $MONTH class
            # improves the likelihood of correctly transcribing audio that includes
            # months.
          "A String",
        ],
        "boost": 3.14, # Hint Boost. A positive value increases the probability that a specific
            # phrase will be recognized over other similar-sounding phrases. The higher
            # the boost, the higher the chance of false positive recognition as well.
            # Negative boost values would correspond to anti-biasing. Anti-biasing is not
            # enabled, so negative boost values will simply be ignored. Though `boost` can
            # accept a wide range of positive values, most use cases are best served with
            # values between 0 and 20. We recommend using a binary search approach to
            # finding the optimal value for your use case.
      },
    ],
    "metadata": { # Description of audio data to be recognized. # *Optional* Metadata regarding this request.
      "recordingDeviceType": "A String", # The type of device the speech was recorded with.
      "originalMediaType": "A String", # The original media the speech was recorded on.
      "microphoneDistance": "A String", # The audio type that most closely describes the audio being recognized.
      "obfuscatedId": "A String", # Obfuscated (privacy-protected) ID of the user, to identify the number of
          # unique users using the service.
      "recordingDeviceName": "A String", # The device used to make the recording. Examples: 'Nexus 5X',
          # 'Polycom SoundStation IP 6000', 'POTS', 'VoIP', or
          # 'Cardioid Microphone'.
      "industryNaicsCodeOfAudio": 42, # The industry vertical to which this speech recognition request most
          # closely applies. This is most indicative of the topics contained
          # in the audio. Use the 6-digit NAICS code to identify the industry
          # vertical - see https://www.naics.com/search/.
      "audioTopic": "A String", # Description of the content. E.g. "Recordings of federal supreme court
          # hearings from 2012".
      "originalMimeType": "A String", # MIME type of the original audio file. For example `audio/m4a`,
          # `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`.
          # A list of possible audio MIME types is maintained at
          # http://www.iana.org/assignments/media-types/media-types.xhtml#audio
      "interactionType": "A String", # The use case most closely describing the audio content to be recognized.
    },
  },
  "name": "A String", # *Optional* The name of the model to use for recognition.
}

  x__xgafv: string, V1 error format.
    Allowed values
      1 - v1 error format
      2 - v2 error format

Returns:
  An object of the form:

    { # The only message returned to the client by the `Recognize` method. It
        # contains the result as zero or more sequential `SpeechRecognitionResult`
        # messages.
      "results": [ # Output only. Sequential list of transcription results corresponding to
          # sequential portions of audio.
        { # A speech recognition result corresponding to a portion of the audio.
          "languageCode": "A String", # Output only. The
              # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
              # language in this result. This language code was detected as the most
              # likely language being spoken in the audio.
          "alternatives": [ # Output only. May contain one or more recognition hypotheses (up to the
              # maximum specified in `max_alternatives`).
              # These alternatives are ordered in terms of accuracy, with the top (first)
              # alternative being the most probable, as ranked by the recognizer.
            { # Alternative hypotheses (a.k.a. n-best list).
              "confidence": 3.14, # Output only. The confidence estimate between 0.0 and 1.0. A higher number
                  # indicates an estimated greater likelihood that the recognized words are
                  # correct. This field is set only for the top alternative of a non-streaming
                  # result, or of a streaming result where `is_final=true`.
                  # This field is not guaranteed to be accurate and users should not rely on it
                  # to be always provided.
                  # The default of 0.0 is a sentinel value indicating `confidence` was not set.
              "transcript": "A String", # Output only. Transcript text representing the words that the user spoke.
              "words": [ # Output only. A list of word-specific information for each recognized word.
                  # Note: When `enable_speaker_diarization` is true, you will see all the words
                  # from the beginning of the audio.
                { # Word-specific information for recognized words.
                  "confidence": 3.14, # Output only. The confidence estimate between 0.0 and 1.0. A higher number
                      # indicates an estimated greater likelihood that the recognized words are
                      # correct. This field is set only for the top alternative of a non-streaming
                      # result, or of a streaming result where `is_final=true`.
                      # This field is not guaranteed to be accurate and users should not rely on it
                      # to be always provided.
                      # The default of 0.0 is a sentinel value indicating `confidence` was not set.
                  "endTime": "A String", # Output only. Time offset relative to the beginning of the audio,
                      # and corresponding to the end of the spoken word.
                      # This field is only set if `enable_word_time_offsets=true` and only
                      # in the top hypothesis.
                      # This is an experimental feature and the accuracy of the time offset can
                      # vary.
                  "word": "A String", # Output only. The word corresponding to this set of information.
                  "startTime": "A String", # Output only. Time offset relative to the beginning of the audio,
                      # and corresponding to the start of the spoken word.
                      # This field is only set if `enable_word_time_offsets=true` and only
                      # in the top hypothesis.
                      # This is an experimental feature and the accuracy of the time offset can
                      # vary.
                  "speakerTag": 42, # Output only. A distinct integer value is assigned for every speaker within
                      # the audio. This field specifies which one of those speakers was detected to
                      # have spoken this word. Values range from `1` to diarization_speaker_count.
                      # speaker_tag is set only if `enable_speaker_diarization` is `true`, and only
                      # in the top alternative.
                },
              ],
            },
          ],
          "channelTag": 42, # For multi-channel audio, this is the channel number corresponding to the
              # recognized result for the audio from that channel.
              # For audio_channel_count = N, its output values can range from `1` to `N`.
        },
      ],
    }</pre>
</div>
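
<p>A matching sketch for the synchronous <code>recognize</code> method, again
assuming the <code>service</code> object built above. Short clips can be sent
inline through the <code>content</code> field, which carries base64 in JSON
requests as noted in the field description; the local file name is a hypothetical
placeholder:</p>

<pre>
import base64

# Read a short local WAV clip; encoding and sampleRateHertz may be omitted
# for WAV, per the config notes above.
with open('short_clip.wav', 'rb') as f:
    audio_content = base64.b64encode(f.read()).decode('utf-8')

body = {
    'config': {'languageCode': 'en-US'},
    'audio': {'content': audio_content},
}

response = service.speech().recognize(body=body).execute()

# Each result covers a sequential portion of the audio; alternatives are
# ordered most-probable first, so index 0 is the top hypothesis.
for result in response.get('results', []):
    top = result['alternatives'][0]
    print(top.get('confidence'), top['transcript'])
</pre>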

</body></html>